Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/bif.tab                      1
-rw-r--r--  erts/emulator/beam/dist.c                      39
-rw-r--r--  erts/emulator/beam/erl_bif_re.c                30
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c             29
-rw-r--r--  erts/emulator/beam/erl_db_catree.c            141
-rw-r--r--  erts/emulator/beam/erl_db_catree.h              2
-rw-r--r--  erts/emulator/beam/erl_message.c                2
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.c         3
-rw-r--r--  erts/emulator/beam/erl_process.h               41
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h   2
-rw-r--r--  erts/emulator/beam/erl_trace.c                  8
-rw-r--r--  erts/emulator/beam/external.c                  59
-rw-r--r--  erts/emulator/beam/external.h                   2
-rw-r--r--  erts/emulator/beam/ops.tab                      3
14 files changed, 273 insertions(+), 89 deletions(-)
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index db9c258cb7..602db106b1 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -413,6 +413,7 @@ bif re:compile/1
bif re:compile/2
bif re:run/2
bif re:run/3
+bif re:internal_run/4
#
# Bifs in lists module.
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 5e48a553af..4537e3e569 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -80,7 +80,7 @@ dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz)
byte *extp = edep->data->extp;
Eterm msg;
Sint ctl_len;
- Sint size = ctl_len = erts_decode_dist_ext_size(edep, 0);
+ Sint size = ctl_len = erts_decode_dist_ext_size(edep, 0, 0);
if (size < 0) {
erts_fprintf(dbg_file,
"DIST MSG DEBUG: erts_decode_dist_ext_size(%s) failed:\n",
@@ -1462,7 +1462,7 @@ int erts_net_message(Port *prt,
#endif
goto data_error;
case ERTS_PREP_DIST_EXT_SUCCESS:
- ctl_len = erts_decode_dist_ext_size(&ede, 1);
+ ctl_len = erts_decode_dist_ext_size(&ede, 1, 0);
if (ctl_len < 0) {
#ifdef ERTS_DIST_MSG_DBG
erts_fprintf(dbg_file, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n");
@@ -1543,39 +1543,6 @@ int erts_net_message(Port *prt,
edep = erts_get_dist_ext(&seq->hfrag);
ede_hfrag = &seq->hfrag;
- /* If the sequence consisted of more than 1 fragment we create one large
- binary out of all of the fragments. This because erts_decode_ext
- cannot handle a segmented buffer.
- TODO: Move this copy to as late as possible, preferably in in the
- erts_decode_dist_ext in the receiving process.
- */
- if (edep->data->frag_id > 1) {
- Uint sz = 0;
- Binary *bin;
- int i;
- byte *ep;
-
- for (i = 0; i < edep->data->frag_id; i++)
- sz += edep->data[i].ext_endp - edep->data[i].extp;
-
- bin = erts_bin_nrml_alloc(sz);
- ep = (byte*)bin->orig_bytes;
-
- for (i = 0; i < edep->data->frag_id; i++) {
- sys_memcpy(ep, edep->data[i].extp, edep->data[i].ext_endp - edep->data[i].extp);
- ep += edep->data[i].ext_endp - edep->data[i].extp;
- erts_bin_release(edep->data[i].binp);
- edep->data[i].binp = NULL;
- edep->data[i].extp = NULL;
- edep->data[i].ext_endp = NULL;
- }
-
- edep->data->frag_id = 1;
- edep->data->extp = (byte*)bin->orig_bytes;
- edep->data->ext_endp = ep;
- edep->data->binp = bin;
- }
-
break;
}
default:
@@ -3375,7 +3342,7 @@ dist_get_stat_1(BIF_ALIST_1)
am_ok,
erts_bld_sint64(hpp, szp, read),
erts_bld_sint64(hpp, szp, write),
- pend ? am_true : am_false);
+ erts_bld_sint64(hpp, szp, pend));
if (hpp)
break;
hp = HAlloc(BIF_P, sz);
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index e0b9202fe7..b3bf1c7ee3 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -46,7 +46,7 @@ static Export *urun_trap_exportp = NULL;
static Export *ucompile_trap_exportp = NULL;
static BIF_RETTYPE re_exec_trap(BIF_ALIST_3);
-static BIF_RETTYPE re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
+static BIF_RETTYPE re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, int first);
static void *erts_erts_pcre_malloc(size_t size) {
return erts_alloc(ERTS_ALC_T_RE_HEAP,size);
@@ -1094,7 +1094,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
* The actual re:run/2,3 BIFs
*/
static BIF_RETTYPE
-re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
+re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, int first)
{
const pcre *code_tmp;
RestartContext restart;
@@ -1120,6 +1120,14 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
< 0) {
BIF_ERROR(p,BADARG);
}
+ if (!first) {
+ /*
+ * 'first' is false when re:grun() previously has called re:internal_run()
+ * with the same subject; i.e., no need to do yet another validation of
+ * the subject regarding utf8 encoding...
+ */
+ options |= PCRE_NO_UTF8_CHECK;
+ }
is_list_cap = ((pflags & PARSE_FLAG_CAPTURE_OPT) &&
(capture[CAPSPEC_TYPE] == am_list));
@@ -1360,15 +1368,28 @@ handle_iolist:
}
BIF_RETTYPE
+re_internal_run_4(BIF_ALIST_4)
+{
+ int first;
+ if (BIF_ARG_4 == am_false)
+ first = 0;
+ else if (BIF_ARG_4 == am_true)
+ first = !0;
+ else
+ BIF_ERROR(BIF_P,BADARG);
+ return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, first);
+}
+
+BIF_RETTYPE
re_run_3(BIF_ALIST_3)
{
- return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, !0);
}
BIF_RETTYPE
re_run_2(BIF_ALIST_2)
{
- return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, NIL);
+ return re_run(BIF_P,BIF_ARG_1, BIF_ARG_2, NIL, !0);
}
/*
@@ -1407,6 +1428,7 @@ static BIF_RETTYPE re_exec_trap(BIF_ALIST_3)
loop_count = 0xFFFFFFFF;
#endif
rc = erts_pcre_exec(NULL, &(restartp->extra), NULL, 0, 0, 0, NULL, 0);
+
ASSERT(loop_count != 0xFFFFFFFF);
BUMP_REDS(BIF_P, loop_count / LOOP_FACTOR);
if (rc == PCRE_ERROR_LOOP_LIMIT) {
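[Editor's note] The 'first' flag added above lets re:grun() validate a subject's UTF-8 encoding once and skip the check on every later re:internal_run/4 call on the same subject. A minimal standalone sketch of the same pattern against a system libpcre (not the bundled erts_pcre copy; the pattern and subject are illustrative):

#include <pcre.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *err; int erroff, ovec[30];
    const char *subject = "abc abc";
    /* Assumes a libpcre built with UTF-8 support. */
    pcre *re = pcre_compile("abc", PCRE_UTF8, &err, &erroff, NULL);
    if (!re) { fprintf(stderr, "compile failed: %s\n", err); return 1; }

    /* First run on this subject: let PCRE validate the UTF-8 encoding. */
    int rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0, ovec, 30);

    /* Re-runs on the same, already validated subject: skip the check,
       which is what re_run() does when 'first' is false. */
    if (rc >= 0)
        rc = pcre_exec(re, NULL, subject, (int)strlen(subject), ovec[1],
                       PCRE_NO_UTF8_CHECK, ovec, 30);

    printf("second match at offset %d\n", rc >= 0 ? ovec[0] : -1);
    pcre_free(re);
    return 0;
}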
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 711e62c795..b31d5b86cb 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -74,7 +74,7 @@ static void smp_bp_finisher(void* arg);
static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
-static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
+static void new_seq_trace_token(Process* p, int); /* help func for seq_trace_2*/
static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
@@ -1874,7 +1874,7 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
if (current_flag && ( (arg2 == am_true) || (arg2 == am_false)) ) {
/* Flags */
- new_seq_trace_token(p);
+ new_seq_trace_token(p, 0);
flags = unsigned_val(SEQ_TRACE_TOKEN_FLAGS(p));
if (build_result) {
old_value = flags & current_flag ? am_true : am_false;
@@ -1889,11 +1889,11 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
return old_value;
}
else if (arg1 == am_label) {
- new_seq_trace_token(p);
+ new_seq_trace_token(p, is_not_immed(arg2));
if (build_result) {
old_value = SEQ_TRACE_TOKEN_LABEL(p);
}
- SEQ_TRACE_TOKEN_LABEL(p) = arg2;
+ SEQ_TRACE_TOKEN_LABEL(p) = arg2;
return old_value;
}
else if (arg1 == am_serial) {
@@ -1905,7 +1905,7 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
if ((*tp != make_arityval(2)) || is_not_small(*(tp+1)) || is_not_small(*(tp+2))) {
return THE_NON_VALUE;
}
- new_seq_trace_token(p);
+ new_seq_trace_token(p, 0);
if (build_result) {
hp = HAlloc(p,3);
old_value = TUPLE2(hp, SEQ_TRACE_TOKEN_LASTCNT(p),
@@ -1940,8 +1940,8 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
}
}
-void
-new_seq_trace_token(Process* p)
+static void
+new_seq_trace_token(Process* p, int ensure_new_heap)
{
Eterm* hp;
@@ -1953,6 +1953,16 @@ new_seq_trace_token(Process* p)
p->common.id, /* Internal pid */ /* From */
make_small(p->seq_trace_lastcnt));
}
+ else if (ensure_new_heap) {
+ Eterm* tpl = tuple_val(SEQ_TRACE_TOKEN(p));
+ ASSERT(arityval(tpl[0]) == 5);
+ if (ErtsInArea(tpl, OLD_HEAP(p),
+ (OLD_HEND(p) - OLD_HEAP(p))*sizeof(Eterm))) {
+ hp = HAlloc(p, 6);
+ sys_memcpy(hp, tpl, 6*sizeof(Eterm));
+ SEQ_TRACE_TOKEN(p) = make_tuple(hp);
+ }
+ }
}
BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
@@ -2050,10 +2060,7 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
if (have_no_seqtrace(SEQ_TRACE_TOKEN(BIF_P))) {
BIF_RET(am_false);
}
- if (!(is_atom(BIF_ARG_1) || is_small(BIF_ARG_1))) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (SEQ_TRACE_TOKEN_LABEL(BIF_P) != BIF_ARG_1)
+ if (!EQ(BIF_ARG_1, SEQ_TRACE_TOKEN_LABEL(BIF_P)))
BIF_RET(am_false);
seq_trace_update_send(BIF_P);
seq_trace_output(SEQ_TRACE_TOKEN(BIF_P), BIF_ARG_2,
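[Editor's note] The new ensure_new_heap path copies the mutable trace-token tuple off the old heap before a non-immediate label is written into it, and the decision it relies on (ErtsInArea) is a single unsigned pointer comparison. A small illustrative sketch of that shape, not ERTS code; in_area and the buffer are stand-ins:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* One unsigned compare answers "does ptr lie within [start, start+size)?",
   wrapping makes out-of-range pointers compare large. */
static int in_area(const void *ptr, const void *start, size_t size)
{
    return (uintptr_t)((const char *)ptr - (const char *)start) < size;
}

int main(void)
{
    long old_heap[64];
    long *inside = &old_heap[10], *outside = old_heap + 64;
    printf("%d %d\n", in_area(inside, old_heap, sizeof old_heap),
                      in_area(outside, old_heap, sizeof old_heap));
    return 0;
}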
diff --git a/erts/emulator/beam/erl_db_catree.c b/erts/emulator/beam/erl_db_catree.c
index e0d5e44f58..fed4b44a9b 100644
--- a/erts/emulator/beam/erl_db_catree.c
+++ b/erts/emulator/beam/erl_db_catree.c
@@ -166,8 +166,17 @@ static void split_catree(DbTableCATree *tb,
static void join_catree(DbTableCATree *tb,
DbTableCATreeNode *thiz,
DbTableCATreeNode *parent);
-
-
+static ERTS_INLINE
+int try_wlock_base_node(DbTableCATreeBaseNode *base_node);
+static ERTS_INLINE
+void wunlock_base_node(DbTableCATreeNode *base_node);
+static ERTS_INLINE
+void wlock_base_node_no_stats(DbTableCATreeNode *base_node);
+static ERTS_INLINE
+void wunlock_adapt_base_node(DbTableCATree* tb,
+ DbTableCATreeNode* node,
+ DbTableCATreeNode* parent,
+ int current_level);
/*
** External interface
*/
@@ -210,12 +219,16 @@ DbTableMethod db_catree =
* Constants
*/
-#define ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION 200
+#define ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION 250
#define ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION (-1)
+#define ERL_DB_CATREE_LOCK_GRAVITY_CONTRIBUTION (-500)
+#define ERL_DB_CATREE_LOCK_GRAVITY_PATTERN (0xFF800000)
#define ERL_DB_CATREE_LOCK_MORE_THAN_ONE_CONTRIBUTION (-10)
#define ERL_DB_CATREE_HIGH_CONTENTION_LIMIT 1000
#define ERL_DB_CATREE_LOW_CONTENTION_LIMIT (-1000)
-#define ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT 14
+#define ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT 16
+#define ERL_DB_CATREE_LOCK_LOW_NO_CONTRIBUTION_LIMIT (-20000)
+#define ERL_DB_CATREE_LOCK_HIGH_NO_CONTRIBUTION_LIMIT (20000)
/*
* Internal CA tree related helper functions and macros
@@ -245,6 +258,27 @@ DbTableMethod db_catree =
#define SET_LEFT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->u.route.left), (erts_aint_t)(v));
#define SET_RIGHT_RELB(ca_tree_route_node, v) erts_atomic_set_relb(&(ca_tree_route_node->u.route.right), (erts_aint_t)(v));
+/* Change base node lock statistics */
+#define BASE_NODE_STAT_SET(NODE, VALUE) erts_atomic_set_nob(&(NODE)->u.base.lock_statistics, VALUE)
+#define BASE_NODE_STAT_READ(NODE) erts_atomic_read_nob(&(NODE)->u.base.lock_statistics)
+#define BASE_NODE_STAT_ADD(NODE, VALUE) \
+ do { \
+ Sint v = erts_atomic_read_nob(&((NODE)->u.base.lock_statistics)); \
+ ASSERT(VALUE > 0); \
+ if(v < ERL_DB_CATREE_LOCK_HIGH_NO_CONTRIBUTION_LIMIT) { \
+ erts_atomic_set_nob(&(NODE->u.base.lock_statistics), v + VALUE); \
+ } \
+ }while(0);
+#define BASE_NODE_STAT_SUB(NODE, VALUE) \
+ do { \
+ Sint v = erts_atomic_read_nob(&((NODE)->u.base.lock_statistics)); \
+ ASSERT(VALUE < 0); \
+ if(v > ERL_DB_CATREE_LOCK_LOW_NO_CONTRIBUTION_LIMIT) { \
+ erts_atomic_set_nob(&(NODE->u.base.lock_statistics), v + VALUE); \
+ } \
+ }while(0);
+
+
/* Compares a key to the key in a route node */
static ERTS_INLINE Sint cmp_key_route(Eterm key,
DbTableCATreeNode *obj)
@@ -653,10 +687,10 @@ static void dbg_provoke_random_splitjoin(DbTableCATree* tb,
switch (dbg_fastrand() % 8) {
case 1:
- base_node->u.base.lock_statistics = 1+ERL_DB_CATREE_HIGH_CONTENTION_LIMIT;
+ BASE_NODE_STAT_ADD(base_node, 1+ERL_DB_CATREE_HIGH_CONTENTION_LIMIT);
break;
case 2:
- base_node->u.base.lock_statistics = -1+ERL_DB_CATREE_LOW_CONTENTION_LIMIT;
+ BASE_NODE_STAT_SUB(base_node, -1+ERL_DB_CATREE_LOW_CONTENTION_LIMIT);
break;
}
}
@@ -664,6 +698,48 @@ static void dbg_provoke_random_splitjoin(DbTableCATree* tb,
# define dbg_provoke_random_splitjoin(T,N)
#endif /* PROVOKE_RANDOM_SPLIT_JOIN */
+static ERTS_NOINLINE
+void do_random_join(DbTableCATree* tb, Uint rand)
+{
+ DbTableCATreeNode* node = GET_ROOT_ACQB(tb);
+ DbTableCATreeNode* parent = NULL;
+ int level = 0;
+ Sint stat;
+ while (!node->is_base_node) {
+ parent = node;
+ if ((rand & (1 << level)) == 0) {
+ node = GET_LEFT_ACQB(node);
+ } else {
+ node = GET_RIGHT_ACQB(node);
+ }
+ level++;
+ }
+ BASE_NODE_STAT_SUB(node, ERL_DB_CATREE_LOCK_GRAVITY_CONTRIBUTION);
+ stat = BASE_NODE_STAT_READ(node);
+ if (stat >= ERL_DB_CATREE_LOW_CONTENTION_LIMIT &&
+ stat <= ERL_DB_CATREE_HIGH_CONTENTION_LIMIT) {
+ return; /* No adaptation */
+ }
+ if (parent != NULL && !try_wlock_base_node(&node->u.base)) {
+ if (!node->u.base.is_valid) {
+ wunlock_base_node(node);
+ return;
+ }
+ wunlock_adapt_base_node(tb, node, parent, level);
+ }
+}
+
+static ERTS_INLINE
+void do_random_join_with_low_probability(DbTableCATree* tb, Uint seed)
+{
+#ifndef ERTS_DB_CA_TREE_NO_RANDOM_JOIN_WITH_LOW_PROBABILITY
+ Uint32 rand = erts_sched_local_random(seed);
+ if (((rand & ERL_DB_CATREE_LOCK_GRAVITY_PATTERN)) == 0) {
+ do_random_join(tb, rand);
+ }
+#endif
+}
+
static ERTS_INLINE
int try_wlock_base_node(DbTableCATreeBaseNode *base_node)
{
@@ -691,9 +767,9 @@ void wlock_base_node(DbTableCATreeNode *base_node)
if (try_wlock_base_node(&base_node->u.base)) {
/* The lock is contended */
wlock_base_node_no_stats(base_node);
- base_node->u.base.lock_statistics += ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION;
+ BASE_NODE_STAT_ADD(base_node, ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION);
} else {
- base_node->u.base.lock_statistics += ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION;
+ BASE_NODE_STAT_SUB(base_node, ERL_DB_CATREE_LOCK_SUCCESS_CONTRIBUTION);
}
}
@@ -709,13 +785,14 @@ void wunlock_adapt_base_node(DbTableCATree* tb,
DbTableCATreeNode* parent,
int current_level)
{
+ Sint base_node_lock_stat = BASE_NODE_STAT_READ(node);
dbg_provoke_random_splitjoin(tb,node);
if ((!node->u.base.root && parent && !(tb->common.status
& DB_CATREE_FORCE_SPLIT))
- || node->u.base.lock_statistics < ERL_DB_CATREE_LOW_CONTENTION_LIMIT) {
+ || base_node_lock_stat < ERL_DB_CATREE_LOW_CONTENTION_LIMIT) {
join_catree(tb, node, parent);
}
- else if (node->u.base.lock_statistics > ERL_DB_CATREE_HIGH_CONTENTION_LIMIT
+ else if (base_node_lock_stat > ERL_DB_CATREE_HIGH_CONTENTION_LIMIT
&& current_level < ERL_DB_CATREE_MAX_ROUTE_NODE_LAYER_HEIGHT) {
split_catree(tb, node, parent);
}
@@ -728,11 +805,23 @@ static ERTS_INLINE
void rlock_base_node(DbTableCATreeNode *base_node)
{
ASSERT(base_node->is_base_node);
- erts_rwmtx_rlock(&base_node->u.base.lock);
+ if (EBUSY == erts_rwmtx_tryrlock(&base_node->u.base.lock)) {
+ /* The lock is contended */
+ BASE_NODE_STAT_ADD(base_node, ERL_DB_CATREE_LOCK_FAILURE_CONTRIBUTION);
+ erts_rwmtx_rlock(&base_node->u.base.lock);
+ }
+}
+
+static ERTS_INLINE
+void runlock_base_node(DbTableCATreeNode *base_node, DbTableCATree* tb)
+{
+ ASSERT(base_node->is_base_node);
+ erts_rwmtx_runlock(&base_node->u.base.lock);
+ do_random_join_with_low_probability(tb, (Uint)base_node);
}
static ERTS_INLINE
-void runlock_base_node(DbTableCATreeNode *base_node)
+void runlock_base_node_no_rand(DbTableCATreeNode *base_node)
{
ASSERT(base_node->is_base_node);
erts_rwmtx_runlock(&base_node->u.base.lock);
@@ -814,7 +903,7 @@ void unlock_iter_base_node(CATreeRootIterator* iter)
{
ASSERT(iter->locked_bnode);
if (iter->read_only)
- runlock_base_node(iter->locked_bnode);
+ runlock_base_node(iter->locked_bnode, iter->tb);
else if (iter->locked_bnode->u.base.is_valid) {
wunlock_adapt_base_node(iter->tb, iter->locked_bnode,
iter->bnode_parent, iter->bnode_level);
@@ -874,7 +963,7 @@ DbTableCATreeNode* find_rlock_valid_base_node(DbTableCATree* tb, Eterm key)
rlock_base_node(base_node);
if (base_node->u.base.is_valid)
break;
- runlock_base_node(base_node);
+ runlock_base_node_no_rand(base_node);
}
return base_node;
}
@@ -923,8 +1012,8 @@ static DbTableCATreeNode *create_base_node(DbTableCATree *tb,
"erl_db_catree_base_node",
NIL,
ERTS_LOCK_FLAGS_CATEGORY_DB);
- p->u.base.lock_statistics = ((tb->common.status & DB_CATREE_FORCE_SPLIT)
- ? INT_MAX : 0);
+ BASE_NODE_STAT_SET(p, ((tb->common.status & DB_CATREE_FORCE_SPLIT)
+ ? INT_MAX : 0));
p->u.base.is_valid = 1;
return p;
}
@@ -1094,7 +1183,7 @@ static void join_catree(DbTableCATree *tb,
ASSERT(thiz->is_base_node);
if (parent == NULL) {
- thiz->u.base.lock_statistics = 0;
+ BASE_NODE_STAT_SET(thiz, 0);
wunlock_base_node(thiz);
return;
}
@@ -1103,11 +1192,11 @@ static void join_catree(DbTableCATree *tb,
neighbor = leftmost_base_node(GET_RIGHT_ACQB(parent));
if (try_wlock_base_node(&neighbor->u.base)) {
/* Failed to acquire lock */
- thiz->u.base.lock_statistics = 0;
+ BASE_NODE_STAT_SET(thiz, 0);
wunlock_base_node(thiz);
return;
} else if (!neighbor->u.base.is_valid) {
- thiz->u.base.lock_statistics = 0;
+ BASE_NODE_STAT_SET(thiz, 0);
wunlock_base_node(thiz);
wunlock_base_node(neighbor);
return;
@@ -1153,11 +1242,11 @@ static void join_catree(DbTableCATree *tb,
neighbor = rightmost_base_node(GET_LEFT_ACQB(parent));
if (try_wlock_base_node(&neighbor->u.base)) {
/* Failed to acquire lock */
- thiz->u.base.lock_statistics = 0;
+ BASE_NODE_STAT_SET(thiz, 0);
wunlock_base_node(thiz);
return;
} else if (!neighbor->u.base.is_valid) {
- thiz->u.base.lock_statistics = 0;
+ BASE_NODE_STAT_SET(thiz, 0);
wunlock_base_node(thiz);
wunlock_base_node(neighbor);
return;
@@ -1241,7 +1330,7 @@ static void split_catree(DbTableCATree *tb,
if (less_than_two_elements(base->u.base.root)) {
if (!(tb->common.status & DB_CATREE_FORCE_SPLIT))
- base->u.base.lock_statistics = 0;
+ BASE_NODE_STAT_SET(base, 0);
wunlock_base_node(base);
return;
} else {
@@ -1521,7 +1610,7 @@ static int db_get_catree(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
int result = db_get_tree_common(p, &tb->common,
node->u.base.root,
key, ret, NULL);
- runlock_base_node(node);
+ runlock_base_node(node, tb);
return result;
}
@@ -1804,7 +1893,7 @@ static int db_member_catree(DbTable *tbl, Eterm key, Eterm *ret)
int result = db_member_tree_common(&tb->common,
node->u.base.root,
key, ret, NULL);
- runlock_base_node(node);
+ runlock_base_node(node, tb);
return result;
}
@@ -1816,7 +1905,7 @@ static int db_get_element_catree(Process *p, DbTable *tbl,
int result = db_get_element_tree_common(p, &tb->common,
node->u.base.root,
key, ndex, ret, NULL);
- runlock_base_node(node);
+ runlock_base_node(node, tb);
return result;
}
@@ -2250,7 +2339,7 @@ void db_catree_force_split(DbTableCATree* tb, int on)
init_root_iterator(tb, &iter, 1);
root = catree_find_first_root(&iter);
do {
- iter.locked_bnode->u.base.lock_statistics = (on ? INT_MAX : 0);
+ BASE_NODE_STAT_SET(iter.locked_bnode, (on ? INT_MAX : 0));
root = catree_find_next_root(&iter, NULL);
} while (root);
destroy_root_iterator(&iter);
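[Editor's note] The gravity mechanism above only walks the tree and attempts a join when erts_sched_local_random() returns a value whose top nine bits (ERL_DB_CATREE_LOCK_GRAVITY_PATTERN) are all zero, i.e. roughly once per 512 read-unlocks. A rough sketch of that trigger rate, with xorshift32 standing in for the scheduler-local generator (illustrative code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define GRAVITY_PATTERN 0xFF800000u   /* top 9 bits, as in the new define */

static uint32_t xorshift32(uint32_t *s)   /* stand-in for erts_sched_local_random() */
{
    uint32_t x = *s;
    x ^= x << 13; x ^= x >> 17; x ^= x << 5;
    return *s = x;
}

int main(void)
{
    uint32_t state = 0x9E3779B9u;
    unsigned hits = 0, trials = 1u << 20;
    for (unsigned i = 0; i < trials; i++)
        if ((xorshift32(&state) & GRAVITY_PATTERN) == 0)
            hits++;
    /* 9 pattern bits => the join path is taken about once per 512 unlocks. */
    printf("hits: %u (expected ~%u)\n", hits, trials / 512);
    return 0;
}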
diff --git a/erts/emulator/beam/erl_db_catree.h b/erts/emulator/beam/erl_db_catree.h
index cf3498dabb..c2c884eee3 100644
--- a/erts/emulator/beam/erl_db_catree.h
+++ b/erts/emulator/beam/erl_db_catree.h
@@ -42,7 +42,7 @@ typedef struct {
typedef struct {
erts_rwmtx_t lock; /* The lock for this base node */
- Sint lock_statistics;
+ erts_atomic_t lock_statistics;
int is_valid; /* If this base node is still valid */
TreeDbTerm *root; /* The root of the sequential tree */
ErtsThrPrgrLaterOp free_item; /* Used when freeing using thread progress */
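[Editor's note] lock_statistics becomes an erts_atomic_t so readers can bump it without holding the write lock; the BASE_NODE_STAT_ADD/SUB macros added in erl_db_catree.c deliberately use a racy read-then-set and stop contributing once the value drifts past +/-20000. A sketch of the same saturating-counter idea using C11 atomics (limits copied from the patch, function names illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define HIGH_NO_CONTRIBUTION_LIMIT  20000
#define LOW_NO_CONTRIBUTION_LIMIT  (-20000)

static void stat_add(atomic_long *stat, long value)   /* value > 0 */
{
    long v = atomic_load_explicit(stat, memory_order_relaxed);
    if (v < HIGH_NO_CONTRIBUTION_LIMIT)
        atomic_store_explicit(stat, v + value, memory_order_relaxed);
}

static void stat_sub(atomic_long *stat, long value)   /* value < 0 */
{
    long v = atomic_load_explicit(stat, memory_order_relaxed);
    if (v > LOW_NO_CONTRIBUTION_LIMIT)
        atomic_store_explicit(stat, v + value, memory_order_relaxed);
}

int main(void)
{
    atomic_long stats = 0;
    stat_add(&stats, 250);   /* lock failure contribution */
    stat_sub(&stats, -1);    /* lock success contribution */
    printf("%ld\n", atomic_load_explicit(&stats, memory_order_relaxed));
    return 0;
}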
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 6645341512..1bebf6efe2 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -527,7 +527,7 @@ erts_msg_attached_data_size_aux(ErtsMessage *msg)
if (edep->heap_size < 0) {
- sz = erts_decode_dist_ext_size(edep, 1);
+ sz = erts_decode_dist_ext_size(edep, 1, 1);
if (sz < 0) {
/* Bad external
* We leave the message intact in this case as it's not worth the trouble
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index f58a606d57..fb900ca7ba 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -3028,7 +3028,7 @@ erts_proc_sig_decode_dist(Process *proc, ErtsProcLocks proc_locks,
if (edep->heap_size >= 0)
need = edep->heap_size;
else {
- need = erts_decode_dist_ext_size(edep, 1);
+ need = erts_decode_dist_ext_size(edep, 1, 1);
if (need < 0) {
/* bad signal; remove it... */
return 0;
@@ -4051,6 +4051,7 @@ erts_proc_sig_signal_size(ErtsSignal *sig)
case ERTS_MON_TYPE_DIST_PROC:
case ERTS_MON_TYPE_NODE:
size = erts_monitor_size((ErtsMonitor *) sig);
+ break;
default:
ERTS_INTERNAL_ERROR("Unexpected sig type");
break;
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 6118c671ee..0d6b512f78 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1230,9 +1230,10 @@ void erts_check_for_holes(Process* p);
/* The sequential tracing token is a tuple of size 5:
*
- * {Flags, Label, Serial, Sender}
+ * {Flags, Label, Serial, Sender, LastCnt}
+ *
+ * WARNING: The top 5-tuple is *MUTABLE* and thus INTERNAL ONLY.
*/
-
#define SEQ_TRACE_TOKEN_ARITY(p) (arityval(*(tuple_val(SEQ_TRACE_TOKEN(p)))))
#define SEQ_TRACE_TOKEN_FLAGS(p) (*(tuple_val(SEQ_TRACE_TOKEN(p)) + 1))
#define SEQ_TRACE_TOKEN_LABEL(p) (*(tuple_val(SEQ_TRACE_TOKEN(p)) + 2))
@@ -2631,6 +2632,9 @@ void erts_notify_inc_runq(ErtsRunQueue *runq);
void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
void erts_aux_thread_poke(void);
+ERTS_GLB_INLINE Uint32 erts_sched_local_random_hash_64_to_32_shift(Uint64 key);
+ERTS_GLB_INLINE Uint32 erts_sched_local_random(Uint additional_seed);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -2647,6 +2651,39 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
}
+/*
+ * Source: https://gist.github.com/badboy/6267743
+ * http://web.archive.org/web/20071223173210/http://www.concentric.net/~Ttwang/tech/inthash.htm
+ */
+ERTS_GLB_INLINE
+Uint32 erts_sched_local_random_hash_64_to_32_shift(Uint64 key)
+{
+ key = (~key) + (key << 18); /* key = (key << 18) - key - 1; */
+ key = key ^ (key >> 31);
+ key = (key + (key << 2)) + (key << 4);
+ key = key ^ (key >> 11);
+ key = key + (key << 6);
+ key = key ^ (key >> 22);
+ return (Uint32) key;
+}
+
+/*
+ * This function attempts to return a random number based on the state
+ * of the scheduler, the current process and the additional_seed
+ * parameter.
+ */
+ERTS_GLB_INLINE
+Uint32 erts_sched_local_random(Uint additional_seed)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Uint64 seed =
+ additional_seed +
+ esdp->reductions +
+ esdp->current_process->fcalls +
+ (((Uint64)esdp->no) << 32);
+ return erts_sched_local_random_hash_64_to_32_shift(seed);
+}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
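[Editor's note] erts_sched_local_random() mixes the scheduler id, reduction count, process fcalls and a caller-supplied seed through the 64-to-32 bit shift hash cited above, so even consecutive seeds spread across the 32-bit range. A standalone copy of the mix for experimentation (assumes nothing beyond the C standard library):

#include <stdint.h>
#include <stdio.h>

static uint32_t hash_64_to_32_shift(uint64_t key)
{
    key = (~key) + (key << 18);              /* key = (key << 18) - key - 1 */
    key = key ^ (key >> 31);
    key = (key + (key << 2)) + (key << 4);   /* key *= 21 */
    key = key ^ (key >> 11);
    key = key + (key << 6);
    key = key ^ (key >> 22);
    return (uint32_t) key;
}

int main(void)
{
    /* Consecutive seed values map to well-spread 32-bit outputs. */
    for (uint64_t k = 0; k < 4; k++)
        printf("%llu -> 0x%08x\n",
               (unsigned long long)k, hash_64_to_32_shift(k));
    return 0;
}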
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index 74cc966cbe..6f715ae80d 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -49,7 +49,7 @@ do { \
#endif
#ifdef DEBUG
-extern Uint erts_no_schedulers;
+extern Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
#endif
#define ERTS_SSPA_FORCE_THR_CHECK_PROGRESS 10
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index c85a7df5ec..9c835ac357 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -713,7 +713,9 @@ trace_sched(Process *p, ErtsProcLocks locks, Eterm what)
trace_sched_aux(p, locks, what);
}
-/* Send {trace_ts, Pid, Send, Msg, DestPid, Timestamp}
+/* Send {trace_ts, Pid, Send, Msg, DestPid, PamResult, Timestamp}
+ * or {trace_ts, Pid, Send, Msg, DestPid, Timestamp}
+ * or {trace, Pid, Send, Msg, DestPid, PamResult}
* or {trace, Pid, Send, Msg, DestPid}
*
* where 'Send' is 'send' or 'send_to_non_existing_process'.
@@ -773,7 +775,9 @@ trace_send(Process *p, Eterm to, Eterm msg)
erts_match_set_release_result_trace(p, pam_result);
}
-/* Send {trace_ts, Pid, receive, Msg, Timestamp}
+/* Send {trace_ts, Pid, receive, Msg, PamResult, Timestamp}
+ * or {trace_ts, Pid, receive, Msg, Timestamp}
+ * or {trace, Pid, receive, Msg, PamResult}
* or {trace, Pid, receive, Msg}
*/
void
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index ec67ab2aed..ce61cdf040 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1062,11 +1062,38 @@ bad_dist_ext(ErtsDistExternal *edep)
}
Sint
-erts_decode_dist_ext_size(ErtsDistExternal *edep, int kill_connection)
+erts_decode_dist_ext_size(ErtsDistExternal *edep, int kill_connection, int payload)
{
Sint res;
byte *ep;
+ if (edep->data->frag_id > 1 && payload) {
+ Uint sz = 0;
+ Binary *bin;
+ int i;
+ byte *ep;
+
+ for (i = 0; i < edep->data->frag_id; i++)
+ sz += edep->data[i].ext_endp - edep->data[i].extp;
+
+ bin = erts_bin_nrml_alloc(sz);
+ ep = (byte*)bin->orig_bytes;
+
+ for (i = 0; i < edep->data->frag_id; i++) {
+ sys_memcpy(ep, edep->data[i].extp, edep->data[i].ext_endp - edep->data[i].extp);
+ ep += edep->data[i].ext_endp - edep->data[i].extp;
+ erts_bin_release(edep->data[i].binp);
+ edep->data[i].binp = NULL;
+ edep->data[i].extp = NULL;
+ edep->data[i].ext_endp = NULL;
+ }
+
+ edep->data->frag_id = 1;
+ edep->data->extp = (byte*)bin->orig_bytes;
+ edep->data->ext_endp = ep;
+ edep->data->binp = bin;
+ }
+
if (edep->data->extp >= edep->data->ext_endp)
goto fail;
#ifndef ERTS_DEBUG_USE_DIST_SEP
@@ -1164,6 +1191,7 @@ Eterm erts_decode_ext(ErtsHeapFactory* factory, byte **ext, Uint32 flags)
if (flags) {
ASSERT(flags == ERTS_DIST_EXT_BTT_SAFE);
ede.flags = flags; /* a dummy struct just for the flags */
+ ede.data = NULL;
edep = &ede;
} else {
edep = NULL;
@@ -1233,8 +1261,10 @@ BIF_RETTYPE erts_debug_dist_ext_to_term_2(BIF_ALIST_2)
ede.data->extp = binary_bytes(real_bin)+offset;
ede.data->ext_endp = ede.data->extp + size;
+ ede.data->frag_id = 1;
+ ede.data->binp = NULL;
- hsz = erts_decode_dist_ext_size(&ede, 1);
+ hsz = erts_decode_dist_ext_size(&ede, 1, 1);
if (hsz < 0)
goto badarg;
@@ -1765,6 +1795,7 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Eterm bin, B2TContext *ctx)
case B2TDecodeBinary: {
ErtsDistExternal fakedep;
fakedep.flags = ctx->flags;
+ fakedep.data = NULL;
dec_term(&fakedep, NULL, NULL, NULL, ctx);
break;
}
@@ -3762,6 +3793,30 @@ dec_term_atom_common:
hp += heap_bin_size(n);
sys_memcpy(hb->data, ep, n);
*objp = make_binary(hb);
+ } else if (edep && edep->data && edep->data->binp &&
+ n > (edep->data->binp->orig_size / 4)) {
+ /* If we decode a refc binary from a distribution data
+ entry we know that it is a refc binary to begin with
+ so we just increment it and use the reference. This
+ means that the entire distribution data entry will
+ remain until this binary is de-allocated so we only
+ do it if a substantial part (> 25%) of the data
+ is a binary. */
+ ProcBin* pb = (ProcBin *) hp;
+ Binary* bptr = edep->data->binp;
+ erts_refc_inc(&bptr->intern.refc, 1);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = n;
+ pb->next = factory->off_heap->first;
+ factory->off_heap->first = (struct erl_off_heap_header*)pb;
+ pb->val = bptr;
+ pb->bytes = (byte*) ep;
+ ERTS_ASSERT((byte*)(bptr->orig_bytes) < ep &&
+ ep+n <= (byte*)(bptr->orig_bytes+bptr->orig_size));
+ pb->flags = 0;
+ OH_OVERHEAD(factory->off_heap, pb->size / sizeof(Eterm));
+ hp += PROC_BIN_SIZE;
+ *objp = make_binary(pb);
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
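[Editor's note] The fragment-merging loop that used to live in erts_net_message() now runs inside erts_decode_dist_ext_size() when a payload spans more than one fragment, because the decoder cannot walk a segmented buffer. A minimal sketch of that coalescing step in isolation (struct frag, coalesce and malloc are stand-ins, not ERTS APIs):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct frag { const unsigned char *start, *end; };

/* Copy every fragment into one freshly allocated contiguous buffer. */
static unsigned char *coalesce(const struct frag *frags, int nfrags, size_t *size_out)
{
    size_t total = 0;
    for (int i = 0; i < nfrags; i++)
        total += (size_t)(frags[i].end - frags[i].start);

    unsigned char *buf = malloc(total);
    if (!buf)
        return NULL;
    unsigned char *p = buf;
    for (int i = 0; i < nfrags; i++) {
        size_t len = (size_t)(frags[i].end - frags[i].start);
        memcpy(p, frags[i].start, len);
        p += len;
    }
    *size_out = total;
    return buf;
}

int main(void)
{
    const unsigned char a[] = "hello ", b[] = "world";
    struct frag frags[2] = { { a, a + 6 }, { b, b + 5 } };
    size_t sz;
    unsigned char *buf = coalesce(frags, 2, &sz);
    printf("%.*s (%zu bytes)\n", (int)sz, buf, sz);
    free(buf);
    return 0;
}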
diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h
index b556c9076c..e362a6c81f 100644
--- a/erts/emulator/beam/external.h
+++ b/erts/emulator/beam/external.h
@@ -199,7 +199,7 @@ typedef enum {
ErtsPrepDistExtRes erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint, struct binary *,
DistEntry *, Uint32, ErtsAtomCache *);
-Sint erts_decode_dist_ext_size(ErtsDistExternal *, int);
+Sint erts_decode_dist_ext_size(ErtsDistExternal *, int, int);
Eterm erts_decode_dist_ext(ErtsHeapFactory*, ErtsDistExternal *, int);
Sint erts_decode_ext_size(byte*, Uint);
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index e9107933f9..b9d4f6afcc 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -703,7 +703,7 @@ get_tuple_element Reg=x P1 D1=x | \
get_tuple_element Reg=x P2 D2=x | \
get_tuple_element Reg=x P3 D3=x | \
succ(P1, P2) | succ(P2, P3) | succ(D1, D2) | succ(D2, D3) | \
- distinct(D1, Reg) | distinct(D2, Reg) | distinct(D3, Reg) => \
+ distinct(D1, Reg) | distinct(D2, Reg) => \
i_get_tuple_element3 Reg P1 D1
get_tuple_element Reg=x P1 D1=x | \
@@ -1692,6 +1692,7 @@ i_increment rxy W d
# Handle unoptimized code.
i_plus S1=c S2=c Fail Dst => move S1 x | i_plus x S2 Fail Dst
+i_plus S1=c S2=xy Fail Dst => i_plus S2 S1 Fail Dst
i_plus xy xyc j? d