Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/beam_bif_load.c    |  27
-rw-r--r--  erts/emulator/beam/beam_bp.c          |  60
-rw-r--r--  erts/emulator/beam/beam_bp.h          |   6
-rw-r--r--  erts/emulator/beam/beam_emu.c         | 239
-rw-r--r--  erts/emulator/beam/beam_load.c        |  11
-rw-r--r--  erts/emulator/beam/break.c            |   2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c     |   4
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c    |   7
-rw-r--r--  erts/emulator/beam/erl_gc.c           | 206
-rw-r--r--  erts/emulator/beam/erl_gc.h           |   5
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c     |   5
-rw-r--r--  erts/emulator/beam/erl_init.c         |   1
-rw-r--r--  erts/emulator/beam/erl_lock_check.c   |   2
-rw-r--r--  erts/emulator/beam/erl_message.c      |   2
-rw-r--r--  erts/emulator/beam/erl_nif.c          |   4
-rw-r--r--  erts/emulator/beam/erl_process.c      | 258
-rw-r--r--  erts/emulator/beam/erl_process.h      |   2
-rw-r--r--  erts/emulator/beam/erl_process_dump.c |   3
-rw-r--r--  erts/emulator/beam/erl_trace.c        |   5
-rw-r--r--  erts/emulator/beam/export.c           |   2
-rw-r--r--  erts/emulator/beam/external.c         |  25
-rw-r--r--  erts/emulator/beam/index.c            |   9
-rw-r--r--  erts/emulator/beam/index.h            |  14
-rw-r--r--  erts/emulator/beam/sys.h              |   3
24 files changed, 633 insertions(+), 269 deletions(-)
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index ea1323d651..7f5e9ff691 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -658,7 +658,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
else if (modp->old.code_hdr) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+ erts_dsprintf(dsbufp, "Module %T must be purged before deleting\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
@@ -786,7 +786,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
}
if (BIF_ARG_2 == am_true) {
- int i;
+ int i, num_exps;
/*
* Make the code with the on_load function current.
@@ -802,7 +802,8 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
/*
* The on_load function succeeded. Fix up export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
if (ep == NULL || ep->code[0] != BIF_ARG_1) {
continue;
@@ -822,14 +823,15 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
modp->curr.code_hdr->on_load_function_ptr = NULL;
set_default_trace_pattern(BIF_ARG_1);
} else if (BIF_ARG_2 == am_false) {
- int i;
+ int i, num_exps;
/*
* The on_load function failed. Remove references to the
* code that is about to be purged from the export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
if (ep == NULL || ep->code[0] != BIF_ARG_1) {
continue;
@@ -995,6 +997,12 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize))
goto literal_gc;
*redsp += 1;
+ if (c_p->abandoned_heap) {
+ if (any_heap_refs(c_p->abandoned_heap, c_p->abandoned_heap + c_p->heap_sz,
+ literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ }
if (any_heap_refs(c_p->old_heap, c_p->old_htop, literals, lit_bsize))
goto literal_gc;
@@ -1295,6 +1303,11 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
#endif
if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
goto try_literal_gc;
+ if (rp->abandoned_heap) {
+ if (any_heap_refs(rp->abandoned_heap, rp->abandoned_heap + rp->heap_sz,
+ literals, lit_bsize))
+ goto try_literal_gc;
+ }
if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
goto try_literal_gc;
@@ -2022,9 +2035,9 @@ delete_code(Module* modp)
{
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = make_atom(modp->module);
- int i;
+ int i, num_exps = export_list_size(code_ix);
- for (i = 0; i < export_list_size(code_ix); i++) {
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
if (ep != NULL && (ep->code[0] == module)) {
if (ep->addressv[code_ix] == ep->code+3) {
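The hunks above hoist export_list_size() out of the loop condition into num_exps. Because entries can be published to the export table concurrently (see the memory barriers added to index.c/index.h later in this diff), reading the size once gives a stable bound for the whole scan instead of a moving target. A minimal stand-alone sketch of the pattern, with made-up names and data:

    #include <stdio.h>

    /* stand-in for the concurrently growing export table */
    static const char *export_names[] = {"m:f/0", "m:g/1", "m:h/2"};
    static int n_exports = 3;

    static int export_list_size_snapshot(void) { return n_exports; }

    int main(void)
    {
        int i, num_exps = export_list_size_snapshot(); /* one snapshot */
        for (i = 0; i < num_exps; i++)                 /* stable bound */
            printf("%s\n", export_names[i]);
        return 0;
    }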
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index bbb2e4f34f..0df2df0eaa 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -74,6 +74,9 @@ extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
erts_smp_atomic32_t erts_active_bp_index;
erts_smp_atomic32_t erts_staging_bp_index;
+#ifdef ERTS_DIRTY_SCHEDULERS
+erts_smp_mtx_t erts_dirty_bp_ix_mtx;
+#endif
/*
* Inlined helpers
@@ -85,6 +88,31 @@ get_mtime(Process *c_p)
return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}
+static ERTS_INLINE Uint32
+acquire_bp_sched_ix(Process *c_p)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
+ return (Uint32) erts_no_schedulers;
+ }
+#endif
+ return (Uint32) esdp->no - 1;
+}
+
+static ERTS_INLINE void
+release_bp_sched_ix(Uint32 ix)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ix == (Uint32) erts_no_schedulers)
+ erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
+#endif
+}
+
+
+
/* *************************************************************************
** Local prototypes
*/
@@ -135,6 +163,9 @@ void
erts_bp_init(void) {
erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index");
+#endif
}
@@ -973,6 +1004,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(c_p);
ASSERT(c_p);
ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
@@ -980,7 +1012,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* get previous timestamp and breakpoint
* from the process psd */
-
+
pbt = ERTS_PROC_GET_CALL_TIME(c_p);
time = get_mtime(c_p);
@@ -1006,7 +1038,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* if null then the breakpoint was removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1027,7 +1059,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* this breakpoint */
ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(bdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1041,6 +1073,8 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
pbt->pc = I;
pbt->time = time;
+
+ release_bp_sched_ix(six);
}
void
@@ -1051,6 +1085,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
@@ -1071,6 +1106,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
*/
if (pbt) {
+
/* might have been removed due to
* trace_pattern(false)
*/
@@ -1085,7 +1121,8 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
/* beware, the trace_pattern might have been removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1096,11 +1133,15 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
} else {
BP_TIME_ADD(item, &sitem);
}
+
}
pbt->pc = pc;
pbt->time = time;
+
}
+
+ release_bp_sched_ix(six);
}
int
@@ -1353,6 +1394,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
@@ -1375,7 +1417,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
sitem.pid = p->common.id;
sitem.count = 0;
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1401,6 +1443,8 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
break;
}
} /* pbt */
+
+ release_bp_sched_ix(six);
}
/* *************************************************************************
@@ -1517,7 +1561,11 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
bdt = Alloc(sizeof(BpDataTime));
erts_refc_init(&bdt->refc, 1);
- bdt->n = erts_no_total_schedulers;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ bdt->n = erts_no_schedulers + 1;
+#else
+ bdt->n = erts_no_schedulers;
+#endif
bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
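The call-time tracing hash now has one slot per normal scheduler plus a single extra slot shared by all dirty schedulers, serialized by erts_dirty_bp_ix_mtx; acquire_bp_sched_ix()/release_bp_sched_ix() hide that difference from the callers. A self-contained sketch of the same layout, using pthreads and hypothetical names:

    #include <pthread.h>

    #define N_NORMAL_SCHEDULERS 8           /* assumed fixed for the sketch */

    typedef struct { unsigned long calls; } slot_t;

    /* one slot per normal scheduler, plus one shared dirty slot */
    static slot_t slots[N_NORMAL_SCHEDULERS + 1];
    static pthread_mutex_t dirty_mtx = PTHREAD_MUTEX_INITIALIZER;

    static unsigned acquire_slot(int is_dirty, unsigned sched_no)
    {
        if (is_dirty) {
            pthread_mutex_lock(&dirty_mtx); /* dirty schedulers serialize */
            return N_NORMAL_SCHEDULERS;     /* ...on the extra slot       */
        }
        return sched_no - 1;                /* scheduler numbers are 1-based */
    }

    static void release_slot(unsigned ix)
    {
        if (ix == N_NORMAL_SCHEDULERS)
            pthread_mutex_unlock(&dirty_mtx);
    }

    void count_call(int is_dirty, unsigned sched_no)
    {
        unsigned ix = acquire_slot(is_dirty, sched_no);
        slots[ix].calls++;                  /* race-free by construction */
        release_slot(ix);
    }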
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 7206ef471a..4743e4fc2f 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -79,10 +79,8 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#ifdef ERTS_SMP
-#define bp_sched2ix_proc(p) (erts_proc_sched_data(p)->thr_id - 1)
-#else
-#define bp_sched2ix_proc(p) (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern erts_smp_mtx_t erts_dirty_bp_ix_mtx;
#endif
enum erts_break_op{
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 59a9ea1417..f392feb06b 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1047,9 +1047,11 @@ static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
Eterm* reg, BifFunction bf) NOINLINE;
static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
Eterm* reg, Eterm func) NOINLINE;
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* apply(Process* p, Eterm module, Eterm function,
- Eterm args, Eterm* reg) NOINLINE;
+ Eterm args, Eterm* reg,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
Eterm* reg, Eterm args) NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
@@ -3194,7 +3196,7 @@ do { \
OpCase(i_apply): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+1);
@@ -3208,7 +3210,7 @@ do { \
OpCase(i_apply_last_P): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, Arg(0));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3223,7 +3225,7 @@ do { \
OpCase(i_apply_only): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_I(next);
@@ -3237,7 +3239,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+2);
@@ -3252,7 +3254,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), I, Arg(1));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3550,18 +3552,11 @@ do { \
BifFunction vbf;
ErlHeapFragment *live_hf_end;
- if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the nif */
- goto context_switch;
- }
-
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
+ HEAVY_SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = I[-1];
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
@@ -5844,12 +5839,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
s->depth = 0;
/*
- * If the failure was in a BIF other than 'error', 'exit' or
- * 'throw', find the bif-table index and save the argument
- * registers by consing up an arglist.
+ * If the failure was in a BIF other than 'error/1', 'error/2',
+ * 'exit/1' or 'throw/1', find the bif-table index and save the
+ * argument registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 &&
- bf != exit_1 && bf != throw_1) {
+ if (bf != NULL && bf != error_1 && bf != error_2 && bf != exit_1
+ && bf != throw_1 && bf != wrap_error_1 && bf != wrap_error_2
+ && bf != wrap_exit_1 && bf != wrap_throw_1) {
int i;
int a = 0;
for (i = 0; i < BIF_SIZE; i++) {
@@ -5945,12 +5941,30 @@ erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
p->cp) {
/* Cannot follow cp here - code may be unloaded */
BeamInstr *cpp = p->cp;
+ int trace_cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ trace_cp = 1;
} else if (cpp == beam_return_to_trace) {
/* Skip return_to_trace parameters */
ptr += 1;
+ trace_cp = 1;
+ }
+ else {
+ trace_cp = 0;
+ }
+ if (trace_cp && s->pc == cpp) {
+ /*
+ * If the process' 'cp' points to a return/exception trace
+ * instruction and 'cp' has been saved as 'pc' in the
+ * stacktrace, we need to update 'pc' in the stacktrace
+ * with the actual 'cp' located on the top of the
+ * stack; otherwise, we will lose the top stackframe
+ * when building the stack trace.
+ */
+ ASSERT(is_CP(p->stop[0]));
+ s->pc = cp_val(p->stop[0]);
}
}
while (ptr < STACK_START(p) && depth > 0) {
@@ -6070,12 +6084,15 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_set_current_function(&fi, s->current);
}
+ depth = s->depth;
/*
- * If fi.current is still NULL, default to the initial function
+ * If fi.current is still NULL, and we have no
+ * stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
if (fi.current == NULL) {
- erts_set_current_function(&fi, c_p->u.initial);
+ if (depth <= 0)
+ erts_set_current_function(&fi, c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -6085,10 +6102,9 @@ build_stacktrace(Process* c_p, Eterm exc) {
* Look up all saved continuation pointers and calculate
* needed heap space.
*/
- depth = s->depth;
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.needed + 2;
+ heap_size = fi.current ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
if (stkp->current) {
@@ -6107,8 +6123,10 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- hp = erts_build_mfa_item(&fi, hp, args, &mfa);
- res = CONS(hp, mfa, res);
+ if (fi.current) {
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+ }
erts_free(ERTS_ALC_T_TMP, (void *) stk);
return res;
@@ -6205,8 +6223,107 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
return ep;
}
+static ERTS_INLINE void
+apply_bif_error_adjustment(Process *p, Export *ep,
+ Eterm *reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
+{
+ /*
+ * I is only set when the apply is a tail call, i.e.,
+ * from the instructions i_apply_only, i_apply_last_P,
+ * and apply_last_IP.
+ */
+ if (I
+ && ep->code[3] == (BeamInstr) em_apply_bif
+ && (ep == bif_export[BIF_error_1]
+ || ep == bif_export[BIF_error_2]
+ || ep == bif_export[BIF_exit_1]
+ || ep == bif_export[BIF_throw_1])) {
+ /*
+ * We are about to tail apply one of the BIFs
+ * erlang:error/1, erlang:error/2, erlang:exit/1,
+ * or erlang:throw/1. Error handling of these BIFs is
+ * special!
+ *
+ * We need 'p->cp' to point into the calling
+ * function when handling the error after the BIF has
+ * been applied. This is in order to get the topmost
+ * stackframe correct. Without the following adjustment,
+ * 'p->cp' will point into the function that called the
+ * current function when handling the error. We add a
+ * dummy stackframe in order to achieve this.
+ *
+ * Note that these BIFs unconditionally will cause
+ * an exception to be raised. That is, our modifications
+ * of 'p->cp' as well as the stack will be corrected by
+ * the error handling code.
+ *
+ * If we find an exception/return-to trace continuation
+ * pointer as the topmost continuation pointer, we do not
+ * need to do anything since the information already will
+ * be available for generation of the stacktrace.
+ */
+ int apply_only = stack_offset == 0;
+ BeamInstr *cpp;
+
+ if (apply_only) {
+ ASSERT(p->cp != NULL);
+ cpp = p->cp;
+ }
+ else {
+ ASSERT(is_CP(p->stop[0]));
+ cpp = cp_val(p->stop[0]);
+ }
+
+ if (cpp != beam_exception_trace
+ && cpp != beam_return_trace
+ && cpp != beam_return_to_trace) {
+ Uint need = stack_offset /* bytes */ / sizeof(Eterm);
+ if (need == 0)
+ need = 1; /* i_apply_only */
+ if (p->stop - p->htop < need)
+ erts_garbage_collect(p, (int) need, reg, arity+1);
+ p->stop -= need;
+
+ if (apply_only) {
+ /*
+ * Called from the i_apply_only instruction.
+ *
+ * 'p->cp' contains a continuation pointer pointing
+ * into the function that called the current function.
+ * We push that continuation pointer onto the stack,
+ * and set 'p->cp' to point into the current function.
+ */
+
+ p->stop[0] = make_cp(p->cp);
+ p->cp = I;
+ }
+ else {
+ /*
+ * Called from an i_apply_last_P, or apply_last_IP,
+ * instruction.
+ *
+ * The calling instruction will, after we return, read
+ * a continuation pointer from the stack and write
+ * it to 'p->cp', and then remove the topmost
+ * stackframe of size 'stack_offset'.
+ *
+ * We have sized the dummy stackframe so that
+ * it will be removed by the instruction we
+ * are currently executing, leaving the
+ * stackframe that normally would have been
+ * removed intact.
+ */
+ p->stop[0] = make_cp(I);
+ }
+ }
+ }
+}
+
static BeamInstr*
-apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+apply(Process* p, Eterm module, Eterm function,
+      Eterm args, Eterm* reg,
+      BeamInstr *I, Uint stack_offset)
{
int arity;
Export* ep;
@@ -6231,21 +6348,54 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- this = THE_NON_VALUE;
- if (is_not_atom(module)) {
- Eterm* tp;
+ while (1) {
+ Eterm m, f, a;
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ this = THE_NON_VALUE;
+ if (is_not_atom(module)) {
+ Eterm* tp;
+
+ if (is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ this = module;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ }
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- this = module;
- module = tp[1];
- if (is_not_atom(module)) goto error;
+ if (module != am_erlang || function != am_apply)
+ break;
+
+ /* Adjust for multiple apply of apply/3... */
+
+ a = args;
+ if (is_list(a)) {
+ Eterm *consp = list_val(a);
+ m = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ f = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ a = CAR(consp);
+ if (is_nil(CDR(consp))) {
+ /* erlang:apply/3 */
+ module = m;
+ function = f;
+ args = a;
+ if (is_not_atom(f))
+ goto error;
+ continue;
+ }
+ }
+ }
+ }
+ break; /* != erlang:apply/3 */
}
-
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
* the parameters to the x registers (reg[]). If the module argument
@@ -6283,12 +6433,14 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
static BeamInstr*
-fixed_apply(Process* p, Eterm* reg, Uint arity)
+fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
{
Export* ep;
Eterm module;
@@ -6318,6 +6470,10 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
if (is_not_atom(module)) goto error;
++arity;
}
+
+ /* Handle apply of apply/3... */
+ if (module == am_erlang && function == am_apply && arity == 3)
+ return apply(p, reg[0], reg[1], reg[2], reg, I, stack_offset);
/*
* Get the index into the export table, or failing that the export
@@ -6332,6 +6488,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
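apply_bif_error_adjustment() may push a dummy frame, so it first checks p->stop - p->htop < need: a BEAM process keeps its heap and stack in one memory block, the heap growing upward via htop and the stack growing downward via stop, so the free gap between them must cover the new frame or a garbage collection is forced first. An illustrative sketch of that layout check, with made-up names:

    typedef unsigned long Term;

    typedef struct {
        Term *heap;   /* start of the shared block          */
        Term *htop;   /* next free heap word (grows upward) */
        Term *stop;   /* stack top (grows downward)         */
        Term *hend;   /* end of the shared block            */
    } proc_mem;

    /* room for 'need' more stack words without colliding with the heap? */
    static int stack_room(const proc_mem *p, unsigned long need)
    {
        return (unsigned long)(p->stop - p->htop) >= need;
    }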
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 0afdedf6c2..3f2bdf3f9d 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -797,14 +797,14 @@ erts_finish_loading(Binary* magic, Process* c_p,
} else {
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = stp->module;
- int i;
+ int i, num_exps;
/*
* There is an -on_load() function. We will keep the current
* code, but we must turn off any tracing.
*/
-
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
if (ep == NULL || ep->code[0] != module) {
continue;
@@ -5754,12 +5754,13 @@ exported_from_module(Process* p, /* Process whose heap to use. */
ErtsCodeIndex code_ix,
Eterm mod) /* Tagged atom for module. */
{
- int i;
+ int i, num_exps;
Eterm* hp = NULL;
Eterm* hend = NULL;
Eterm result = NIL;
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i,code_ix);
if (ep->code[0] == mod) {
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index dfbe1ced47..61907cc7ff 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -717,6 +717,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
* We have to be very very careful when doing this as the schedulers
* could be anywhere.
*/
+ sys_init_suspend_handler();
+
for (i = 0; i < erts_no_schedulers; i++) {
erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
if (!erts_equal_tids(tid,erts_thr_self()))
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 735aabbee3..88a052cad7 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1672,11 +1672,11 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
Eterm mfa;
Eterm res = NIL;
- depth = 8;
+ depth = erts_backtrace_depth;
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (rp->i) {
+ if (depth > 0 && rp->i) {
s->trace[s->depth++] = rp->i;
depth--;
}
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 96275eb228..0627526d7e 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1062,9 +1062,16 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_smp_thr_progress_block();
}
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
+#endif
+
r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
+#endif
#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
erts_smp_thr_progress_unblock();
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index af799d09da..edbdbb4b8f 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -59,6 +59,10 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
+#if defined(DEBUG) && 0
+# define HARDDEBUG 1
+#endif
+
/*
* Returns number of elements in an array.
*/
@@ -110,7 +114,6 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static void remove_message_buffers(Process* p);
static Eterm *full_sweep_heaps(Process *p,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
@@ -153,7 +156,7 @@ static void init_gc_info(ErtsGCInfo *gcip);
static Uint64 next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz);
#ifdef HARDDEBUG
-static void disallow_heap_frag_ref_in_heap(Process* p);
+static void disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop);
static void disallow_heap_frag_ref_in_old_heap(Process* p);
#endif
@@ -176,6 +179,13 @@ typedef struct {
erts_smp_atomic32_t refc;
} ErtsGCInfoReq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static struct {
+ erts_mtx_t mtx;
+ ErtsGCInfo info;
+} dirty_gc;
+#endif
+
static ERTS_INLINE int
gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
{
@@ -259,6 +269,11 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ init_gc_info(&dirty_gc.info);
+#endif
+
init_gcireq_alloc();
}
@@ -477,24 +492,26 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
hsz = ssz + need + ERTS_DELAY_GC_EXTRA_FREE;
hfrag = new_message_buffer(hsz);
- hfrag->next = p->mbuf;
- p->mbuf = hfrag;
- p->mbuf_sz += hsz;
p->heap = p->htop = &hfrag->mem[0];
p->hend = hend = &hfrag->mem[hsz];
p->stop = stop = hend - ssz;
sys_memcpy((void *) stop, (void *) orig_stop, ssz * sizeof(Eterm));
if (p->abandoned_heap) {
- /* Active heap already in a fragment; adjust it... */
- ErlHeapFragment *hfrag = ((ErlHeapFragment *)
- (((char *) orig_heap)
- - offsetof(ErlHeapFragment, mem)));
- Uint unused = orig_hend - orig_htop;
- ASSERT(hfrag->used_size == hfrag->alloc_size);
- ASSERT(hfrag->used_size >= unused);
- hfrag->used_size -= unused;
- p->mbuf_sz -= unused;
+ /*
+ * Active heap already in a fragment; adjust it and
+ * save it into mbuf list...
+ */
+ ErlHeapFragment *hfrag = ((ErlHeapFragment *)
+ (((char *) orig_heap)
+ - offsetof(ErlHeapFragment, mem)));
+ Uint used = orig_htop - orig_heap;
+ hfrag->used_size = used;
+ p->mbuf_sz += used;
+ ASSERT(hfrag->used_size <= hfrag->alloc_size);
+ ASSERT(!hfrag->off_heap.first && !hfrag->off_heap.overhead);
+ hfrag->next = p->mbuf;
+ p->mbuf = hfrag;
}
else {
/* Do not leave a hole in the abandoned heap... */
@@ -556,17 +573,14 @@ young_gen_usage(Process *p)
}
}
+ hsz += p->htop - p->heap;
aheap = p->abandoned_heap;
- if (!aheap)
- hsz += p->htop - p->heap;
- else {
+ if (aheap) {
/* used in orig heap */
if (p->flags & F_ABANDONED_HEAP_USE)
hsz += aheap[p->heap_sz-1];
else
hsz += p->heap_sz;
- /* Remove unused part in latest fragment */
- hsz -= p->hend - p->htop;
}
return hsz;
}
@@ -602,7 +616,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
Uint reclaimed_now = 0;
Eterm gc_trace_end_tag;
int reds;
- ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */
+ ErtsMonotonicTime start_time;
ErtsSchedulerData *esdp;
erts_aint32_t state;
ERTS_MSACC_PUSH_STATE_M();
@@ -610,6 +624,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
+ ERTS_UNDEF(start_time, 0);
ERTS_CHK_MBUF_SZ(p);
ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
@@ -735,8 +750,19 @@ do_major_collection:
monitor_large_heap(p);
}
- esdp->gc_info.garbage_cols++;
- esdp->gc_info.reclaimed += reclaimed_now;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ dirty_gc.info.garbage_cols++;
+ dirty_gc.info.reclaimed += reclaimed_now;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+ else
+#endif
+ {
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+ }
FLAGS(p) &= ~F_FORCE_GC;
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -765,6 +791,7 @@ do_major_collection:
ASSERT(!p->mbuf);
ASSERT(!ERTS_IS_GC_DESIRED(p));
+ ASSERT(need <= HEAP_LIMIT(p) - HEAP_TOP(p));
return reds;
}
@@ -789,6 +816,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
>= erts_proc_sched_data(p)->virtual_reds);
}
+
/*
* Place all living data on the new heap; deallocate any old heap.
* Meant to be used by hibernate/3.
@@ -835,11 +863,11 @@ erts_garbage_collect_hibernate(Process* p)
p->arg_reg,
p->arity);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : p->heap),
- p->heap_sz * sizeof(Eterm));
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, heap, htop);
+#endif
+
+ erts_deallocate_young_generation(p);
p->heap = heap;
p->high_water = htop;
@@ -874,8 +902,6 @@ erts_garbage_collect_hibernate(Process* p)
sys_memcpy((void *) heap, (void *) p->heap, actual_size*sizeof(Eterm));
ERTS_HEAP_FREE(ERTS_ALC_T_TMP_HEAP, p->heap, p->heap_sz*sizeof(Eterm));
- remove_message_buffers(p);
-
p->stop = p->hend = heap + heap_size;
offs = heap - p->heap;
@@ -1494,22 +1520,16 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- HEAP_SIZE(p) * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
HEAP_END(p) = n_heap + new_sz;
-
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
}
/*
@@ -1598,13 +1618,12 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- p->heap_sz * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
@@ -1613,11 +1632,6 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
HIGH_WATER(p) = HEAP_TOP(p);
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
-
if (p->flags & F_ON_HEAP_MSGQ)
move_msgq_to_heap(p);
@@ -1770,22 +1784,56 @@ adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj)
return adjusted;
}
-/*
- * Remove all message buffers.
- */
-static void
-remove_message_buffers(Process* p)
+void
+erts_deallocate_young_generation(Process *c_p)
{
- if (MBUF(p) != NULL) {
- free_message_buffer(MBUF(p));
- MBUF(p) = NULL;
+ Eterm *orig_heap;
+
+ if (!c_p->abandoned_heap) {
+ orig_heap = c_p->heap;
+ ASSERT(!(c_p->flags & F_ABANDONED_HEAP_USE));
+ }
+ else {
+ ErlHeapFragment *hfrag;
+
+ orig_heap = c_p->abandoned_heap;
+ c_p->abandoned_heap = NULL;
+ c_p->flags &= ~F_ABANDONED_HEAP_USE;
+
+ /*
+ * Temporary heap located in heap fragment
+ * only referred to by 'c_p->heap'. Add it to
+ * 'c_p->mbuf' list and deallocate it as any
+ * other heap fragment...
+ */
+ hfrag = ((ErlHeapFragment *)
+ (((char *) c_p->heap)
+ - offsetof(ErlHeapFragment, mem)));
+
+ ASSERT(!hfrag->off_heap.first);
+ ASSERT(!hfrag->off_heap.overhead);
+ ASSERT(!hfrag->next);
+ ASSERT(c_p->htop - c_p->heap <= hfrag->alloc_size);
+
+ hfrag->next = c_p->mbuf;
+ c_p->mbuf = hfrag;
+ }
+
+ if (c_p->mbuf) {
+ free_message_buffer(c_p->mbuf);
+ c_p->mbuf = NULL;
}
- if (p->msg_frag) {
- erts_cleanup_messages(p->msg_frag);
- p->msg_frag = NULL;
+ if (c_p->msg_frag) {
+ erts_cleanup_messages(c_p->msg_frag);
+ c_p->msg_frag = NULL;
}
- MBUF_SIZE(p) = 0;
+ c_p->mbuf_sz = 0;
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ orig_heap,
+ c_p->heap_sz * sizeof(Eterm));
}
+
#ifdef HARDDEBUG
/*
@@ -1797,19 +1845,15 @@ remove_message_buffers(Process* p)
*/
static void
-disallow_heap_frag_ref_in_heap(Process* p)
+disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop)
{
Eterm* hp;
- Eterm* htop;
- Eterm* heap;
Uint heap_size;
if (p->mbuf == 0) {
return;
}
- htop = p->htop;
- heap = p->heap;
heap_size = (htop - heap)*sizeof(Eterm);
hp = heap;
@@ -3017,6 +3061,18 @@ reply_gc_info(void *vgcirp)
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /*
+ * Add the dirty schedulers' info to the reply
+ * built by the requesting scheduler
+ */
+ if (gcirp->req_sched == esdp->no) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ reclaimed += dirty_gc.info.reclaimed;
+ garbage_cols += dirty_gc.info.garbage_cols;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+#endif
sz = 0;
hpp = NULL;
@@ -3297,13 +3353,15 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
ErtsMessage* mp;
Eterm *htop, *heap;
- if (p->abandoned_heap)
+ if (p->abandoned_heap) {
ERTS_GET_ORIG_HEAP(p, heap, htop);
- else {
- heap = p->heap;
- htop = real_htop ? real_htop : HEAP_TOP(p);
+ if (heap <= ptr && ptr < htop)
+ return 1;
}
+ heap = p->heap;
+ htop = real_htop ? real_htop : HEAP_TOP(p);
+
if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
return 1;
}
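erts_deallocate_young_generation() recovers a heap fragment's header from the bare heap pointer, since an abandoned temporary heap is referred to only by c_p->heap. The trick is the usual container_of computation over offsetof; the same idea in isolation, with hypothetical types:

    #include <stddef.h>

    typedef struct heap_frag {
        struct heap_frag *next;
        size_t alloc_size;
        size_t used_size;
        unsigned long mem[1];   /* heap words follow the header */
    } heap_frag;

    /* given a pointer to 'mem', recover the enclosing fragment header */
    static heap_frag *frag_of_heap(unsigned long *heap)
    {
        return (heap_frag *) ((char *) heap - offsetof(heap_frag, mem));
    }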
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 54ea9ca3c0..2521379664 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -27,10 +27,6 @@
#include "erl_map.h"
-#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
-# define HARDDEBUG 1
-#endif
-
#define IS_MOVED_BOXED(x) (!is_header((x)))
#define IS_MOVED_CONS(x) (is_non_value((x)))
@@ -157,5 +153,6 @@ void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_free_heap_frags(struct process* p);
Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
int erts_max_heap_size(Eterm, Uint *, Uint *);
+void erts_deallocate_young_generation(Process *c_p);
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index d29d079fc5..647fa26811 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -2666,7 +2666,7 @@ erts_start_timer_callback(ErtsMonotonicTime tmo,
tmo);
twt = tmo < ERTS_TIMER_WHEEL_MSEC;
- if (esdp)
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
start_callback_timer(esdp,
twt,
timeout_pos,
@@ -2865,7 +2865,8 @@ btm_print(ErtsHLTimer *tmr, void *vbtmp)
if (tmr->timeout <= btmp->now)
left = 0;
- left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
? tmr->receiver.name
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 2fd97208cc..070cc3f2d0 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2220,7 +2220,6 @@ erl_start(int argc, char **argv)
init_break_handler();
if (replace_intr)
erts_replace_intr();
- sys_init_suspend_handler();
#endif
boot_argc = argc - i; /* Number of arguments to init */
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 06266363b5..5d4823c077 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -129,6 +129,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "run_queue", "address" },
#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
+ { "dirty_gc_info", NULL },
+ { "dirty_break_point_index", NULL },
#endif
{ "process_table", NULL },
{ "cpu_info", NULL },
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index e4c696ae3b..f45e6974cd 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -193,7 +193,7 @@ free_message_buffer(ErlHeapFragment* bp)
erts_cleanup_offheap(&bp->off_heap);
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size));
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
bp = next_bp;
}while (bp != NULL);
}
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 6b265a8b80..19ce0f6965 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -3561,8 +3561,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
#endif
if (p) {
/* This is almost a normal nif call like in beam_emu,
- except that any heap fragment created in the nif will be
- discarded without checking if anything in it is live.
+ except that any heap consumed by the nif will be
+ released without checking if anything in it is live.
This is because we cannot do a GC here as we don't know
the number of live registers that have to be preserved.
This means that any heap part of the returned term may
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b345c35a7e..41741764e8 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2060,6 +2060,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -6234,6 +6235,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
int queue;
erts_aint32_t dact, max_qbit;
+ /* Do not enqueue free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
/* Termination should be done on an ordinary scheduler */
if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6426,6 +6433,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
/*
* schedule_out_process() return with c_rq locked.
+ *
+ * Return a non-zero value if the caller should decrease
+ * the reference count on the process when done with it...
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6480,12 +6490,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 0;
-
+#if !defined(ERTS_SMP)
+ /* Decrement refc if process struct is free... */
+ return !!(n & ERTS_PSFLG_FREE);
+#else
+ /* Decrement refc if scheduled out from dirty scheduler... */
+ return !is_normal_sched;
+#endif
}
else {
Process* sched_p;
+ ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6501,11 +6517,14 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(runq);
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
+
/* Enqueue the process */
enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
- return 1;
+ return 0;
erts_smp_runq_unlock(runq);
@@ -6513,9 +6532,15 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 1;
+ /*
+ * Decrement refc if process is scheduled out by a
+ * dirty scheduler, and we have not just scheduled
+ * the process using the ordinary process struct
+ * on a dirty run-queue again...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
}
-
}
static ERTS_INLINE void
@@ -6530,8 +6555,17 @@ add2runq(int enqueue, erts_aint32_t prio,
if (runq) {
Process *sched_p;
- if (enqueue > 0)
+ if (enqueue > 0) {
sched_p = proc;
+ /*
+ * Refc on process struct (i.e. true struct,
+ * not proxy-struct) increased while in a
+ * dirty run-queue or executing on a dirty
+ * scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
else {
Process *pxy;
@@ -7055,12 +7089,18 @@ typedef struct {
} ErtsSchdlrSspndResume;
static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
if (p) {
resume_process(p, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
}
}
@@ -7069,17 +7109,20 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
ErtsSchdlrSspndResume *resume)
{
if (is_internal_pid(resume->onln.chngr)) {
- schdlr_sspnd_resume_proc(resume->onln.chngr);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
resume->onln.chngr = NIL;
}
if (is_internal_pid(resume->onln.nxt)) {
- schdlr_sspnd_resume_proc(resume->onln.nxt);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
resume->onln.nxt = NIL;
}
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(plp->pid);
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
proclist_destroy(plp);
}
}
@@ -7131,16 +7174,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (sched_type != ERTS_SCHED_NORMAL)
erts_smp_runq_unlock(esdp->run_queue);
- }
else {
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7153,8 +7188,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
sched_wall_time_change(esdp, 0);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
@@ -7198,15 +7233,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
/* resume processes that initiated the multi scheduling block... */
plp = msb[i]->chngq;
- while (plp) {
- erts_proclist_store_last(&msb[i]->blckrs,
- proclist_copy(plp));
- plp = plp->next;
- }
- if (end_plp)
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
end_plp->next = resume.msb.chngrs;
- resume.msb.chngrs = msb[i]->chngq;
- msb[i]->chngq = NULL;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
}
}
}
@@ -7270,24 +7308,14 @@ suspend_scheduler(ErtsSchedulerData *esdp)
while (1) {
ErtsMonotonicTime current_time;
- erts_aint32_t qmask;
erts_aint32_t flgs;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
-
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (qmask) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
+ if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
- }
else {
+ erts_aint32_t qmask;
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -7415,12 +7443,23 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == schdlr_sspnd.active) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
-
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd.online == schdlr_sspnd.active) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
ASSERT((sched_type == ERTS_SCHED_NORMAL
? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
@@ -7553,7 +7592,7 @@ abort_sched_onln_chng_waitq(Process *p)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
if (is_internal_pid(resume))
- schdlr_sspnd_resume_proc(resume);
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
}
ErtsSchedSuspendResult
@@ -7951,21 +7990,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
else { /* ------ UNBLOCK ------ */
if (p->flags & have_blckd_flg) {
- ErtsProcList *plps[2];
+ ErtsProcList **plpps[3] = {0};
ErtsProcList *plp;
- int limit = 0;
- plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
- if (all)
- plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
- for (ix = 0; ix < limit; ix++) {
- plp = plps[ix];
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
while (plp) {
ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(msbp->blckrs, plp);
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&msbp->blckrs, tmp_plp);
+ erts_proclist_remove(plpps[ix], tmp_plp);
proclist_destroy(tmp_plp);
if (!all)
break;
@@ -8604,8 +8642,15 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
* from being selected for normal execution regardless
* of locks held or not held on it...
*/
- ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&rp->state)));
+#ifdef DEBUG
+ {
+ erts_aint32_t state;
+ state = erts_smp_atomic32_read_nob(&rp->state);
+ ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
+ || !(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ }
+#endif
if (!suspend)
resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
@@ -9587,40 +9632,45 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
esdp->reductions += reds;
- /* schedule_out_process() returns with rq locked! */
- schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
- proxy_p = NULL;
+ {
+ int dec_refc;
- ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
- esdp->current_process = NULL;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+
+ esdp->current_process = NULL;
#ifdef ERTS_SMP
- p->scheduler_data = NULL;
+ p->scheduler_data = NULL;
#endif
- erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_STATUS
- | ERTS_PROC_LOCK_TRACE));
+ erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
- if (state & ERTS_PSFLG_FREE) {
- if (!is_normal_sched) {
- ASSERT(p->flags & F_DELAYED_DEL_PROC);
- erts_proc_dec_refc(p);
- }
- else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- erts_proc_dec_refc(p);
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
#endif
- }
- }
+
+ if (dec_refc)
+ erts_proc_dec_refc(p);
+ }
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
@@ -9896,8 +9946,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9929,6 +9983,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE)))
#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING))
@@ -9961,6 +10016,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* free and not queued by proxy */
erts_proc_dec_refc(p);
}
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
@@ -11912,7 +11969,6 @@ erts_cleanup_empty_process(Process* p)
static void
delete_process(Process* p)
{
- Eterm *heap;
ErtsPSD *psd;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
@@ -11967,13 +12023,8 @@ delete_process(Process* p)
hipe_delete_process(&p->hipe);
#endif
- heap = p->abandoned_heap ? p->abandoned_heap : p->heap;
+ erts_deallocate_young_generation(p);
-#ifdef DEBUG
- sys_memset(heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
-#endif
-
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) heap, p->heap_sz*sizeof(Eterm));
if (p->old_heap != NULL) {
#ifdef DEBUG
@@ -11985,16 +12036,6 @@ delete_process(Process* p)
(p->old_hend-p->old_heap)*sizeof(Eterm));
}
- /*
- * Free all pending message buffers.
- */
- if (p->mbuf != NULL) {
- free_message_buffer(p->mbuf);
- }
-
- if (p->msg_frag)
- erts_cleanup_messages(p->msg_frag);
-
erts_erase_dicts(p);
/* free all pending messages */
@@ -13091,15 +13132,14 @@ erts_continue_exit_process(Process *p)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
delay_del_proc = 1;
/*
- * The dirty scheduler will also decrease
- * refc when done...
+ * The dirty scheduler decreases the refc
+ * when done with the process...
*/
- erts_proc_inc_refc(p);
}
#endif
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 9f7084c127..68fbb10602 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1684,7 +1684,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
return NULL;
else {
ErtsProcList *res = *list;
- if (res == *list)
+ if (res->next == *list)
*list = NULL;
else
*list = res->next;
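This one-character fix concerns a circular list: res == *list is trivially true for the head being fetched, so the single-element case must be detected by res->next == *list. The invariant on a generic circular doubly linked list, with hypothetical types:

    #include <stddef.h>

    typedef struct node { struct node *next, *prev; } node;

    /* pop the first node of a circular doubly linked list */
    static node *fetch_first(node **list)
    {
        node *res = *list;
        if (!res)
            return NULL;
        if (res->next == *list)          /* res is the only element */
            *list = NULL;
        else {
            *list = res->next;           /* advance the head         */
            res->prev->next = res->next; /* unlink res               */
            res->next->prev = res->prev;
        }
        res->next = res->prev = res;     /* keep res self-consistent */
        return res;
    }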
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index d8bb00e8c6..a19db74763 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -90,9 +90,12 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) {
erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size);
erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size);
size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
+ if (p->abandoned_heap)
+ size += (p->hend - p->heap) * sizeof(Eterm);
if (p->old_hend && p->old_heap)
size += (p->old_hend - p->old_heap) * sizeof(Eterm);
+
size += p->msg.len * sizeof(ErtsMessage);
for (mp = p->msg.first; mp; mp = mp->next)
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 8c84303997..ac9e91e31f 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1436,6 +1436,7 @@ void
trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
{
ErtsTracerNif *tnif = NULL;
+ Eterm* o_hp = NULL;
Eterm* hp;
Uint sz = 0;
Eterm tup;
@@ -1446,7 +1447,7 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
if (is_non_value(msg)) {
(void) erts_process_gc_info(p, &sz, NULL, 0, 0);
- hp = HAlloc(p, sz + 3 + 2);
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, (sz + 3 + 2) * sizeof(Eterm));
msg = erts_process_gc_info(p, NULL, &hp, 0, 0);
tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
@@ -1455,6 +1456,8 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC,
what, msg, THE_NON_VALUE, am_true);
+ if (o_hp)
+ erts_free(ERTS_ALC_T_TMP, o_hp);
}
}
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 7f043e191b..00fa452f79 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -348,7 +348,7 @@ Export *export_list(int i, ErtsCodeIndex code_ix)
int export_list_size(ErtsCodeIndex code_ix)
{
- return export_tables[code_ix].entries;
+ return erts_index_num_entries(&export_tables[code_ix]);
}
int export_table_sz(void)
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index beed847578..587c29e3b5 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1222,6 +1222,7 @@ typedef struct B2TContext_t {
} u;
} B2TContext;
+static B2TContext* b2t_export_context(Process*, B2TContext* src);
static uLongf binary2term_uncomp_size(byte* data, Sint size)
{
@@ -1254,7 +1255,7 @@ static uLongf binary2term_uncomp_size(byte* data, Sint size)
static ERTS_INLINE int
binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
- B2TContext* ctx)
+ B2TContext** ctxp, Process* p)
{
byte *bytes = data;
Sint size = data_size;
@@ -1268,8 +1269,8 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
size--;
if (size < 5 || *bytes != COMPRESSED) {
state->extp = bytes;
- if (ctx)
- ctx->state = B2TSizeInit;
+ if (ctxp)
+ (*ctxp)->state = B2TSizeInit;
}
else {
uLongf dest_len = (Uint32) get_int32(bytes+1);
@@ -1286,16 +1287,26 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
return -1;
}
state->extp = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, dest_len);
- ctx->reds -= dest_len;
+ if (ctxp)
+ (*ctxp)->reds -= dest_len;
}
state->exttmp = 1;
- if (ctx) {
+ if (ctxp) {
+ /*
+ * Start decompression by exporting trap context
+ * so we don't have to deal with deep-copying z_stream.
+ */
+ B2TContext* ctx = b2t_export_context(p, *ctxp);
+ ASSERT(state == &(*ctxp)->b2ts);
+ state = &ctx->b2ts;
+
if (erl_zlib_inflate_start(&ctx->u.uc.stream, bytes, size) != Z_OK)
return -1;
ctx->u.uc.dbytes = state->extp;
ctx->u.uc.dleft = dest_len;
ctx->state = B2TUncompressChunk;
+ *ctxp = ctx;
}
else {
uLongf dlen = dest_len;
@@ -1339,7 +1350,7 @@ erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size
{
Sint res;
- if (binary2term_prepare(state, data, data_size, NULL) < 0 ||
+ if (binary2term_prepare(state, data, data_size, NULL, NULL) < 0 ||
(res=decoded_size(state->extp, state->extp + state->extsize, 0, NULL)) < 0) {
if (state->exttmp)
@@ -1485,7 +1496,7 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
if (ctx->aligned_alloc) {
ctx->reds -= bin_size / 8;
}
- if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, ctx) < 0) {
+ if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, &ctx, p) < 0) {
ctx->state = B2TBadArg;
}
break;
diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c
index c86a2122f6..cd834e2c12 100644
--- a/erts/emulator/beam/index.c
+++ b/erts/emulator/beam/index.c
@@ -91,9 +91,16 @@ index_put_entry(IndexTable* t, void* tmpl)
t->seg_table[ix>>INDEX_PAGE_SHIFT] = erts_alloc(t->type, sz);
t->size += INDEX_PAGE_SIZE;
}
- t->entries++;
p->index = ix;
t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
+
+ /*
+ * Do a write barrier here to allow readers to do lock-free iteration.
+ * erts_index_num_entries() does the matching read barrier.
+ */
+ ERTS_SMP_WRITE_MEMORY_BARRIER;
+ t->entries++;
+
return p;
}
diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h
index b2e3c0eab5..10f5d1eb39 100644
--- a/erts/emulator/beam/index.h
+++ b/erts/emulator/beam/index.h
@@ -65,6 +65,7 @@ void index_erase_latest_from(IndexTable*, Uint ix);
ERTS_GLB_INLINE int index_put(IndexTable*, void*);
ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint);
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -78,6 +79,19 @@ erts_index_lookup(IndexTable* t, Uint ix)
{
return t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK];
}
+
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t)
+{
+ int ret = t->entries;
+ /*
+ * Do a read barrier here to allow lock-free iteration
+ * on tables where entries are never erased.
+ * index_put_entry() does the matching write barrier.
+ */
+ ERTS_SMP_READ_MEMORY_BARRIER;
+ return ret;
+}
+
#endif
#endif
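Together, index_put_entry() and erts_index_num_entries() form a publish/observe pair: the writer stores the new entry, then advances the count behind a write barrier; a reader loads the count behind a read barrier and may then safely visit every entry below that snapshot (this is what makes the hoisted num_exps loops earlier in the diff safe). A stand-alone sketch of the same protocol using C11 atomics instead of the ERTS barrier macros, assuming a single writer:

    #include <stdatomic.h>

    #define MAX_ENTRIES 1024

    static void *entries[MAX_ENTRIES];
    static atomic_int num_entries;       /* zero-initialized */

    void publish(void *p)                /* single writer */
    {
        int n = atomic_load_explicit(&num_entries, memory_order_relaxed);
        entries[n] = p;                              /* write entry first... */
        atomic_store_explicit(&num_entries, n + 1,
                              memory_order_release); /* ...then publish it   */
    }

    int snapshot(void)                   /* any reader, lock free */
    {
        /* entries[0 .. snapshot()-1] are fully visible after this load */
        return atomic_load_explicit(&num_entries, memory_order_acquire);
    }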
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 7740dd4373..41a7ff4c16 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -859,9 +859,12 @@ int erts_sys_unsetenv(char *key);
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
+#if defined(ERTS_SMP)
#if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX)
extern void sys_thr_resume(erts_tid_t tid);
extern void sys_thr_suspend(erts_tid_t tid);
+#define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2
+#endif
#endif
/* utils.c */