Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/beam_bif_load.c     |  27
-rw-r--r--  erts/emulator/beam/beam_bp.c           |  78
-rw-r--r--  erts/emulator/beam/beam_bp.h           |  12
-rw-r--r--  erts/emulator/beam/beam_emu.c          | 241
-rw-r--r--  erts/emulator/beam/beam_load.c         |  15
-rw-r--r--  erts/emulator/beam/break.c             |  30
-rw-r--r--  erts/emulator/beam/copy.c              |  12
-rw-r--r--  erts/emulator/beam/dist.c              |   4
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c      |  22
-rw-r--r--  erts/emulator/beam/erl_bif_guard.c     |   5
-rw-r--r--  erts/emulator/beam/erl_bif_info.c      |   6
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c     |   7
-rw-r--r--  erts/emulator/beam/erl_db.c            |  20
-rw-r--r--  erts/emulator/beam/erl_db_util.c       |   2
-rw-r--r--  erts/emulator/beam/erl_db_util.h       |   4
-rw-r--r--  erts/emulator/beam/erl_fun.c           |  20
-rw-r--r--  erts/emulator/beam/erl_fun.h           |   2
-rw-r--r--  erts/emulator/beam/erl_gc.c            | 212
-rw-r--r--  erts/emulator/beam/erl_gc.h            |   5
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c      |   5
-rw-r--r--  erts/emulator/beam/erl_init.c          |   6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c    |   2
-rw-r--r--  erts/emulator/beam/erl_message.c       |   4
-rw-r--r--  erts/emulator/beam/erl_monitors.c      |   2
-rw-r--r--  erts/emulator/beam/erl_nif.c           |   4
-rw-r--r--  erts/emulator/beam/erl_node_tables.c   |  50
-rw-r--r--  erts/emulator/beam/erl_node_tables.h   |   8
-rw-r--r--  erts/emulator/beam/erl_process.c       | 262
-rw-r--r--  erts/emulator/beam/erl_process.h       |   2
-rw-r--r--  erts/emulator/beam/erl_process_dump.c  |   5
-rw-r--r--  erts/emulator/beam/erl_trace.c         |   5
-rw-r--r--  erts/emulator/beam/erl_vm.h            |  17
-rw-r--r--  erts/emulator/beam/export.c            |   2
-rw-r--r--  erts/emulator/beam/external.c          |  27
-rw-r--r--  erts/emulator/beam/global.h            |   2
-rw-r--r--  erts/emulator/beam/index.c             |   9
-rw-r--r--  erts/emulator/beam/index.h             |  14
-rw-r--r--  erts/emulator/beam/io.c                |  31
-rw-r--r--  erts/emulator/beam/sys.h               | 131
-rw-r--r--  erts/emulator/beam/utils.c             |   2
40 files changed, 895 insertions(+), 419 deletions(-)
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index ea1323d651..7f5e9ff691 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -658,7 +658,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
else if (modp->old.code_hdr) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+ erts_dsprintf(dsbufp, "Module %T must be purged before deleting\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
@@ -786,7 +786,7 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
}
if (BIF_ARG_2 == am_true) {
- int i;
+ int i, num_exps;
/*
* Make the code with the on_load function current.
@@ -802,7 +802,8 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
/*
* The on_load function succeded. Fix up export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
if (ep == NULL || ep->code[0] != BIF_ARG_1) {
continue;
@@ -822,14 +823,15 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
modp->curr.code_hdr->on_load_function_ptr = NULL;
set_default_trace_pattern(BIF_ARG_1);
} else if (BIF_ARG_2 == am_false) {
- int i;
+ int i, num_exps;
/*
* The on_load function failed. Remove references to the
* code that is about to be purged from the export entries.
*/
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i,code_ix);
if (ep == NULL || ep->code[0] != BIF_ARG_1) {
continue;
@@ -995,6 +997,12 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize))
goto literal_gc;
*redsp += 1;
+ if (c_p->abandoned_heap) {
+ if (any_heap_refs(c_p->abandoned_heap, c_p->abandoned_heap + c_p->heap_sz,
+ literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ }
if (any_heap_refs(c_p->old_heap, c_p->old_htop, literals, lit_bsize))
goto literal_gc;
@@ -1295,6 +1303,11 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
#endif
if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
goto try_literal_gc;
+ if (rp->abandoned_heap) {
+ if (any_heap_refs(rp->abandoned_heap, rp->abandoned_heap + rp->heap_sz,
+ literals, lit_bsize))
+ goto try_literal_gc;
+ }
if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
goto try_literal_gc;
@@ -2022,9 +2035,9 @@ delete_code(Module* modp)
{
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = make_atom(modp->module);
- int i;
+ int i, num_exps = export_list_size(code_ix);
- for (i = 0; i < export_list_size(code_ix); i++) {
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
if (ep != NULL && (ep->code[0] == module)) {
if (ep->addressv[code_ix] == ep->code+3) {
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index bbb2e4f34f..a66d9f68d9 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -74,6 +74,9 @@ extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
erts_smp_atomic32_t erts_active_bp_index;
erts_smp_atomic32_t erts_staging_bp_index;
+#ifdef ERTS_DIRTY_SCHEDULERS
+erts_smp_mtx_t erts_dirty_bp_ix_mtx;
+#endif
/*
* Inlined helpers
@@ -85,6 +88,31 @@ get_mtime(Process *c_p)
return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}
+static ERTS_INLINE Uint32
+acquire_bp_sched_ix(Process *c_p)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
+ return (Uint32) erts_no_schedulers;
+ }
+#endif
+ return (Uint32) esdp->no - 1;
+}
+
+static ERTS_INLINE void
+release_bp_sched_ix(Uint32 ix)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ix == (Uint32) erts_no_schedulers)
+ erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
+#endif
+}
+
+
+
/* *************************************************************************
** Local prototypes
*/
@@ -135,6 +163,9 @@ void
erts_bp_init(void) {
erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index");
+#endif
}
@@ -347,17 +378,17 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
}
if (flags & ERTS_BPF_META_TRACE) {
dst->meta_tracer = src->meta_tracer;
- erts_refc_inc(&dst->meta_tracer->refc, 1);
+ erts_smp_refc_inc(&dst->meta_tracer->refc, 1);
dst->meta_ms = src->meta_ms;
MatchSetRef(dst->meta_ms);
}
if (flags & ERTS_BPF_COUNT) {
dst->count = src->count;
- erts_refc_inc(&dst->count->refc, 1);
+ erts_smp_refc_inc(&dst->count->refc, 1);
}
if (flags & ERTS_BPF_TIME_TRACE) {
dst->time = src->time;
- erts_refc_inc(&dst->time->refc, 1);
+ erts_smp_refc_inc(&dst->time->refc, 1);
ASSERT(dst->time->hash);
}
}
@@ -973,6 +1004,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(c_p);
ASSERT(c_p);
ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
@@ -980,7 +1012,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* get previous timestamp and breakpoint
* from the process psd */
-
+
pbt = ERTS_PROC_GET_CALL_TIME(c_p);
time = get_mtime(c_p);
@@ -1006,7 +1038,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* if null then the breakpoint was removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1027,7 +1059,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* this breakpoint */
ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(bdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1041,6 +1073,8 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
pbt->pc = I;
pbt->time = time;
+
+ release_bp_sched_ix(six);
}
void
@@ -1051,6 +1085,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
@@ -1071,6 +1106,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
*/
if (pbt) {
+
/* might have been removed due to
* trace_pattern(false)
*/
@@ -1085,7 +1121,8 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
/* beware, the trace_pattern might have been removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1096,11 +1133,15 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
} else {
BP_TIME_ADD(item, &sitem);
}
+
}
pbt->pc = pc;
pbt->time = time;
+
}
+
+ release_bp_sched_ix(six);
}
int
@@ -1353,6 +1394,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
@@ -1375,7 +1417,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
sitem.pid = p->common.id;
sitem.count = 0;
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1401,6 +1443,8 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
break;
}
} /* pbt */
+
+ release_bp_sched_ix(six);
}
/* *************************************************************************
@@ -1498,7 +1542,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetRef(match_spec);
bp->meta_ms = match_spec;
bmt = Alloc(sizeof(BpMetaTracer));
- erts_refc_init(&bmt->refc, 1);
+ erts_smp_refc_init(&bmt->refc, 1);
erts_tracer_update(&meta_tracer, tracer); /* copy tracer */
erts_smp_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
bp->meta_tracer = bmt;
@@ -1507,7 +1551,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
bcp = Alloc(sizeof(BpCount));
- erts_refc_init(&bcp->refc, 1);
+ erts_smp_refc_init(&bcp->refc, 1);
erts_smp_atomic_init_nob(&bcp->acount, 0);
bp->count = bcp;
} else if (break_flags & ERTS_BPF_TIME_TRACE) {
@@ -1516,8 +1560,12 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
bdt = Alloc(sizeof(BpDataTime));
- erts_refc_init(&bdt->refc, 1);
- bdt->n = erts_no_total_schedulers;
+ erts_smp_refc_init(&bdt->refc, 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ bdt->n = erts_no_schedulers + 1;
+#else
+ bdt->n = erts_no_schedulers;
+#endif
bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
@@ -1583,7 +1631,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
static void
bp_meta_unref(BpMetaTracer* bmt)
{
- if (erts_refc_dectest(&bmt->refc, 0) <= 0) {
+ if (erts_smp_refc_dectest(&bmt->refc, 0) <= 0) {
ErtsTracer trc = erts_smp_atomic_read_nob(&bmt->tracer);
ERTS_TRACER_CLEAR(&trc);
Free(bmt);
@@ -1593,7 +1641,7 @@ bp_meta_unref(BpMetaTracer* bmt)
static void
bp_count_unref(BpCount* bcp)
{
- if (erts_refc_dectest(&bcp->refc, 0) <= 0) {
+ if (erts_smp_refc_dectest(&bcp->refc, 0) <= 0) {
Free(bcp);
}
}
@@ -1601,7 +1649,7 @@ bp_count_unref(BpCount* bcp)
static void
bp_time_unref(BpDataTime* bdt)
{
- if (erts_refc_dectest(&bdt->refc, 0) <= 0) {
+ if (erts_smp_refc_dectest(&bdt->refc, 0) <= 0) {
Uint i = 0;
Uint j = 0;
Process *h_p = NULL;
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 7206ef471a..9e4ab3cd20 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -41,7 +41,7 @@ typedef struct {
typedef struct bp_data_time { /* Call time */
Uint n;
bp_time_hash_t *hash;
- erts_refc_t refc;
+ erts_smp_refc_t refc;
} BpDataTime;
typedef struct {
@@ -51,12 +51,12 @@ typedef struct {
typedef struct {
erts_smp_atomic_t acount;
- erts_refc_t refc;
+ erts_smp_refc_t refc;
} BpCount;
typedef struct {
erts_smp_atomic_t tracer;
- erts_refc_t refc;
+ erts_smp_refc_t refc;
} BpMetaTracer;
typedef struct generic_bp_data {
@@ -79,10 +79,8 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#ifdef ERTS_SMP
-#define bp_sched2ix_proc(p) (erts_proc_sched_data(p)->thr_id - 1)
-#else
-#define bp_sched2ix_proc(p) (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern erts_smp_mtx_t erts_dirty_bp_ix_mtx;
#endif
enum erts_break_op{
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 59a9ea1417..1c83d8f02e 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1047,9 +1047,11 @@ static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
Eterm* reg, BifFunction bf) NOINLINE;
static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
Eterm* reg, Eterm func) NOINLINE;
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* apply(Process* p, Eterm module, Eterm function,
- Eterm args, Eterm* reg) NOINLINE;
+ Eterm args, Eterm* reg,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
Eterm* reg, Eterm args) NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
@@ -3194,7 +3196,7 @@ do { \
OpCase(i_apply): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+1);
@@ -3208,7 +3210,7 @@ do { \
OpCase(i_apply_last_P): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, Arg(0));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3223,7 +3225,7 @@ do { \
OpCase(i_apply_only): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_I(next);
@@ -3237,7 +3239,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+2);
@@ -3252,7 +3254,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), I, Arg(1));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3550,18 +3552,11 @@ do { \
BifFunction vbf;
ErlHeapFragment *live_hf_end;
- if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the nif */
- goto context_switch;
- }
-
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
+ HEAVY_SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = I[-1];
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
@@ -5844,12 +5839,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
s->depth = 0;
/*
- * If the failure was in a BIF other than 'error', 'exit' or
- * 'throw', find the bif-table index and save the argument
- * registers by consing up an arglist.
+ * If the failure was in a BIF other than 'error/1', 'error/2',
+ * 'exit/1' or 'throw/1', find the bif-table index and save the
+ * argument registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 &&
- bf != exit_1 && bf != throw_1) {
+ if (bf != NULL && bf != error_1 && bf != error_2 && bf != exit_1
+ && bf != throw_1 && bf != wrap_error_1 && bf != wrap_error_2
+ && bf != wrap_exit_1 && bf != wrap_throw_1) {
int i;
int a = 0;
for (i = 0; i < BIF_SIZE; i++) {
@@ -5945,12 +5941,30 @@ erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
p->cp) {
/* Cannot follow cp here - code may be unloaded */
BeamInstr *cpp = p->cp;
+ int trace_cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ trace_cp = 1;
} else if (cpp == beam_return_to_trace) {
/* Skip return_to_trace parameters */
ptr += 1;
+ trace_cp = 1;
+ }
+ else {
+ trace_cp = 0;
+ }
+ if (trace_cp && s->pc == cpp) {
+ /*
+ * If process 'cp' points to a return/exception trace
+ * instruction and 'cp' has been saved as 'pc' in
+ * stacktrace, we need to update 'pc' in stacktrace
+ * with the actual 'cp' located on the top of the
+ * stack; otherwise, we will lose the top stackframe
+ * when building the stack trace.
+ */
+ ASSERT(is_CP(p->stop[0]));
+ s->pc = cp_val(p->stop[0]);
}
}
while (ptr < STACK_START(p) && depth > 0) {
@@ -6070,12 +6084,15 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_set_current_function(&fi, s->current);
}
+ depth = s->depth;
/*
- * If fi.current is still NULL, default to the initial function
+ * If fi.current is still NULL, and we have no
+ * stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
if (fi.current == NULL) {
- erts_set_current_function(&fi, c_p->u.initial);
+ if (depth <= 0)
+ erts_set_current_function(&fi, c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -6085,10 +6102,9 @@ build_stacktrace(Process* c_p, Eterm exc) {
* Look up all saved continuation pointers and calculate
* needed heap space.
*/
- depth = s->depth;
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.needed + 2;
+ heap_size = fi.current ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
if (stkp->current) {
@@ -6107,8 +6123,10 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- hp = erts_build_mfa_item(&fi, hp, args, &mfa);
- res = CONS(hp, mfa, res);
+ if (fi.current) {
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+ }
erts_free(ERTS_ALC_T_TMP, (void *) stk);
return res;
@@ -6205,8 +6223,107 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
return ep;
}
+static ERTS_INLINE void
+apply_bif_error_adjustment(Process *p, Export *ep,
+ Eterm *reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
+{
+ /*
+ * I is only set when the apply is a tail call, i.e.,
+ * from the instructions i_apply_only, i_apply_last_P,
+ * and apply_last_IP.
+ */
+ if (I
+ && ep->code[3] == (BeamInstr) em_apply_bif
+ && (ep == bif_export[BIF_error_1]
+ || ep == bif_export[BIF_error_2]
+ || ep == bif_export[BIF_exit_1]
+ || ep == bif_export[BIF_throw_1])) {
+ /*
+ * We are about to tail apply one of the BIFs
+ * erlang:error/1, erlang:error/2, erlang:exit/1,
+ * or erlang:throw/1. Error handling of these BIFs is
+ * special!
+ *
+ * We need 'p->cp' to point into the calling
+ * function when handling the error after the BIF has
+ * been applied. This in order to get the topmost
+ * stackframe correct. Without the following adjustment,
+ * 'p->cp' will point into the function that called
+ * current function when handling the error. We add a
+ * dummy stackframe in order to achive this.
+ *
+ * Note that these BIFs unconditionally will cause
+ * an exception to be raised. That is, our modifications
+ * of 'p->cp' as well as the stack will be corrected by
+ * the error handling code.
+ *
+ * If we find an exception/return-to trace continuation
+ * pointer as the topmost continuation pointer, we do not
+ * need to do anything since the information already will
+ * be available for generation of the stacktrace.
+ */
+ int apply_only = stack_offset == 0;
+ BeamInstr *cpp;
+
+ if (apply_only) {
+ ASSERT(p->cp != NULL);
+ cpp = p->cp;
+ }
+ else {
+ ASSERT(is_CP(p->stop[0]));
+ cpp = cp_val(p->stop[0]);
+ }
+
+ if (cpp != beam_exception_trace
+ && cpp != beam_return_trace
+ && cpp != beam_return_to_trace) {
+ Uint need = stack_offset /* bytes */ / sizeof(Eterm);
+ if (need == 0)
+ need = 1; /* i_apply_only */
+ if (p->stop - p->htop < need)
+ erts_garbage_collect(p, (int) need, reg, arity+1);
+ p->stop -= need;
+
+ if (apply_only) {
+ /*
+ * Called from the i_apply_only instruction.
+ *
+ * 'p->cp' contains continuation pointer pointing
+ * into the function that called current function.
+ * We push that continuation pointer onto the stack,
+ * and set 'p->cp' to point into current function.
+ */
+
+ p->stop[0] = make_cp(p->cp);
+ p->cp = I;
+ }
+ else {
+ /*
+ * Called from an i_apply_last_p, or apply_last_IP,
+ * instruction.
+ *
+ * Calling instruction will after we return read
+ * a continuation pointer from the stack and write
+ * it to 'p->cp', and then remove the topmost
+ * stackframe of size 'stack_offset'.
+ *
+ * We have sized the dummy-stackframe so that it
+ * will be removed by the instruction we currently
+ * are executing, and leave the stackframe that
+ * normally would have been removed intact.
+ *
+ */
+ p->stop[0] = make_cp(I);
+ }
+ }
+ }
+}
+
static BeamInstr*
-apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+apply(
+Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg,
+BeamInstr *I, Uint stack_offset)
{
int arity;
Export* ep;
@@ -6231,21 +6348,54 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- this = THE_NON_VALUE;
- if (is_not_atom(module)) {
- Eterm* tp;
+ while (1) {
+ Eterm m, f, a;
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ this = THE_NON_VALUE;
+ if (is_not_atom(module)) {
+ Eterm* tp;
+
+ if (is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ this = module;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ }
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- this = module;
- module = tp[1];
- if (is_not_atom(module)) goto error;
+ if (module != am_erlang || function != am_apply)
+ break;
+
+ /* Adjust for multiple apply of apply/3... */
+
+ a = args;
+ if (is_list(a)) {
+ Eterm *consp = list_val(a);
+ m = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ f = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ a = CAR(consp);
+ if (is_nil(CDR(consp))) {
+ /* erlang:apply/3 */
+ module = m;
+ function = f;
+ args = a;
+ if (is_not_atom(f))
+ goto error;
+ continue;
+ }
+ }
+ }
+ }
+ break; /* != erlang:apply/3 */
}
-
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
* the parameters to the x registers (reg[]). If the module argument
@@ -6283,12 +6433,14 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
static BeamInstr*
-fixed_apply(Process* p, Eterm* reg, Uint arity)
+fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
{
Export* ep;
Eterm module;
@@ -6318,6 +6470,10 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
if (is_not_atom(module)) goto error;
++arity;
}
+
+ /* Handle apply of apply/3... */
+ if (module == am_erlang && function == am_apply && arity == 3)
+ return apply(p, reg[0], reg[1], reg[2], reg, I, stack_offset);
/*
* Get the index into the export table, or failing that the export
@@ -6332,6 +6488,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
@@ -6678,7 +6835,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
p->htop = hp + needed;
funp = (ErlFunThing *) hp;
hp = funp->env;
- erts_refc_inc(&fe->refc, 2);
+ erts_smp_refc_inc(&fe->refc, 2);
funp->thing_word = HEADER_FUN;
funp->next = MSO(p).first;
MSO(p).first = (struct erl_off_heap_header*) funp;
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 0afdedf6c2..88b6e89a02 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -797,14 +797,14 @@ erts_finish_loading(Binary* magic, Process* c_p,
} else {
ErtsCodeIndex code_ix = erts_staging_code_ix();
Eterm module = stp->module;
- int i;
+ int i, num_exps;
/*
* There is an -on_load() function. We will keep the current
* code, but we must turn off any tracing.
*/
-
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export *ep = export_list(i, code_ix);
if (ep == NULL || ep->code[0] != module) {
continue;
@@ -4867,7 +4867,7 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
/*
* We are hiding a pointer into older code.
*/
- erts_refc_dec(&fe->refc, 1);
+ erts_smp_refc_dec(&fe->refc, 1);
}
fe->address = code_ptr;
#ifdef HIPE
@@ -5754,12 +5754,13 @@ exported_from_module(Process* p, /* Process whose heap to use. */
ErtsCodeIndex code_ix,
Eterm mod) /* Tagged atom for module. */
{
- int i;
+ int i, num_exps;
Eterm* hp = NULL;
Eterm* hend = NULL;
Eterm result = NIL;
- for (i = 0; i < export_list_size(code_ix); i++) {
+ num_exps = export_list_size(code_ix);
+ for (i = 0; i < num_exps; i++) {
Export* ep = export_list(i,code_ix);
if (ep->code[0] == mod) {
@@ -6286,7 +6287,7 @@ patch_funentries(Eterm Patchlist)
*
* Reproduced on a debug emulator with stdlib_test/qlc_SUITE:join_merge
*
- * erts_refc_dec(&fe->refc, 1);
+ * erts_smp_refc_dec(&fe->refc, 1);
*/
if (!patch(Addresses, (Uint) fe))
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index dfbe1ced47..f2eda6c0f8 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -379,6 +379,16 @@ info(fmtfn_t to, void *to_arg)
}
+static int code_size(struct erl_module_instance* modi)
+{
+ ErtsLiteralArea* lit = modi->code_hdr->literal_area;
+ int size = modi->code_length;
+ if (lit) {
+ size += (lit->end - lit->start) * sizeof(Eterm);
+ }
+ return size;
+}
+
void
loaded(fmtfn_t to, void *to_arg)
{
@@ -399,9 +409,9 @@ loaded(fmtfn_t to, void *to_arg)
if ((modp = module_code(i, code_ix)) != NULL &&
((modp->curr.code_length != 0) ||
(modp->old.code_length != 0))) {
- cur += modp->curr.code_length;
+ cur += code_size(&modp->curr);
if (modp->old.code_length != 0) {
- old += modp->old.code_length;
+ old += code_size(&modp->old);
}
}
}
@@ -422,12 +432,12 @@ loaded(fmtfn_t to, void *to_arg)
((modp->curr.code_length != 0) ||
(modp->old.code_length != 0))) {
erts_print(to, to_arg, "%T", make_atom(modp->module));
- cur += modp->curr.code_length;
- erts_print(to, to_arg, " %d", modp->curr.code_length );
+ cur += code_size(&modp->curr);
+ erts_print(to, to_arg, " %d", code_size(&modp->curr));
if (modp->old.code_length != 0) {
erts_print(to, to_arg, " (%d old)",
- modp->old.code_length );
- old += modp->old.code_length;
+ code_size(&modp->old));
+ old += code_size(&modp->old);
}
erts_print(to, to_arg, "\n");
}
@@ -442,7 +452,7 @@ loaded(fmtfn_t to, void *to_arg)
erts_print(to, to_arg, "%T", make_atom(modp->module));
erts_print(to, to_arg, "\n");
erts_print(to, to_arg, "Current size: %d\n",
- modp->curr.code_length);
+ code_size(&modp->curr));
code = modp->curr.code_hdr;
if (code != NULL && code->attr_ptr) {
erts_print(to, to_arg, "Current attributes: ");
@@ -456,7 +466,7 @@ loaded(fmtfn_t to, void *to_arg)
}
if (modp->old.code_length != 0) {
- erts_print(to, to_arg, "Old size: %d\n", modp->old.code_length);
+ erts_print(to, to_arg, "Old size: %d\n", code_size(&modp->old));
code = modp->old.code_hdr;
if (code->attr_ptr) {
erts_print(to, to_arg, "Old attributes: ");
@@ -647,7 +657,7 @@ bin_check(void)
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
bp->val,
bp->val->orig_size,
- erts_smp_atomic_read_nob(&bp->val->refc));
+ erts_refc_read(&bp->val->refc, 1));
}
}
if (printed) {
@@ -717,6 +727,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
* We have to be very very careful when doing this as the schedulers
* could be anywhere.
*/
+ sys_init_suspend_handler();
+
for (i = 0; i < erts_no_schedulers; i++) {
erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
if (!erts_equal_tids(tid,erts_thr_self()))
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index ccc4cbad43..78db141cfc 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -826,7 +826,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
funp = (ErlFunThing *) tp;
funp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*) funp;
- erts_refc_inc(&funp->fe->refc, 2);
+ erts_smp_refc_inc(&funp->fe->refc, 2);
*argp = make_fun(tp);
}
break;
@@ -845,7 +845,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
etp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*)etp;
- erts_refc_inc(&etp->node->refc, 2);
+ erts_smp_refc_inc(&etp->node->refc, 2);
*argp = make_external(tp);
}
@@ -1491,7 +1491,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
}
funp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*) funp;
- erts_refc_inc(&funp->fe->refc, 2);
+ erts_smp_refc_inc(&funp->fe->refc, 2);
goto cleanup_next;
}
case MAP_SUBTAG:
@@ -1626,7 +1626,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
}
etp->next = off_heap->first;
off_heap->first = (struct erl_off_heap_header*) etp;
- erts_refc_inc(&etp->node->refc, 2);
+ erts_smp_refc_inc(&etp->node->refc, 2);
goto cleanup_next;
}
default:
@@ -1802,7 +1802,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
case FUN_SUBTAG:
{
ErlFunThing* funp = (ErlFunThing *) (tp-1);
- erts_refc_inc(&funp->fe->refc, 2);
+ erts_smp_refc_inc(&funp->fe->refc, 2);
}
goto off_heap_common;
case EXTERNAL_PID_SUBTAG:
@@ -1810,7 +1810,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
case EXTERNAL_REF_SUBTAG:
{
ExternalThing* etp = (ExternalThing *) (tp-1);
- erts_refc_inc(&etp->node->refc, 2);
+ erts_smp_refc_inc(&etp->node->refc, 2);
}
off_heap_common:
{
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index d79245e0e6..52b2174609 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -2081,7 +2081,7 @@ erts_dist_command(Port *prt, int reds_limit)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be
+ erts_smp_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be
removed if port command fails */
erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0);
@@ -2514,7 +2514,7 @@ info_dist_entry(fmtfn_t to, void *arg, DistEntry *dep, int visible, int connecte
erts_print(to, arg, "Name: %T", dep->sysname);
#ifdef DEBUG
- erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 0));
+ erts_print(to, arg, " (refc=%d)", erts_smp_refc_read(&dep->refc, 0));
#endif
erts_print(to, arg, "\n");
if (!connected && is_nil(dep->cid)) {
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index ef77201544..bace51bbbb 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1109,25 +1109,25 @@ void erts_ddll_decrement_port_count(DE_Handle *dh)
static void first_ddll_reference(DE_Handle *dh)
{
assert_drv_list_rwlocked();
- erts_refc_init(&(dh->refc),1);
+ erts_smp_refc_init(&(dh->refc),1);
}
void erts_ddll_reference_driver(DE_Handle *dh)
{
assert_drv_list_locked();
- if (erts_refc_inctest(&(dh->refc),1) == 1) {
- erts_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */
+ if (erts_smp_refc_inctest(&(dh->refc),1) == 1) {
+ erts_smp_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */
}
}
void erts_ddll_reference_referenced_driver(DE_Handle *dh)
{
- erts_refc_inc(&(dh->refc),2);
+ erts_smp_refc_inc(&(dh->refc),2);
}
void erts_ddll_dereference_driver(DE_Handle *dh)
{
- if (erts_refc_dectest(&(dh->refc),0) == 0) {
+ if (erts_smp_refc_dectest(&(dh->refc),0) == 0) {
/* No lock here, but if the driver is referenced again,
the scheduled deletion is added as a reference too, see above */
erts_schedule_misc_op(ddll_no_more_references, (void *) dh);
@@ -1150,11 +1150,11 @@ static void restore_process_references(DE_Handle *dh)
{
DE_ProcEntry *p;
assert_drv_list_rwlocked();
- ASSERT(erts_refc_read(&(dh->refc),0) == 0);
+ ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0);
for(p = dh->procs;p != NULL; p = p->next) {
if (p->awaiting_status == ERL_DE_PROC_LOADED) {
ASSERT(p->flags & ERL_DE_FL_DEREFERENCED);
- erts_refc_inc(&(dh->refc),1);
+ erts_smp_refc_inc(&(dh->refc),1);
p->flags &= ~ERL_DE_FL_DEREFERENCED;
}
}
@@ -1176,9 +1176,9 @@ static void ddll_no_more_references(void *vdh)
lock_drv_list();
- x = erts_refc_read(&(dh->refc),0);
+ x = erts_smp_refc_read(&(dh->refc),0);
if (x > 0) {
- x = erts_refc_dectest(&(dh->refc),0); /* delete the reference added for me */
+ x = erts_smp_refc_dectest(&(dh->refc),0); /* delete the reference added for me */
}
@@ -1643,7 +1643,7 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
dh->handle = NULL;
dh->procs = NULL;
erts_smp_atomic32_init_nob(&dh->port_count, 0);
- erts_refc_init(&(dh->refc), (erts_aint_t) 0);
+ erts_smp_refc_init(&(dh->refc), (erts_aint_t) 0);
dh->status = -1;
dh->reload_full_path = NULL;
dh->reload_driver_name = NULL;
@@ -1681,7 +1681,7 @@ static int reload_driver_entry(DE_Handle *dh)
dh->reload_full_path = NULL;
dh->reload_driver_name = NULL;
- ASSERT(erts_refc_read(&(dh->refc),0) == 0);
+ ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0);
ASSERT(dh->full_path != NULL);
erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh->full_path);
dh->full_path = NULL;
diff --git a/erts/emulator/beam/erl_bif_guard.c b/erts/emulator/beam/erl_bif_guard.c
index b42d2dc28b..74cf7cf11a 100644
--- a/erts/emulator/beam/erl_bif_guard.c
+++ b/erts/emulator/beam/erl_bif_guard.c
@@ -157,7 +157,7 @@ BIF_RETTYPE round_1(BIF_ALIST_1)
GET_DOUBLE(BIF_ARG_1, f);
/* round it and return the resultant integer */
- res = double_to_integer(BIF_P, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5);
+ res = double_to_integer(BIF_P, round(f.fd));
BIF_RET(res);
}
@@ -597,8 +597,7 @@ Eterm erts_gc_round_1(Process* p, Eterm* reg, Uint live)
}
GET_DOUBLE(arg, f);
- return gc_double_to_integer(p, (f.fd > 0.0) ? f.fd + 0.5 : f.fd - 0.5,
- reg, live);
+ return gc_double_to_integer(p, round(f.fd), reg, live);
}
Eterm erts_gc_trunc_1(Process* p, Eterm* reg, Uint live)
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 735aabbee3..95a56a3de9 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -174,7 +174,7 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc);
+ Uint refc = (Uint) erts_refc_read(&pb->val->refc, 1);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -1672,11 +1672,11 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
Eterm mfa;
Eterm res = NIL;
- depth = 8;
+ depth = erts_backtrace_depth;
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (rp->i) {
+ if (depth > 0 && rp->i) {
s->trace[s->depth++] = rp->i;
depth--;
}
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 96275eb228..0627526d7e 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1062,9 +1062,16 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_smp_thr_progress_block();
}
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
+#endif
+
r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
+#endif
#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
erts_smp_thr_progress_unblock();
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index dceadc46f4..3cc2b21a20 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -266,7 +266,7 @@ static void schedule_free_dbtable(DbTable* tb)
* Caller is *not* allowed to access the specialized part
* (hash or tree) of *tb after this function has returned.
*/
- ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
+ ASSERT(erts_smp_refc_read(&tb->common.ref, 0) == 0);
erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
(void *) tb,
&tb->release.data,
@@ -600,11 +600,11 @@ done:
*/
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
- erts_refc_inc(&tb->common.ref, 1);
+ erts_smp_refc_inc(&tb->common.ref, 1);
}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
{
- if (erts_refc_dectest(&tb->common.ref, 0) == 0) {
+ if (erts_smp_refc_dectest(&tb->common.ref, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
}
@@ -1487,7 +1487,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- erts_refc_init(&tb->common.ref, 0);
+ erts_smp_refc_init(&tb->common.ref, 0);
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
"db_tab", "db_tab_fix");
tb->common.keypos = keypos;
@@ -2990,7 +2990,7 @@ void init_db(ErtsDbSpinCount db_spin_count)
meta_pid_to_tab->common.meth = &db_hash;
meta_pid_to_tab->common.compress = 0;
- erts_refc_init(&meta_pid_to_tab->common.ref, 0);
+ erts_smp_refc_init(&meta_pid_to_tab->common.ref, 0);
/* Neither rwlock or fixlock used
db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/
@@ -3021,7 +3021,7 @@ void init_db(ErtsDbSpinCount db_spin_count)
meta_pid_to_fixed_tab->common.meth = &db_hash;
meta_pid_to_fixed_tab->common.compress = 0;
- erts_refc_init(&meta_pid_to_fixed_tab->common.ref, 0);
+ erts_smp_refc_init(&meta_pid_to_fixed_tab->common.ref, 0);
/* Neither rwlock or fixlock used
db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/
@@ -3382,7 +3382,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
if ((*pp)->pid == pid) {
DbFixation* fix = *pp;
erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
+ erts_smp_refc_add(&tb->common.ref,diff,0);
*pp = fix->next;
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, fix, sizeof(DbFixation));
@@ -3458,7 +3458,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
- erts_refc_inc(&tb->common.ref,1);
+ erts_smp_refc_inc(&tb->common.ref,1);
fix = tb->common.fixations;
if (fix == NULL) {
tb->common.time.monotonic
@@ -3514,7 +3514,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
if ((*pp)->pid == p->common.id) {
DbFixation* fix = *pp;
- erts_refc_dec(&tb->common.ref,0);
+ erts_smp_refc_dec(&tb->common.ref,0);
--(fix->counter);
ASSERT(fix->counter >= 0);
if (fix->counter > 0) {
@@ -3563,7 +3563,7 @@ static void free_fixations_locked(DbTable *tb)
fix = tb->common.fixations;
while (fix != NULL) {
erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_refc_add(&tb->common.ref,diff,0);
+ erts_smp_refc_add(&tb->common.ref,diff,0);
next_fix = fix->next;
db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
db_erase_bag_exact2(meta_pid_to_fixed_tab,
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 6732b708a8..f4db7ca3dd 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -3107,7 +3107,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
break;
case FUN_SUBTAG:
ASSERT(u.pb != &tmp);
- if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+ if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) {
erts_erase_fun_entry(u.fun->fe);
}
break;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 49e5f6b4cf..3fb063797e 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -211,7 +211,7 @@ typedef struct db_fixation {
*/
typedef struct db_table_common {
- erts_refc_t ref; /* fixation counter */
+ erts_smp_refc_t ref; /* fixation counter */
#ifdef ERTS_SMP
erts_smp_rwmtx_t rwlock; /* rw lock on table */
erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
@@ -260,7 +260,7 @@ typedef struct db_table_common {
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
-#define NFIXED(T) (erts_refc_read(&(T)->common.ref,0))
+#define NFIXED(T) (erts_smp_refc_read(&(T)->common.ref,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
/*
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index d0a57f0ad0..fd4d5b1c5c 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -110,9 +110,9 @@ erts_put_fun_entry(Eterm mod, int uniq, int index)
fe = (ErlFunEntry *) hash_put(&erts_fun_table, (void*) &template);
sys_memset(fe->uniq, 0, sizeof(fe->uniq));
fe->index = 0;
- refc = erts_refc_inctest(&fe->refc, 0);
+ refc = erts_smp_refc_inctest(&fe->refc, 0);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&fe->refc, 1);
+ erts_smp_refc_inc(&fe->refc, 1);
erts_fun_write_unlock();
return fe;
}
@@ -134,9 +134,9 @@ erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
sys_memcpy(fe->uniq, uniq, sizeof(fe->uniq));
fe->index = index;
fe->arity = arity;
- refc = erts_refc_inctest(&fe->refc, 0);
+ refc = erts_smp_refc_inctest(&fe->refc, 0);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&fe->refc, 1);
+ erts_smp_refc_inc(&fe->refc, 1);
erts_fun_write_unlock();
return fe;
}
@@ -161,9 +161,9 @@ erts_get_fun_entry(Eterm mod, int uniq, int index)
erts_fun_read_lock();
ret = (ErlFunEntry *) hash_get(&erts_fun_table, (void*) &template);
if (ret) {
- erts_aint_t refc = erts_refc_inctest(&ret->refc, 1);
+ erts_aint_t refc = erts_smp_refc_inctest(&ret->refc, 1);
if (refc < 2) /* Pending delete */
- erts_refc_inc(&ret->refc, 1);
+ erts_smp_refc_inc(&ret->refc, 1);
}
erts_fun_read_unlock();
return ret;
@@ -184,7 +184,7 @@ erts_erase_fun_entry(ErlFunEntry* fe)
* We have to check refc again since someone might have looked up
* the fun entry and incremented refc after last check.
*/
- if (erts_refc_dectest(&fe->refc, -1) <= 0)
+ if (erts_smp_refc_dectest(&fe->refc, -1) <= 0)
#endif
{
if (fe->address != unloaded_fun)
@@ -256,7 +256,7 @@ erts_fun_purge_complete(ErlFunEntry **funs, Uint no)
for (ix = 0; ix < no; ix++) {
ErlFunEntry *fe = funs[ix];
fe->pend_purge_address = NULL;
- if (erts_refc_dectest(&fe->refc, 0) == 0)
+ if (erts_smp_refc_dectest(&fe->refc, 0) == 0)
erts_erase_fun_entry(fe);
}
ERTS_SMP_WRITE_MEMORY_BARRIER;
@@ -288,7 +288,7 @@ erts_dump_fun_entries(fmtfn_t to, void *to_arg)
#ifdef HIPE
erts_print(to, to_arg, "Native_address: %p\n", fe->native_address);
#endif
- erts_print(to, to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1));
+ erts_print(to, to_arg, "Refc: %ld\n", erts_smp_refc_read(&fe->refc, 1));
b = b->next;
}
}
@@ -319,7 +319,7 @@ fun_alloc(ErlFunEntry* template)
obj->old_uniq = template->old_uniq;
obj->old_index = template->old_index;
obj->module = template->module;
- erts_refc_init(&obj->refc, -1);
+ erts_smp_refc_init(&obj->refc, -1);
obj->address = unloaded_fun;
obj->pend_purge_address = NULL;
#ifdef HIPE
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index caa55c730c..0fd0d62343 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -42,7 +42,7 @@ typedef struct erl_fun_entry {
Uint arity; /* The arity of the fun. */
Eterm module; /* Tagged atom for module. */
- erts_refc_t refc; /* Reference count: One for code + one for each
+ erts_smp_refc_t refc; /* Reference count: One for code + one for each
fun object in each process. */
BeamInstr *pend_purge_address; /* address stored during a pending purge */
} ErlFunEntry;
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index af799d09da..9b7744068e 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -59,6 +59,10 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
+#if defined(DEBUG) && 0
+# define HARDDEBUG 1
+#endif
+
/*
* Returns number of elements in an array.
*/
@@ -110,7 +114,6 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static void remove_message_buffers(Process* p);
static Eterm *full_sweep_heaps(Process *p,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
@@ -153,7 +156,7 @@ static void init_gc_info(ErtsGCInfo *gcip);
static Uint64 next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz);
#ifdef HARDDEBUG
-static void disallow_heap_frag_ref_in_heap(Process* p);
+static void disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop);
static void disallow_heap_frag_ref_in_old_heap(Process* p);
#endif
@@ -176,6 +179,13 @@ typedef struct {
erts_smp_atomic32_t refc;
} ErtsGCInfoReq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static struct {
+ erts_mtx_t mtx;
+ ErtsGCInfo info;
+} dirty_gc;
+#endif
+
static ERTS_INLINE int
gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
{
@@ -259,6 +269,11 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ init_gc_info(&dirty_gc.info);
+#endif
+
init_gcireq_alloc();
}
@@ -477,24 +492,26 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
hsz = ssz + need + ERTS_DELAY_GC_EXTRA_FREE;
hfrag = new_message_buffer(hsz);
- hfrag->next = p->mbuf;
- p->mbuf = hfrag;
- p->mbuf_sz += hsz;
p->heap = p->htop = &hfrag->mem[0];
p->hend = hend = &hfrag->mem[hsz];
p->stop = stop = hend - ssz;
sys_memcpy((void *) stop, (void *) orig_stop, ssz * sizeof(Eterm));
if (p->abandoned_heap) {
- /* Active heap already in a fragment; adjust it... */
- ErlHeapFragment *hfrag = ((ErlHeapFragment *)
- (((char *) orig_heap)
- - offsetof(ErlHeapFragment, mem)));
- Uint unused = orig_hend - orig_htop;
- ASSERT(hfrag->used_size == hfrag->alloc_size);
- ASSERT(hfrag->used_size >= unused);
- hfrag->used_size -= unused;
- p->mbuf_sz -= unused;
+ /*
+ * Active heap already in a fragment; adjust it and
+ * save it into mbuf list...
+ */
+ ErlHeapFragment *hfrag = ((ErlHeapFragment *)
+ (((char *) orig_heap)
+ - offsetof(ErlHeapFragment, mem)));
+ Uint used = orig_htop - orig_heap;
+ hfrag->used_size = used;
+ p->mbuf_sz += used;
+ ASSERT(hfrag->used_size <= hfrag->alloc_size);
+ ASSERT(!hfrag->off_heap.first && !hfrag->off_heap.overhead);
+ hfrag->next = p->mbuf;
+ p->mbuf = hfrag;
}
else {
/* Do not leave a hole in the abandoned heap... */
@@ -556,17 +573,14 @@ young_gen_usage(Process *p)
}
}
+ hsz += p->htop - p->heap;
aheap = p->abandoned_heap;
- if (!aheap)
- hsz += p->htop - p->heap;
- else {
+ if (aheap) {
/* used in orig heap */
if (p->flags & F_ABANDONED_HEAP_USE)
hsz += aheap[p->heap_sz-1];
else
hsz += p->heap_sz;
- /* Remove unused part in latest fragment */
- hsz -= p->hend - p->htop;
}
return hsz;
}
@@ -602,7 +616,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
Uint reclaimed_now = 0;
Eterm gc_trace_end_tag;
int reds;
- ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */
+ ErtsMonotonicTime start_time;
ErtsSchedulerData *esdp;
erts_aint32_t state;
ERTS_MSACC_PUSH_STATE_M();
@@ -610,6 +624,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
+ ERTS_UNDEF(start_time, 0);
ERTS_CHK_MBUF_SZ(p);
ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
@@ -735,8 +750,19 @@ do_major_collection:
monitor_large_heap(p);
}
- esdp->gc_info.garbage_cols++;
- esdp->gc_info.reclaimed += reclaimed_now;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ dirty_gc.info.garbage_cols++;
+ dirty_gc.info.reclaimed += reclaimed_now;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+ else
+#endif
+ {
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+ }
FLAGS(p) &= ~F_FORCE_GC;
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -765,6 +791,7 @@ do_major_collection:
ASSERT(!p->mbuf);
ASSERT(!ERTS_IS_GC_DESIRED(p));
+ ASSERT(need <= HEAP_LIMIT(p) - HEAP_TOP(p));
return reds;
}
@@ -789,6 +816,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
>= erts_proc_sched_data(p)->virtual_reds);
}
+
/*
* Place all living data on a the new heap; deallocate any old heap.
* Meant to be used by hibernate/3.
@@ -835,11 +863,11 @@ erts_garbage_collect_hibernate(Process* p)
p->arg_reg,
p->arity);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : p->heap),
- p->heap_sz * sizeof(Eterm));
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, heap, htop);
+#endif
+
+ erts_deallocate_young_generation(p);
p->heap = heap;
p->high_water = htop;
@@ -874,8 +902,6 @@ erts_garbage_collect_hibernate(Process* p)
sys_memcpy((void *) heap, (void *) p->heap, actual_size*sizeof(Eterm));
ERTS_HEAP_FREE(ERTS_ALC_T_TMP_HEAP, p->heap, p->heap_sz*sizeof(Eterm));
- remove_message_buffers(p);
-
p->stop = p->hend = heap + heap_size;
offs = heap - p->heap;
@@ -1494,22 +1520,16 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- HEAP_SIZE(p) * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
HEAP_END(p) = n_heap + new_sz;
-
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
}
/*
@@ -1598,13 +1618,12 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- p->heap_sz * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
@@ -1613,11 +1632,6 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
HIGH_WATER(p) = HEAP_TOP(p);
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
-
if (p->flags & F_ON_HEAP_MSGQ)
move_msgq_to_heap(p);
@@ -1770,22 +1784,56 @@ adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj)
return adjusted;
}
-/*
- * Remove all message buffers.
- */
-static void
-remove_message_buffers(Process* p)
+void
+erts_deallocate_young_generation(Process *c_p)
{
- if (MBUF(p) != NULL) {
- free_message_buffer(MBUF(p));
- MBUF(p) = NULL;
+ Eterm *orig_heap;
+
+ if (!c_p->abandoned_heap) {
+ orig_heap = c_p->heap;
+ ASSERT(!(c_p->flags & F_ABANDONED_HEAP_USE));
+ }
+ else {
+ ErlHeapFragment *hfrag;
+
+ orig_heap = c_p->abandoned_heap;
+ c_p->abandoned_heap = NULL;
+ c_p->flags &= ~F_ABANDONED_HEAP_USE;
+
+ /*
+ * Temporary heap located in heap fragment
+ * only referred to by 'c_p->heap'. Add it to
+ * 'c_p->mbuf' list and deallocate it as any
+ * other heap fragment...
+ */
+ hfrag = ((ErlHeapFragment *)
+ (((char *) c_p->heap)
+ - offsetof(ErlHeapFragment, mem)));
+
+ ASSERT(!hfrag->off_heap.first);
+ ASSERT(!hfrag->off_heap.overhead);
+ ASSERT(!hfrag->next);
+ ASSERT(c_p->htop - c_p->heap <= hfrag->alloc_size);
+
+ hfrag->next = c_p->mbuf;
+ c_p->mbuf = hfrag;
+ }
+
+ if (c_p->mbuf) {
+ free_message_buffer(c_p->mbuf);
+ c_p->mbuf = NULL;
}
- if (p->msg_frag) {
- erts_cleanup_messages(p->msg_frag);
- p->msg_frag = NULL;
+ if (c_p->msg_frag) {
+ erts_cleanup_messages(c_p->msg_frag);
+ c_p->msg_frag = NULL;
}
- MBUF_SIZE(p) = 0;
+ c_p->mbuf_sz = 0;
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ orig_heap,
+ c_p->heap_sz * sizeof(Eterm));
}
+
#ifdef HARDDEBUG
/*
@@ -1797,19 +1845,15 @@ remove_message_buffers(Process* p)
*/
static void
-disallow_heap_frag_ref_in_heap(Process* p)
+disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop)
{
Eterm* hp;
- Eterm* htop;
- Eterm* heap;
Uint heap_size;
if (p->mbuf == 0) {
return;
}
- htop = p->htop;
- heap = p->heap;
heap_size = (htop - heap)*sizeof(Eterm);
hp = heap;
@@ -2718,7 +2762,7 @@ sweep_off_heap(Process *p, int fullsweep)
case FUN_SUBTAG:
{
ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe;
- if (erts_refc_dectest(&fe->refc, 0) == 0) {
+ if (erts_smp_refc_dectest(&fe->refc, 0) == 0) {
erts_erase_fun_entry(fe);
}
break;
@@ -3017,6 +3061,18 @@ reply_gc_info(void *vgcirp)
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /*
+ * Add dirty schedulers info on requesting
+ * schedulers info
+ */
+ if (gcirp->req_sched == esdp->no) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ reclaimed += dirty_gc.info.reclaimed;
+ garbage_cols += dirty_gc.info.garbage_cols;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+#endif
sz = 0;
hpp = NULL;
@@ -3297,13 +3353,15 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
ErtsMessage* mp;
Eterm *htop, *heap;
- if (p->abandoned_heap)
+ if (p->abandoned_heap) {
ERTS_GET_ORIG_HEAP(p, heap, htop);
- else {
- heap = p->heap;
- htop = real_htop ? real_htop : HEAP_TOP(p);
+ if (heap <= ptr && ptr < htop)
+ return 1;
}
+ heap = p->heap;
+ htop = real_htop ? real_htop : HEAP_TOP(p);
+
if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
return 1;
}
@@ -3374,12 +3432,12 @@ erts_check_off_heap2(Process *p, Eterm *htop)
refc = erts_refc_read(&u.pb->val->refc, 1);
break;
case FUN_SUBTAG:
- refc = erts_refc_read(&u.fun->fe->refc, 1);
+ refc = erts_smp_refc_read(&u.fun->fe->refc, 1);
break;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
- refc = erts_refc_read(&u.ext->node->refc, 1);
+ refc = erts_smp_refc_read(&u.ext->node->refc, 1);
break;
default:
ASSERT(!"erts_check_off_heap2: Invalid thing_word");
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 54ea9ca3c0..2521379664 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -27,10 +27,6 @@
#include "erl_map.h"
-#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
-# define HARDDEBUG 1
-#endif
-
#define IS_MOVED_BOXED(x) (!is_header((x)))
#define IS_MOVED_CONS(x) (is_non_value((x)))
@@ -157,5 +153,6 @@ void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_free_heap_frags(struct process* p);
Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
int erts_max_heap_size(Eterm, Uint *, Uint *);
+void erts_deallocate_young_generation(Process *c_p);
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index d29d079fc5..647fa26811 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -2666,7 +2666,7 @@ erts_start_timer_callback(ErtsMonotonicTime tmo,
tmo);
twt = tmo < ERTS_TIMER_WHEEL_MSEC;
- if (esdp)
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
start_callback_timer(esdp,
twt,
timeout_pos,
@@ -2865,7 +2865,8 @@ btm_print(ErtsHLTimer *tmr, void *vbtmp)
if (tmr->timeout <= btmp->now)
left = 0;
- left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
? tmr->receiver.name
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 2fd97208cc..f65a06c85f 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -117,6 +117,11 @@ const int etp_big_endian = 1;
const int etp_big_endian = 0;
#endif
const Eterm etp_the_non_value = THE_NON_VALUE;
+#ifdef ERTS_HOLE_MARKER
+const Eterm etp_hole_marker = ERTS_HOLE_MARKER;
+#else
+const Eterm etp_hole_marker = 0;
+#endif
/*
* Note about VxWorks: All variables must be initialized by executable code,
@@ -2220,7 +2225,6 @@ erl_start(int argc, char **argv)
init_break_handler();
if (replace_intr)
erts_replace_intr();
- sys_init_suspend_handler();
#endif
boot_argc = argc - i; /* Number of arguments to init */
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 06266363b5..5d4823c077 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -129,6 +129,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "run_queue", "address" },
#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
+ { "dirty_gc_info", NULL },
+ { "dirty_break_point_index", NULL },
#endif
{ "process_table", NULL },
{ "cpu_info", NULL },
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index e4c696ae3b..019e079e67 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -172,7 +172,7 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
}
break;
case FUN_SUBTAG:
- if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+ if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) {
erts_erase_fun_entry(u.fun->fe);
}
break;
@@ -193,7 +193,7 @@ free_message_buffer(ErlHeapFragment* bp)
erts_cleanup_offheap(&bp->off_heap);
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size));
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
bp = next_bp;
}while (bp != NULL);
}
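
Editor's note: the one-word change above frees each heap fragment with the size it was allocated with (alloc_size) rather than its currently used size, so the free matches the original allocation. A minimal stand-alone sketch of a fragment chain carrying both sizes; frag_t, sized_free and FRAG_BYTES are made-up names, with malloc/free standing in for the ERTS allocator:

    #include <stdio.h>
    #include <stdlib.h>

    /* a sized free, in the spirit of ERTS_HEAP_FREE(type, ptr, size): the
     * size passed here must be the size originally handed to the allocator */
    static void sized_free(void *p, size_t size)
    {
        printf("freeing %zu bytes\n", size);   /* bookkeeping hook */
        free(p);
    }

    typedef struct frag {
        struct frag *next;
        size_t alloc_size;   /* capacity the fragment was created with */
        size_t used_size;    /* part actually filled, <= alloc_size */
        long   mem[];        /* data follows */
    } frag_t;

    #define FRAG_BYTES(nwords) (sizeof(frag_t) + (nwords) * sizeof(long))

    /* free a whole chain, accounting with alloc_size, never used_size */
    static void free_frag_chain(frag_t *bp)
    {
        while (bp) {
            frag_t *next_bp = bp->next;
            sized_free(bp, FRAG_BYTES(bp->alloc_size));
            bp = next_bp;
        }
    }
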
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 910598690d..c207dea10f 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -104,7 +104,7 @@ do { \
(*((Hp)++)) = boxed_val((From))[i__]; \
if (is_external((To))) { \
external_thing_ptr((To))->next = NULL; \
- erts_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\
+ erts_smp_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\
} \
} \
} while (0)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index dd4b88e4a3..4fa15a8dd8 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -3561,8 +3561,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
#endif
if (p) {
/* This is almost a normal nif call like in beam_emu,
- except that any heap fragment created in the nif will be
- discarded without checking if anything in it is live.
+ except that any heap consumed by the nif will be
+ released without checking if anything in it is live.
This is because we cannot do a GC here as we don't know
the number of live registers that have to be preserved.
This means that any heap part of the returned term may
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 70500ed6e1..467a05f950 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -98,7 +98,7 @@ dist_table_alloc(void *dep_tmpl)
dist_entries++;
dep->prev = NULL;
- erts_refc_init(&dep->refc, -1);
+ erts_smp_refc_init(&dep->refc, -1);
erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
dep->sysname = sysname;
dep->cid = NIL;
@@ -208,7 +208,7 @@ erts_channel_no_to_dist_entry(Uint cno)
* to the node name is used as channel no.
*/
if(cno == ERST_INTERNAL_CHANNEL_NO) {
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ erts_smp_refc_inc(&erts_this_dist_entry->refc, 2);
return erts_this_dist_entry;
}
@@ -231,16 +231,16 @@ erts_sysname_to_connected_dist_entry(Eterm sysname)
de.sysname = sysname;
if(erts_this_dist_entry->sysname == sysname) {
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ erts_smp_refc_inc(&erts_this_dist_entry->refc, 2);
return erts_this_dist_entry;
}
erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
if (res_dep) {
- erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1);
+ erts_aint_t refc = erts_smp_refc_inctest(&res_dep->refc, 1);
if (refc < 2) /* Pending delete */
- erts_refc_inc(&res_dep->refc, 1);
+ erts_smp_refc_inc(&res_dep->refc, 1);
}
erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
if (res_dep) {
@@ -267,9 +267,9 @@ DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
de.sysname = sysname;
erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
res = hash_put(&erts_dist_table, (void *) &de);
- refc = erts_refc_inctest(&res->refc, 0);
+ refc = erts_smp_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&res->refc, 1);
+ erts_smp_refc_inc(&res->refc, 1);
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
return res;
}
@@ -282,9 +282,9 @@ DistEntry *erts_find_dist_entry(Eterm sysname)
erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
res = hash_get(&erts_dist_table, (void *) &de);
if (res) {
- erts_aint_t refc = erts_refc_inctest(&res->refc, 1);
+ erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 1);
if (refc < 2) /* Pending delete */
- erts_refc_inc(&res->refc, 1);
+ erts_smp_refc_inc(&res->refc, 1);
}
erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
return res;
@@ -311,7 +311,7 @@ static void try_delete_dist_entry(void *vdep)
*
* If refc > 0, the entry is in use. Keep the entry.
*/
- refc = erts_refc_dectest(&dep->refc, -1);
+ refc = erts_smp_refc_dectest(&dep->refc, -1);
if (refc == -1)
(void) hash_erase(&erts_dist_table, (void *) dep);
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
@@ -518,7 +518,7 @@ node_table_alloc(void *venp_tmpl)
node_entries++;
- erts_refc_init(&enp->refc, -1);
+ erts_smp_refc_init(&enp->refc, -1);
enp->creation = ((ErlNode *) venp_tmpl)->creation;
enp->sysname = ((ErlNode *) venp_tmpl)->sysname;
enp->dist_entry = erts_find_or_insert_dist_entry(((ErlNode *) venp_tmpl)->sysname);
@@ -585,9 +585,9 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
res = hash_get(&erts_node_table, (void *) &ne);
if (res && res != erts_this_node) {
- erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&res->refc, 1);
+ erts_smp_refc_inc(&res->refc, 1);
}
erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
if (res)
@@ -597,9 +597,9 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
if (res != erts_this_node) {
- erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
- erts_refc_inc(&res->refc, 1);
+ erts_smp_refc_inc(&res->refc, 1);
}
erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
return res;
@@ -626,7 +626,7 @@ static void try_delete_node(void *venp)
*
* If refc > 0, the entry is in use. Keep the entry.
*/
- refc = erts_refc_dectest(&enp->refc, -1);
+ refc = erts_smp_refc_dectest(&enp->refc, -1);
if (refc == -1)
(void) hash_erase(&erts_node_table, (void *) enp);
erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
@@ -672,7 +672,7 @@ static void print_node(void *venp, void *vpndp)
erts_print(pndp->to, pndp->to_arg, " %d", enp->creation);
#ifdef DEBUG
erts_print(pndp->to, pndp->to_arg, " (refc=%ld)",
- erts_refc_read(&enp->refc, 0));
+ erts_smp_refc_read(&enp->refc, 0));
#endif
pndp->no_sysname++;
}
@@ -715,19 +715,19 @@ void
erts_set_this_node(Eterm sysname, Uint creation)
{
ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
- ASSERT(erts_refc_read(&erts_this_dist_entry->refc, 2));
+ ASSERT(erts_smp_refc_read(&erts_this_dist_entry->refc, 2));
- if (erts_refc_dectest(&erts_this_node->refc, 0) == 0)
+ if (erts_smp_refc_dectest(&erts_this_node->refc, 0) == 0)
try_delete_node(erts_this_node);
- if (erts_refc_dectest(&erts_this_dist_entry->refc, 0) == 0)
+ if (erts_smp_refc_dectest(&erts_this_dist_entry->refc, 0) == 0)
try_delete_dist_entry(erts_this_dist_entry);
erts_this_node = NULL; /* to make sure refc is bumped for this node */
erts_this_node = erts_find_or_insert_node(sysname, creation);
erts_this_dist_entry = erts_this_node->dist_entry;
- erts_refc_inc(&erts_this_dist_entry->refc, 2);
+ erts_smp_refc_inc(&erts_this_dist_entry->refc, 2);
erts_this_node_sysname = erts_this_node_sysname_BUFFER;
erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
@@ -789,13 +789,13 @@ void erts_init_node_tables(int dd_sec)
node_tmpl.creation = 0;
erts_this_node = hash_put(&erts_node_table, &node_tmpl);
/* +1 for erts_this_node */
- erts_refc_init(&erts_this_node->refc, 1);
+ erts_smp_refc_init(&erts_this_node->refc, 1);
ASSERT(erts_this_node->dist_entry != NULL);
erts_this_dist_entry = erts_this_node->dist_entry;
/* +1 for erts_this_dist_entry */
/* +1 for erts_this_node->dist_entry */
- erts_refc_init(&erts_this_dist_entry->refc, 2);
+ erts_smp_refc_init(&erts_this_dist_entry->refc, 2);
erts_this_node_sysname = erts_this_node_sysname_BUFFER;
@@ -1623,7 +1623,7 @@ reference_table_term(Uint **hpp, Uint *szp)
tup = MK_2TUP(referred_nodes[i].node->sysname,
MK_UINT(referred_nodes[i].node->creation));
- tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 0)), nril);
+ tup = MK_3TUP(tup, MK_UINT(erts_smp_refc_read(&referred_nodes[i].node->refc, 0)), nril);
nl = MK_CONS(tup, nl);
}
@@ -1684,7 +1684,7 @@ reference_table_term(Uint **hpp, Uint *szp)
/* DistList = [{Dist, Refc, ReferenceIdList}] */
tup = MK_3TUP(referred_dists[i].dist->sysname,
- MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 0)),
+ MK_UINT(erts_smp_refc_read(&referred_dists[i].dist->refc, 0)),
dril);
dl = MK_CONS(tup, dl);
}
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 47a6724c21..35051173d0 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -107,7 +107,7 @@ typedef struct dist_entry_ {
HashBucket hash_bucket; /* Hash bucket */
struct dist_entry_ *next; /* Next entry in dist_table (not sorted) */
struct dist_entry_ *prev; /* Previous entry in dist_table (not sorted) */
- erts_refc_t refc; /* Reference count */
+ erts_smp_refc_t refc; /* Reference count */
erts_smp_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */
Eterm sysname; /* name@host atom for efficiency */
@@ -149,7 +149,7 @@ typedef struct dist_entry_ {
typedef struct erl_node_ {
HashBucket hash_bucket; /* Hash bucket */
- erts_refc_t refc; /* Reference count */
+ erts_smp_refc_t refc; /* Reference count */
Eterm sysname; /* name@host atom for efficiency */
Uint32 creation; /* Creation */
DistEntry *dist_entry; /* Corresponding dist entry */
@@ -210,7 +210,7 @@ ERTS_GLB_INLINE void
erts_deref_dist_entry(DistEntry *dep)
{
ASSERT(dep);
- if (erts_refc_dectest(&dep->refc, 0) == 0)
+ if (erts_smp_refc_dectest(&dep->refc, 0) == 0)
erts_schedule_delete_dist_entry(dep);
}
@@ -218,7 +218,7 @@ ERTS_GLB_INLINE void
erts_deref_node_entry(ErlNode *np)
{
ASSERT(np);
- if (erts_refc_dectest(&np->refc, 0) == 0)
+ if (erts_smp_refc_dectest(&np->refc, 0) == 0)
erts_schedule_delete_node(np);
}
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b345c35a7e..485949c84a 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2060,6 +2060,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -6234,6 +6235,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
int queue;
erts_aint32_t dact, max_qbit;
+ /* Do not enqueue free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
/* Termination should be done on an ordinary scheduler */
if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6426,6 +6433,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
/*
 * schedule_out_process() returns with c_rq locked.
+ *
+ * Returns a non-zero value if the caller should decrease
+ * the reference count on the process when done with it...
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6480,12 +6490,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 0;
-
+#if !defined(ERTS_SMP)
+ /* Decrement refc if process struct is free... */
+ return !!(n & ERTS_PSFLG_FREE);
+#else
+ /* Decrement refc if scheduled out from dirty scheduler... */
+ return !is_normal_sched;
+#endif
}
else {
Process* sched_p;
+ ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6501,11 +6517,14 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(runq);
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
+
/* Enqueue the process */
enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
- return 1;
+ return 0;
erts_smp_runq_unlock(runq);
@@ -6513,9 +6532,15 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 1;
+ /*
+ * Decrement refc if process is scheduled out by a
+ * dirty scheduler, and we have not just scheduled
+ * the process using the ordinary process struct
+ * on a dirty run-queue again...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
}
-
}
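
Editor's note: the hunks above turn schedule_out_process() into a function whose return value tells the caller whether a process reference must be dropped once the process struct is no longer touched; the reference is the one held while the process sits in a dirty run queue or runs on a dirty scheduler. A toy sketch of the caller-side contract with invented names; it mimics only the shape, not the real locking:

    #include <stdio.h>

    typedef struct { int refc; } proc_t;

    /* hypothetical stand-ins; the real functions live in erl_process.c */
    static void proc_dec_refc(proc_t *p) { p->refc--; }

    /* pretend the process was scheduled out from a dirty scheduler, so the
     * reference taken when it entered the dirty queue must now be dropped */
    static int schedule_out(proc_t *p, int is_normal_sched)
    {
        (void)p;
        return !is_normal_sched;      /* non-zero: caller must deref */
    }

    int main(void)
    {
        proc_t p = { .refc = 2 };
        int dec_refc = schedule_out(&p, /* is_normal_sched = */ 0);
        /* ... unlock run queue, detach scheduler data ... */
        if (dec_refc)
            proc_dec_refc(&p);        /* last use of p on this path */
        printf("refc = %d\n", p.refc);
        return 0;
    }
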
static ERTS_INLINE void
@@ -6530,8 +6555,17 @@ add2runq(int enqueue, erts_aint32_t prio,
if (runq) {
Process *sched_p;
- if (enqueue > 0)
+ if (enqueue > 0) {
sched_p = proc;
+ /*
+ * Refc on process struct (i.e. true struct,
+ * not proxy-struct) increased while in a
+ * dirty run-queue or executing on a dirty
+ * scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
else {
Process *pxy;
@@ -7055,12 +7089,18 @@ typedef struct {
} ErtsSchdlrSspndResume;
static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
if (p) {
resume_process(p, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
}
}
@@ -7069,17 +7109,20 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
ErtsSchdlrSspndResume *resume)
{
if (is_internal_pid(resume->onln.chngr)) {
- schdlr_sspnd_resume_proc(resume->onln.chngr);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
resume->onln.chngr = NIL;
}
if (is_internal_pid(resume->onln.nxt)) {
- schdlr_sspnd_resume_proc(resume->onln.nxt);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
resume->onln.nxt = NIL;
}
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(plp->pid);
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
proclist_destroy(plp);
}
}
@@ -7131,16 +7174,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (sched_type != ERTS_SCHED_NORMAL)
erts_smp_runq_unlock(esdp->run_queue);
- }
else {
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7153,8 +7188,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
sched_wall_time_change(esdp, 0);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
@@ -7198,15 +7233,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
/* resume processes that initiated the multi scheduling block... */
plp = msb[i]->chngq;
- while (plp) {
- erts_proclist_store_last(&msb[i]->blckrs,
- proclist_copy(plp));
- plp = plp->next;
- }
- if (end_plp)
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
end_plp->next = resume.msb.chngrs;
- resume.msb.chngrs = msb[i]->chngq;
- msb[i]->chngq = NULL;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
}
}
}
@@ -7270,24 +7308,14 @@ suspend_scheduler(ErtsSchedulerData *esdp)
while (1) {
ErtsMonotonicTime current_time;
- erts_aint32_t qmask;
erts_aint32_t flgs;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
-
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (qmask) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
+ if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
- }
else {
+ erts_aint32_t qmask;
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -7415,12 +7443,23 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == schdlr_sspnd.active) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
-
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd.online == schdlr_sspnd.active) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
ASSERT((sched_type == ERTS_SCHED_NORMAL
? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
@@ -7553,7 +7592,7 @@ abort_sched_onln_chng_waitq(Process *p)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
if (is_internal_pid(resume))
- schdlr_sspnd_resume_proc(resume);
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
}
ErtsSchedSuspendResult
@@ -7951,21 +7990,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
else { /* ------ UNBLOCK ------ */
if (p->flags & have_blckd_flg) {
- ErtsProcList *plps[2];
+ ErtsProcList **plpps[3] = {0};
ErtsProcList *plp;
- int limit = 0;
- plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
- if (all)
- plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
- for (ix = 0; ix < limit; ix++) {
- plp = plps[ix];
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
while (plp) {
ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(msbp->blckrs, plp);
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&msbp->blckrs, tmp_plp);
+ erts_proclist_remove(plpps[ix], tmp_plp);
proclist_destroy(tmp_plp);
if (!all)
break;
@@ -8604,8 +8642,15 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
* from being selected for normal execution regardless
* of locks held or not held on it...
*/
- ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&rp->state)));
+#ifdef DEBUG
+ {
+ erts_aint32_t state;
+ state = erts_smp_atomic32_read_nob(&rp->state);
+ ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
+ || !(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ }
+#endif
if (!suspend)
resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
@@ -9587,40 +9632,46 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
esdp->reductions += reds;
- /* schedule_out_process() returns with rq locked! */
- schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
- proxy_p = NULL;
+ {
+ int dec_refc;
+
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
- ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
- esdp->current_process = NULL;
+ esdp->current_process = NULL;
#ifdef ERTS_SMP
- p->scheduler_data = NULL;
+ if (is_normal_sched)
+ p->scheduler_data = NULL;
#endif
- erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_STATUS
- | ERTS_PROC_LOCK_TRACE));
+ erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
- if (state & ERTS_PSFLG_FREE) {
- if (!is_normal_sched) {
- ASSERT(p->flags & F_DELAYED_DEL_PROC);
- erts_proc_dec_refc(p);
- }
- else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- erts_proc_dec_refc(p);
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
#endif
- }
- }
+
+ if (dec_refc)
+ erts_proc_dec_refc(p);
+ }
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
@@ -9896,8 +9947,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9929,6 +9984,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE)))
#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING))
@@ -9961,6 +10017,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* free and not queued by proxy */
erts_proc_dec_refc(p);
}
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
@@ -10013,8 +10071,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
- ASSERT(!p->scheduler_data);
#ifndef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!p->scheduler_data);
p->scheduler_data = esdp;
#else /* ERTS_DIRTY_SCHEDULERS */
if (is_normal_sched) {
@@ -10025,6 +10083,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
goto sched_out_proc;
}
+ ASSERT(!p->scheduler_data);
p->scheduler_data = esdp;
}
else {
@@ -11912,7 +11971,6 @@ erts_cleanup_empty_process(Process* p)
static void
delete_process(Process* p)
{
- Eterm *heap;
ErtsPSD *psd;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
@@ -11967,13 +12025,8 @@ delete_process(Process* p)
hipe_delete_process(&p->hipe);
#endif
- heap = p->abandoned_heap ? p->abandoned_heap : p->heap;
-
-#ifdef DEBUG
- sys_memset(heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
-#endif
+ erts_deallocate_young_generation(p);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) heap, p->heap_sz*sizeof(Eterm));
if (p->old_heap != NULL) {
#ifdef DEBUG
@@ -11985,16 +12038,6 @@ delete_process(Process* p)
(p->old_hend-p->old_heap)*sizeof(Eterm));
}
- /*
- * Free all pending message buffers.
- */
- if (p->mbuf != NULL) {
- free_message_buffer(p->mbuf);
- }
-
- if (p->msg_frag)
- erts_cleanup_messages(p->msg_frag);
-
erts_erase_dicts(p);
/* free all pending messages */
@@ -13091,15 +13134,14 @@ erts_continue_exit_process(Process *p)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
delay_del_proc = 1;
/*
- * The dirty scheduler will also decrease
- * refc when done...
+	     * The dirty scheduler decreases refc
+ * when done with the process...
*/
- erts_proc_inc_refc(p);
}
#endif
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 9f7084c127..68fbb10602 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1684,7 +1684,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
return NULL;
else {
ErtsProcList *res = *list;
- if (res == *list)
+ if (res->next == *list)
*list = NULL;
else
*list = res->next;
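
Editor's note: the one-line fix above matters because the proclist is circular and res is *list by construction, so the old test was always true and the head was cleared even when more elements remained; the corrected test empties the list only when the removed element's next pointer wraps back to the head. A minimal circular doubly linked fetch-first with invented names:

    #include <stdio.h>

    typedef struct node { struct node *next, *prev; int id; } node_t;

    /* remove and return the first element of a circular doubly linked list;
     * the list becomes empty only when that element was also the last one */
    static node_t *fetch_first(node_t **list)
    {
        node_t *res = *list;
        if (!res)
            return NULL;
        if (res->next == *list) {        /* element points back to itself */
            *list = NULL;
        } else {
            res->prev->next = res->next; /* unlink: last -> new first */
            res->next->prev = res->prev;
            *list = res->next;
        }
        return res;
    }

    int main(void)
    {
        node_t a = { &a, &a, 1 };        /* one-element circular list */
        node_t *list = &a;
        node_t *first = fetch_first(&list);
        printf("fetched %d, list %s\n", first->id, list ? "non-empty" : "empty");
        return 0;
    }
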
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index d8bb00e8c6..4a74cc2b82 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -90,9 +90,12 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) {
erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size);
erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size);
size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
+ if (p->abandoned_heap)
+ size += (p->hend - p->heap) * sizeof(Eterm);
if (p->old_hend && p->old_heap)
size += (p->old_hend - p->old_heap) * sizeof(Eterm);
+
size += p->msg.len * sizeof(ErtsMessage);
for (mp = p->msg.first; mp; mp = mp->next)
@@ -443,7 +446,7 @@ heap_dump(fmtfn_t to, void *to_arg, Eterm x)
ProcBin* pb = (ProcBin *) binary_val(x);
Binary* val = pb->val;
- if (erts_smp_atomic_xchg_nob(&val->refc, 0) != 0) {
+ if (erts_atomic_xchg_nob(&val->refc, 0) != 0) {
val->flags = (UWord) all_binaries;
all_binaries = val;
}
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 8c84303997..ac9e91e31f 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1436,6 +1436,7 @@ void
trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
{
ErtsTracerNif *tnif = NULL;
+ Eterm* o_hp = NULL;
Eterm* hp;
Uint sz = 0;
Eterm tup;
@@ -1446,7 +1447,7 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
if (is_non_value(msg)) {
(void) erts_process_gc_info(p, &sz, NULL, 0, 0);
- hp = HAlloc(p, sz + 3 + 2);
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, (sz + 3 + 2) * sizeof(Eterm));
msg = erts_process_gc_info(p, NULL, &hp, 0, 0);
tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
@@ -1455,6 +1456,8 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC,
what, msg, THE_NON_VALUE, am_true);
+ if (o_hp)
+ erts_free(ERTS_ALC_T_TMP, o_hp);
}
}
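
Editor's note: the change above builds the GC trace message in a temporary buffer instead of with HAlloc(), since allocating on the process heap is not safe while that heap is being collected; the buffer is released after the message has been handed to the tracer. A small sketch of the same scratch-buffer pattern, with malloc/free standing in for erts_alloc/erts_free and an invented emit callback:

    #include <stdlib.h>
    #include <string.h>

    /* build a message in a scratch buffer that never touches the process
     * heap; malloc/free play the role of erts_alloc/erts_free here */
    static void emit_with_scratch(size_t nwords,
                                  void (*emit)(const unsigned long *msg, size_t n))
    {
        unsigned long *o_hp, *hp;

        o_hp = hp = malloc(nwords * sizeof(unsigned long));
        if (!hp)
            return;
        memset(hp, 0, nwords * sizeof(unsigned long));

        /* ... encode the term into hp ... */
        emit(hp, nwords);

        if (o_hp)
            free(o_hp);               /* release the scratch buffer */
    }
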
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index f97716d030..899e2a275a 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -110,8 +110,21 @@
#define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p))
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
-# define ERTS_HOLE_MARKER (((0xdeadbeef << 24) << 8) | 0xdeadbeef)
-#endif /* egil: 32-bit ? */
+
+/*
+ * ERTS_HOLE_MARKER must *not* be mistaken for a valid term
+ * on the heap...
+ */
+# ifdef ARCH_64
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdeadbeaf00000000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdeadbeaf0000001b */
+# else
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdead0000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdead001b */
+# endif
+#endif
/*
* Allocate heap memory on the ordinary heap, NEVER in a heap
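
Editor's note: the two comments in the hunk state what the hole markers should look like on the heap. A throwaway check of that arithmetic, assuming a 6-bit immediate-2 tag space with 0x1B as the catch tag (values inferred from the comments, not taken from the ERTS headers):

    #include <stdio.h>
    #include <stdint.h>

    #define TAG_IMMED2_SIZE  6       /* assumed shift */
    #define TAG_IMMED2_CATCH 0x1Bu   /* assumed catch tag */

    #define MAKE_CATCH(x) (((uint64_t)(x) << TAG_IMMED2_SIZE) | TAG_IMMED2_CATCH)

    int main(void)
    {
        uint64_t marker64 = MAKE_CATCH(UINT64_C(0xdeadbeaf00000000) >> TAG_IMMED2_SIZE);
        uint32_t marker32 = (uint32_t)MAKE_CATCH(UINT32_C(0xdead0000) >> TAG_IMMED2_SIZE);

        printf("64-bit hole marker: 0x%016llx\n", (unsigned long long)marker64);
        printf("32-bit hole marker: 0x%08lx\n", (unsigned long)marker32);
        /* prints 0xdeadbeaf0000001b and 0xdead001b, matching the comments */
        return 0;
    }
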
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 7f043e191b..00fa452f79 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -348,7 +348,7 @@ Export *export_list(int i, ErtsCodeIndex code_ix)
int export_list_size(ErtsCodeIndex code_ix)
{
- return export_tables[code_ix].entries;
+ return erts_index_num_entries(&export_tables[code_ix]);
}
int export_table_sz(void)
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index beed847578..0ef8114bac 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -616,7 +616,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize)
sys_memcpy((void *) ep, (void *) edep, dist_ext_sz);
ep += dist_ext_sz;
if (new_edep->dep)
- erts_refc_inc(&new_edep->dep->refc, 1);
+ erts_smp_refc_inc(&new_edep->dep->refc, 1);
new_edep->extp = ep;
new_edep->ext_endp = ep + ext_sz;
new_edep->heap_size = -1;
@@ -1222,6 +1222,7 @@ typedef struct B2TContext_t {
} u;
} B2TContext;
+static B2TContext* b2t_export_context(Process*, B2TContext* src);
static uLongf binary2term_uncomp_size(byte* data, Sint size)
{
@@ -1254,7 +1255,7 @@ static uLongf binary2term_uncomp_size(byte* data, Sint size)
static ERTS_INLINE int
binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
- B2TContext* ctx)
+ B2TContext** ctxp, Process* p)
{
byte *bytes = data;
Sint size = data_size;
@@ -1268,8 +1269,8 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
size--;
if (size < 5 || *bytes != COMPRESSED) {
state->extp = bytes;
- if (ctx)
- ctx->state = B2TSizeInit;
+ if (ctxp)
+ (*ctxp)->state = B2TSizeInit;
}
else {
uLongf dest_len = (Uint32) get_int32(bytes+1);
@@ -1286,16 +1287,26 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
return -1;
}
state->extp = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, dest_len);
- ctx->reds -= dest_len;
+ if (ctxp)
+ (*ctxp)->reds -= dest_len;
}
state->exttmp = 1;
- if (ctx) {
+ if (ctxp) {
+ /*
+ * Start decompression by exporting trap context
+ * so we don't have to deal with deep-copying z_stream.
+ */
+ B2TContext* ctx = b2t_export_context(p, *ctxp);
+	    ASSERT(state == &(*ctxp)->b2ts);
+ state = &ctx->b2ts;
+
if (erl_zlib_inflate_start(&ctx->u.uc.stream, bytes, size) != Z_OK)
return -1;
ctx->u.uc.dbytes = state->extp;
ctx->u.uc.dleft = dest_len;
ctx->state = B2TUncompressChunk;
+ *ctxp = ctx;
}
else {
uLongf dlen = dest_len;
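
Editor's note: the comment above explains the trick: the trap context is exported (heap-allocated) before zlib's inflate state is set up, so when the decode later yields and resumes, the z_stream zlib was started with is still the one in use and never has to be deep-copied. A small stand-alone shape of that idea; ctx_t and export_ctx are invented names and plain malloc stands in for the ERTS allocator:

    #include <stdlib.h>

    typedef struct {
        int   state;
        long  reds;
        /* stateful sub-object that must not be copied once started,
         * playing the role of z_stream here */
        void *stream_internal;
    } ctx_t;

    /* move a stack-allocated context into heap memory before starting
     * any state that cannot safely be copied later */
    static ctx_t *export_ctx(const ctx_t *stack_ctx)
    {
        ctx_t *heap_ctx = malloc(sizeof *heap_ctx);
        if (heap_ctx)
            *heap_ctx = *stack_ctx;   /* copy is still legal: nothing started yet */
        return heap_ctx;
    }
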
@@ -1339,7 +1350,7 @@ erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size
{
Sint res;
- if (binary2term_prepare(state, data, data_size, NULL) < 0 ||
+ if (binary2term_prepare(state, data, data_size, NULL, NULL) < 0 ||
(res=decoded_size(state->extp, state->extp + state->extsize, 0, NULL)) < 0) {
if (state->exttmp)
@@ -1485,7 +1496,7 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
if (ctx->aligned_alloc) {
ctx->reds -= bin_size / 8;
}
- if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, ctx) < 0) {
+ if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, &ctx, p) < 0) {
ctx->state = B2TBadArg;
}
break;
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index d6df85034c..26aa39b65a 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -127,7 +127,7 @@ typedef struct {
void *handle; /* Handle for DLL or SO (for dyn. drivers). */
DE_ProcEntry *procs; /* List of pids that have loaded this driver,
or that wait for it to change state */
- erts_refc_t refc; /* Number of ports/processes having
+ erts_smp_refc_t refc; /* Number of ports/processes having
references to the driver */
erts_smp_atomic32_t port_count; /* Number of ports using the driver */
Uint flags; /* ERL_DE_FL_KILL_PORTS */
diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c
index c86a2122f6..cd834e2c12 100644
--- a/erts/emulator/beam/index.c
+++ b/erts/emulator/beam/index.c
@@ -91,9 +91,16 @@ index_put_entry(IndexTable* t, void* tmpl)
t->seg_table[ix>>INDEX_PAGE_SHIFT] = erts_alloc(t->type, sz);
t->size += INDEX_PAGE_SIZE;
}
- t->entries++;
p->index = ix;
t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
+
+ /*
+ * Do a write barrier here to allow readers to do lock free iteration.
+ * erts_index_num_entries() does matching read barrier.
+ */
+ ERTS_SMP_WRITE_MEMORY_BARRIER;
+ t->entries++;
+
return p;
}
diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h
index b2e3c0eab5..10f5d1eb39 100644
--- a/erts/emulator/beam/index.h
+++ b/erts/emulator/beam/index.h
@@ -65,6 +65,7 @@ void index_erase_latest_from(IndexTable*, Uint ix);
ERTS_GLB_INLINE int index_put(IndexTable*, void*);
ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint);
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -78,6 +79,19 @@ erts_index_lookup(IndexTable* t, Uint ix)
{
return t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK];
}
+
+ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t)
+{
+ int ret = t->entries;
+ /*
+ * Do a read barrier here to allow lock free iteration
+ * on tables where entries are never erased.
+ * index_put_entry() does matching write barrier.
+ */
+ ERTS_SMP_READ_MEMORY_BARRIER;
+ return ret;
+}
+
#endif
#endif
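
Editor's note: the paired barriers above are what make lock-free iteration safe here: the writer publishes the new slot before bumping entries, and the reader fences after loading entries before walking the slots. A rough equivalent of that publish/observe pairing using C11 release/acquire atomics instead of the ERTS barrier macros (table_put, table_num_entries and MAX_SLOTS are made-up names):

    #include <stdatomic.h>
    #include <stddef.h>

    #define MAX_SLOTS 1024

    static void      *slots[MAX_SLOTS];
    static atomic_int nentries;

    /* writer: publish the slot first, then make the new count visible */
    static void table_put(int ix, void *p)
    {
        slots[ix] = p;
        atomic_store_explicit(&nentries, ix + 1, memory_order_release);
    }

    /* reader: load the count with acquire so every slot below it is visible */
    static int table_num_entries(void)
    {
        return atomic_load_explicit(&nentries, memory_order_acquire);
    }

    /* lock-free iteration over a grow-only table */
    static void table_foreach(void (*fn)(void *))
    {
        int n = table_num_entries();
        for (int i = 0; i < n; i++)
            fn(slots[i]);
    }
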
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 4f131c74de..31396d06c6 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -6890,12 +6890,6 @@ ErlDrvSizeT driver_vec_to_buf(ErlIOVec *vec, char *buf, ErlDrvSizeT len)
return (orig_len - len);
}
-
-/*
- * - driver_alloc_binary() is thread safe (efile driver depend on it).
- * - driver_realloc_binary(), and driver_free_binary() are *not* thread safe.
- */
-
/*
* reference count on driver binaries...
*/
@@ -6938,26 +6932,15 @@ driver_alloc_binary(ErlDrvSizeT size)
return Binary2ErlDrvBinary(bin);
}
-/* Reallocate space hold by binary */
+/* Reallocate space held by binary */
ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
{
Binary* oldbin;
Binary* newbin;
- if (!bin) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Bad use of driver_realloc_binary(%p, %lu): "
- "called with ",
- bin, (unsigned long)size);
- if (!bin) {
- erts_dsprintf(dsbufp, "NULL pointer as first argument");
- }
- erts_send_warning_to_logger_nogl(dsbufp);
- if (!bin)
- return driver_alloc_binary(size);
- }
+ if (!bin)
+ return driver_alloc_binary(size);
oldbin = ErlDrvBinary2Binary(bin);
newbin = (Binary *) erts_bin_realloc_fnf(oldbin, size);
@@ -6971,14 +6954,8 @@ ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
void driver_free_binary(ErlDrvBinary* dbin)
{
Binary *bin;
- if (!dbin) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Bad use of driver_free_binary(%p): called with "
- "NULL pointer as argument", dbin);
- erts_send_warning_to_logger_nogl(dsbufp);
+ if (!dbin)
return;
- }
bin = ErlDrvBinary2Binary(dbin);
if (erts_refc_dectest(&bin->refc, 0) == 0)
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 7740dd4373..79ec34e717 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -859,9 +859,12 @@ int erts_sys_unsetenv(char *key);
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
+#if defined(ERTS_SMP)
#if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX)
extern void sys_thr_resume(erts_tid_t tid);
extern void sys_thr_suspend(erts_tid_t tid);
+#define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2
+#endif
#endif
/* utils.c */
@@ -894,7 +897,7 @@ void sys_alloc_stat(SysAllocStat *);
#define ERTS_REFC_DEBUG
#endif
-typedef erts_smp_atomic_t erts_refc_t;
+typedef erts_atomic_t erts_refc_t;
ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, erts_aint_t val);
ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val);
@@ -913,27 +916,27 @@ ERTS_GLB_INLINE erts_aint_t erts_refc_read(erts_refc_t *refcp,
ERTS_GLB_INLINE void
erts_refc_init(erts_refc_t *refcp, erts_aint_t val)
{
- erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val);
+ erts_atomic_init_nob((erts_atomic_t *) refcp, val);
}
ERTS_GLB_INLINE void
erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
"erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp);
+ erts_atomic_inc_nob((erts_atomic_t *) refcp);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_inc_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
@@ -947,20 +950,20 @@ ERTS_GLB_INLINE void
erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_dec_read_nob((erts_atomic_t *) refcp);
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
"erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
val, min_val);
#else
- erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp);
+ erts_atomic_dec_nob((erts_atomic_t *) refcp);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_dectest(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_dec_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
@@ -974,20 +977,20 @@ ERTS_GLB_INLINE void
erts_refc_add(erts_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff);
+ erts_aint_t val = erts_atomic_add_read_nob((erts_atomic_t *) refcp, diff);
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
"erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
diff, val, min_val);
#else
- erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff);
+ erts_atomic_add_nob((erts_atomic_t *) refcp, diff);
#endif
}
ERTS_GLB_INLINE erts_aint_t
erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
{
- erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_atomic_read_nob((erts_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erts_exit(ERTS_ABORT_EXIT,
@@ -999,6 +1002,112 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+typedef erts_smp_atomic_t erts_smp_refc_t;
+
+ERTS_GLB_INLINE void erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val);
+ERTS_GLB_INLINE void erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inctest(erts_smp_refc_t *refcp,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE void erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_smp_refc_dectest(erts_smp_refc_t *refcp,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE void erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_smp_refc_read(erts_smp_refc_t *refcp,
+ erts_aint_t min_val);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val)
+{
+ erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val)
+{
+#ifdef ERTS_REFC_DEBUG
+ erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+ if (val < min_val)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#else
+ erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_refc_inctest(erts_smp_refc_t *refcp, erts_aint_t min_val)
+{
+ erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
+#ifdef ERTS_REFC_DEBUG
+ if (val < min_val)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_inctest(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ return val;
+}
+
+ERTS_GLB_INLINE void
+erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val)
+{
+#ifdef ERTS_REFC_DEBUG
+ erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+ if (val < min_val)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#else
+ erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_refc_dectest(erts_smp_refc_t *refcp, erts_aint_t min_val)
+{
+ erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
+#ifdef ERTS_REFC_DEBUG
+ if (val < min_val)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_dectest(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ return val;
+}
+
+ERTS_GLB_INLINE void
+erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
+{
+#ifdef ERTS_REFC_DEBUG
+ erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff);
+ if (val < min_val)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
+ diff, val, min_val);
+#else
+ erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_refc_read(erts_smp_refc_t *refcp, erts_aint_t min_val)
+{
+ erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
+#ifdef ERTS_REFC_DEBUG
+ if (val < min_val)
+ erts_exit(ERTS_ABORT_EXIT,
+ "erts_smp_refc_read(): Bad refc found (refc=%ld < %ld)!\n",
+ val, min_val);
+#endif
+ return val;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
#ifdef ERTS_ENABLE_KERNEL_POLL
extern int erts_use_kernel_poll;
#endif
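
Editor's note: the sys.h block above introduces erts_smp_refc_t alongside the now non-SMP erts_refc_t; both expose the same operations, and with ERTS_REFC_DEBUG each aborts if the count falls below the caller-supplied floor. A toy, non-atomic imitation of that min-checked counter, only to show how the min_val argument and the dectest-then-free idiom are meant to be used (refc_t and the function names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { long count; } refc_t;   /* stand-in, not atomic */

    static void refc_init(refc_t *r, long val) { r->count = val; }

    /* decrement and verify the count never drops below the expected floor */
    static long refc_dectest(refc_t *r, long min_val)
    {
        long val = --r->count;
        if (val < min_val) {
            fprintf(stderr, "bad refc (%ld < %ld)\n", val, min_val);
            abort();
        }
        return val;
    }

    int main(void)
    {
        refc_t r;
        refc_init(&r, 2);
        if (refc_dectest(&r, 0) == 0)   /* last reference gone: free the object */
            puts("would free object here");
        else
            puts("object still referenced");
        return 0;
    }
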
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 87ea4f05a1..7f7e38c22a 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3842,7 +3842,7 @@ store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
for(i = 0; i < size; i++)
to_hp[i] = from_hp[i];
- erts_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
+ erts_smp_refc_inc(&((ExternalThing *) to_hp)->node->refc, 2);
((struct erl_off_heap_header*) to_hp)->next = oh->first;
oh->first = (struct erl_off_heap_header*) to_hp;