Diffstat (limited to 'erts/emulator/beam')
41 files changed, 2483 insertions(+), 1109 deletions(-)
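
A recurring pattern in the hunks below is the two-pass, size-then-build idiom around erts_max_heap_size_map(): a first call with a NULL heap pointer and a size accumulator computes how many heap words the term needs, HAlloc reserves them, and a second call with the heap pointer builds the term in place. A minimal standalone sketch of the idiom follows; build_pair() and the word layout are simplified stand-ins, not the ERTS API.

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long Eterm;   /* stand-in for the ERTS term type */

    /* Sizing pass when szp != NULL, build pass when hpp != NULL
     * (mirrors the erts_max_heap_size_map(..., NULL, &sz) /
     * (..., &hp, NULL) calling convention in the diff). */
    static Eterm build_pair(Eterm a, Eterm b, Eterm **hpp, unsigned *szp)
    {
        if (szp) {
            *szp += 3;             /* header word + two elements */
            return 0;
        }
        (*hpp)[0] = 2;             /* fake 2-tuple header */
        (*hpp)[1] = a;
        (*hpp)[2] = b;
        *hpp += 3;
        return (Eterm)(*hpp - 3);
    }

    int main(void)
    {
        unsigned sz = 0;
        Eterm *heap, *hp;

        build_pair(1, 2, NULL, &sz);             /* pass 1: size  */
        hp = heap = malloc(sz * sizeof(Eterm));  /* stands in for HAlloc */
        build_pair(1, 2, &hp, NULL);             /* pass 2: build */
        printf("needed %u words\n", sz);
        free(heap);
        return 0;
    }

The benefit of the single build_or_size entry point is that the size pass and the build pass cannot drift apart, which is exactly how the max_heap_size map is produced in process_flag/2, process_info/2 and system_info/1 below.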
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 3022c0a99a..8f65e71531 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -275,6 +275,7 @@ atom garbage_collection_info atom gc_end atom gc_major_end atom gc_major_start +atom gc_max_heap_size atom gc_minor_end atom gc_minor_start atom gc_start @@ -366,6 +367,7 @@ atom match_spec atom match_spec_result atom max atom maximum +atom max_heap_size atom max_tables max_processes atom mbuf_size atom md5 diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 2ee98ed7b5..8489897d3a 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -82,7 +82,7 @@ erts_smp_atomic32_t erts_staging_bp_index; static ERTS_INLINE ErtsMonotonicTime get_mtime(Process *c_p) { - return erts_get_monotonic_time(ERTS_PROC_GET_SCHDATA(c_p)); + return erts_get_monotonic_time(erts_proc_sched_data(c_p)); } /* ************************************************************************* @@ -248,7 +248,10 @@ erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified) void erts_bp_free_matched_functions(BpFunctions* f) { - Free(f->matching); + if (f->matching) { + Free(f->matching); + } + else ASSERT(f->matched == 0); } void @@ -652,8 +655,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg) erts_smp_atomic_inc_nob(&bp->count->acount); } - if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE - && ERTS_TRACER_PROC_IS_ENABLED(c_p)) { + if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) { Eterm w; erts_trace_time_call(c_p, I, bp->time); w = (BeamInstr) *c_p->cp; @@ -750,8 +752,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) } } if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && - IS_TRACED_FL(p, F_TRACE_CALLS) && - ERTS_TRACER_PROC_IS_ENABLED(p)) { + IS_TRACED_FL(p, F_TRACE_CALLS)) { BeamInstr *pc = (BeamInstr *)ep->code+3; erts_trace_time_call(p, pc, bp->time); } @@ -973,7 +974,8 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt) BpDataTime *pbdt = NULL; ASSERT(c_p); - ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING); + ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint * from the process psd */ @@ -1050,7 +1052,8 @@ erts_trace_time_return(Process *p, BeamInstr *pc) BpDataTime *pbdt = NULL; ASSERT(p); - ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING); + ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint * from the process psd */ @@ -1432,7 +1435,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags, g = (GenericBp *) pc[-4]; if (g == 0) { int i; - if (count_op == erts_break_reset || count_op == erts_break_stop) { + if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) { /* Do not insert a new breakpoint */ return; } @@ -1456,7 +1459,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags, MatchSetUnref(bp->meta_ms); bp_meta_unref(bp->meta_tracer); } else if (common & ERTS_BPF_COUNT) { - if (count_op == erts_break_stop) { + if (count_op == ERTS_BREAK_PAUSE) { bp->flags &= ~ERTS_BPF_COUNT_ACTIVE; } else { bp->flags |= ERTS_BPF_COUNT_ACTIVE; @@ -1468,7 +1471,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags, BpDataTime* bdt = bp->time; Uint i = 0; - if (count_op == erts_break_stop) { + if (count_op == ERTS_BREAK_PAUSE) { bp->flags &= 
~ERTS_BPF_TIME_TRACE_ACTIVE; } else { bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE; diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h index 08641b86d6..541af77211 100644 --- a/erts/emulator/beam/beam_bp.h +++ b/erts/emulator/beam/beam_bp.h @@ -80,16 +80,16 @@ typedef struct generic_bp { #define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2) #ifdef ERTS_SMP -#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1) +#define bp_sched2ix_proc(p) (erts_proc_sched_data(p)->no - 1) #else #define bp_sched2ix_proc(p) (0) #endif enum erts_break_op{ - erts_break_nop = 0, /* Must be false */ - erts_break_set = !0, /* Must be true */ - erts_break_reset, - erts_break_stop + ERTS_BREAK_NOP = 0, /* Must be false */ + ERTS_BREAK_SET = !0, /* Must be true */ + ERTS_BREAK_RESTART, + ERTS_BREAK_PAUSE }; typedef Uint32 ErtsBpIndex; diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 59112e1e43..f8f2e29c95 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -64,18 +64,21 @@ # ifdef ERTS_SMP # define PROCESS_MAIN_CHK_LOCKS(P) \ do { \ - if ((P)) { \ + if ((P)) \ erts_proc_lc_chk_only_proc_main((P)); \ - } \ - else \ - erts_lc_check_exact(NULL, 0); \ - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \ + ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \ +} while (0) +# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +do { \ + if ((P)) \ + erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \ + __FILE__, __LINE__); \ +} while (0) +# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +do { \ + if ((P)) \ + erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \ } while (0) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ - if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ - __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ - if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) # else # define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) # define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) @@ -1202,12 +1205,12 @@ init_emulator(void) do { \ if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \ ASSERT(FC <= 0); \ - ASSERT(ERTS_PROC_GET_SCHDATA(c_p)->virtual_reds \ + ASSERT(erts_proc_sched_data(c_p)->virtual_reds \ <= 0 - (FC)); \ } \ else { \ ASSERT(FC <= CONTEXT_REDS); \ - ASSERT(ERTS_PROC_GET_SCHDATA(c_p)->virtual_reds \ + ASSERT(erts_proc_sched_data(c_p)->virtual_reds \ <= CONTEXT_REDS - (FC)); \ } \ } while (0) @@ -1321,8 +1324,8 @@ void process_main(void) if (start_time != 0) { Sint64 diff = erts_timestamp_millis() - start_time; if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule -#ifdef ERTS_DIRTY_SCHEDULERS - && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data) +#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS) + && !ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)) #endif ) { BeamInstr *inptr = find_function_from_pc(start_time_i); @@ -1351,8 +1354,8 @@ void process_main(void) start_time_i = c_p->i; } - reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array; - freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array; + reg = erts_proc_sched_data(c_p)->x_reg_array; + freg = erts_proc_sched_data(c_p)->f_reg_array; ERL_BITS_RELOAD_STATEP(c_p); { int reds; @@ -1704,6 +1707,14 @@ void process_main(void) BeamInstr *next; Eterm result; + if (!(FCALLS > 0 || FCALLS > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + c_p->arity = 2; + c_p->current = NULL; + goto context_switch3; + } + PRE_BIF_SWAPOUT(c_p); c_p->fcalls = FCALLS - 1; result = erl_send(c_p, r(0), x(1)); @@ -2200,7 +2211,7 @@ void 
process_main(void) PreFetch(0, next); if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) { - trace_receive(c_p, am_timeout); + trace_receive(c_p, am_clock_service, am_timeout, NULL); } if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { save_calls(c_p, &exp_timeout); @@ -2810,6 +2821,15 @@ do { \ BeamInstr *next; ErlHeapFragment *live_hf_end; + + if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + c_p->arity = ((Export *)Arg(0))->code[2]; + c_p->current = ((Export *)Arg(0))->code; + goto context_switch3; + } + if (ERTS_MSACC_IS_ENABLED_CACHED_X()) { if (GET_BIF_MODULE(Arg(0)) == am_ets) { ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS); @@ -3338,10 +3358,19 @@ do { \ context_switch2: /* Entry for fun calls. */ c_p->current = I-3; /* Pointer to Mod, Func, Arity */ + context_switch3: + { Eterm* argp; int i; + if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) { + c_p->i = beam_exit; + c_p->arity = 0; + c_p->current = NULL; + goto do_schedule; + } + /* * Make sure that there is enough room for the argument registers to be saved. */ @@ -3509,6 +3538,12 @@ do { \ BifFunction vbf; ErlHeapFragment *live_hf_end; + if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the nif */ + goto context_switch; + } + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF); DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]); @@ -3524,18 +3559,27 @@ do { \ typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]); NifF* fp = vbf = (NifF*) I[1]; struct enif_environment_t env; +#ifdef ERTS_DIRTY_SCHEDULERS + if (!c_p->scheduler_data) + live_hf_end = ERTS_INVALID_HFRAG_PTR; /* On dirty scheduler */ + else +#endif + live_hf_end = c_p->mbuf; erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL); - live_hf_end = c_p->mbuf; nif_bif_result = (*fp)(&env, bif_nif_arity, reg); if (env.exception_thrown) nif_bif_result = THE_NON_VALUE; erts_post_nif(&env); - } - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result)); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + if (env.exiting) { + ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + goto do_schedule; + } + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + } DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]); goto apply_bif_or_nif_epilogue; @@ -3551,6 +3595,13 @@ do { \ * code[3]: &&apply_bif * code[4]: Function pointer to BIF function */ + + if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + goto context_switch; + } + if (ERTS_MSACC_IS_ENABLED_CACHED_X()) { if ((Eterm)I[-3] == am_ets) { ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS); @@ -4887,8 +4938,8 @@ do { \ #ifdef DEBUG pid = c_p->common.id; /* may have switched process... */ #endif - reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array; - freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array; + reg = erts_proc_sched_data(c_p)->x_reg_array; + freg = erts_proc_sched_data(c_p)->f_reg_array; ERL_BITS_RELOAD_STATEP(c_p); /* XXX: this abuse of def_arg_reg[] is horrid! 
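
The three FCALLS guards added above (before send, apply_bif and call_nif) all implement the same test: force a context switch when the reduction budget is spent before entering the BIF or NIF. fcalls normally counts down from CONTEXT_REDS to 0; when save_calls tracing is active it instead runs from 0 down to -CONTEXT_REDS, which is what neg_o_reds holds. A minimal sketch of the predicate under those assumptions:

    #include <stdio.h>

    /* Mirrors: if (!(FCALLS > 0 || FCALLS > neg_o_reds)) goto context_switch3;
     * fcalls > 0          -> ordinary budget left
     * fcalls > neg_o_reds -> save_calls budget left (fcalls in (neg_o_reds, 0]) */
    static int out_of_reductions(long fcalls, long neg_o_reds)
    {
        return !(fcalls > 0 || fcalls > neg_o_reds);
    }

    int main(void)
    {
        printf("%d\n", out_of_reductions(42, -2000));    /* 0: keep going  */
        printf("%d\n", out_of_reductions(-10, -2000));   /* 0: save_calls  */
        printf("%d\n", out_of_reductions(-2000, -2000)); /* 1: must switch */
        return 0;
    }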
*/ neg_o_reds = -c_p->def_arg_reg[4]; diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 9d6dfefa3c..2a3bd4afe5 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -44,6 +44,7 @@ #include "erl_ptab.h" #include "erl_bits.h" #include "erl_bif_unique.h" +#include "erl_map.h" #include "erl_msacc.h" Export *erts_await_result; @@ -611,7 +612,7 @@ erts_queue_monitor_message(Process *p, ref_copy = copy_struct(ref, ref_size, &hp, ohp); tup = TUPLE5(hp, am_DOWN, ref_copy, type, item_copy, reason_copy); - erts_queue_message(p, p_locksp, msgp, tup); + erts_queue_message(p, *p_locksp, msgp, tup, am_system); } static BIF_RETTYPE @@ -880,6 +881,8 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1) so.flags = erts_default_spo_flags|SPO_USE_ARGS; so.min_heap_size = H_MIN_SIZE; so.min_vheap_size = BIN_VH_MIN_SIZE; + so.max_heap_size = H_MAX_SIZE; + so.max_heap_flags = H_MAX_FLAGS; so.priority = PRIORITY_NORMAL; so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); so.scheduler = 0; @@ -937,6 +940,9 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1) } else { so.min_heap_size = erts_next_heap_size(min_heap_size, 0); } + } else if (arg == am_max_heap_size) { + if (!erts_max_heap_size(val, &so.max_heap_size, &so.max_heap_flags)) + goto error; } else if (arg == am_min_bin_vheap_size && is_small(val)) { Sint min_vheap_size = signed_val(val); if (min_vheap_size < 0) { @@ -970,6 +976,10 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1) goto error; } + if (so.max_heap_size != 0 && so.max_heap_size < so.min_heap_size) { + goto error; + } + /* * Spawn the process. */ @@ -1686,7 +1696,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) ERTS_PSFLG_BOUND); } - curr = ERTS_GET_SCHEDULER_DATA_FROM_PROC(BIF_P)->run_queue; + curr = erts_proc_sched_data(BIF_P)->run_queue; old = (ERTS_PSFLG_BOUND & state) ? curr : NULL; ASSERT(!old || old == curr); @@ -1731,6 +1741,23 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } BIF_RET(old_value); } + else if (BIF_ARG_1 == am_max_heap_size) { + Eterm *hp; + Uint sz = 0, max_heap_size, max_heap_flags; + + if (!erts_max_heap_size(BIF_ARG_2, &max_heap_size, &max_heap_flags)) + goto error; + + if ((max_heap_size < MIN_HEAP_SIZE(BIF_P) && max_heap_size != 0)) + goto error; + + erts_max_heap_size_map(MAX_HEAP_SIZE_GET(BIF_P), MAX_HEAP_SIZE_FLAGS_GET(BIF_P), NULL, &sz); + hp = HAlloc(BIF_P, sz); + old_value = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(BIF_P), MAX_HEAP_SIZE_FLAGS_GET(BIF_P), &hp, NULL); + MAX_HEAP_SIZE_SET(BIF_P, max_heap_size); + MAX_HEAP_SIZE_FLAGS_SET(BIF_P, max_heap_flags); + BIF_RET(old_value); + } else if (BIF_ARG_1 == am_message_queue_data) { old_value = erts_change_message_queue_management(BIF_P, BIF_ARG_2); if (is_non_value(old_value)) @@ -4198,8 +4225,28 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) else { locks &= ~ERTS_PROC_LOCK_STATUS; erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); - new_member->group_leader = STORE_NC_IN_PROC(new_member, - BIF_ARG_1); + if (erts_smp_atomic32_read_nob(&new_member->state) + & !(ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + new_member->group_leader = STORE_NC_IN_PROC(new_member, + BIF_ARG_1); + } + else { + ErlHeapFragment *bp; + Eterm *hp; + /* + * Other process executing on a dirty scheduler, + * so we are not allowed to write to its heap. + * Store in heap fragment. 
+ */ + + bp = new_message_buffer(NC_HEAP_SIZE(BIF_ARG_1)); + hp = bp->mem; + new_member->group_leader = STORE_NC(&hp, + &new_member->off_heap, + BIF_ARG_1); + bp->next = new_member->mbuf; + new_member->mbuf = bp; + } } } @@ -4350,6 +4397,31 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(make_small(oval)); + } else if (BIF_ARG_1 == am_max_heap_size) { + + Eterm *hp, old_value; + Uint sz = 0, max_heap_size, max_heap_flags; + + if (!erts_max_heap_size(BIF_ARG_2, &max_heap_size, &max_heap_flags)) + goto error; + + if (max_heap_size < H_MIN_SIZE && max_heap_size != 0) + goto error; + + erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz); + hp = HAlloc(BIF_P, sz); + old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL); + + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + + H_MAX_SIZE = max_heap_size; + H_MAX_FLAGS = max_heap_flags; + + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + + BIF_RET(old_value); } else if (BIF_ARG_1 == am_display_items) { int oval = display_items; if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) { diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h index 5d751dd67d..2203182a0d 100644 --- a/erts/emulator/beam/bif.h +++ b/erts/emulator/beam/bif.h @@ -59,12 +59,12 @@ extern Export *erts_convert_time_unit_trap; do { \ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \ if ((fcalls) > 0) \ - ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (fcalls); \ + erts_proc_sched_data((p))->virtual_reds += (fcalls); \ (fcalls) = 0; \ } \ else { \ if ((fcalls) > -CONTEXT_REDS) \ - ERTS_PROC_GET_SCHDATA((p))->virtual_reds \ + erts_proc_sched_data((p))->virtual_reds \ += ((fcalls) - (-CONTEXT_REDS)); \ (fcalls) = -CONTEXT_REDS; \ } \ @@ -91,22 +91,22 @@ do { \ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \ if ((p)->fcalls >= reds) { \ (p)->fcalls -= reds; \ - ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \ + erts_proc_sched_data((p))->virtual_reds += reds; \ } \ else { \ if ((p)->fcalls > 0) \ - ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (p)->fcalls;\ + erts_proc_sched_data((p))->virtual_reds += (p)->fcalls; \ (p)->fcalls = 0; \ } \ } \ else { \ if ((p)->fcalls >= reds - CONTEXT_REDS) { \ (p)->fcalls -= reds; \ - ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \ + erts_proc_sched_data((p))->virtual_reds += reds; \ } \ else { \ if ((p)->fcalls > -CONTEXT_REDS) \ - ERTS_PROC_GET_SCHDATA((p))->virtual_reds \ + erts_proc_sched_data((p))->virtual_reds \ += (p)->fcalls - (-CONTEXT_REDS); \ (p)->fcalls = -CONTEXT_REDS; \ } \ @@ -118,14 +118,14 @@ do { \ if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \ int nreds__ = ((int)(Reds)) - CONTEXT_REDS; \ if ((FCalls) > nreds__) { \ - ERTS_PROC_GET_SCHDATA((P))->virtual_reds \ + erts_proc_sched_data((P))->virtual_reds \ += (FCalls) - nreds__; \ (FCalls) = nreds__; \ } \ } \ else { \ if ((FCalls) > (Reds)) { \ - ERTS_PROC_GET_SCHDATA((P))->virtual_reds \ + erts_proc_sched_data((P))->virtual_reds \ += (FCalls) - (Reds); \ (FCalls) = (Reds); \ } \ @@ -165,7 +165,7 @@ do { \ #define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->freason = (Reason); \ (Proc)->current = (Bif)->code; \ reg[0] = (Eterm) (A0); \ @@ -174,7 +174,7 @@ do { \ #define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + 
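
The group_leader/2 fix above avoids writing to the heap of a process that is currently executing on a dirty scheduler: the new leader term is copied into a freshly allocated heap fragment, which is then linked onto the target's mbuf chain so the garbage collector reclaims it later. A simplified sketch of that off-heap store; the structures are illustrative stand-ins for ErlHeapFragment and Process.

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long Eterm;
    typedef struct frag { struct frag *next; Eterm mem[2]; } HeapFragment;
    typedef struct { HeapFragment *mbuf; Eterm group_leader; } Proc;

    int main(void)
    {
        Proc target = { NULL, 0 };
        HeapFragment *bp = malloc(sizeof(*bp)); /* new_message_buffer(sz) */

        bp->mem[0] = 0xABCD;                    /* copied leader term     */
        target.group_leader = (Eterm)&bp->mem[0];
        bp->next = target.mbuf;                 /* link onto mbuf chain;  */
        target.mbuf = bp;                       /* GC reclaims it later   */

        printf("stored off-heap: %lx\n", *(Eterm *)target.group_leader);
        free(bp);
        return 0;
    }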
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->freason = (Reason); \ (Proc)->current = (Bif)->code; \ reg[0] = (Eterm) (A0); \ @@ -184,7 +184,7 @@ do { \ #define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->freason = (Reason); \ (Proc)->current = (Bif)->code; \ reg[0] = (Eterm) (A0); \ @@ -208,7 +208,7 @@ do { \ #define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->freason = (Reason); \ (Proc)->current = (Bif)->code; \ reg[0] = (Eterm) (A0); \ @@ -217,7 +217,7 @@ do { \ #define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->freason = (Reason); \ (Proc)->current = (Bif)->code; \ reg[0] = (Eterm) (A0); \ @@ -227,7 +227,7 @@ do { \ #define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->freason = (Reason); \ (Proc)->current = (Bif)->code; \ reg[0] = (Eterm) (A0); \ @@ -246,7 +246,7 @@ do { \ #define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->arity = 1; \ reg[0] = (Eterm) (A0); \ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \ @@ -256,7 +256,7 @@ do { \ #define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->arity = 2; \ reg[0] = (Eterm) (A0); \ reg[1] = (Eterm) (A1); \ @@ -267,7 +267,7 @@ do { \ #define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2) \ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->arity = 3; \ reg[0] = (Eterm) (A0); \ reg[1] = (Eterm) (A1); \ @@ -279,7 +279,7 @@ do { \ #define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\ do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \ (Proc)->arity = 3; \ reg[0] = (Eterm) (A0); \ reg[1] = (Eterm) (A1); \ @@ -296,7 +296,7 @@ do { \ } while(0) #define BIF_TRAP1(Trap_, p, A0) do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \ (p)->arity = 1; \ reg[0] = (A0); \ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \ @@ -305,7 +305,7 @@ do { \ } while(0) #define BIF_TRAP2(Trap_, p, A0, A1) do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \ (p)->arity = 2; \ reg[0] = (A0); \ reg[1] = (A1); \ @@ -315,7 +315,7 @@ do { \ } while(0) #define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \ - Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \ + Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \ (p)->arity = 3; \ reg[0] = (A0); \ reg[1] = (A1); \ diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index d02c6828f9..3c19e82b66 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -118,7 +118,9 
@@ process_killer(void) | ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_IN_RUNQ | ERTS_PSFLG_RUNNING - | ERTS_PSFLG_RUNNING_SYS)) { + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { erts_printf("Can only kill WAITING processes this way\n"); } else { @@ -214,7 +216,8 @@ print_process_info(int to, void *to_arg, Process *p) if (state & ERTS_PSFLG_GC) { garbing = 1; running = 1; - } else if (state & ERTS_PSFLG_RUNNING) + } else if (state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING)) running = 1; /* diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 277e0668a2..09c83f1117 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -397,7 +397,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) msgp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp); tup = TUPLE2(hp, am_nodedown, name); - erts_queue_message(rp, &rp_locks, msgp, tup); + erts_queue_message(rp, rp_locks, msgp, tup, am_system); } erts_smp_proc_unlock(rp, rp_locks); } @@ -1456,7 +1456,7 @@ int erts_net_message(Port *prt, token = copy_struct(token, token_size, &hp, ohp); } - erts_queue_dist_message(rp, &locks, ede_copy, token); + erts_queue_dist_message(rp, locks, ede_copy, token, from); if (locks) erts_smp_proc_unlock(rp, locks); } @@ -1505,7 +1505,7 @@ int erts_net_message(Port *prt, token = copy_struct(token, token_size, &hp, ohp); } - erts_queue_dist_message(rp, &locks, ede_copy, token); + erts_queue_dist_message(rp, locks, ede_copy, token, tuple[2]); if (locks) erts_smp_proc_unlock(rp, locks); } @@ -3317,7 +3317,7 @@ send_nodes_mon_msg(Process *rp, } ASSERT(hend == hp); - erts_queue_message(rp, rp_locksp, mp, msg); + erts_queue_message(rp, *rp_locksp, mp, msg, am_system); } static void diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index d04977b9ae..c367d4162c 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -137,6 +137,11 @@ static ErtsAllocatorState_t exec_alloc_state; #endif static ErtsAllocatorState_t test_alloc_state; +#define ERTS_ALC_INFO_A_ALLOC_UTIL (ERTS_ALC_A_MAX + 1) +#define ERTS_ALC_INFO_A_MSEG_ALLOC (ERTS_ALC_A_MAX + 2) +#define ERTS_ALC_INFO_A_ERTS_MMAP (ERTS_ALC_A_MAX + 3) +#define ERTS_ALC_INFO_A_MAX ERTS_ALC_INFO_A_ERTS_MMAP + typedef struct { erts_smp_atomic32_t refc; int only_sz; @@ -145,13 +150,9 @@ typedef struct { Process *proc; Eterm ref; Eterm ref_heap[REF_THING_SIZE]; - int allocs[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+1+2]; + int allocs[ERTS_ALC_INFO_A_MAX - ERTS_ALC_A_MIN + 1 + 1]; } ErtsAllocInfoReq; -#define ERTS_ALC_INFO_A_ALLOC_UTIL (ERTS_ALC_A_MAX + 1) -#define ERTS_ALC_INFO_A_MSEG_ALLOC (ERTS_ALC_A_MAX + 2) -#define ERTS_ALC_INFO_A_MAX ERTS_ALC_INFO_A_MSEG_ALLOC - ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq, ErtsAllocInfoReq, 5, @@ -2840,10 +2841,18 @@ erts_allocator_info(int to, void *arg) int i; for (i = 0; i <= max; i++) { erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i); - erts_mseg_info(i, &to, arg, 0, NULL, NULL); + erts_mseg_info(i, &to, arg, 0, 0, NULL, NULL); } - erts_print(to, arg, "=allocator:mseg_alloc.erts_mmap\n"); + erts_print(to, arg, "=allocator:erts_mmap.default_mmap\n"); erts_mmap_info(&erts_dflt_mmapper, &to, arg, NULL, NULL, &emis); +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) + erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n"); + erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis); +#endif +#ifdef ERTS_ALC_A_EXEC + erts_print(to, arg, "=allocator:erts_mmap.exec_mmap\n"); 
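
The break.c hunks above (and several others in this diff) widen "is this process running?" tests so they also match processes executing on dirty schedulers, by OR-ing the ERTS_PSFLG_DIRTY_RUNNING* bits into the mask. A sketch with illustrative flag values; the real ERTS_PSFLG_* encodings differ.

    #include <stdio.h>

    #define PSFLG_RUNNING            (1u << 0)
    #define PSFLG_RUNNING_SYS        (1u << 1)
    #define PSFLG_DIRTY_RUNNING      (1u << 2)
    #define PSFLG_DIRTY_RUNNING_SYS  (1u << 3)

    /* "Running" now includes dirty-scheduler execution. */
    static int is_running(unsigned state)
    {
        return !!(state & (PSFLG_RUNNING | PSFLG_RUNNING_SYS
                           | PSFLG_DIRTY_RUNNING | PSFLG_DIRTY_RUNNING_SYS));
    }

    int main(void)
    {
        printf("%d\n", is_running(PSFLG_DIRTY_RUNNING)); /* 1: now counts */
        printf("%d\n", is_running(0));                   /* 0: waiting    */
        return 0;
    }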
+ erts_mmap_info(&erts_exec_mmapper, &to, arg, NULL, NULL, &emis); +#endif } #endif @@ -2954,6 +2963,11 @@ erts_allocator_options(void *proc) atoms[length] = am_atom_put("alloc_util", 10); terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp); +#if HAVE_ERTS_MMAP + atoms[length] = ERTS_MAKE_AM("erts_mmap"); + terms[length++] = erts_mmap_info_options(&erts_dflt_mmapper, NULL, NULL, + NULL, hpp, szp); +#endif { Eterm o[3], v[3]; o[0] = am_atom_put("m", 1); @@ -2990,7 +3004,12 @@ erts_allocator_options(void *proc) #if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC terms[length++] = am_atom_put("sys_aligned_alloc", 17); #endif - +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) + terms[length++] = ERTS_MAKE_AM("literal_mmap"); +#endif +#ifdef ERTS_ALC_A_EXEC + terms[length++] = ERTS_MAKE_AM("exec_mmap"); +#endif features = length ? erts_bld_list(hpp, szp, length, terms) : NIL; #if defined(__GLIBC__) @@ -3075,7 +3094,15 @@ reply_alloc_info(void *vair) Uint sz, *szp; ErlOffHeap *ohp = NULL; ErtsMessage *mp = NULL; - struct erts_mmap_info_struct emis; +#if HAVE_ERTS_MMAP + struct erts_mmap_info_struct mmap_info_dflt; +# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) + struct erts_mmap_info_struct mmap_info_literal; +# endif +# ifdef ERTS_ALC_A_EXEC + struct erts_mmap_info_struct mmap_info_exec; +# endif +#endif int i; Eterm (*info_func)(Allctr_t *, int, @@ -3181,26 +3208,53 @@ reply_alloc_info(void *vair) make_small(0), ainfo); break; + case ERTS_ALC_INFO_A_ERTS_MMAP: + alloc_atom = erts_bld_atom(hpp, szp, "erts_mmap"); +#if HAVE_ERTS_MMAP + ainfo = (air->only_sz ? NIL : + erts_mmap_info(&erts_dflt_mmapper, NULL, NULL, + hpp, szp, &mmap_info_dflt)); + ainfo = erts_bld_tuple3(hpp, szp, + alloc_atom, + erts_bld_atom(hpp,szp,"default_mmap"), + ainfo); +# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) + ai_list = erts_bld_cons(hpp, szp, + ainfo, ai_list); + ainfo = (air->only_sz ? NIL : + erts_mmap_info(&erts_literal_mmapper, NULL, NULL, + hpp, szp, &mmap_info_literal)); + ainfo = erts_bld_tuple3(hpp, szp, + alloc_atom, + erts_bld_atom(hpp,szp,"literal_mmap"), + ainfo); +# endif +# ifdef ERTS_ALC_A_EXEC + ai_list = erts_bld_cons(hpp, szp, + ainfo, ai_list); + ainfo = (air->only_sz ? NIL : + erts_mmap_info(&erts_exec_mmapper, NULL, NULL, + hpp, szp, &mmap_info_exec)); + ainfo = erts_bld_tuple3(hpp, szp, + alloc_atom, + erts_bld_atom(hpp,szp,"exec_mmap"), + ainfo); +# endif +#else /* !HAVE_ERTS_MMAP */ + ainfo = erts_bld_tuple2(hpp, szp, alloc_atom, + am_false); +#endif + break; case ERTS_ALC_INFO_A_MSEG_ALLOC: alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc"); #if HAVE_ERTS_MSEG - ainfo = (air->only_sz - ? NIL - : erts_mseg_info(0, NULL, NULL, hpp != NULL, - hpp, szp)); + ainfo = erts_mseg_info(0, NULL, NULL, hpp != NULL, + air->only_sz, hpp, szp); ainfo = erts_bld_tuple3(hpp, szp, alloc_atom, make_small(0), ainfo); - ai_list = erts_bld_cons(hpp, szp, - ainfo, ai_list); - ainfo = (air->only_sz ? 
NIL : - erts_mmap_info(&erts_dflt_mmapper, NULL, NULL, hpp, szp, &emis)); - ainfo = erts_bld_tuple3(hpp, szp, - alloc_atom, - erts_bld_atom(hpp,szp,"erts_mmap"), - ainfo); #else ainfo = erts_bld_tuple2(hpp, szp, alloc_atom, am_false); @@ -3232,15 +3286,14 @@ reply_alloc_info(void *vair) } switch (ai) { case ERTS_ALC_A_SYSTEM: - case ERTS_ALC_INFO_A_ALLOC_UTIL: + case ERTS_ALC_INFO_A_ALLOC_UTIL: + case ERTS_ALC_INFO_A_ERTS_MMAP: break; case ERTS_ALC_INFO_A_MSEG_ALLOC: #if HAVE_ERTS_MSEG && defined(ERTS_SMP) alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc"); - ainfo = (air->only_sz - ? NIL - : erts_mseg_info(sched_id, NULL, NULL, - hpp != NULL, hpp, szp)); + ainfo = erts_mseg_info(sched_id, NULL, NULL, + hpp != NULL, air->only_sz, hpp, szp); ainfo = erts_bld_tuple(hpp, szp, 3, alloc_atom, make_small(sched_id), @@ -3286,7 +3339,7 @@ reply_alloc_info(void *vair) if (hp != hp_end) erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1); - erts_queue_message(rp, &rp_locks, mp, msg); + erts_queue_message(rp, rp_locks, mp, msg, am_system); if (air->req_sched == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -3306,7 +3359,7 @@ erts_request_alloc_info(struct process *c_p, int internal) { ErtsAllocInfoReq *air = aireq_alloc(); - Eterm req_ai[ERTS_ALC_A_MAX+1+2] = {0}; + Eterm req_ai[ERTS_ALC_INFO_A_MAX+1] = {0}; Eterm alist; Eterm *hp; int airix = 0, ai; @@ -3342,6 +3395,10 @@ erts_request_alloc_info(struct process *c_p, ai = ERTS_ALC_INFO_A_MSEG_ALLOC; goto save_alloc; } + if (erts_is_atom_str("erts_mmap", alloc, 0)) { + ai = ERTS_ALC_INFO_A_ERTS_MMAP; + goto save_alloc; + } if (erts_is_atom_str("alloc_util", alloc, 0)) { ai = ERTS_ALC_INFO_A_ALLOC_UTIL; save_alloc: diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index ba216c7eb4..227fedfb69 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -278,6 +278,7 @@ type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request type TRACER_NIF LONG_LIVED SYSTEM tracer_nif type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls +type DIRTY_START STANDARD PROCESSES dirty_start +if threads_no_smp # Need thread safe allocs, but std_alloc and fix_alloc are not; diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index 1e2db38442..ef77201544 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -1737,7 +1737,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, hp += REF_THING_SIZE; mess = TUPLE5(hp,type,r,am_driver,driver_name,tag); } - erts_queue_message(proc, &rp_locks, mp, mess); + erts_queue_message(proc, rp_locks, mp, mess, am_system); erts_smp_proc_unlock(proc, rp_locks); ERTS_SMP_CHK_NO_PROC_LOCKS; } diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 99fe847ba2..2e195db0ee 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -45,6 +45,7 @@ #include "erl_async.h" #include "erl_thr_progress.h" #include "erl_bif_unique.h" +#include "erl_map.h" #define ERTS_PTAB_WANT_DEBUG_FUNCS__ #include "erl_ptab.h" #ifdef HIPE @@ -594,6 +595,7 @@ static Eterm pi_args[] = { am_suspending, am_min_heap_size, am_min_bin_vheap_size, + am_max_heap_size, am_current_location, am_current_stacktrace, am_message_queue_data, @@ -643,10 +645,11 @@ pi_arg2ix(Eterm arg) case am_suspending: return 26; case am_min_heap_size: return 27; case am_min_bin_vheap_size: return 28; - case 
am_current_location: return 29; - case am_current_stacktrace: return 30; - case am_message_queue_data: return 31; - case am_garbage_collection_info: return 32; + case am_max_heap_size: return 29; + case am_current_location: return 30; + case am_current_stacktrace: return 31; + case am_message_queue_data: return 32; + case am_garbage_collection_info: return 33; default: return -1; } } @@ -1107,7 +1110,7 @@ process_info_aux(Process *BIF_P, break; case am_status: - res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid); + res = erts_process_status(rp, rpid); ASSERT(res != am_undefined); hp = HAlloc(BIF_P, 3); break; @@ -1348,6 +1351,18 @@ process_info_aux(Process *BIF_P, break; } + case am_max_heap_size: { + Uint hsz = 3; + (void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), + MAX_HEAP_SIZE_FLAGS_GET(rp), + NULL, &hsz); + hp = HAlloc(BIF_P, hsz); + res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), + MAX_HEAP_SIZE_FLAGS_GET(rp), + &hp, NULL); + break; + } + case am_total_heap_size: { ErtsMessage *mp; Uint total_heap_size; @@ -1391,8 +1406,12 @@ process_info_aux(Process *BIF_P, case am_garbage_collection: { DECL_AM(minor_gcs); Eterm t; + Uint map_sz = 0; + + erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz); - hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3); /* last "3" is for outside tuple */ + hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz + 3); + /* last "3" is for outside tuple */ t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3; res = CONS(hp, t, NIL); hp += 2; @@ -1403,6 +1422,11 @@ process_info_aux(Process *BIF_P, res = CONS(hp, t, res); hp += 2; t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3; res = CONS(hp, t, res); hp += 2; + + t = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), &hp, NULL); + + t = TUPLE2(hp, am_max_heap_size, t); hp += 3; + res = CONS(hp, t, res); hp += 2; break; } @@ -1412,12 +1436,12 @@ process_info_aux(Process *BIF_P, if (rp == BIF_P) { sz += ERTS_PROCESS_GC_INFO_MAX_SIZE; } else { - erts_process_gc_info(rp, &sz, NULL); + erts_process_gc_info(rp, &sz, NULL, 0, 0); sz += 3; } hp = HAlloc(BIF_P, sz); - res = erts_process_gc_info(rp, &actual_sz, &hp); + res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0); /* We may have some extra space, fill with 0 tuples */ if (actual_sz <= sz - 3) { @@ -2035,12 +2059,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) Uint arity = *tp++; return info_1_tuple(BIF_P, tp, arityval(arity)); } else if (BIF_ARG_1 == am_scheduler_id) { -#ifdef ERTS_SMP - ASSERT(BIF_P->scheduler_data); - BIF_RET(make_small(BIF_P->scheduler_data->no)); -#else - BIF_RET(make_small(1)); -#endif + ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P); + BIF_RET(make_small(esdp->no)); } else if (BIF_ARG_1 == am_compat_rel) { ASSERT(erts_compat_rel > 0); BIF_RET(make_small(erts_compat_rel)); @@ -2173,7 +2193,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (BIF_ARG_1 == am_garbage_collection){ Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); Eterm tup; - hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2); + hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2); tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3; res = CONS(hp, tup, NIL); hp += 2; @@ -2184,6 +2204,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3; res = CONS(hp, tup, res); hp += 2; + tup = TUPLE2(hp, am_max_heap_size, make_small(H_MAX_SIZE)); hp += 3; + res = CONS(hp, tup, res); hp += 2; + 
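
Throughout the diff, direct p->scheduler_data reads and the ERTS_PROC_GET_SCHDATA() macro give way to the erts_proc_sched_data() accessor; the scheduler_id hunk above shows why, since with dirty schedulers a process may not have a normal scheduler attached at the moment of the call. A sketch of such an accessor under assumed semantics (prefer the process's own scheduler data, otherwise fall back to the executing thread's; everything here is a simplified stand-in):

    #include <assert.h>
    #include <stdio.h>

    typedef struct { int no; } SchedulerData;             /* stand-in */
    typedef struct { SchedulerData *scheduler_data; } Proc;

    static SchedulerData *current_thread_sched_data(void)
    {
        static SchedulerData fallback = { 0 };  /* stand-in for TSD lookup */
        return &fallback;
    }

    /* Assumed semantics of erts_proc_sched_data(). */
    static SchedulerData *proc_sched_data(Proc *p)
    {
        SchedulerData *esdp = p->scheduler_data
                            ? p->scheduler_data
                            : current_thread_sched_data();
        assert(esdp != NULL);
        return esdp;
    }

    int main(void)
    {
        SchedulerData sd = { 3 };
        Proc p = { &sd };
        printf("scheduler no: %d\n", proc_sched_data(&p)->no);
        return 0;
    }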
BIF_RET(res); } else if (BIF_ARG_1 == am_fullsweep_after){ Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); @@ -2194,6 +2217,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE)); BIF_RET(res); + } else if (BIF_ARG_1 == am_max_heap_size) { + Uint sz = 0; + erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz); + hp = HAlloc(BIF_P, sz); + res = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL); + BIF_RET(res); } else if (BIF_ARG_1 == am_min_bin_vheap_size) { hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE)); @@ -3555,7 +3584,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) BIF_RET(res); } else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) { - BIF_RET(erts_mmap_debug_info(&erts_dflt_mmapper, BIF_P)); + BIF_RET(erts_mmap_debug_info(BIF_P)); } else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) { BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P)); @@ -3589,10 +3618,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) /* Used by timer process_SUITE, timer_bif_SUITE, and node_container_SUITE (emulator) */ if (is_internal_pid(tp[2])) { - BIF_RET(erts_process_status(BIF_P, - ERTS_PROC_LOCK_MAIN, - NULL, - tp[2])); + BIF_RET(erts_process_status(NULL, tp[2])); } } else if (ERTS_IS_ATOM_STR("link_list", tp[1])) { diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index ff2018aa27..66e5146da0 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -68,6 +68,9 @@ static struct { /* Protected by code write permission */ static Eterm trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist); +static int +erts_set_tracing_event_pattern(Eterm event, Binary*, int on); + #ifdef ERTS_SMP static void smp_bp_finisher(void* arg); #endif @@ -78,6 +81,8 @@ static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key); static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key); static Eterm trace_info_on_load(Process* p, Eterm key); +static Eterm trace_info_event(Process* p, Eterm event, Eterm key); + static void reset_bif_trace(void); static void setup_bif_trace(void); @@ -85,14 +90,26 @@ static void install_exp_breakpoints(BpFunctions* f); static void uninstall_exp_breakpoints(BpFunctions* f); static void clean_export_entries(BpFunctions* f); +ErtsTracingEvent erts_send_tracing[ERTS_NUM_BP_IX]; +ErtsTracingEvent erts_receive_tracing[ERTS_NUM_BP_IX]; + void erts_bif_trace_init(void) { + int i; + erts_default_trace_pattern_is_on = 0; erts_default_match_spec = NULL; erts_default_meta_match_spec = NULL; erts_default_trace_pattern_flags = erts_trace_pattern_flags_off; erts_default_meta_tracer = erts_tracer_nil; + + for (i=0; i<ERTS_NUM_BP_IX; i++) { + erts_send_tracing[i].on = 1; + erts_send_tracing[i].match_spec = NULL; + erts_receive_tracing[i].on = 1; + erts_receive_tracing[i].match_spec = NULL; + } } /* @@ -137,15 +154,18 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) on = 1; } else if (Pattern == am_restart) { match_prog_set = NULL; - on = erts_break_reset; + on = ERTS_BREAK_RESTART; } else if (Pattern == am_pause) { match_prog_set = NULL; - on = erts_break_stop; - } else if ((match_prog_set = erts_match_set_compile(p, Pattern)) != NULL) { - MatchSetRef(match_prog_set); - on = 1; - } else{ - goto error; + on = ERTS_BREAK_PAUSE; + } 
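
The new erts_send_tracing/erts_receive_tracing arrays introduced above are indexed per breakpoint generation (ERTS_NUM_BP_IX slots), so event patterns can be written to a staging slot and then flipped to active, the same way code breakpoints are committed (see erts_set_tracing_event_pattern and consolidate_event_tracing further down). A simplified two-slot sketch of that staging/active swap, assuming two generations and eliding the match-spec refcounting:

    #include <stdio.h>

    typedef struct { int on; const char *match_spec; } TracingEvent;

    static TracingEvent send_tracing[2];  /* ERTS_NUM_BP_IX generations */
    static int active_ix = 0;             /* erts_active_bp_ix()        */
    #define staging_ix() (active_ix ^ 1)  /* erts_staging_bp_ix()       */

    static void set_pattern(int on, const char *ms)  /* writer side */
    {
        send_tracing[staging_ix()].on = on;
        send_tracing[staging_ix()].match_spec = ms;
    }

    static void commit(void)  /* flip staging to active, then consolidate */
    {
        active_ix ^= 1;
        send_tracing[staging_ix()] = send_tracing[active_ix];
    }

    int main(void)
    {
        set_pattern(1, "some match spec");
        commit();
        printf("on=%d ms=%s\n", send_tracing[active_ix].on,
               send_tracing[active_ix].match_spec);
        return 0;
    }

Readers only ever index by the active generation, so a half-written staging slot is never observed.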
else { + match_prog_set = erts_match_set_compile(p, Pattern, MFA); + if (match_prog_set) { + MatchSetRef(match_prog_set); + on = 1; + } else{ + goto error; + } } is_global = 0; @@ -314,6 +334,11 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) matches = erts_set_trace_pattern(p, mfa, specified, match_prog_set, match_prog_set, on, flags, meta_tracer, 0); + } else if (is_atom(MFA)) { + if (is_global || flags.breakpoint || on > ERTS_BREAK_SET) { + goto error; + } + matches = erts_set_tracing_event_pattern(MFA, match_prog_set, on); } error: @@ -487,8 +512,7 @@ start_trace(Process *c_p, ErtsTracer tracer, && !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) { /* This tracee is already being traced, and not by the * tracer to be */ - if (erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCKS_ALL, - common, am_trace_status)) { + if (erts_is_tracer_enabled(tracer, common)) { /* The tracer is still in use */ return 1; } @@ -791,6 +815,8 @@ Eterm trace_info_2(BIF_ALIST_2) if (What == am_on_load) { res = trace_info_on_load(p, Key); + } else if (What == am_send || What == am_receive) { + res = trace_info_event(p, What, Key); } else if (is_atom(What) || is_pid(What) || is_port(What)) { res = trace_info_pid(p, What, Key); } else if (is_tuple(What)) { @@ -829,7 +855,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) return am_undefined; if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee))) - erts_is_tracer_proc_enabled(NULL, 0, &tracee->common, am_trace_status); + erts_is_tracer_proc_enabled(NULL, 0, &tracee->common); tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee)); trace_flags = ERTS_TRACE_FLAGS(tracee); @@ -837,22 +863,24 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) erts_port_release(tracee); } else if (is_internal_pid(pid_spec)) { - Process *tracee; - tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, - pid_spec, ERTS_PROC_LOCK_MAIN); + Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN, + pid_spec, ERTS_PROC_LOCK_MAIN); + + if (tracee == ERTS_PROC_LOCK_BUSY) + ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key); if (!tracee) return am_undefined; if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee))) erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN, - &tracee->common, am_trace_status); + &tracee->common); tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee)); trace_flags = ERTS_TRACE_FLAGS(tracee); - if (tracee != p) - erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); + if (tracee != p) + erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); } else if (is_external_pid(pid_spec) && external_pid_dist_entry(pid_spec) == erts_this_dist_entry) { return am_undefined; @@ -1280,6 +1308,42 @@ trace_info_on_load(Process* p, Eterm key) } } +static Eterm +trace_info_event(Process* p, Eterm event, Eterm key) +{ + ErtsTracingEvent* te; + Eterm retval; + Eterm* hp; + + switch (event) { + case am_send: te = erts_send_tracing; break; + case am_receive: te = erts_receive_tracing; break; + default: + goto error; + } + + if (key != am_match_spec) + goto error; + + te = &te[erts_active_bp_ix()]; + + if (te->on) { + if (!te->match_spec) + retval = am_true; + else + retval = copy_object(MatchSetGetSource(te->match_spec), p); + } + else + retval = am_false; + + hp = HAlloc(p, 3); + return TUPLE2(hp, key, retval); + + error: + BIF_ERROR(p, BADARG); +} + + #undef FUNC_TRACE_NOEXIST #undef FUNC_TRACE_UNTRACED #undef FUNC_TRACE_GLOBAL_TRACE @@ -1307,7 +1371,7 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified, for (i = 0; i < n; i++) { BeamInstr* pc = fp[i].pc; - 
Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code)); + Export* ep = ErtsContainerStruct(pc, Export, code[3]); if (on && !flags.breakpoint) { /* Turn on global call tracing */ @@ -1468,12 +1532,57 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified, } int +erts_set_tracing_event_pattern(Eterm event, Binary* match_spec, int on) +{ + ErtsBpIndex ix = erts_staging_bp_ix(); + ErtsTracingEvent* st; + + switch (event) { + case am_send: st = &erts_send_tracing[ix]; break; + case am_receive: st = &erts_receive_tracing[ix]; break; + default: return -1; + } + + MatchSetUnref(st->match_spec); + + st->on = on; + st->match_spec = match_spec; + MatchSetRef(match_spec); + + finish_bp.current = 1; /* prepare phase not needed for event trace */ + finish_bp.install = on; + finish_bp.e.matched = 0; + finish_bp.e.matching = NULL; + finish_bp.f.matched = 0; + finish_bp.f.matching = NULL; + +#ifndef ERTS_SMP + while (erts_finish_breakpointing()) { + /* Empty loop body */ + } +#endif + return 1; +} + +static void +consolidate_event_tracing(ErtsTracingEvent te[]) +{ + ErtsTracingEvent* src = &te[erts_active_bp_ix()]; + ErtsTracingEvent* dst = &te[erts_staging_bp_ix()]; + + MatchSetUnref(dst->match_spec); + dst->on = src->on; + dst->match_spec = src->match_spec; + MatchSetRef(dst->match_spec); +} + +int erts_finish_breakpointing(void) { ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); /* - * Memory barriers will be issued for all processes *before* + * Memory barriers will be issued for all schedulers *before* * each of the stages below. (Unless the other schedulers * are blocked, in which case memory barriers will be issued * when they are awaken.) @@ -1542,6 +1651,8 @@ erts_finish_breakpointing(void) erts_consolidate_bp_data(&finish_bp.f, 1); erts_bp_free_matched_functions(&finish_bp.e); erts_bp_free_matched_functions(&finish_bp.f); + consolidate_event_tracing(erts_send_tracing); + consolidate_event_tracing(erts_receive_tracing); return 0; default: ASSERT(0); @@ -1556,11 +1667,10 @@ install_exp_breakpoints(BpFunctions* f) BpFunction* fp = f->matching; Uint ne = f->matched; Uint i; - Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr); for (i = 0; i < ne; i++) { BeamInstr* pc = fp[i].pc; - Export* ep = (Export *) (((char *)pc)-offset); + Export* ep = ErtsContainerStruct(pc, Export, code[3]); ep->addressv[code_ix] = pc; } @@ -1573,11 +1683,10 @@ uninstall_exp_breakpoints(BpFunctions* f) BpFunction* fp = f->matching; Uint ne = f->matched; Uint i; - Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr); for (i = 0; i < ne; i++) { BeamInstr* pc = fp[i].pc; - Export* ep = (Export *) (((char *)pc)-offset); + Export* ep = ErtsContainerStruct(pc, Export, code[3]); if (ep->addressv[code_ix] != pc) { continue; @@ -1594,11 +1703,10 @@ clean_export_entries(BpFunctions* f) BpFunction* fp = f->matching; Uint ne = f->matched; Uint i; - Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr); for (i = 0; i < ne; i++) { BeamInstr* pc = fp[i].pc; - Export* ep = (Export *) (((char *)pc)-offset); + Export* ep = ErtsContainerStruct(pc, Export, code[3]); if (ep->addressv[code_ix] == pc) { continue; @@ -2277,7 +2385,7 @@ reply_trace_delivered_all(void *vtdarp) #ifdef ERTS_SMP erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp); #else - erts_queue_message(rp, &rp_locks, mp, msg); + erts_queue_message(rp, rp_locks, mp, msg, am_system); #endif erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp); diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c index 
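
The hunks above replace three copies of hand-computed byte-offset casts, from a code[3] address back to its enclosing Export, with ErtsContainerStruct(pc, Export, code[3]), the classic container_of idiom. A standalone sketch assuming the conventional definition of that macro:

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned long BeamInstr;
    typedef struct { int slot; BeamInstr code[5]; } Export; /* simplified */

    /* Conventional container_of: step back from a member's address
     * to the enclosing struct. */
    #define ContainerStruct(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        Export exp = { 7, { 0, 0, 0, 0, 0 } };
        BeamInstr *pc = &exp.code[3];
        Export *ep = ContainerStruct(pc, Export, code[3]);
        printf("slot: %d\n", ep->slot); /* prints 7 */
        return 0;
    }

Letting offsetof do the arithmetic removes the fragile "offsetof(Export, code) + 3*sizeof(BeamInstr)" bookkeeping that each call site previously duplicated.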
1e57e9fa53..7c70217d8d 100644 --- a/erts/emulator/beam/erl_bif_unique.c +++ b/erts/emulator/beam/erl_bif_unique.c @@ -257,7 +257,7 @@ static ERTS_INLINE Eterm unique_integer_bif(Process *c_p, int positive) Uint hsz; Eterm *hp; - esdp = ERTS_PROC_GET_SCHDATA(c_p); + esdp = erts_proc_sched_data(c_p); thr_id = (Uint64) esdp->thr_id; unique = esdp->unique++; bld_unique_integer_term(NULL, &hsz, thr_id, unique, positive); @@ -515,7 +515,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0) hp = HAlloc(BIF_P, REF_THING_SIZE); - res = erts_sched_make_ref_in_buffer(ERTS_PROC_GET_SCHDATA(BIF_P), hp); + res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp); BIF_RET(res); } diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h index 1c2a090f07..4bd5b24157 100644 --- a/erts/emulator/beam/erl_bits.h +++ b/erts/emulator/beam/erl_bits.h @@ -83,8 +83,8 @@ typedef struct erl_bin_match_struct{ #ifdef ERTS_SMP /* the state resides in the current process' scheduler data */ #define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS -#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &(P)->scheduler_data->erl_bits_state;}while(0) -#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &(P)->scheduler_data->erl_bits_state +#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0) +#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state #else /* reentrant API but with a hidden single global state, for testing only */ extern struct erl_bits_state ErlBitsState_; diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 615d23402b..bad34211a5 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -2833,7 +2833,8 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3) BIF_TRAP3(bif_export[BIF_ets_match_spec_run_r_3], BIF_P,lst,BIF_ARG_2,ret); } - res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), NULL, 0, + res = db_prog_match(BIF_P, BIF_P, + mp, CAR(list_val(lst)), NULL, 0, ERTS_PAM_COPY_RESULT, &dummy); if (is_value(res)) { hp = HAlloc(BIF_P, 2); @@ -3461,7 +3462,7 @@ static void fix_table_locked(Process* p, DbTable* tb) fix = tb->common.fixations; if (fix == NULL) { tb->common.time.monotonic - = erts_get_monotonic_time(ERTS_PROC_GET_SCHDATA(p)); + = erts_get_monotonic_time(erts_proc_sched_data(p)); tb->common.time.offset = erts_get_time_offset(); } else { diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index 76b96637ae..6732b708a8 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -123,6 +123,9 @@ do { \ #define TermWords(t) (((t) / (sizeof(UWord)/sizeof(Eterm))) + !!((t) % (sizeof(UWord)/sizeof(Eterm)))) +#define add_dmc_err(EINFO, STR, VAR, TERM, SEV) \ + vadd_dmc_err(EINFO, SEV, VAR, STR, TERM) + static ERTS_INLINE Process * get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks) @@ -171,7 +174,8 @@ set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer, ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p) || erts_thr_progress_is_blocking()); - if (ERTS_TRACER_IS_NIL(tracer) || erts_is_tracer_enabled(tracee_p, tracer)) + if (ERTS_TRACER_IS_NIL(tracer) + || erts_is_tracer_enabled(tracer, &tracee_p->common)) return set_tracee_flags(tracee_p, tracer, d_flags, e_flags); return fail_term; } @@ -411,17 +415,27 @@ get_match_pseudo_process(Process *c_p, Uint heap_size) { ErtsMatchPseudoProcess *mpsp; #ifdef ERTS_SMP - mpsp = (ErtsMatchPseudoProcess *) 
c_p->scheduler_data->match_pseudo_process; - if (mpsp) + ErtsSchedulerData *esdp; + + esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data(); + + mpsp = esdp ? esdp->match_pseudo_process : + (ErtsMatchPseudoProcess*) erts_smp_tsd_get(match_pseudo_process_key); + + if (mpsp) { + ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key)); + ASSERT(mpsp->process.scheduler_data == esdp); cleanup_match_pseudo_process(mpsp, 0); + } else { ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL); mpsp = create_match_pseudo_process(); - c_p->scheduler_data->match_pseudo_process = (void *) mpsp; + if (esdp) { + esdp->match_pseudo_process = (void *) mpsp; + } + mpsp->process.scheduler_data = esdp; erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp); } - ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key)); - mpsp->process.scheduler_data = c_p->scheduler_data; #else mpsp = match_pseudo_process; cleanup_match_pseudo_process(mpsp, 0); @@ -889,11 +903,7 @@ void db_match_dis(Binary *prog); #define TRACE /* Nothing */ #define FENCE_PATTERN_SIZE 0 #endif -static void add_dmc_err(DMCErrInfo *err_info, - char *str, - int variable, - Eterm term, - DMCErrorSeverity severity); +static void vadd_dmc_err(DMCErrInfo*, DMCErrorSeverity, int var, const char *str, ...); static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity); @@ -989,12 +999,20 @@ Eterm erts_match_set_get_source(Binary *mpsp) } /* This one is for the tracing */ -Binary *erts_match_set_compile(Process *p, Eterm matchexpr) { +Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA) { Binary *bin; Uint sz; Eterm *hp; + Uint flags; + + switch (MFA) { + case am_receive: flags = DCOMP_TRACE; break; + case am_send: flags = DCOMP_TRACE | DCOMP_ALLOW_TRACE_OPS; break; + default: + flags = DCOMP_TRACE | DCOMP_CALL_TRACE | DCOMP_ALLOW_TRACE_OPS; + } - bin = db_match_set_compile(p, matchexpr, DCOMP_TRACE); + bin = db_match_set_compile(p, matchexpr, flags); if (bin != NULL) { MatchProg *prog = Binary2MatchProg(bin); sz = size_object(matchexpr); @@ -1124,8 +1142,8 @@ Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags) int i; if (!is_list(matchexpr)) { - add_dmc_err(err_info, "Match programs are not in a list.", - -1, 0UL, dmcError); + add_dmc_err(err_info, "Match programs are not in a list.", + -1, 0UL, dmcError); goto done; } num_heads = 0; @@ -1133,9 +1151,8 @@ Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags) ++num_heads; if (l != NIL) { /* proper list... */ - add_dmc_err(err_info, "Match programs are not in a proper " - "list.", - -1, 0UL, dmcError); + add_dmc_err(err_info, "Match programs are not in a proper list.", + -1, 0UL, dmcError); goto done; } @@ -1202,30 +1219,37 @@ done: return ret; } -Eterm erts_match_set_run(Process *p, Binary *mpsp, - Eterm *args, int num_args, - enum erts_pam_run_flags in_flags, - Uint32 *return_flags) +/* Returns + * am_false if no match or + * if {message,false} has been called, + * am_true if {message,_} has NOT been called or + * if {message,true} has been called, + * Msg if {message,Msg} has been called. + * + * If return value is_not_immed + * then erts_match_set_release_result_trace() must be called to release it. 
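
As the erts_match_set_compile() hunk above shows, the PAM compile flags are now chosen per trace event: 'receive' match specs get DCOMP_TRACE only, 'send' additionally allows trace operations, and anything else (call tracing) also enables the call-trace-only instructions. A minimal restatement of that dispatch; the enum values here are illustrative, not the real DCOMP_* encodings.

    #include <stdio.h>

    enum { DCOMP_TRACE = 1, DCOMP_CALL_TRACE = 2, DCOMP_ALLOW_TRACE_OPS = 4 };
    enum { EV_RECEIVE, EV_SEND, EV_CALL };

    /* Mirrors the switch on MFA in erts_match_set_compile(). */
    static unsigned compile_flags(int event)
    {
        switch (event) {
        case EV_RECEIVE: return DCOMP_TRACE;
        case EV_SEND:    return DCOMP_TRACE | DCOMP_ALLOW_TRACE_OPS;
        default:         return DCOMP_TRACE | DCOMP_CALL_TRACE
                              | DCOMP_ALLOW_TRACE_OPS;
        }
    }

    int main(void)
    {
        printf("receive: %u, send: %u, call: %u\n",
               compile_flags(EV_RECEIVE), compile_flags(EV_SEND),
               compile_flags(EV_CALL));
        return 0;
    }

This is what the new check_trace() helper further down enforces at compile time, rejecting special forms whose required DCOMP_* bits are absent for the event being traced.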
+ */ +Eterm erts_match_set_run_trace(Process *c_p, + Process *self, + Binary *mpsp, + Eterm *args, int num_args, + enum erts_pam_run_flags in_flags, + Uint32 *return_flags) { Eterm ret; - ret = db_prog_match(p, mpsp, NIL, args, num_args, + ret = db_prog_match(c_p, self, mpsp, NIL, args, num_args, in_flags, return_flags); -#if defined(HARDDEBUG) - if (is_non_value(ret)) { - erts_fprintf(stderr, "Failed\n"); - } else { - erts_fprintf(stderr, "Returning : %T\n", ret); + + ASSERT(!(is_non_value(ret) && *return_flags)); + + if (is_non_value(ret) || ret == am_false) { + erts_match_set_release_result(c_p); + return am_false; } -#endif + if (is_immed(ret)) + erts_match_set_release_result(c_p); return ret; - /* Returns - * THE_NON_VALUE if no match - * am_false if {message,false} has been called, - * am_true if {message,_} has not been called or - * if {message,true} has been called, - * Msg if {message,Msg} has been called. - */ } static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp, @@ -1234,7 +1258,8 @@ static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp, { Eterm ret; - ret = db_prog_match(p, mpsp, args, NULL, num_args, + ret = db_prog_match(p, p, + mpsp, args, NULL, num_args, ERTS_PAM_COPY_RESULT, return_flags); #if defined(HARDDEBUG) @@ -1730,7 +1755,9 @@ static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity) ** the parameter 'arity' is only used if 'term' is actually an array, ** i.e. 'DCOMP_TRACE' was specified */ -Eterm db_prog_match(Process *c_p, Binary *bprog, +Eterm db_prog_match(Process *c_p, + Process *self, + Binary *bprog, Eterm term, Eterm *termp, int arity, @@ -1743,10 +1770,10 @@ Eterm db_prog_match(Process *c_p, Binary *bprog, Eterm *esp; MatchVariable* variables; BeamInstr *cp; - UWord *pc = prog->text; + const UWord *pc = prog->text; Eterm *ehp; Eterm ret; - Uint n = 0; /* To avoid warning. */ + Uint n; int i; unsigned do_catch; ErtsMatchPseudoProcess *mpsp; @@ -1758,46 +1785,28 @@ Eterm db_prog_match(Process *c_p, Binary *bprog, Eterm (*bif)(Process*, ...); Eterm bif_args[3]; int fail_label; - int atomic_trace; #ifdef DMC_DEBUG Uint *heap_fence; Uint *stack_fence; Uint save_op; #endif /* DMC_DEBUG */ + ERTS_UNDEF(n,0); + ERTS_UNDEF(current_scheduled,NULL); + + ASSERT(c_p || !(in_flags & ERTS_PAM_COPY_RESULT)); + mpsp = get_match_pseudo_process(c_p, prog->heap_size); psp = &mpsp->process; /* We need to lure the scheduler into believing in the pseudo process, because of floating point exceptions. Do *after* mpsp is set!!! */ - esdp = ERTS_GET_SCHEDULER_DATA_FROM_PROC(c_p); - ASSERT(esdp != NULL); - current_scheduled = esdp->current_process; + esdp = erts_get_scheduler_data(); + if (esdp) + current_scheduled = esdp->current_process; /* SMP: psp->scheduler_data is set by get_match_pseudo_process */ - atomic_trace = 0; -#define BEGIN_ATOMIC_TRACE(p) \ - do { \ - if (! 
atomic_trace) { \ - erts_refc_inc(&bprog->refc, 2); \ - erts_smp_proc_unlock((p), ERTS_PROC_LOCK_MAIN); \ - erts_smp_thr_progress_block(); \ - atomic_trace = !0; \ - } \ - } while (0) -#define END_ATOMIC_TRACE(p) \ - do { \ - if (atomic_trace) { \ - erts_smp_thr_progress_unblock(); \ - erts_smp_proc_lock((p), ERTS_PROC_LOCK_MAIN); \ - if (erts_refc_dectest(&bprog->refc, 0) == 0) {\ - erts_bin_free(bprog); \ - } \ - atomic_trace = 0; \ - } \ - } while (0) - #ifdef DMC_DEBUG save_op = 0; heap_fence = (Eterm*)((char*) mpsp->u.heap + prog->stack_offset) - 1; @@ -2256,7 +2265,7 @@ restart: pc += n; break; case matchSelf: - *esp++ = c_p->common.id; + *esp++ = self->common.id; break; case matchWaste: --esp; @@ -2266,6 +2275,7 @@ restart: break; case matchProcessDump: { erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0); + ASSERT(c_p == self); print_process_info(ERTS_PRINT_DSBUF, (void *) dsbufp, c_p); *esp++ = new_binary(build_proc, (byte *)dsbufp->str, dsbufp->str_len); @@ -2284,14 +2294,16 @@ restart: *return_flags |= MATCH_SET_EXCEPTION_TRACE; *esp++ = am_true; break; - case matchIsSeqTrace: + case matchIsSeqTrace: + ASSERT(c_p == self); if (have_seqtrace(SEQ_TRACE_TOKEN(c_p))) *esp++ = am_true; else *esp++ = am_false; break; case matchSetSeqToken: - t = erts_seq_trace(c_p, esp[-1], esp[-2], 0); + ASSERT(c_p == self); + t = erts_seq_trace(c_p, esp[-1], esp[-2], 0); if (is_non_value(t)) { esp[-2] = FAIL_TERM; } else { @@ -2299,7 +2311,8 @@ restart: } --esp; break; - case matchSetSeqTokenFake: + case matchSetSeqTokenFake: + ASSERT(c_p == self); t = seq_trace_fake(c_p, esp[-1]); if (is_non_value(t)) { esp[-2] = FAIL_TERM; @@ -2308,7 +2321,8 @@ restart: } --esp; break; - case matchGetSeqToken: + case matchGetSeqToken: + ASSERT(c_p == self); if (have_no_seqtrace(SEQ_TRACE_TOKEN(c_p))) *esp++ = NIL; else { @@ -2332,49 +2346,62 @@ restart: ASSERT(is_immed(ehp[5])); } break; - case matchEnableTrace: + case matchEnableTrace: + ASSERT(c_p == self); if ( (n = erts_trace_flag2bit(esp[-1]))) { - BEGIN_ATOMIC_TRACE(c_p); + erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n); + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; } break; - case matchEnableTrace2: + case matchEnableTrace2: + ASSERT(c_p == self); n = erts_trace_flag2bit((--esp)[-1]); esp[-1] = FAIL_TERM; if (n) { - BEGIN_ATOMIC_TRACE(c_p); - if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) { + if ( (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN, esp[0], ERTS_PROC_LOCKS_ALL))) { /* Always take over the tracer of the current process */ set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n); - esp[-1] = am_true; + if (tmpp == c_p) + erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); + else + erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + esp[-1] = am_true; } } break; - case matchDisableTrace: + case matchDisableTrace: + ASSERT(c_p == self); if ( (n = erts_trace_flag2bit(esp[-1]))) { - BEGIN_ATOMIC_TRACE(c_p); + erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0); + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; } break; - case matchDisableTrace2: + case matchDisableTrace2: + ASSERT(c_p == self); n = erts_trace_flag2bit((--esp)[-1]); esp[-1] = FAIL_TERM; if (n) { - BEGIN_ATOMIC_TRACE(c_p); - if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) { + if ( (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN, esp[0], ERTS_PROC_LOCKS_ALL))) { /* Always take over the tracer of the 
current process */ set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0); - esp[-1] = am_true; + if (tmpp == c_p) + erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); + else + erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + esp[-1] = am_true; } } break; - case matchCaller: + case matchCaller: + ASSERT(c_p == self); if (!(c_p->cp) || !(cp = find_function_from_pc(c_p->cp))) { *esp++ = am_undefined; } else { @@ -2386,7 +2413,8 @@ restart: ehp[3] = make_small((Uint) cp[2]); } break; - case matchSilent: + case matchSilent: + ASSERT(c_p == self); --esp; if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT) break; @@ -2401,7 +2429,8 @@ restart: erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } break; - case matchTrace2: + case matchTrace2: + ASSERT(c_p == self); { /* disable enable */ Uint d_flags = 0, e_flags = 0; /* process trace flags */ @@ -2433,7 +2462,8 @@ restart: ERTS_TRACER_CLEAR(&tracer); } break; - case matchTrace3: + case matchTrace3: + ASSERT(c_p == self); { /* disable enable */ Uint d_flags = 0, e_flags = 0; /* process trace flags */ @@ -2506,15 +2536,12 @@ success: } #endif - esdp->current_process = current_scheduled; - - END_ATOMIC_TRACE(c_p); + if (esdp) + esdp->current_process = current_scheduled; return ret; #undef FAIL #undef FAIL_TERM -#undef BEGIN_ATOMIC_TRACE -#undef END_ATOMIC_TRACE } @@ -3259,20 +3286,20 @@ int erts_db_is_compiled_ms(Eterm term) ** Utility to add an error */ -static void add_dmc_err(DMCErrInfo *err_info, - char *str, - int variable, - Eterm term, - DMCErrorSeverity severity) +static void vadd_dmc_err(DMCErrInfo *err_info, + DMCErrorSeverity severity, + int variable, + const char *str, + ...) { + DMCError *e; + va_list args; + va_start(args, str); + + /* Linked in in reverse order, to ease the formatting */ - DMCError *e = erts_alloc(ERTS_ALC_T_DB_DMC_ERROR, sizeof(DMCError)); - if (term != 0UL) { - erts_snprintf(e->error_string, DMC_ERR_STR_LEN, str, term); - } else { - strncpy(e->error_string, str, DMC_ERR_STR_LEN); - e->error_string[DMC_ERR_STR_LEN] ='\0'; - } + e = erts_alloc(ERTS_ALC_T_DB_DMC_ERROR, sizeof(DMCError)); + erts_vsnprintf(e->error_string, DMC_ERR_STR_LEN, str, args); e->variable = variable; e->severity = severity; e->next = err_info->first; @@ -3282,8 +3309,11 @@ static void add_dmc_err(DMCErrInfo *err_info, err_info->first = e; if (severity >= dmcError) err_info->error_added = 1; + + va_end(args); } + /* ** Handle one term in the match expression (not the guard) */ @@ -3482,24 +3512,21 @@ static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text, context->stack_need = context->stack_used; } -#define RETURN_ERROR_X(String, X, Y, ContextP, ConstantF) \ -do { \ -if ((ContextP)->err_info != NULL) { \ - (ConstantF) = 0; \ - add_dmc_err((ContextP)->err_info, String, X, Y, dmcError); \ - return retOk; \ -} else \ - return retFail; \ -} while(0) +#define RETURN_ERROR_X(VAR, ContextP, ConstantF, String, ARG) \ + (((ContextP)->err_info != NULL) \ + ? 
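/* with an error-collecting context: record the error, clear the constant flag, and let compilation continue */ \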
((ConstantF) = 0, \ + vadd_dmc_err((ContextP)->err_info, dmcError, VAR, String, ARG), \ + retOk) \ + : retFail) #define RETURN_ERROR(String, ContextP, ConstantF) \ - RETURN_ERROR_X(String, -1, 0UL, ContextP, ConstantF) + return RETURN_ERROR_X(-1, ContextP, ConstantF, String, 0) #define RETURN_VAR_ERROR(String, N, ContextP, ConstantF) \ - RETURN_ERROR_X(String, N, 0UL, ContextP, ConstantF) + return RETURN_ERROR_X(N, ContextP, ConstantF, String, 0) #define RETURN_TERM_ERROR(String, T, ContextP, ConstantF) \ - RETURN_ERROR_X(String, -1, T, ContextP, ConstantF) + return RETURN_ERROR_X(-1, ContextP, ConstantF, String, T) #define WARNING(String, ContextP) \ add_dmc_err((ContextP)->err_info, String, -1, 0UL, dmcWarning) @@ -3765,7 +3792,7 @@ static DMCRet dmc_variable(DMCContext *context, Uint n = db_is_variable(t); if (n >= heap->vars_used || !heap->vars[n].is_bound) { - RETURN_VAR_ERROR("Variable $%d is unbound.", n, context, *constant); + RETURN_VAR_ERROR("Variable $%%d is unbound.", n, context, *constant); } dmc_add_pushv_variant(context, heap, text, n); @@ -4097,7 +4124,30 @@ static DMCRet dmc_exception_trace(DMCContext *context, return retOk; } - +static int check_trace(const char* op, + DMCContext *context, + int *constant, + int need_cflags, + int allow_in_guard, + DMCRet* retp) +{ + if (!(context->cflags & DCOMP_TRACE)) { + *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' " + "used in wrong dialect.", op); + return 0; + } + if ((context->cflags & need_cflags) != need_cflags) { + *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' " + "not allowed for this trace event.", op); + return 0; + } + if (context->is_guard && !allow_in_guard) { + *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' " + "called in guard context.", op); + return 0; + } + return 1; +} static DMCRet dmc_is_seq_trace(DMCContext *context, DMCHeap *heap, @@ -4107,12 +4157,11 @@ static DMCRet dmc_is_seq_trace(DMCContext *context, { Eterm *p = tuple_val(t); Uint a = arityval(*p); + DMCRet ret; - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'is_seq_trace' used in wrong dialect.", - context, - *constant); - } + if (!check_trace("is_seq_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 1, &ret)) + return ret; + if (a != 1) { RETURN_TERM_ERROR("Special form 'is_seq_trace' called with " "arguments in %T.", t, context, *constant); @@ -4136,16 +4185,8 @@ static DMCRet dmc_set_seq_token(DMCContext *context, DMCRet ret; int c; - - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'set_seq_token' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'set_seq_token' called in " - "guard context.", context, *constant); - } + if (!check_trace("set_seq_token", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret)) + return ret; if (a != 3) { RETURN_TERM_ERROR("Special form 'set_seq_token' called with wrong " @@ -4182,16 +4223,11 @@ static DMCRet dmc_get_seq_token(DMCContext *context, { Eterm *p = tuple_val(t); Uint a = arityval(*p); + DMCRet ret; + + if (!check_trace("get_seq_token", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret)) + return ret; - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'get_seq_token' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'get_seq_token' called in " - "guard context.", context, *constant); - } if (a != 1) { RETURN_TERM_ERROR("Special form 'get_seq_token' called with " "arguments 
in %T.", t, context, @@ -4255,16 +4291,10 @@ static DMCRet dmc_process_dump(DMCContext *context, { Eterm *p = tuple_val(t); Uint a = arityval(*p); - - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'process_dump' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'process_dump' called in " - "guard context.", context, *constant); - } + DMCRet ret; + + if (!check_trace("process_dump", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret)) + return ret; if (a != 1) { RETURN_TERM_ERROR("Special form 'process_dump' called with " @@ -4288,17 +4318,8 @@ static DMCRet dmc_enable_trace(DMCContext *context, DMCRet ret; int c; - - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'enable_trace' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'enable_trace' called in guard context.", - context, - *constant); - } + if (!check_trace("enable_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret)) + return ret; switch (a) { case 2: @@ -4347,18 +4368,9 @@ static DMCRet dmc_disable_trace(DMCContext *context, Uint a = arityval(*p); DMCRet ret; int c; - - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'disable_trace' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'disable_trace' called in guard context.", - context, - *constant); - } + if (!check_trace("disable_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret)) + return ret; switch (a) { case 2: @@ -4408,17 +4420,8 @@ static DMCRet dmc_trace(DMCContext *context, DMCRet ret; int c; - - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'trace' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'trace' called in guard context.", - context, - *constant); - } + if (!check_trace("trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret)) + return ret; switch (a) { case 3: @@ -4479,16 +4482,11 @@ static DMCRet dmc_caller(DMCContext *context, { Eterm *p = tuple_val(t); Uint a = arityval(*p); + DMCRet ret; - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'caller' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'caller' called in " - "guard context.", context, *constant); - } + if (!check_trace("caller", context, constant, + (DCOMP_CALL_TRACE|DCOMP_ALLOW_TRACE_OPS), 0, &ret)) + return ret; if (a != 1) { RETURN_TERM_ERROR("Special form 'caller' called with " @@ -4514,15 +4512,8 @@ static DMCRet dmc_silent(DMCContext *context, DMCRet ret; int c; - if (!(context->cflags & DCOMP_TRACE)) { - RETURN_ERROR("Special form 'silent' used in wrong dialect.", - context, - *constant); - } - if (context->is_guard) { - RETURN_ERROR("Special form 'silent' called in " - "guard context.", context, *constant); - } + if (!check_trace("silent", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret)) + return ret; if (a != 2) { RETURN_TERM_ERROR("Special form 'silent' called with wrong " @@ -5062,11 +5053,14 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace) return THE_NON_VALUE; } if (trace) { - lint_res = db_match_set_lint(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE); - mps = db_match_set_compile(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE); + const Uint cflags = (DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE | + DCOMP_CALL_TRACE | DCOMP_ALLOW_TRACE_OPS); 
+ lint_res = db_match_set_lint(p, spec, cflags); + mps = db_match_set_compile(p, spec, cflags); } else { - lint_res = db_match_set_lint(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE); - mps = db_match_set_compile(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE); + const Uint cflags = (DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE); + lint_res = db_match_set_lint(p, spec, cflags); + mps = db_match_set_compile(p, spec, cflags); } if (mps == NULL) { @@ -5099,7 +5093,8 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace) } save_cp = p->cp; p->cp = NULL; - res = erts_match_set_run(p, mps, arr, n, + res = erts_match_set_run_trace(p, p, + mps, arr, n, ERTS_PAM_COPY_RESULT|ERTS_PAM_IGNORE_TRACE_SILENT, &ret_flags); p->cp = save_cp; @@ -5182,7 +5177,8 @@ Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog, obj = db_alloc_tmp_uncompressed(tb, obj); } - res = db_prog_match(c_p, bprog, make_tuple(obj->tpl), NULL, 0, + res = db_prog_match(c_p, c_p, + bprog, make_tuple(obj->tpl), NULL, 0, ERTS_PAM_COPY_RESULT|ERTS_PAM_CONTIGUOUS_TUPLE, &dummy); if (is_value(res) && hpp!=NULL) { diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 5d7946a525..60f7067d70 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -425,6 +425,11 @@ typedef struct dmc_err_info { #define DCOMP_FAKE_DESTRUCTIVE ((Uint) 8) /* When this is active, no setting of trace control words or seq_trace tokens will be done. */ +/* Allow lock seizing operations on the tracee and 3rd party processes */ +#define DCOMP_ALLOW_TRACE_OPS ((Uint) 0x10) + +/* This is call trace */ +#define DCOMP_CALL_TRACE ((Uint) 0x20) Binary *db_match_compile(Eterm *matchexpr, Eterm *guards, Eterm *body, int num_matches, @@ -435,7 +440,8 @@ Binary *db_match_compile(Eterm *matchexpr, Eterm *guards, Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog, int all, DbTerm* obj, Eterm** hpp, Uint extra); -Eterm db_prog_match(Process *p, Binary *prog, Eterm term, +Eterm db_prog_match(Process *p, Process *self, + Binary *prog, Eterm term, Eterm *termp, int arity, enum erts_pam_run_flags in_flags, Uint32 *return_flags /* Zeroed on enter */); diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h index 2700b62854..6ec5fbb895 100644 --- a/erts/emulator/beam/erl_drv_nif.h +++ b/erts/emulator/beam/erl_drv_nif.h @@ -43,12 +43,11 @@ typedef struct { int suggested_stack_size; } ErlDrvThreadOpts; -#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) + typedef enum { - ERL_DRV_DIRTY_JOB_CPU_BOUND = 1, - ERL_DRV_DIRTY_JOB_IO_BOUND = 2 -} ErlDrvDirtyJobFlags; -#endif + ERL_DIRTY_JOB_CPU_BOUND = 1, + ERL_DIRTY_JOB_IO_BOUND = 2 +} ErlDirtyJobFlags; #ifdef SIZEOF_CHAR # define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index e21fba6bf1..d740b2baec 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -41,6 +41,7 @@ #endif #include "dtrace-wrapper.h" #include "erl_bif_unique.h" +#include "dist.h" #define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1 #define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20 @@ -146,7 +147,8 @@ static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size, static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size); static void offset_mqueue(Process *p, Sint offs, char* area, Uint area_size); static void move_msgq_to_heap(Process *p); - +static int reached_max_heap_size(Process *p, 
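/* process being collected */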
Uint total_heap_size, + Uint extra_heap_size, Uint extra_old_heap_size); static void init_gc_info(ErtsGCInfo *gcip); #ifdef HARDDEBUG @@ -389,7 +391,7 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end, if (p->freason == TRAP) { #if HIPE if (regs == NULL) { - regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array; + regs = erts_proc_sched_data(p)->x_reg_array; } #endif cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls); @@ -404,6 +406,7 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end, result = val[0]; } BUMP_REDS(p, cost); + return result; } @@ -507,14 +510,14 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int /* Make sure that we do a proper GC as soon as possible... */ p->flags |= F_FORCE_GC; reds_left = ERTS_REDS_LEFT(p, fcalls); - ASSERT(CONTEXT_REDS - reds_left >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds); + ASSERT(CONTEXT_REDS - reds_left >= erts_proc_sched_data(p)->virtual_reds); if (reds_left > ERTS_ABANDON_HEAP_COST) { int vreds = reds_left - ERTS_ABANDON_HEAP_COST; - ERTS_PROC_GET_SCHDATA((p))->virtual_reds += vreds; + erts_proc_sched_data((p))->virtual_reds += vreds; } - ASSERT(CONTEXT_REDS >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds); + ASSERT(CONTEXT_REDS >= erts_proc_sched_data(p)->virtual_reds); return reds_left; } @@ -577,18 +580,22 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, int need, Eterm* objv, int nobj, int fcalls) { Uint reclaimed_now = 0; + Eterm gc_trace_end_tag; int reds; ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */ ErtsSchedulerData *esdp; + erts_aint32_t state; ERTS_MSACC_PUSH_STATE_M(); #ifdef USE_VM_PROBES DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE); #endif ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) - >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds); + >= erts_proc_sched_data(p)->virtual_reds); - if (p->flags & (F_DISABLE_GC|F_DELAY_GC)) + state = erts_smp_atomic32_read_nob(&p->state); + + if (p->flags & (F_DISABLE_GC|F_DELAY_GC) || state & ERTS_PSFLG_EXITING) return delay_garbage_collection(p, live_hf_end, need, fcalls); if (p->abandoned_heap) @@ -623,28 +630,28 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, if (GEN_GCS(p) < MAX_GEN_GCS(p) && !(FLAGS(p) & F_NEED_FULLSWEEP)) { if (IS_TRACED_FL(p, F_TRACE_GC)) { - trace_gc(p, am_gc_minor_start, need); + trace_gc(p, am_gc_minor_start, need, THE_NON_VALUE); } DTRACE2(gc_minor_start, pidbuf, need); reds = minor_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now); DTRACE2(gc_minor_end, pidbuf, reclaimed_now); - if (IS_TRACED_FL(p, F_TRACE_GC)) { - trace_gc(p, am_gc_minor_end, reclaimed_now); - } - if (reds < 0) + if (reds == -1) { + if (IS_TRACED_FL(p, F_TRACE_GC)) { + trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE); + } goto do_major_collection; + } + gc_trace_end_tag = am_gc_minor_end; } else { do_major_collection: ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC_FULL); if (IS_TRACED_FL(p, F_TRACE_GC)) { - trace_gc(p, am_gc_major_start, need); + trace_gc(p, am_gc_major_start, need, THE_NON_VALUE); } DTRACE2(gc_major_start, pidbuf, need); reds = major_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now); DTRACE2(gc_major_end, pidbuf, reclaimed_now); - if (IS_TRACED_FL(p, F_TRACE_GC)) { - trace_gc(p, am_gc_major_end, reclaimed_now); - } + gc_trace_end_tag = am_gc_major_end; ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC); } @@ -658,8 +665,33 @@ do_major_collection: ErtsGcQuickSanityCheck(p); + /* Max heap size has been reached and the process was 
configured + to be killed, so we kill it and set it in a delayed garbage + collecting state. There should be no gc_end trace or + long_gc/large_gc triggers when this happens as process was + killed before a GC could be done. */ + if (reds == -2) { + ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL; + + erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_send_exit_signal(p, p->common.id, p, &locks, + am_kill, NIL, NULL, 0); + erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR); + + /* erts_send_exit_signal looks for ERTS_PSFLG_GC, so + we have to remove it after the signal is sent */ + erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + + /* We have to make sure that we have space for need on the heap */ + return delay_garbage_collection(p, live_hf_end, need, fcalls); + } + erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + if (IS_TRACED_FL(p, F_TRACE_GC)) { + trace_gc(p, gc_trace_end_tag, reclaimed_now, THE_NON_VALUE); + } + if (erts_system_monitor_long_gc != 0) { ErtsMonotonicTime end_time; Uint gc_time; @@ -716,7 +748,7 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca int reds_left = ERTS_REDS_LEFT(p, fcalls); if (reds > reds_left) reds = reds_left; - ASSERT(CONTEXT_REDS - (reds_left - reds) >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds); + ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds); return reds; } @@ -726,7 +758,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls); BUMP_REDS(p, reds); ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p) - >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds); + >= erts_proc_sched_data(p)->virtual_reds); } /* @@ -761,6 +793,7 @@ erts_garbage_collect_hibernate(Process* p) heap_size = p->heap_sz + (p->old_htop - p->old_heap) + p->mbuf_sz; + heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP, sizeof(Eterm)*heap_size); htop = heap; @@ -1031,6 +1064,34 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end, Uint size_before = young_gen_usage(p); /* + * Check if we have gone past the max heap size limit + */ + + if (MAX_HEAP_SIZE_GET(p)) { + Uint heap_size = size_before, + /* Note that we also count the un-allocated area + in between the stack and heap */ + stack_size = HEAP_END(p) - HEAP_TOP(p), + extra_heap_size, + extra_old_heap_size = 0; + + /* Add potential old heap size */ + if (OLD_HEAP(p) == NULL && mature_size != 0) { + extra_old_heap_size = erts_next_heap_size(size_before, 1); + heap_size += extra_old_heap_size; + } else if (OLD_HEAP(p)) + heap_size += OLD_HEND(p) - OLD_HEAP(p); + + /* Add potential new young heap size */ + extra_heap_size = next_heap_size(p, stack_size + size_before, 0); + heap_size += extra_heap_size; + + if (heap_size > MAX_HEAP_SIZE_GET(p)) + if (reached_max_heap_size(p, heap_size, extra_heap_size, extra_old_heap_size)) + return -2; + } + + /* * Allocate an old heap if we don't have one and if we'll need one. */ @@ -1139,6 +1200,16 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end, ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0)); ASSERT(MBUF(p) == NULL); + /* The heap usage during GC should be larger than what we end up + after a GC, even if we grow it. 
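(The limit is compared against live young-generation usage plus the whole old heap and the unused stack/heap gap.)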
If this assertion is not true + we have to check size in grow_new_heap and potentially kill the + process from there */ + ASSERT(!MAX_HEAP_SIZE_GET(p) || + !(MAX_HEAP_SIZE_FLAGS_GET(p) & MAX_HEAP_SIZE_KILL) || + MAX_HEAP_SIZE_GET(p) > (young_gen_usage(p) + + (OLD_HEND(p) - OLD_HEAP(p)) + + (HEAP_END(p) - HEAP_TOP(p)))); + return gc_cost(size_after, adjust_size); } @@ -1457,6 +1528,25 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end, if (new_sz == HEAP_SIZE(p) && FLAGS(p) & F_HEAP_GROW) { new_sz = next_heap_size(p, HEAP_SIZE(p), 1); } + + + if (MAX_HEAP_SIZE_GET(p)) { + Uint heap_size = size_before; + + /* Add unused space in old heap */ + heap_size += OLD_HEND(p) - OLD_HTOP(p); + + /* Add stack + unused space in young heap */ + heap_size += HEAP_END(p) - HEAP_TOP(p); + + /* Add size of new young heap */ + heap_size += new_sz; + + if (MAX_HEAP_SIZE_GET(p) < heap_size) + if (reached_max_heap_size(p, heap_size, new_sz, 0)) + return -2; + } + FLAGS(p) &= ~(F_HEAP_GROW|F_NEED_FULLSWEEP); n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*new_sz); @@ -2938,7 +3028,7 @@ reply_gc_info(void *vgcirp) hpp = &hp; } - erts_queue_message(rp, &rp_locks, mp, msg); + erts_queue_message(rp, rp_locks, mp, msg, am_system); if (gcirp->req_sched == esdp->no) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -2955,7 +3045,7 @@ reply_gc_info(void *vgcirp) Eterm erts_gc_info_request(Process *c_p) { - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); Eterm ref; ErtsGCInfoReq *gcirp; Eterm *hp; @@ -2986,7 +3076,9 @@ erts_gc_info_request(Process *c_p) } Eterm -erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp) +erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp, + Uint extra_heap_block, + Uint extra_old_heap_block_size) { ERTS_DECL_AM(bin_vheap_size); ERTS_DECL_AM(bin_vheap_block_size); @@ -3009,8 +3101,9 @@ erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp) AM_bin_old_vheap_block_size }; UWord values[] = { - OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0, - HEAP_SIZE(p), + OLD_HEAP(p) ? 
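/* old-heap block size, plus any old-heap block that was about to be allocated */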
OLD_HEND(p) - OLD_HEAP(p) + extra_old_heap_block_size + : extra_old_heap_block_size, + HEAP_SIZE(p) + extra_heap_block, MBUF_SIZE(p), HIGH_WATER(p) - HEAP_START(p), STACK_START(p) - p->stop, @@ -3056,6 +3149,130 @@ erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp) return res; } +static int +reached_max_heap_size(Process *p, Uint total_heap_size, + Uint extra_heap_size, Uint extra_old_heap_size) +{ + Uint max_heap_flags = MAX_HEAP_SIZE_FLAGS_GET(p); + if (IS_TRACED_FL(p, F_TRACE_GC) || + max_heap_flags & MAX_HEAP_SIZE_LOG) { + Eterm msg; + Uint size = 0; + Eterm *o_hp , *hp; + erts_process_gc_info(p, &size, NULL, extra_heap_size, + extra_old_heap_size); + o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, size * sizeof(Eterm)); + msg = erts_process_gc_info(p, NULL, &hp, extra_heap_size, + extra_old_heap_size); + + if (max_heap_flags & MAX_HEAP_SIZE_LOG) { + int alive = erts_is_alive; + erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); + Eterm *o_hp, *hp, args = NIL; + + /* Build the format message */ + erts_dsprintf(dsbufp, " Process: ~p "); + if (alive) + erts_dsprintf(dsbufp, "on node ~p"); + erts_dsprintf(dsbufp, "~n Context: maximum heap size reached~n"); + erts_dsprintf(dsbufp, " Max Heap Size: ~p~n"); + erts_dsprintf(dsbufp, " Total Heap Size: ~p~n"); + erts_dsprintf(dsbufp, " Kill: ~p~n"); + erts_dsprintf(dsbufp, " Error Logger: ~p~n"); + erts_dsprintf(dsbufp, " GC Info: ~p~n"); + + /* Build the args in reverse order */ + o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, 2*(alive ? 7 : 6) * sizeof(Eterm)); + args = CONS(hp, msg, args); hp += 2; + args = CONS(hp, am_true, args); hp += 2; + args = CONS(hp, (max_heap_flags & MAX_HEAP_SIZE_KILL ? am_true : am_false), args); hp += 2; + args = CONS(hp, make_small(total_heap_size), args); hp += 2; + args = CONS(hp, make_small(MAX_HEAP_SIZE_GET(p)), args); hp += 2; + if (alive) { + args = CONS(hp, erts_this_node->sysname, args); hp += 2; + } + args = CONS(hp, p->common.id, args); hp += 2; + + erts_send_error_term_to_logger(p->group_leader, dsbufp, args); + erts_free(ERTS_ALC_T_TMP, o_hp); + } + + if (IS_TRACED_FL(p, F_TRACE_GC)) + trace_gc(p, am_gc_max_heap_size, 0, msg); + + erts_free(ERTS_ALC_T_TMP, o_hp); + } + /* returns true if we should kill the process */ + return max_heap_flags & MAX_HEAP_SIZE_KILL; +} + +Eterm +erts_max_heap_size_map(Sint max_heap_size, Uint max_heap_flags, + Eterm **hpp, Uint *sz) +{ + if (!hpp) { + *sz += (2*3 + 1 + MAP_HEADER_FLATMAP_SZ); + return THE_NON_VALUE; + } else { + Eterm *hp = *hpp; + Eterm keys = TUPLE3(hp, am_error_logger, am_kill, am_size); + flatmap_t *mp; + hp += 4; + mp = (flatmap_t*) hp; + mp->thing_word = MAP_HEADER_FLATMAP; + mp->size = 3; + mp->keys = keys; + hp += MAP_HEADER_FLATMAP_SZ; + *hp++ = max_heap_flags & MAX_HEAP_SIZE_LOG ? am_true : am_false; + *hp++ = max_heap_flags & MAX_HEAP_SIZE_KILL ? 
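/* kill flag as a boolean */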
am_true : am_false; + *hp++ = make_small(max_heap_size); + *hpp = hp; + return make_flatmap(mp); + } +} + +int +erts_max_heap_size(Eterm arg, Uint *max_heap_size, Uint *max_heap_flags) +{ + Sint sz; + *max_heap_flags = H_MAX_FLAGS; + if (is_small(arg)) { + sz = signed_val(arg); + *max_heap_flags = H_MAX_FLAGS; + } else if (is_map(arg)) { + const Eterm *size = erts_maps_get(am_size, arg); + const Eterm *kill = erts_maps_get(am_kill, arg); + const Eterm *log = erts_maps_get(am_error_logger, arg); + if (size && is_small(*size)) { + sz = signed_val(*size); + } else { + /* size is mandatory */ + return 0; + } + if (kill) { + if (*kill == am_true) + *max_heap_flags |= MAX_HEAP_SIZE_KILL; + else if (*kill == am_false) + *max_heap_flags &= ~MAX_HEAP_SIZE_KILL; + else + return 0; + } + if (log) { + if (*log == am_true) + *max_heap_flags |= MAX_HEAP_SIZE_LOG; + else if (*log == am_false) + *max_heap_flags &= ~MAX_HEAP_SIZE_LOG; + else + return 0; + } + } else + return 0; + if (sz < 0) + return 0; + *max_heap_size = sz; + return 1; +} + #if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG) static int diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h index 8a6ff99990..54ea9ca3c0 100644 --- a/erts/emulator/beam/erl_gc.h +++ b/erts/emulator/beam/erl_gc.h @@ -135,7 +135,7 @@ typedef struct { #define ERTS_PROCESS_GC_INFO_MAX_TERMS (11) /* number of elements in process_gc_info*/ #define ERTS_PROCESS_GC_INFO_MAX_SIZE \ (ERTS_PROCESS_GC_INFO_MAX_TERMS * (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE)) -Eterm erts_process_gc_info(struct process*, Uint *, Eterm **); +Eterm erts_process_gc_info(struct process*, Uint *, Eterm **, Uint, Uint); void erts_gc_info(ErtsGCInfo *gcip); void erts_init_gc(void); @@ -155,5 +155,7 @@ void erts_offset_off_heap(struct erl_off_heap*, Sint, Eterm*, Eterm*); void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*); void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*); void erts_free_heap_frags(struct process* p); +Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *); +int erts_max_heap_size(Eterm, Uint *, Uint *); #endif /* __ERL_GC_H__ */ diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c index 8e201d5711..ebeff51aac 100644 --- a/erts/emulator/beam/erl_hl_timer.c +++ b/erts/emulator/beam/erl_hl_timer.c @@ -1247,8 +1247,8 @@ hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs) if (!ERTS_PROC_IS_EXITING(proc)) { ErtsMessage *mp = erts_alloc_message(0, NULL); mp->data.heap_frag = tmr->btm.bp; - erts_queue_message(proc, &proc_locks, mp, - tmr->btm.message); + erts_queue_message(proc, proc_locks, mp, + tmr->btm.message, am_clock_service); erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND); queued_message = 1; proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND; @@ -1766,7 +1766,7 @@ setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos, if (is_not_internal_pid(rcvr) && is_not_atom(rcvr)) goto badarg; - esdp = ERTS_PROC_GET_SCHDATA(c_p); + esdp = erts_proc_sched_data(c_p); hp = HAlloc(c_p, REF_THING_SIZE); ref = erts_sched_make_ref_in_buffer(esdp, hp); @@ -1871,7 +1871,7 @@ access_sched_local_btm(Process *c_p, Eterm pid, if (!c_p) esdp = erts_get_scheduler_data(); else { - esdp = ERTS_PROC_GET_SCHDATA(c_p); + esdp = erts_proc_sched_data(c_p); ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data()); } @@ -1980,7 +1980,7 @@ access_sched_local_btm(Process *c_p, Eterm pid, ERTS_HLT_ASSERT(hp + (async ? 
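/* the async reply was built with 4 reserved heap words, the synchronous one with 3 */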
4 : 3) == hp_end); - erts_queue_message(proc, &proc_locks, mp, msg); + erts_queue_message(proc, proc_locks, mp, msg, am_clock_service); if (c_p) proc_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -2111,7 +2111,7 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp, msg = TUPLE3(hp, tag, tref, res); - erts_queue_message(c_p, &proc_locks, mp, msg); + erts_queue_message(c_p, proc_locks, mp, msg, am_clock_service); proc_locks &= ~ERTS_PROC_LOCK_MAIN; if (proc_locks) @@ -2138,7 +2138,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info) goto no_timer; } - esdp = ERTS_PROC_GET_SCHDATA(c_p); + esdp = erts_proc_sched_data(c_p); trefn = internal_ref_numbers(tref); sid = erts_get_ref_numbers_thr_id(trefn); @@ -2363,7 +2363,7 @@ typedef struct { int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp) { - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p); + ErtsSchedulerData *esdp = erts_proc_sched_data(p); ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}}; ErtsBifTimerYieldState *ysp; int res; @@ -2409,7 +2409,7 @@ detach_bif_timer(ErtsHLTimer *tmr, void *vesdp) int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp) { - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p); + ErtsSchedulerData *esdp = erts_proc_sched_data(p); ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}}; ErtsBifTimerYieldState *ysp; int res; @@ -2516,7 +2516,7 @@ BIF_RETTYPE send_after_3(BIF_ALIST_3) ErtsMonotonicTime timeout_pos; int short_time, tres; - tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL, + tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL, 0, &timeout_pos, &short_time); if (tres != 0) BIF_ERROR(BIF_P, BADARG); @@ -2534,7 +2534,7 @@ BIF_RETTYPE send_after_4(BIF_ALIST_4) if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor)) BIF_ERROR(BIF_P, BADARG); - tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL, + tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL, abs, &timeout_pos, &short_time); if (tres != 0) BIF_ERROR(BIF_P, BADARG); @@ -2548,7 +2548,7 @@ BIF_RETTYPE start_timer_3(BIF_ALIST_3) ErtsMonotonicTime timeout_pos; int short_time, tres; - tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL, + tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL, 0, &timeout_pos, &short_time); if (tres != 0) BIF_ERROR(BIF_P, BADARG); @@ -2566,7 +2566,7 @@ BIF_RETTYPE start_timer_4(BIF_ALIST_4) if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor)) BIF_ERROR(BIF_P, BADARG); - tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL, + tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL, abs, &timeout_pos, &short_time); if (tres != 0) BIF_ERROR(BIF_P, BADARG); @@ -2720,7 +2720,7 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo, int erts_set_proc_timer_term(Process *c_p, Eterm etmo) { - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); ErtsMonotonicTime tmo, timeout_pos; int short_time, tres; @@ -2742,7 +2742,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo) void erts_set_proc_timer_uword(Process *c_p, UWord tmo) { - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer) == ERTS_PTMR_NONE); @@ -2776,7 +2776,7 @@ erts_cancel_proc_timer(Process *c_p) 
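/* same ERTS_PROC_GET_SCHDATA() -> erts_proc_sched_data() substitution as in the other timer BIFs above */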
erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE); return; } - continue_cancel_ptimer(ERTS_PROC_GET_SCHDATA(c_p), + continue_cancel_ptimer(erts_proc_sched_data(c_p), (ErtsTimer *) tval); } diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index fcd2739ac3..0649fb68de 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -164,6 +164,8 @@ int erts_use_sender_punish; Uint display_items; /* no of items to display in traces etc */ int H_MIN_SIZE; /* The minimum heap grain */ int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/ +int H_MAX_SIZE; /* The maximum heap size */ +int H_MAX_FLAGS; /* The maximum heap flags */ Uint32 erts_debug_flags; /* Debug flags. */ int erts_backtrace_depth; /* How many functions to show in a backtrace @@ -576,6 +578,10 @@ void erts_usage(void) H_DEFAULT_SIZE); erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n", VH_DEFAULT_SIZE); + erts_fprintf(stderr, "-hmax size set maximum heap size in words (default %d)\n", + H_DEFAULT_MAX_SIZE); + erts_fprintf(stderr, "-hmaxk bool enable or disable kill at max heap size (default true)\n"); + erts_fprintf(stderr, "-hmaxel bool enable or disable error_logger report at max heap size (default true)\n"); erts_fprintf(stderr, "-hpds size initial process dictionary size (default %d)\n", erts_pd_initial_size); erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n"); @@ -759,6 +765,8 @@ early_init(int *argc, char **argv) /* erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE; H_MIN_SIZE = H_DEFAULT_SIZE; BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE; + H_MAX_SIZE = H_DEFAULT_MAX_SIZE; + H_MAX_FLAGS = MAX_HEAP_SIZE_KILL|MAX_HEAP_SIZE_LOG; erts_initialized = 0; @@ -1484,10 +1492,13 @@ erl_start(int argc, char **argv) char *sub_param = argv[i]+2; /* set default heap size * - * h|ms - min_heap_size - * h|mbs - min_bin_vheap_size - * h|pds - erts_pd_initial_size - * h|mqd - message_queue_data + * h|ms - min_heap_size + * h|mbs - min_bin_vheap_size + * h|pds - erts_pd_initial_size + * h|mqd - message_queue_data + * h|max - max_heap_size + * h|maxk - max_heap_kill + * h|maxel - max_heap_error_logger * */ if (has_prefix("mbs", sub_param)) { @@ -1530,6 +1541,41 @@ erl_start(int argc, char **argv) "Invalid message_queue_data flag: %s\n", arg); erts_usage(); } + } else if (has_prefix("maxk", sub_param)) { + arg = get_arg(sub_param+4, argv[i+1], &i); + if (strcmp(arg,"true") == 0) { + H_MAX_FLAGS |= MAX_HEAP_SIZE_KILL; + } else if (strcmp(arg,"false") == 0) { + H_MAX_FLAGS &= ~MAX_HEAP_SIZE_KILL; + } else { + erts_fprintf(stderr, "bad max heap kill %s\n", arg); + erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, ("using max heap kill %d\n", H_MAX_FLAGS)); + } else if (has_prefix("maxel", sub_param)) { + arg = get_arg(sub_param+5, argv[i+1], &i); + if (strcmp(arg,"true") == 0) { + H_MAX_FLAGS |= MAX_HEAP_SIZE_LOG; + } else if (strcmp(arg,"false") == 0) { + H_MAX_FLAGS &= ~MAX_HEAP_SIZE_LOG; + } else { + erts_fprintf(stderr, "bad max heap error logger %s\n", arg); + erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, ("using max heap log %d\n", H_MAX_FLAGS)); + } else if (has_prefix("max", sub_param)) { + arg = get_arg(sub_param+3, argv[i+1], &i); + if ((H_MAX_SIZE = atoi(arg)) < 0) { + erts_fprintf(stderr, "bad max heap size %s\n", arg); + erts_usage(); + } + if (H_MAX_SIZE < H_MIN_SIZE && H_MAX_SIZE) { + erts_fprintf(stderr, "max heap size (%s) is not allowed to be " + "smaller than min heap size (%d)\n", + arg, 
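/* the configured -hms minimum */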
H_MIN_SIZE); + erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, ("using max heap size %d\n", H_MAX_SIZE)); } else { /* backward compatibility */ arg = get_arg(argv[i]+2, argv[i+1], &i); diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 9beff52835..579f6e427d 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -32,6 +32,7 @@ #include "erl_process.h" #include "erl_binary.h" #include "dtrace-wrapper.h" +#include "beam_bp.h" ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message_ref, ErtsMessageRef, @@ -251,9 +252,10 @@ erts_realloc_shrink_message(ErtsMessage *mp, Uint sz, Eterm *brefs, Uint brefs_s void erts_queue_dist_message(Process *rcvr, - ErtsProcLocks *rcvr_locks, + ErtsProcLocks rcvr_locks, ErtsDistExternal *dist_ext, - Eterm token) + Eterm token, + Eterm from) { ErtsMessage* mp; #ifdef USE_VM_PROBES @@ -265,7 +267,7 @@ erts_queue_dist_message(Process *rcvr, erts_aint_t state; #endif - ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr)); + ERTS_SMP_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr)); mp = erts_alloc_message(0, NULL); mp->data.dist_ext = dist_ext; @@ -280,10 +282,10 @@ erts_queue_dist_message(Process *rcvr, ERL_MESSAGE_TOKEN(mp) = token; #ifdef ERTS_SMP - if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) { + if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) { if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) { ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ; - if (*rcvr_locks & ERTS_PROC_LOCK_STATUS) { + if (rcvr_locks & ERTS_PROC_LOCK_STATUS) { erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS); need_locks |= ERTS_PROC_LOCK_STATUS; } @@ -293,7 +295,7 @@ erts_queue_dist_message(Process *rcvr, state = erts_smp_atomic32_read_acqb(&rcvr->state); if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) { - if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) + if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); /* Drop message if receiver is exiting or has a pending exit ... */ erts_cleanup_messages(mp); @@ -301,10 +303,13 @@ erts_queue_dist_message(Process *rcvr, else #endif if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) { + if (from == am_Empty) + from = dist_ext->dep->sysname; + /* Ahh... need to decode it in order to trace it... 
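(the receive trace must see the decoded term, not the external format)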
*/ - if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) + if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); - if (!erts_decode_dist_message(rcvr, *rcvr_locks, mp, 0)) + if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0)) erts_free_message(mp); else { Eterm msg = ERL_MESSAGE_TERM(mp); @@ -324,7 +329,7 @@ erts_queue_dist_message(Process *rcvr, tok_label, tok_lastcnt, tok_serial); } #endif - erts_queue_message(rcvr, rcvr_locks, mp, msg); + erts_queue_message(rcvr, rcvr_locks, mp, msg, from); } } else { @@ -351,12 +356,12 @@ erts_queue_dist_message(Process *rcvr, LINK_MESSAGE(rcvr, mp, &mp->next, 1); - if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) + if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); erts_proc_notify_new_message(rcvr, #ifdef ERTS_SMP - *rcvr_locks + rcvr_locks #else 0 #endif @@ -366,14 +371,15 @@ erts_queue_dist_message(Process *rcvr, /* Add messages last in message queue */ static Sint -queue_messages(Process *c_p, - Process* receiver, +queue_messages(Process* receiver, erts_aint32_t *receiver_state, - ErtsProcLocks *receiver_locks, + ErtsProcLocks receiver_locks, ErtsMessage* first, ErtsMessage** last, - Uint len) + Uint len, + Eterm from) { + ErtsTracingEvent* te; Sint res; int locked_msgq = 0; erts_aint32_t state; @@ -386,12 +392,12 @@ queue_messages(Process *c_p, #ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ || - *receiver_locks == erts_proc_lc_my_proc_locks(receiver)); + receiver_locks == erts_proc_lc_my_proc_locks(receiver)); #endif - if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) { + if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) { if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) { - ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ; + ErtsProcLocks need_locks; if (receiver_state) state = *receiver_state; @@ -400,10 +406,11 @@ queue_messages(Process *c_p, if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) goto exiting; - if (*receiver_locks & ERTS_PROC_LOCK_STATUS) { - erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS); - need_locks |= ERTS_PROC_LOCK_STATUS; + need_locks = receiver_locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + if (need_locks) { + erts_smp_proc_unlock(receiver, need_locks); } + need_locks |= ERTS_PROC_LOCK_MSGQ; erts_smp_proc_lock(receiver, need_locks); } locked_msgq = 1; @@ -426,7 +433,7 @@ queue_messages(Process *c_p, res = receiver->msg.len; #ifdef ERTS_SMP - if (*receiver_locks & ERTS_PROC_LOCK_MAIN) { + if (receiver_locks & ERTS_PROC_LOCK_MAIN) { /* * We move 'in queue' to 'private queue' and place * message at the end of 'private queue' in order @@ -445,7 +452,10 @@ queue_messages(Process *c_p, LINK_MESSAGE(receiver, first, last, len); } - if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { + if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE) + && (te = &erts_receive_tracing[erts_active_bp_ix()], + te->on)) { + ErtsMessage *msg = first; #ifdef USE_VM_PROBES @@ -468,52 +478,50 @@ queue_messages(Process *c_p, tok_label, tok_lastcnt, tok_serial); } #endif - while (msg) { - trace_receive(receiver, ERL_MESSAGE_TERM(msg)); + trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te); msg = msg->next; } } - - if (locked_msgq) + if (locked_msgq) { erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); + } #ifdef ERTS_SMP - erts_proc_notify_new_message(receiver, *receiver_locks); + erts_proc_notify_new_message(receiver, receiver_locks); #else erts_proc_notify_new_message(receiver, 0); - 
ERTS_HOLE_CHECK(receiver); #endif return res; } static Sint -queue_message(Process *c_p, - Process* receiver, +queue_message(Process* receiver, erts_aint32_t *receiver_state, - ErtsProcLocks *receiver_locks, - ErtsMessage* mp, Eterm msg) + ErtsProcLocks receiver_locks, + ErtsMessage* mp, Eterm msg, Eterm from) { ERL_MESSAGE_TERM(mp) = msg; - return queue_messages(c_p, receiver, receiver_state, receiver_locks, - mp, &mp->next, 1 ); + return queue_messages(receiver, receiver_state, receiver_locks, + mp, &mp->next, 1, from); } Sint -erts_queue_message(Process* receiver, ErtsProcLocks *receiver_locks, - ErtsMessage* mp, Eterm msg) +erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks, + ErtsMessage* mp, Eterm msg, Eterm from) { - return queue_message(NULL, receiver, NULL, receiver_locks, mp, msg); + return queue_message(receiver, NULL, receiver_locks, mp, msg, from); } Sint -erts_queue_messages(Process* receiver, ErtsProcLocks *receiver_locks, - ErtsMessage* first, ErtsMessage** last, Uint len) +erts_queue_messages(Process* receiver, ErtsProcLocks receiver_locks, + ErtsMessage* first, ErtsMessage** last, Uint len, + Eterm from) { - return queue_messages(NULL, receiver, NULL, receiver_locks, - first, last, len); + return queue_messages(receiver, NULL, receiver_locks, + first, last, len, from); } void @@ -592,7 +600,9 @@ erts_try_alloc_message_on_heap(Process *pp, ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ)); - if ( + if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP) + goto in_message_fragment; + else if ( #if defined(ERTS_SMP) *plp & ERTS_PROC_LOCK_MAIN #else @@ -602,7 +612,7 @@ erts_try_alloc_message_on_heap(Process *pp, #ifdef ERTS_SMP try_on_heap: #endif - if ((*psp & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) + if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP) || (pp->flags & F_DISABLE_GC) || HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) { /* @@ -830,11 +840,11 @@ erts_send_message(Process* sender, #ifdef USE_VM_PROBES ERL_MESSAGE_DT_UTAG(mp) = utag; #endif - res = queue_message(sender, - receiver, + res = queue_message(receiver, &receiver_state, - receiver_locks, - mp, message); + *receiver_locks, + mp, message, + sender->common.id); BM_SWAP_TIMER(send,system); @@ -891,7 +901,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp, seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL); temptoken = copy_struct(token, sz_token, &hp, ohp); ERL_MESSAGE_TOKEN(mp) = temptoken; - erts_queue_message(to, to_locksp, mp, save); + erts_queue_message(to, *to_locksp, mp, save, am_system); } else { sz_from = IS_CONST(from) ? 0 : size_object(from); #ifdef SHCOPY_SEND @@ -913,7 +923,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp, ? 
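/* constant terms need no heap copy */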
from : copy_struct(from, sz_from, &hp, ohp)); save = TUPLE3(hp, am_EXIT, from_copy, mess); - erts_queue_message(to, to_locksp, mp, save); + erts_queue_message(to, *to_locksp, mp, save, am_system); } } diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 608cf552a2..851ac37fda 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -295,10 +295,10 @@ ErlHeapFragment* new_message_buffer(Uint); ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint, Eterm *, Uint); void free_message_buffer(ErlHeapFragment *); -void erts_queue_dist_message(Process*, ErtsProcLocks*, ErtsDistExternal *, Eterm); -Sint erts_queue_message(Process*, ErtsProcLocks*,ErtsMessage*, Eterm); -Sint erts_queue_messages(Process*, ErtsProcLocks*, - ErtsMessage*, ErtsMessage**, Uint); +void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm); +Sint erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm); +Sint erts_queue_messages(Process*, ErtsProcLocks, + ErtsMessage*, ErtsMessage**, Uint, Eterm); void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm); Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned); void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp); diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c index d0f305900a..544bc8b983 100644 --- a/erts/emulator/beam/erl_msacc.c +++ b/erts/emulator/beam/erl_msacc.c @@ -257,7 +257,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) { if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx); - erts_queue_message(rp, &rp_locks, msgp, msg); + erts_queue_message(rp, rp_locks, msgp, msg, am_system); if (esdp && msaccrp->req_sched == esdp->no) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -338,7 +338,7 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads) { #ifdef ERTS_ENABLE_MSACC ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET(); - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); Eterm ref; ErtsMSAccReq *msaccrp; Eterm *hp; diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index dc83a780a2..8a3007d52a 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -20,6 +20,23 @@ /* Erlang Native InterFace */ +/* + * The environment contains a pointer to the currently executing process. + * In the dirty case, however, this pointer does not point to the + * actual process structure of the executing process, but to a + * "shadow process structure". This is so that heap allocation can be + * handled without acquiring the main lock on + * the process. + * + * The dirty process is allowed to allocate on the heap without + * the main lock, i.e., to increment htop, but is not allowed to + * modify mbuf, offheap, etc. without the main lock. The dirty + * process moves the mbuf list and offheap list of the shadow process + * structure into the real structure when the dirty NIF call + * completes. 
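+ * When the dirty call returns, erts_post_nif() syncs htop back to the real process and appends any mbuf and off-heap entries accumulated on the shadow structure (see the merge logic in erts_post_nif() below).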
+ */ + + #ifdef HAVE_CONFIG_H # include "config.h" #endif @@ -81,6 +98,43 @@ void dtrace_nifenv_str(ErlNifEnv *, char *); #define MIN_HEAP_FRAG_SZ 200 static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp); +static ERTS_INLINE int +is_scheduler(void) +{ + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + if (!esdp) + return 0; + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) + return -1; + return 1; +} + +static ERTS_INLINE void +execution_state(ErlNifEnv *env, Process **c_pp, int *schedp) +{ + if (schedp) + *schedp = is_scheduler(); + if (c_pp) { + if (!env || env->proc->common.id == ERTS_INVALID_PID) + *c_pp = NULL; + else { + Process *c_p = env->proc; + + if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) + ASSERT(is_scheduler() > 0); + else { + c_p = env->proc->next; + ASSERT(is_scheduler() < 0); + ASSERT(c_p && env->proc->common.id == c_p->common.id); + } + + *c_pp = c_p; + + ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)); + } + } +} + static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need) { Eterm* hp = env->hp; @@ -124,6 +178,9 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, unsigned may_need) void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, Process* tracee) { +#ifdef ERTS_DIRTY_SCHEDULERS + ErtsSchedulerData *esdp; +#endif env->mod_nif = mod_nif; env->proc = p; env->hp = HEAP_TOP(p); @@ -133,6 +190,61 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, env->tmp_obj_list = NULL; env->exception_thrown = 0; env->tracee = tracee; + + ASSERT(p->common.id != ERTS_INVALID_PID); + +#ifdef ERTS_DIRTY_SCHEDULERS + esdp = erts_get_scheduler_data(); + ASSERT(esdp); + + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { +#ifdef DEBUG + erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + + ASSERT(p->scheduler_data == esdp); + ASSERT((state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS)) + && !(state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS))); +#endif + + } + else { + Process *sproc; +#ifdef DEBUG + erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + + ASSERT(!p->scheduler_data); + ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING) + && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))); +#endif + + sproc = esdp->dirty_shadow_process; + ASSERT(sproc); + ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC); + ASSERT(erts_smp_atomic32_read_nob(&sproc->state) + == (ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_PROXY)); + + sproc->next = p; + sproc->common.id = p->common.id; + sproc->htop = p->htop; + sproc->stop = p->stop; + sproc->hend = p->hend; + sproc->heap = p->heap; + sproc->abandoned_heap = p->abandoned_heap; + sproc->heap_sz = p->heap_sz; + sproc->high_water = p->high_water; + sproc->old_hend = p->old_hend; + sproc->old_htop = p->old_htop; + sproc->old_heap = p->old_heap; + sproc->mbuf = NULL; + sproc->mbuf_sz = 0; + ERTS_INIT_OFF_HEAP(&sproc->off_heap); + env->proc = sproc; + } +#endif } /* Temporary object header, auto-deallocated when NIF returns @@ -157,18 +269,75 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env) void erts_post_nif(ErlNifEnv* env) { erts_unblock_fpe(env->fpe_was_unmasked); - if (env->heap_frag == NULL) { - ASSERT(env->hp_end == HEAP_LIMIT(env->proc)); - ASSERT(env->hp >= HEAP_TOP(env->proc)); - ASSERT(env->hp <= HEAP_LIMIT(env->proc)); - HEAP_TOP(env->proc) = env->hp; + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) +#endif + { + ASSERT(is_scheduler() > 0); + if (env->heap_frag 
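/* NULL heap_frag: everything fit on the process heap */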
== NULL) { + ASSERT(env->hp_end == HEAP_LIMIT(env->proc)); + ASSERT(env->hp >= HEAP_TOP(env->proc)); + ASSERT(env->hp <= HEAP_LIMIT(env->proc)); + HEAP_TOP(env->proc) = env->hp; + } + else { + ASSERT(env->hp_end != HEAP_LIMIT(env->proc)); + ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size); + env->heap_frag->used_size = env->hp - env->heap_frag->mem; + ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size); + } + env->exiting = ERTS_PROC_IS_EXITING(env->proc); } - else { - ASSERT(env->hp_end != HEAP_LIMIT(env->proc)); - ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size); - env->heap_frag->used_size = env->hp - env->heap_frag->mem; - ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size); +#ifdef ERTS_DIRTY_SCHEDULERS + else { /* Dirty nif call using shadow process struct */ + Process *c_p = env->proc->next; + + ASSERT(is_scheduler() < 0); + ASSERT(env->proc->common.id == c_p->common.id); + + if (!env->heap_frag) { + ASSERT(env->hp_end == HEAP_LIMIT(c_p)); + ASSERT(env->hp >= HEAP_TOP(c_p)); + ASSERT(env->hp <= HEAP_LIMIT(c_p)); + HEAP_TOP(c_p) = env->hp; + } + else { + ASSERT(env->hp_end != HEAP_LIMIT(c_p)); + ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size); + + HEAP_TOP(c_p) = HEAP_TOP(env->proc); + env->heap_frag->used_size = env->hp - env->heap_frag->mem; + + ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size); + + if (c_p->mbuf) { + ErlHeapFragment *bp; + for (bp = env->proc->mbuf; bp->next; bp = bp->next) + ; + bp->next = c_p->mbuf; + } + + c_p->mbuf = env->proc->mbuf; + c_p->mbuf_sz += env->proc->mbuf_sz; + + } + + if (!c_p->off_heap.first) + c_p->off_heap.first = env->proc->off_heap.first; + else if (env->proc->off_heap.first) { + struct erl_off_heap_header *ohhp; + for (ohhp = env->proc->off_heap.first; ohhp->next; ohhp = ohhp->next) + ; + ohhp->next = c_p->off_heap.first; + c_p->off_heap.first = env->proc->off_heap.first; + } + c_p->off_heap.overhead += env->proc->off_heap.overhead; + + env->exiting = ERTS_PROC_IS_EXITING(c_p); + BUMP_ALL_REDS(c_p); } +#endif free_tmp_objs(env); } @@ -366,7 +535,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) rp_locks = 0; if (rp->common.id == c_p->common.id) rp_locks = c_p_locks; - erts_queue_messages(rp, &rp_locks, first, last, len); + erts_queue_messages(rp, rp_locks, first, last, len, c_p->common.id); if (rp->common.id == c_p->common.id) rp_locks &= ~c_p_locks; if (rp_locks) @@ -400,44 +569,44 @@ error: #endif -int -enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, - ErlNifEnv* msg_env, ERL_NIF_TERM msg) +int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, + ErlNifEnv* msg_env, ERL_NIF_TERM msg) { struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env; - ErtsProcLocks rp_locks = 0, lc_locks = 0, c_p_locks = ERTS_PROC_LOCK_MAIN; + ErtsProcLocks rp_locks = 0; +#ifdef ERTS_SMP + ErtsProcLocks lc_locks = 0; +#endif Process* rp; Process* c_p; ErtsMessage *mp; Eterm receiver = to_pid->pid; - int flush_me = 0; - ErtsSchedulerData *esdp = erts_get_scheduler_data(); - int scheduler = esdp ? 
esdp->no : 0; + int scheduler; - if (env != NULL) { - c_p = env->proc; - if (receiver == c_p->common.id) { - rp_locks = c_p_locks; - flush_me = 1; - } + execution_state(env, &c_p, &scheduler); + +#ifndef ERTS_SMP + if (!scheduler) { + erts_exit(ERTS_ABORT_EXIT, + "enif_send: called from non-scheduler thread on non-SMP VM"); + return 0; + } +#endif + + if (scheduler > 0) { /* Normal scheduler */ + rp = erts_proc_lookup(receiver); + if (c_p == rp) + rp_locks = ERTS_PROC_LOCK_MAIN; } else { -#ifdef ERTS_SMP - c_p = NULL; -#else - erts_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM"); -#endif + if (c_p && ERTS_PROC_IS_EXITING(c_p)) + return 0; + rp = erts_pid2proc_opt(c_p, 0, receiver, rp_locks, + ERTS_P2P_FLG_INC_REFC); } - - rp = (scheduler - ? erts_proc_lookup(receiver) - : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, - receiver, rp_locks, ERTS_P2P_FLG_INC_REFC)); - - if (rp == NULL) { - ASSERT(env == NULL || receiver != c_p->common.id); + if (rp == NULL) return 0; - } + if (menv) { flush_env(msg_env); mp = erts_alloc_message(0, NULL); @@ -462,22 +631,12 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ERL_MESSAGE_TERM(mp) = msg; - if (flush_me) { - flush_env(env); /* Needed for ERTS_HOLE_CHECK */ - } - if (!env || !env->tracee) { if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND)) trace_send(c_p, receiver, msg); - -#ifndef ERTS_SMP } -#endif - - erts_queue_message(rp, &rp_locks, mp, msg); #ifdef ERTS_SMP - } else { /* This clause is taken when the nif is called in the context of a traced process. We do not know which locks we have @@ -502,8 +661,6 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, #ifdef ERTS_ENABLE_LOCK_CHECK lc_locks = erts_proc_lc_my_proc_locks(rp); rp_locks |= lc_locks; - if (receiver == c_p->common.id) - c_p_locks |= lc_locks; #endif if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq || rp_locks & ERTS_PROC_LOCK_MSGQ || @@ -532,24 +689,28 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, msgq->last = &mp->next; erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); } + goto done; } else { erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); rp_locks &= ~ERTS_PROC_LOCK_TRACE; rp_locks |= ERTS_PROC_LOCK_MSGQ; - erts_queue_message(rp, &rp_locks, mp, msg); } } -#endif +#endif /* ERTS_SMP */ + + erts_queue_message(rp, rp_locks, mp, msg, + c_p ? c_p->common.id : am_undefined); +#ifdef ERTS_SMP +done: if (c_p == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks & ~lc_locks) erts_smp_proc_unlock(rp, rp_locks & ~lc_locks); - if (!scheduler) +#endif + if (scheduler <= 0) erts_proc_dec_refc(rp); - if (flush_me) { - cache_env(env); - } + return 1; } @@ -557,26 +718,52 @@ int enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg) { - - ErtsSchedulerData *esdp = erts_get_scheduler_data(); - int scheduler = esdp ? esdp->no : 0; + int iflags = (erts_port_synchronous_ops + ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + : ERTS_PORT_SFLGS_INVALID_LOOKUP); + int scheduler; + Process *c_p; Port *prt; + int res; - if (scheduler == 0 || !env) - return 0; + if (!env) + erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL"); + + execution_state(env, &c_p, &scheduler); - prt = erts_port_lookup(to_port->port_id, - (erts_port_synchronous_ops - ? 
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP - : ERTS_PORT_SFLGS_INVALID_LOOKUP)); + if (!c_p) + c_p = env->proc; + + if (scheduler > 0) + prt = erts_port_lookup(to_port->port_id, iflags); +#ifdef ERTS_DIRTY_SCHEDULERS + else if (scheduler < 0) { + if (ERTS_PROC_IS_EXITING(c_p)) + return 0; + prt = erts_thr_port_lookup(to_port->port_id, iflags); + } +#endif + else { + erts_exit(ERTS_ABORT_EXIT, "enif_port_command: " + "called from non-scheduler thread"); + } if (!prt) - return 0; + res = 0; + else { + + if (IS_TRACED_FL(prt, F_TRACE_RECEIVE)) + trace_port_receive(prt, c_p->common.id, am_command, msg); + + res = erts_port_output_async(prt, c_p->common.id, msg); + } - if (IS_TRACED_FL(prt, F_TRACE_RECEIVE)) - trace_port_receive(prt, env->proc->common.id, am_command, msg); +#ifdef ERTS_DIRTY_SCHEDULERS + if (scheduler < 0) + erts_port_dec_refc(prt); +#endif - return erts_port_output_async(prt, env->proc->common.id, msg); + return res; } ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term) @@ -1038,15 +1225,21 @@ Eterm enif_make_badarg(ErlNifEnv* env) Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason) { + Process *c_p; + + execution_state(env, &c_p, NULL); + env->exception_thrown = 1; - env->proc->fvalue = reason; - BIF_ERROR(env->proc, EXC_ERROR); + c_p->fvalue = reason; + BIF_ERROR(c_p, EXC_ERROR); } int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason) { if (env->exception_thrown && reason != NULL) { - *reason = env->proc->fvalue; + Process *c_p; + execution_state(env, &c_p, NULL); + *reason = c_p->fvalue; } return env->exception_thrown; } @@ -1440,56 +1633,71 @@ int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list return 1; } +int enif_is_current_process_alive(ErlNifEnv* env) +{ + Process *c_p; + int scheduler; + + execution_state(env, &c_p, &scheduler); + + if (!c_p) + erts_exit(ERTS_ABORT_EXIT, + "enif_is_current_process_alive: " + "Invalid environment"); + + if (!scheduler) + erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: " + "called from non-scheduler thread"); + + return !ERTS_PROC_IS_EXITING(c_p); +} + int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc) { - ErtsProcLocks rp_locks = 0; /* We don't need any locks, - just to check if it is alive */ - Eterm target = proc->pid; - Process* rp; - Process* c_p; - int scheduler = erts_get_scheduler_id() != 0; + int scheduler; - if (env != NULL) { - c_p = env->proc; - if (target == c_p->common.id) { - /* We are alive! */ - return 1; - } - } + execution_state(env, NULL, &scheduler); + + if (scheduler > 0) + return !!erts_proc_lookup(proc->pid); else { #ifdef ERTS_SMP - c_p = NULL; + Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0, + ERTS_P2P_FLG_INC_REFC); + if (rp) + erts_proc_dec_refc(rp); + return !!rp; #else - erts_exit(ERTS_ABORT_EXIT,"enif_is_process_alive: " - "env==NULL on non-SMP VM"); -#endif - } - - rp = (scheduler - ? 
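For the caller's side of the reworked enif_port_command(), a hypothetical NIF; the port is assumed to arrive in argv[0], and the function name is invented:

    #include "erl_nif.h"

    static ERL_NIF_TERM send_to_port(ErlNifEnv *env, int argc,
                                     const ERL_NIF_TERM argv[])
    {
        ErlNifPort port;
        ErlNifEnv *msg_env;
        ERL_NIF_TERM msg;
        int ok;

        if (argc != 1 || !enif_get_local_port(env, argv[0], &port))
            return enif_make_badarg(env);

        msg_env = enif_alloc_env();
        msg = enif_make_atom(msg_env, "hello");

        /* 0 means the port lookup failed (dead or invalid port); the
         * implementation above aborts if called off-scheduler. */
        ok = enif_port_command(env, &port, msg_env, msg);

        enif_free_env(msg_env);
        return enif_make_atom(env, ok ? "true" : "false");
    }

Note how the implementation takes a port reference only on the dirty-scheduler path (erts_thr_port_lookup) and therefore only drops one there.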
erts_proc_lookup(target) - : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, - target, rp_locks, ERTS_P2P_FLG_INC_REFC)); - if (rp == NULL) { - ASSERT(env == NULL || target != c_p->common.id); + erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: " + "called from non-scheduler thread"); return 0; - } else { - if (!scheduler) - erts_proc_dec_refc(rp); - return 1; +#endif } } int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port) { - /* only allowed if called from scheduler */ - if (erts_get_scheduler_id() == 0) - erts_exit(ERTS_ABORT_EXIT,"enif_is_port_alive: called from non-scheduler"); + int scheduler; + Uint32 iflags = (erts_port_synchronous_ops + ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + : ERTS_PORT_SFLGS_INVALID_LOOKUP); + + execution_state(env, NULL, &scheduler); - return erts_port_lookup( - port->port_id, - (erts_port_synchronous_ops - ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP - : ERTS_PORT_SFLGS_INVALID_LOOKUP)) != NULL; + if (scheduler > 0) + return !!erts_port_lookup(port->port_id, iflags); + else { +#ifdef ERTS_SMP + Port *prt = erts_thr_port_lookup(port->port_id, iflags); + if (prt) + erts_port_dec_refc(prt); + return !!prt; +#else + erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: " + "called from non-scheduler thread"); + return 0; +#endif + } } ERL_NIF_TERM @@ -1956,16 +2164,19 @@ void* enif_dlsym(void* handle, const char* symbol, int enif_consume_timeslice(ErlNifEnv* env, int percent) { + Process *proc; Sint reds; + execution_state(env, &proc, NULL); + ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100); if (percent < 1) percent = 1; else if (percent > 100) percent = 100; reds = ((CONTEXT_REDS+99) / 100) * percent; ASSERT(reds > 0 && reds <= CONTEXT_REDS); - BUMP_REDS(env->proc, reds); - return ERTS_BIF_REDS_LEFT(env->proc) == 0; + BUMP_REDS(proc, reds); + return ERTS_BIF_REDS_LEFT(proc) == 0; } /* @@ -2060,10 +2271,19 @@ static ERL_NIF_TERM init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp, int need_save, int argc, const ERL_NIF_TERM argv[]) { - Process* proc = env->proc; - Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; + Process* proc; + Eterm* reg; NifExport* ep; - int i; + int i, scheduler; + + execution_state(env, &proc, &scheduler); + + ASSERT(scheduler); + + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc) + & ERTS_PROC_LOCK_MAIN); + + reg = erts_proc_sched_data(proc)->x_reg_array; ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); if (!ep) @@ -2075,12 +2295,13 @@ init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirec } if (env->exception_thrown) { ep->exception_thrown = 1; - ep->rootset[0] = env->proc->fvalue; + ep->rootset[0] = proc->fvalue; } else { ep->exception_thrown = 0; ep->rootset[0] = NIL; } - ERTS_VBUMP_ALL_REDS(proc); + if (scheduler > 0) + ERTS_VBUMP_ALL_REDS(proc); for (i = 0; i < argc; i++) { if (need_save) ep->rootset[i+1] = reg[i]; @@ -2112,7 +2333,12 @@ static void restore_nif_mfa(Process* proc, NifExport* ep, int exception) { int i; - Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; + Eterm* reg = erts_proc_sched_data(proc)->x_reg_array; + + ERTS_SMP_LC_ASSERT(!(proc->static_flags + & ERTS_STC_FLG_SHADOW_PROC)); + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc) + & ERTS_PROC_LOCK_MAIN); proc->current[0] = ep->saved_mfa[0]; proc->current[1] = ep->saved_mfa[1]; @@ -2137,11 +2363,13 @@ restore_nif_mfa(Process* proc, NifExport* ep, int exception) static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { - Process* proc = 
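A hypothetical NIF contrasting the two liveness checks, with the pid assumed in argv[0]:

    #include "erl_nif.h"

    static ERL_NIF_TERM check_alive(ErlNifEnv *env, int argc,
                                    const ERL_NIF_TERM argv[])
    {
        ErlNifPid pid;

        if (argc != 1 || !enif_get_local_pid(env, argv[0], &pid))
            return enif_make_badarg(env);

        /* Asks about the calling process itself; per the aborts
         * above it requires a process-bound env on a scheduler
         * thread (normal or dirty). */
        if (!enif_is_current_process_alive(env))
            return enif_make_atom(env, "caller_exiting");

        return enif_make_atom(env, enif_is_process_alive(env, &pid)
                                       ? "true" : "false");
    }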
env->proc; + Process* proc; NifExport* ep; + execution_state(env, &proc, NULL); + ASSERT(argc == 1); - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc))); ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); ASSERT(ep); ASSERT(!ep->exception_thrown); @@ -2156,10 +2384,12 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { - Process* proc = env->proc; + Process* proc; NifExport* ep; - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + execution_state(env, &proc, NULL); + + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc))); ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); ASSERT(ep); ASSERT(ep->exception_thrown); @@ -2176,23 +2406,32 @@ dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) static ERL_NIF_TERM execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { - Process* proc = env->proc; - NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + Process* proc; + NativeFunPtr fp; NifExport* ep; ERL_NIF_TERM result; - ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + execution_state(env, &proc, NULL); + + fp = (NativeFunPtr) proc->current[6]; + + ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc))); /* * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution */ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); - ASSERT(ep); + ASSERT(ep && fp); ep->fp = NULL; erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); + + erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + result = (*fp)(env, argc, argv); + erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL) close_lib(env->mod_nif); /* @@ -2229,29 +2468,49 @@ execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) static ERTS_INLINE ERL_NIF_TERM schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[]) { - erts_aint32_t state, n, a; - Process* proc = env->proc; - NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + ERL_NIF_TERM result; + erts_aint32_t act, dirty_flag; + Process* proc; + NativeFunPtr fp; NifExport* ep; - int need_save; + int need_save, scheduler; + + execution_state(env, &proc, &scheduler); + if (scheduler <= 0) { + ASSERT(scheduler < 0); + erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + } + + fp = (NativeFunPtr) proc->current[6]; + + ASSERT(fp); ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND); - a = erts_smp_atomic32_read_acqb(&proc->state); - while (1) { - n = state = a; + if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND) + dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC; + else + dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC; + + act = erts_smp_atomic32_read_bor_nob(&proc->state, dirty_flag); + if (!(act & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))) + erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1); + else if ((act & (ERTS_PSFLG_DIRTY_CPU_PROC + | ERTS_PSFLG_DIRTY_IO_PROC)) & ~dirty_flag) { + /* clear other flag... 
*/ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND) - n |= ERTS_PSFLG_DIRTY_CPU_PROC; + dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC; else - n |= ERTS_PSFLG_DIRTY_IO_PROC; - a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state); - if (a == state) - break; + dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC; + erts_smp_atomic32_read_band_nob(&proc->state, ~dirty_flag); } - erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); need_save = (ep == NULL || is_non_value(ep->saved_mfa[0])); - return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv); + result = init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv); + if (scheduler <= 0) + erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + return result; } static ERL_NIF_TERM @@ -2276,11 +2535,14 @@ schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) static ERL_NIF_TERM execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { - Process* proc = env->proc; - NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + Process* proc; + NativeFunPtr fp; NifExport* ep; ERL_NIF_TERM result; + execution_state(env, &proc, NULL); + fp = (NativeFunPtr) proc->current[6]; + ASSERT(!env->exception_thrown); ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); ASSERT(ep); @@ -2303,10 +2565,10 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]), int argc, const ERL_NIF_TERM argv[]) { - Process* proc = env->proc; + Process* proc; NifExport* ep; ERL_NIF_TERM fun_name_atom, result; - int need_save; + int need_save, scheduler; if (argc > MAX_ARG) return enif_make_badarg(env); @@ -2314,6 +2576,13 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, if (enif_is_exception(env, fun_name_atom)) return fun_name_atom; + execution_state(env, &proc, &scheduler); + if (scheduler <= 0) { + if (scheduler == 0) + enif_make_badarg(env); + erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + } + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); need_save = (ep == NULL || is_non_value(ep->saved_mfa[0])); @@ -2325,12 +2594,15 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, sched_fun = schedule_dirty_io_nif; else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND) sched_fun = schedule_dirty_cpu_nif; - else - return enif_make_badarg(env); + else { + result = enif_make_badarg(env); + goto done; + } result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv); #else - return enif_make_badarg(env); + result = enif_make_badarg(env); #endif + goto done; } else result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv); @@ -2338,18 +2610,28 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); ASSERT(ep); ep->exp.code[1] = (BeamInstr) fun_name_atom; + +done: + if (scheduler < 0) + erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + return result; } -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT - int enif_is_on_dirty_scheduler(ErlNifEnv* env) { - return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data); -} + int scheduler; + Process *c_p; -#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */ + execution_state(env, &c_p, &scheduler); + + if (!c_p || !scheduler) + erts_exit(ERTS_ABORT_EXIT, "enif_is_on_dirty_scheduler: " + "Invalid env"); + + return scheduler < 0; +} /* Maps */ @@ -3050,16 +3332,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) (BeamInstr) BeamOp(op_i_generic_breakpoint)); g->orig_instr = (BeamInstr) 
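The flag handover above, rendered standalone with C11 atomics in place of erts_smp_atomic32_*: set the requested dirty flag, and if the other dirty flag was already set, clear it so at most one of CPU/IO remains set (the real code additionally takes a reference on the NIF library when neither flag was previously set):

    #include <stdatomic.h>
    #include <stdio.h>

    #define DIRTY_CPU (1u << 0)
    #define DIRTY_IO  (1u << 1)

    static void set_dirty(atomic_uint *state, unsigned want)
    {
        unsigned other = (want == DIRTY_CPU) ? DIRTY_IO : DIRTY_CPU;
        unsigned act = atomic_fetch_or(state, want);  /* read_bor */
        if (act & other)                      /* other flag was set */
            atomic_fetch_and(state, ~other);  /* ...so clear it */
    }

    int main(void)
    {
        atomic_uint state = ATOMIC_VAR_INIT(DIRTY_IO);
        set_dirty(&state, DIRTY_CPU);
        printf("0x%x\n", (unsigned) atomic_load(&state)); /* 0x1 */
        return 0;
    }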
BeamOp(op_call_nif); } +#ifdef ERTS_DIRTY_SCHEDULERS if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7)) && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) { -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT code_ptr[5+3] = (BeamInstr) f->fptr; code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ? (BeamInstr) schedule_dirty_io_nif : (BeamInstr) schedule_dirty_cpu_nif; -#endif } else +#endif code_ptr[5+1] = (BeamInstr) f->fptr; code_ptr[5+2] = (BeamInstr) lib; f = next_func(entry, &incr, f); diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index 3964f7f679..02c82415fd 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -28,7 +28,6 @@ # include "config.h" #endif -#include "erl_native_features_config.h" #include "erl_drv_nif.h" /* Version history: @@ -167,13 +166,11 @@ typedef int ErlNifTSDKey; typedef ErlDrvThreadOpts ErlNifThreadOpts; -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT typedef enum { - ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND, - ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND + ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DIRTY_JOB_CPU_BOUND, + ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DIRTY_JOB_IO_BOUND }ErlNifDirtyTaskFlags; -#endif typedef struct /* All fields all internal and may change */ { @@ -257,11 +254,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks; # define ERL_NIF_INIT_DECL(MODNAME) ERL_NIF_INIT_EXPORT ErlNifEntry* nif_init(ERL_NIF_INIT_ARGS) #endif -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT -# define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION -#else -# define ERL_NIF_ENTRY_OPTIONS 0 -#endif +#define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION #ifdef __cplusplus } diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h index a5acd86551..1bdac51d1f 100644 --- a/erts/emulator/beam/erl_nif_api_funcs.h +++ b/erts/emulator/beam/erl_nif_api_funcs.h @@ -166,26 +166,18 @@ ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_convert_time_unit, (ErlNifTime, ErlNifTim ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_now_time, (ErlNifEnv *env)); ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_cpu_time, (ErlNifEnv *env)); ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_unique_integer, (ErlNifEnv *env, ErlNifUniqueInteger properties)); +ERL_NIF_API_FUNC_DECL(int, enif_is_current_process_alive, (ErlNifEnv *env)); ERL_NIF_API_FUNC_DECL(int, enif_is_process_alive, (ErlNifEnv *env, ErlNifPid *pid)); ERL_NIF_API_FUNC_DECL(int, enif_is_port_alive, (ErlNifEnv *env, ErlNifPort *port_id)); ERL_NIF_API_FUNC_DECL(int, enif_get_local_port, (ErlNifEnv* env, ERL_NIF_TERM, ErlNifPort* port_id)); ERL_NIF_API_FUNC_DECL(int, enif_term_to_binary, (ErlNifEnv *env, ERL_NIF_TERM term, ErlNifBinary *bin)); ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsigned char* data, size_t sz, ERL_NIF_TERM *term, unsigned int opts)); ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg)); +ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); /* ** ADD NEW ENTRIES HERE (before this comment) !!! */ - - -/* - * Conditional EXPERIMENTAL stuff always last. - * Must be moved up and made unconditional to support binary backward - * compatibility on Windows. 
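The loader branch above routes a NIF through schedule_dirty_io_nif or schedule_dirty_cpu_nif when the entry is built against ABI 2.7+ with the dirty option and the function's flags field is nonzero. A hypothetical module exercising both flags (module and function names invented):

    #include "erl_nif.h"

    static ERL_NIF_TERM slow_io(ErlNifEnv *env, int argc,
                                const ERL_NIF_TERM argv[])
    {
        return enif_make_atom(env, "io_done");
    }

    static ERL_NIF_TERM crunch(ErlNifEnv *env, int argc,
                               const ERL_NIF_TERM argv[])
    {
        return enif_make_atom(env, "cpu_done");
    }

    /* A nonzero flags field is what makes f->flags true above. */
    static ErlNifFunc nif_funcs[] = {
        {"slow_io", 0, slow_io, ERL_NIF_DIRTY_JOB_IO_BOUND},
        {"crunch",  0, crunch,  ERL_NIF_DIRTY_JOB_CPU_BOUND},
    };

    ERL_NIF_INIT(my_mod, nif_funcs, NULL, NULL, NULL, NULL)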
- */ -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT -ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); -#endif #endif /* ERL_NIF_API_FUNC_DECL */ /* @@ -330,12 +322,14 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); # define enif_now_time ERL_NIF_API_FUNC_MACRO(enif_now_time) # define enif_cpu_time ERL_NIF_API_FUNC_MACRO(enif_cpu_time) # define enif_make_unique_integer ERL_NIF_API_FUNC_MACRO(enif_make_unique_integer) +# define enif_is_current_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_current_process_alive) # define enif_is_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_process_alive) # define enif_is_port_alive ERL_NIF_API_FUNC_MACRO(enif_is_port_alive) # define enif_get_local_port ERL_NIF_API_FUNC_MACRO(enif_get_local_port) # define enif_term_to_binary ERL_NIF_API_FUNC_MACRO(enif_term_to_binary) # define enif_binary_to_term ERL_NIF_API_FUNC_MACRO(enif_binary_to_term) # define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command) +# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler) /* ** ADD NEW ENTRIES HERE (before this comment) @@ -346,9 +340,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); * Must be moved up and made unconditional to support binary backward * compatibility on Windows. */ -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT -# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler) -#endif #endif /* ERL_NIF_API_FUNC_MACRO */ diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h index c2588e718d..f0075ca2b9 100644 --- a/erts/emulator/beam/erl_port.h +++ b/erts/emulator/beam/erl_port.h @@ -487,6 +487,7 @@ ERTS_GLB_INLINE Port*erts_id2port(Eterm id); ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32); ERTS_GLB_INLINE void erts_port_release(Port *); #ifdef ERTS_SMP +ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs); ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs); ERTS_GLB_INLINE void erts_thr_port_release(Port *prt); #endif @@ -626,6 +627,44 @@ erts_port_release(Port *prt) } #ifdef ERTS_SMP +/* + * erts_thr_port_lookup() and erts_port_dec_refc(prt) can + * be used by unmanaged threads in the SMP case. 
+ */ +ERTS_GLB_INLINE Port * +erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs) +{ + Port *prt; + ErtsThrPrgrDelayHandle dhndl; + + if (is_not_internal_port(id)) + return NULL; + + dhndl = erts_thr_progress_unmanaged_delay(); + + prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port, + internal_port_index(id)); + + if (!prt || prt->common.id != id) { + erts_thr_progress_unmanaged_continue(dhndl); + return NULL; + } + else { + erts_aint32_t state; + erts_port_inc_refc(prt); + + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_thr_progress_unmanaged_continue(dhndl); + + state = erts_atomic32_read_acqb(&prt->state); + if (state & invalid_sflgs) { + erts_port_dec_refc(prt); + return NULL; + } + + return prt; + } +} /* * erts_thr_id2port_sflgs() and erts_thr_port_release() can diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index bc32f3f167..a853ec585b 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -409,6 +409,10 @@ ErtsAlignedSchedulerData *erts_aligned_scheduler_data; #ifdef ERTS_DIRTY_SCHEDULERS ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data; ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data; +typedef union { + Process dsp; + char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))]; +} ErtsAlignedDirtyShadowProcess; #endif typedef union { @@ -589,6 +593,7 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS; valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP; + valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS; #endif #if HAVE_ERTS_MSEG valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK; @@ -611,7 +616,7 @@ dbg_chk_aux_work_val(erts_aint32_t value) #endif #ifdef ERTS_SMP -static void handle_pending_exiters(ErtsProcList *); +static void do_handle_pending_exiters(ErtsProcList *); static void wake_scheduler(ErtsRunQueue *rq); #endif @@ -679,6 +684,8 @@ erts_pre_init_process(void) = "MISC_THR_PRGR"; erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX] = "MISC"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX] + = "PENDING_EXITERS"; erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX] = "SET_TMO"; erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX] @@ -1139,7 +1146,7 @@ reply_sched_wall_time(void *vswtrp) hpp = &hp; } - erts_queue_message(rp, &rp_locks, mp, msg); + erts_queue_message(rp, rp_locks, mp, msg, am_system); if (swtrp->req_sched == esdp->no) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -1156,7 +1163,7 @@ reply_sched_wall_time(void *vswtrp) Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable) { - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); Eterm ref; ErtsSchedWallTimeReq *swtrp; Eterm *hp; @@ -1218,7 +1225,7 @@ reply_system_check(void *vscrp) hpp = &hp; msg = STORE_NC(hpp, ohp, scrp->ref); - erts_queue_message(rp, &rp_locks, mp, msg); + erts_queue_message(rp, rp_locks, mp, msg, am_system); if (scrp->req_sched == esdp->no) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -1234,7 +1241,7 @@ reply_system_check(void *vscrp) Eterm erts_system_check_request(Process *c_p) { - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); Eterm ref; ErtsSystemCheckReq *scrp; Eterm *hp; @@ -2336,6 +2343,30 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti #endif +#ifdef ERTS_SMP + +static ERTS_INLINE erts_aint32_t +handle_pending_exiters(ErtsAuxWorkData *awdp, 
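erts_thr_port_lookup() above follows a pin-then-validate discipline: resolve the pointer while thread progress is delayed, take a reference before leaving the protected region, then re-check the state flags and drop the reference if the port turned out to be invalid. The same shape as a standalone sketch, with a mutex standing in for the unmanaged thread-progress delay:

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stddef.h>

    struct obj { atomic_int refc; atomic_int invalid; };

    static pthread_mutex_t tab_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct obj *table_lookup(int id) { (void) id; return NULL; }

    static struct obj *thr_lookup(int id)
    {
        struct obj *o;
        pthread_mutex_lock(&tab_lock);          /* delayed region */
        o = table_lookup(id);
        if (o)
            atomic_fetch_add(&o->refc, 1);      /* pin before leaving */
        pthread_mutex_unlock(&tab_lock);
        if (o && atomic_load(&o->invalid)) {    /* state & invalid_sflgs */
            atomic_fetch_sub(&o->refc, 1);      /* caller never sees it */
            o = NULL;
        }
        return o;
    }

    int main(void) { return thr_lookup(42) == NULL ? 0 : 1; }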
erts_aint32_t aux_work, int waiting) +{ + ErtsProcList *pnd_xtrs; + ErtsRunQueue *rq; + + rq = awdp->esdp->run_queue; + unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); + + erts_smp_runq_lock(rq); + pnd_xtrs = rq->procs.pending_exiters; + rq->procs.pending_exiters = NULL; + erts_smp_runq_unlock(rq); + + if (erts_proclist_fetch(&pnd_xtrs, NULL)) + do_handle_pending_exiters(pnd_xtrs); + + return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS; +} + +#endif + static ERTS_INLINE erts_aint32_t handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { @@ -2427,6 +2458,10 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC, handle_misc_aux_work); +#ifdef ERTS_SMP + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS, + handle_pending_exiters); +#endif HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO, handle_setup_aux_work_timer); @@ -3979,6 +4014,33 @@ schedule_bound_processes(ErtsRunQueue *rq, } } +#ifdef ERTS_DIRTY_SCHEDULERS + +static ERTS_INLINE void +clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) +{ +#ifdef DEBUG + erts_aint32_t old; +#endif + erts_aint32_t qb = prio_bit; + if (rq == ERTS_DIRTY_CPU_RUNQ) + qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET; + else { + ASSERT(rq == ERTS_DIRTY_IO_RUNQ); + qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET; + } +#ifdef DEBUG + old = (int) +#else + (void) +#endif + erts_smp_atomic32_read_band_mb(&p->dirty_state, ~qb); + ASSERT(old & qb); +} + +#endif /* ERTS_DIRTY_SCHEDULERS */ + + static void evacuate_run_queue(ErtsRunQueue *rq, ErtsStuckBoundProcesses *sbpp) @@ -4141,29 +4203,8 @@ evacuate_run_queue(ErtsRunQueue *rq, } #ifdef ERTS_DIRTY_SCHEDULERS - - if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { - erts_aint32_t dqbit = qbit; -#ifdef DEBUG - erts_aint32_t old_dqbit; -#endif - - if (rq == ERTS_DIRTY_CPU_RUNQ) - dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET; - else { - ASSERT(rq == ERTS_DIRTY_IO_RUNQ); - dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET; - } - -#ifdef DEBUG - old_dqbit = (int) -#else - (void) -#endif - erts_smp_atomic32_read_band_mb(&real_proc->dirty_state, - ~dqbit); - ASSERT(old_dqbit & dqbit); - } + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) + clear_proc_dirty_queue_bit(real_proc, rq, qbit); #endif if (ERTS_PSFLG_BOUND & real_state) { @@ -5653,7 +5694,8 @@ static void init_scheduler_data(ErtsSchedulerData* esdp, int num, ErtsSchedulerSleepInfo* ssi, ErtsRunQueue* runq, - char** daww_ptr, size_t daww_sz) + char** daww_ptr, size_t daww_sz, + Process *shadow_proc) { esdp->timer_wheel = NULL; #ifdef ERTS_SMP @@ -5677,6 +5719,15 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->no = (Uint) num; ERTS_DIRTY_SCHEDULER_NO(esdp) = 0; } + esdp->dirty_shadow_process = shadow_proc; + if (shadow_proc) { + erts_init_empty_process(shadow_proc); + erts_smp_atomic32_init_nob(&shadow_proc->state, + (ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_PROXY)); + shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC; + } #else esdp->no = (Uint) num; #endif @@ -5928,31 +5979,41 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online for (ix = 0; ix < n; ix++) { ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix); init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix), - ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz); + ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz, + NULL); } #ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP - erts_aligned_dirty_cpu_scheduler_data = - erts_alloc_permanent_cache_aligned( - ERTS_ALC_T_SCHDLR_DATA, - 
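handle_pending_exiters() above uses the detach-then-process idiom: clear the aux-work bit, unhook the entire pending list inside one short run-queue critical section, and walk it only after the lock is released. In isolation:

    #include <pthread.h>
    #include <stdio.h>

    struct node { int pid; struct node *next; };

    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pending;   /* rq->procs.pending_exiters */

    static void handle(struct node *n) { printf("exiter %d\n", n->pid); }

    static void drain_pending(void)
    {
        struct node *list;
        pthread_mutex_lock(&rq_lock);
        list = pending;            /* detach the whole list at once */
        pending = NULL;
        pthread_mutex_unlock(&rq_lock);
        for (; list; list = list->next)
            handle(list);          /* process without the lock held */
    }

    int main(void)
    {
        struct node b = {2, NULL}, a = {1, &b};
        pending = &a;
        drain_pending();
        return 0;
    }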
no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData)); - for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { - ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); - init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix), - ERTS_DIRTY_CPU_RUNQ, NULL, 0); - } - erts_aligned_dirty_io_scheduler_data = - erts_alloc_permanent_cache_aligned( - ERTS_ALC_T_SCHDLR_DATA, - no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData)); - for (ix = 0; ix < no_dirty_io_schedulers; ix++) { - ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix); - init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix), - ERTS_DIRTY_IO_RUNQ, NULL, 0); + { + int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers; + int adspix = 0; + ErtsAlignedDirtyShadowProcess *adsp = + erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_SCHDLR_DATA, + dirty_scheds * sizeof(ErtsAlignedDirtyShadowProcess)); + + erts_aligned_dirty_cpu_scheduler_data = + erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_SCHDLR_DATA, + dirty_scheds * sizeof(ErtsAlignedSchedulerData)); + + erts_aligned_dirty_io_scheduler_data = + &erts_aligned_dirty_cpu_scheduler_data[no_dirty_cpu_schedulers]; + + for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); + init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix), + ERTS_DIRTY_CPU_RUNQ, NULL, 0, + &adsp[adspix++].dsp); + } + for (ix = 0; ix < no_dirty_io_schedulers; ix++) { + ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix); + init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix), + ERTS_DIRTY_IO_RUNQ, NULL, 0, + &adsp[adspix++].dsp); + } } #endif -#endif init_misc_aux_work(); init_swtreq_alloc(); @@ -6167,7 +6228,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p, erts_aint32_t dact, max_qbit; /* Termination should be done on an ordinary scheduler */ - if (actual & ERTS_PSFLG_EXITING) { + if ((*newp) & ERTS_PSFLG_EXITING) { *newp &= ~ERTS_PSFLGS_DIRTY_WORK; return ERTS_ENQUEUE_NORMAL_QUEUE; } @@ -6176,7 +6237,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p, * If we have system tasks, we enqueue on ordinary run-queue * and take care of those system tasks first. */ - if (actual & ERTS_PSFLG_ACTIVE_SYS) + if ((*newp) & ERTS_PSFLG_ACTIVE_SYS) return ERTS_ENQUEUE_NORMAL_QUEUE; dact = erts_smp_atomic32_read_mb(&c_p->dirty_state); @@ -6356,23 +6417,29 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st * schedule_out_process() return with c_rq locked. 
*/ static ERTS_INLINE int -schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy) +schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, + Process *proxy, int is_normal_sched) { - erts_aint32_t a, e, n, enq_prio = -1; + erts_aint32_t a, e, n, enq_prio = -1, running_flgs; int enqueue; /* < 0 -> use proxy */ ErtsRunQueue* runq; + if (is_normal_sched) + running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS; + else + running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS; + a = state; while (1) { n = e = a; - ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)); + ASSERT(a & running_flgs); enqueue = ERTS_ENQUEUE_NOT; - n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS); - if (a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS) + n &= ~running_flgs; + if ((a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS)) || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); } @@ -6485,8 +6552,9 @@ change_proc_schedule_state(Process *p, ErtsProcLocks locks) { /* - * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and - * ERTS_PSFLG_ACTIVE_SYS are not allowed to be + * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS, + * ERTS_PSFLG_DIRTY_RUNNING, ERTS_PSFLG_DIRTY_RUNNING_SYS + * and ERTS_PSFLG_ACTIVE_SYS are not allowed to be * altered by this function! */ erts_aint32_t a = *statep, n; @@ -6500,9 +6568,13 @@ change_proc_schedule_state(Process *p, ASSERT(!(a & ERTS_PSFLG_PROXY)); ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_ACTIVE_SYS)) == 0); ASSERT((set_state_flags & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_ACTIVE_SYS)) == 0); if (lock_status) @@ -6526,8 +6598,16 @@ change_proc_schedule_state(Process *p, if ((n & (ERTS_PSFLG_SUSPENDED | ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_IN_RUNQ - | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) { + | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE +#ifdef ERTS_DIRTY_SCHEDULERS + || (n & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING +#endif + ) { /* * Active and seemingly need to be enqueued, but * process may be in a run queue via proxy, need @@ -6551,7 +6631,9 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) && (!(a & (ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_RUNNING - | ERTS_PSFLG_RUNNING_SYS) + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS) && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))))) { /* We activated a prevously inactive process */ @@ -6693,7 +6775,10 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st) enqueue = ERTS_ENQUEUE_NOT; n |= ERTS_PSFLG_ACTIVE_SYS; - if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) + if (!(a & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS))) enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) @@ -6706,7 +6791,9 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st) if (!(a & (ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_RUNNING - | ERTS_PSFLG_RUNNING_SYS)) + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + 
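The schedule-out transition above is a compare-exchange retry loop, now parameterized by which pair of running flags (normal or dirty) to clear. The loop skeleton with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { RUNNING = 1u << 0, RUNNING_SYS = 1u << 1, ACTIVE = 1u << 2 };

    static unsigned clear_running(atomic_uint *state, unsigned running_flgs)
    {
        unsigned a = atomic_load(state), n;
        do {
            n = a & ~running_flgs;
            /* the real loop also derives an enqueue decision from
             * the freshly observed state 'a' on every retry */
        } while (!atomic_compare_exchange_weak(state, &a, n));
        return n;
    }

    int main(void)
    {
        atomic_uint st = ATOMIC_VAR_INIT(RUNNING | ACTIVE);
        printf("0x%x\n", clear_running(&st, RUNNING | RUNNING_SYS)); /* 0x4 */
        return 0;
    }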
| ERTS_PSFLG_DIRTY_RUNNING_SYS)) && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) { /* We activated a prevously inactive process */ profile_runnable_proc(p, am_active); @@ -6746,11 +6833,16 @@ suspend_process(Process *c_p, Process *p) if (c_p == p) { state = erts_smp_atomic32_read_bor_relb(&p->state, ERTS_PSFLG_SUSPENDED); - ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)); + ASSERT(state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)); suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1; } else { - while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) { + while (!(state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_EXITING))) { erts_aint32_t n, e; n = e = state; @@ -6776,8 +6868,11 @@ suspend_process(Process *c_p, Process *p) if ((state & (ERTS_PSFLG_ACTIVE | ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_DIRTY_ACTIVE_SYS | ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { /* We made process inactive */ profile_runnable_proc(p, am_inactive); @@ -7759,8 +7854,10 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal plp = proclist_create(p); erts_proclist_store_last(&msbp->blckrs, plp); p->flags |= have_blckd_flg; - ASSERT(schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)); - ASSERT(p->scheduler_data->no == 1); + ASSERT(normal + ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL) + : schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)); + ASSERT(erts_proc_sched_data(p)->no == 1); if (schdlr_sspnd.msb.ongoing) res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; else @@ -7780,7 +7877,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal if (schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0) || (normal && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL) == 1)) { - ASSERT(p->scheduler_data->no == 1); + ASSERT(erts_proc_sched_data(p)->no == 1); plp = proclist_create(p); erts_proclist_store_last(&msbp->blckrs, plp); if (schdlr_sspnd.msb.ongoing) @@ -7830,7 +7927,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal else res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED; } - ASSERT(p->scheduler_data); + ASSERT(erts_proc_sched_data(p)); } } else if (!msbp->ongoing) { @@ -8420,9 +8517,23 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, if (!suspend_process(c_p, rp)) { /* Other process running */ - ASSERT(ERTS_PSFLG_RUNNING + ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING) & erts_smp_atomic32_read_nob(&rp->state)); +#ifdef ERTS_DIRTY_SCHEDULERS + if (!suspend + && (erts_smp_atomic32_read_nob(&rp->state) + & ERTS_PSFLG_DIRTY_RUNNING)) { + ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; + if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, + pid, pid_locks|ERTS_PROC_LOCK_STATUS); + } + goto done; + } +#endif + running: /* @@ -8447,7 +8558,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, else { ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { - if (ERTS_PSFLG_RUNNING_SYS + if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS) & erts_smp_atomic32_read_nob(&rp->state)) { 
/* Executing system task... */ resume_process(rp, ERTS_PROC_LOCK_STATUS); @@ -8474,7 +8585,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, * from being selected for normal execution regardless * of locks held or not held on it... */ - ASSERT(!(ERTS_PSFLG_RUNNING + ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS) & erts_smp_atomic32_read_nob(&rp->state))); if (!suspend) @@ -9015,28 +9126,43 @@ erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched) } Eterm -erts_process_status(Process *c_p, ErtsProcLocks c_p_locks, - Process *rp, Eterm rpid) +erts_process_state2status(erts_aint32_t state) +{ + if (state & ERTS_PSFLG_FREE) + return am_free; + + if (state & ERTS_PSFLG_EXITING) + return am_exiting; + + if (state & ERTS_PSFLG_GC) + return am_garbage_collecting; + + if (state & ERTS_PSFLG_SUSPENDED) + return am_suspended; + + if (state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) + return am_running; + + if (state & (ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_DIRTY_ACTIVE_SYS)) + return am_runnable; + + return am_waiting; +} + +Eterm +erts_process_status(Process *rp, Eterm rpid) { Eterm res = am_undefined; Process *p = rp ? rp : erts_proc_lookup_raw(rpid); if (p) { erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); - if (state & ERTS_PSFLG_FREE) - res = am_free; - else if (state & ERTS_PSFLG_EXITING) - res = am_exiting; - else if (state & ERTS_PSFLG_GC) - res = am_garbage_collecting; - else if (state & ERTS_PSFLG_SUSPENDED) - res = am_suspended; - else if (state & ERTS_PSFLG_RUNNING) - res = am_running; - else if (state & ERTS_PSFLG_ACTIVE) - res = am_runnable; - else - res = am_waiting; + res = erts_process_state2status(state); } #ifdef ERTS_SMP else { @@ -9251,7 +9377,76 @@ scheduler_gc_proc(Process *c_p, int reds_left) return reds; } +static ERTS_INLINE void +clean_dirty_start(Process *p) +{ +#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ARCH_64) + void *ptr = ERTS_PROC_SET_DIRTY_CPU_START(p, NULL); + if (ptr) + erts_free(ERTS_ALC_T_DIRTY_START, ptr); +#endif +} +static ERTS_INLINE void +save_dirty_start(ErtsSchedulerData *esdp, Process *c_p) +{ +#ifdef ERTS_DIRTY_SCHEDULERS + if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) { + ErtsMonotonicTime time = erts_get_monotonic_time(esdp); +#ifdef ARCH_64 + ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) time); +#else + ErtsMonotonicTime *stimep; + + stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p); + if (!stimep) { + stimep = erts_alloc(ERTS_ALC_T_DIRTY_START, + sizeof(ErtsMonotonicTime)); + ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) stimep); + } + *stimep = time; +#endif + } +#endif +} + +static ERTS_INLINE int +get_dirty_reds(ErtsSchedulerData *esdp, Process *c_p) +{ + +#ifndef ERTS_DIRTY_SCHEDULERS + return -1; +#else + ErtsMonotonicTime stime, time; + + if (!ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) + return 1; + +#ifdef ARCH_64 + stime = (ErtsMonotonicTime) ERTS_PROC_GET_DIRTY_CPU_START(c_p); +#else + { + ErtsMonotonicTime *stimep; + stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p); + ASSERT(stimep); + stime = *stimep; + } +#endif + + time = erts_get_monotonic_time(esdp); + + ASSERT(stime && stime < time); + + time -= stime; + time = ERTS_MONOTONIC_TO_USEC(time); + time *= 2; + + if (time > INT_MAX) + return INT_MAX; + return (int) time; +#endif + +} /* * schedule() is called from BEAM (process_main()) or HiPE @@ -9283,6 +9478,7 @@ Process *schedule(Process *p, 
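get_dirty_reds() above prices a dirty-CPU job at two reductions per microsecond of elapsed monotonic time, clamped to INT_MAX, using the start timestamp stashed by save_dirty_start(). The cost model in isolation:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* start/now in microseconds of monotonic time; two "reductions"
     * are charged per microsecond, saturating at INT_MAX. */
    static int dirty_reds(int64_t start_usec, int64_t now_usec)
    {
        int64_t reds = (now_usec - start_usec) * 2;
        return reds > INT_MAX ? INT_MAX : (int) reds;
    }

    int main(void)
    {
        printf("%d\n", dirty_reds(0, 2500));   /* 5000 */
        return 0;
    }

On 32-bit platforms the timestamp does not fit in the process-specific-data word, hence the heap-allocated ErtsMonotonicTime cell guarded by ARCH_64 above.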
int calls) int reds; Uint32 flags; erts_aint32_t state = 0; /* Supress warning... */ + int is_normal_sched; ERTS_MSACC_DECLARE_CACHE(); @@ -9312,25 +9508,44 @@ Process *schedule(Process *p, int calls) */ if (!p) { /* NULL in the very first schedule() call */ esdp = erts_get_scheduler_data(); + is_normal_sched = !ERTS_SCHEDULER_IS_DIRTY(esdp); rq = erts_get_runq_current(esdp); ASSERT(esdp); fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls); actual_reds = reds = 0; erts_smp_runq_lock(rq); } else { - sched_out_proc: - #ifdef ERTS_SMP - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); +#ifdef ERTS_DIRTY_SCHEDULERS + esdp = p->scheduler_data; + is_normal_sched = esdp != NULL; + if (is_normal_sched) + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); + else { + esdp = erts_get_scheduler_data(); + ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); + } +#else esdp = p->scheduler_data; + is_normal_sched = 1; +#endif ASSERT(esdp->current_process == p || esdp->free_process == p); #else esdp = erts_scheduler_data; ASSERT(esdp->current_process == p); + is_normal_sched = 1; #endif - reds = actual_reds = calls - esdp->virtual_reds; + sched_out_proc: + + ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + + if (is_normal_sched) + reds = actual_reds = calls - esdp->virtual_reds; + else + reds = actual_reds = get_dirty_reds(esdp, p); + ASSERT(actual_reds >= 0); if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST) reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST; @@ -9377,7 +9592,7 @@ Process *schedule(Process *p, int calls) state = erts_smp_atomic32_read_nob(&p->state); #ifdef ERTS_SMP - if (state & ERTS_PSFLG_PENDING_EXIT) + if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT)) erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS)); if (p->pending_suspenders) @@ -9387,7 +9602,8 @@ Process *schedule(Process *p, int calls) esdp->reductions += reds; - schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */ + /* schedule_out_process() returns with rq locked! 
*/ + schedule_out_process(rq, state, p, proxy_p, is_normal_sched); proxy_p = NULL; ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq, @@ -9405,13 +9621,20 @@ Process *schedule(Process *p, int calls) ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER); if (state & ERTS_PSFLG_FREE) { + if (!is_normal_sched) { + ASSERT(p->flags & F_DELAYED_DEL_PROC); + erts_proc_dec_refc(p); + } + else { #ifdef ERTS_SMP - ASSERT(esdp->free_process == p); - esdp->free_process = NULL; + ASSERT(esdp->free_process == p); + esdp->free_process = NULL; #else - erts_proc_dec_refc(p); + erts_proc_dec_refc(p); #endif + } } + #ifdef ERTS_SMP ASSERT(!esdp->free_process); #endif @@ -9419,7 +9642,7 @@ Process *schedule(Process *p, int calls) ERTS_SMP_CHK_NO_PROC_LOCKS; - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (is_normal_sched) { if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS) (void) erts_get_monotonic_time(esdp); @@ -9433,23 +9656,15 @@ Process *schedule(Process *p, int calls) } - ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) - || !erts_thr_progress_is_blocking()); + ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); check_activities_to_run: { + erts_aint32_t psflg_running, psflg_running_sys; #ifdef ERTS_SMP ErtsMigrationPaths *mps; ErtsMigrationPath *mp; - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - ErtsProcList *pnd_xtrs = rq->procs.pending_exiters; - if (erts_proclist_fetch(&pnd_xtrs, NULL)) { - rq->procs.pending_exiters = NULL; - erts_smp_runq_unlock(rq); - handle_pending_exiters(pnd_xtrs); - erts_smp_runq_lock(rq); - } - + if (is_normal_sched) { if (rq->check_balance_reds <= 0) check_balance(rq); @@ -9466,32 +9681,35 @@ Process *schedule(Process *p, int calls) continue_check_activities_to_run: flags = ERTS_RUNQ_FLGS_GET_NOB(rq); continue_check_activities_to_run_known_flags: - ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) - || flags & ERTS_RUNQ_FLG_NONEMPTY); + ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY)); - if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) { - if (flags & ERTS_RUNQ_FLG_SUSPENDED) { - (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC); + if (!is_normal_sched) { + if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + & ERTS_SSI_FLG_SUSPENDED) { suspend_scheduler(esdp); - flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC); - flags |= ERTS_RUNQ_FLG_EXEC; - } - if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) { - flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); - flags &= ~ ERTS_RUNQ_FLG_CHK_CPU_BIND; - erts_sched_check_cpu_bind(esdp); } } -#ifdef ERTS_DIRTY_SCHEDULERS - else if (ERTS_SCHEDULER_IS_DIRTY(esdp) - && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) - & ERTS_SSI_FLG_SUSPENDED)) - suspend_scheduler(esdp); -#endif - - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + else { erts_aint32_t aux_work; - int leader_update = erts_thr_progress_update(esdp); + int leader_update; + + ASSERT(is_normal_sched); + + if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) { + if (flags & ERTS_RUNQ_FLG_SUSPENDED) { + (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC); + suspend_scheduler(esdp); + flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC); + flags |= ERTS_RUNQ_FLG_EXEC; + } + if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) { + flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); + flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND; + erts_sched_check_cpu_bind(esdp); + } + } + + leader_update = erts_thr_progress_update(esdp); aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); if (aux_work | leader_update) { erts_smp_runq_unlock(rq); @@ -9517,19 
+9735,13 @@ Process *schedule(Process *p, int calls) flags = ERTS_RUNQ_FLGS_GET_NOB(rq); -#ifdef ERTS_DIRTY_SCHEDULERS - if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) { - /* - * TODO: if halt in progress, need to put the dirty scheduler - * to sleep somewhere around here to prevent it from picking up - * new work - */ + if (!is_normal_sched && rq->halt_in_progress) { + /* Wait for emulator to terminate... */ + while (1) + erts_milli_sleep(1000*1000); } - else -#endif - - if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start) - || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) { + else if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start) + || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) { /* Prepare for scheduler wait */ #ifdef ERTS_SMP ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); @@ -9543,7 +9755,7 @@ Process *schedule(Process *p, int calls) if (flags & ERTS_RUNQ_FLG_INACTIVE) empty_runq(rq); else { - if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && try_steal_task(rq)) + if (is_normal_sched && try_steal_task(rq)) goto continue_check_activities_to_run; empty_runq(rq); @@ -9572,9 +9784,9 @@ Process *schedule(Process *p, int calls) goto check_activities_to_run; } - else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && - (fcalls > input_reductions && - prepare_for_sys_schedule(!0))) { + else if (is_normal_sched + && (fcalls > input_reductions + && prepare_for_sys_schedule(!0))) { ErtsMonotonicTime current_time; /* * Schedule system-level activities. @@ -9688,11 +9900,17 @@ Process *schedule(Process *p, int calls) ASSERT(p); /* Wrong qmask in rq->flags? */ - if (ERTS_SCHEDULER_IS_DIRTY(esdp)) - psflg_band_mask = ~((erts_aint32_t) 0); - else + if (is_normal_sched) { + psflg_running = ERTS_PSFLG_RUNNING; + psflg_running_sys = ERTS_PSFLG_RUNNING_SYS; psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state) + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET)); + } + else { + psflg_running = ERTS_PSFLG_DIRTY_RUNNING; + psflg_running_sys = ERTS_PSFLG_DIRTY_RUNNING_SYS; + psflg_band_mask = ~((erts_aint32_t) 0); + } if (!(state & ERTS_PSFLG_PROXY)) psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ; @@ -9707,34 +9925,53 @@ Process *schedule(Process *p, int calls) state = erts_smp_atomic32_read_nob(&p->state); } +#ifdef ERTS_DIRTY_SCHEDULERS + if (!is_normal_sched) + clear_proc_dirty_queue_bit(p, rq, qbit); +#endif + while (1) { - erts_aint32_t exp, new, tmp; - tmp = new = exp = state; + erts_aint32_t exp, new; + int run_process; + new = exp = state; new &= psflg_band_mask; - if (!(state & (ERTS_PSFLG_RUNNING - | ERTS_PSFLG_RUNNING_SYS))) { - tmp = state & (ERTS_PSFLG_SUSPENDED - | ERTS_PSFLG_PENDING_EXIT - | ERTS_PSFLG_ACTIVE_SYS - | ERTS_PSFLG_DIRTY_ACTIVE_SYS); - if (tmp != ERTS_PSFLG_SUSPENDED) { - if (state & (ERTS_PSFLG_ACTIVE_SYS - | ERTS_PSFLG_DIRTY_ACTIVE_SYS)) - new |= ERTS_PSFLG_RUNNING_SYS; - else - new |= ERTS_PSFLG_RUNNING; - } + /* + * Run process if not already running (or free) + * or exiting and not running on a normal + * scheduler, and not suspended (and not in a + * state where suspend should be ignored). 
+ */ + run_process = (((!(state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS + | ERTS_PSFLG_FREE))) +#ifdef ERTS_DIRTY_SCHEDULERS + | (((state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_FREE + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_EXITING)) + == ERTS_PSFLG_EXITING) + & (!!is_normal_sched)) +#endif + ) + & ((state & (ERTS_PSFLG_SUSPENDED + | ERTS_PSFLG_EXITING + | ERTS_PSFLG_FREE + | ERTS_PSFLG_PENDING_EXIT + | ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_DIRTY_ACTIVE_SYS)) + != ERTS_PSFLG_SUSPENDED)); + if (run_process) { + if (state & (ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_DIRTY_ACTIVE_SYS)) + new |= psflg_running_sys; + else + new |= psflg_running; } state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp); if (state == exp) { - if ((state & (ERTS_PSFLG_RUNNING - | ERTS_PSFLG_RUNNING_SYS - | ERTS_PSFLG_FREE)) - || ((state & (ERTS_PSFLG_SUSPENDED - | ERTS_PSFLG_PENDING_EXIT - | ERTS_PSFLG_ACTIVE_SYS - | ERTS_PSFLG_DIRTY_ACTIVE_SYS)) - == ERTS_PSFLG_SUSPENDED)) { + if (!run_process) { if (proxy_p) { free_proxy_proc(proxy_p); proxy_p = NULL; @@ -9761,34 +9998,13 @@ Process *schedule(Process *p, int calls) erts_smp_runq_unlock(rq); -#ifdef ERTS_DIRTY_SCHEDULERS - if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { -#ifdef DEBUG - int old_dqbit; -#endif - int dqbit = qbit; - - if (rq == ERTS_DIRTY_CPU_RUNQ) - dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET; - else { - ASSERT(rq == ERTS_DIRTY_IO_RUNQ); - dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET; - } - -#ifdef DEBUG - old_dqbit = (int) -#else - (void) -#endif - erts_smp_atomic32_read_band_mb(&p->dirty_state, ~dqbit); - ASSERT(old_dqbit & dqbit); - } -#endif /* ERTS_DIRTY_SCHEDULERS */ - #endif /* ERTS_SMP */ } + if (!is_normal_sched) + save_dirty_start(esdp, p); + #ifdef ERTS_SMP if (flags & ERTS_RUNQ_FLG_PROTECTED) @@ -9805,9 +10021,7 @@ Process *schedule(Process *p, int calls) UWord old = ERTS_PROC_SCHED_ID(p, (UWord) esdp->no); int migrated = old && old != esdp->no; -#ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif + ASSERT(is_normal_sched); prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); @@ -9821,20 +10035,21 @@ Process *schedule(Process *p, int calls) erts_smp_spin_unlock(&erts_sched_stat.lock); } - ASSERT(!p->scheduler_data); - p->scheduler_data = esdp; - state = erts_smp_atomic32_read_nob(&p->state); -#ifdef ERTS_DIRTY_SCHEDULERS - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - if (!!(state & ERTS_PSFLGS_DIRTY_WORK) - & !(state & ERTS_PSFLG_ACTIVE_SYS)) { + ASSERT(!p->scheduler_data); +#ifndef ERTS_DIRTY_SCHEDULERS + p->scheduler_data = esdp; +#else /* ERTS_DIRTY_SCHEDULERS */ + if (is_normal_sched) { + if ((!!(state & ERTS_PSFLGS_DIRTY_WORK)) + & (!(state & ERTS_PSFLG_ACTIVE_SYS))) { /* Migrate to dirty scheduler... 
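The run_process computation above folds its subconditions together with bitwise & and | over normalized 0/1 values instead of short-circuit && and ||, so the whole decision is a single branch-free value feeding the cmpxchg. A reduced model of the idiom, with simplified flag names and masks that are not the real ERTS_PSFLG_* set:

    #include <stdio.h>

    enum { RUNNING = 1, DIRTY_RUNNING = 2, EXITING = 4,
           SUSPENDED = 8, FREE = 16 };

    static int may_run(unsigned state, int is_normal_sched)
    {
        int not_running = !(state & (RUNNING | DIRTY_RUNNING | FREE));
        /* a normal scheduler may also pick up an exiting process
         * even while it is still marked dirty-running */
        int exit_pickup = ((state & (RUNNING | FREE | EXITING)) == EXITING)
                          & (!!is_normal_sched);
        int not_suspended = ((state & (SUSPENDED | EXITING | FREE))
                             != SUSPENDED);
        return (not_running | exit_pickup) & not_suspended;
    }

    int main(void)
    {
        printf("%d\n", may_run(EXITING | DIRTY_RUNNING, 1)); /* 1 */
        printf("%d\n", may_run(EXITING | DIRTY_RUNNING, 0)); /* 0 */
        printf("%d\n", may_run(SUSPENDED, 1));               /* 0 */
        return 0;
    }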
*/ sunlock_sched_out_proc: erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); goto sched_out_proc; } + p->scheduler_data = esdp; } else { if (state & (ERTS_PSFLG_ACTIVE_SYS @@ -9870,7 +10085,10 @@ Process *schedule(Process *p, int calls) erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); - if (IS_TRACED(p)) { + /* Clear tracer if it has been removed */ + if (IS_TRACED(p) && erts_is_tracer_proc_enabled( + p, ERTS_PROC_LOCK_MAIN, &p->common)) { + if (state & ERTS_PSFLG_EXITING) { if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT)) trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting); @@ -9885,13 +10103,8 @@ Process *schedule(Process *p, int calls) } } - -#ifdef ERTS_SMP - /* Clears tracer if it has been removed */ - (void)ERTS_TRACER_PROC_IS_ENABLED(p); -#endif - - if (state & ERTS_PSFLG_RUNNING_SYS) { + if (state & (ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { /* * GC is normally never delayed when a process * is scheduled out, but might be when executing @@ -9905,7 +10118,7 @@ Process *schedule(Process *p, int calls) reds -= cost; if (reds <= 0 #ifdef ERTS_DIRTY_SCHEDULERS - || ERTS_SCHEDULER_IS_DIRTY(esdp) + || !is_normal_sched || (state & ERTS_PSFLGS_DIRTY_WORK) #endif ) { @@ -9913,8 +10126,8 @@ Process *schedule(Process *p, int calls) } } - ASSERT(state & ERTS_PSFLG_RUNNING_SYS); - ASSERT(!(state & ERTS_PSFLG_RUNNING)); + ASSERT(state & psflg_running_sys); + ASSERT(!(state & psflg_running)); while (1) { erts_aint32_t n, e; @@ -9926,8 +10139,8 @@ Process *schedule(Process *p, int calls) } n = e = state; - n &= ~ERTS_PSFLG_RUNNING_SYS; - n |= ERTS_PSFLG_RUNNING; + n &= ~psflg_running_sys; + n |= psflg_running; state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (state == e) { @@ -9935,8 +10148,8 @@ Process *schedule(Process *p, int calls) break; } - ASSERT(state & ERTS_PSFLG_RUNNING_SYS); - ASSERT(!(state & ERTS_PSFLG_RUNNING)); + ASSERT(state & psflg_running_sys); + ASSERT(!(state & psflg_running)); } } @@ -10024,7 +10237,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result) ASSERT(hp_start + hsz == hp); #endif - erts_queue_message(rp, &rp_locks, mp, msg); + erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id); if (c_p == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -10525,7 +10738,10 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) } state = erts_smp_atomic32_read_nob(&c_p->state); - ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) & state); + ASSERT((ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS) & state); while (!(state & ERTS_PSFLG_DELAYED_SYS) || prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) { @@ -10850,6 +11066,8 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } +static void delete_process(Process* p); + void erts_free_proc(Process *p) { @@ -10858,6 +11076,8 @@ erts_free_proc(Process *p) #endif ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); ASSERT(0 == erts_proc_read_refc(p)); + if (p->flags & F_DELAYED_DEL_PROC) + delete_process(p); erts_free(ERTS_ALC_T_PROC, (void *) p); } @@ -11021,9 +11241,13 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
p->min_heap_size = so->min_heap_size; p->min_vheap_size = so->min_vheap_size; p->max_gen_gcs = so->max_gen_gcs; + MAX_HEAP_SIZE_SET(p, so->max_heap_size); + MAX_HEAP_SIZE_FLAGS_SET(p, so->max_heap_flags); } else { p->min_heap_size = H_MIN_SIZE; p->min_vheap_size = BIN_VH_MIN_SIZE; + MAX_HEAP_SIZE_SET(p, H_MAX_SIZE); + MAX_HEAP_SIZE_FLAGS_SET(p, H_MAX_FLAGS); p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); } p->schedule_count = 0; @@ -11486,18 +11710,36 @@ erts_cleanup_empty_process(Process* p) #endif } -/* - * p must be the currently executing process. - */ static void delete_process(Process* p) { Eterm *heap; ErtsPSD *psd; + struct saved_calls *scb; + process_breakpoint_time_t *pbt; + void *nif_export; + VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id)); VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id, HEAP_START(p), HEAP_END(p), OLD_HEAP(p), OLD_HEND(p))); + scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL); + + if (scb) { + p->fcalls += CONTEXT_REDS; /* Reduction counting depends on this... */ + erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb); + } + + pbt = ERTS_PROC_SET_CALL_TIME(p, NULL); + if (pbt) + erts_free(ERTS_ALC_T_BPD, (void *) pbt); + + nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL); + if (nif_export) + erts_destroy_nif_export(nif_export); + + clean_dirty_start(p); + /* Cleanup psd */ psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); @@ -11600,7 +11842,8 @@ set_proc_exiting(Process *p, p->i = (BeamInstr *) beam_exit; #ifndef ERTS_SMP - if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)) { + if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) + && !(state & ERTS_PSFLG_GC)) { /* * I non smp case: * @@ -11629,7 +11872,10 @@ set_proc_self_exiting(Process *c_p) ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); state = erts_smp_atomic32_read_nob(&c_p->state); - ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)); + ASSERT(state & (ERTS_PSFLG_RUNNING + |ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)); #ifdef DEBUG enqueue = @@ -11679,51 +11925,73 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) erts_smp_proc_unlock(c_p, xlocks); } +static void save_pending_exiter(Process *p, ErtsProcList *plp); + static void -handle_pending_exiters(ErtsProcList *pnd_xtrs) +do_handle_pending_exiters(ErtsProcList *pnd_xtrs) { /* 'list' is expected to have been fetched (i.e. not a ring anymore) */ ErtsProcList *plp = pnd_xtrs; while (plp) { - ErtsProcList *free_plp; - Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL); + ErtsProcList *next_plp = plp->next; + Process *p = erts_proc_lookup(plp->pid); if (p) { - if (erts_proclist_same(plp, p)) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); - if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) { - ASSERT(state & ERTS_PSFLG_PENDING_EXIT); - erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); + erts_aint32_t state; + /* + * If the process is running on a normal scheduler, the + * pending exit will soon be detected and handled by the + * scheduler running the process (at schedule in/out). 
+ */ + if (erts_smp_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) { + if (erts_proclist_same(plp, p)) { + state = erts_smp_atomic32_read_acqb(&p->state); + if (!(state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_EXITING))) { + ASSERT(state & ERTS_PSFLG_PENDING_EXIT); + erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); + } + } + erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + } + else { + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + if (erts_proclist_same(plp, p)) { + state = erts_smp_atomic32_read_acqb(&p->state); + if (!(state & (ERTS_PSFLG_RUNNING + | ERTS_PSFLG_RUNNING_SYS + | ERTS_PSFLG_EXITING))) { + /* + * Save process and try to acquire all + * locks at a later time... + */ + save_pending_exiter(p, plp); + plp = NULL; + } } + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); } - free_plp = plp; - plp = plp->next; - proclist_destroy(free_plp); + if (plp) + proclist_destroy(plp); + plp = next_plp; } } static void -save_pending_exiter(Process *p) +save_pending_exiter(Process *p, ErtsProcList *plp) { - ErtsProcList *plp; + ErtsSchedulerSleepInfo *ssi; ErtsRunQueue *rq; - ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - if (!esdp) - rq = RUNQ_READ_RQ(&p->run_queue); - else - rq = esdp->run_queue; - -#ifdef ERTS_DIRTY_SCHEDULERS - if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - rq = ERTS_RUNQ_IX(0); /* Handle on ordinary scheduler */ -#endif + rq = RUNQ_READ_RQ(&p->run_queue); + ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); - plp = proclist_create(p); + if (!plp) + plp = proclist_create(p); erts_smp_runq_lock(rq); @@ -11731,9 +11999,11 @@ save_pending_exiter(Process *p) non_empty_runq(rq); + ssi = rq->scheduler->ssi; + erts_smp_runq_unlock(rq); - wake_scheduler(rq); + set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); } #endif @@ -11766,7 +12036,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp, mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp); mess = copy_struct(exit_term, term_size, &hp, ohp); #endif - erts_queue_message(to, to_locksp, mp, mess); + erts_queue_message(to, *to_locksp, mp, mess, am_system); } else { Eterm temp_token; Uint sz_token; @@ -11787,7 +12057,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp, seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, to); temp_token = copy_struct(token, sz_token, &hp, ohp); ERL_MESSAGE_TOKEN(mp) = temp_token; - erts_queue_message(to, *to_locksp, mp, mess, am_system); } } @@ -11933,7 +12203,7 @@ send_exit_signal(Process *c_p, /* current process if and only if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { /* ... but we haven't got all locks on it ...
*/ - save_pending_exiter(rp); + save_pending_exiter(rp, NULL); /* * The pending exit will be discovered when next * process is scheduled in @@ -12401,10 +12671,8 @@ erts_continue_exit_process(Process *p) ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN; Eterm reason = p->fvalue; DistEntry *dep; - struct saved_calls *scb; - process_breakpoint_time_t *pbt; erts_aint32_t state; - void *nif_export; + int delay_del_proc = 0; #ifdef DEBUG int yield_allowed = 1; @@ -12547,7 +12815,7 @@ erts_continue_exit_process(Process *p) { /* Do *not* use erts_get_runq_proc() */ ErtsRunQueue *rq; - rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p)); + rq = erts_get_runq_current(erts_proc_sched_data(p)); erts_smp_runq_lock(rq); @@ -12593,16 +12861,24 @@ erts_continue_exit_process(Process *p) break; } +#ifdef ERTS_DIRTY_SCHEDULERS + if (a & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + p->flags |= F_DELAYED_DEL_PROC; + delay_del_proc = 1; + /* + * The dirty scheduler will also decrease + * refc when done... + */ + erts_proc_inc_refc(p); + } +#endif + if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ)) erts_proc_dec_refc(p); } dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL; - scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL); - if (scb) - p->fcalls += CONTEXT_REDS; /* Reduction counting depends on this... */ - pbt = ERTS_PROC_SET_CALL_TIME(p, NULL); - nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL); erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); #ifdef BM_COUNTERS @@ -12642,22 +12918,14 @@ erts_continue_exit_process(Process *p) have none here */ } - if (scb) - erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb); - - if (pbt) - erts_free(ERTS_ALC_T_BPD, (void *) pbt); - - if (nif_export) - erts_destroy_nif_export(nif_export); - #ifdef ERTS_SMP erts_flush_trace_messages(p, 0); #endif ERTS_TRACER_CLEAR(&ERTS_TRACER(p)); - delete_process(p); + if (!delay_del_proc) + delete_process(p); #ifdef ERTS_SMP erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); @@ -12687,6 +12955,7 @@ erts_continue_exit_process(Process *p) ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); + BUMP_ALL_REDS(p); } /* @@ -13006,11 +13275,13 @@ void erts_halt(int code) int erts_dbg_check_halloc_lock(Process *p) { + ErtsSchedulerData *esdp; if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)) return 1; if (p->common.id == ERTS_INVALID_PID) return 1; - if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process) + esdp = erts_proc_sched_data(p); + if (esdp && p == esdp->match_pseudo_process) return 1; if (erts_thr_progress_is_blocking()) return 1; diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 59da9c1779..2801947613 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -304,6 +304,7 @@ typedef enum { ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX, ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX, ERTS_SSI_AUX_WORK_MISC_IX, + ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX, ERTS_SSI_AUX_WORK_SET_TMO_IX, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX, ERTS_SSI_AUX_WORK_REAP_PORTS_IX, @@ -336,6 +337,8 @@ typedef enum { (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX) #define ERTS_SSI_AUX_WORK_MISC \ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_IX) +#define ERTS_SSI_AUX_WORK_PENDING_EXITERS \ + (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX) #define ERTS_SSI_AUX_WORK_SET_TMO \ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX) #define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \ @@ -645,6 +648,7 @@ struct ErtsSchedulerData_ { Uint no; /* 
Scheduler number for normal schedulers */ #ifdef ERTS_DIRTY_SCHEDULERS ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */ + Process *dirty_shadow_process; #endif Port *current_port; ErtsRunQueue *run_queue; @@ -805,14 +809,26 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) #define ERTS_PSD_CALL_TIME_BP 3 #define ERTS_PSD_DELAYED_GC_TASK_QS 4 #define ERTS_PSD_NIF_TRAP_EXPORT 5 -#ifdef HIPE #define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6 -#endif - -#ifdef HIPE -#define ERTS_PSD_SIZE 7 -#else -#define ERTS_PSD_SIZE 6 +#define ERTS_PSD_DIRTY_CPU_START 7 + +#define ERTS_PSD_SIZE 8 + +#if !defined(HIPE) && !defined(ERTS_DIRTY_SCHEDULERS) +# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF +# undef ERTS_PSD_DIRTY_CPU_START +# undef ERTS_PSD_SIZE +# define ERTS_PSD_SIZE 6 +#elif !defined(HIPE) +# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF +# undef ERTS_PSD_DIRTY_CPU_START +# undef ERTS_PSD_SIZE +# define ERTS_PSD_DIRTY_CPU_START 6 +# define ERTS_PSD_SIZE 7 +#elif !defined(ERTS_DIRTY_SCHEDULERS) +# undef ERTS_PSD_DIRTY_CPU_START +# undef ERTS_PSD_SIZE +# define ERTS_PSD_SIZE 7 #endif typedef struct { @@ -918,6 +934,15 @@ struct ErtsPendingSuspend_ { # define BIN_OLD_VHEAP_SZ(p) (p)->bin_old_vheap_sz # define BIN_OLD_VHEAP(p) (p)->bin_old_vheap +# define MAX_HEAP_SIZE_GET(p) ((p)->max_heap_size >> 2) +# define MAX_HEAP_SIZE_SET(p, sz) ((p)->max_heap_size = ((sz) << 2) | \ + MAX_HEAP_SIZE_FLAGS_GET(p)) +# define MAX_HEAP_SIZE_FLAGS_GET(p) ((p)->max_heap_size & 0x3) +# define MAX_HEAP_SIZE_FLAGS_SET(p, flags) ((p)->max_heap_size = flags | \ + ((p)->max_heap_size & ~0x3)) +# define MAX_HEAP_SIZE_KILL 1 +# define MAX_HEAP_SIZE_LOG 2 + struct process { ErtsPTabElementCommon common; /* *Need* to be first in struct */ @@ -935,6 +960,7 @@ struct process { Uint heap_sz; /* Size of heap in words */ Uint min_heap_size; /* Minimum size of heap (in words). */ Uint min_vheap_size; /* Minimum size of virtual heap (in words). */ + Uint max_heap_size; /* Maximum size of heap (in words). */ #if !defined(NO_FPE_SIGNALS) || defined(HIPE) volatile unsigned long fp_exception; @@ -1183,7 +1209,10 @@ void erts_check_for_holes(Process* p); #define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(20) #define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(21) #define ERTS_PSFLG_DIRTY_ACTIVE_SYS ERTS_PSFLG_BIT(22) -#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 22) +#define ERTS_PSFLG_DIRTY_RUNNING ERTS_PSFLG_BIT(23) +#define ERTS_PSFLG_DIRTY_RUNNING_SYS ERTS_PSFLG_BIT(24) + +#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 24) #define ERTS_PSFLGS_DIRTY_WORK (ERTS_PSFLG_DIRTY_CPU_PROC \ | ERTS_PSFLG_DIRTY_IO_PROC \ @@ -1194,6 +1223,11 @@ void erts_check_for_holes(Process* p); | ERTS_PSFLG_IN_PRQ_NORMAL \ | ERTS_PSFLG_IN_PRQ_LOW) +#define ERTS_PSFLGS_VOLATILE_HEAP (ERTS_PSFLG_EXITING \ + | ERTS_PSFLG_PENDING_EXIT \ + | ERTS_PSFLG_DIRTY_RUNNING \ + | ERTS_PSFLG_DIRTY_RUNNING_SYS) + #define ERTS_PSFLGS_GET_ACT_PRIO(PSFLGS) \ (((PSFLGS) >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK) #define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \ @@ -1235,6 +1269,7 @@ void erts_check_for_holes(Process* p); * Static flags that do not change after process creation. */ #define ERTS_STC_FLG_SYSTEM_PROC (((Uint32) 1) << 0) +#define ERTS_STC_FLG_SHADOW_PROC (((Uint32) 1) << 1) /* The sequential tracing token is a tuple of size 5: * @@ -1285,6 +1320,8 @@ typedef struct { Uint min_vheap_size; /* Minimum virtual heap size */ int priority; /* Priority for process. 
*/ Uint16 max_gen_gcs; /* Maximum number of gen GCs before fullsweep. */ + Uint max_heap_size; /* Maximum heap size in words */ + Uint max_heap_flags; /* Maximum heap flags (kill | log) */ int scheduler; } ErlSpawnOpts; @@ -1363,6 +1400,7 @@ extern int erts_system_profile_ts_type; #define F_SCHDLR_ONLN_WAITQ (1 << 17) /* Process enqueued waiting to change schedulers online */ #define F_HAVE_BLCKD_NMSCHED (1 << 18) /* Process has blocked normal multi-scheduling */ #define F_HIPE_MODE (1 << 19) +#define F_DELAYED_DEL_PROC (1 << 20) /* Delay delete process (dirty proc exit case) */ /* * F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent @@ -1783,7 +1821,8 @@ erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int); void erts_sched_notify_check_cpu_bind(void); Uint erts_active_schedulers(void); void erts_init_process(int, int, int); -Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm); +Eterm erts_process_state2status(erts_aint32_t); +Eterm erts_process_status(Process *, Eterm); Uint erts_run_queues_len(Uint *, int, int); void erts_add_to_runq(Process *); Eterm erts_bound_schedulers_term(Process *c_p); @@ -1860,19 +1899,11 @@ int erts_debug_wait_completed(Process *c_p, int flags); Uint erts_process_memory(Process *c_p, int incl_msg_inq); -#ifdef ERTS_SMP -# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data) -# define ERTS_PROC_GET_SCHDATA(PROC) ((PROC)->scheduler_data) -#else -# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) (erts_scheduler_data) -# define ERTS_PROC_GET_SCHDATA(PROC) (erts_scheduler_data) -#endif - #ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC # define ERTS_VERIFY_UNUSED_TEMP_ALLOC(P) \ do { \ ErtsSchedulerData *esdp__ = ((P) \ - ? ERTS_PROC_GET_SCHDATA((Process *) (P)) \ + ? erts_proc_sched_data((Process *) (P)) \ : erts_get_scheduler_data()); \ if (esdp__ && !ERTS_SCHEDULER_IS_DIRTY(esdp__)) \ esdp__->verify_unused_temp_alloc( \ @@ -1965,12 +1996,15 @@ erts_psd_set(Process *p, int ix, void *data) ErtsPSD *psd; #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p); - if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks) - ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); - else { - locks &= erts_psd_required_locks[ix].set_locks; - ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks - || erts_thr_progress_is_blocking()); + erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + if (!(state & ERTS_PSFLG_FREE)) { + if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks) + ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); + else { + locks &= erts_psd_required_locks[ix].set_locks; + ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks + || erts_thr_progress_is_blocking()); + } } #endif psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); @@ -2027,6 +2061,13 @@ erts_psd_set(Process *p, int ix, void *data) ((struct saved_calls *) erts_psd_set((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF, (void *) (SCB))) #endif +#ifdef ERTS_DIRTY_SCHEDULERS +#define ERTS_PROC_GET_DIRTY_CPU_START(P) \ + ((void *) erts_psd_get((P), ERTS_PSD_DIRTY_CPU_START)) +#define ERTS_PROC_SET_DIRTY_CPU_START(P, DCS) \ + ((void *) erts_psd_set((P), ERTS_PSD_DIRTY_CPU_START, (void *) (DCS))) +#endif + ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p); ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler); @@ -2169,6 +2210,7 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio) #endif
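An aside on the new max_heap_size word: the limit (in words) lives in the upper bits and the two low bits carry the kill/log action flags, exactly as the MAX_HEAP_SIZE_* macros in the erl_process.h hunk above encode it. A minimal standalone sketch of that encoding, reusing those macros; the Uint typedef and struct proc here are stand-ins invented for this example, not the emulator's definitions:

#include <stdio.h>

typedef unsigned long Uint;            /* stand-in for the emulator's Uint */

struct proc { Uint max_heap_size; };   /* stand-in for struct process */

/* Macros as added in the hunk above */
#define MAX_HEAP_SIZE_GET(p)        ((p)->max_heap_size >> 2)
#define MAX_HEAP_SIZE_SET(p, sz)    ((p)->max_heap_size = ((sz) << 2) | \
                                     MAX_HEAP_SIZE_FLAGS_GET(p))
#define MAX_HEAP_SIZE_FLAGS_GET(p)  ((p)->max_heap_size & 0x3)
#define MAX_HEAP_SIZE_FLAGS_SET(p, flags) ((p)->max_heap_size = (flags) | \
                                     ((p)->max_heap_size & ~0x3))
#define MAX_HEAP_SIZE_KILL 1
#define MAX_HEAP_SIZE_LOG  2

int main(void)
{
    struct proc p = { 0 };
    MAX_HEAP_SIZE_SET(&p, 1024);       /* limit: 1024 words */
    MAX_HEAP_SIZE_FLAGS_SET(&p, MAX_HEAP_SIZE_KILL | MAX_HEAP_SIZE_LOG);
    printf("limit=%lu kill=%d log=%d\n",
           MAX_HEAP_SIZE_GET(&p),
           !!(MAX_HEAP_SIZE_FLAGS_GET(&p) & MAX_HEAP_SIZE_KILL),
           !!(MAX_HEAP_SIZE_FLAGS_GET(&p) & MAX_HEAP_SIZE_LOG));
    return 0;                          /* prints: limit=1024 kill=1 log=1 */
}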
+ERTS_GLB_INLINE ErtsSchedulerData *erts_proc_sched_data(Process *c_p); ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp); ERTS_GLB_INLINE Process *erts_get_current_process(void); ERTS_GLB_INLINE Eterm erts_get_current_pid(void); @@ -2202,6 +2244,31 @@ ERTS_GLB_INLINE void erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp, #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE +ErtsSchedulerData *erts_proc_sched_data(Process *c_p) +{ + ErtsSchedulerData *esdp; + ASSERT(c_p); +#if !defined(ERTS_SMP) + esdp = erts_get_scheduler_data(); +#else + esdp = c_p->scheduler_data; +# if defined(ERTS_DIRTY_SCHEDULERS) + if (esdp) { + ASSERT(esdp == erts_get_scheduler_data()); + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); + } + else { + esdp = erts_get_scheduler_data(); + ASSERT(esdp); + ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); + } +# endif +#endif + ASSERT(esdp); + return esdp; +} + +ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp) { if (!esdp) @@ -2416,7 +2483,7 @@ ERTS_GLB_INLINE ErtsAtomCacheMap * erts_get_atom_cache_map(Process *c_p) { ErtsSchedulerData *esdp = (c_p - ? ERTS_PROC_GET_SCHDATA(c_p) + ? erts_proc_sched_data(c_p) : erts_get_scheduler_data()); ASSERT(esdp); return &esdp->atom_cache_map; diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index fa76773cac..eeaa9a569c 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -568,23 +568,21 @@ dump_externally(int to, void *to_arg, Eterm term) } } -void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg) { - if (psflg & ERTS_PSFLG_FREE) - erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */ - else if (psflg & ERTS_PSFLG_EXITING) - erts_print(to, to_arg, "Exiting\n"); - else if (psflg & ERTS_PSFLG_GC) { - erts_print(to, to_arg, "Garbing\n"); - } - else if (psflg & ERTS_PSFLG_SUSPENDED) - erts_print(to, to_arg, "Suspended\n"); - else if (psflg & ERTS_PSFLG_RUNNING) { - erts_print(to, to_arg, "Running\n"); +void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg) +{ + char *s; + switch (erts_process_state2status(psflg)) { + case am_free: s = "Non Existing"; break; /* Should never happen */ + case am_exiting: s = "Exiting"; break; + case am_garbage_collecting: s = "Garbing"; break; + case am_suspended: s = "Suspended"; break; + case am_running: s = "Running"; break; + case am_runnable: s = "Scheduled"; break; + case am_waiting: s = "Waiting"; break; + default: s = "Undefined"; break; /* Should never happen */ } - else if (psflg & ERTS_PSFLG_ACTIVE) - erts_print(to, to_arg, "Scheduled\n"); - else - erts_print(to, to_arg, "Waiting\n"); + + erts_print(to, to_arg, "%s\n", s); } void @@ -668,6 +666,10 @@ erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) { erts_print(to, to_arg, "DIRTY_IO_PROC"); break; case ERTS_PSFLG_DIRTY_ACTIVE_SYS: erts_print(to, to_arg, "DIRTY_ACTIVE_SYS"); break; + case ERTS_PSFLG_DIRTY_RUNNING: + erts_print(to, to_arg, "DIRTY_RUNNING"); break; + case ERTS_PSFLG_DIRTY_RUNNING_SYS: + erts_print(to, to_arg, "DIRTY_RUNNING_SYS"); break; default: erts_print(to, to_arg, "UNKNOWN(%d)", chk); break; } diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c index 346404fe2a..9e37106b88 100644 --- a/erts/emulator/beam/erl_time_sup.c +++ b/erts/emulator/beam/erl_time_sup.c @@ -1966,7 +1966,7 @@ send_time_offset_changed_notifications(void *new_offsetp) *patch_refp = ref; ASSERT(hsz == size_object(message_template)); message = 
copy_struct(message_template, hsz, &hp, ohp); - erts_queue_message(rp, &rp_locks, mp, message); + erts_queue_message(rp, rp_locks, mp, message, am_clock_service); } erts_smp_proc_unlock(rp, rp_locks); } @@ -2348,7 +2348,7 @@ erts_napi_convert_time_unit(ErtsMonotonicTime val, int from, int to) BIF_RETTYPE monotonic_time_0(BIF_ALIST_0) { ErtsMonotonicTime mtime = time_sup.r.o.get_time(); - update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime); + update_last_mtime(erts_proc_sched_data(BIF_P), mtime); mtime += ERTS_MONOTONIC_OFFSET_NATIVE; BIF_RET(make_time_val(BIF_P, mtime)); } @@ -2356,7 +2356,7 @@ BIF_RETTYPE monotonic_time_1(BIF_ALIST_1) { ErtsMonotonicTime mtime = time_sup.r.o.get_time(); - update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime); + update_last_mtime(erts_proc_sched_data(BIF_P), mtime); BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime, 1)); } @@ -2365,7 +2365,7 @@ BIF_RETTYPE system_time_0(BIF_ALIST_0) { ErtsMonotonicTime mtime, offset; mtime = time_sup.r.o.get_time(); offset = get_time_offset(); - update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime); + update_last_mtime(erts_proc_sched_data(BIF_P), mtime); BIF_RET(make_time_val(BIF_P, mtime + offset)); } @@ -2374,7 +2374,7 @@ BIF_RETTYPE system_time_1(BIF_ALIST_0) { ErtsMonotonicTime mtime, offset; mtime = time_sup.r.o.get_time(); offset = get_time_offset(); - update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime); + update_last_mtime(erts_proc_sched_data(BIF_P), mtime); BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime + offset, 0)); } @@ -2404,7 +2404,7 @@ BIF_RETTYPE timestamp_0(BIF_ALIST_0) mtime = time_sup.r.o.get_time(); offset = get_time_offset(); - update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime); + update_last_mtime(erts_proc_sched_data(BIF_P), mtime); make_timestamp_value(&mega_sec, &sec, &micro_sec, mtime, offset); diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index 436b4aca21..ca001fc156 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -238,7 +238,6 @@ write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp) } #ifdef ERTS_SMP -#define PATCH_TS_SIZE(p) patch_ts_size(TFLGS_TS_TYPE(p)) static ERTS_INLINE Uint patch_ts_size(int ts_type) @@ -258,7 +257,7 @@ patch_ts_size(int ts_type) return 0; } } -#endif +#endif /* ERTS_SMP */ /* * Write a timestamp.
The timestamp MUST be the last @@ -394,9 +393,10 @@ static ERTS_INLINE int send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p, Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt, - Eterm tag, Eterm msg, Eterm extra); + Eterm tag, Eterm msg, Eterm extra, + Eterm pam_result); static ERTS_INLINE Eterm -call_enabled_tracer(Process *c_p, const ErtsTracer tracer, +call_enabled_tracer(const ErtsTracer tracer, ErtsTracerNif **tnif_ref, enum ErtsTracerOpt topt, Eterm tag, Eterm t_p_id); @@ -459,8 +459,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new if (!ERTS_TRACER_IS_NIL(new)) { Eterm nif_result = call_enabled_tracer( - NULL, new, NULL, - TRACE_FUN_ENABLED, am_trace_status, am_undefined); + new, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined); switch (nif_result) { case am_trace: break; default: @@ -492,7 +491,7 @@ erts_get_system_seq_tracer(void) erts_smp_rwmtx_runlock(&sys_trace_rwmtx); if (st != erts_tracer_nil && - call_enabled_tracer(NULL, st, NULL, TRACE_FUN_ENABLED, + call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined) == am_remove) { erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil); st = erts_tracer_nil; @@ -513,7 +512,7 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp, *default_trace_flags &= ~TRACEE_FLAGS; } else { Eterm nif_res; - nif_res = call_enabled_tracer(NULL, *default_tracer, + nif_res = call_enabled_tracer(*default_tracer, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined); switch (nif_res) { @@ -739,7 +738,7 @@ profile_send(Eterm from, Eterm message) { else msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap); - erts_queue_message(profile_p, NULL, mp, msg); + erts_queue_message(profile_p, 0, mp, msg, from); } } @@ -787,7 +786,7 @@ trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what) } send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SCHED_PROC, - what, tmp, THE_NON_VALUE); + what, tmp, THE_NON_VALUE, am_true); } /* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp} @@ -812,9 +811,32 @@ trace_send(Process *p, Eterm to, Eterm msg) { Eterm operation = am_send; ErtsTracerNif *tnif = NULL; + ErtsTracingEvent* te; + Eterm pam_result; ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)); + te = &erts_send_tracing[erts_active_bp_ix()]; + if (!te->on) { + return; + } + if (te->match_spec) { + Eterm args[2]; + Uint32 return_flags; + args[0] = to; + args[1] = msg; + pam_result = erts_match_set_run_trace(p, p, + te->match_spec, args, 2, + ERTS_PAM_TMP_RESULT, &return_flags); + if (pam_result == am_false) + return; + if (ERTS_TRACE_FLAGS(p) & F_TRACE_SILENT) { + erts_match_set_release_result_trace(p, pam_result); + return; + } + } else + pam_result = am_true; + if (is_internal_pid(to)) { if (!erts_proc_lookup(to)) goto send_to_non_existing_process; @@ -826,23 +848,63 @@ trace_send(Process *p, Eterm to, Eterm msg) } if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, - TRACE_FUN_E_SEND, operation)) + TRACE_FUN_E_SEND, operation)) { send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SEND, - operation, msg, to); + operation, msg, to, pam_result); + } + erts_match_set_release_result_trace(p, pam_result); } /* Send {trace_ts, Pid, receive, Msg, Timestamp} * or {trace, Pid, receive, Msg} */ void -trace_receive(Process *c_p, Eterm msg) +trace_receive(Process* receiver, + Eterm from, + Eterm msg, ErtsTracingEvent* te) { ErtsTracerNif *tnif = NULL; - if (is_tracer_enabled(NULL, 0, &c_p->common, &tnif, - TRACE_FUN_E_RECEIVE, am_receive)) - 
send_to_tracer_nif(NULL, &c_p->common, c_p->common.id, + Eterm pam_result; + + if (!te) { + te = &erts_receive_tracing[erts_active_bp_ix()]; + if (!te->on) + return; + } + else ASSERT(te->on); + + if (te->match_spec) { + Eterm args[3]; + Uint32 return_flags; + if (is_pid(from)) { + args[0] = pid_node_name(from); + args[1] = from; + } + else { + ASSERT(is_atom(from)); + args[0] = from; /* node name or other atom (e.g. 'system') */ + args[1] = am_undefined; + } + args[2] = msg; + pam_result = erts_match_set_run_trace(NULL, receiver, + te->match_spec, args, 3, + ERTS_PAM_TMP_RESULT, &return_flags); + if (pam_result == am_false) + return; + if (ERTS_TRACE_FLAGS(receiver) & F_TRACE_SILENT) { + erts_match_set_release_result_trace(NULL, pam_result); + return; + } + } else + pam_result = am_true; + + if (is_tracer_enabled(NULL, 0, &receiver->common, &tnif, + TRACE_FUN_E_RECEIVE, am_receive)) { + send_to_tracer_nif(NULL, &receiver->common, receiver->common.id, tnif, TRACE_FUN_T_RECEIVE, - am_receive, msg, THE_NON_VALUE); + am_receive, msg, THE_NON_VALUE, pam_result); + } + erts_match_set_release_result_trace(NULL, pam_result); } int @@ -852,8 +914,8 @@ seq_trace_update_send(Process *p) { ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p)))); if (have_no_seqtrace(SEQ_TRACE_TOKEN(p)) || (seq_tracer != NIL && - call_enabled_tracer(NULL, seq_tracer, NULL, - TRACE_FUN_ENABLED, am_trace_status, + call_enabled_tracer(seq_tracer, NULL, + TRACE_FUN_ENABLED, am_seq_trace, p ? p->common.id : am_undefined) != am_trace) #ifdef USE_VM_PROBES || (SEQ_TRACE_TOKEN(p) == am_have_dt_utag) @@ -900,9 +962,9 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type, ASSERT(is_tuple(token) || is_nil(token)); if (token == NIL || (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE) || ERTS_TRACER_IS_NIL(seq_tracer) || - call_enabled_tracer(NULL, seq_tracer, + call_enabled_tracer(seq_tracer, NULL, TRACE_FUN_ENABLED, - am_trace_status, + am_seq_trace, process ? process->common.id : am_undefined) != am_trace) { return; } @@ -964,7 +1026,7 @@ erts_trace_return_to(Process *p, BeamInstr *pc) } send_to_tracer_nif(p, &p->common, p->common.id, NULL, TRACE_FUN_T_CALL, - am_return_to, mfa, THE_NON_VALUE); + am_return_to, mfa, THE_NON_VALUE, am_true); } @@ -1123,7 +1185,11 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, * use process flags */ tracee_flags = &ERTS_TRACE_FLAGS(p); + /* It is not ideal at all to call this check twice, + it should be optimized so that only one call is made.
*/ if (!is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, + TRACE_FUN_ENABLED, am_trace_status) + || !is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, TRACE_FUN_E_CALL, am_call)) { return 0; } @@ -1139,13 +1205,21 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, } meta_flags = F_TRACE_CALLS | F_NOW_TS; tracee_flags = &meta_flags; - switch (call_enabled_tracer(p, *tracer, - &tnif, TRACE_FUN_T_CALL, - am_call, p->common.id)) { + switch (call_enabled_tracer(*tracer, + &tnif, TRACE_FUN_ENABLED, + am_trace_status, p->common.id)) { default: case am_remove: *tracer = erts_tracer_nil; case am_discard: return 0; - case am_trace: break; + case am_trace: + switch (call_enabled_tracer(*tracer, + &tnif, TRACE_FUN_T_CALL, + am_call, p->common.id)) { + default: + case am_discard: return 0; + case am_trace: break; + } + break; } } @@ -1202,20 +1276,14 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, may remove it, and we still want to generate a trace message */ erts_tracer_update(&pre_ms_tracer, *tracer); tracer = &pre_ms_tracer; - pam_result = erts_match_set_run(p, match_spec, args, arity, - ERTS_PAM_TMP_RESULT, &return_flags); - if (is_non_value(pam_result)) { - erts_match_set_release_result(p); - UnUseTmpHeap(ERL_SUB_BIN_SIZE,p); - ERTS_TRACER_CLEAR(&pre_ms_tracer); - return 0; - } + pam_result = erts_match_set_run_trace(p, p, + match_spec, args, arity, + ERTS_PAM_TMP_RESULT, &return_flags); } if (tracee_flags == &meta_flags) { /* Meta trace */ if (pam_result == am_false) { - erts_match_set_release_result(p); UnUseTmpHeap(ERL_SUB_BIN_SIZE,p); ERTS_TRACER_CLEAR(&pre_ms_tracer); return return_flags; @@ -1223,13 +1291,12 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, } else { /* Non-meta trace */ if (*tracee_flags & F_TRACE_SILENT) { - erts_match_set_release_result(p); + erts_match_set_release_result_trace(p, pam_result); UnUseTmpHeap(ERL_SUB_BIN_SIZE,p); ERTS_TRACER_CLEAR(&pre_ms_tracer); return 0; } if (pam_result == am_false) { - erts_match_set_release_result(p); UnUseTmpHeap(ERL_SUB_BIN_SIZE,p); ERTS_TRACER_CLEAR(&pre_ms_tracer); return return_flags; @@ -1265,11 +1332,14 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, * Build the trace tuple and send it to the port. 
*/ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id, - tnif, TRACE_FUN_T_CALL, am_call, mfa_tuple, THE_NON_VALUE, pam_result); - erts_match_set_release_result(p); + tnif, TRACE_FUN_T_CALL, am_call, mfa_tuple, + THE_NON_VALUE, pam_result); - if (match_spec && tracer == &pre_ms_tracer) - ERTS_TRACER_CLEAR(&pre_ms_tracer); + if (match_spec) { + erts_match_set_release_result_trace(p, pam_result); + if (tracer == &pre_ms_tracer) + ERTS_TRACER_CLEAR(&pre_ms_tracer); + } return return_flags; } @@ -1287,10 +1357,10 @@ trace_proc(Process *c_p, ErtsProcLocks c_p_locks, Process *t_p, Eterm what, Eterm data) { ErtsTracerNif *tnif = NULL; - if (is_tracer_enabled(c_p, c_p_locks, &t_p->common, &tnif, + if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PROCS, what)) - send_to_tracer_nif(c_p, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS, - what, data, THE_NON_VALUE); + send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS, + what, data, THE_NON_VALUE, am_true); } @@ -1306,17 +1376,16 @@ trace_proc_spawn(Process *p, Eterm what, Eterm pid, Eterm mod, Eterm func, Eterm args) { ErtsTracerNif *tnif = NULL; - if (is_tracer_enabled(p, ERTS_PROC_LOCKS_ALL & - ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE), - &p->common, &tnif, TRACE_FUN_E_PROCS, what)) { + if (is_tracer_enabled(NULL, 0, + &p->common, &tnif, TRACE_FUN_E_PROCS, what)) { Eterm mfa; Eterm* hp; hp = HAlloc(p, 4); mfa = TUPLE3(hp, mod, func, args); hp += 4; - send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS, - what, pid, mfa); + send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS, + what, pid, mfa, am_true); } } @@ -1350,25 +1419,28 @@ void save_calls(Process *p, Export *e) * are all small (atomic) integers. 
*/ void -trace_gc(Process *p, Eterm what, Uint size) +trace_gc(Process *p, Eterm what, Uint size, Eterm msg) { ErtsTracerNif *tnif = NULL; Eterm* hp; - Eterm msg = NIL; Uint sz = 0; Eterm tup; - if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, TRACE_FUN_E_GC, what)) { + if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, + TRACE_FUN_E_GC, what)) { - (void) erts_process_gc_info(p, &sz, NULL); - hp = HAlloc(p, sz + 3 + 2); + if (is_non_value(msg)) { - msg = erts_process_gc_info(p, NULL, &hp); - tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3; - msg = CONS(hp, tup, msg); hp += 2; + (void) erts_process_gc_info(p, &sz, NULL, 0, 0); + hp = HAlloc(p, sz + 3 + 2); + + msg = erts_process_gc_info(p, NULL, &hp, 0, 0); + tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3; + msg = CONS(hp, tup, msg); hp += 2; + } send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC, - what, msg, am_undefined); + what, msg, THE_NON_VALUE, am_true); } } @@ -1431,7 +1503,7 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint { ErtsMessage *mp = erts_alloc_message(0, NULL); mp->data.heap_frag = bp; - erts_queue_message(monitor_p, NULL, mp, msg); + erts_queue_message(monitor_p, 0, mp, msg, am_system); } #endif } @@ -1496,7 +1568,7 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time) { ErtsMessage *mp = erts_alloc_message(0, NULL); mp->data.heap_frag = bp; - erts_queue_message(monitor_p, NULL, mp, msg); + erts_queue_message(monitor_p, 0, mp, msg, am_system); } #endif } @@ -1571,7 +1643,7 @@ monitor_long_gc(Process *p, Uint time) { { ErtsMessage *mp = erts_alloc_message(0, NULL); mp->data.heap_frag = bp; - erts_queue_message(monitor_p, NULL, mp, msg); + erts_queue_message(monitor_p, 0, mp, msg, am_system); } #endif } @@ -1646,7 +1718,7 @@ monitor_large_heap(Process *p) { { ErtsMessage *mp = erts_alloc_message(0, NULL); mp->data.heap_frag = bp; - erts_queue_message(monitor_p, NULL, mp, msg); + erts_queue_message(monitor_p, 0, mp, msg, am_system); } #endif } @@ -1678,7 +1750,7 @@ monitor_generic(Process *p, Eterm type, Eterm spec) { { ErtsMessage *mp = erts_alloc_message(0, NULL); mp->data.heap_frag = bp; - erts_queue_message(monitor_p, NULL, mp, msg); + erts_queue_message(monitor_p, 0, mp, msg, am_system); } #endif @@ -1748,7 +1820,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) { ERTS_SMP_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open)) send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS, - am_open, calling_pid, drv_name); + am_open, calling_pid, drv_name, am_true); } /* Sends trace message: @@ -1767,7 +1839,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) { ERTS_SMP_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS, - what, data, THE_NON_VALUE); + what, data, THE_NON_VALUE, am_true); } @@ -1907,7 +1979,7 @@ trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...) 
ASSERT(hp <= (local_heap + LOCAL_HEAP_SIZE) || orig_hp); send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_RECEIVE, - am_receive, data, THE_NON_VALUE); + am_receive, data, THE_NON_VALUE, am_true); if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0) erts_bin_free(bptr); @@ -1930,7 +2002,7 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists) ERTS_SMP_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND, - op, msg, receiver); + op, msg, receiver, am_true); } void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz) @@ -1959,7 +2031,7 @@ void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz) hp += 3; send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND, - am_send, msg, to); + am_send, msg, to, am_true); if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0) erts_bin_free(bptr); @@ -1990,7 +2062,7 @@ trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) { if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SCHED_PORT, - what, where, THE_NON_VALUE); + what, where, THE_NON_VALUE, am_true); } /* Port profiling */ @@ -2430,7 +2502,7 @@ sys_msg_dispatcher_func(void *unused) queue_proc_msg: mp = erts_alloc_message(0, NULL); mp->data.heap_frag = smqp->bp; - erts_queue_message(proc,&proc_locks,mp,smqp->msg); + erts_queue_message(proc,proc_locks,mp,smqp->msg,am_system); #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "delivered\n"); #endif @@ -2813,7 +2885,7 @@ send_to_tracer_nif_raw(Process *c_p, Process *tracee, static ERTS_INLINE int send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p, Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt, - Eterm tag, Eterm msg, Eterm extra) + Eterm tag, Eterm msg, Eterm extra, Eterm pam_result) { #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) { @@ -2833,27 +2905,32 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p, is_internal_pid(t_p->id) ? (Process*)t_p : NULL, t_p->tracer, t_p->trace_flags, t_p_id, tnif, topt, tag, msg, extra, - am_true); + pam_result); } static ERTS_INLINE Eterm -call_enabled_tracer(Process *c_p, const ErtsTracer tracer, +call_enabled_tracer(const ErtsTracer tracer, ErtsTracerNif **tnif_ret, enum ErtsTracerOpt topt, Eterm tag, Eterm t_p_id) { ErtsTracerNif *tnif = lookup_tracer_nif(tracer); if (tnif) { - Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id}; + Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id}, + ret; topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_ENABLED; ASSERT(topt < NIF_TRACER_TYPES); ASSERT(tnif->tracers[topt].cb != NULL); if (tnif_ret) *tnif_ret = tnif; - return erts_nif_call_function(c_p, NULL, tnif->nif_mod, - tnif->tracers[topt].cb, - tnif->tracers[topt].arity, - argv); + ret = erts_nif_call_function(NULL, NULL, tnif->nif_mod, + tnif->tracers[topt].cb, + tnif->tracers[topt].arity, + argv); + if (tag == am_trace_status && ret != am_remove) + return am_trace; + ASSERT(tag == am_trace_status || ret != am_remove); + return ret; } - return am_remove; + return tag == am_trace_status ? 
am_remove : am_discard; } static int @@ -2878,12 +2955,12 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, } #endif - nif_result = call_enabled_tracer(c_p, t_p->tracer, tnif_ret, topt, tag, t_p->id); + nif_result = call_enabled_tracer(t_p->tracer, tnif_ret, topt, tag, t_p->id); switch (nif_result) { case am_discard: return 0; case am_trace: return 1; case THE_NON_VALUE: - case am_remove: break; + case am_remove: ASSERT(tag == am_trace_status); break; default: /* only am_remove should be returned, but if something else is returned we fall-through @@ -2914,19 +2991,14 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, return 0; } -int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks, - ErtsPTabElementCommon *t_p, Eterm type) -{ - return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED, am_trace_status); -} - -int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer) +int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p) { ErtsTracerNif *tnif = lookup_tracer_nif(tracer); if (tnif) { - Eterm nif_result = call_enabled_tracer(c_p, tracer, &tnif, - TRACE_FUN_ENABLED, am_trace_status, - c_p->common.id); + Eterm nif_result = call_enabled_tracer(tracer, &tnif, + TRACE_FUN_ENABLED, + am_trace_status, + t_p->id); switch (nif_result) { case am_discard: case am_trace: return 1; @@ -2937,6 +3009,20 @@ int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer) return 0; } +int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks, + ErtsPTabElementCommon *t_p) +{ + return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED, + am_trace_status); +} + +int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks, + ErtsPTabElementCommon *t_p) +{ + return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_T_SEND, am_send); +} + + void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer) { #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h index 9a007e62ec..0095d4386b 100644 --- a/erts/emulator/beam/erl_trace.h +++ b/erts/emulator/beam/erl_trace.h @@ -56,6 +56,15 @@ struct binary; +typedef struct +{ + int on; + struct binary* match_spec; +} ErtsTracingEvent; + +extern ErtsTracingEvent erts_send_tracing[]; +extern ErtsTracingEvent erts_receive_tracing[]; + /* erl_bif_trace.c */ Eterm erl_seq_trace_info(Process *p, Eterm arg1); void erts_system_monitor_clear(Process *c_p); @@ -91,7 +100,7 @@ void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *); #endif void trace_send(Process*, Eterm, Eterm); -void trace_receive(Process *, Eterm); +void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*); Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec, Eterm* args, int local, ErtsTracer *tracer); void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, @@ -103,7 +112,7 @@ void trace_sched(Process*, ErtsProcLocks, Eterm); void trace_proc(Process*, ErtsProcLocks, Process*, Eterm, Eterm); void trace_proc_spawn(Process*, Eterm what, Eterm pid, Eterm mod, Eterm func, Eterm args); void save_calls(Process *p, Export *); -void trace_gc(Process *p, Eterm what, Uint size); +void trace_gc(Process *p, Eterm what, Uint size, Eterm msg); /* port tracing */ void trace_virtual_sched(Process*, ErtsProcLocks, Eterm); void trace_sched_ports(Port *pp, Eterm); @@ -184,8 +193,10 @@ int erts_finish_breakpointing(void); /* Nif tracer functions */ int 
erts_is_tracer_proc_enabled(Process *c_p, ErtsProcLocks c_p_locks, - ErtsPTabElementCommon *t_p, Eterm type); -int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer); + ErtsPTabElementCommon *t_p); +int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks, + ErtsPTabElementCommon *t_p); +int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p); Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer); ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term); void erts_tracer_replace(ErtsPTabElementCommon *t_p, @@ -215,9 +226,4 @@ ERTS_DECLARE_DUMMY(erts_tracer_nil) = NIL; #define ERTS_TRACER_FROM_ETERM(termp) \ ((ErtsTracer*)(termp)) -#define ERTS_TRACER_PROC_IS_ENABLED(PROC) \ - (!ERTS_TRACER_IS_NIL(ERTS_TRACER(PROC)) \ - && erts_is_tracer_proc_enabled(PROC, ERTS_PROC_LOCK_MAIN, \ - &(PROC)->common, am_trace_status)) - #endif /* ERL_TRACE_H__ */ diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index f3c54de214..f97716d030 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -50,6 +50,7 @@ #define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */ #define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */ +#define H_DEFAULT_MAX_SIZE 0 /* default max heap size is off */ #define CP_SIZE 1 @@ -160,6 +161,8 @@ extern int num_instructions; /* Number of instruction in opc[]. */ extern int H_MIN_SIZE; /* minimum (heap + stack) */ extern int BIN_VH_MIN_SIZE; /* minimum virtual (bin) heap */ +extern int H_MAX_SIZE; /* maximum (heap + stack) */ +extern int H_MAX_FLAGS; /* maximum heap flags */ extern int erts_atom_table_size;/* Atom table size */ extern int erts_pd_initial_size;/* Initial Process dictionary table size */ diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 49eec44053..1abcc6cbf4 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -57,6 +57,7 @@ struct enif_environment_t /* ErlNifEnv */ struct enif_tmp_obj_t* tmp_obj_list; int exception_thrown; /* boolean */ Process *tracee; + int exiting; /* boolean (dirty nifs might return in exiting state) */ }; extern void erts_pre_nif(struct enif_environment_t*, Process*, struct erl_module_nif*, Process* tracee); @@ -1483,9 +1484,19 @@ do { \ #define MatchSetGetSource(MPSP) erts_match_set_get_source(MPSP) -extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr); +extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA); Eterm erts_match_set_lint(Process *p, Eterm matchexpr); extern void erts_match_set_release_result(Process* p); +ERTS_GLB_INLINE void erts_match_set_release_result_trace(Process* p, Eterm); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE +void erts_match_set_release_result_trace(Process* p, Eterm pam_result) +{ + if (is_not_immed(pam_result)) + erts_match_set_release_result(p); +} +#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ enum erts_pam_run_flags { ERTS_PAM_TMP_RESULT=1, @@ -1493,10 +1504,12 @@ enum erts_pam_run_flags { ERTS_PAM_CONTIGUOUS_TUPLE=4, ERTS_PAM_IGNORE_TRACE_SILENT=8 }; -extern Eterm erts_match_set_run(Process *p, Binary *mpsp, - Eterm *args, int num_args, - enum erts_pam_run_flags in_flags, - Uint32 *return_flags); +extern Eterm erts_match_set_run_trace(Process *p, + Process *self, + Binary *mpsp, + Eterm *args, int num_args, + enum erts_pam_run_flags in_flags, + Uint32 *return_flags); extern Eterm erts_match_set_get_source(Binary *mpsp); extern void erts_match_prog_foreach_offheap(Binary *b, void 
(*)(ErlOffHeap *, void *), diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 5c2595c69d..0377f6cb5e 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -1432,10 +1432,11 @@ finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp) static ERTS_INLINE void queue_port_sched_op_reply(Process *rp, - ErtsProcLocks *rp_locksp, + ErtsProcLocks rp_locks, ErtsHeapFactory* factory, Uint32 *ref_num, - Eterm msg) + Eterm msg, + Port* prt) { Eterm* hp = erts_produce_heap(factory, ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE, 0); Eterm ref; @@ -1448,11 +1449,12 @@ queue_port_sched_op_reply(Process *rp, erts_factory_trim_and_close(factory, &msg, 1); - erts_queue_message(rp, rp_locksp, factory->message, msg); + erts_queue_message(rp, rp_locks, factory->message, msg, + prt ? prt->common.id : am_undefined); } static void -port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg) +port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt) { Process *rp = erts_proc_lookup_raw(to); if (rp) { @@ -1478,10 +1480,11 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg) factory.off_heap)); queue_port_sched_op_reply(rp, - &rp_locks, + rp_locks, &factory, ref_num, - msg_copy); + msg_copy, + prt); if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); @@ -1651,7 +1654,7 @@ port_badsig(Port *prt, erts_aint32_t state, int op, state, sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT); if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) - port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); + port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt); return ERTS_PORT_REDS_BADSIG; } /* port_badsig */ /* bad_port_signal() will @@ -1820,7 +1823,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s } if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) - port_sched_op_reply(sigdp->caller, sigdp->ref, reply); + port_sched_op_reply(sigdp->caller, sigdp->ref, reply, prt); cleanup_scheduled_outputv(sigdp->u.outputv.evp, sigdp->u.outputv.cbinp); @@ -1928,7 +1931,7 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si } if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) - port_sched_op_reply(sigdp->caller, sigdp->ref, reply); + port_sched_op_reply(sigdp->caller, sigdp->ref, reply, prt); cleanup_scheduled_output(sigdp->u.output.bufp); @@ -2509,7 +2512,7 @@ erts_port_output(Process *c_p, sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND; else if (async_nosuspend) { ErtsSchedulerData *esdp = (c_p - ? ERTS_PROC_GET_SCHDATA(c_p) + ? 
erts_proc_sched_data(c_p) : erts_get_scheduler_data()); ASSERT(esdp); ns_pthp = &esdp->nosuspend_port_task_handle; @@ -2636,7 +2639,7 @@ port_sig_exit(Port *prt, if (sigdp->u.exit.bp) free_message_buffer(sigdp->u.exit.bp); if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) - port_sched_op_reply(sigdp->caller, sigdp->ref, msg); + port_sched_op_reply(sigdp->caller, sigdp->ref, msg, prt); return ERTS_PORT_REDS_EXIT; } @@ -2829,7 +2832,7 @@ port_sig_connect(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s msg = am_true; } if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) - port_sched_op_reply(sigdp->caller, sigdp->ref, msg); + port_sched_op_reply(sigdp->caller, sigdp->ref, msg, prt); return ERTS_PORT_REDS_CONNECT; } @@ -2912,7 +2915,7 @@ port_sig_unlink(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si if (op == ERTS_PROC2PORT_SIG_EXEC) port_unlink(prt, sigdp->u.unlink.from); if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) - port_sched_op_reply(sigdp->caller, sigdp->ref, am_true); + port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt); return ERTS_PORT_REDS_UNLINK; } @@ -3007,7 +3010,7 @@ port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigd port_link_failure(sigdp->u.link.port, sigdp->u.link.to); } if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) - port_sched_op_reply(sigdp->caller, sigdp->ref, am_true); + port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt); return ERTS_PORT_REDS_LINK; } @@ -3064,7 +3067,8 @@ init_ack_send_reply(Port *port, Eterm resp) } port_sched_op_reply(port->async_open_port->to, port->async_open_port->ref, - resp); + resp, + port); erts_free(ERTS_ALC_T_PRTSD, port->async_open_port); port->async_open_port = NULL; @@ -3461,7 +3465,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res) sz_res + 3, &hp, &ohp); res = copy_struct(res, sz_res, &hp, ohp); tuple = TUPLE2(hp, sender, res); - erts_queue_message(rp, &rp_locks, mp, tuple); + erts_queue_message(rp, rp_locks, mp, tuple, sender); if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); @@ -3562,7 +3566,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, trace_port_send(prt, to, tuple, 1); ERL_MESSAGE_TOKEN(mp) = am_undefined; - erts_queue_message(rp, &rp_locks, mp, tuple); + erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); if (!scheduler) @@ -3734,7 +3738,7 @@ deliver_vec_message(Port* prt, /* Port */ trace_port_send(prt, to, tuple, 1); ERL_MESSAGE_TOKEN(mp) = am_undefined; - erts_queue_message(rp, &rp_locks, mp, tuple); + erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); erts_smp_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); @@ -4388,10 +4392,11 @@ port_sig_control(Port *prt, &factory.hp, factory.off_heap); queue_port_sched_op_reply(rp, - &rp_locks, + rp_locks, &factory, sigdp->ref, - msg); + msg, + prt); if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); @@ -4402,7 +4407,7 @@ port_sig_control(Port *prt, /* failure */ if (sigdp->caller != ERTS_INVALID_PID) - port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); + port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt); done: @@ -4739,10 +4744,11 @@ port_sig_call(Port *prt, msg = TUPLE2(hp, am_ok, msg); queue_port_sched_op_reply(rp, - &rp_locks, + rp_locks, &factory, sigdp->ref, - msg); + msg, + prt); if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); @@ -4754,7 +4760,7 @@ port_sig_call(Port *prt, } } - port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); + 
port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt); done: @@ -4969,7 +4975,7 @@ port_sig_info(Port *prt, { ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY); if (op != ERTS_PROC2PORT_SIG_EXEC) - port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined); + port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined, prt); else { Eterm *hp, *hp_start; Uint hsz; @@ -4995,10 +5001,11 @@ port_sig_info(Port *prt, mp->data.heap_frag = bp; erts_factory_selfcontained_message_init(&factory, mp, hp); queue_port_sched_op_reply(rp, - &rp_locks, + rp_locks, &factory, sigdp->ref, - value); + value, + prt); } if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); @@ -5115,7 +5122,7 @@ reply_io_bytes(void *vreq) msg = TUPLE4(hp, ref, make_small(sched_id), ein, eout); - erts_queue_message(rp, &rp_locks, mp, msg); + erts_queue_message(rp, rp_locks, mp, msg, am_system); if (req->sched_id == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -5133,7 +5140,7 @@ erts_request_io_bytes(Process *c_p) Uint *hp; Eterm ref; Uint32 *refn; - ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ, sizeof(ErtsIOBytesReq)); @@ -5613,7 +5620,7 @@ void driver_report_exit(ErlDrvPort ix, int status) trace_port_send(prt, pid, tuple, 1); ERL_MESSAGE_TOKEN(mp) = am_undefined; - erts_queue_message(rp, &rp_locks, mp, tuple); + erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); erts_smp_proc_unlock(rp, rp_locks); if (!scheduler) @@ -5934,7 +5941,7 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len) if (!rp) { if (!prt || !IS_TRACED_FL(prt, F_TRACE_SEND)) goto done; - if (!erts_is_tracer_proc_enabled(NULL, 0, &prt->common, am_send)) + if (!erts_is_tracer_proc_enabled_send(NULL, 0, &prt->common)) goto done; res = -2; @@ -6217,15 +6224,20 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len) done: if (res > 0) { + Eterm from = am_undefined; mess = ESTACK_POP(stack); /* get resulting value */ erts_factory_trim_and_close(&factory, &mess, 1); - if (prt && IS_TRACED_FL(prt, F_TRACE_SEND)) - trace_port_send(prt, to, mess, 1); + if (prt) { + if (IS_TRACED_FL(prt, F_TRACE_SEND)) { + trace_port_send(prt, to, mess, 1); + } + from = prt->common.id; + } /* send message */ ERL_MESSAGE_TOKEN(factory.message) = am_undefined; - erts_queue_message(rp, &rp_locks, factory.message, mess); + erts_queue_message(rp, rp_locks, factory.message, mess, from); } else if (res == -2) { /* this clause only happens when we were requested to @@ -7869,11 +7881,13 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size) * (driver version 3.1, NIF version 2.7) */ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) { -#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS) - sip->dirty_scheduler_support = 1; + sip->dirty_scheduler_support = +#ifdef ERTS_DIRTY_SCHEDULERS + 1 #else - sip->dirty_scheduler_support = 0; + 0 #endif + ; } } diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 748fba15c7..f303d4f167 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -34,6 +34,10 @@ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) #endif +#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ERTS_SMP) +# error "Dirty schedulers not supported without smp support" +#endif + #ifdef ERTS_INLINE # ifndef ERTS_CAN_INLINE # define ERTS_CAN_INLINE 1 @@ -92,6 +96,9 @@ #define ErtsInArea(ptr,start,nbytes) \ 
((UWord)((char*)(ptr) - (char*)(start)) < (nbytes)) +#define ErtsContainerStruct(ptr, type, member) \ + (type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)) + #if defined (__WIN32__) # include "erl_win_sys.h" #else diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 68006e7ef3..cedc88e5fe 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -2263,7 +2263,7 @@ static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment * { ErtsMessage *mp = erts_alloc_message(0, NULL); mp->data.heap_frag = bp; - erts_queue_message(p, NULL /* only used for smp build */, mp, message); + erts_queue_message(p, 0, mp, message, am_system); } #endif }
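The ErtsContainerStruct macro added to sys.h above is the classic container_of idiom: given a pointer to a member embedded in a larger struct, it computes a pointer to the enclosing struct, and the "1 ? (ptr) : &((type *)0)->member" conditional exists only to make the compiler type-check ptr against the member's type. A minimal standalone sketch of how such a macro is used (the job/node structs are invented for this example):

#include <stdio.h>
#include <stddef.h>

#define ErtsContainerStruct(ptr, type, member) \
    (type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member))

struct node { struct node *next; };           /* intrusive link field */
struct job  { int id; struct node link; };    /* link embedded in a job */

int main(void)
{
    struct job j = { 42, { NULL } };
    struct node *np = &j.link;                /* e.g. taken off a queue */
    /* Recover the enclosing job from the pointer to its link member. */
    struct job *owner = ErtsContainerStruct(np, struct job, link);
    printf("job id = %d\n", owner->id);       /* prints: job id = 42 */
    return 0;
}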