Diffstat (limited to 'erts/emulator/beam')
 erts/emulator/beam/beam_bif_load.c  |   2
 erts/emulator/beam/beam_emu.c       | 230
 erts/emulator/beam/erl_bif_info.c   |   4
 erts/emulator/beam/erl_gc.c         |  39
 erts/emulator/beam/erl_hl_timer.c   |   5
 erts/emulator/beam/erl_lock_check.c |   1
 erts/emulator/beam/erl_process.c    | 240
 erts/emulator/beam/erl_process.h    |   2
 8 files changed, 390 insertions(+), 133 deletions(-)
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 1307fe22b5..e2a0b8818e 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -658,7 +658,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
     }
     else if (modp->old.code_hdr) {
 	erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
-	erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+	erts_dsprintf(dsbufp, "Module %T must be purged before deleting\n",
 		      BIF_ARG_1);
 	erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
 	ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 59a9ea1417..66bccedd94 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1047,9 +1047,11 @@ static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
 				  Eterm* reg, BifFunction bf) NOINLINE;
 static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
 				     Eterm* reg, Eterm func) NOINLINE;
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
+			      BeamInstr *I, Uint offs) NOINLINE;
 static BeamInstr* apply(Process* p, Eterm module, Eterm function,
-			Eterm args, Eterm* reg) NOINLINE;
+			Eterm args, Eterm* reg,
+			BeamInstr *I, Uint offs) NOINLINE;
 static BeamInstr* call_fun(Process* p, int arity,
 			   Eterm* reg, Eterm args) NOINLINE;
 static BeamInstr* apply_fun(Process* p, Eterm fun,
@@ -3194,7 +3196,7 @@ do {						\
 OpCase(i_apply): {
     BeamInstr *next;
     HEAVY_SWAPOUT;
-    next = apply(c_p, r(0), x(1), x(2), reg);
+    next = apply(c_p, r(0), x(1), x(2), reg, NULL, 0);
     HEAVY_SWAPIN;
     if (next != NULL) {
 	SET_CP(c_p, I+1);
@@ -3208,7 +3210,7 @@ do {						\
 OpCase(i_apply_last_P): {
     BeamInstr *next;
     HEAVY_SWAPOUT;
-    next = apply(c_p, r(0), x(1), x(2), reg);
+    next = apply(c_p, r(0), x(1), x(2), reg, I, Arg(0));
     HEAVY_SWAPIN;
     if (next != NULL) {
 	SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3223,7 +3225,7 @@ do {						\
 OpCase(i_apply_only): {
     BeamInstr *next;
     HEAVY_SWAPOUT;
-    next = apply(c_p, r(0), x(1), x(2), reg);
+    next = apply(c_p, r(0), x(1), x(2), reg, I, 0);
     HEAVY_SWAPIN;
     if (next != NULL) {
 	SET_I(next);
@@ -3237,7 +3239,7 @@ do {						\
     BeamInstr *next;

     HEAVY_SWAPOUT;
-    next = fixed_apply(c_p, reg, Arg(0));
+    next = fixed_apply(c_p, reg, Arg(0), NULL, 0);
     HEAVY_SWAPIN;
     if (next != NULL) {
 	SET_CP(c_p, I+2);
@@ -3252,7 +3254,7 @@ do {						\
     BeamInstr *next;

     HEAVY_SWAPOUT;
-    next = fixed_apply(c_p, reg, Arg(0));
+    next = fixed_apply(c_p, reg, Arg(0), I, Arg(1));
     HEAVY_SWAPIN;
     if (next != NULL) {
 	SET_CP(c_p, (BeamInstr *) E[0]);
@@ -5844,12 +5846,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
     s->depth = 0;

     /*
-     * If the failure was in a BIF other than 'error', 'exit' or
-     * 'throw', find the bif-table index and save the argument
-     * registers by consing up an arglist.
+     * If the failure was in a BIF other than 'error/1', 'error/2',
+     * 'exit/1' or 'throw/1', find the bif-table index and save the
+     * argument registers by consing up an arglist.
      */
-    if (bf != NULL && bf != error_1 && bf != error_2 &&
-	bf != exit_1 && bf != throw_1) {
+    if (bf != NULL && bf != error_1 && bf != error_2 && bf != exit_1
+	&& bf != throw_1 && bf != wrap_error_1 && bf != wrap_error_2
+	&& bf != wrap_exit_1 && bf != wrap_throw_1) {
 	int i;
 	int a = 0;
 	for (i = 0; i < BIF_SIZE; i++) {
@@ -5945,12 +5948,30 @@ erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
 	    p->cp) {
 	    /* Cannot follow cp here - code may be unloaded */
 	    BeamInstr *cpp = p->cp;
+	    int trace_cp;
 	    if (cpp == beam_exception_trace || cpp == beam_return_trace) {
 		/* Skip return_trace parameters */
 		ptr += 2;
+		trace_cp = 1;
 	    } else if (cpp == beam_return_to_trace) {
 		/* Skip return_to_trace parameters */
 		ptr += 1;
+		trace_cp = 1;
+	    }
+	    else {
+		trace_cp = 0;
+	    }
+	    if (trace_cp && s->pc == cpp) {
+		/*
+		 * If the process' 'cp' points to a return/exception
+		 * trace instruction and that 'cp' has been saved as
+		 * 'pc' in the stacktrace, we need to update 'pc' in
+		 * the stacktrace with the actual 'cp' located on top
+		 * of the stack; otherwise, we will lose the topmost
+		 * stackframe when building the stacktrace.
+		 */
+		ASSERT(is_CP(p->stop[0]));
+		s->pc = cp_val(p->stop[0]);
 	    }
 	}
 	while (ptr < STACK_START(p) && depth > 0) {
@@ -6070,12 +6091,15 @@ build_stacktrace(Process* c_p, Eterm exc) {
 	erts_set_current_function(&fi, s->current);
     }

+    depth = s->depth;
     /*
-     * If fi.current is still NULL, default to the initial function
+     * If fi.current is still NULL, and we have no
+     * stack at all, default to the initial function
      * (e.g. spawn_link(erlang, abs, [1])).
      */
     if (fi.current == NULL) {
-	erts_set_current_function(&fi, c_p->u.initial);
+	if (depth <= 0)
+	    erts_set_current_function(&fi, c_p->u.initial);
 	args = am_true; /* Just in case */
     } else {
 	args = get_args_from_exc(exc);
@@ -6085,10 +6109,9 @@ build_stacktrace(Process* c_p, Eterm exc) {
      * Look up all saved continuation pointers and calculate
      * needed heap space.
      */
-    depth = s->depth;
     stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
 					     depth*sizeof(FunctionInfo));
-    heap_size = fi.needed + 2;
+    heap_size = fi.current ? fi.needed + 2 : 0;
     for (i = 0; i < depth; i++) {
 	erts_lookup_function_info(stkp, s->trace[i], 1);
 	if (stkp->current) {
@@ -6107,8 +6130,10 @@ build_stacktrace(Process* c_p, Eterm exc) {
 	res = CONS(hp, mfa, res);
 	hp += 2;
     }
-    hp = erts_build_mfa_item(&fi, hp, args, &mfa);
-    res = CONS(hp, mfa, res);
+    if (fi.current) {
+	hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+	res = CONS(hp, mfa, res);
+    }

     erts_free(ERTS_ALC_T_TMP, (void *) stk);
     return res;
@@ -6205,8 +6230,107 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
     return ep;
 }

+static ERTS_INLINE void
+apply_bif_error_adjustment(Process *p, Export *ep,
+			   Eterm *reg, Uint arity,
+			   BeamInstr *I, Uint stack_offset)
+{
+    /*
+     * I is only set when the apply is a tail call, i.e.,
+     * from the instructions i_apply_only, i_apply_last_P,
+     * and apply_last_IP.
+     */
+    if (I
+	&& ep->code[3] == (BeamInstr) em_apply_bif
+	&& (ep == bif_export[BIF_error_1]
+	    || ep == bif_export[BIF_error_2]
+	    || ep == bif_export[BIF_exit_1]
+	    || ep == bif_export[BIF_throw_1])) {
+	/*
+	 * We are about to tail apply one of the BIFs
+	 * erlang:error/1, erlang:error/2, erlang:exit/1,
+	 * or erlang:throw/1. Error handling of these BIFs is
+	 * special!
+	 *
+	 * We need 'p->cp' to point into the calling
+	 * function when handling the error after the BIF has
+	 * been applied. This is needed in order to get the
+	 * topmost stackframe correct.
+	 * Without the following adjustment, 'p->cp' would
+	 * instead point into the function that called the
+	 * current function when handling the error. We add a
+	 * dummy stackframe in order to achieve this.
+	 *
+	 * Note that these BIFs unconditionally will cause
+	 * an exception to be raised. That is, our modifications
+	 * of 'p->cp' as well as the stack will be corrected by
+	 * the error handling code.
+	 *
+	 * If we find an exception/return-to trace continuation
+	 * pointer as the topmost continuation pointer, we do not
+	 * need to do anything since the information already will
+	 * be available for generation of the stacktrace.
+	 */
+	int apply_only = stack_offset == 0;
+	BeamInstr *cpp;
+
+	if (apply_only) {
+	    ASSERT(p->cp != NULL);
+	    cpp = p->cp;
+	}
+	else {
+	    ASSERT(is_CP(p->stop[0]));
+	    cpp = cp_val(p->stop[0]);
+	}
+
+	if (cpp != beam_exception_trace
+	    && cpp != beam_return_trace
+	    && cpp != beam_return_to_trace) {
+	    Uint need = stack_offset /* bytes */ / sizeof(Eterm);
+	    if (need == 0)
+		need = 1; /* i_apply_only */
+	    if (p->stop - p->htop < need)
+		erts_garbage_collect(p, (int) need, reg, arity+1);
+	    p->stop -= need;
+
+	    if (apply_only) {
+		/*
+		 * Called from the i_apply_only instruction.
+		 *
+		 * 'p->cp' contains a continuation pointer pointing
+		 * into the function that called the current
+		 * function. We push that continuation pointer onto
+		 * the stack, and set 'p->cp' to point into the
+		 * current function.
+		 */
+		p->stop[0] = make_cp(p->cp);
+		p->cp = I;
+	    }
+	    else {
+		/*
+		 * Called from an i_apply_last_P or apply_last_IP
+		 * instruction.
+		 *
+		 * After we return, the calling instruction will read
+		 * a continuation pointer from the stack and write it
+		 * to 'p->cp', and then remove the topmost stackframe
+		 * of size 'stack_offset'.
+		 *
+		 * We have sized the dummy stackframe so that it will
+		 * be removed by the instruction we currently are
+		 * executing, leaving the stackframe that normally
+		 * would have been removed intact.
+		 */
+		p->stop[0] = make_cp(I);
+	    }
+	}
+    }
+}
+
 static BeamInstr*
-apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+apply(Process* p, Eterm module, Eterm function,
+      Eterm args, Eterm* reg,
+      BeamInstr *I, Uint stack_offset)
 {
     int arity;
     Export* ep;
@@ -6231,21 +6355,54 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
 	return 0;
     }

-    /* The module argument may be either an atom or an abstract module
-     * (currently implemented using tuples, but this might change).
-     */
-    this = THE_NON_VALUE;
-    if (is_not_atom(module)) {
-	Eterm* tp;
+    while (1) {
+	Eterm m, f, a;
+	/* The module argument may be either an atom or an abstract module
+	 * (currently implemented using tuples, but this might change).
+	 */
+	this = THE_NON_VALUE;
+	if (is_not_atom(module)) {
+	    Eterm* tp;
+
+	    if (is_not_tuple(module)) goto error;
+	    tp = tuple_val(module);
+	    if (arityval(tp[0]) < 1) goto error;
+	    this = module;
+	    module = tp[1];
+	    if (is_not_atom(module)) goto error;
+	}

-	if (is_not_tuple(module)) goto error;
-	tp = tuple_val(module);
-	if (arityval(tp[0]) < 1) goto error;
-	this = module;
-	module = tp[1];
-	if (is_not_atom(module)) goto error;
+	if (module != am_erlang || function != am_apply)
+	    break;
+
+	/* Adjust for multiple apply of apply/3... */
+
+	a = args;
+	if (is_list(a)) {
+	    Eterm *consp = list_val(a);
+	    m = CAR(consp);
+	    a = CDR(consp);
+	    if (is_list(a)) {
+		consp = list_val(a);
+		f = CAR(consp);
+		a = CDR(consp);
+		if (is_list(a)) {
+		    consp = list_val(a);
+		    a = CAR(consp);
+		    if (is_nil(CDR(consp))) {
+			/* erlang:apply/3 */
+			module = m;
+			function = f;
+			args = a;
+			if (is_not_atom(f))
+			    goto error;
+			continue;
+		    }
+		}
+	    }
+	}
+	break; /* != erlang:apply/3 */
     }
-
     /*
      * Walk down the 3rd parameter of apply (the argument list) and copy
      * the parameters to the x registers (reg[]). If the module argument
@@ -6283,12 +6440,14 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
     } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
 	save_calls(p, ep);
     }
+    apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
     DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
     return ep->addressv[erts_active_code_ix()];
 }

 static BeamInstr*
-fixed_apply(Process* p, Eterm* reg, Uint arity)
+fixed_apply(Process* p, Eterm* reg, Uint arity,
+	    BeamInstr *I, Uint stack_offset)
 {
     Export* ep;
     Eterm module;
@@ -6318,6 +6477,10 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
 	if (is_not_atom(module)) goto error;
 	++arity;
     }
+
+    /* Handle apply of apply/3... */
+    if (module == am_erlang && function == am_apply && arity == 3)
+	return apply(p, reg[0], reg[1], reg[2], reg, I, stack_offset);

     /*
      * Get the index into the export table, or failing that the export
@@ -6332,6 +6495,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
     } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
 	save_calls(p, ep);
     }
+    apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
     DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
     return ep->addressv[erts_active_code_ix()];
 }
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 735aabbee3..88a052cad7 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1672,11 +1672,11 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
     Eterm mfa;
     Eterm res = NIL;

-    depth = 8;
+    depth = erts_backtrace_depth;
     sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
     s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
     s->depth = 0;
-    if (rp->i) {
+    if (depth > 0 && rp->i) {
 	s->trace[s->depth++] = rp->i;
 	depth--;
     }
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 561e7fbdee..a33a81babd 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -176,6 +176,13 @@ typedef struct {
     erts_smp_atomic32_t refc;
 } ErtsGCInfoReq;

+#ifdef ERTS_DIRTY_SCHEDULERS
+static struct {
+    erts_mtx_t mtx;
+    ErtsGCInfo info;
+} dirty_gc;
+#endif
+
 static ERTS_INLINE int
 gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
 {
@@ -259,6 +266,11 @@ erts_init_gc(void)
 	init_gc_info(&esdp->gc_info);
     }

+#ifdef ERTS_DIRTY_SCHEDULERS
+    erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+    init_gc_info(&dirty_gc.info);
+#endif
+
     init_gcireq_alloc();
 }
@@ -734,8 +746,19 @@ do_major_collection:
 	monitor_large_heap(p);
     }

-    esdp->gc_info.garbage_cols++;
-    esdp->gc_info.reclaimed += reclaimed_now;
+#ifdef ERTS_DIRTY_SCHEDULERS
+    if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+	erts_mtx_lock(&dirty_gc.mtx);
+	dirty_gc.info.garbage_cols++;
+	dirty_gc.info.reclaimed += reclaimed_now;
+	erts_mtx_unlock(&dirty_gc.mtx);
+    }
+    else
+#endif
+    {
+	esdp->gc_info.garbage_cols++;
+	esdp->gc_info.reclaimed += reclaimed_now;
+    }

     FLAGS(p) &= ~F_FORCE_GC;
     p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -3033,6 +3056,18 @@ reply_gc_info(void *vgcirp)
 	reclaimed = esdp->gc_info.reclaimed;
 	garbage_cols = esdp->gc_info.garbage_cols;
+#ifdef ERTS_DIRTY_SCHEDULERS
+	/*
+	 * Add dirty scheduler info when replying on
+	 * the requesting scheduler...
+	 */
+	if (gcirp->req_sched == esdp->no) {
+	    erts_mtx_lock(&dirty_gc.mtx);
+	    reclaimed += dirty_gc.info.reclaimed;
+	    garbage_cols += dirty_gc.info.garbage_cols;
+	    erts_mtx_unlock(&dirty_gc.mtx);
+	}
+#endif

     sz = 0;
     hpp = NULL;
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index d29d079fc5..647fa26811 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -2666,7 +2666,7 @@ erts_start_timer_callback(ErtsMonotonicTime tmo,
 							  tmo);
     twt = tmo < ERTS_TIMER_WHEEL_MSEC;

-    if (esdp)
+    if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
 	start_callback_timer(esdp,
 			     twt,
 			     timeout_pos,
@@ -2865,7 +2865,8 @@ btm_print(ErtsHLTimer *tmr, void *vbtmp)

     if (tmr->timeout <= btmp->now)
 	left = 0;
-    left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+    else
+	left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);

     receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
 		? tmr->receiver.name
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 06266363b5..13a4b2cd93 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -129,6 +129,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
     { "run_queue", "address" },
 #ifdef ERTS_DIRTY_SCHEDULERS
     { "dirty_run_queue_sleep_list", "address" },
+    { "dirty_gc_info", NULL },
 #endif
     { "process_table", NULL },
     { "cpu_info", NULL },
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b345c35a7e..6affbd794c 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2060,6 +2060,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
 #endif
     for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
 	ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
 	if (!erts_thr_progress_has_reached_this(current, lop->later))
 	    return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
 	awdp->later_op.first = lop->next;
@@ -6234,6 +6235,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
     int queue;
     erts_aint32_t dact, max_qbit;

+    /* Do not enqueue free process... */
+    if (actual & ERTS_PSFLG_FREE) {
+	*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+	return ERTS_ENQUEUE_NOT;
+    }
+
     /* Termination should be done on an ordinary scheduler */
     if ((*newp) & ERTS_PSFLG_EXITING) {
 	*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6426,6 +6433,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st

 /*
  * schedule_out_process() return with c_rq locked.
+ *
+ * Returns a non-zero value if the caller should decrease
+ * the reference count on the process when done with it...
  */
 static ERTS_INLINE int
 schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6480,12 +6490,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,

 	erts_smp_runq_lock(c_rq);

-	return 0;
-
+#if !defined(ERTS_SMP)
+	/* Decrement refc if process struct is free... */
+	return !!(n & ERTS_PSFLG_FREE);
+#else
+	/* Decrement refc if scheduled out from dirty scheduler... */
+	return !is_normal_sched;
+#endif
     }
     else {
 	Process* sched_p;

+	ASSERT(!(n & ERTS_PSFLG_FREE));
 	ASSERT(!(n & ERTS_PSFLG_SUSPENDED)
 	       || (n & (ERTS_PSFLG_ACTIVE_SYS
 			| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6501,11 +6517,14 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,

 	erts_smp_runq_lock(runq);

+	if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+	    erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
+
 	/* Enqueue the process */
 	enqueue_process(runq, (int) enq_prio, sched_p);

 	if (runq == c_rq)
-	    return 1;
+	    return 0;

 	erts_smp_runq_unlock(runq);

 	erts_smp_runq_lock(c_rq);

-	return 1;
+	/*
+	 * Decrement refc if the process is scheduled out by a
+	 * dirty scheduler, and we have not just scheduled
+	 * the process using the ordinary process struct
+	 * on a dirty run-queue again...
+	 */
+	return !is_normal_sched && (sched_p != p
+				    || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
     }
-
 }

 static ERTS_INLINE void
@@ -6530,8 +6555,17 @@ add2runq(int enqueue, erts_aint32_t prio,
     if (runq) {
 	Process *sched_p;

-	if (enqueue > 0)
+	if (enqueue > 0) {
 	    sched_p = proc;
+	    /*
+	     * The refc of the process struct (i.e. the true struct,
+	     * not a proxy struct) is increased while the process is
+	     * in a dirty run-queue or executing on a dirty
+	     * scheduler.
+	     */
+	    if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+		erts_proc_inc_refc(proc);
+	}
 	else {
 	    Process *pxy;
@@ -7055,12 +7089,18 @@ typedef struct {
 } ErtsSchdlrSspndResume;

 static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
 {
-    Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+    Process *p;
+    p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+			  (sched_type != ERTS_SCHED_NORMAL
+			   ? ERTS_P2P_FLG_INC_REFC
+			   : 0));
     if (p) {
 	resume_process(p, ERTS_PROC_LOCK_STATUS);
 	erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+	if (sched_type != ERTS_SCHED_NORMAL)
+	    erts_proc_dec_refc(p);
     }
 }
@@ -7069,17 +7109,20 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
 			  ErtsSchdlrSspndResume *resume)
 {
     if (is_internal_pid(resume->onln.chngr)) {
-	schdlr_sspnd_resume_proc(resume->onln.chngr);
+	schdlr_sspnd_resume_proc(sched_type,
+				 resume->onln.chngr);
 	resume->onln.chngr = NIL;
     }
     if (is_internal_pid(resume->onln.nxt)) {
-	schdlr_sspnd_resume_proc(resume->onln.nxt);
+	schdlr_sspnd_resume_proc(sched_type,
+				 resume->onln.nxt);
 	resume->onln.nxt = NIL;
     }
     while (resume->msb.chngrs) {
 	ErtsProcList *plp = resume->msb.chngrs;
 	resume->msb.chngrs = plp->next;
-	schdlr_sspnd_resume_proc(plp->pid);
+	schdlr_sspnd_resume_proc(sched_type,
+				 plp->pid);
 	proclist_destroy(plp);
     }
 }
@@ -7131,16 +7174,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)

     ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);

-    if (sched_type != ERTS_SCHED_NORMAL) {
-	if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
-	    erts_smp_runq_unlock(esdp->run_queue);
-	    erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-	    erts_smp_runq_lock(esdp->run_queue);
-	}
-	if (schdlr_sspnd.msb.ongoing)
-	    evacuate_run_queue(esdp->run_queue, &sbp);
+    if (sched_type != ERTS_SCHED_NORMAL)
 	erts_smp_runq_unlock(esdp->run_queue);
-    }
     else {
 	evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7153,8 +7188,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)

 	sched_wall_time_change(esdp, 0);

-	erts_smp_mtx_lock(&schdlr_sspnd.mtx);
     }
+    erts_smp_mtx_lock(&schdlr_sspnd.mtx);

     flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
     if (flgs & ERTS_SSI_FLG_SUSPENDED) {
@@ -7198,15 +7233,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
 			(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
 			/* resume processes that initiated the multi scheduling block... */
 			plp = msb[i]->chngq;
-			while (plp) {
-			    erts_proclist_store_last(&msb[i]->blckrs,
-						     proclist_copy(plp));
-			    plp = plp->next;
-			}
-			if (end_plp)
+			if (plp) {
+			    ASSERT(end_plp);
+			    ASSERT(msb[i]->ongoing);
+			    do {
+				erts_proclist_store_last(&msb[i]->blckrs,
+							 proclist_copy(plp));
+				plp = plp->next;
+			    } while (plp);
 			    end_plp->next = resume.msb.chngrs;
-			resume.msb.chngrs = msb[i]->chngq;
-			msb[i]->chngq = NULL;
+			    resume.msb.chngrs = msb[i]->chngq;
+			    msb[i]->chngq = NULL;
+			}
 		    }
 		}
 	    }
@@ -7270,24 +7308,14 @@ suspend_scheduler(ErtsSchedulerData *esdp)
 	    while (1) {
 		ErtsMonotonicTime current_time;
-		erts_aint32_t qmask;
 		erts_aint32_t flgs;

-		qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
-			 & ERTS_RUNQ_FLGS_QMASK);
-
-		if (sched_type != ERTS_SCHED_NORMAL) {
-		    if (qmask) {
-			erts_smp_mtx_lock(&schdlr_sspnd.mtx);
-			erts_smp_runq_lock(esdp->run_queue);
-			if (schdlr_sspnd.msb.ongoing)
-			    evacuate_run_queue(esdp->run_queue, &sbp);
-			erts_smp_runq_unlock(esdp->run_queue);
-			erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
-		    }
+		if (sched_type != ERTS_SCHED_NORMAL)
 		    aux_work = 0;
-		}
 		else {
+		    erts_aint32_t qmask;
+		    qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+			     & ERTS_RUNQ_FLGS_QMASK);

 		    aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -7415,12 +7443,23 @@ suspend_scheduler(ErtsSchedulerData *esdp)
 	schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
 	changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
-	if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
-	    && schdlr_sspnd.online == schdlr_sspnd.active) {
-	    erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
-					    ~ERTS_SCHDLR_SSPND_CHNG_MSB);
-	}
-
+	if (changing) {
+	    if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+		&& !schdlr_sspnd.msb.ongoing
+		&& schdlr_sspnd.online == schdlr_sspnd.active) {
+		erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+						~ERTS_SCHDLR_SSPND_CHNG_MSB);
+	    }
+	    if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+		&& !schdlr_sspnd.nmsb.ongoing
+		&& (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+					     ERTS_SCHED_NORMAL)
+		    == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+						ERTS_SCHED_NORMAL))) {
+		erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+						~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+	    }
+	}
 	ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
 	ASSERT((sched_type == ERTS_SCHED_NORMAL
 		? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
@@ -7553,7 +7592,7 @@ abort_sched_onln_chng_waitq(Process *p)
     erts_smp_mtx_unlock(&schdlr_sspnd.mtx);

     if (is_internal_pid(resume))
-	schdlr_sspnd_resume_proc(resume);
+	schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
 }

 ErtsSchedSuspendResult
@@ -7951,21 +7990,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
     }
     else {  /* ------ UNBLOCK ------ */
 	if (p->flags & have_blckd_flg) {
-	    ErtsProcList *plps[2];
+	    ErtsProcList **plpps[3] = {0};
 	    ErtsProcList *plp;
-	    int limit = 0;

-	    plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
-	    if (all)
-		plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+	    plpps[0] = &msbp->blckrs;
+	    if (all)
+		plpps[1] = &msbp->chngq;

-	    for (ix = 0; ix < limit; ix++) {
-		plp = plps[ix];
+	    for (ix = 0; plpps[ix]; ix++) {
+		plp = erts_proclist_peek_first(*plpps[ix]);
 		while (plp) {
 		    ErtsProcList *tmp_plp = plp;
-		    plp = erts_proclist_peek_next(msbp->blckrs, plp);
+		    plp = erts_proclist_peek_next(*plpps[ix], plp);
 		    if (erts_proclist_same(tmp_plp, p)) {
-			erts_proclist_remove(&msbp->blckrs, tmp_plp);
+			erts_proclist_remove(plpps[ix], tmp_plp);
 			proclist_destroy(tmp_plp);
 			if (!all)
 			    break;
@@ -8604,8 +8642,15 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
 	     * from being selected for normal execution regardless
 	     * of locks held or not held on it...
 	     */
-	    ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
-		     & erts_smp_atomic32_read_nob(&rp->state)));
+#ifdef DEBUG
+	    {
+		erts_aint32_t state;
+		state = erts_smp_atomic32_read_nob(&rp->state);
+		ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
+		       || !(state & (ERTS_PSFLG_RUNNING
+				     | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+	    }
+#endif

 	    if (!suspend)
 		resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
@@ -9587,40 +9632,45 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)

 	esdp->reductions += reds;

-	/* schedule_out_process() returns with rq locked! */
-	schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
-	proxy_p = NULL;
+	{
+	    int dec_refc;
+
+	    /* schedule_out_process() returns with rq locked! */
+	    dec_refc = schedule_out_process(rq, state, p,
+					    proxy_p, is_normal_sched);
+	    proxy_p = NULL;

-	ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
-				      (int) ERTS_PSFLGS_GET_USR_PRIO(state),
-				      reds,
-				      actual_reds);
+	    ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+					  (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+					  reds,
+					  actual_reds);

-	esdp->current_process = NULL;
+	    esdp->current_process = NULL;
 #ifdef ERTS_SMP
-	p->scheduler_data = NULL;
+	    p->scheduler_data = NULL;
 #endif

-	erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
-				 | ERTS_PROC_LOCK_STATUS
-				 | ERTS_PROC_LOCK_TRACE));
+	    erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+				     | ERTS_PROC_LOCK_STATUS
+				     | ERTS_PROC_LOCK_TRACE));

-	ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+	    ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);

-	if (state & ERTS_PSFLG_FREE) {
-	    if (!is_normal_sched) {
-		ASSERT(p->flags & F_DELAYED_DEL_PROC);
-		erts_proc_dec_refc(p);
-	    }
-	    else {
 #ifdef ERTS_SMP
-		ASSERT(esdp->free_process == p);
-		esdp->free_process = NULL;
-#else
-		erts_proc_dec_refc(p);
+	    if (state & ERTS_PSFLG_FREE) {
+		if (!is_normal_sched) {
+		    ASSERT(p->flags & F_DELAYED_DEL_PROC);
+		}
+		else {
+		    ASSERT(esdp->free_process == p);
+		    esdp->free_process = NULL;
+		}
+	    }
 #endif
-	    }
-	}
+
+	    if (dec_refc)
+		erts_proc_dec_refc(p);
+	}

 #ifdef ERTS_SMP
 	ASSERT(!esdp->free_process);
@@ -9896,8 +9946,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
 	    if (!(state & ERTS_PSFLG_PROXY))
 		psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
 	    else {
+		Eterm pid = p->common.id;
 		proxy_p = p;
-		p = erts_proc_lookup_raw(proxy_p->common.id);
+		p = (is_normal_sched
+		     ? erts_proc_lookup_raw(pid)
+		     : erts_pid2proc_opt(NULL, 0, pid, 0,
+					 ERTS_P2P_FLG_INC_REFC));
 		if (!p) {
 		    free_proxy_proc(proxy_p);
 		    proxy_p = NULL;
@@ -9929,6 +9983,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
 						   | ERTS_PSFLG_FREE)))
 #ifdef ERTS_DIRTY_SCHEDULERS
 			 | (((state & (ERTS_PSFLG_RUNNING
+				       | ERTS_PSFLG_FREE
 				       | ERTS_PSFLG_RUNNING_SYS
 				       | ERTS_PSFLG_EXITING))
@@ -9961,6 +10016,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
 		    /* free and not queued by proxy */
 		    erts_proc_dec_refc(p);
 		}
+		if (!is_normal_sched)
+		    erts_proc_dec_refc(p);
 		goto pick_next_process;
 	    }
 	    state = new;
@@ -13091,15 +13148,14 @@ erts_continue_exit_process(Process *p)
     }

 #ifdef ERTS_DIRTY_SCHEDULERS
-    if (a & (ERTS_PSFLG_DIRTY_RUNNING
-	     | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+    if (a & (ERTS_PSFLG_DIRTY_RUNNING
+	     | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
 	p->flags |= F_DELAYED_DEL_PROC;
 	delay_del_proc = 1;
 	/*
-	 * The dirty scheduler will also decrease
-	 * refc when done...
+	 * The dirty scheduler decreases refc
+	 * when done with the process...
 	 */
-	erts_proc_inc_refc(p);
     }
 #endif
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 9f7084c127..68fbb10602 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1684,7 +1684,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
 	return NULL;
     else {
 	ErtsProcList *res = *list;
-	if (res == *list)
+	if (res->next == *list)
 	    *list = NULL;
 	else
 	    *list = res->next;
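
The beam_emu.c changes above adjust 'p->cp' (via a dummy stackframe) so that the function that tail-applies erlang:error/1, erlang:error/2, erlang:exit/1, or erlang:throw/1 remains the topmost frame in the resulting stacktrace. A minimal Erlang-level sketch of the behavior being fixed; the module and function names are illustrative only, not part of the patch:

-module(apply_stack_demo).
-export([run/0]).

%% fail/0 tail-applies erlang:error/1 via apply/3, which is one of
%% the cases apply_bif_error_adjustment() pushes a dummy stackframe for.
fail() ->
    apply(erlang, error, [boom]).

run() ->
    try
        fail()
    catch
        error:boom ->
            %% With the adjustment, a frame for fail/0 is expected as
            %% the topmost entry instead of being lost to the tail call.
            erlang:get_stacktrace()
    end.

(erlang:get_stacktrace/0 is used here since this code predates the Class:Reason:Stacktrace catch syntax.)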
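The erl_bif_info.c hunk replaces the hard-coded depth of 8 used by process_info(Pid, current_stacktrace) with erts_backtrace_depth, the value behind the backtrace_depth system flag. A sketch of how the difference can be observed from the shell, assuming the sampled process is executing deep enough:

%% Raise the system backtrace depth, then sample another process.
Pid = spawn(fun() -> timer:sleep(infinity) end),
OldDepth = erlang:system_flag(backtrace_depth, 32),
{current_stacktrace, Frames} = erlang:process_info(Pid, current_stacktrace),
%% Frames may now hold up to 32 entries rather than at most 8.
erlang:system_flag(backtrace_depth, OldDepth),
exit(Pid, kill).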
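The erl_gc.c and erl_lock_check.c hunks collect garbage-collection counts from dirty schedulers in the separate mutex-protected dirty_gc record and fold them into the reply on the scheduler that requested the information. From Erlang this is visible only as more complete totals; a sketch, assuming the statistics BIF is served by these gc_info requests:

%% statistics(garbage_collection) returns {GCs, WordsReclaimed, 0};
%% the totals now also cover collections performed on dirty schedulers.
{GCs, Words, 0} = erlang:statistics(garbage_collection),
io:format("~p GCs, ~p words reclaimed~n", [GCs, Words]).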
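The erl_process.c suspend/resume changes are exercised through the multi_scheduling system flag, including the normal-scheduler-only block whose ERTS_SCHDLR_SSPND_CHNG_NMSB transition is cleared above. A sketch; the exact return atoms depend on what other blockers exist:

%% block_normal suspends normal schedulers but leaves dirty
%% schedulers running; the return value reflects the new state.
State1 = erlang:system_flag(multi_scheduling, block_normal),
%% State1 is typically blocked_normal.
State2 = erlang:system_flag(multi_scheduling, unblock_normal).
%% State2 is enabled once no blockers remain.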