Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r-- | erts/emulator/beam/erl_process.c | 415
1 file changed, 201 insertions(+), 214 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 1478b71195..a21acb9a57 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -3761,6 +3761,8 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
 
     ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
     state = erts_atomic32_read_nob(&p->state);
+    ASSERT(state & ERTS_PSFLG_IN_RUNQ);
+
     if (statep)
         *statep = state;
 
@@ -3768,8 +3770,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
 
     rqi = &runq->procs.prio_info[prio];
 
-    if (p)
-        unqueue_process(runq, rpq, rqi, prio, NULL, p);
+    unqueue_process(runq, rpq, rqi, prio, NULL, p);
 
     return p;
 }
@@ -4021,9 +4022,7 @@ schedule_bound_processes(ErtsRunQueue *rq,
 static ERTS_INLINE void
 clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
 {
-#ifdef DEBUG
     erts_aint32_t old;
-#endif
     erts_aint32_t qb = prio_bit;
     if (rq == ERTS_DIRTY_CPU_RUNQ)
         qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
@@ -4031,13 +4030,8 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
         ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
         qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
     }
-#ifdef DEBUG
-    old = (int)
-#else
-    (void)
-#endif
-        erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
-    ASSERT(old & qb);
+    old = (int) erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
+    ASSERT(old & qb); (void)old;
 }
 
@@ -4088,7 +4082,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
 
             erts_runq_unlock(to_rq);
             smp_notify_inc_runq(to_rq);
-            erts_runq_lock(to_rq);
+            erts_runq_lock(rq);
         }
 
     if (rq->ports.start) {
@@ -4157,22 +4151,17 @@ evacuate_run_queue(ErtsRunQueue *rq,
                 free_proxy_proc(proc);
             else {
                 erts_aint32_t clr_bits;
-#ifdef DEBUG
-                erts_aint32_t old;
-#endif
                 clr_bits = ERTS_PSFLG_IN_RUNQ;
                 clr_bits |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
-#ifdef DEBUG
-                old =
-#else
-                (void)
-#endif
-                    erts_atomic32_read_band_mb(&proc->state,
-                                               ~clr_bits);
-                ASSERT((old & clr_bits) == clr_bits);
+                state = erts_atomic32_read_band_mb(&proc->state, ~clr_bits);
+                ASSERT((state & clr_bits) == clr_bits);
+                if (state & ERTS_PSFLG_FREE) {
+                    /* free and not queued by proxy */
+                    erts_proc_dec_refc(proc);
+                }
             }
 
             goto handle_next_proc;
@@ -6208,13 +6197,14 @@ fin_dirty_enq_s_change(Process *p,
         /* Already enqueue by someone else... */
         if (pstruct_reserved) {
             /* We reserved process struct for enqueue; clear it... */
-#ifdef DEBUG
-            erts_aint32_t old =
-#else
-            (void)
-#endif
-                erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ);
-            ASSERT(old & ERTS_PSFLG_IN_RUNQ);
+            erts_aint32_t state;
+
+            state = erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ);
+            ASSERT(state & ERTS_PSFLG_IN_RUNQ);
+
+            if (state & ERTS_PSFLG_FREE) {
+                erts_proc_dec_refc(p);
+            }
         }
         return 0;
     }
@@ -6407,8 +6397,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
                        == ERTS_PSFLG_ACTIVE));
     n &= ~running_flgs;
-    if ((a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
-        || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+    if ((!!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+         | ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE))
+        & !(a & ERTS_PSFLG_FREE)) {
         enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
     }
     a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
@@ -6655,62 +6646,72 @@ erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
     schedule_process(p, state, locks);
 }
 
+/* Enqueues the given sys task on the process and schedules it. The task may be
+ * NULL if only scheduling is desired. */
 static ERTS_INLINE erts_aint32_t
-active_sys_enqueue(Process *p, erts_aint32_t state,
-                   erts_aint32_t enable_flags, int status_locked)
-{
-    /*
-     * This function may or may not be called with status locke held.
-     * It always returns without the status lock held!
-     */
-    unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
-    erts_aint32_t n, a = state, enq_prio = -1;
-    int slocked = status_locked;
+active_sys_enqueue(Process *p, ErtsProcSysTask *sys_task,
+                   erts_aint32_t task_prio, erts_aint32_t enable_flags,
+                   erts_aint32_t state, erts_aint32_t *fail_state_p)
+{
+    int runnable_procs = erts_system_profile_flags.runnable_procs;
+    erts_aint32_t n, a, enq_prio, fail_state;
+    int already_scheduled;
+    int status_locked;
     int enqueue; /* < 0 -> use proxy */
 
-    /* Status lock prevents out of order "runnable proc" trace msgs */
-    ERTS_LC_ASSERT(slocked || !(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
-    ERTS_LC_ASSERT(!slocked || (ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
-
-    if (!prof_runnable_procs) {
-        if (slocked) {
-            erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-            slocked = 0;
-        }
-    }
-    else {
-        if (!slocked) {
-            erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
-            slocked = !0;
-        }
-    }
+    enable_flags |= ERTS_PSFLG_ACTIVE_SYS;
+    fail_state = *fail_state_p;
+    already_scheduled = 0;
+    status_locked = 0;
+    enq_prio = -1;
+    a = state;
 
+    ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+    ASSERT(fail_state & (ERTS_PSFLG_EXITING | ERTS_PSFLG_FREE));
+    ASSERT(!(fail_state & enable_flags));
     ASSERT(!(state & ERTS_PSFLG_PROXY));
 
+    /* When runnable_procs is enabled, we need to take the status lock to
+     * prevent trace messages from being sent in the wrong order. The lock must
+     * be held over the call to add2runq.
+     *
+     * Otherwise, we only need to take it when we're enqueuing a task and can
+     * safely release it before add2runq. */
+    if (sys_task || runnable_procs) {
+        erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+        status_locked = 1;
+    }
+
     while (1) {
         erts_aint32_t e;
         n = e = a;
 
-        if (a & ERTS_PSFLG_FREE)
-            goto cleanup; /* We don't want to schedule free processes... */
+        if (a & fail_state) {
+            *fail_state_p = a & fail_state;
+            goto cleanup;
+        }
 
         enqueue = ERTS_ENQUEUE_NOT;
-        n |= enable_flags;
-        n |= ERTS_PSFLG_ACTIVE_SYS;
+        n |= enable_flags;
+
         if (!(a & (ERTS_PSFLG_RUNNING
                    | ERTS_PSFLG_RUNNING_SYS
                    | ERTS_PSFLG_DIRTY_RUNNING
-                   | ERTS_PSFLG_DIRTY_RUNNING_SYS)))
+                   | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
             enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
+        }
+
         a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
-        if (a == e)
+        if (a == e) {
             break;
-        if (a == n && enqueue == ERTS_ENQUEUE_NOT)
-            goto cleanup;
+        }
+        else if (a == n && enqueue == ERTS_ENQUEUE_NOT) {
+            already_scheduled = 1;
+            break;
+        }
     }
 
-    if (prof_runnable_procs) {
-
+    if (!already_scheduled && runnable_procs) {
         if (!(a & (ERTS_PSFLG_ACTIVE_SYS
                    | ERTS_PSFLG_RUNNING
                    | ERTS_PSFLG_RUNNING_SYS
@@ -6720,19 +6721,56 @@ active_sys_enqueue(Process *p, erts_aint32_t state,
             /* We activated a prevously inactive process */
             profile_runnable_proc(p, am_active);
         }
-
-        erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-        slocked = 0;
     }
 
-    add2runq(enqueue, enq_prio, p, n, NULL);
+    if (sys_task) {
+        ErtsProcSysTaskQs *stqs = p->sys_task_qs;
 
-cleanup:
+        if (!stqs) {
+            sys_task->next = sys_task->prev = sys_task;
 
-    if (slocked)
-        erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+            stqs = proc_sys_task_queues_alloc();
 
-    ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+            stqs->qmask = 1 << task_prio;
+            stqs->ncount = 0;
+            stqs->q[PRIORITY_MAX] = NULL;
+            stqs->q[PRIORITY_HIGH] = NULL;
+            stqs->q[PRIORITY_NORMAL] = NULL;
+            stqs->q[PRIORITY_LOW] = NULL;
+            stqs->q[task_prio] = sys_task;
+
+            p->sys_task_qs = stqs;
+        }
+        else {
+            if (!stqs->q[task_prio]) {
+                sys_task->next = sys_task->prev = sys_task;
+
+                stqs->q[task_prio] = sys_task;
+                stqs->qmask |= 1 << task_prio;
+            }
+            else {
+                sys_task->next = stqs->q[task_prio];
+                sys_task->prev = stqs->q[task_prio]->prev;
+                sys_task->next->prev = sys_task;
+                sys_task->prev->next = sys_task;
+                ASSERT(stqs->qmask & (1 << task_prio));
+            }
+        }
+    }
+
+    if (status_locked && !runnable_procs) {
+        erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+        status_locked = 0;
+    }
+
+    if (!already_scheduled) {
+        add2runq(enqueue, enq_prio, p, n, NULL);
+    }
+
+cleanup:
+    if (status_locked) {
+        erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+    }
 
     return n;
 }
@@ -6740,103 +6778,41 @@ cleanup:
 erts_aint32_t
 erts_proc_sys_schedule(Process *p, erts_aint32_t state, erts_aint32_t enable_flag)
 {
-    /* We are not allowed to call this function with status lock held... */
-    return active_sys_enqueue(p, state, enable_flag, 0);
+    erts_aint32_t fail_state = ERTS_PSFLG_FREE;
+
+    return active_sys_enqueue(p, NULL, 0, enable_flag, state, &fail_state);
 }
 
 static int
 schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
                           erts_aint32_t *fail_state_p)
 {
-    int res;
-    int locked;
-    ErtsProcSysTaskQs *stqs, *free_stqs;
     erts_aint32_t fail_state, state;
 
-    fail_state = *fail_state_p;
-
-    res = 1; /* prepare for success */
-    st->next = st->prev = st; /* Prep for empty prio queue */
+    /* Elevate priority if needed. */
    state = erts_atomic32_read_nob(&p->state);
-    locked = 0;
-    free_stqs = NULL;
-    if (state & ERTS_PSFLG_SYS_TASKS)
-        stqs = NULL;
-    else {
-    alloc_qs:
-        stqs = proc_sys_task_queues_alloc();
-        stqs->qmask = 1 << prio;
-        stqs->ncount = 0;
-        stqs->q[PRIORITY_MAX] = NULL;
-        stqs->q[PRIORITY_HIGH] = NULL;
-        stqs->q[PRIORITY_NORMAL] = NULL;
-        stqs->q[PRIORITY_LOW] = NULL;
-        stqs->q[prio] = st;
-    }
-
-    if (!locked) {
-        locked = 1;
-        erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
-
-        state = erts_atomic32_read_nob(&p->state);
-        if (state & fail_state) {
-            erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-            *fail_state_p = (state & fail_state);
-            free_stqs = stqs;
-            res = 0;
-            goto cleanup;
-        }
-    }
-
-    if (!p->sys_task_qs) {
-        if (stqs)
-            p->sys_task_qs = stqs;
-        else
-            goto alloc_qs;
-    }
-    else {
-        free_stqs = stqs;
-        stqs = p->sys_task_qs;
-        if (!stqs->q[prio]) {
-            stqs->q[prio] = st;
-            stqs->qmask |= 1 << prio;
-        }
-        else {
-            st->next = stqs->q[prio];
-            st->prev = stqs->q[prio]->prev;
-            st->next->prev = st;
-            st->prev->next = st;
-            ASSERT(stqs->qmask & (1 << prio));
-        }
-    }
-
     if (ERTS_PSFLGS_GET_ACT_PRIO(state) > prio) {
-        erts_aint32_t n, a, e;
-        /* Need to elevate actual prio */
-
-        a = state;
-        do {
-            if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
-                n = a;
-                break;
-            }
-            n = e = a;
-            n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
-            n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
-            a = erts_atomic32_cmpxchg_nob(&p->state, n, e);
-        } while (a != e);
-        state = n;
-    }
+        erts_aint32_t n, a, e;
 
-    /* active_sys_enqueue() always return with status lock unlocked */
-    (void) active_sys_enqueue(p, state, ERTS_PSFLG_SYS_TASKS, locked);
+        a = state;
+        do {
+            if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
+                n = a;
+                break;
+            }
+            n = e = a;
+            n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+            n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+            a = erts_atomic32_cmpxchg_nob(&p->state, n, e);
        } while (a != e);
 
-cleanup:
+        state = n;
+    }
 
-    if (free_stqs)
-        proc_sys_task_queues_free(free_stqs);
+    fail_state = *fail_state_p;
 
-    return res;
+    return !(active_sys_enqueue(p, st, prio, ERTS_PSFLG_SYS_TASKS,
+                                state, fail_state_p) & fail_state);
 }
 
 static ERTS_INLINE int
@@ -7192,9 +7168,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
     Uint32 nrml_prio, dcpu_prio, dio_prio;
     ErtsSchedType exec_type;
     ErtsRunQueue *exec_rq;
-#ifdef DEBUG
     erts_aint32_t dbg_val;
-#endif
 
     ASSERT(schdlr_sspnd.msb.ongoing);
@@ -7309,16 +7283,12 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
             * Suspend this scheduler and wake up scheduler
             * number one of another type...
             */
-#ifdef DEBUG
            dbg_val =
-#else
-           (void)
-#endif
                erts_atomic32_read_bset_mb(&esdp->ssi->flags,
                                           (ERTS_SSI_FLG_SUSPENDED
                                            | ERTS_SSI_FLG_MSB_EXEC),
                                           ERTS_SSI_FLG_SUSPENDED);
-           ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC);
+           ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC); (void)dbg_val;
 
            switch (exec_type) {
            case ERTS_SCHED_NORMAL:
@@ -7336,11 +7306,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
                break;
            }
 
-#ifdef DEBUG
            dbg_val =
-#else
-           (void)
-#endif
                erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags,
                                           (ERTS_SSI_FLG_SUSPENDED
                                            | ERTS_SSI_FLG_MSB_EXEC),
@@ -8905,11 +8871,8 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
        suspend = 1;
 
     if (suspend) {
-#ifdef DEBUG
-       int res =
-#endif
-           suspend_process(c_p, c_p);
-       ASSERT(res);
+       int res = suspend_process(c_p, c_p);
+       ASSERT(res); (void)res;
     }
 
     if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
@@ -9096,6 +9059,9 @@ unlock_lock_rq(int pre_free, void *vrq)
 }
 
 
+static void trace_schedule_in(Process *p, erts_aint32_t state);
+static void trace_schedule_out(Process *p, erts_aint32_t state);
+
 /*
  * schedule() is called from BEAM (process_main()) or HiPE
  * (hipe_mode_switch()) when the current process is to be
@@ -9186,6 +9152,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
     internal_sched_out_proc:
 
        ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+       ASSERT(p->scheduler_data || ERTS_SCHEDULER_IS_DIRTY(esdp));
 
        ASSERT(actual_reds >= 0);
        if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
@@ -9200,22 +9167,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
 
        state = erts_atomic32_read_nob(&p->state);
 
-       if (IS_TRACED(p)) {
-           if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
-               erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
-           if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
-               if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
-                   trace_sched(p, ERTS_PROC_LOCK_MAIN,
-                               ((state & ERTS_PSFLG_FREE)
-                                ? am_out_exited
-                                : am_out_exiting));
-           }
-           else {
-               if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
-                   ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
-                   trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
-           }
-       }
+       if (IS_TRACED(p))
+           trace_schedule_out(p, state);
 
        erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
@@ -9562,6 +9515,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
            }
            else if (state & ERTS_PSFLG_FREE) {
                /* free and not queued by proxy */
+               ASSERT(state & ERTS_PSFLG_IN_RUNQ);
                erts_proc_dec_refc(p);
            }
            if (!is_normal_sched)
@@ -9618,15 +9572,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
            state = erts_atomic32_read_nob(&p->state);
 
            if (is_normal_sched) {
+               ASSERT(!p->scheduler_data);
+               p->scheduler_data = esdp;
                if ((!!(state & ERTS_PSFLGS_DIRTY_WORK))
-                   & (!(state & ERTS_PSFLG_ACTIVE_SYS))) {
+                   & (!(state & ERTS_PSFLG_RUNNING_SYS))) {
                    /* Migrate to dirty scheduler... */
                sunlock_sched_out_proc:
                    erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+                   if (IS_TRACED(p))
+                       trace_schedule_in(p, state);
                    goto sched_out_proc;
                }
-               ASSERT(!p->scheduler_data);
-               p->scheduler_data = esdp;
            }
            else {
                if (!(state & ERTS_PSFLGS_DIRTY_WORK)) {
@@ -9658,29 +9614,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
 
        erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
 
-       /* Clear tracer if it has been removed */
-       if (IS_TRACED(p) && erts_is_tracer_proc_enabled(
-               p, ERTS_PROC_LOCK_MAIN, &p->common)) {
-
-           if (state & ERTS_PSFLG_EXITING) {
-               if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
-                   trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
-           }
-           else {
-               if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
-                   ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
-                   trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
-           }
-           if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
-               erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
-           }
-       }
+       if (IS_TRACED(p))
+           trace_schedule_in(p, state);
 
        if (is_normal_sched) {
            if (state & ERTS_PSFLG_RUNNING_SYS) {
                if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) {
                    int local_only = (!!(p->flags & F_LOCAL_SIGS_ONLY)
-                                     & !(state & ERTS_PSFLG_SUSPENDED));
+                                     & !(state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLGS_DIRTY_WORK)));
                    if (!local_only | !!(state & ERTS_PSFLG_SIG_Q)) {
                        int sig_reds;
                        /*
@@ -9838,6 +9779,53 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
     }
 }
 
+static void
+trace_schedule_in(Process *p, erts_aint32_t state)
+{
+    ASSERT(IS_TRACED(p));
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCK_MAIN);
+
+    /* Clear tracer if it has been removed */
+    if (erts_is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common)) {
+
+        if (state & ERTS_PSFLG_EXITING) {
+            if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+                trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
+        }
+        else {
+            if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+                ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+                trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
+        }
+
+        if (IS_TRACED_FL(p, F_TRACE_CALLS))
+            erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
+    }
+
+}
+
+static void
+trace_schedule_out(Process *p, erts_aint32_t state)
+{
+    ASSERT(IS_TRACED(p));
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCK_MAIN);
+
+    if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
+        erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
+
+    if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
+        if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+            trace_sched(p, ERTS_PROC_LOCK_MAIN,
+                        ((state & ERTS_PSFLG_FREE)
+                         ? am_out_exited
+                         : am_out_exiting));
+    }
+    else {
+        if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+            ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+            trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
+    }
+}
+
 static int
 notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
                          Eterm st_result, int normal_sched)
@@ -9889,6 +9877,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
     ASSERT(hp_start + hsz == hp);
 #endif
 
+    ERL_MESSAGE_TOKEN(mp) = am_undefined;
     erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
 
     if (c_p == rp)
@@ -12481,9 +12470,7 @@ erts_continue_exit_process(Process *p)
 
  yield:
 
-#ifdef DEBUG
     ASSERT(yield_allowed);
-#endif
 
     ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
     ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
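
Notes on selected hunks

A recurring cleanup in this diff (clear_proc_dirty_queue_bit, evacuate_run_queue, fin_dirty_enq_s_change, msb_scheduler_type_switch, erts_suspend, erts_continue_exit_process) replaces the `#ifdef DEBUG` / `#else (void)` / `#endif` wrapping around atomic reads with an unconditional assignment followed by `ASSERT(...); (void)var;`. The read, and its side effect, always happens; only the assertion compiles away in release builds, and the `(void)` cast keeps the variable from triggering unused-variable warnings. A minimal standalone sketch of the pattern, where ASSERT, the flag value, and C11 atomics are stand-ins for the emulator's own ASSERT macro and erts_atomic32 API:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #ifdef DEBUG
    #define ASSERT(expr) assert(expr)
    #else
    #define ASSERT(expr) ((void)0)
    #endif

    #define FLAG_IN_RUNQ (1u << 3)

    static atomic_uint state_var = FLAG_IN_RUNQ | 1u;

    int main(void)
    {
        /* Always perform the fetch-and-clear; assert on the old value in
         * debug builds and explicitly discard it otherwise. */
        unsigned old = atomic_fetch_and(&state_var, ~FLAG_IN_RUNQ);

        ASSERT(old & FLAG_IN_RUNQ);
        (void)old; /* no warning when ASSERT expands to nothing */

        printf("state is now %#x\n", (unsigned)atomic_load(&state_var));
        return 0;
    }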
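The evacuate_run_queue hunk at old line 4088 fixes which lock is re-taken after waking the destination queue: the function must return holding the lock on the queue being drained (`rq`), not on `to_rq`. A stand-in sketch of that invariant, using pthread mutexes in place of ERTS run-queue locks (the real choreography in evacuate_run_queue is more involved; this only shows the lock-pairing rule the fix restores):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        const char *name;
    } RunQueue;

    static void notify(RunQueue *q)
    {
        /* Stand-in for smp_notify_inc_runq(): wake a sleeping scheduler. */
        printf("notify %s\n", q->name);
    }

    /* Called with rq->lock held; the caller expects it to still be held on
     * return. Re-taking to_rq->lock instead (the old code) left rq unlocked
     * and to_rq locked, which is the bug the hunk fixes. */
    static void hand_off(RunQueue *rq, RunQueue *to_rq)
    {
        pthread_mutex_unlock(&rq->lock);
        pthread_mutex_lock(&to_rq->lock);
        /* ... move evacuated work onto to_rq ... */
        pthread_mutex_unlock(&to_rq->lock);

        notify(to_rq);
        pthread_mutex_lock(&rq->lock); /* the fix: re-lock rq, not to_rq */
    }

    int main(void)
    {
        RunQueue rq = { PTHREAD_MUTEX_INITIALIZER, "rq" };
        RunQueue to_rq = { PTHREAD_MUTEX_INITIALIZER, "to_rq" };

        pthread_mutex_lock(&rq.lock);
        hand_off(&rq, &to_rq);
        pthread_mutex_unlock(&rq.lock); /* still held, as the caller expects */
        return 0;
    }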
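Several hunks (evacuate_run_queue, fin_dirty_enq_s_change, and the erts_schedule assertion at old line 9562) add `erts_proc_dec_refc()` calls for processes that carry ERTS_PSFLG_FREE when their ERTS_PSFLG_IN_RUNQ bit is cleared: a queued process holds a reference to its process struct, and whichever thread dequeues an already-freed process must release that reference exactly once. A simplified model of that ownership rule; the flag values, types, and names below are invented for illustration:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PSFLG_IN_RUNQ (1u << 0)
    #define PSFLG_FREE    (1u << 1)

    typedef struct {
        atomic_uint state;
        atomic_int refc;
    } Proc;

    static void proc_dec_refc(Proc *p)
    {
        if (atomic_fetch_sub(&p->refc, 1) == 1) {
            printf("last reference dropped; freeing process struct\n");
            free(p);
        }
    }

    static void drop_from_runq(Proc *p)
    {
        /* Clear IN_RUNQ and inspect the flags that were set before. */
        unsigned old = atomic_fetch_and(&p->state, ~PSFLG_IN_RUNQ);

        if (old & PSFLG_FREE) {
            /* Freed while queued (and not represented by a proxy): the
             * queue's reference must be dropped here, exactly once. */
            proc_dec_refc(p);
        }
    }

    int main(void)
    {
        Proc *p = malloc(sizeof *p);
        if (!p)
            return 1;

        atomic_init(&p->state, PSFLG_IN_RUNQ | PSFLG_FREE);
        atomic_init(&p->refc, 1); /* the run queue's reference */

        drop_from_runq(p);
        return 0;
    }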
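In the schedule_out_process() hunk, the short-circuit `||` condition becomes a bitwise expression with an added ERTS_PSFLG_FREE guard: `!!` normalizes each test to 0 or 1 so `|` and `&` can combine them without branches, and a free process is never enqueued. The same shape with made-up flag names (the real condition also tests ERTS_PSFLG_DIRTY_ACTIVE_SYS):

    #include <stdio.h>

    #define F_ACTIVE_SYS (1u << 0)
    #define F_ACTIVE     (1u << 1)
    #define F_SUSPENDED  (1u << 2)
    #define F_FREE       (1u << 3)

    static int should_enqueue(unsigned a)
    {
        /* "Has system work, or is active and not suspended, and in either
         * case is not free." */
        return (!!(a & F_ACTIVE_SYS)
                | ((a & (F_ACTIVE | F_SUSPENDED)) == F_ACTIVE))
               & !(a & F_FREE);
    }

    int main(void)
    {
        printf("%d\n", should_enqueue(F_ACTIVE));               /* 1 */
        printf("%d\n", should_enqueue(F_ACTIVE | F_SUSPENDED)); /* 0 */
        printf("%d\n", should_enqueue(F_ACTIVE | F_FREE));      /* 0: never enqueue free */
        return 0;
    }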
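active_sys_enqueue() now links the task into a per-priority circular doubly-linked list whose head is `stqs->q[task_prio]`: an empty queue is a task pointing at itself, and inserting before the head appends at the tail. A self-contained sketch of that linking with a simplified task type (the real ErtsProcSysTask carries much more state):

    #include <stdio.h>

    typedef struct task {
        struct task *next, *prev;
        int id;
    } Task;

    static void enqueue_task(Task **queue, Task *t)
    {
        if (!*queue) {
            /* Empty queue: the task links to itself. */
            t->next = t->prev = t;
            *queue = t;
        }
        else {
            /* Insert just before the head, i.e. at the tail of the ring. */
            t->next = *queue;
            t->prev = (*queue)->prev;
            t->next->prev = t;
            t->prev->next = t;
        }
    }

    int main(void)
    {
        Task a = {0, 0, 1}, b = {0, 0, 2}, c = {0, 0, 3};
        Task *q = NULL;

        enqueue_task(&q, &a);
        enqueue_task(&q, &b);
        enqueue_task(&q, &c);

        /* Walk the ring once, starting at the head: prints 1, 2, 3. */
        for (Task *t = q;;) {
            printf("task %d\n", t->id);
            t = t->next;
            if (t == q)
                break;
        }
        return 0;
    }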
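schedule_process_sys_task() keeps the compare-and-exchange retry loop for elevating a process's actual priority (active_sys_enqueue() uses the same shape for its state transition). The loop recomputes the new state from a fresh snapshot each time the exchange fails, and gives up early if another thread already elevated the priority. A runnable C11 approximation; the field layout is invented, but as in ERTS a numerically smaller priority is more urgent (PRIORITY_MAX is 0):

    #include <stdatomic.h>
    #include <stdio.h>

    /* A 2-bit priority field; the mask/offset names mirror the shape of
     * ERTS_PSFLGS_ACT_PRIO_MASK/_OFFSET but the values are made up. */
    #define ACT_PRIO_OFFSET 0u
    #define ACT_PRIO_MASK   (3u << ACT_PRIO_OFFSET)
    #define GET_ACT_PRIO(s) (((s) & ACT_PRIO_MASK) >> ACT_PRIO_OFFSET)

    static unsigned elevate_prio(atomic_uint *state, unsigned prio)
    {
        unsigned a = atomic_load(state);

        for (;;) {
            unsigned n;

            if (GET_ACT_PRIO(a) <= prio)
                return a; /* someone already made it at least this urgent */

            n = (a & ~ACT_PRIO_MASK) | (prio << ACT_PRIO_OFFSET);

            /* On failure, `a` is refreshed with the current value and the
             * new state is recomputed from that snapshot. */
            if (atomic_compare_exchange_strong(state, &a, n))
                return n;
        }
    }

    int main(void)
    {
        atomic_uint st = 3u; /* lowest priority */
        printf("state after elevation: %u\n", elevate_prio(&st, 1u));
        return 0;
    }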