Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r-- | erts/emulator/beam/erl_process.c | 267
1 file changed, 153 insertions, 114 deletions
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b73f9b7f92..b0276fbae0 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -148,6 +148,12 @@ extern BeamInstr beam_apply[];
 extern BeamInstr beam_exit[];
 extern BeamInstr beam_continue_exit[];
 
+#ifdef __OSE__
+/* Eager check I/O not supported on OSE yet. */
+int erts_eager_check_io = 0;
+#else
+int erts_eager_check_io = 1;
+#endif
 int erts_sched_compact_load;
 int erts_sched_balance_util = 0;
 Uint erts_no_schedulers;
@@ -590,12 +596,10 @@ erts_pre_init_process(void)
     erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
         = ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
 
-#ifdef ERTS_DIRTY_SCHEDULERS
-    erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks
-        = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS;
-    erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks
-        = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS;
-#endif
+    erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
+        = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
+    erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
+        = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
 
     /* Check that we have locks for all entries */
     for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -712,72 +716,24 @@ sched_wall_time_ts(void)
 
 #if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
 
-#ifdef ARCH_64
-
 static ERTS_INLINE Uint64
 aschedtime_read(ErtsAtomicSchedTime *var)
 {
-    return (Uint64) erts_atomic_read_nob((erts_atomic_t *) var);
+    return (Uint64) erts_atomic64_read_nob((erts_atomic64_t *) var);
 }
 
 static ERTS_INLINE void
 aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
 {
-    erts_atomic_set_nob((erts_atomic_t *) var, (erts_aint_t) val);
+    erts_atomic64_set_nob((erts_atomic64_t *) var, (erts_aint64_t) val);
 }
 
 static ERTS_INLINE void
 aschedtime_init(ErtsAtomicSchedTime *var)
 {
-    erts_atomic_init_nob((erts_atomic_t *) var, (erts_aint_t) 0);
-}
-
-#elif defined(ARCH_32)
-
-static ERTS_INLINE Uint64
-aschedtime_read(ErtsAtomicSchedTime *var)
-{
-    erts_dw_aint_t dw;
-    erts_dw_atomic_read_nob((erts_dw_atomic_t *) var, &dw);
-#ifdef ETHR_SU_DW_NAINT_T__
-    return (Uint64) dw.dw_sint;
-#else
-    {
-        Uint64 res;
-        res = (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_HIGH_WORD]);
-        res <<= 32;
-        res |= (Uint64) ((Uint32) dw.sint[ERTS_DW_AINT_LOW_WORD]);
-        return res;
-    }
-#endif
+    erts_atomic64_init_nob((erts_atomic64_t *) var, (erts_aint64_t) 0);
 }
 
-static ERTS_INLINE void
-aschedtime_set(ErtsAtomicSchedTime *var, Uint64 val)
-{
-    erts_dw_aint_t dw;
-#ifdef ETHR_SU_DW_NAINT_T__
-    dw.dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
-    dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
-    dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
-    erts_dw_atomic_set_nob((erts_dw_atomic_t *) var, &dw);
-}
-
-static ERTS_INLINE void
-aschedtime_init(ErtsAtomicSchedTime *var)
-{
-    erts_dw_aint_t dw;
-    dw.sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) 0;
-    dw.sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) 0;
-    erts_dw_atomic_init_nob((erts_dw_atomic_t *) var, &dw);
-}
-
-#else
-# error :-/
-#endif
-
 #define ERTS_GET_AVG_MAX_UNLOCKED_TRY 50
 #define ERTS_SCHED_AVG_UTIL_WRITE_MARKER (~((Uint64) 0))
@@ -2211,6 +2167,9 @@ aux_work_timeout_early_init(int no_schedulers)
     p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
                         + sizeof(erts_atomic32_t)*(no_schedulers+1))
                        + ERTS_CACHE_LINE_SIZE-1);
+    if (!p) {
+        ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+    }
     if (p & ERTS_CACHE_LINE_MASK)
         p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
     ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
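The aux_work_timeout_early_init() hunk above uses a classic manual-alignment idiom: over-allocate by one cache line minus one byte, then round the address up to the next cache-line boundary. A minimal stand-alone sketch of the same idiom, assuming a 64-byte cache line (ERTS derives the real value per platform):

#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

#define CACHE_LINE_SIZE 64                 /* assumption; ERTS computes this per platform */
#define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1)

/* Over-allocate by CACHE_LINE_SIZE-1 bytes, then round the address up to
 * the next cache-line boundary -- the same arithmetic as the
 * ERTS_CACHE_LINE_MASK manipulation in the hunk above. The unaligned
 * pointer malloc() returned must be kept if the block is ever freed;
 * the ERTS buffer lives for the VM's lifetime, so the patch only has to
 * abort on allocation failure. */
static void *
cache_aligned_alloc(size_t size)
{
    uintptr_t p = (uintptr_t) malloc(size + CACHE_LINE_SIZE - 1);
    if (!p)
        abort();   /* the patch raises ERTS_INTERNAL_ERROR() instead */
    if (p & CACHE_LINE_MASK)
        p = (p & ~(uintptr_t) CACHE_LINE_MASK) + CACHE_LINE_SIZE;
    assert((p & CACHE_LINE_MASK) == 0);
    return (void *) p;
}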
@@ -2380,29 +2339,47 @@ try_set_sys_scheduling(void)
 #endif
 
 static ERTS_INLINE int
-prepare_for_sys_schedule(ErtsSchedulerData *esdp)
+prepare_for_sys_schedule(ErtsSchedulerData *esdp, int non_blocking)
 {
+    if (non_blocking && erts_eager_check_io) {
 #ifdef ERTS_SMP
-    while (!erts_port_task_have_outstanding_io_tasks()
-           && try_set_sys_scheduling()) {
 #ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
-        if (esdp->no != 1) {
-            /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
-               then we make sure to wake scheduler 1 */
-            ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
-            clear_sys_scheduling();
-            wake_scheduler(rq);
-            return 0;
-        }
+        if (esdp->no != 1) {
+            /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+               then we make sure to wake scheduler 1 */
+            ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
+            wake_scheduler(rq);
+            return 0;
+        }
 #endif
-        if (!erts_port_task_have_outstanding_io_tasks())
+        return try_set_sys_scheduling();
+#else
             return 1;
-        clear_sys_scheduling();
+#endif
     }
-    return 0;
+    else {
+#ifdef ERTS_SMP
+        while (!erts_port_task_have_outstanding_io_tasks()
+               && try_set_sys_scheduling()) {
+#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
+            if (esdp->no != 1) {
+                /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
+                   then we make sure to wake scheduler 1 */
+                ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
+                clear_sys_scheduling();
+                wake_scheduler(rq);
+                return 0;
+            }
+#endif
+            if (!erts_port_task_have_outstanding_io_tasks())
+                return 1;
+            clear_sys_scheduling();
+        }
+        return 0;
 #else
-    return !erts_port_task_have_outstanding_io_tasks();
+        return !erts_port_task_have_outstanding_io_tasks();
 #endif
+    }
 }
 
 #ifdef ERTS_SMP
@@ -2779,7 +2756,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
      * be waiting in erl_sys_schedule()
      */
 
-    if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp)) {
+    if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp, 0)) {
 
         sched_waiting(esdp->no, rq);
@@ -2943,7 +2920,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
                  * Got to check that we still got I/O tasks; otherwise
                  * we have to continue checking for I/O...
                  */
-                if (!prepare_for_sys_schedule(esdp)) {
+                if (!prepare_for_sys_schedule(esdp, 0)) {
                     spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
                     goto tse_wait;
                 }
@@ -2965,7 +2942,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
                  * Got to check that we still got I/O tasks; otherwise
                  * we have to wait in erl_sys_schedule() after all...
                  */
-                if (!prepare_for_sys_schedule(esdp)) {
+                if (!prepare_for_sys_schedule(esdp, 0)) {
                     /*
                      * Not allowed to wait in erl_sys_schedule;
                      * do tse wait instead...
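The reworked prepare_for_sys_schedule() above now has two modes: a non-blocking variant used while the scheduler keeps running (only meaningful when erts_eager_check_io is set), and the original blocking variant used before going to sleep. A reduced sketch of that control flow, with hypothetical single-threaded stubs standing in for the real atomic flag and port-task query:

static int sys_scheduling_taken;   /* stand-in; the real flag is set with an atomic CAS */

static int try_set_sys_scheduling(void)
{
    return sys_scheduling_taken ? 0 : (sys_scheduling_taken = 1);
}

static void clear_sys_scheduling(void) { sys_scheduling_taken = 0; }
static int have_outstanding_io_tasks(void) { return 0; }   /* stub */

static int eager_check_io = 1;

static int
prepare_for_sys_schedule(int non_blocking)
{
    if (non_blocking && eager_check_io) {
        /* Eager mode: poll even if port tasks still hold I/O events;
         * just make sure only one scheduler enters erl_sys_schedule(). */
        return try_set_sys_scheduling();
    }
    /* Blocking mode: never sleep in erl_sys_schedule() while I/O events
     * are parked in port tasks; re-check after taking the flag. */
    while (!have_outstanding_io_tasks() && try_set_sys_scheduling()) {
        if (!have_outstanding_io_tasks())
            return 1;
        clear_sys_scheduling();
    }
    return 0;
}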
@@ -3219,11 +3196,11 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
         return 0;
     wrq = ERTS_RUNQ_IX(ix);
     flags = ERTS_RUNQ_FLGS_GET(wrq);
+    if (activate && !(flags & ERTS_RUNQ_FLG_SUSPENDED)) {
+        if (try_inc_no_active_runqs(ix+1))
+            (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
+    }
     if (!(flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_NONEMPTY))) {
-        if (activate) {
-            if (try_inc_no_active_runqs(ix+1))
-                (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
-        }
         wake_scheduler(wrq);
         return 1;
     }
@@ -3755,17 +3732,25 @@ evacuate_run_queue(ErtsRunQueue *rq,
             }
 #ifdef ERTS_DIRTY_SCHEDULERS
             else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
-                erts_aint32_t old;
-                old = erts_smp_atomic32_read_band_nob(&proc->state,
-                                                      ~(ERTS_PSFLG_DIRTY_CPU_PROC
-                                                        | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+#ifdef DEBUG
+                erts_aint32_t old =
+#else
+                (void)
+#endif
+                    erts_smp_atomic32_read_band_nob(&proc->state,
+                                                    ~(ERTS_PSFLG_DIRTY_CPU_PROC
+                                                      | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
                 /* assert that no other dirty flags are set */
                 ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
             }
             else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
-                erts_aint32_t old;
-                old = erts_smp_atomic32_read_band_nob(&proc->state,
-                                                      ~(ERTS_PSFLG_DIRTY_IO_PROC
-                                                        | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+#ifdef DEBUG
+                erts_aint32_t old =
+#else
+                (void)
+#endif
+                    erts_smp_atomic32_read_band_nob(&proc->state,
+                                                    ~(ERTS_PSFLG_DIRTY_IO_PROC
+                                                      | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
                 /* assert that no other dirty flags are set */
                 ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
             }
@@ -5874,6 +5859,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
     case ERTS_ENQUEUE_NOT:
 
         if (erts_system_profile_flags.runnable_procs) {
+            /* Status lock prevents out of order "runnable proc" trace msgs */
+            ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
             if (!(a & ERTS_PSFLG_ACTIVE_SYS)
                 && (!(a & ERTS_PSFLG_ACTIVE)
                     || (a & ERTS_PSFLG_SUSPENDED))) {
@@ -5987,7 +5975,8 @@ change_proc_schedule_state(Process *p,
                            erts_aint32_t clear_state_flags,
                            erts_aint32_t set_state_flags,
                            erts_aint32_t *statep,
-                           erts_aint32_t *enq_prio_p)
+                           erts_aint32_t *enq_prio_p,
+                           ErtsProcLocks locks)
 {
     /*
      * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
@@ -5996,6 +5985,11 @@ change_proc_schedule_state(Process *p,
      */
     erts_aint32_t a = *statep, n;
     int enqueue; /* < 0 -> use proxy */
+    unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+    unsigned int lock_status = (prof_runnable_procs
+                                && !(locks & ERTS_PROC_LOCK_STATUS));
+
+    ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
 
     ASSERT(!(a & ERTS_PSFLG_PROXY));
     ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
@@ -6005,6 +5999,9 @@ change_proc_schedule_state(Process *p,
                                  | ERTS_PSFLG_RUNNING_SYS
                                  | ERTS_PSFLG_ACTIVE_SYS)) == 0);
 
+    if (lock_status)
+        erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
     while (1) {
         erts_aint32_t e;
         n = e = a;
@@ -6040,7 +6037,9 @@ change_proc_schedule_state(Process *p,
             break;
     }
 
-    if (erts_system_profile_flags.runnable_procs) {
+    if (prof_runnable_procs) {
+
+        /* Status lock prevents out of order "runnable proc" trace msgs */
 
         if (((n & (ERTS_PSFLG_SUSPENDED
                    | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
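The evacuate_run_queue() hunks above use a small preprocessor idiom: bind the return value of the atomic read-and-mask only in debug builds (where the ASSERT inspects it) and explicitly discard it otherwise, avoiding a set-but-unused warning in release builds. Reduced to its essence, with hypothetical stand-in names and flag bits:

#include <assert.h>

/* Stand-in for erts_smp_atomic32_read_band_nob(): AND the mask into
 * *state and return the previous value. */
static int
read_band(int *state, int mask)
{
    int old = *state;
    *state &= mask;
    return old;
}

static void
clear_dirty_flags(int *state)
{
#ifdef DEBUG
    int old =
#else
    (void)
#endif
        read_band(state, ~0x3);
#ifdef DEBUG
    /* Only debug builds bind the result, so release builds get no
     * "set but unused" warning -- the trick used twice in
     * evacuate_run_queue() above. */
    assert(!(old & 0x4));
#endif
}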
@@ -6053,15 +6052,18 @@ change_proc_schedule_state(Process *p,
             profile_runnable_proc(p, am_active);
         }
 
+        if (lock_status)
+            erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
     }
 
+
     *statep = a;
 
     return enqueue;
 }
 
 static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t in_state)
+schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
 {
     erts_aint32_t enq_prio = -1;
     erts_aint32_t state = in_state;
@@ -6069,7 +6071,8 @@ schedule_process(Process *p, erts_aint32_t in_state)
                                             0,
                                             ERTS_PSFLG_ACTIVE,
                                             &state,
-                                            &enq_prio);
+                                            &enq_prio,
+                                            locks);
     if (enqueue != ERTS_ENQUEUE_NOT)
         add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
                  state,
@@ -6077,16 +6080,27 @@ schedule_process(Process *p, erts_aint32_t in_state)
 }
 
 void
-erts_schedule_process(Process *p, erts_aint32_t state)
+erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
 {
-    schedule_process(p, state);
+    schedule_process(p, state, locks);
 }
 
 static void
 schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
 {
+    /*
+     * Expects status lock to be locked when called, and
+     * returns with status lock unlocked...
+     */
     erts_aint32_t a = state, n, enq_prio = -1;
     int enqueue; /* < 0 -> use proxy */
+    unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+
+    /* Status lock prevents out of order "runnable proc" trace msgs */
+    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+    if (!prof_runnable_procs)
+        erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
 
     ASSERT(!(state & ERTS_PSFLG_PROXY));
@@ -6095,7 +6109,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
         n = e = a;
 
         if (a & ERTS_PSFLG_FREE)
-            return; /* We don't want to schedule free processes... */
+            goto cleanup; /* We don't want to schedule free processes... */
 
         enqueue = ERTS_ENQUEUE_NOT;
         n |= ERTS_PSFLG_ACTIVE_SYS;
@@ -6108,7 +6122,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
             goto cleanup;
     }
 
-    if (erts_system_profile_flags.runnable_procs) {
+    if (prof_runnable_procs) {
 
         if (!(a & (ERTS_PSFLG_ACTIVE_SYS
                    | ERTS_PSFLG_RUNNING
@@ -6118,6 +6132,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
             profile_runnable_proc(p, am_active);
         }
 
+        erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+        prof_runnable_procs = 0;
     }
 
     if (enqueue != ERTS_ENQUEUE_NOT) {
@@ -6132,8 +6148,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
     }
 
 cleanup:
+
+    if (prof_runnable_procs)
+        erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
     if (proxy)
         free_proxy_proc(proxy);
+
+    ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
 }
 
 static ERTS_INLINE int
@@ -6200,7 +6222,7 @@ suspend_process(Process *c_p, Process *p)
 }
 
 static ERTS_INLINE void
-resume_process(Process *p)
+resume_process(Process *p, ErtsProcLocks locks)
 {
     erts_aint32_t state, enq_prio = -1;
     int enqueue;
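change_proc_schedule_state() now receives the caller's lock set and acquires the status lock itself only when profiling needs it and the caller does not already hold it, releasing it only if it took it. The conditional-locking pattern, in a hypothetical reduced form using a plain pthread mutex:

#include <pthread.h>

#define LOCK_STATUS 0x1   /* hypothetical stand-in for ERTS_PROC_LOCK_STATUS */

struct proc {
    pthread_mutex_t status_mtx;
    int state;
};

/* 'locks' describes what the caller already holds. Take the status lock
 * only when it is actually needed (profiling active) and not already
 * held, and release it only if taken here -- the lock_status logic that
 * change_proc_schedule_state() gained above. */
static void
change_state_profiled(struct proc *p, unsigned locks, int profiling, int new_state)
{
    int lock_status = profiling && !(locks & LOCK_STATUS);

    if (lock_status)
        pthread_mutex_lock(&p->status_mtx);

    p->state = new_state;   /* "runnable proc" trace messages stay ordered */

    if (lock_status)
        pthread_mutex_unlock(&p->status_mtx);
}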
@@ -6217,7 +6239,8 @@ resume_process(Process *p)
                                             ERTS_PSFLG_SUSPENDED,
                                             0,
                                             &state,
-                                            &enq_prio);
+                                            &enq_prio,
+                                            locks);
     if (enqueue)
         add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
                  state,
@@ -7818,6 +7841,9 @@ erts_start_schedulers(void)
 
 #ifdef ETHR_HAVE_THREAD_NAMES
     opts.name = malloc(80);
+    if (!opts.name) {
+        ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+    }
 #endif
 
 #ifdef ERTS_SMP
@@ -8030,7 +8056,8 @@ handle_pend_sync_suspend(Process *suspendee,
         }
         /* suspender is suspended waiting for suspendee to suspend;
            resume suspender */
-        resume_process(suspender);
+        ASSERT(suspendee != suspender);
+        resume_process(suspender, ERTS_PROC_LOCK_STATUS);
         erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
     }
 }
@@ -8065,7 +8092,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
             ASSERT(c_p->flags & F_P2PNR_RESCHED);
             c_p->flags &= ~F_P2PNR_RESCHED;
             if (!suspend && rp)
-                resume_process(rp);
+                resume_process(rp, rp_locks);
         }
         else {
@@ -8223,7 +8250,8 @@ handle_pend_bif_sync_suspend(Process *suspendee,
         }
         /* suspender is suspended waiting for suspendee to suspend;
            resume suspender */
-        resume_process(suspender);
+        ASSERT(suspender != suspendee);
+        resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
         erts_smp_proc_unlock(suspender,
                              ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
     }
@@ -8583,7 +8611,8 @@ resume_process_1(BIF_ALIST_1)
         ASSERT(ERTS_PSFLG_SUSPENDED
                & erts_smp_atomic32_read_nob(&suspendee->state));
 
-        resume_process(suspendee);
+        ASSERT(BIF_P != suspendee);
+        resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
 
         erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
     }
@@ -8713,7 +8742,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
     ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
     if (!(process_locks & ERTS_PROC_LOCK_STATUS))
         erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
-    resume_process(process);
+    resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
     if (!(process_locks & ERTS_PROC_LOCK_STATUS))
         erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
 }
@@ -8732,7 +8761,7 @@ erts_resume_processes(ErtsProcList *list)
         proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
         if (proc) {
             if (erts_proclist_same(plp, proc)) {
-                resume_process(proc);
+                resume_process(proc, ERTS_PROC_LOCK_STATUS);
                 nresumed++;
             }
             erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
@@ -9147,7 +9176,7 @@ Process *schedule(Process *p, int calls)
         }
         else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
                  (fcalls > input_reductions &&
-                  prepare_for_sys_schedule(esdp))) {
+                  prepare_for_sys_schedule(esdp, !0))) {
             /*
              * Schedule system-level activities.
              */
@@ -9155,8 +9184,6 @@ Process *schedule(Process *p, int calls)
             erts_smp_atomic32_set_relb(&function_calls, 0);
             fcalls = 0;
 
-            ASSERT(!erts_port_task_have_outstanding_io_tasks());
-
 #if 0 /* Not needed since we wont wait in sys schedule */
             erts_sys_schedule_interrupt(0);
 #endif
@@ -9188,7 +9215,9 @@ Process *schedule(Process *p, int calls)
         if (RUNQ_READ_LEN(&rq->ports.info.len)) {
             int have_outstanding_io;
             have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
-            if ((have_outstanding_io && fcalls > 2*input_reductions)
+            if ((!erts_eager_check_io
+                 && have_outstanding_io
+                 && fcalls > 2*input_reductions)
                 || rq->halt_in_progress) {
                 /*
                  * If we have performed more than 2*INPUT_REDUCTIONS since
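Two scheduler-loop consequences are visible in the last hunks above: with eager check I/O, prepare_for_sys_schedule(esdp, !0) may now succeed while port tasks still hold I/O events (hence the deleted ASSERT), and the port-execution path only forces an extra poll round in the legacy mode or when halting. A hypothetical condensation of the two trigger conditions, with illustrative names and int flags:

/* Hypothetical condensation of the two check-I/O triggers in schedule()
 * above; the real code also requires prepare_for_sys_schedule() to
 * succeed for the first trigger. */
static int eager_mode = 1;

static int
should_check_io(int fcalls, int input_reductions,
                int have_outstanding_io, int halt_in_progress)
{
    /* Trigger 1: periodic non-blocking poll; with eager check I/O this
     * no longer requires that port tasks hold no I/O events. */
    if (fcalls > input_reductions)
        return 1;
    /* Trigger 2: after executing port tasks, force a poll round only in
     * the legacy (non-eager) mode, or when halting. */
    return (!eager_mode
            && have_outstanding_io
            && fcalls > 2 * input_reductions)
           || halt_in_progress;
}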
@@ -9968,8 +9997,10 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
         rp_state = n;
     }
 
-    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
-
+    /*
+     * schedule_process_sys_task() unlocks status
+     * lock on process.
+     */
     schedule_process_sys_task(rp, rp_state, NULL);
 
     if (free_stqs)
@@ -10714,7 +10745,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
     /*
      * Schedule process for execution.
      */
-    schedule_process(p, state);
+    schedule_process(p, state, 0);
 
     VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
@@ -11035,7 +11066,8 @@ set_proc_exiting(Process *p,
                                             ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
                                             ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
                                             &state,
-                                            &enq_prio);
+                                            &enq_prio,
+                                            ERTS_PROC_LOCKS_ALL);
 
     p->fvalue = reason;
     if (bp)
@@ -11076,7 +11108,8 @@ set_proc_self_exiting(Process *c_p)
                                             ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
                                             ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
                                             &state,
-                                            &enq_prio);
+                                            &enq_prio,
+                                            ERTS_PROC_LOCKS_ALL);
 
     ASSERT(!enqueue);
     return state;
@@ -11721,8 +11754,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
     Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
                                        smon->pid, ERTS_PROC_LOCK_STATUS);
     if (suspendee) {
+        ASSERT(suspendee != vc_p);
         if (smon->active)
-            resume_process(suspendee);
+            resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
         erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
     }
     erts_destroy_suspend_monitor(smon);
@@ -11814,6 +11848,7 @@ erts_continue_exit_process(Process *p)
     struct saved_calls *scb;
     process_breakpoint_time_t *pbt;
     erts_aint32_t state;
+    void *nif_export;
 
 #ifdef DEBUG
     int yield_allowed = 1;
@@ -11964,6 +11999,7 @@ erts_continue_exit_process(Process *p)
                  : NULL);
     scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
     pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
+    nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCKS_ALL, NULL);
 
     erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
 #ifdef BM_COUNTERS
@@ -12011,6 +12047,9 @@ erts_continue_exit_process(Process *p)
     if (pbt)
         erts_free(ERTS_ALC_T_BPD, (void *) pbt);
 
+    if (nif_export)
+        erts_destroy_nif_export(nif_export);
+
     delete_process(p);
 
 #ifdef ERTS_SMP
@@ -12055,7 +12094,7 @@ timeout_proc(Process* p)
 
     state = erts_smp_atomic32_read_acqb(&p->state);
     if (!(state & ERTS_PSFLG_ACTIVE))
-        schedule_process(p, state);
+        schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
 }
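The exit path handles the new NIF trap export the same way it handles saved-calls buffers and breakpoint data: detach the pointer from the process while all locks are held, then destroy it after unlocking. A minimal sketch of that detach-then-destroy shape, with hypothetical names and a pthread mutex standing in for the process lock set:

#include <pthread.h>
#include <stdlib.h>

struct nif_export_stub { int dummy; };   /* hypothetical payload */

struct exiting_proc {
    pthread_mutex_t all_locks;           /* stand-in for ERTS_PROC_LOCKS_ALL */
    struct nif_export_stub *nif_export;
};

/* Detach the pointer while the lock is held so no other thread can still
 * reach it, then destroy it outside the critical section -- the shape of
 * the ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ..., NULL) fetch-and-clear plus
 * the later erts_destroy_nif_export() call in the exit path above. */
static void
detach_and_destroy(struct exiting_proc *p)
{
    struct nif_export_stub *res;

    pthread_mutex_lock(&p->all_locks);
    res = p->nif_export;
    p->nif_export = NULL;
    pthread_mutex_unlock(&p->all_locks);

    if (res)
        free(res);
}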