Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--   erts/emulator/beam/erl_process.c   931
1 file changed, 660 insertions, 271 deletions
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index ea63d20dfa..b64a7f8902 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -43,8 +43,11 @@
 #include "erl_async.h"
 #include "dtrace-wrapper.h"
 #include "erl_ptab.h"
+#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
-
+#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
 
 #define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
 #define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -152,7 +155,7 @@ extern BeamInstr beam_continue_exit[];
 /* Eager check I/O not supported on OSE yet. */
 int erts_eager_check_io = 0;
 #else
-int erts_eager_check_io = 0;
+int erts_eager_check_io = 1;
 #endif
 int erts_sched_compact_load;
 int erts_sched_balance_util = 0;
@@ -457,13 +460,12 @@ do { \
 static void exec_misc_ops(ErtsRunQueue *);
 static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
-                              int yreg);
+static int stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg);
 
 static void aux_work_timeout(void *unused);
 static void aux_work_timeout_early_init(int no_schedulers);
 static void aux_work_timeout_late_init(void);
-static void setup_aux_work_timer(void);
+static void setup_aux_work_timer(ErtsSchedulerData *esdp);
 
 static int execute_sys_tasks(Process *c_p,
                              erts_aint32_t *statep,
@@ -493,6 +495,8 @@ dbg_chk_aux_work_val(erts_aint32_t value)
     valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
     valid |= ERTS_SSI_AUX_WORK_DD;
     valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+    valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+    valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
     valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
 #endif
 #if HAVE_ERTS_MSEG
@@ -610,13 +614,11 @@ erts_pre_init_process(void)
 #endif
 }
 
-#ifdef ERTS_SMP
 static void
 release_process(void *vproc)
 {
-    erts_smp_proc_dec_refc((Process *) vproc);
+    erts_proc_dec_refc((Process *) vproc);
 }
-#endif
 
 /* initialize the scheduler */
 void
@@ -632,16 +634,18 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
 
     erts_ptab_init_table(&erts_proc,
                          ERTS_ALC_T_PROC_TABLE,
-#ifdef ERTS_SMP
                          release_process,
-#else
-                         NULL,
-#endif
                          (ErtsPTabElementCommon *) &erts_invalid_process.common,
                          proc_tab_size,
                          sizeof(Process),
                          "process_table",
-                         legacy_proc_tab);
+                         legacy_proc_tab,
+#ifdef ERTS_SMP
+                         1
+#else
+                         0
+#endif
+        );
 
     last_reductions = 0;
     last_exact_reductions = 0;
@@ -702,8 +706,8 @@ init_sched_wall_time(ErtsSchedWallTime *swtp)
 static ERTS_INLINE Uint64
 sched_wall_time_ts(void)
 {
-#ifdef HAVE_GETHRTIME
-    return (Uint64) sys_gethrtime();
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+    return (Uint64) erts_os_monotonic_time();
 #else
     Uint64 res;
     SysTimeval tv;
@@ -1023,11 +1027,7 @@ reply_sched_wall_time(void *vswtrp)
         hpp = &hp;
     }
 
-    erts_queue_message(rp, &rp_locks, bp, msg, NIL
-#ifdef USE_VM_PROBES
-                       , NIL
-#endif
-                       );
+    erts_queue_message(rp, &rp_locks, bp, msg, NIL);
 
     if (swtrp->req_sched == esdp->no)
        rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -1035,7 +1035,7 @@ reply_sched_wall_time(void *vswtrp)
     if (rp_locks)
        erts_smp_proc_unlock(rp, rp_locks);
 
-    erts_smp_proc_dec_refc(rp);
+    erts_proc_dec_refc(rp);
 
     if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0)
        swtreq_free(vswtrp);
@@ -1067,7 +1067,7 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable)
     erts_smp_atomic32_init_nob(&swtrp->refc,
                                (erts_aint32_t) erts_no_schedulers);
 
-    erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+    erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
 
 #ifdef ERTS_SMP
     if (erts_no_schedulers > 1)
@@ -1128,7 +1128,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
        xplocks &= ~plocks;
        if (xplocks && erts_smp_proc_trylock(p, xplocks) == EBUSY) {
            if (xplocks & ERTS_PROC_LOCK_MAIN) {
-               erts_smp_proc_inc_refc(p);
+               erts_proc_inc_refc(p);
                erts_smp_proc_unlock(p, plocks);
                erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL);
                refc = 1;
@@ -1144,7 +1144,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
        if (xplocks)
            erts_smp_proc_unlock(p, xplocks);
        if (refc)
-           erts_smp_proc_dec_refc(p);
+           erts_proc_dec_refc(p);
        ASSERT(p->psd);
        if (p->psd != psd)
            erts_free(ERTS_ALC_T_PSD, psd);
@@ -1772,6 +1772,101 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
 }
 
 /*
+ * Canceled timers
+ */
+
+void
+erts_notify_canceled_timer(ErtsSchedulerData *esdp, int rsid)
+{
+    ASSERT(esdp && esdp == erts_get_scheduler_data());
+    if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
+       schedule_aux_work_wakeup(&esdp->aux_work_data,
+                                rsid,
+                                ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+    else
+       set_aux_work_flags_wakeup_relb(ERTS_SCHED_SLEEP_INFO_IX(rsid-1),
+                                      ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_canceled_timers(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+    ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+    int need_thr_progress = 0;
+    ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+    int more_work = 0;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+    ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+    unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+    erts_handle_canceled_timers((void *) awdp->esdp,
+                               &need_thr_progress,
+                               &wakeup,
+                               &more_work);
+    if (more_work) {
+       if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS)
+           & ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR) {
+           unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+           aux_work &= ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+       }
+       return aux_work;
+    }
+
+    if (need_thr_progress) {
+       if (wakeup == ERTS_THR_PRGR_INVALID)
+           wakeup = erts_thr_progress_later(awdp->esdp);
+       awdp->cncld_tmrs.thr_prgr = wakeup;
+       set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+       haw_thr_prgr_soft_wakeup(awdp, wakeup);
+    }
+    return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_canceled_timers_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+    ErtsSchedulerSleepInfo *ssi;
+    int need_thr_progress;
+    int more_work;
+    ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+    ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+    ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+    if (!erts_thr_progress_has_reached_this(current, awdp->cncld_tmrs.thr_prgr))
+       return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+
+    ssi = awdp->ssi;
+    need_thr_progress = 0;
+    more_work = 0;
+
+    erts_handle_canceled_timers((void *) awdp->esdp,
+                               &need_thr_progress,
+                               &wakeup,
+                               &more_work);
+    if (more_work) {
+       set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+       unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+       return ((aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR)
+               | ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+    }
+
+    if (need_thr_progress) {
+       if (wakeup == ERTS_THR_PRGR_INVALID)
+           wakeup = erts_thr_progress_later(awdp->esdp);
+       awdp->cncld_tmrs.thr_prgr = wakeup;
+       haw_thr_prgr_soft_wakeup(awdp, wakeup);
+    }
+    else {
+       unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+    }
+
+    return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+}
+
+/*
  * Handle scheduled thread progress later operations.
  */
 #define ERTS_MAX_THR_PRGR_LATER_OPS 50
@@ -1869,7 +1964,7 @@ completed_dealloc(void *vproc)
 {
     if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) {
        erts_resume((Process *) vproc, (ErtsProcLocks) 0);
-       erts_smp_proc_dec_refc((Process *) vproc);
+       erts_proc_dec_refc((Process *) vproc);
     }
 }
@@ -1918,7 +2013,7 @@ erts_debug_wait_deallocations(Process *c_p)
                               count,
                               0)) {
            erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
-           erts_smp_proc_inc_refc(c_p);
+           erts_proc_inc_refc(c_p);
            /* scheduler threads */
            erts_schedule_multi_misc_aux_work(0,
                                              erts_no_schedulers,
@@ -2033,7 +2128,7 @@ static ERTS_INLINE erts_aint32_t
 handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
 {
     unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
-    setup_aux_work_timer();
+    setup_aux_work_timer(awdp->esdp);
     return aux_work & ~ERTS_SSI_AUX_WORK_SET_TMO;
 }
@@ -2093,6 +2188,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
 #ifdef ERTS_SMP
     HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP,
                    handle_thr_prgr_later_op);
+    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS,
+                   handle_canceled_timers);
+    /* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */
+    HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR,
+                   handle_canceled_timers_thr_prgr);
 #endif
 
 #if ERTS_USE_ASYNC_READY_Q
@@ -2142,8 +2242,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
 
 typedef struct {
     union {
-       ErlTimer data;
-       char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErlTimer))];
+       ErtsTWheelTimer data;
+       char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsTWheelTimer))];
     } timer;
 
     int initialized;
@@ -2153,6 +2253,22 @@ typedef struct {
 
 static ErtsAuxWorkTmo *aux_work_tmo;
 
+static ERTS_INLINE void
+start_aux_work_timer(ErtsSchedulerData *esdp)
+{
+    ErtsMonotonicTime tmo = erts_get_monotonic_time(esdp);
+    tmo = ERTS_MONOTONIC_TO_CLKTCKS(tmo-1);
+    tmo += ERTS_MSEC_TO_CLKTCKS(1000) + 1;
+    erts_twheel_init_timer(&aux_work_tmo->timer.data);
+    ASSERT(esdp);
+    erts_twheel_set_timer(esdp->timer_wheel,
+                         &aux_work_tmo->timer.data,
+                         aux_work_timeout,
+                         NULL,
+                         (void *) esdp,
+                         tmo);
+}
+
 static void
 aux_work_timeout_early_init(int no_schedulers)
 {
@@ -2185,18 +2301,12 @@ void
 aux_work_timeout_late_init(void)
 {
     aux_work_tmo->initialized = 1;
-    if (erts_atomic32_read_nob(&aux_work_tmo->refc)) {
-       aux_work_tmo->timer.data.active = 0;
-       erts_set_timer(&aux_work_tmo->timer.data,
-                      aux_work_timeout,
-                      NULL,
-                      NULL,
-                      1000);
-    }
+    if (erts_atomic32_read_nob(&aux_work_tmo->refc))
+       start_aux_work_timer(erts_get_scheduler_data());
 }
 
 static void
-aux_work_timeout(void *unused)
+aux_work_timeout(void *vesdp)
 {
     erts_aint32_t refc;
     int i;
@@ -2219,32 +2329,18 @@ aux_work_timeout(void *unused)
     if (refc != 1
       || 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) {
       /* Setup next timeout... */
-       aux_work_tmo->timer.data.active = 0;
-       erts_set_timer(&aux_work_tmo->timer.data,
-                      aux_work_timeout,
-                      NULL,
-                      NULL,
-                      1000);
+       start_aux_work_timer((ErtsSchedulerData *) vesdp);
     }
 }
 
 static void
-setup_aux_work_timer(void)
+setup_aux_work_timer(ErtsSchedulerData *esdp)
 {
-#ifndef ERTS_SMP
-    if (!erts_get_scheduler_data())
+    if (!esdp || !esdp->timer_wheel)
       set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(0),
                                     ERTS_SSI_AUX_WORK_SET_TMO);
     else
-#endif
-    {
-       aux_work_tmo->timer.data.active = 0;
-       erts_set_timer(&aux_work_tmo->timer.data,
-                      aux_work_timeout,
-                      NULL,
-                      NULL,
-                      1000);
-    }
+       start_aux_work_timer(esdp);
 }
 
 erts_aint32_t
@@ -2275,7 +2371,7 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
       if (refc == 1) {
           erts_atomic32_inc_acqb(&aux_work_tmo->refc);
           if (aux_work_tmo->initialized)
-              setup_aux_work_timer();
+              setup_aux_work_timer(erts_get_scheduler_data());
       }
     }
     return old;
@@ -2318,7 +2414,6 @@ erts_active_schedulers(void)
 
     ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting));
 
-    ASSERT(as >= 0);
     return as;
 }
@@ -2640,6 +2735,13 @@ thr_prgr_fin_wait(void *vssi)
 
 static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp);
 
+void
+erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time)
+{
+    /* TODO only poke when needed (based on timeout_time) */
+    erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1));
+}
+
 static void *
 aux_thread(void *unused)
 {
@@ -2770,6 +2872,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
           sched_wall_time_change(esdp, thr_prgr_active);
 
       while (1) {
+           ErtsMonotonicTime current_time;
 
           aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
           if (aux_work) {
@@ -2783,34 +2886,76 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
                  erts_thr_progress_leader_update(esdp);
          }
 
-           if (aux_work)
-               flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+           if (aux_work) {
+               if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                   flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
+                   current_time = erts_get_monotonic_time(esdp);
+                   if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+                       if (!thr_prgr_active) {
+                           erts_thr_progress_active(esdp, thr_prgr_active = 1);
+                           sched_wall_time_change(esdp, 1);
+                       }
+                       erts_bump_timers(esdp->timer_wheel, current_time);
+                   }
+               }
+           }
           else {
+               ErtsMonotonicTime timeout_time;
+               int do_timeout = 0;
              if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
-                   if (thr_prgr_active) {
-                       erts_thr_progress_active(esdp, thr_prgr_active = 0);
-                       sched_wall_time_change(esdp, 0);
+                   timeout_time = erts_check_next_timeout_time(esdp);
+                   current_time = erts_get_monotonic_time(esdp);
+                   do_timeout = (current_time >= timeout_time);
+               } else
+                   timeout_time = ERTS_MONOTONIC_TIME_MAX;
+               if (do_timeout) {
+                   if (!thr_prgr_active) {
+                       erts_thr_progress_active(esdp, thr_prgr_active = 1);
+                       sched_wall_time_change(esdp, 1);
                  }
-                   erts_thr_progress_prepare_wait(esdp);
              }
+               else {
+                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                       if (thr_prgr_active) {
+                           erts_thr_progress_active(esdp, thr_prgr_active = 0);
+                           sched_wall_time_change(esdp, 0);
+                       }
+                       erts_thr_progress_prepare_wait(esdp);
+                   }
 
-           ERTS_SCHED_FAIR_YIELD();
+                   ERTS_SCHED_FAIR_YIELD();
 
-           flgs = sched_spin_wait(ssi, spincount);
-           if (flgs & ERTS_SSI_FLG_SLEEPING) {
-               ASSERT(flgs & ERTS_SSI_FLG_WAITING);
-               flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+                   flgs = sched_spin_wait(ssi, spincount);
                  if (flgs & ERTS_SSI_FLG_SLEEPING) {
-                       int res;
-                       ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
                      ASSERT(flgs & ERTS_SSI_FLG_WAITING);
-                       do {
-                           res = erts_tse_wait(ssi->event);
-                       } while (res == EINTR);
+                       flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
+                       if (flgs & ERTS_SSI_FLG_SLEEPING) {
+                           int res;
+                           ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
+                           ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+                           current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+                               erts_get_monotonic_time(esdp);
+                           do {
+                               Sint64 timeout;
+                               if (current_time >= timeout_time)
+                                   break;
+                               if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                                   timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+                                                                    - current_time
+                                                                    - 1) + 1;
+                               } else
+                                   timeout = -1;
+                               res = erts_tse_twait(ssi->event, timeout);
+                               current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+                                   erts_get_monotonic_time(esdp);
+                           } while (res == EINTR);
+                       }
                  }
+                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+                       erts_thr_progress_finalize_wait(esdp);
              }
-           if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
-               erts_thr_progress_finalize_wait(esdp);
+               if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
+                   erts_bump_timers(esdp->timer_wheel, current_time);
          }
 
          if (!(flgs & ERTS_SSI_FLG_WAITING)) {
@@ -2843,7 +2988,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
     else
 #endif
     {
-       erts_aint_t dt;
 
       erts_smp_atomic32_set_relb(&function_calls, 0);
       *fcalls = 0;
@@ -2868,6 +3012,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
           goto sys_aux_work;
 
       while (spincount-- > 0) {
+           ErtsMonotonicTime current_time;
 
       sys_poll_aux_work:
@@ -2877,8 +3022,11 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
           ASSERT(!erts_port_task_have_outstanding_io_tasks());
           erl_sys_schedule(1); /* Might give us something to do */
 
-           dt = erts_do_time_read_and_reset();
-           if (dt) erts_bump_timer(dt);
+           if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+               current_time = erts_get_monotonic_time(esdp);
+               if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+                   erts_bump_timers(esdp->timer_wheel, current_time);
+           }
 
       sys_aux_work:
 #ifndef ERTS_SMP
@@ -2887,15 +3035,18 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
 
          aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
          if (aux_work) {
-               if (!working)
-                   sched_wall_time_change(esdp, working = 1);
+               if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                   if (!working)
+                       sched_wall_time_change(esdp, working = 1);
 #ifdef ERTS_SMP
-               if (!thr_prgr_active)
-                   erts_thr_progress_active(esdp, thr_prgr_active = 1);
+                   if (!thr_prgr_active)
+                       erts_thr_progress_active(esdp, thr_prgr_active = 1);
 #endif
+               }
              aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
 #ifdef ERTS_SMP
-               if (aux_work && erts_thr_progress_update(esdp))
+               if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && aux_work &&
+                   erts_thr_progress_update(esdp))
                  erts_thr_progress_leader_update(esdp);
 #endif
          }
@@ -2993,8 +3144,11 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
 
          erl_sys_schedule(0);
 
-           dt = erts_do_time_read_and_reset();
-           if (dt) erts_bump_timer(dt);
+           if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+               ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp);
+               if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+                   erts_bump_timers(esdp->timer_wheel, current_time);
+           }
 
 #ifndef ERTS_SMP
          if (rq->len == 0 && !rq->misc.start)
@@ -4885,9 +5039,11 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
                            + ERTS_WAKEUP_OTHER_FIXED_INC);
       if (rq->wakeup_other > wakeup_other.limit) {
 #ifdef ERTS_DIRTY_SCHEDULERS
-           if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting)
-               wake_dirty_schedulers(rq, 1);
-           else
+           if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+               if (rq->waiting) {
+                   wake_dirty_schedulers(rq, 1);
+               }
+           } else
 #endif
          {
              int empty_rqs =
@@ -5200,6 +5356,7 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
     awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
     awdp->dd.completed_callback = NULL;
     awdp->dd.completed_arg = NULL;
+    awdp->cncld_tmrs.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
     awdp->later_op.thr_prgr = ERTS_THR_PRGR_VAL_FIRST;
     awdp->later_op.size = 0;
     awdp->later_op.first = NULL;
@@ -5264,6 +5421,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
 #else
     esdp->no = (Uint) num;
 #endif
+    esdp->ssi = ssi;
     esdp->current_process = NULL;
     esdp->current_port = NULL;
@@ -5276,6 +5434,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
     esdp->run_queue = runq;
     esdp->run_queue->scheduler = esdp;
 
+    esdp->last_monotonic_time = 0;
+    esdp->check_time_reds = 0;
+
+    esdp->thr_id = (Uint32) num;
+    erts_sched_bif_unique_init(esdp);
+
     if (daww_ptr) {
       init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
 #ifdef ERTS_SMP
@@ -5874,6 +6038,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
       free_proxy_proc(proxy);
 
     erts_smp_runq_lock(c_rq);
+
     return 0;
 
 #ifdef ERTS_DIRTY_SCHEDULERS
@@ -6586,7 +6751,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
                  int res;
 
                  do {
-                       res = erts_tse_wait(ssi->event);
+                       res = erts_tse_twait(ssi->event, -1);
                  } while (res == EINTR);
              }
          }
@@ -6642,6 +6807,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
          }
      }
 
+    if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+       (void) erts_get_monotonic_time(esdp);
     erts_smp_runq_lock(esdp->run_queue);
     non_empty_runq(esdp->run_queue);
@@ -6749,6 +6916,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
          erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
 
          while (1) {
+               ErtsMonotonicTime current_time;
              erts_aint32_t qmask;
              erts_aint32_t flgs;
@@ -6756,7 +6924,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
                       & ERTS_RUNQ_FLGS_QMASK);
              aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
              if (aux_work|qmask) {
-                   if (!thr_prgr_active) {
+                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
                      erts_thr_progress_active(esdp, thr_prgr_active = 1);
                      sched_wall_time_change(esdp, 1);
                  }
@@ -6764,7 +6932,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
                      aux_work = handle_aux_work(&esdp->aux_work_data,
                                                 aux_work,
                                                 1);
-                   if (aux_work && erts_thr_progress_update(esdp))
+                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && aux_work &&
+                       erts_thr_progress_update(esdp))
                      erts_thr_progress_leader_update(esdp);
                  if (qmask) {
                      erts_smp_runq_lock(esdp->run_queue);
@@ -6773,30 +6942,77 @@ suspend_scheduler(ErtsSchedulerData *esdp)
                  }
              }
 
-               if (!aux_work) {
-                   if (thr_prgr_active) {
-                       erts_thr_progress_active(esdp, thr_prgr_active = 0);
-                       sched_wall_time_change(esdp, 0);
+               if (aux_work) {
+                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                       current_time = erts_get_monotonic_time(esdp);
+                       if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+                           if (!thr_prgr_active) {
+                               erts_thr_progress_active(esdp, thr_prgr_active = 1);
+                               sched_wall_time_change(esdp, 1);
+                           }
+                           erts_bump_timers(esdp->timer_wheel, current_time);
+                       }
                  }
-                   erts_thr_progress_prepare_wait(esdp);
-                   flgs = sched_spin_suspended(ssi,
-                                               ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
-                   if (flgs == (ERTS_SSI_FLG_SLEEPING
-                                | ERTS_SSI_FLG_WAITING
-                                | ERTS_SSI_FLG_SUSPENDED)) {
-                       flgs = sched_set_suspended_sleeptype(ssi);
+               }
+               else {
+                   ErtsMonotonicTime timeout_time;
+                   int do_timeout = 0;
+                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                       timeout_time = erts_check_next_timeout_time(esdp);
+                       current_time = erts_get_monotonic_time(esdp);
+                       do_timeout = (current_time >= timeout_time);
+                   } else
+                       timeout_time = ERTS_MONOTONIC_TIME_MAX;
+                   if (do_timeout) {
+                       if (!thr_prgr_active) {
+                           erts_thr_progress_active(esdp, thr_prgr_active = 1);
+                           sched_wall_time_change(esdp, 1);
+                       }
+                   }
+                   else {
+                       if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                           if (thr_prgr_active) {
+                               erts_thr_progress_active(esdp, thr_prgr_active = 0);
+                               sched_wall_time_change(esdp, 0);
+                           }
+                           erts_thr_progress_prepare_wait(esdp);
+                       }
+                       flgs = sched_spin_suspended(ssi,
+                                                   ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
                      if (flgs == (ERTS_SSI_FLG_SLEEPING
-                                    | ERTS_SSI_FLG_TSE_SLEEPING
                                   | ERTS_SSI_FLG_WAITING
                                   | ERTS_SSI_FLG_SUSPENDED)) {
-                           int res;
-
-                           do {
-                               res = erts_tse_wait(ssi->event);
-                           } while (res == EINTR);
+                           flgs = sched_set_suspended_sleeptype(ssi);
+                           if (flgs == (ERTS_SSI_FLG_SLEEPING
+                                        | ERTS_SSI_FLG_TSE_SLEEPING
+                                        | ERTS_SSI_FLG_WAITING
+                                        | ERTS_SSI_FLG_SUSPENDED)) {
+                               int res;
+
+                               current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+                                   erts_get_monotonic_time(esdp);
+                               do {
+                                   Sint64 timeout;
+                                   if (current_time >= timeout_time)
+                                       break;
+                                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+                                       timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
+                                                                        - current_time
+                                                                        - 1) + 1;
+                                   } else
+                                       timeout = -1;
+                                   res = erts_tse_twait(ssi->event, timeout);
+                                   current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
+                                       erts_get_monotonic_time(esdp);
+                               } while (res == EINTR);
+                           }
                      }
+                       if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+                           erts_thr_progress_finalize_wait(esdp);
                  }
-                   erts_thr_progress_finalize_wait(esdp);
+
+                   if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
+                       erts_bump_timers(esdp->timer_wheel, current_time);
              }
 
              flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING
@@ -7615,6 +7831,9 @@ sched_thread_func(void *vesdp)
     ErtsThrPrgrCallbacks callbacks;
     ErtsSchedulerData *esdp = vesdp;
     Uint no = esdp->no;
+
+    erts_sched_init_time_sup(esdp);
+
 #ifdef ERTS_SMP
     ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
     callbacks.arg = (void *) esdp->ssi;
@@ -7717,6 +7936,8 @@ sched_dirty_cpu_thread_func(void *vesdp)
     callbacks.wait = NULL;
     callbacks.finalize_wait = NULL;
 
+    esdp->thr_id += erts_no_schedulers;
+
     erts_thr_progress_register_unmanaged_thread(&callbacks);
 #ifdef ERTS_ENABLE_LOCK_CHECK
     {
@@ -7778,6 +7999,8 @@ sched_dirty_io_thread_func(void *vesdp)
     callbacks.wait = NULL;
     callbacks.finalize_wait = NULL;
 
+    esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers;
+
     erts_thr_progress_register_unmanaged_thread(&callbacks);
 #ifdef ERTS_ENABLE_LOCK_CHECK
     {
@@ -7835,23 +8058,17 @@ erts_start_schedulers(void)
     Uint actual;
     Uint wanted = erts_no_schedulers;
     Uint wanted_no_schedulers = erts_no_schedulers;
+    char name[16];
     ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
 
     opts.detached = 1;
 
-#ifdef ETHR_HAVE_THREAD_NAMES
-    opts.name = malloc(80);
-    if (!opts.name) {
-        ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
-    }
-#endif
+    opts.name = name;
 
 #ifdef ERTS_SMP
     if (erts_runq_supervision_interval) {
       opts.suggested_stack_size = 16;
-#ifdef ETHR_HAVE_THREAD_NAMES
-       sprintf(opts.name, "runq_supervisor");
-#endif
+       erts_snprintf(opts.name, 16, "runq_supervisor");
      erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
      if (0 != ethr_event_init(&runq_supervision_event))
          erl_exit(1, "Failed to create run-queue supervision event\n");
@@ -7878,9 +8095,7 @@ erts_start_schedulers(void)
 
       ASSERT(actual == esdp->no - 1);
 
-#ifdef ETHR_HAVE_THREAD_NAMES
-       sprintf(opts.name, "scheduler_%d", actual + 1);
-#endif
+       erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1);
 
 #ifdef __OSE__
       /* This should be done in the bind strategy */
@@ -7902,18 +8117,14 @@ erts_start_schedulers(void)
      int ix;
      for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
          ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
-           sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1);
-#endif
+           erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
          res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
          if (res != 0)
              erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix);
      }
      for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
          ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
-#ifdef ETHR_HAVE_THREAD_NAMES
-           sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1);
-#endif
+           erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1);
          res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
          if (res != 0)
              erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -7924,9 +8135,7 @@ erts_start_schedulers(void)
 
     ERTS_THR_MEMORY_BARRIER;
 
-#ifdef ETHR_HAVE_THREAD_NAMES
-    sprintf(opts.name, "aux");
-#endif
+    erts_snprintf(opts.name, 16, "aux");
 
 #ifdef __OSE__
     opts.coreNo = 0;
@@ -7952,9 +8161,6 @@ erts_start_schedulers(void)
       erts_send_error_to_logger_nogl(dsbufp);
     }
 
-#ifdef ETHR_HAVE_THREAD_NAMES
-    free(opts.name);
-#endif
 }
 
 #endif /* ERTS_SMP */
@@ -8897,7 +9103,6 @@ Process *schedule(Process *p, int calls)
 {
     Process *proxy_p = NULL;
     ErtsRunQueue *rq;
-    erts_aint_t dt;
     ErtsSchedulerData *esdp;
     int context_reds;
     int fcalls;
@@ -8997,7 +9202,7 @@ Process *schedule(Process *p, int calls)
          schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */
          proxy_p = NULL;
 
-           ERTS_PROC_REDUCTIONS_EXECUTED(rq,
+           ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
                                        (int) ERTS_PSFLGS_GET_USR_PRIO(state),
                                        reds,
                                        actual_reds);
@@ -9014,12 +9219,9 @@ Process *schedule(Process *p, int calls)
          ASSERT(esdp->free_process == p);
          esdp->free_process = NULL;
 #else
-           state = erts_smp_atomic32_read_nob(&p->state);
-           if (!(state & ERTS_PSFLG_IN_RUNQ))
-               erts_free_proc(p);
+           erts_proc_dec_refc(p);
 #endif
      }
-
 #ifdef ERTS_SMP
      ASSERT(!esdp->free_process);
 #endif
@@ -9027,11 +9229,15 @@ Process *schedule(Process *p, int calls)
 
      ERTS_SMP_CHK_NO_PROC_LOCKS;
 
-       dt = erts_do_time_read_and_reset();
-       if (dt) {
-           erts_smp_runq_unlock(rq);
-           erts_bump_timer(dt);
-           erts_smp_runq_lock(rq);
+       if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+           if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
+               (void) erts_get_monotonic_time(esdp);
+
+           if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+               erts_smp_runq_unlock(rq);
+               erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time);
+               erts_smp_runq_lock(rq);
+           }
      }
      BM_STOP_TIMER(system);
@@ -9177,6 +9383,7 @@ Process *schedule(Process *p, int calls)
      else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
               (fcalls > input_reductions &&
                prepare_for_sys_schedule(esdp, !0))) {
+           ErtsMonotonicTime current_time;
          /*
           * Schedule system-level activities.
           */
@@ -9189,8 +9396,10 @@ Process *schedule(Process *p, int calls)
 #endif
          erts_smp_runq_unlock(rq);
          erl_sys_schedule(1);
-           dt = erts_do_time_read_and_reset();
-           if (dt) erts_bump_timer(dt);
+
+           current_time = erts_get_monotonic_time(esdp);
+           if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+               erts_bump_timers(esdp->timer_wheel, current_time);
 
 #ifdef ERTS_SMP
          erts_smp_runq_lock(rq);
@@ -9329,13 +9538,8 @@ Process *schedule(Process *p, int calls)
                    | ERTS_PSFLG_PENDING_EXIT
                    | ERTS_PSFLG_ACTIVE_SYS))
               == ERTS_PSFLG_SUSPENDED)) {
-           if (state & ERTS_PSFLG_FREE) {
-#ifdef ERTS_SMP
-               erts_smp_proc_dec_refc(p);
-#else
-               erts_free_proc(p);
-#endif
-           }
+           if (state & ERTS_PSFLG_FREE)
+               erts_proc_dec_refc(p);
          if (proxy_p) {
              free_proxy_proc(proxy_p);
              proxy_p = NULL;
@@ -9478,6 +9682,20 @@ Process *schedule(Process *p, int calls)
      /* Never run a suspended process */
      ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
 
+       ASSERT(erts_proc_read_refc(p) > 0);
+
+       if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) {
+           BeamInstr** pi;
+#ifdef ERTS_SMP
+           ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+           pi = (BeamInstr **) p->def_arg_reg;
+           p->i = *pi;
+           p->flags &= ~F_INSLPQUEUE;
+           p->flags |= F_TIMO;
+           ERTS_PTMR_CLEAR(p);
+       }
+
      return p;
     }
 }
@@ -9530,15 +9748,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
      ASSERT(hp_start + hsz == hp);
 #endif
 
-       erts_queue_message(rp,
-                          &rp_locks,
-                          bp,
-                          msg,
-                          NIL
-#ifdef USE_VM_PROBES
-                          , NIL
-#endif
-                          );
+       erts_queue_message(rp, &rp_locks, bp, msg, NIL);
 
      if (c_p == rp)
          rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -10394,6 +10604,8 @@ erts_free_proc(Process *p)
 #ifdef ERTS_SMP
     erts_proc_lock_fin(p);
 #endif
+    ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
+    ASSERT(0 == erts_proc_read_refc(p));
     erts_free(ERTS_ALC_T_PROC, (void *) p);
 }
@@ -10436,7 +10648,7 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
     init_arg.run_queue = rq;
     init_arg.state = state;
 
-    ASSERT(((char *) p) == ((char *) &p->common));
+    ERTS_CT_ASSERT(offsetof(Process,common) == 0);
 
     if (!erts_ptab_new_element(&erts_proc,
                              &p->common,
@@ -10446,6 +10658,8 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
       return NULL;
     }
 
+    ASSERT(erts_proc_read_refc(p) > 0);
+
     ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL);
 
     p->approx_started = erts_get_approx_time();
@@ -10495,6 +10709,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
      int ix = so->scheduler-1;
      ASSERT(0 <= ix && ix < erts_no_run_queues);
      rq = ERTS_RUNQ_IX(ix);
+       /* Unsupported feature... */
      state |= ERTS_PSFLG_BOUND;
     }
     prio = (erts_aint32_t) so->priority;
@@ -10527,6 +10742,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
 
     p->flags = erts_default_process_flags;
 
+    p->static_flags = 0;
+    if (so->flags & SPO_SYSTEM_PROC)
+       p->static_flags |= ERTS_STC_FLG_SYSTEM_PROC;
     if (so->flags & SPO_USE_ARGS) {
      p->min_heap_size  = so->min_heap_size;
      p->min_vheap_size = so->min_vheap_size;
@@ -10539,9 +10757,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
      p->schedule_count = 0;
     ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
 
-    p->initial[INITIAL_MOD] = mod;
-    p->initial[INITIAL_FUN] = func;
-    p->initial[INITIAL_ARI] = (Uint) arity;
+    p->u.initial[INITIAL_MOD] = mod;
+    p->u.initial[INITIAL_FUN] = func;
+    p->u.initial[INITIAL_ARI] = (Uint) arity;
 
     /*
      * Must initialize binary lists here before copying binaries to process.
@@ -10582,7 +10800,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
 
     /* No need to initialize p->fcalls. */
 
-    p->current = p->initial+INITIAL_MOD;
+    p->current = p->u.initial+INITIAL_MOD;
 
     p->i = (BeamInstr *) beam_apply;
     p->cp = (BeamInstr *) beam_apply+1;
@@ -10605,11 +10823,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
     p->ftrace = NIL;
     p->reds = 0;
 
-#ifdef ERTS_SMP
-    p->common.u.alive.ptimer = NULL;
-#else
-    sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer));
-#endif
+    ERTS_PTMR_INIT(p);
 
     p->common.u.alive.reg = NULL;
     ERTS_P_LINKS(p) = NULL;
@@ -10640,7 +10854,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
     p->msg_inq.last = &p->msg_inq.first;
     p->msg_inq.len = 0;
 #endif
-    p->u.bif_timers = NULL;
+    p->bif_timers = NULL;
+    p->accessor_bif_timers = NULL;
     p->mbuf = NULL;
     p->mbuf_sz = 0;
     p->psd = NULL;
@@ -10798,11 +11013,7 @@ void erts_init_empty_process(Process *p)
     p->bin_old_vheap = 0;
     p->sys_task_qs = NULL;
     p->bin_vheap_mature = 0;
-#ifdef ERTS_SMP
-    p->common.u.alive.ptimer = NULL;
-#else
-    memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer));
-#endif
+    ERTS_PTMR_INIT(p);
     p->next = NULL;
     p->off_heap.first = NULL;
     p->off_heap.overhead = 0;
@@ -10823,14 +11034,15 @@ void erts_init_empty_process(Process *p)
     p->msg.last = &p->msg.first;
     p->msg.save = &p->msg.first;
     p->msg.len = 0;
-    p->u.bif_timers = NULL;
+    p->bif_timers = NULL;
+    p->accessor_bif_timers = NULL;
     p->dictionary = NULL;
    p->seq_trace_clock = 0;
    p->seq_trace_lastcnt = 0;
    p->seq_trace_token = NIL;
-    p->initial[0] = 0;
-    p->initial[1] = 0;
-    p->initial[2] = 0;
+    p->u.initial[0] = 0;
+    p->u.initial[1] = 0;
+    p->u.initial[2] = 0;
     p->catches = 0;
     p->cp = NULL;
     p->i = NULL;
@@ -10851,6 +11063,8 @@ void erts_init_empty_process(Process *p)
     p->parent = NIL;
     p->approx_started = 0;
 
+    p->static_flags = 0;
+
     p->common.u.alive.started_interval = 0;
 
 #ifdef HIPE
@@ -10915,7 +11129,8 @@ erts_debug_verify_clean_empty_process(Process* p)
     ASSERT(p->suspend_monitors == NULL);
     ASSERT(p->msg.first == NULL);
     ASSERT(p->msg.len == 0);
-    ASSERT(p->u.bif_timers == NULL);
+    ASSERT(p->bif_timers == NULL);
+    ASSERT(p->accessor_bif_timers == NULL);
     ASSERT(p->dictionary == NULL);
     ASSERT(p->catches == 0);
     ASSERT(p->cp == NULL);
@@ -11079,7 +11294,6 @@ set_proc_exiting(Process *p,
      */
     p->freason = EXTAG_EXIT;
     KILL_CATCHES(p);
-    cancel_timer(p);
     p->i = (BeamInstr *) beam_exit;
 
     if (enqueue)
@@ -11224,11 +11438,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
      hp = erts_alloc_message_heap(term_size, &bp, &ohp, to, to_locksp);
      mess = copy_struct(exit_term, term_size, &hp, ohp);
-       erts_queue_message(to, to_locksp, bp, mess, NIL
-#ifdef USE_VM_PROBES
-                          , NIL
-#endif
-                          );
+       erts_queue_message(to, to_locksp, bp, mess, NIL);
     } else {
      ErlHeapFragment* bp;
      Eterm* hp;
@@ -11244,11 +11454,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
      /* the trace token must in this case be updated by the caller */
      seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL);
      temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
-       erts_queue_message(to, to_locksp, bp, mess, temp_token
-#ifdef USE_VM_PROBES
-                          , NIL
-#endif
-                          );
+       erts_queue_message(to, to_locksp, bp, mess, temp_token);
     }
 }
@@ -11501,7 +11707,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
     ErtsMonitor *rmon;
     Process *rp;
 
-    if (mon->type == MON_ORIGIN) {
+    switch (mon->type) {
+    case MON_ORIGIN:
      /* We are monitoring someone else, we need to demonitor that one.. */
      if (is_atom(mon->pid)) { /* remote by name */
          ASSERT(is_node_name_atom(mon->pid));
@@ -11564,7 +11771,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
              }
          }
      }
-    } else { /* type == MON_TARGET */
+       break;
+    case MON_TARGET:
      ASSERT(mon->type == MON_TARGET);
      ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
      if (is_internal_port(mon->pid)) {
@@ -11623,6 +11831,12 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
              }
          }
      }
+       break;
+    case MON_TIME_OFFSET:
+       erts_demonitor_time_offset(mon->ref);
+       break;
+    default:
+       ERTS_INTERNAL_ERROR("Invalid monitor type");
     }
  done:
     /* As the monitors are previously removed from the process,
@@ -11769,6 +11983,7 @@ erts_do_exit_process(Process* p, Eterm reason)
 {
     p->arity = 0;            /* No live registers */
     p->fvalue = reason;
+
 
 #ifdef USE_VM_PROBES
     if (DTRACE_ENABLED(process_exit)) {
@@ -11781,6 +11996,9 @@ erts_do_exit_process(Process* p, Eterm reason)
     }
 #endif
 
+    if (p->static_flags & ERTS_STC_FLG_SYSTEM_PROC)
+       erl_exit(1, "System process %T terminated: %T\n", p->common.id, reason);
+
 #ifdef ERTS_SMP
     ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
     /* By locking all locks (main lock is already locked) when going
@@ -11820,20 +12038,20 @@ erts_do_exit_process(Process* p, Eterm reason)
     ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS)
         == F_INITIAL_TRACE_FLAGS);
 
-    cancel_timer(p);           /* Always cancel timer just in case */
-
-    if (p->u.bif_timers)
-       erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
+    ASSERT(erts_proc_read_refc(p) > 0);
+    if (ERTS_PTMR_IS_SET(p)) {
+       erts_cancel_proc_timer(p);
+       ASSERT(erts_proc_read_refc(p) > 0);
+    }
 
     erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
 
     /*
-     * The p->u.bif_timers of this process can *not* be used anymore;
+     * p->u.initial of this process can *not* be used anymore;
      * will be overwritten by misc termination data.
      */
     p->u.terminate = NULL;
-
     erts_continue_exit_process(p);
 }
@@ -11858,6 +12076,27 @@ erts_continue_exit_process(Process *p)
 
     ASSERT(ERTS_PROC_IS_EXITING(p));
 
+    ASSERT(erts_proc_read_refc(p) > 0);
+    if (p->bif_timers) {
+       if (erts_cancel_bif_timers(p, p->bif_timers, &p->u.terminate)) {
+           ASSERT(erts_proc_read_refc(p) > 0);
+           goto yield;
+       }
+       ASSERT(erts_proc_read_refc(p) > 0);
+       p->bif_timers = NULL;
+    }
+
+    if (p->accessor_bif_timers) {
+       if (erts_detach_accessor_bif_timers(p,
+                                           p->accessor_bif_timers,
+                                           &p->u.terminate)) {
+           ASSERT(erts_proc_read_refc(p) > 0);
+           goto yield;
+       }
+       ASSERT(erts_proc_read_refc(p) > 0);
+       p->accessor_bif_timers = NULL;
+    }
+
 #ifdef ERTS_SMP
     if (p->flags & F_HAVE_BLCKD_MSCHED) {
       ErtsSchedSuspendResult ssr;
@@ -11951,6 +12190,8 @@ erts_continue_exit_process(Process *p)
       p->scheduler_data->current_process = NULL;
       p->scheduler_data->free_process = p;
+#else
+       erts_proc_inc_refc(p); /* Decremented in schedule() */
 #endif
 
     /* Time of death! */
@@ -11969,29 +12210,23 @@ erts_continue_exit_process(Process *p)
     {
      /* Inactivate and notify free */
      erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
-#ifdef ERTS_SMP
      int refc_inced = 0;
-#endif
      while (1) {
          n = e = a;
          ASSERT(a & ERTS_PSFLG_EXITING);
          n |= ERTS_PSFLG_FREE;
          n &= ~ERTS_PSFLG_ACTIVE;
-#ifdef ERTS_SMP
          if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
-               erts_smp_proc_inc_refc(p);
+               erts_proc_inc_refc(p);
              refc_inced = 1;
          }
-#endif
          a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
         if (a == e)
             break;
      }
 
-#ifdef ERTS_SMP
      if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
-           erts_smp_proc_dec_refc(p);
-#endif
+           erts_proc_dec_refc(p);
     }
 
     dep = ((p->flags & F_DISTRIBUTION)
@@ -12082,64 +12317,6 @@ erts_continue_exit_process(Process *p)
 }
 
-/* Callback for process timeout */
-static void
-timeout_proc(Process* p)
-{
-    erts_aint32_t state;
-    BeamInstr** pi = (BeamInstr **) p->def_arg_reg;
-    p->i = *pi;
-    p->flags |= F_TIMO;
-    p->flags &= ~F_INSLPQUEUE;
-
-    state = erts_smp_atomic32_read_acqb(&p->state);
-    if (!(state & ERTS_PSFLG_ACTIVE))
-       schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
-}
-
-
-void
-cancel_timer(Process* p)
-{
-    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
-    p->flags &= ~(F_INSLPQUEUE|F_TIMO);
-#ifdef ERTS_SMP
-    erts_cancel_smp_ptimer(p->common.u.alive.ptimer);
-#else
-    erts_cancel_timer(&p->common.u.alive.tm);
-#endif
-}
-
-/*
- * Insert a process into the time queue, with a timeout 'timeout' in ms.
- */
-void
-set_timer(Process* p, Uint timeout)
-{
-    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
-
-    /* check for special case timeout=0 DONT ADD TO time queue */
-    if (timeout == 0) {
-       p->flags |= F_TIMO;
-       return;
-    }
-    p->flags |= F_INSLPQUEUE;
-    p->flags &= ~F_TIMO;
-
-#ifdef ERTS_SMP
-    erts_create_smp_ptimer(&p->common.u.alive.ptimer,
-                          p->common.id,
-                          (ErlTimeoutProc) timeout_proc,
-                          timeout);
-#else
-    erts_set_timer(&p->common.u.alive.tm,
-                  (ErlTimeoutProc) timeout_proc,
-                  NULL,
-                  (void*) p,
-                  timeout);
-#endif
-}
-
 /*
  * Stack dump functions follow.
  */
@@ -12155,7 +12332,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
     }
     erts_program_counter_info(to, to_arg, p);
     for (sp = p->stop; sp < STACK_START(p); sp++) {
-        yreg = stack_element_dump(to, to_arg, p, sp, yreg);
+        yreg = stack_element_dump(to, to_arg, sp, yreg);
     }
 }
@@ -12212,7 +12389,7 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
 }
 
 static int
-stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
+stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
 {
     Eterm x = *sp;
@@ -12241,6 +12418,218 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
 }
 
 /*
+ * Print scheduler information
+ */
+void
+erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
+    int i;
+    erts_aint32_t flg;
+    Process *p;
+
+    erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+
+#ifdef ERTS_SMP
+    flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags);
+    erts_print(to, to_arg, "Scheduler Sleep Info Flags: ");
+    for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) {
+        erts_aint32_t chk = (1 << i);
+        if (flg & chk) {
+            switch (chk) {
+            case ERTS_SSI_FLG_SLEEPING:
+                erts_print(to, to_arg, "SLEEPING"); break;
+            case ERTS_SSI_FLG_POLL_SLEEPING:
+                erts_print(to, to_arg, "POLL_SLEEPING"); break;
+            case ERTS_SSI_FLG_TSE_SLEEPING:
+                erts_print(to, to_arg, "TSE_SLEEPING"); break;
+            case ERTS_SSI_FLG_WAITING:
+                erts_print(to, to_arg, "WAITING"); break;
+            case ERTS_SSI_FLG_SUSPENDED:
+                erts_print(to, to_arg, "SUSPENDED"); break;
+            default:
+                erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+            }
+            if (flg > chk)
+                erts_print(to, to_arg, " | ");
+            flg -= chk;
+        }
+    }
+    erts_print(to, to_arg, "\n");
+#endif
+
+    flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work);
+    erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: ");
+    for (i = 0; i < ERTS_SSI_AUX_WORK_MAX && flg; i++) {
+        erts_aint32_t chk = (1 << i);
+        if (flg & chk) {
+            switch (chk) {
+            case ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP:
+                erts_print(to, to_arg, "DELAYED_AW_WAKEUP"); break;
+            case ERTS_SSI_AUX_WORK_DD:
+                erts_print(to, to_arg, "DELAYED_DEALLOC"); break;
+            case ERTS_SSI_AUX_WORK_DD_THR_PRGR:
+                erts_print(to, to_arg, "DELAYED_DEALLOC_THR_PRGR"); break;
+            case ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC:
+                erts_print(to, to_arg, "FIX_ALLOC_DEALLOC"); break;
+            case ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM:
+                erts_print(to, to_arg, "FIX_ALLOC_LOWER_LIM"); break;
+            case ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP:
+                erts_print(to, to_arg, "THR_PRGR_LATER_OP"); break;
+            case ERTS_SSI_AUX_WORK_CNCLD_TMRS:
+                erts_print(to, to_arg, "CANCELED_TIMERS"); break;
+            case ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR:
+                erts_print(to, to_arg, "CANCELED_TIMERS_THR_PRGR"); break;
+            case ERTS_SSI_AUX_WORK_ASYNC_READY:
+                erts_print(to, to_arg, "ASYNC_READY"); break;
+            case ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN:
+                erts_print(to, to_arg, "ASYNC_READY_CLEAN"); break;
+            case ERTS_SSI_AUX_WORK_MISC_THR_PRGR:
+                erts_print(to, to_arg, "MISC_THR_PRGR"); break;
+            case ERTS_SSI_AUX_WORK_MISC:
+                erts_print(to, to_arg, "MISC"); break;
+            case ERTS_SSI_AUX_WORK_CHECK_CHILDREN:
+                erts_print(to, to_arg, "CHECK_CHILDREN"); break;
+            case ERTS_SSI_AUX_WORK_SET_TMO:
+                erts_print(to, to_arg, "SET_TMO"); break;
+            case ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK:
+                erts_print(to, to_arg, "MSEG_CACHE_CHECK"); break;
+            case ERTS_SSI_AUX_WORK_REAP_PORTS:
+                erts_print(to, to_arg, "REAP_PORTS"); break;
+            default:
+                erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+            }
+            if (flg > chk)
+                erts_print(to, to_arg, " | ");
+            flg -= chk;
+        }
+    }
+    erts_print(to, to_arg, "\n");
+
+    erts_print(to, to_arg, "Current Port: ");
+    if (esdp->current_port)
+        erts_print(to, to_arg, "%T", esdp->current_port->common.id);
+    erts_print(to, to_arg, "\n");
+
+    p = esdp->current_process;
+    erts_print(to, to_arg, "Current Process: ");
+    if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) {
+        flg = erts_smp_atomic32_read_dirty(&p->state);
+        erts_print(to, to_arg, "%T\n", p->common.id);
+
+        erts_print(to, to_arg, "Current Process State: ");
+        erts_dump_process_state(to, to_arg, flg);
+
+        erts_print(to, to_arg, "Current Process Internal State: ");
+        erts_dump_extended_process_state(to, to_arg, flg);
+
+        erts_print(to, to_arg, "Current Process Program counter: %p (", p->i);
+        print_function_from_pc(to, to_arg, p->i);
+        erts_print(to, to_arg, ")\n");
+        erts_print(to, to_arg, "Current Process CP: %p (", p->cp);
+        print_function_from_pc(to, to_arg, p->cp);
+        erts_print(to, to_arg, ")\n");
+
+        /* Getting this stacktrace can segfault if we are very very
+           unlucky if called while a process is being garbage collected.
+           Therefore we only call this on other schedulers if we either
+           have protection against segfaults, or we know that the process
+           is not garbage collecting. It *should* always be safe to call
+           on a process owned by us, even if it is currently being garbage
+           collected.
+        */
+        erts_print(to, to_arg, "Current Process Limited Stack Trace:\n");
+        erts_limited_stack_trace(to, to_arg, p);
+    } else
+        erts_print(to, to_arg, "\n");
+
+    for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) {
+        erts_print(to, to_arg, "Run Queue ");
+        switch (i) {
+        case PRIORITY_MAX:
+            erts_print(to, to_arg, "Max ");
+            break;
+        case PRIORITY_HIGH:
+            erts_print(to, to_arg, "High ");
+            break;
+        case PRIORITY_NORMAL:
+            erts_print(to, to_arg, "Normal ");
+            break;
+        case PRIORITY_LOW:
+            erts_print(to, to_arg, "Low ");
+            break;
+        default:
+            erts_print(to, to_arg, "Unknown ");
+            break;
+        }
+        erts_print(to, to_arg, "Length: %d\n",
+                   erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len));
+    }
+    erts_print(to, to_arg, "Run Queue Port Length: %d\n",
+               erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len));
+
+    flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags);
+    erts_print(to, to_arg, "Run Queue Flags: ");
+    for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) {
+        erts_aint32_t chk = (1 << i);
+        if (flg & chk) {
+            switch (chk) {
+            case (1 << PRIORITY_MAX):
+                erts_print(to, to_arg, "NONEMPTY_MAX"); break;
+            case (1 << PRIORITY_HIGH):
+                erts_print(to, to_arg, "NONEMPTY_HIGH"); break;
+            case (1 << PRIORITY_NORMAL):
+                erts_print(to, to_arg, "NONEMPTY_NORMAL"); break;
+            case (1 << PRIORITY_LOW):
+                erts_print(to, to_arg, "NONEMPTY_LOW"); break;
+            case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+                erts_print(to, to_arg, "EMIGRATE_MAX"); break;
+            case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+                erts_print(to, to_arg, "EMIGRATE_HIGH"); break;
+            case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+                erts_print(to, to_arg, "EMIGRATE_NORMAL"); break;
+            case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)):
+                erts_print(to, to_arg, "EMIGRATE_LOW"); break;
+            case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+                erts_print(to, to_arg, "IMMIGRATE_MAX"); break;
+            case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+                erts_print(to, to_arg, "IMMIGRATE_HIGH"); break;
+            case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+                erts_print(to, to_arg, "IMMIGRATE_NORMAL"); break;
+            case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)):
+                erts_print(to, to_arg, "IMMIGRATE_LOW"); break;
+            case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+                erts_print(to, to_arg, "EVACUATE_MAX"); break;
+            case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+                erts_print(to, to_arg, "EVACUATE_HIGH"); break;
+            case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+                erts_print(to, to_arg, "EVACUATE_NORMAL"); break;
+            case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EVACUATE_SHFT)):
+                erts_print(to, to_arg, "EVACUATE_LOW"); break;
+            case ERTS_RUNQ_FLG_OUT_OF_WORK:
+                erts_print(to, to_arg, "OUT_OF_WORK"); break;
+            case ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK:
+                erts_print(to, to_arg, "HALFTIME_OUT_OF_WORK"); break;
+            case ERTS_RUNQ_FLG_SUSPENDED:
+                erts_print(to, to_arg, "SUSPENDED"); break;
+            case ERTS_RUNQ_FLG_CHK_CPU_BIND:
+                erts_print(to, to_arg, "CHK_CPU_BIND"); break;
+            case ERTS_RUNQ_FLG_INACTIVE:
+                erts_print(to, to_arg, "INACTIVE"); break;
+            case ERTS_RUNQ_FLG_NONEMPTY:
+                erts_print(to, to_arg, "NONEMPTY"); break;
+            case ERTS_RUNQ_FLG_PROTECTED:
+                erts_print(to, to_arg, "PROTECTED"); break;
+            default:
+                erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
+            }
+            if (flg > chk)
+                erts_print(to, to_arg, " | ");
+            flg -= chk;
+        }
+    }
+    erts_print(to, to_arg, "\n");
+}
+
+/*
  * A nice system halt closing all open port goes as follows:
  * 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
  *    on all schedulers, then schedules itself out.
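Editor's note: the recurring pattern in this diff replaces the old tick accounting (erts_do_time_read_and_reset/erts_bump_timer) with an absolute deadline on the OS monotonic clock (erts_get_monotonic_time, erts_next_timeout_time), sleeps for the remainder with erts_tse_twait, re-reads the clock after every wakeup, and retries when the wait is interrupted (EINTR). The following is a minimal standalone sketch of that pattern, not ERTS code: it substitutes POSIX clock_gettime(CLOCK_MONOTONIC) and nanosleep for the ERTS primitives, and the names monotonic_nsec and wait_until are invented for this illustration.

/* Standalone sketch (not ERTS code) of the monotonic-deadline wait loop
 * introduced in this commit. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t monotonic_nsec(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t) ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Sleep until 'timeout_time' (absolute monotonic time in nanoseconds);
 * returns the monotonic time observed once the deadline has passed. */
static int64_t wait_until(int64_t timeout_time)
{
    int64_t current_time = monotonic_nsec();
    int res = 0;

    do {
        struct timespec req;
        int64_t remaining = timeout_time - current_time;
        if (remaining <= 0)
            break;                        /* deadline reached: caller bumps timers */
        req.tv_sec  = remaining / 1000000000LL;
        req.tv_nsec = remaining % 1000000000LL;
        res = nanosleep(&req, NULL) == 0 ? 0 : errno;
        current_time = monotonic_nsec();  /* re-read the clock after every wakeup */
    } while (res == EINTR);               /* interrupted: recompute and retry */

    return current_time;
}

int main(void)
{
    int64_t deadline = monotonic_nsec() + 100 * 1000000LL; /* 100 ms from now */
    int64_t now = wait_until(deadline);
    printf("woke %lld ns after the deadline\n", (long long) (now - deadline));
    return 0;
}

Re-reading the clock inside the loop is what makes the EINTR retry safe: the remaining wait shrinks on every iteration instead of restarting from the full timeout, which mirrors why the new scheduler_wait()/suspend_scheduler() loops recompute current_time before calling erts_tse_twait again, and why an absolute monotonic deadline (immune to wall-clock adjustments) is used rather than a relative tick count.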