Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--  erts/emulator/beam/erl_process.c | 1528
1 file changed, 1084 insertions(+), 444 deletions(-)
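
Several hunks in the diff below change the wakeup test in set_aux_work_flags_wakeup_nob() and set_aux_work_flags_wakeup_relb() from (old_flgs & flgs) == 0 to (old_flgs & flgs) != flgs. A minimal standalone sketch of why the new test matters; a plain unsigned stands in for the erts_atomic32_t aux_work word, and request_aux_work() is a hypothetical stand-in for those two functions:

#include <stdio.h>

static unsigned aux_work; /* models the erts_atomic32_t ssi->aux_work word */

/* Hypothetical stand-in for set_aux_work_flags_wakeup_nob()/_relb();
 * returning 1 stands in for poking the sleeping scheduler. */
static int request_aux_work(unsigned flgs)
{
    unsigned old_flgs = aux_work;
    aux_work |= flgs;            /* erts_atomic32_read_bor_nob() in ERTS */
    /* Pre-change test: (old_flgs & flgs) == 0 skipped the wakeup as soon
     * as *one* requested flag was already set, even if others were not.
     * Post-change test: wake whenever *any* requested flag is missing. */
    return (old_flgs & flgs) != flgs;
}

int main(void)
{
    aux_work = 0x1;
    /* Request flags 0x1|0x2: 0x2 is still unserviced, so the scheduler
     * must be woken; the old "== 0" test would have skipped this. */
    printf("wake: %d\n", request_aux_work(0x1 | 0x2)); /* prints wake: 1 */
    return 0;
}

In the _nob variant the first test also gates the read-bor itself, so under the old test a request whose flag set partially overlapped already-pending work could be dropped entirely; the stricter test closes that window.
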
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index b6ad8575cf..4ec98d5cc7 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -3,16 +3,17 @@ * * Copyright Ericsson AB 1996-2014. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ @@ -43,8 +44,11 @@ #include "erl_async.h" #include "dtrace-wrapper.h" #include "erl_ptab.h" +#include "erl_bif_unique.h" +#define ERTS_WANT_TIMER_WHEEL_API +#include "erl_time.h" - +#define ERTS_CHECK_TIME_REDS CONTEXT_REDS #define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0) #define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2) @@ -152,7 +156,7 @@ extern BeamInstr beam_continue_exit[]; /* Eager check I/O not supported on OSE yet. 
*/ int erts_eager_check_io = 0; #else -int erts_eager_check_io = 0; +int erts_eager_check_io = 1; #endif int erts_sched_compact_load; int erts_sched_balance_util = 0; @@ -162,6 +166,9 @@ Uint erts_no_dirty_cpu_schedulers; Uint erts_no_dirty_io_schedulers; #endif +static char *erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_NO_FLAGS] = {0}; +int erts_aux_work_no_flags = ERTS_SSI_AUX_WORK_NO_FLAGS; + #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY (4*1024*1024) #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_LAZY (512*1024) #define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_MEDIUM (64*1024) @@ -346,6 +353,7 @@ struct erts_system_monitor_flags_t erts_system_monitor_flags; /* system performance monitor */ Eterm erts_system_profile; struct erts_system_profile_flags_t erts_system_profile_flags; +int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP; #if ERTS_MAX_PROCESSES > 0x7fffffff #error "Need to store process_count in another type" @@ -457,13 +465,12 @@ do { \ static void exec_misc_ops(ErtsRunQueue *); static void print_function_from_pc(int to, void *to_arg, BeamInstr* x); -static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, - int yreg); +static int stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg); static void aux_work_timeout(void *unused); static void aux_work_timeout_early_init(int no_schedulers); static void aux_work_timeout_late_init(void); -static void setup_aux_work_timer(void); +static void setup_aux_work_timer(ErtsSchedulerData *esdp); static int execute_sys_tasks(Process *c_p, erts_aint32_t *statep, @@ -493,6 +500,8 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_DD; valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR; + valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS; + valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP; #endif #if HAVE_ERTS_MSEG @@ -504,9 +513,10 @@ dbg_chk_aux_work_val(erts_aint32_t value) #ifdef ERTS_SSI_AUX_WORK_REAP_PORTS valid |= ERTS_SSI_AUX_WORK_REAP_PORTS; #endif + valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; if (~valid & value) - erl_exit(ERTS_ABORT_EXIT, + erts_exit(ERTS_ABORT_EXIT, "Invalid aux_work value found: 0x%x\n", ~valid & value); } @@ -562,6 +572,41 @@ erts_pre_init_process(void) erts_tsd_key_create(&sched_data_key, "erts_sched_data_key"); #endif + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX] + = "DELAYED_AW_WAKEUP"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_IX] + = "DD"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX] + = "DD_THR_PRGR"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX] + = "FIX_ALLOC_DEALLOC"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX] + = "FIX_ALLOC_LOWER_LIM"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX] + = "THR_PRGR_LATER_OP"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX] + = "CNCLD_TMRS"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX] + = "CNCLD_TMRS_THR_PRGR"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_IX] + = "ASYNC_READY"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX] + = "ASYNC_READY_CLEAN"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX] + = "MISC_THR_PRGR"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX] + = "MISC"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX] + = "CHECK_CHILDREN"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX] + = "SET_TMO"; + 
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX] + = "MSEG_CACHE_CHECK"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX] + = "REAP_PORTS"; + erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX] + = "DEBUG_WAIT_COMPLETED"; + #ifdef ERTS_ENABLE_LOCK_CHECK { int ix; @@ -581,11 +626,6 @@ erts_pre_init_process(void) erts_psd_required_locks[ERTS_PSD_SCHED_ID].set_locks = ERTS_PSD_SCHED_ID_SET_LOCKS; - erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks - = ERTS_PSD_DIST_ENTRY_GET_LOCKS; - erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks - = ERTS_PSD_DIST_ENTRY_SET_LOCKS; - erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].get_locks = ERTS_PSD_CALL_TIME_BP_GET_LOCKS; erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks @@ -610,13 +650,11 @@ erts_pre_init_process(void) #endif } -#ifdef ERTS_SMP static void release_process(void *vproc) { - erts_smp_proc_dec_refc((Process *) vproc); + erts_proc_dec_refc((Process *) vproc); } -#endif /* initialize the scheduler */ void @@ -632,16 +670,18 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab) erts_ptab_init_table(&erts_proc, ERTS_ALC_T_PROC_TABLE, -#ifdef ERTS_SMP release_process, -#else - NULL, -#endif (ErtsPTabElementCommon *) &erts_invalid_process.common, proc_tab_size, sizeof(Process), "process_table", - legacy_proc_tab); + legacy_proc_tab, +#ifdef ERTS_SMP + 1 +#else + 0 +#endif + ); last_reductions = 0; last_exact_reductions = 0; @@ -702,8 +742,8 @@ init_sched_wall_time(ErtsSchedWallTime *swtp) static ERTS_INLINE Uint64 sched_wall_time_ts(void) { -#ifdef HAVE_GETHRTIME - return (Uint64) sys_gethrtime(); +#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT + return (Uint64) erts_os_monotonic_time(); #else Uint64 res; SysTimeval tv; @@ -927,11 +967,25 @@ typedef struct { erts_smp_atomic32_t refc; } ErtsSchedWallTimeReq; +typedef struct { + Process *proc; + Eterm ref; + Eterm ref_heap[REF_THING_SIZE]; + Uint req_sched; + erts_smp_atomic32_t refc; +} ErtsSystemCheckReq; + + #if !HALFWORD_HEAP ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(swtreq, ErtsSchedWallTimeReq, 5, ERTS_ALC_T_SCHED_WTIME_REQ) + +ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(screq, + ErtsSystemCheckReq, + 5, + ERTS_ALC_T_SYS_CHECK_REQ) #else static ERTS_INLINE ErtsSchedWallTimeReq * swtreq_alloc(void) @@ -945,6 +999,19 @@ swtreq_free(ErtsSchedWallTimeReq *ptr) { erts_free(ERTS_ALC_T_SCHED_WTIME_REQ, ptr); } + +static ERTS_INLINE ErtsSystemCheckReq * +screq_alloc(void) +{ + return erts_alloc(ERTS_ALC_T_SYS_CHECK_REQ, + sizeof(ErtsSystemCheckReq)); +} + +static ERTS_INLINE void +screq_free(ErtsSystemCheckReq *ptr) +{ + erts_free(ERTS_ALC_T_SYS_CHECK_REQ, ptr); +} #endif static void @@ -1023,11 +1090,7 @@ reply_sched_wall_time(void *vswtrp) hpp = &hp; } - erts_queue_message(rp, &rp_locks, bp, msg, NIL -#ifdef USE_VM_PROBES - , NIL -#endif - ); + erts_queue_message(rp, &rp_locks, bp, msg, NIL); if (swtrp->req_sched == esdp->no) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -1035,7 +1098,7 @@ reply_sched_wall_time(void *vswtrp) if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); + erts_proc_dec_refc(rp); if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0) swtreq_free(vswtrp); @@ -1067,7 +1130,7 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable) erts_smp_atomic32_init_nob(&swtrp->refc, (erts_aint32_t) erts_no_schedulers); - erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers); + erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers); #ifdef ERTS_SMP if (erts_no_schedulers > 1) @@ -1082,6 +1145,75 @@ 
erts_sched_wall_time_request(Process *c_p, int set, int enable) return ref; } +static void +reply_system_check(void *vscrp) +{ + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + ErtsSystemCheckReq *scrp = (ErtsSystemCheckReq *) vscrp; + ErtsProcLocks rp_locks = (scrp->req_sched == esdp->no ? ERTS_PROC_LOCK_MAIN : 0); + Process *rp = scrp->proc; + Eterm msg; + Eterm *hp = NULL; + Eterm **hpp; + Uint sz; + ErlOffHeap *ohp = NULL; + ErlHeapFragment *bp = NULL; + + ASSERT(esdp); +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); +#endif + + sz = REF_THING_SIZE; + hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks); + hpp = &hp; + msg = STORE_NC(hpp, ohp, scrp->ref); + + erts_queue_message(rp, &rp_locks, bp, msg, NIL); + + if (scrp->req_sched == esdp->no) + rp_locks &= ~ERTS_PROC_LOCK_MAIN; + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + + erts_proc_dec_refc(rp); + + if (erts_smp_atomic32_dec_read_nob(&scrp->refc) == 0) + screq_free(vscrp); +} + + +Eterm erts_system_check_request(Process *c_p) { + ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p); + Eterm ref; + ErtsSystemCheckReq *scrp; + Eterm *hp; + + scrp = screq_alloc(); + ref = erts_make_ref(c_p); + hp = &scrp->ref_heap[0]; + + scrp->proc = c_p; + scrp->ref = STORE_NC(&hp, NULL, ref); + scrp->req_sched = esdp->no; + erts_smp_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers); + + erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); + +#ifdef ERTS_SMP + if (erts_no_schedulers > 1) + erts_schedule_multi_misc_aux_work(1, + erts_no_schedulers, + reply_system_check, + (void *) scrp); +#endif + + reply_system_check((void *) scrp); + + return ref; +} + static ERTS_INLINE ErtsProcList * proclist_create(Process *p) { @@ -1128,7 +1260,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data) xplocks &= ~plocks; if (xplocks && erts_smp_proc_trylock(p, xplocks) == EBUSY) { if (xplocks & ERTS_PROC_LOCK_MAIN) { - erts_smp_proc_inc_refc(p); + erts_proc_inc_refc(p); erts_smp_proc_unlock(p, plocks); erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL); refc = 1; @@ -1144,7 +1276,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data) if (xplocks) erts_smp_proc_unlock(p, xplocks); if (refc) - erts_smp_proc_dec_refc(p); + erts_proc_dec_refc(p); ASSERT(p->psd); if (p->psd != psd) erts_free(ERTS_ALC_T_PSD, psd); @@ -1176,7 +1308,7 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags) case 0: break; default: - erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n", + erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__); break; } @@ -1193,11 +1325,11 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi, ERTS_DBG_CHK_SSI_AUX_WORK(ssi); old_flgs = erts_atomic32_read_nob(&ssi->aux_work); - if ((old_flgs & flgs) == 0) { + if ((old_flgs & flgs) != flgs) { old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs); - if ((old_flgs & flgs) == 0) { + if ((old_flgs & flgs) != flgs) { #ifdef ERTS_SMP erts_sched_poke(ssi); #else @@ -1217,7 +1349,7 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi, old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs); - if ((old_flgs & flgs) == 0) { + if ((old_flgs & flgs) != flgs) { #ifdef ERTS_SMP erts_sched_poke(ssi); #else @@ -1715,11 +1847,6 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin awdp->dd.thr_prgr = wakeup; haw_thr_prgr_soft_wakeup(awdp, wakeup); } - else if (awdp->dd.completed_callback) { - 
awdp->dd.completed_callback(awdp->dd.completed_arg); - awdp->dd.completed_callback = NULL; - awdp->dd.completed_arg = NULL; - } return aux_work & ~ERTS_SSI_AUX_WORK_DD; } @@ -1761,17 +1888,107 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i } else { unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR); - if (awdp->dd.completed_callback) { - awdp->dd.completed_callback(awdp->dd.completed_arg); - awdp->dd.completed_callback = NULL; - awdp->dd.completed_arg = NULL; - } } return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR; } /* + * Canceled timers + */ + +void +erts_notify_canceled_timer(ErtsSchedulerData *esdp, int rsid) +{ + ASSERT(esdp && esdp == erts_get_scheduler_data()); + if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) + schedule_aux_work_wakeup(&esdp->aux_work_data, + rsid, + ERTS_SSI_AUX_WORK_CNCLD_TMRS); + else + set_aux_work_flags_wakeup_relb(ERTS_SCHED_SLEEP_INFO_IX(rsid-1), + ERTS_SSI_AUX_WORK_CNCLD_TMRS); +} + +static ERTS_INLINE erts_aint32_t +handle_canceled_timers(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) +{ + ErtsSchedulerSleepInfo *ssi = awdp->ssi; + int need_thr_progress = 0; + ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; + int more_work = 0; + +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS); + erts_handle_canceled_timers((void *) awdp->esdp, + &need_thr_progress, + &wakeup, + &more_work); + if (more_work) { + if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS) + & ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR) { + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR); + aux_work &= ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; + } + return aux_work; + } + + if (need_thr_progress) { + if (wakeup == ERTS_THR_PRGR_INVALID) + wakeup = erts_thr_progress_later(awdp->esdp); + awdp->cncld_tmrs.thr_prgr = wakeup; + set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR); + haw_thr_prgr_soft_wakeup(awdp, wakeup); + } + return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS; +} + +static ERTS_INLINE erts_aint32_t +handle_canceled_timers_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) +{ + ErtsSchedulerSleepInfo *ssi; + int need_thr_progress; + int more_work; + ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; + ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); + +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif + if (!erts_thr_progress_has_reached_this(current, awdp->cncld_tmrs.thr_prgr)) + return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; + + ssi = awdp->ssi; + need_thr_progress = 0; + more_work = 0; + + erts_handle_canceled_timers((void *) awdp->esdp, + &need_thr_progress, + &wakeup, + &more_work); + if (more_work) { + set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS); + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR); + return ((aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR) + | ERTS_SSI_AUX_WORK_CNCLD_TMRS); + } + + if (need_thr_progress) { + if (wakeup == ERTS_THR_PRGR_INVALID) + wakeup = erts_thr_progress_later(awdp->esdp); + awdp->cncld_tmrs.thr_prgr = wakeup; + haw_thr_prgr_soft_wakeup(awdp, wakeup); + } + else { + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR); + } + + return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; +} + +/* * Handle scheduled thread progress later operations. 
*/ #define ERTS_MAX_THR_PRGR_LATER_OPS 50 @@ -1860,78 +2077,142 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *), #endif } -#ifdef ERTS_SMP +static ERTS_INLINE erts_aint32_t +handle_debug_wait_completed(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) +{ + ErtsSchedulerSleepInfo *ssi = awdp->ssi; + erts_aint32_t saved_aux_work, flags; -static erts_atomic32_t completed_dealloc_count; +#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); +#endif + + flags = awdp->debug.wait_completed.flags; + + if (aux_work & flags) + return aux_work; + + saved_aux_work = erts_atomic32_read_acqb(&ssi->aux_work); + + if (saved_aux_work & flags) + return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; + + awdp->debug.wait_completed.callback(awdp->debug.wait_completed.arg); + + awdp->debug.wait_completed.flags = 0; + awdp->debug.wait_completed.callback = NULL; + awdp->debug.wait_completed.arg = NULL; + + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED); + + return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; +} + +static erts_atomic32_t debug_wait_completed_count; +static int debug_wait_completed_flags; static void -completed_dealloc(void *vproc) +thr_debug_wait_completed(void *vproc) { - if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) { + if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == 0) { erts_resume((Process *) vproc, (ErtsProcLocks) 0); - erts_smp_proc_dec_refc((Process *) vproc); + erts_proc_dec_refc((Process *) vproc); } } static void -setup_completed_dealloc(void *vproc) +setup_thr_debug_wait_completed(void *vproc) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ErtsAuxWorkData *awdp = (esdp - ? &esdp->aux_work_data - : aux_thread_aux_work_data); - erts_alloc_fix_alloc_shrink(awdp->sched_id, 0); - set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DD); - awdp->dd.completed_callback = completed_dealloc; - awdp->dd.completed_arg = vproc; + ErtsAuxWorkData *awdp; + erts_aint32_t wait_flags, aux_work_flags; +#ifdef ERTS_SMP + awdp = esdp ? 
&esdp->aux_work_data : aux_thread_aux_work_data; +#else + awdp = &esdp->aux_work_data; +#endif + + wait_flags = 0; + aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; + + if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS) { + erts_alloc_fix_alloc_shrink(awdp->sched_id, 0); + wait_flags |= (ERTS_SSI_AUX_WORK_DD + | ERTS_SSI_AUX_WORK_DD_THR_PRGR + | ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP); +#ifdef ERTS_SMP + aux_work_flags |= ERTS_SSI_AUX_WORK_DD; +#endif + } + + if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) { + wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS + | ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR + | ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP); +#ifdef ERTS_SMP + if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)) + aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS; +#endif + } + + set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags); + + awdp->debug.wait_completed.flags = wait_flags; + awdp->debug.wait_completed.callback = thr_debug_wait_completed; + awdp->debug.wait_completed.arg = vproc; } static void -prep_setup_completed_dealloc(void *vproc) +prep_setup_thr_debug_wait_completed(void *vproc) { - erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers+1); - if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == count) { + erts_aint32_t count = (erts_aint32_t) erts_no_schedulers; +#ifdef ERTS_SMP + count += 1; /* aux thread */ +#endif + if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) { /* scheduler threads */ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers, - setup_completed_dealloc, + setup_thr_debug_wait_completed, vproc); +#ifdef ERTS_SMP /* aux_thread */ erts_schedule_misc_aux_work(0, - setup_completed_dealloc, + setup_thr_debug_wait_completed, vproc); +#endif } } -#endif /* ERTS_SMP */ int -erts_debug_wait_deallocations(Process *c_p) +erts_debug_wait_completed(Process *c_p, int flags) { -#ifndef ERTS_SMP - erts_alloc_fix_alloc_shrink(1, 0); - return 1; -#else /* Only one process at a time can do this */ - erts_aint32_t count = (erts_aint32_t) (2*(erts_no_schedulers+1)); - if (0 == erts_atomic32_cmpxchg_mb(&completed_dealloc_count, + erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers); +#ifdef ERTS_SMP + count += 2; /* aux thread */ +#endif + if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count, count, 0)) { + debug_wait_completed_flags = flags; erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); - erts_smp_proc_inc_refc(c_p); + erts_proc_inc_refc(c_p); /* scheduler threads */ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers, - prep_setup_completed_dealloc, + prep_setup_thr_debug_wait_completed, (void *) c_p); +#ifdef ERTS_SMP /* aux_thread */ erts_schedule_misc_aux_work(0, - prep_setup_completed_dealloc, + prep_setup_thr_debug_wait_completed, (void *) c_p); +#endif return 1; } return 0; -#endif } @@ -2011,7 +2292,7 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) erts_port_release(prt); } if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) { - erl_exit_flush_async(erts_halt_code, ""); + erts_flush_async_exit(erts_halt_code, ""); } } return aux_work & ~ERTS_SSI_AUX_WORK_REAP_PORTS; @@ -2033,7 +2314,7 @@ static ERTS_INLINE erts_aint32_t handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO); - setup_aux_work_timer(); + setup_aux_work_timer(awdp->esdp); return aux_work & ~ERTS_SSI_AUX_WORK_SET_TMO; } @@ -2055,6 +2336,7 @@ 
handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) erts_aint32_t aux_work = orig_aux_work; erts_aint32_t ignore = 0; + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); #ifdef ERTS_SMP haw_thr_prgr_current_reset(awdp); #endif @@ -2093,6 +2375,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) #ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP, handle_thr_prgr_later_op); + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS, + handle_canceled_timers); + /* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */ + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR, + handle_canceled_timers_thr_prgr); #endif #if ERTS_USE_ASYNC_READY_Q @@ -2127,6 +2414,14 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS, handle_reap_ports); + /* + * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED *need* to be + * the last flag checked! + */ + + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED, + handle_debug_wait_completed); + ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); #ifdef ERTS_SMP @@ -2142,8 +2437,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) typedef struct { union { - ErlTimer data; - char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErlTimer))]; + ErtsTWheelTimer data; + char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsTWheelTimer))]; } timer; int initialized; @@ -2153,6 +2448,22 @@ typedef struct { static ErtsAuxWorkTmo *aux_work_tmo; +static ERTS_INLINE void +start_aux_work_timer(ErtsSchedulerData *esdp) +{ + ErtsMonotonicTime tmo = erts_get_monotonic_time(esdp); + tmo = ERTS_MONOTONIC_TO_CLKTCKS(tmo-1); + tmo += ERTS_MSEC_TO_CLKTCKS(1000) + 1; + erts_twheel_init_timer(&aux_work_tmo->timer.data); + ASSERT(esdp); + erts_twheel_set_timer(esdp->timer_wheel, + &aux_work_tmo->timer.data, + aux_work_timeout, + NULL, + (void *) esdp, + tmo); +} + static void aux_work_timeout_early_init(int no_schedulers) { @@ -2185,18 +2496,12 @@ void aux_work_timeout_late_init(void) { aux_work_tmo->initialized = 1; - if (erts_atomic32_read_nob(&aux_work_tmo->refc)) { - aux_work_tmo->timer.data.active = 0; - erts_set_timer(&aux_work_tmo->timer.data, - aux_work_timeout, - NULL, - NULL, - 1000); - } + if (erts_atomic32_read_nob(&aux_work_tmo->refc)) + start_aux_work_timer(erts_get_scheduler_data()); } static void -aux_work_timeout(void *unused) +aux_work_timeout(void *vesdp) { erts_aint32_t refc; int i; @@ -2219,32 +2524,18 @@ aux_work_timeout(void *unused) if (refc != 1 || 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) { /* Setup next timeout... 
*/ - aux_work_tmo->timer.data.active = 0; - erts_set_timer(&aux_work_tmo->timer.data, - aux_work_timeout, - NULL, - NULL, - 1000); + start_aux_work_timer((ErtsSchedulerData *) vesdp); } } static void -setup_aux_work_timer(void) +setup_aux_work_timer(ErtsSchedulerData *esdp) { -#ifndef ERTS_SMP - if (!erts_get_scheduler_data()) + if (!esdp || !esdp->timer_wheel) set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(0), ERTS_SSI_AUX_WORK_SET_TMO); else -#endif - { - aux_work_tmo->timer.data.active = 0; - erts_set_timer(&aux_work_tmo->timer.data, - aux_work_timeout, - NULL, - NULL, - 1000); - } + start_aux_work_timer(esdp); } erts_aint32_t @@ -2275,7 +2566,7 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable) if (refc == 1) { erts_atomic32_inc_acqb(&aux_work_tmo->refc); if (aux_work_tmo->initialized) - setup_aux_work_timer(); + setup_aux_work_timer(erts_get_scheduler_data()); } } return old; @@ -2318,7 +2609,6 @@ erts_active_schedulers(void) ERTS_ATOMIC_FOREACH_RUNQ(rq, as -= abs(rq->waiting)); - ASSERT(as >= 0); return as; } @@ -2640,6 +2930,13 @@ thr_prgr_fin_wait(void *vssi) static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp); +void +erts_interupt_aux_thread_timed(ErtsMonotonicTime timeout_time) +{ + /* TODO only poke when needed (based on timeout_time) */ + erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1)); +} + static void * aux_thread(void *unused) { @@ -2770,47 +3067,89 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sched_wall_time_change(esdp, thr_prgr_active); while (1) { + ErtsMonotonicTime current_time; aux_work = erts_atomic32_read_acqb(&ssi->aux_work); - if (aux_work) { - if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) { + if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (!thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); sched_wall_time_change(esdp, 1); } aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); - if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp) - && erts_thr_progress_update(esdp)) + if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); } - if (aux_work) - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + if (aux_work) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + current_time = erts_get_monotonic_time(esdp); + if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + erts_bump_timers(esdp->timer_wheel, current_time); + } + } + } else { + ErtsMonotonicTime timeout_time; + int do_timeout = 0; if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - if (thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 0); - sched_wall_time_change(esdp, 0); + timeout_time = erts_check_next_timeout_time(esdp); + current_time = erts_get_monotonic_time(esdp); + do_timeout = (current_time >= timeout_time); + } else + timeout_time = ERTS_MONOTONIC_TIME_MAX; + if (do_timeout) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); } - erts_thr_progress_prepare_wait(esdp); } + else { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } + erts_thr_progress_prepare_wait(esdp); + } - ERTS_SCHED_FAIR_YIELD(); + ERTS_SCHED_FAIR_YIELD(); - flgs = sched_spin_wait(ssi, spincount); - if (flgs & 
ERTS_SSI_FLG_SLEEPING) { - ASSERT(flgs & ERTS_SSI_FLG_WAITING); - flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING); + flgs = sched_spin_wait(ssi, spincount); if (flgs & ERTS_SSI_FLG_SLEEPING) { - int res; - ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING); ASSERT(flgs & ERTS_SSI_FLG_WAITING); - do { - res = erts_tse_wait(ssi->event); - } while (res == EINTR); + flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING); + if (flgs & ERTS_SSI_FLG_SLEEPING) { + int res; + ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING); + ASSERT(flgs & ERTS_SSI_FLG_WAITING); + current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 : + erts_get_monotonic_time(esdp); + do { + Sint64 timeout; + if (current_time >= timeout_time) + break; + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time + - current_time + - 1) + 1; + } else + timeout = -1; + res = erts_tse_twait(ssi->event, timeout); + current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 : + erts_get_monotonic_time(esdp); + } while (res == EINTR); + } } + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) + erts_thr_progress_finalize_wait(esdp); } - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) - erts_thr_progress_finalize_wait(esdp); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time) + erts_bump_timers(esdp->timer_wheel, current_time); } if (!(flgs & ERTS_SSI_FLG_WAITING)) { @@ -2843,7 +3182,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) else #endif { - erts_aint_t dt; erts_smp_atomic32_set_relb(&function_calls, 0); *fcalls = 0; @@ -2868,6 +3206,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) goto sys_aux_work; while (spincount-- > 0) { + ErtsMonotonicTime current_time; sys_poll_aux_work: @@ -2877,8 +3216,11 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ASSERT(!erts_port_task_have_outstanding_io_tasks()); erl_sys_schedule(1); /* Might give us something to do */ - dt = erts_do_time_read_and_reset(); - if (dt) erts_bump_timer(dt); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + current_time = erts_get_monotonic_time(esdp); + if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) + erts_bump_timers(esdp->timer_wheel, current_time); + } sys_aux_work: #ifndef ERTS_SMP @@ -2886,7 +3228,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) #endif aux_work = erts_atomic32_read_acqb(&ssi->aux_work); - if (aux_work) { + if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) { if (!working) sched_wall_time_change(esdp, working = 1); #ifdef ERTS_SMP @@ -2901,7 +3243,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } #ifndef ERTS_SMP - if (rq->len != 0 || rq->misc.start) + if (erts_smp_atomic32_read_dirty(&rq->len) != 0 || rq->misc.start) goto sys_woken; #else flgs = erts_smp_atomic32_read_acqb(&ssi->flags); @@ -2993,11 +3335,14 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erl_sys_schedule(0); - dt = erts_do_time_read_and_reset(); - if (dt) erts_bump_timer(dt); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp); + if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) + erts_bump_timers(esdp->timer_wheel, current_time); + } #ifndef ERTS_SMP - if (rq->len == 0 && !rq->misc.start) + if (erts_smp_atomic32_read_dirty(&rq->len) == 0 && !rq->misc.start) goto sys_aux_work; sys_woken: #else @@ -3466,7 +3811,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) prio = ERTS_PORT_PRIO_LEVEL; break; default: - erl_exit(ERTS_ABORT_EXIT, + 
erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Invalid immigrate queue mask", __FILE__, __LINE__, __func__); prio = 0; @@ -3490,7 +3835,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) rq = erts_port_runq(prt); if (rq) { if (rq != c_rq) - erl_exit(ERTS_ABORT_EXIT, + erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error", __FILE__, __LINE__, __func__); erts_enqueue_port(c_rq, prt); @@ -3681,7 +4026,7 @@ evacuate_run_queue(ErtsRunQueue *rq, prt_rq = erts_port_runq(prt); if (prt_rq) { if (prt_rq != to_rq) - erl_exit(ERTS_ABORT_EXIT, + erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s() internal error\n", __FILE__, __LINE__, __func__); erts_enqueue_port(to_rq, prt); @@ -3882,7 +4227,7 @@ no_procs: return 0; else { if (prt_rq != rq) - erl_exit(ERTS_ABORT_EXIT, + erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s() internal error\n", __FILE__, __LINE__, __func__); *rq_lockedp = 1; @@ -4714,7 +5059,7 @@ erts_fprintf(stderr, "--------------------------------\n"); rq->out_of_work_count = 0; (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags); - rq->max_len = rq->len; + rq->max_len = erts_smp_atomic32_read_dirty(&rq->len); for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { ErtsRunQueueInfo *rqi; rqi = (pix == ERTS_PORT_PRIO_LEVEL @@ -4872,7 +5217,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - int left_len = rq->len - 1; + int left_len = erts_smp_atomic32_read_dirty(&rq->len) - 1; if (left_len < 1) { int wo_reduce = wo_reds << wakeup_other.dec_shift; wo_reduce &= wakeup_other.dec_mask; @@ -4885,9 +5230,11 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) + ERTS_WAKEUP_OTHER_FIXED_INC); if (rq->wakeup_other > wakeup_other.limit) { #ifdef ERTS_DIRTY_SCHEDULERS - if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->waiting) - wake_dirty_schedulers(rq, 1); - else + if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { + if (rq->waiting) { + wake_dirty_schedulers(rq, 1); + } + } else #endif { int empty_rqs = @@ -4943,7 +5290,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - erts_aint32_t len = rq->len; + erts_aint32_t len = erts_smp_atomic32_read_dirty(&rq->len); if (len < 2) { rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds; if (rq->wakeup_other < 0) @@ -5039,7 +5386,7 @@ runq_supervisor(void *unused) ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) { erts_smp_runq_lock(rq); - if (rq->len != 0) + if (erts_smp_atomic32_read_dirty(&rq->len) != 0) wake_scheduler_on_empty_runq(rq); /* forced wakeup... 
*/ erts_smp_runq_unlock(rq); } @@ -5198,8 +5545,7 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST; awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; - awdp->dd.completed_callback = NULL; - awdp->dd.completed_arg = NULL; + awdp->cncld_tmrs.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; awdp->later_op.thr_prgr = ERTS_THR_PRGR_VAL_FIRST; awdp->later_op.size = 0; awdp->later_op.first = NULL; @@ -5229,6 +5575,9 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->delayed_wakeup.sched2jix[i] = -1; } #endif + awdp->debug.wait_completed.flags = 0; + awdp->debug.wait_completed.callback = NULL; + awdp->debug.wait_completed.arg = NULL; } static void @@ -5237,6 +5586,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, ErtsRunQueue* runq, char** daww_ptr, size_t daww_sz) { + esdp->timer_wheel = NULL; #ifdef ERTS_SMP erts_bits_init_state(&esdp->erl_bits_state); esdp->match_pseudo_process = NULL; @@ -5264,6 +5614,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, #else esdp->no = (Uint) num; #endif + esdp->ssi = ssi; esdp->current_process = NULL; esdp->current_port = NULL; @@ -5276,6 +5627,15 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->run_queue = runq; esdp->run_queue->scheduler = esdp; + esdp->last_monotonic_time = 0; + esdp->check_time_reds = 0; + + esdp->thr_id = (Uint32) num; + erts_sched_bif_unique_init(esdp); + + esdp->io.out = (Uint64) 0; + esdp->io.in = (Uint64) 0; + if (daww_ptr) { init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr); #ifdef ERTS_SMP @@ -5376,7 +5736,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online } rq->out_of_work_count = 0; rq->max_len = 0; - rq->len = 0; + erts_smp_atomic32_set_nob(&rq->len, 0); rq->wakeup_other = 0; rq->wakeup_other_reds = 0; rq->halt_in_progress = 0; @@ -5528,13 +5888,14 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online init_misc_aux_work(); #if !HALFWORD_HEAP init_swtreq_alloc(); + init_screq_alloc(); #endif + erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */ + debug_wait_completed_flags = 0; #ifdef ERTS_SMP - erts_atomic32_init_nob(&completed_dealloc_count, 0); /* debug only */ - aux_thread_aux_work_data = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, sizeof(ErtsAuxWorkData)); @@ -5874,6 +6235,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces free_proxy_proc(proxy); erts_smp_runq_lock(c_rq); + return 0; #ifdef ERTS_DIRTY_SCHEDULERS @@ -6197,9 +6559,6 @@ suspend_process(Process *c_p, Process *p) if (suspended) { - ASSERT(!(ERTS_PSFLG_RUNNING & state) - || p == erts_get_current_process()); - if (suspended > 0 && erts_system_profile_flags.runnable_procs) { /* 'state' is before our change... 
*/ @@ -6531,18 +6890,19 @@ suspend_scheduler(ErtsSchedulerData *esdp) & ERTS_RUNQ_FLGS_QMASK); aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work|qmask) { - if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + if (aux_work) + aux_work = handle_aux_work(&esdp->aux_work_data, + aux_work, + 1); + + if (aux_work && erts_thr_progress_update(esdp)) + erts_thr_progress_leader_update(esdp); } - if (aux_work) - aux_work = handle_aux_work(&esdp->aux_work_data, - aux_work, - 1); - - if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && - (aux_work && erts_thr_progress_update(esdp))) - erts_thr_progress_leader_update(esdp); if (qmask) { #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { @@ -6586,7 +6946,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) int res; do { - res = erts_tse_wait(ssi->event); + res = erts_tse_twait(ssi->event, -1); } while (res == EINTR); } } @@ -6642,6 +7002,8 @@ suspend_scheduler(ErtsSchedulerData *esdp) } } + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) + (void) erts_get_monotonic_time(esdp); erts_smp_runq_lock(esdp->run_queue); non_empty_runq(esdp->run_queue); @@ -6749,6 +7111,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) erts_smp_mtx_unlock(&schdlr_sspnd.mtx); while (1) { + ErtsMonotonicTime current_time; erts_aint32_t qmask; erts_aint32_t flgs; @@ -6756,16 +7119,18 @@ suspend_scheduler(ErtsSchedulerData *esdp) & ERTS_RUNQ_FLGS_QMASK); aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work|qmask) { - if (!thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + if (aux_work) + aux_work = handle_aux_work(&esdp->aux_work_data, + aux_work, + 1); + if (aux_work && erts_thr_progress_update(esdp)) + erts_thr_progress_leader_update(esdp); } - if (aux_work) - aux_work = handle_aux_work(&esdp->aux_work_data, - aux_work, - 1); - if (aux_work && erts_thr_progress_update(esdp)) - erts_thr_progress_leader_update(esdp); if (qmask) { erts_smp_runq_lock(esdp->run_queue); evacuate_run_queue(esdp->run_queue, &sbp); @@ -6773,30 +7138,77 @@ suspend_scheduler(ErtsSchedulerData *esdp) } } - if (!aux_work) { - if (thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 0); - sched_wall_time_change(esdp, 0); + if (aux_work) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + current_time = erts_get_monotonic_time(esdp); + if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + erts_bump_timers(esdp->timer_wheel, current_time); + } } - erts_thr_progress_prepare_wait(esdp); - flgs = sched_spin_suspended(ssi, - ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT); - if (flgs == (ERTS_SSI_FLG_SLEEPING - | ERTS_SSI_FLG_WAITING - | ERTS_SSI_FLG_SUSPENDED)) { - flgs = sched_set_suspended_sleeptype(ssi); + } + else { + ErtsMonotonicTime timeout_time; + int do_timeout = 0; + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + timeout_time = erts_check_next_timeout_time(esdp); + current_time = erts_get_monotonic_time(esdp); + do_timeout = (current_time >= timeout_time); + } else + timeout_time = ERTS_MONOTONIC_TIME_MAX; + 
if (do_timeout) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + } + else { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } + erts_thr_progress_prepare_wait(esdp); + } + flgs = sched_spin_suspended(ssi, + ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT); if (flgs == (ERTS_SSI_FLG_SLEEPING - | ERTS_SSI_FLG_TSE_SLEEPING | ERTS_SSI_FLG_WAITING | ERTS_SSI_FLG_SUSPENDED)) { - int res; - - do { - res = erts_tse_wait(ssi->event); - } while (res == EINTR); + flgs = sched_set_suspended_sleeptype(ssi); + if (flgs == (ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_TSE_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)) { + int res; + + current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 : + erts_get_monotonic_time(esdp); + do { + Sint64 timeout; + if (current_time >= timeout_time) + break; + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time + - current_time + - 1) + 1; + } else + timeout = -1; + res = erts_tse_twait(ssi->event, timeout); + current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 : + erts_get_monotonic_time(esdp); + } while (res == EINTR); + } } + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) + erts_thr_progress_finalize_wait(esdp); } - erts_thr_progress_finalize_wait(esdp); + + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time) + erts_bump_timers(esdp->timer_wheel, current_time); } flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING @@ -7616,7 +8028,18 @@ sched_thread_func(void *vesdp) ErtsSchedulerData *esdp = vesdp; Uint no = esdp->no; #ifdef ERTS_SMP - ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch(); + erts_tse_t *tse; +#endif + + erts_sched_init_time_sup(esdp); + + (void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue, + ERTS_RUNQ_FLG_EXEC); + +#ifdef ERTS_SMP + tse = erts_tse_fetch(); + erts_tse_prepare_timed(tse); + ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = tse; callbacks.arg = (void *) esdp->ssi; callbacks.wakeup = thr_prgr_wakeup; callbacks.prepare_wait = thr_prgr_prep_wait; @@ -7694,7 +8117,7 @@ sched_thread_func(void *vesdp) process_main(); /* No schedulers should *ever* terminate */ - erl_exit(ERTS_ABORT_EXIT, + erts_exit(ERTS_ABORT_EXIT, "Scheduler thread number %beu terminated\n", no); return NULL; @@ -7717,6 +8140,8 @@ sched_dirty_cpu_thread_func(void *vesdp) callbacks.wait = NULL; callbacks.finalize_wait = NULL; + esdp->thr_id += erts_no_schedulers; + erts_thr_progress_register_unmanaged_thread(&callbacks); #ifdef ERTS_ENABLE_LOCK_CHECK { @@ -7757,7 +8182,7 @@ sched_dirty_cpu_thread_func(void *vesdp) process_main(); /* No schedulers should *ever* terminate */ - erl_exit(ERTS_ABORT_EXIT, + erts_exit(ERTS_ABORT_EXIT, "Dirty CPU scheduler thread number %beu terminated\n", no); return NULL; @@ -7778,6 +8203,8 @@ sched_dirty_io_thread_func(void *vesdp) callbacks.wait = NULL; callbacks.finalize_wait = NULL; + esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers; + erts_thr_progress_register_unmanaged_thread(&callbacks); #ifdef ERTS_ENABLE_LOCK_CHECK { @@ -7818,7 +8245,7 @@ sched_dirty_io_thread_func(void *vesdp) process_main(); /* No schedulers should *ever* terminate */ - erl_exit(ERTS_ABORT_EXIT, + erts_exit(ERTS_ABORT_EXIT, "Dirty I/O scheduler thread number %beu terminated\n", no); return NULL; @@ -7835,31 +8262,25 @@ erts_start_schedulers(void) Uint actual; Uint wanted = erts_no_schedulers; Uint wanted_no_schedulers = erts_no_schedulers; + 
char name[16]; ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER; opts.detached = 1; -#ifdef ETHR_HAVE_THREAD_NAMES - opts.name = malloc(80); - if (!opts.name) { - ERTS_INTERNAL_ERROR("malloc failed to allocate memory!"); - } -#endif + opts.name = name; #ifdef ERTS_SMP if (erts_runq_supervision_interval) { opts.suggested_stack_size = 16; -#ifdef ETHR_HAVE_THREAD_NAMES - sprintf(opts.name, "runq_supervisor"); -#endif + erts_snprintf(opts.name, 16, "runq_supervisor"); erts_atomic_init_nob(&runq_supervisor_sleeping, 0); if (0 != ethr_event_init(&runq_supervision_event)) - erl_exit(1, "Failed to create run-queue supervision event\n"); + erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision event\n"); if (0 != ethr_thr_create(&runq_supervisor_tid, runq_supervisor, NULL, &opts)) - erl_exit(1, "Failed to create run-queue supervision thread\n"); + erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread\n"); } #endif @@ -7878,9 +8299,7 @@ erts_start_schedulers(void) ASSERT(actual == esdp->no - 1); -#ifdef ETHR_HAVE_THREAD_NAMES - sprintf(opts.name, "scheduler_%d", actual + 1); -#endif + erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1); #ifdef __OSE__ /* This should be done in the bind strategy */ @@ -7902,21 +8321,17 @@ erts_start_schedulers(void) int ix; for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); -#ifdef ETHR_HAVE_THREAD_NAMES - sprintf(opts.name,"dirty_cpu_scheduler_%d", ix + 1); -#endif + erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1); res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts); if (res != 0) - erl_exit(1, "Failed to create dirty cpu scheduler thread %d\n", ix); + erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d\n", ix); } for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix); -#ifdef ETHR_HAVE_THREAD_NAMES - sprintf(opts.name,"dirty_io_scheduler_%d", ix + 1); -#endif + erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1); res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts); if (res != 0) - erl_exit(1, "Failed to create dirty io scheduler thread %d\n", ix); + erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d\n", ix); } } #endif @@ -7924,9 +8339,7 @@ erts_start_schedulers(void) ERTS_THR_MEMORY_BARRIER; -#ifdef ETHR_HAVE_THREAD_NAMES - sprintf(opts.name, "aux"); -#endif + erts_snprintf(opts.name, 16, "aux"); #ifdef __OSE__ opts.coreNo = 0; @@ -7934,10 +8347,10 @@ erts_start_schedulers(void) res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts); if (res != 0) - erl_exit(1, "Failed to create aux thread\n"); + erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread\n"); if (actual < 1) - erl_exit(1, + erts_exit(ERTS_ERROR_EXIT, "Failed to create any scheduler-threads: %s (%d)\n", erl_errno_id(res), res); @@ -7952,9 +8365,6 @@ erts_start_schedulers(void) erts_send_error_to_logger_nogl(dsbufp); } -#ifdef ETHR_HAVE_THREAD_NAMES - free(opts.name); -#endif } #endif /* ERTS_SMP */ @@ -8095,9 +8505,8 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, resume_process(rp, rp_locks); } else { - rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, - pid, pid_locks|ERTS_PROC_LOCK_STATUS); + pid, ERTS_PROC_LOCK_STATUS); if (!rp) { c_p->flags &= ~F_P2PNR_RESCHED; @@ -8106,40 +8515,84 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, ASSERT(!(c_p->flags & F_P2PNR_RESCHED)); - if 
(suspend) { - if (suspend_process(c_p, rp)) - goto done; - } - else { - if (!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) - & erts_smp_atomic32_read_acqb(&rp->state))) - goto done; + /* + * Suspend the other process in order to prevent + * it from being selected for normal execution. + * This will however not prevent it from being + * selected for execution of a system task. If + * it is selected for execution of a system task + * we might be blocked for quite a while if the + * try-lock below fails. That is, there is room + * for improvement here... + */ - } + if (!suspend_process(c_p, rp)) { + /* Other process running */ - /* Other process running */ + ASSERT(ERTS_PSFLG_RUNNING + & erts_smp_atomic32_read_nob(&rp->state)); - /* - * If we got pending suspenders and suspend ourselves waiting - * to suspend another process we might deadlock. - * In this case we have to yield, be suspended by - * someone else and then do it all over again. - */ - if (!c_p->pending_suspenders) { - /* Mark rp pending for suspend by c_p */ - add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend); - ASSERT(is_nil(c_p->suspendee)); + running: - /* Suspend c_p; when rp is suspended c_p will be resumed. */ - suspend_process(c_p, c_p); - c_p->flags |= F_P2PNR_RESCHED; + /* + * If we got pending suspenders and suspend ourselves waiting + * to suspend another process we might deadlock. + * In this case we have to yield, be suspended by + * someone else and then do it all over again. + */ + if (!c_p->pending_suspenders) { + /* Mark rp pending for suspend by c_p */ + add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend); + ASSERT(is_nil(c_p->suspendee)); + + /* Suspend c_p; when rp is suspended c_p will be resumed. */ + suspend_process(c_p, c_p); + c_p->flags |= F_P2PNR_RESCHED; + } + /* Yield (caller is assumed to yield immediately in bif). */ + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + rp = ERTS_PROC_LOCK_BUSY; + } + else { + ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; + if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + if (ERTS_PSFLG_RUNNING_SYS + & erts_smp_atomic32_read_nob(&rp->state)) { + /* Executing system task... */ + resume_process(rp, ERTS_PROC_LOCK_STATUS); + goto running; + } + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + /* + * If we are unlucky, the process just got selected for + * execution of a system task. In this case we may be + * blocked here for quite a while... Execution of system + * tasks are fortunately quite rare events. We try to + * avoid this by checking if it is in a state executing + * system tasks (above), but it will not prevent all + * scenarios for a long block here... + */ + rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, + pid, pid_locks|ERTS_PROC_LOCK_STATUS); + if (!rp) + goto done; + } + + /* + * The previous suspend has prevented the process + * from being selected for normal execution regardless + * of locks held or not held on it... + */ + ASSERT(!(ERTS_PSFLG_RUNNING + & erts_smp_atomic32_read_nob(&rp->state))); + + if (!suspend) + resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS); } - /* Yield (caller is assumed to yield immediately in bif). 
*/ - erts_smp_proc_unlock(rp, pid_locks|ERTS_PROC_LOCK_STATUS); - rp = ERTS_PROC_LOCK_BUSY; } done: + if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS)) erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); if (unlock_c_p_status) @@ -8634,24 +9087,39 @@ resume_process_1(BIF_ALIST_1) } Uint -erts_run_queues_len(Uint *qlen) +erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched) { int i = 0; Uint len = 0; - ERTS_ATOMIC_FOREACH_RUNQ(rq, - { - Sint pqlen = 0; - int pix; - for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) - pqlen += RUNQ_READ_LEN(&rq->procs.prio_info[pix].len); + if (atomic_queues_read) + ERTS_ATOMIC_FOREACH_RUNQ(rq, + { + Sint rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len); + ASSERT(rq_len >= 0); + if (incl_active_sched + && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) { + rq_len++; + } + if (qlen) + qlen[i++] = rq_len; + len += (Uint) rq_len; + } + ); + else { + for (i = 0; i < erts_no_run_queues; i++) { + ErtsRunQueue *rq = ERTS_RUNQ_IX(i); + Sint rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len); + ASSERT(rq_len >= 0); + if (incl_active_sched + && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) { + rq_len++; + } + if (qlen) + qlen[i] = rq_len; + len += (Uint) rq_len; + } - if (pqlen < 0) - pqlen = 0; - if (qlen) - qlen[i++] = pqlen; - len += pqlen; } - ); return len; } @@ -8864,6 +9332,10 @@ erts_set_process_priority(Process *p, Eterm value) a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); } while (a != e); + + if (slocked) + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + } switch (oprio) { @@ -8897,7 +9369,6 @@ Process *schedule(Process *p, int calls) { Process *proxy_p = NULL; ErtsRunQueue *rq; - erts_aint_t dt; ErtsSchedulerData *esdp; int context_reds; int fcalls; @@ -8997,7 +9468,7 @@ Process *schedule(Process *p, int calls) schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! 
*/ proxy_p = NULL; - ERTS_PROC_REDUCTIONS_EXECUTED(rq, + ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq, (int) ERTS_PSFLGS_GET_USR_PRIO(state), reds, actual_reds); @@ -9014,12 +9485,9 @@ Process *schedule(Process *p, int calls) ASSERT(esdp->free_process == p); esdp->free_process = NULL; #else - state = erts_smp_atomic32_read_nob(&p->state); - if (!(state & ERTS_PSFLG_IN_RUNQ)) - erts_free_proc(p); + erts_proc_dec_refc(p); #endif } - #ifdef ERTS_SMP ASSERT(!esdp->free_process); #endif @@ -9027,11 +9495,15 @@ Process *schedule(Process *p, int calls) ERTS_SMP_CHK_NO_PROC_LOCKS; - dt = erts_do_time_read_and_reset(); - if (dt) { - erts_smp_runq_unlock(rq); - erts_bump_timer(dt); - erts_smp_runq_lock(rq); + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS) + (void) erts_get_monotonic_time(esdp); + + if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { + erts_smp_runq_unlock(rq); + erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time); + erts_smp_runq_lock(rq); + } } BM_STOP_TIMER(system); @@ -9074,8 +9546,10 @@ Process *schedule(Process *p, int calls) if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) { if (flags & ERTS_RUNQ_FLG_SUSPENDED) { + (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC); suspend_scheduler(esdp); - flags = ERTS_RUNQ_FLGS_GET_NOB(rq); + flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC); + flags |= ERTS_RUNQ_FLG_EXEC; } if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) { flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); @@ -9090,10 +9564,9 @@ Process *schedule(Process *p, int calls) suspend_scheduler(esdp); #endif - { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { erts_aint32_t aux_work; - int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 - : erts_thr_progress_update(esdp); + int leader_update = erts_thr_progress_update(esdp); aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); if (aux_work | leader_update | ERTS_SCHED_FAIR) { erts_smp_runq_unlock(rq); @@ -9106,8 +9579,7 @@ Process *schedule(Process *p, int calls) erts_smp_runq_lock(rq); } - ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp) - || !erts_thr_progress_is_blocking()); + ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); } ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); @@ -9166,7 +9638,10 @@ Process *schedule(Process *p, int calls) } #endif + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC); scheduler_wait(&fcalls, esdp, rq); + flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC); + flags |= ERTS_RUNQ_FLG_EXEC; #ifdef ERTS_SMP non_empty_runq(rq); @@ -9177,6 +9652,7 @@ Process *schedule(Process *p, int calls) else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && (fcalls > input_reductions && prepare_for_sys_schedule(esdp, !0))) { + ErtsMonotonicTime current_time; /* * Schedule system-level activities. 
*/ @@ -9189,8 +9665,10 @@ Process *schedule(Process *p, int calls) #endif erts_smp_runq_unlock(rq); erl_sys_schedule(1); - dt = erts_do_time_read_and_reset(); - if (dt) erts_bump_timer(dt); + + current_time = erts_get_monotonic_time(esdp); + if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) + erts_bump_timers(esdp->timer_wheel, current_time); #ifdef ERTS_SMP erts_smp_runq_lock(rq); @@ -9329,17 +9807,14 @@ Process *schedule(Process *p, int calls) | ERTS_PSFLG_PENDING_EXIT | ERTS_PSFLG_ACTIVE_SYS)) == ERTS_PSFLG_SUSPENDED)) { - if (state & ERTS_PSFLG_FREE) { -#ifdef ERTS_SMP - erts_smp_proc_dec_refc(p); -#else - erts_free_proc(p); -#endif - } if (proxy_p) { free_proxy_proc(proxy_p); proxy_p = NULL; } + else if (state & ERTS_PSFLG_FREE) { + /* free and not queued by proxy */ + erts_proc_dec_refc(p); + } goto pick_next_process; } state = new; @@ -9363,6 +9838,8 @@ Process *schedule(Process *p, int calls) erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); + state = erts_smp_atomic32_read_nob(&p->state); + if (erts_sched_stat.enabled) { int prio; UWord old = ERTS_PROC_SCHED_ID(p, @@ -9478,6 +9955,20 @@ Process *schedule(Process *p, int calls) /* Never run a suspended process */ ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state))); + ASSERT(erts_proc_read_refc(p) > 0); + + if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) { + BeamInstr** pi; +#ifdef ERTS_SMP + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); +#endif + pi = (BeamInstr **) p->def_arg_reg; + p->i = *pi; + p->flags &= ~F_INSLPQUEUE; + p->flags |= F_TIMO; + ERTS_PTMR_CLEAR(p); + } + return p; } } @@ -9530,15 +10021,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result) ASSERT(hp_start + hsz == hp); #endif - erts_queue_message(rp, - &rp_locks, - bp, - msg, - NIL -#ifdef USE_VM_PROBES - , NIL -#endif - ); + erts_queue_message(rp, &rp_locks, bp, msg, NIL); if (c_p == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; @@ -10394,6 +10877,8 @@ erts_free_proc(Process *p) #ifdef ERTS_SMP erts_proc_lock_fin(p); #endif + ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); + ASSERT(0 == erts_proc_read_refc(p)); erts_free(ERTS_ALC_T_PROC, (void *) p); } @@ -10436,7 +10921,7 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state) init_arg.run_queue = rq; init_arg.state = state; - ASSERT(((char *) p) == ((char *) &p->common)); + ERTS_CT_ASSERT(offsetof(Process,common) == 0); if (!erts_ptab_new_element(&erts_proc, &p->common, @@ -10446,6 +10931,8 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state) return NULL; } + ASSERT(erts_proc_read_refc(p) > 0); + ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL); p->approx_started = erts_get_approx_time(); @@ -10495,6 +10982,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). int ix = so->scheduler-1; ASSERT(0 <= ix && ix < erts_no_run_queues); rq = ERTS_RUNQ_IX(ix); + /* Unsupported feature... */ state |= ERTS_PSFLG_BOUND; } prio = (erts_aint32_t) so->priority; @@ -10527,6 +11015,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->flags = erts_default_process_flags; + p->static_flags = 0; + if (so->flags & SPO_SYSTEM_PROC) + p->static_flags |= ERTS_STC_FLG_SYSTEM_PROC; if (so->flags & SPO_USE_ARGS) { p->min_heap_size = so->min_heap_size; p->min_vheap_size = so->min_vheap_size; @@ -10539,9 +11030,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
p->schedule_count = 0; ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0)); - p->initial[INITIAL_MOD] = mod; - p->initial[INITIAL_FUN] = func; - p->initial[INITIAL_ARI] = (Uint) arity; + p->u.initial[INITIAL_MOD] = mod; + p->u.initial[INITIAL_FUN] = func; + p->u.initial[INITIAL_ARI] = (Uint) arity; /* * Must initialize binary lists here before copying binaries to process. @@ -10582,7 +11073,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). /* No need to initialize p->fcalls. */ - p->current = p->initial+INITIAL_MOD; + p->current = p->u.initial+INITIAL_MOD; p->i = (BeamInstr *) beam_apply; p->cp = (BeamInstr *) beam_apply+1; @@ -10605,11 +11096,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->ftrace = NIL; p->reds = 0; -#ifdef ERTS_SMP - p->common.u.alive.ptimer = NULL; -#else - sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer)); -#endif + ERTS_PTMR_INIT(p); p->common.u.alive.reg = NULL; ERTS_P_LINKS(p) = NULL; @@ -10640,7 +11127,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->msg_inq.last = &p->msg_inq.first; p->msg_inq.len = 0; #endif - p->u.bif_timers = NULL; + p->bif_timers = NULL; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + p->accessor_bif_timers = NULL; +#endif p->mbuf = NULL; p->mbuf_sz = 0; p->psd = NULL; @@ -10798,11 +11288,7 @@ void erts_init_empty_process(Process *p) p->bin_old_vheap = 0; p->sys_task_qs = NULL; p->bin_vheap_mature = 0; -#ifdef ERTS_SMP - p->common.u.alive.ptimer = NULL; -#else - memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer)); -#endif + ERTS_PTMR_INIT(p); p->next = NULL; p->off_heap.first = NULL; p->off_heap.overhead = 0; @@ -10823,14 +11309,17 @@ void erts_init_empty_process(Process *p) p->msg.last = &p->msg.first; p->msg.save = &p->msg.first; p->msg.len = 0; - p->u.bif_timers = NULL; + p->bif_timers = NULL; +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + p->accessor_bif_timers = NULL; +#endif p->dictionary = NULL; p->seq_trace_clock = 0; p->seq_trace_lastcnt = 0; p->seq_trace_token = NIL; - p->initial[0] = 0; - p->initial[1] = 0; - p->initial[2] = 0; + p->u.initial[0] = 0; + p->u.initial[1] = 0; + p->u.initial[2] = 0; p->catches = 0; p->cp = NULL; p->i = NULL; @@ -10851,6 +11340,8 @@ void erts_init_empty_process(Process *p) p->parent = NIL; p->approx_started = 0; + p->static_flags = 0; + p->common.u.alive.started_interval = 0; #ifdef HIPE @@ -10915,7 +11406,10 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->suspend_monitors == NULL); ASSERT(p->msg.first == NULL); ASSERT(p->msg.len == 0); - ASSERT(p->u.bif_timers == NULL); + ASSERT(p->bif_timers == NULL); +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + ASSERT(p->accessor_bif_timers == NULL); +#endif ASSERT(p->dictionary == NULL); ASSERT(p->catches == 0); ASSERT(p->cp == NULL); @@ -11079,7 +11573,6 @@ set_proc_exiting(Process *p, */ p->freason = EXTAG_EXIT; KILL_CATCHES(p); - cancel_timer(p); p->i = (BeamInstr *) beam_exit; #ifndef ERTS_SMP @@ -11195,10 +11688,14 @@ save_pending_exiter(Process *p) { ErtsProcList *plp; ErtsRunQueue *rq; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - rq = erts_get_runq_current(NULL); + if (!esdp) + rq = RUNQ_READ_RQ(&p->run_queue); + else + rq = esdp->run_queue; plp = proclist_create(p); @@ -11215,6 +11712,7 @@ save_pending_exiter(Process *p) else #endif wake_scheduler(rq); + } #endif @@ -11240,11 +11738,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp, hp = 
erts_alloc_message_heap(term_size, &bp, &ohp, to, to_locksp); mess = copy_struct(exit_term, term_size, &hp, ohp); - erts_queue_message(to, to_locksp, bp, mess, NIL -#ifdef USE_VM_PROBES - , NIL -#endif - ); + erts_queue_message(to, to_locksp, bp, mess, NIL); } else { ErlHeapFragment* bp; Eterm* hp; @@ -11260,11 +11754,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp, /* the trace token must in this case be updated by the caller */ seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL); temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap); - erts_queue_message(to, to_locksp, bp, mess, temp_token -#ifdef USE_VM_PROBES - , NIL -#endif - ); + erts_queue_message(to, to_locksp, bp, mess, temp_token); } } @@ -11411,23 +11901,21 @@ send_exit_signal(Process *c_p, /* current process if and only if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { /* ... but we haven't got all locks on it ... */ - save_pending_exiter(rp); + save_pending_exiter(rp); /* * The pending exit will be discovered when the next * process is scheduled in */ - goto set_pending_exit; - } - else { - /* ...and we have all locks on it... */ - *rp_locks = ERTS_PROC_LOCKS_ALL; - set_proc_exiting(rp, - state, - (is_immed(rsn) - ? rsn - : copy_object(rsn, rp)), - NULL); + goto set_pending_exit; } + /* ...and we have all locks on it... */ + *rp_locks = ERTS_PROC_LOCKS_ALL; + set_proc_exiting(rp, + state, + (is_immed(rsn) + ? rsn + : copy_object(rsn, rp)), + NULL); } else { /* Process running... */ @@ -11517,7 +12005,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) ErtsMonitor *rmon; Process *rp; - if (mon->type == MON_ORIGIN) { + switch (mon->type) { + case MON_ORIGIN: /* We are monitoring someone else; we need to demonitor that one..
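// [Editor's aside; not part of the diff.] The send_exit_signal() hunk above
// keeps the trylock-or-defer rule: if every lock on the target cannot be
// taken without blocking, the target is recorded as a pending exiter and
// flagged, so the exit is applied when it is next scheduled. A sketch of
// that shape under invented *_sk names (the pending list is guarded by a
// run-queue lock in the real code; elided here).

#include <pthread.h>
#include <stdatomic.h>

#define PENDING_EXIT_SK 1u

typedef struct proc_sk {
    pthread_mutex_t  lock;
    atomic_uint      state;
    struct proc_sk  *next_pending;     // intrusive pending-exiter list
} proc_sk;

static proc_sk *pending_head_sk;       // drained by the scheduler loop

static void send_exit_sk(proc_sk *p)
{
    if (pthread_mutex_trylock(&p->lock) == 0) {
        // Got the lock: mark the process exiting right away
        // (stand-in for set_proc_exiting()).
        atomic_fetch_or(&p->state, PENDING_EXIT_SK);
        pthread_mutex_unlock(&p->lock);
    } else {
        // Would block: remember the victim; the scheduler applies the
        // exit when the process is next picked to run.
        p->next_pending = pending_head_sk;
        pending_head_sk = p;
        atomic_fetch_or(&p->state, PENDING_EXIT_SK);
    }
}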
*/ if (is_atom(mon->pid)) { /* remote by name */ ASSERT(is_node_name_atom(mon->pid)); @@ -11580,7 +12069,8 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) } } } - } else { /* type == MON_TARGET */ + break; + case MON_TARGET: ASSERT(mon->type == MON_TARGET); ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid)); if (is_internal_port(mon->pid)) { @@ -11639,6 +12129,12 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) } } } + break; + case MON_TIME_OFFSET: + erts_demonitor_time_offset(mon->ref); + break; + default: + ERTS_INTERNAL_ERROR("Invalid monitor type"); } done: /* As the monitors are previously removed from the process, @@ -11758,7 +12254,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) break; default: - erl_exit(1, "bad type in link list\n"); + erts_exit(ERTS_ERROR_EXIT, "bad type in link list\n"); break; } erts_destroy_link(lnk); @@ -11785,6 +12281,7 @@ erts_do_exit_process(Process* p, Eterm reason) { p->arity = 0; /* No live registers */ p->fvalue = reason; + #ifdef USE_VM_PROBES if (DTRACE_ENABLED(process_exit)) { @@ -11797,6 +12294,10 @@ erts_do_exit_process(Process* p, Eterm reason) } #endif + if (p->static_flags & ERTS_STC_FLG_SYSTEM_PROC) + erts_exit(ERTS_DUMP_EXIT, "System process %T terminated: %T\n", + p->common.id, reason); + #ifdef ERTS_SMP ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* By locking all locks (main lock is already locked) when going @@ -11836,20 +12337,20 @@ erts_do_exit_process(Process* p, Eterm reason) ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS) == F_INITIAL_TRACE_FLAGS); - cancel_timer(p); /* Always cancel timer just in case */ - - if (p->u.bif_timers) - erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL); + ASSERT(erts_proc_read_refc(p) > 0); + if (ERTS_PTMR_IS_SET(p)) { + erts_cancel_proc_timer(p); + ASSERT(erts_proc_read_refc(p) > 0); + } erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); /* - * The p->u.bif_timers of this process can *not* be used anymore; + * p->u.initial of this process can *not* be used anymore; * will be overwritten by misc termination data. */ p->u.terminate = NULL; - erts_continue_exit_process(p); } @@ -11874,6 +12375,29 @@ erts_continue_exit_process(Process *p) ASSERT(ERTS_PROC_IS_EXITING(p)); + ASSERT(erts_proc_read_refc(p) > 0); + if (p->bif_timers) { + if (erts_cancel_bif_timers(p, p->bif_timers, &p->u.terminate)) { + ASSERT(erts_proc_read_refc(p) > 0); + goto yield; + } + ASSERT(erts_proc_read_refc(p) > 0); + p->bif_timers = NULL; + } + +#ifdef ERTS_BTM_ACCESSOR_SUPPORT + if (p->accessor_bif_timers) { + if (erts_detach_accessor_bif_timers(p, + p->accessor_bif_timers, + &p->u.terminate)) { + ASSERT(erts_proc_read_refc(p) > 0); + goto yield; + } + ASSERT(erts_proc_read_refc(p) > 0); + p->accessor_bif_timers = NULL; + } +#endif + #ifdef ERTS_SMP if (p->flags & F_HAVE_BLCKD_MSCHED) { ErtsSchedSuspendResult ssr; @@ -11889,7 +12413,7 @@ erts_continue_exit_process(Process *p) break; case ERTS_SCHDLR_SSPND_EINVAL: default: - erl_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n", + erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n", __FILE__, __LINE__, (int) ssr); } } @@ -11967,6 +12491,8 @@ erts_continue_exit_process(Process *p) p->scheduler_data->current_process = NULL; p->scheduler_data->free_process = p; +#else + erts_proc_inc_refc(p); /* Decremented in schedule() */ #endif /* Time of death! 
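// [Editor's aside; not part of the diff.] erts_continue_exit_process() above
// can now yield: when cancelling the dying process's BIF timers does not
// finish in one go, progress is parked (the diff stores it via
// p->u.terminate) and the function returns to be rescheduled. A tiny sketch
// of that budgeted, resumable cleanup; all names are invented.

#include <stddef.h>

typedef struct timer_sk { struct timer_sk *next; } timer_sk;

// Cancel at most 'budget' timers and return the resume point.  A non-NULL
// result means "not done": the caller stashes it and yields, which is the
// role p->u.terminate plays between schedulings.
static timer_sk *cancel_some_sk(timer_sk *cursor, int budget)
{
    while (cursor && budget-- > 0) {
        timer_sk *next = cursor->next;
        // cancel *cursor here (stubbed)
        cursor = next;
    }
    return cursor;
}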
*/ @@ -11985,34 +12511,26 @@ erts_continue_exit_process(Process *p) { /* Inactivate and notify free */ erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state); -#ifdef ERTS_SMP int refc_inced = 0; -#endif while (1) { n = e = a; ASSERT(a & ERTS_PSFLG_EXITING); n |= ERTS_PSFLG_FREE; n &= ~ERTS_PSFLG_ACTIVE; -#ifdef ERTS_SMP if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) { - erts_smp_proc_inc_refc(p); + erts_proc_inc_refc(p); refc_inced = 1; } -#endif a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } -#ifdef ERTS_SMP if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ)) - erts_smp_proc_dec_refc(p); -#endif + erts_proc_dec_refc(p); } - dep = ((p->flags & F_DISTRIBUTION) - ? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL) - : NULL); + dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL; scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL); pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL); nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCKS_ALL, NULL); @@ -12024,8 +12542,6 @@ erts_continue_exit_process(Process *p) if (dep) { erts_do_net_exits(dep, reason); - if(dep) - erts_deref_dist_entry(dep); } /* @@ -12098,64 +12614,6 @@ erts_continue_exit_process(Process *p) } -/* Callback for process timeout */ -static void -timeout_proc(Process* p) -{ - erts_aint32_t state; - BeamInstr** pi = (BeamInstr **) p->def_arg_reg; - p->i = *pi; - p->flags |= F_TIMO; - p->flags &= ~F_INSLPQUEUE; - - state = erts_smp_atomic32_read_acqb(&p->state); - if (!(state & ERTS_PSFLG_ACTIVE)) - schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); -} - - -void -cancel_timer(Process* p) -{ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); - p->flags &= ~(F_INSLPQUEUE|F_TIMO); -#ifdef ERTS_SMP - erts_cancel_smp_ptimer(p->common.u.alive.ptimer); -#else - erts_cancel_timer(&p->common.u.alive.tm); -#endif -} - -/* - * Insert a process into the time queue, with a timeout 'timeout' in ms. - */ -void -set_timer(Process* p, Uint timeout) -{ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); - - /* check for special case timeout=0 DONT ADD TO time queue */ - if (timeout == 0) { - p->flags |= F_TIMO; - return; - } - p->flags |= F_INSLPQUEUE; - p->flags &= ~F_TIMO; - -#ifdef ERTS_SMP - erts_create_smp_ptimer(&p->common.u.alive.ptimer, - p->common.id, - (ErlTimeoutProc) timeout_proc, - timeout); -#else - erts_set_timer(&p->common.u.alive.tm, - (ErlTimeoutProc) timeout_proc, - NULL, - (void*) p, - timeout); -#endif -} - /* * Stack dump functions follow. 
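// [Editor's aside; not part of the diff.] The inactivate-and-notify-free
// block above is a compare-and-swap transition loop: set FREE, clear ACTIVE,
// and take an extra reference while the process still sits in a run queue so
// the queue's pointer stays valid; if that flag vanished while looping, the
// reference is given back. A compilable C11 sketch of the same shape with
// illustrative flag values, not the real ERTS_PSFLG_* bits.

#include <stdatomic.h>

#define PSFLG_ACTIVE_SK  (1u << 0)
#define PSFLG_IN_RUNQ_SK (1u << 1)
#define PSFLG_FREE_SK    (1u << 2)

typedef struct {
    atomic_uint state;
    atomic_int  refc;
} proc_sk;

static void mark_free_sk(proc_sk *p)
{
    int refc_inced = 0;
    unsigned e = atomic_load(&p->state), n;
    do {
        n = (e | PSFLG_FREE_SK) & ~PSFLG_ACTIVE_SK;
        if ((n & PSFLG_IN_RUNQ_SK) && !refc_inced) {
            atomic_fetch_add(&p->refc, 1);  // a run queue still points at p
            refc_inced = 1;
        }
    } while (!atomic_compare_exchange_strong(&p->state, &e, n));
    if (refc_inced && !(n & PSFLG_IN_RUNQ_SK))
        atomic_fetch_sub(&p->refc, 1);      // dequeued while we looped
}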
*/ @@ -12171,7 +12629,7 @@ erts_stack_dump(int to, void *to_arg, Process *p) } erts_program_counter_info(to, to_arg, p); for (sp = p->stop; sp < STACK_START(p); sp++) { - yreg = stack_element_dump(to, to_arg, p, sp, yreg); + yreg = stack_element_dump(to, to_arg, sp, yreg); } } @@ -12228,7 +12686,7 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x) } static int -stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg) +stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg) { Eterm x = *sp; @@ -12257,6 +12715,188 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg) } /* + * Print scheduler information + */ +void +erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) { + int i; + erts_aint32_t flg; + Process *p; + + erts_print(to, to_arg, "=scheduler:%u\n", esdp->no); + +#ifdef ERTS_SMP + flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags); + erts_print(to, to_arg, "Scheduler Sleep Info Flags: "); + for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) { + erts_aint32_t chk = (1 << i); + if (flg & chk) { + switch (chk) { + case ERTS_SSI_FLG_SLEEPING: + erts_print(to, to_arg, "SLEEPING"); break; + case ERTS_SSI_FLG_POLL_SLEEPING: + erts_print(to, to_arg, "POLL_SLEEPING"); break; + case ERTS_SSI_FLG_TSE_SLEEPING: + erts_print(to, to_arg, "TSE_SLEEPING"); break; + case ERTS_SSI_FLG_WAITING: + erts_print(to, to_arg, "WAITING"); break; + case ERTS_SSI_FLG_SUSPENDED: + erts_print(to, to_arg, "SUSPENDED"); break; + default: + erts_print(to, to_arg, "UNKNOWN(%d)", flg); break; + } + if (flg > chk) + erts_print(to, to_arg, " | "); + flg -= chk; + } + } + erts_print(to, to_arg, "\n"); +#endif + + flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work); + erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: "); + for (i = 0; i < ERTS_SSI_AUX_WORK_NO_FLAGS && flg; i++) { + erts_aint32_t chk = (1 << i); + if (flg & chk) { + if (erts_aux_work_flag_descr[i]) + erts_print(to, to_arg, "%s", erts_aux_work_flag_descr[i]); + else + erts_print(to, to_arg, "1<<%d", i); + if (flg > chk) + erts_print(to, to_arg, " | "); + flg -= chk; + } + } + erts_print(to, to_arg, "\n"); + + erts_print(to, to_arg, "Current Port: "); + if (esdp->current_port) + erts_print(to, to_arg, "%T", esdp->current_port->common.id); + erts_print(to, to_arg, "\n"); + + for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) { + erts_print(to, to_arg, "Run Queue "); + switch (i) { + case PRIORITY_MAX: + erts_print(to, to_arg, "Max "); + break; + case PRIORITY_HIGH: + erts_print(to, to_arg, "High "); + break; + case PRIORITY_NORMAL: + erts_print(to, to_arg, "Normal "); + break; + case PRIORITY_LOW: + erts_print(to, to_arg, "Low "); + break; + default: + erts_print(to, to_arg, "Unknown "); + break; + } + erts_print(to, to_arg, "Length: %d\n", + erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); + } + erts_print(to, to_arg, "Run Queue Port Length: %d\n", + erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); + + flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags); + erts_print(to, to_arg, "Run Queue Flags: "); + for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) { + erts_aint32_t chk = (1 << i); + if (flg & chk) { + switch (chk) { + case (1 << PRIORITY_MAX): + erts_print(to, to_arg, "NONEMPTY_MAX"); break; + case (1 << PRIORITY_HIGH): + erts_print(to, to_arg, "NONEMPTY_HIGH"); break; + case (1 << PRIORITY_NORMAL): + erts_print(to, to_arg, "NONEMPTY_NORMAL"); break; + case (1 << PRIORITY_LOW): + erts_print(to, to_arg, "NONEMPTY_LOW"); break; + case 
(1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)): + erts_print(to, to_arg, "EMIGRATE_MAX"); break; + case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)): + erts_print(to, to_arg, "EMIGRATE_HIGH"); break; + case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)): + erts_print(to, to_arg, "EMIGRATE_NORMAL"); break; + case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EMIGRATE_SHFT)): + erts_print(to, to_arg, "EMIGRATE_LOW"); break; + case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)): + erts_print(to, to_arg, "IMMIGRATE_MAX"); break; + case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)): + erts_print(to, to_arg, "IMMIGRATE_HIGH"); break; + case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)): + erts_print(to, to_arg, "IMMIGRATE_NORMAL"); break; + case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)): + erts_print(to, to_arg, "IMMIGRATE_LOW"); break; + case (1 << (PRIORITY_MAX + ERTS_RUNQ_FLGS_EVACUATE_SHFT)): + erts_print(to, to_arg, "EVACUATE_MAX"); break; + case (1 << (PRIORITY_HIGH + ERTS_RUNQ_FLGS_EVACUATE_SHFT)): + erts_print(to, to_arg, "EVACUATE_HIGH"); break; + case (1 << (PRIORITY_NORMAL + ERTS_RUNQ_FLGS_EVACUATE_SHFT)): + erts_print(to, to_arg, "EVACUATE_NORMAL"); break; + case (1 << (PRIORITY_LOW + ERTS_RUNQ_FLGS_EVACUATE_SHFT)): + erts_print(to, to_arg, "EVACUATE_LOW"); break; + case ERTS_RUNQ_FLG_OUT_OF_WORK: + erts_print(to, to_arg, "OUT_OF_WORK"); break; + case ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK: + erts_print(to, to_arg, "HALFTIME_OUT_OF_WORK"); break; + case ERTS_RUNQ_FLG_SUSPENDED: + erts_print(to, to_arg, "SUSPENDED"); break; + case ERTS_RUNQ_FLG_CHK_CPU_BIND: + erts_print(to, to_arg, "CHK_CPU_BIND"); break; + case ERTS_RUNQ_FLG_INACTIVE: + erts_print(to, to_arg, "INACTIVE"); break; + case ERTS_RUNQ_FLG_NONEMPTY: + erts_print(to, to_arg, "NONEMPTY"); break; + case ERTS_RUNQ_FLG_PROTECTED: + erts_print(to, to_arg, "PROTECTED"); break; + default: + erts_print(to, to_arg, "UNKNOWN(%d)", flg); break; + } + if (flg > chk) + erts_print(to, to_arg, " | "); + flg -= chk; + } + } + erts_print(to, to_arg, "\n"); + + /* This *MUST* be the last information in the scheduler block */ + p = esdp->current_process; + erts_print(to, to_arg, "Current Process: "); + if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) { + flg = erts_smp_atomic32_read_dirty(&p->state); + erts_print(to, to_arg, "%T\n", p->common.id); + + erts_print(to, to_arg, "Current Process State: "); + erts_dump_process_state(to, to_arg, flg); + + erts_print(to, to_arg, "Current Process Internal State: "); + erts_dump_extended_process_state(to, to_arg, flg); + + erts_print(to, to_arg, "Current Process Program counter: %p (", p->i); + print_function_from_pc(to, to_arg, p->i); + erts_print(to, to_arg, ")\n"); + erts_print(to, to_arg, "Current Process CP: %p (", p->cp); + print_function_from_pc(to, to_arg, p->cp); + erts_print(to, to_arg, ")\n"); + + /* Getting this stacktrace can segfault if we are very + unlucky and it is called while a process is being garbage collected. + Therefore we only call this on other schedulers if we either + have protection against segfaults, or we know that the process + is not garbage collecting. It *should* always be safe to call + on a process owned by us, even if it is currently being garbage + collected.
+ */ + erts_print(to, to_arg, "Current Process Limited Stack Trace:\n"); + erts_limited_stack_trace(to, to_arg, p); + } else + erts_print(to, to_arg, "\n"); + +} + +/* * A nice system halt closing all open ports goes as follows: * 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS * on all schedulers, then schedules itself out. @@ -12270,10 +12910,10 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg) * The same global atomic is used as refcount. * * A BIF that calls this should make sure to schedule out to never come back: - * erl_halt((int)(- code)); + * erts_halt(code); * ERTS_BIF_YIELD1(bif_export[BIF_erlang_halt_1], BIF_P, NIL); */ -void erl_halt(int code) +void erts_halt(int code) { if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress, erts_no_schedulers,
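// [Editor's aside; not part of the diff.] erts_halt() arms one global atomic
// that doubles as a countdown: the first caller swaps in a slot per
// scheduler, each scheduler reaps its ports and decrements, and whoever
// reaches zero performs the actual exit. A minimal sketch of that one-shot
// countdown under invented *_sk names.

#include <stdatomic.h>
#include <stdlib.h>

static atomic_int halt_progress_sk = -1;   // -1 means no halt in progress
static int        halt_code_sk;

// First caller wins; later calls are ignored because the CAS fails.
static void request_halt_sk(int code, int nschedulers)
{
    int expected = -1;
    if (atomic_compare_exchange_strong(&halt_progress_sk, &expected,
                                       nschedulers))
        halt_code_sk = code;
}

// Each scheduler calls this once its ports are reaped; the scheduler that
// drops the count to zero does the exit.
static void scheduler_ack_halt_sk(void)
{
    if (atomic_fetch_sub(&halt_progress_sk, 1) == 1)
        exit(halt_code_sk);
}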