Diffstat (limited to 'erts/emulator/beam/erl_process.c')
 erts/emulator/beam/erl_process.c | 1498 ++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 1005 insertions(+), 493 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 9386f79b56..0a099e69bb 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -92,10 +92,6 @@
#undef HARDDEBUG
#endif
-#ifdef HARDDEBUG
-#define HARDDEBUG_RUNQS
-#endif
-
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_init_process() */
#include "hipe_signal.h" /* for hipe_thread_signal_init() */
@@ -174,7 +170,6 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
typedef struct {
int aux_work;
int tse;
- int sys_schedule;
} ErtsBusyWaitParams;
static ErtsBusyWaitParams sched_busy_wait_params[ERTS_SCHED_TYPE_LAST + 1];
@@ -344,6 +339,10 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static erts_atomic32_t function_calls;
+static erts_atomic32_t doing_sys_schedule;
+#endif
static erts_atomic32_t no_empty_run_queues;
long erts_runq_supervision_interval = 0;
static ethr_event runq_supervision_event;
@@ -736,12 +735,6 @@ erts_pre_init_process(void)
#endif
}
-static void
-release_process(void *vproc)
-{
- erts_proc_dec_refc((Process *) vproc);
-}
-
/* initialize the scheduler */
void
erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
@@ -753,7 +746,7 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
erts_ptab_init_table(&erts_proc,
ERTS_ALC_T_PROC_TABLE,
- release_process,
+ NULL,
(ErtsPTabElementCommon *) &erts_invalid_process.common,
proc_tab_size,
sizeof(Process),
@@ -1477,7 +1470,10 @@ proclist_create(Process *p)
{
ErtsProcList *plp = proclist_alloc();
ensure_later_proc_interval(p->common.u.alive.started_interval);
- plp->pid = p->common.id;
+ if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE)
+ plp->u.p = p;
+ else
+ plp->u.pid = p->common.id;
plp->started_interval = p->common.u.alive.started_interval;
return plp;
}
@@ -1486,7 +1482,7 @@ static ERTS_INLINE ErtsProcList *
proclist_copy(ErtsProcList *plp0)
{
ErtsProcList *plp1 = proclist_alloc();
- plp1->pid = plp0->pid;
+ plp1->u.pid = plp0->u.pid;
plp1->started_interval = plp0->started_interval;
return plp1;
}
@@ -1521,7 +1517,10 @@ erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList *plp)
ErtsProcList *first = plp;
while (plp) {
- erts_print(to, to_arg, "%T", plp->pid);
+ if (is_pid(plp->u.pid))
+ erts_print(to, to_arg, "%T", plp->u.pid);
+ else
+ erts_print(to, to_arg, "%T", plp->u.p->common.id);
plp = plp->next;
if (plp == first)
break;
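
The two hunks above make ErtsProcList hold either a pid or, for processes already marked FREE, a raw Process pointer, and erts_proclist_dump() discriminates the cases with is_pid(). A minimal sketch of that one-word tagged union, assuming a low tag bit distinguishes an immediate pid term from an aligned pointer (real ERTS term tagging is more involved, and the names here are illustrative):

    #include <stdint.h>

    struct proc;                 /* stand-in for Process */

    typedef union {
        uintptr_t pid;           /* tagged immediate: low bit set */
        struct proc *p;          /* aligned pointer: low bit clear */
    } plist_ref;

    /* An is_pid()-style check can tell the members apart because an
     * aligned pointer stored in the same word never carries the tag. */
    static int plist_ref_is_pid(plist_ref r) { return (r.pid & 1u) != 0; }
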
@@ -1646,7 +1645,7 @@ haw_thr_prgr_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
}
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1656,7 +1655,7 @@ haw_thr_prgr_soft_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
if (erts_thr_progress_cmp(val, awdp->latest_wakeup) > 0) {
awdp->latest_wakeup = val;
haw_chk_later_cleanup_op_wakeup(awdp, val);
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
@@ -1670,7 +1669,7 @@ haw_thr_prgr_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val,
else {
awdp->latest_wakeup = val;
awdp->later_op.size = thr_prgr_later_cleanup_op_threshold;
- erts_thr_progress_wakeup(awdp->esdp, val);
+ erts_thr_progress_wakeup(erts_thr_prgr_data(awdp->esdp), val);
}
}
}
@@ -2463,6 +2462,13 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
erts_port_lock(prt);
+ if (prt->common.u.alive.reg &&
+ prt->common.u.alive.reg->name == am_heart_port) {
+ /* Leave the heart port alone so it is not killed before flushing is done */
+ erts_port_release(prt);
+ continue;
+ }
+
state = erts_atomic32_read_nob(&prt->state);
if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
| ERTS_PORT_SFLG_HALT))) {
@@ -3066,6 +3072,7 @@ aux_thread(void *unused)
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(-1);
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
+ ErtsThrPrgrData *tpd;
int thr_prgr_active = 1;
ERTS_MSACC_DECLARE_CACHE();
@@ -3087,12 +3094,16 @@ aux_thread(void *unused)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
#if ERTS_POLL_USE_FALLBACK
- ssi->psi = erts_create_pollset_thread(-1);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ ssi->psi = erts_create_pollset_thread(-2, tpd);
+#else
+ ssi->psi = erts_create_pollset_thread(-1, tpd);
+#endif
#endif
sched_prep_spin_wait(ssi);
@@ -3105,11 +3116,11 @@ aux_thread(void *unused)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
}
if (!aux_work) {
@@ -3120,7 +3131,7 @@ aux_thread(void *unused)
#endif
if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
#if ERTS_POLL_USE_FALLBACK
@@ -3132,11 +3143,11 @@ aux_thread(void *unused)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(ssi->psi);
+ erts_check_io(ssi->psi, ERTS_POLL_INF_TIMEOUT);
}
}
#else
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_prepare_wait(tpd);
flgs = sched_spin_wait(ssi, 0);
@@ -3153,7 +3164,7 @@ aux_thread(void *unused)
ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
}
}
- erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_finalize_wait(tpd);
#endif
}
@@ -3171,7 +3182,8 @@ poll_thread(void *arg)
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
- struct erts_poll_thread *psi = erts_create_pollset_thread(id);
+ struct erts_poll_thread *psi;
+ ErtsThrPrgrData *tpd;
ERTS_MSACC_DECLARE_CACHE();
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -3192,9 +3204,12 @@ poll_thread(void *arg)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+
+ psi = erts_create_pollset_thread(id, tpd);
+
ssi->psi = psi;
sched_prep_spin_wait(ssi);
@@ -3207,16 +3222,16 @@ poll_thread(void *arg)
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work) {
if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
+ erts_thr_progress_active(tpd, thr_prgr_active = 1);
aux_work = handle_aux_work(awdp, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(NULL))
- erts_thr_progress_leader_update(NULL);
+ if (aux_work && erts_thr_progress_update(tpd))
+ erts_thr_progress_leader_update(tpd);
}
if (!aux_work) {
if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_active(tpd, thr_prgr_active = 0);
flgs = sched_spin_wait(ssi, 0);
@@ -3226,7 +3241,7 @@ poll_thread(void *arg)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(psi);
+ erts_check_io(psi, ERTS_POLL_INF_TIMEOUT);
}
}
}
@@ -3236,6 +3251,78 @@ poll_thread(void *arg)
return NULL;
}
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+static ERTS_INLINE void
+clear_sys_scheduling(void)
+{
+ erts_atomic32_set_relb(&function_calls, 0);
+ erts_atomic32_set_mb(&doing_sys_schedule, 0);
+}
+
+static ERTS_INLINE int
+try_set_sys_scheduling(void)
+{
+ return 0 == erts_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
+}
+
+
+static ERTS_INLINE int
+prepare_for_sys_schedule(void)
+{
+ while (!erts_port_task_have_outstanding_io_tasks()
+ && try_set_sys_scheduling()) {
+ if (!erts_port_task_have_outstanding_io_tasks())
+ return 1;
+ clear_sys_scheduling();
+ }
+ return 0;
+}
+
+#else
+#define clear_sys_scheduling()
+#define prepare_for_sys_schedule() 0
+#endif
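
prepare_for_sys_schedule() takes the doing_sys_schedule flag with a compare-and-swap and then re-tests the outstanding-I/O condition, backing off if it changed in between; this keeps a scheduler from parking itself in the pollset while I/O results are still pending. A standalone C11 sketch of the same acquire/re-check/back-off shape (has_outstanding_io() is a stand-in, not the ERTS API):

    #include <stdatomic.h>

    static atomic_int busy;             /* plays the role of doing_sys_schedule */
    extern int has_outstanding_io(void);

    static int try_enter(void)
    {
        int expected = 0;
        return atomic_compare_exchange_strong(&busy, &expected, 1);
    }

    static int prepare(void)
    {
        while (!has_outstanding_io() && try_enter()) {
            /* Re-check after winning the flag: the condition may have
             * changed between the first test and the cmpxchg. */
            if (!has_outstanding_io())
                return 1;               /* flag held; safe to poll */
            atomic_store(&busy, 0);     /* condition changed; back off */
        }
        return 0;
    }
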
+
+#ifdef HARDDEBUG
+#define ERTS_HDBG_CHK_SLEEP_LIST(SL, L, F, FN) \
+ check_sleepers_list((SL), (L), (F), (FN))
+static void check_sleepers_list(ErtsSchedulerSleepList *sl,
+ int lock,
+ ErtsSchedulerSleepInfo *find,
+ ErtsSchedulerSleepInfo *find_not)
+{
+ ErtsSchedulerSleepInfo *last_out;
+ int found = 0;
+
+ if (lock)
+ erts_spin_lock(&sl->lock);
+
+ ERTS_ASSERT(!find_not || (!find_not->next && !find_not->prev));
+
+ last_out = sl->list;
+ if (last_out) {
+ ErtsSchedulerSleepInfo *tmp = last_out;
+ do {
+ ERTS_ASSERT(tmp->next);
+ ERTS_ASSERT(tmp->prev);
+ ERTS_ASSERT(tmp->next->prev == tmp);
+ ERTS_ASSERT(tmp->prev->next == tmp);
+ ERTS_ASSERT(tmp != find_not);
+ if (tmp == find)
+ found = !0;
+ tmp = tmp->next;
+
+ } while (tmp != last_out);
+ }
+ ERTS_ASSERT(!find || found);
+
+ if (lock)
+ erts_spin_unlock(&sl->lock);
+}
+#else
+#define ERTS_HDBG_CHK_SLEEP_LIST(SL, L, F, FN) ((void) 0)
+#endif
+
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
@@ -3249,23 +3336,29 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_spin_lock(&rq->sleepers.lock);
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- erts_spin_unlock(&rq->sleepers.lock);
return;
}
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
- ssi->prev = NULL;
- ssi->next = rq->sleepers.list;
- if (rq->sleepers.list)
- rq->sleepers.list->prev = ssi;
- rq->sleepers.list = ssi;
- erts_spin_unlock(&rq->sleepers.lock);
+ erts_spin_lock(&rq->sleepers.lock);
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, ssi);
+ ASSERT(!ssi->next); /* Not in sleepers list */
+ ASSERT(!ssi->prev);
+ if (!rq->sleepers.list) {
+ ssi->next = ssi->prev = ssi;
+ rq->sleepers.list = ssi;
+ }
+ else {
+ ssi->prev = rq->sleepers.list;
+ ssi->next = rq->sleepers.list->next;
+ ssi->prev->next = ssi;
+ ssi->next->prev = ssi;
+ }
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, ssi, NULL);
+ erts_spin_unlock(&rq->sleepers.lock);
dirty_active(esdp, -1);
}
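
The dirty sleepers list becomes circular and doubly linked: NULL means empty, a singleton points to itself, and a non-NULL next pointer doubles as the "I am enqueued" marker tested further down. A generic sketch of the splice performed above, under those same invariants (not the ErtsSchedulerSleepInfo layout):

    #include <stddef.h>

    struct node { struct node *next, *prev; };

    /* Splice n in next to the head. These are the invariants that
     * check_sleepers_list() asserts in the HARDDEBUG block above. */
    static void circ_insert(struct node **list, struct node *n)
    {
        if (!*list) {
            n->next = n->prev = n;
            *list = n;
        } else {
            n->prev = *list;
            n->next = (*list)->next;
            n->prev->next = n;
            n->next->prev = n;
        }
    }
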
@@ -3284,28 +3377,31 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ErtsMonotonicTime current_time = 0;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+
+ if (aux_work && ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ ERTS_INTERNAL_ERROR("Executing aux work on a dirty scheduler.");
+ }
+
+ if (aux_work) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
}
if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- flgs = erts_atomic32_read_acqb(&ssi->flags);
- current_time = erts_get_monotonic_time(esdp);
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
- }
- erts_bump_timers(esdp->timer_wheel, current_time);
+ flgs = erts_atomic32_read_acqb(&ssi->flags);
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
}
+ erts_bump_timers(esdp->timer_wheel, current_time);
}
}
else {
@@ -3321,19 +3417,37 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
if (do_timeout) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
}
- else {
+ else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && prepare_for_sys_schedule()) {
+ /* We sleep in check_io, only for normal schedulers */
+ if (thr_prgr_active) {
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
+ sched_wall_time_change(esdp, 0);
+ }
+ flgs = sched_spin_wait(ssi, 0);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ erts_check_io(ssi->psi, timeout_time);
+ current_time = erts_get_monotonic_time(esdp);
+ }
+ }
+ *fcalls = 0;
+ clear_sys_scheduling();
+ } else {
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
sched_wall_time_change(esdp, 0);
}
- erts_thr_progress_prepare_wait(esdp);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(esdp));
}
-
flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
@@ -3363,7 +3477,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
}
if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
- erts_thr_progress_finalize_wait(esdp);
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(esdp));
}
if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time)
erts_bump_timers(esdp->timer_wheel, current_time);
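
This new branch lets a normal scheduler that won the sys-schedule flag sleep inside erts_check_io() with the timer wheel's timeout instead of on its thread event, after publishing ERTS_SSI_FLG_POLL_SLEEPING so a waker knows to interrupt the pollset. A rough model of that publish-flag-then-block handshake, assuming a simple WAKE bit (the real ssi flag protocol has more states):

    #include <stdatomic.h>

    enum { SLEEPING = 1, POLL_SLEEPING = 2, WAKE = 4 };
    static atomic_int sleep_flags;

    extern void poll_wait(long timeout);   /* erts_check_io() stand-in */

    static void sleep_in_poll(long timeout)
    {
        int prev = atomic_fetch_or(&sleep_flags, SLEEPING | POLL_SLEEPING);
        if (!(prev & WAKE))       /* nobody asked us to wake meanwhile */
            poll_wait(timeout);   /* waker must interrupt the pollset */
        atomic_store(&sleep_flags, 0);
    }
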
@@ -3389,10 +3503,30 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
(ERTS_SSI_FLG_SUSPENDED
| ERTS_SSI_FLG_MSB_EXEC));
- if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
dirty_sched_wall_time_change(esdp, working = 1);
+ erts_spin_lock(&rq->sleepers.lock);
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, ssi->next ? ssi : NULL, NULL);
+ if (ssi->next) { /* Still in list... */
+ if (ssi->next == ssi) {
+ ASSERT(rq->sleepers.list == ssi);
+ ASSERT(ssi->prev == ssi);
+ rq->sleepers.list = NULL;
+ }
+ else {
+ ASSERT(ssi->prev != ssi);
+ if (rq->sleepers.list == ssi)
+ rq->sleepers.list = ssi->next;
+ ssi->prev->next = ssi->next;
+ ssi->next->prev = ssi->prev;
+ }
+ ssi->next = ssi->prev = NULL;
+ }
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, ssi);
+ erts_spin_unlock(&rq->sleepers.lock);
+ }
else if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
@@ -3478,56 +3612,44 @@ wake_scheduler(ErtsRunQueue *rq)
}
static void
-wake_dirty_schedulers(ErtsRunQueue *rq, int one)
+wake_dirty_scheduler(ErtsRunQueue *rq)
{
- ErtsSchedulerSleepInfo *ssi;
+ ErtsSchedulerSleepInfo *lo_ssi, *fo_ssi;
ErtsSchedulerSleepList *sl;
ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
sl = &rq->sleepers;
erts_spin_lock(&sl->lock);
- ssi = sl->list;
- if (!ssi) {
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, NULL);
+ lo_ssi = sl->list;
+ if (!lo_ssi) {
erts_spin_unlock(&sl->lock);
- if (one)
- wake_scheduler(rq);
- } else if (one) {
+ wake_scheduler(rq);
+ }
+ else {
erts_aint32_t flgs;
- if (ssi->prev)
- ssi->prev->next = ssi->next;
- else {
- ASSERT(sl->list == ssi);
- sl->list = ssi->next;
+ fo_ssi = lo_ssi->next;
+ ASSERT(fo_ssi->prev == lo_ssi);
+ if (fo_ssi == lo_ssi) {
+ ASSERT(lo_ssi->prev == lo_ssi);
+ sl->list = NULL;
+ }
+ else {
+ ASSERT(lo_ssi->prev != lo_ssi);
+ lo_ssi->next = fo_ssi->next;
+ fo_ssi->next->prev = fo_ssi->prev;
}
- if (ssi->next)
- ssi->next->prev = ssi->prev;
-
- erts_spin_unlock(&sl->lock);
-
- ERTS_THR_MEMORY_BARRIER;
- flgs = ssi_flags_set_wake(ssi);
- erts_sched_finish_poke(ssi, flgs);
- } else {
- sl->list = NULL;
+ fo_ssi->next = fo_ssi->prev = NULL;
+ ERTS_HDBG_CHK_SLEEP_LIST(&rq->sleepers, 0, NULL, fo_ssi);
erts_spin_unlock(&sl->lock);
ERTS_THR_MEMORY_BARRIER;
- do {
- ErtsSchedulerSleepInfo *wake_ssi = ssi;
- ssi = ssi->next;
- erts_sched_finish_poke(wake_ssi, ssi_flags_set_wake(wake_ssi));
- } while (ssi);
+ flgs = ssi_flags_set_wake(fo_ssi);
+ erts_sched_finish_poke(fo_ssi, flgs);
}
}
-static void
-wake_dirty_scheduler(ErtsRunQueue *rq)
-{
- wake_dirty_schedulers(rq, 1);
-}
-
-
#define ERTS_NO_USED_RUNQS_SHIFT 16
#define ERTS_NO_RUNQS_MASK 0xffffU
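
wake_dirty_scheduler() above now pops the node after the list head, which is the oldest sleeper given how scheduler_wait() splices new nodes in, and clears its pointers to restore the not-in-list marker. A companion sketch to the insert shown earlier, with the same generic node type rather than ErtsSchedulerSleepInfo:

    struct node { struct node *next, *prev; };

    static struct node *circ_remove_first(struct node **list)
    {
        struct node *head = *list, *first;
        if (!head)
            return NULL;
        first = head->next;
        if (first == head) {
            *list = NULL;                  /* singleton: list now empty */
        } else {
            head->next = first->next;
            first->next->prev = head;
        }
        first->next = first->prev = NULL;  /* "not in list" marker */
        return first;
    }
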
@@ -4022,9 +4144,7 @@ schedule_bound_processes(ErtsRunQueue *rq,
static ERTS_INLINE void
clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
{
-#ifdef DEBUG
erts_aint32_t old;
-#endif
erts_aint32_t qb = prio_bit;
if (rq == ERTS_DIRTY_CPU_RUNQ)
qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
@@ -4032,13 +4152,8 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
}
-#ifdef DEBUG
- old = (int)
-#else
- (void)
-#endif
- erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
- ASSERT(old & qb);
+ old = (int) erts_atomic32_read_band_mb(&p->dirty_state, ~qb);
+ ASSERT(old & qb); (void)old;
}
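
Several hunks in this commit, like the one above, drop the #ifdef DEBUG scaffolding around assert-only variables: the atomic read is performed unconditionally and a (void) cast silences the unused-variable warning in builds where ASSERT expands to nothing. The pattern in isolation, using plain assert():

    #include <assert.h>

    static void consume_flag(unsigned *flags, unsigned bit)
    {
        unsigned old = *flags;    /* always evaluated, in every build type */
        *flags &= ~bit;
        assert(old & bit); (void)old;   /* no warning when assert is a no-op */
    }
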
@@ -5118,7 +5233,6 @@ erts_fprintf(stderr, "--------------------------------\n");
rq->out_of_work_count = 0;
(void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
-
rq->max_len = erts_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
@@ -5557,7 +5671,6 @@ erts_sched_set_busy_wait_threshold(ErtsSchedType sched_type, char *str)
return EINVAL;
}
- params->sys_schedule = sys_sched;
params->tse = sys_sched * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
params->aux_work = sys_sched * aux_work_fact;
@@ -5768,6 +5881,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ erts_atomic32_init_nob(&doing_sys_schedule, 0);
+ erts_atomic32_init_nob(&function_calls, 0);
+#endif
erts_atomic32_init_nob(&no_empty_run_queues, 0);
erts_no_run_queues = n;
@@ -5882,6 +5999,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi;
erts_atomic32_init_nob(&ssi->flags, 0);
+ ssi->next = NULL;
+ ssi->prev = NULL;
ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
@@ -5892,6 +6011,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi;
erts_atomic32_init_nob(&ssi->flags, 0);
+ ssi->next = NULL;
+ ssi->prev = NULL;
ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
@@ -6405,8 +6526,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
n &= ~running_flgs;
if ((!!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- | ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE))
- & !(a & ERTS_PSFLG_FREE)) {
+ | ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE))) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
@@ -6441,7 +6561,6 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
else {
Process* sched_p;
- ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6571,8 +6690,8 @@ change_proc_schedule_state(Process *p,
enqueue = ERTS_ENQUEUE_NOT;
- if (a & ERTS_PSFLG_FREE)
- break; /* We don't want to schedule free processes... */
+ if ((a & (ERTS_PSFLG_FREE|ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_FREE)
+ break; /* If free and not active, do not schedule */
if (clear_state_flags)
n &= ~clear_state_flags;
@@ -7098,8 +7217,7 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(sched_type,
- plp->pid);
+ schdlr_sspnd_resume_proc(sched_type, plp->u.pid);
proclist_destroy(plp);
}
}
@@ -7175,9 +7293,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
Uint32 nrml_prio, dcpu_prio, dio_prio;
ErtsSchedType exec_type;
ErtsRunQueue *exec_rq;
-#ifdef DEBUG
erts_aint32_t dbg_val;
-#endif
ASSERT(schdlr_sspnd.msb.ongoing);
@@ -7292,16 +7408,12 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
* Suspend this scheduler and wake up scheduler
* number one of another type...
*/
-#ifdef DEBUG
dbg_val =
-#else
- (void)
-#endif
erts_atomic32_read_bset_mb(&esdp->ssi->flags,
(ERTS_SSI_FLG_SUSPENDED
| ERTS_SSI_FLG_MSB_EXEC),
ERTS_SSI_FLG_SUSPENDED);
- ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC);
+ ASSERT(dbg_val & ERTS_SSI_FLG_MSB_EXEC); (void)dbg_val;
switch (exec_type) {
case ERTS_SCHED_NORMAL:
@@ -7319,11 +7431,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
break;
}
-#ifdef DEBUG
dbg_val =
-#else
- (void)
-#endif
erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags,
(ERTS_SSI_FLG_SUSPENDED
| ERTS_SSI_FLG_MSB_EXEC),
@@ -7411,7 +7519,13 @@ suspend_scheduler(ErtsSchedulerData *esdp)
return;
}
+#ifdef HARDDEBUG
+ if (sched_type != ERTS_SCHED_NORMAL)
+ ERTS_HDBG_CHK_SLEEP_LIST(&esdp->run_queue->sleepers, !0, NULL, ssi);
+#endif
+
if (erts_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) {
+
ASSERT(no == 1);
if (!msb_scheduler_type_switch(sched_type, esdp, no))
return;
@@ -7535,7 +7649,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
else {
schdlr_sspnd.changer = am_true; /* change right in transit */
/* resume process that is queued for next change... */
- resume.onln.nxt = plp->pid;
+ resume.onln.nxt = plp->u.pid;
ASSERT(is_internal_pid(resume.onln.nxt));
}
}
@@ -7565,7 +7679,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (aux_work|evacuate) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp),
+ thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
if (aux_work)
@@ -7573,8 +7688,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
aux_work,
1);
- if (aux_work && erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
+ if (aux_work && erts_thr_progress_update(erts_thr_prgr_data(esdp)))
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
if (evacuate) {
erts_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7593,18 +7708,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
if (!aux_work && current_time < timeout_time) {
/* go to sleep... */
if (thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 0);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 0);
sched_wall_time_change(esdp, 0);
}
- erts_thr_progress_prepare_wait(NULL);
+ erts_thr_progress_prepare_wait(erts_thr_prgr_data(NULL));
suspend_normal_scheduler_sleep(esdp);
- erts_thr_progress_finalize_wait(NULL);
+ erts_thr_progress_finalize_wait(erts_thr_prgr_data(NULL));
current_time = erts_get_monotonic_time(esdp);
}
if (current_time >= timeout_time) {
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -7661,7 +7776,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
profile_scheduler(make_small(esdp->no), am_active);
if (!thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ erts_thr_progress_active(erts_thr_prgr_data(esdp), thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
}
@@ -7754,7 +7869,7 @@ abort_sched_onln_chng_waitq(Process *p)
proclist_destroy(plp);
plp = erts_proclist_peek_first(schdlr_sspnd.chngq);
if (plp)
- resume = plp->pid;
+ resume = plp->u.pid;
else
schdlr_sspnd.changer = am_false;
}
@@ -8020,7 +8135,8 @@ done:
ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal, int all)
{
- int resume_proc, ix, res, have_unlocked_plocks = 0;
+ ErtsSchedSuspendResult res;
+ int resume_proc, ix, have_unlocked_plocks = 0;
ErtsProcList *plp;
ErtsMultiSchedulingBlock *msbp;
erts_aint32_t chng_flg;
@@ -8253,10 +8369,10 @@ erts_multi_scheduling_blockers(Process *p, int normal)
plp1;
plp1 = erts_proclist_peek_next(msbp->blckrs, plp1)) {
for (plp2 = erts_proclist_peek_first(msbp->blckrs);
- plp2->pid != plp1->pid;
+ plp2->u.pid != plp1->u.pid;
plp2 = erts_proclist_peek_next(msbp->blckrs, plp2));
if (plp2 == plp1) {
- res = CONS(hp, plp1->pid, res);
+ res = CONS(hp, plp1->u.pid, res);
hp += 2;
}
/* else: already in result list */
@@ -8296,6 +8412,11 @@ sched_thread_func(void *vesdp)
erts_msacc_init_thread("scheduler", no, 1);
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
+
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ esdp->ssi->psi = erts_create_pollset_thread(-1, NULL);
+#endif
+
erts_alloc_register_scheduler(vesdp);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -8888,11 +9009,8 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
suspend = 1;
if (suspend) {
-#ifdef DEBUG
- int res =
-#endif
- suspend_process(c_p, c_p);
- ASSERT(res);
+ int res = suspend_process(c_p, c_p);
+ ASSERT(res); (void)res;
}
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
@@ -8923,8 +9041,13 @@ erts_resume_processes(ErtsProcList *list)
while (plp) {
Process *proc;
ErtsProcList *fplp;
- ASSERT(is_internal_pid(plp->pid));
- proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
+ ASSERT(is_internal_pid(plp->u.pid) || is_CP((Eterm)plp->u.p));
+ if (is_internal_pid(plp->u.pid))
+ proc = erts_pid2proc(NULL, 0, plp->u.pid, ERTS_PROC_LOCK_STATUS);
+ else {
+ proc = plp->u.p;
+ erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS);
+ }
if (proc) {
if (erts_proclist_same(plp, proc)) {
resume_process(proc, ERTS_PROC_LOCK_STATUS);
@@ -9079,6 +9202,9 @@ unlock_lock_rq(int pre_free, void *vrq)
}
+static void trace_schedule_in(Process *p, erts_aint32_t state);
+static void trace_schedule_out(Process *p, erts_aint32_t state);
+
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -9102,7 +9228,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
Process *proxy_p = NULL;
ErtsRunQueue *rq;
int context_reds;
- int fcalls;
+ int fcalls = 0;
int actual_reds;
int reds;
Uint32 flags;
@@ -9176,6 +9302,10 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ fcalls = (int) erts_atomic32_add_read_acqb(&function_calls, reds);
+#endif
+
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
@@ -9184,22 +9314,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
state = erts_atomic32_read_nob(&p->state);
- if (IS_TRACED(p)) {
- if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
- erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ERTS_PROC_LOCK_MAIN,
- ((state & ERTS_PSFLG_FREE)
- ? am_out_exited
- : am_out_exiting));
- }
- else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
- ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
- }
- }
+ if (IS_TRACED(p))
+ trace_schedule_out(p, state);
erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
@@ -9324,12 +9440,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
- leader_update = erts_thr_progress_update(esdp);
+ leader_update = erts_thr_progress_update(erts_thr_prgr_data(esdp));
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work | leader_update) {
erts_runq_unlock(rq);
if (leader_update)
- erts_thr_progress_leader_update(esdp);
+ erts_thr_progress_leader_update(erts_thr_prgr_data(esdp));
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_runq_lock(rq);
@@ -9406,7 +9522,33 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
non_empty_runq(rq);
goto check_activities_to_run;
- }
+ } else if (is_normal_sched &&
+ fcalls > (2 * context_reds) &&
+ prepare_for_sys_schedule()) {
+ ErtsMonotonicTime current_time;
+ /*
+ * Schedule system-level activities.
+ */
+
+ ERTS_MSACC_PUSH_STATE_CACHED_M();
+
+ erts_runq_unlock(rq);
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+ LTTNG2(scheduler_poll, esdp->no, 1);
+
+ erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT);
+ ERTS_MSACC_POP_STATE_M();
+
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+ erts_bump_timers(esdp->timer_wheel, current_time);
+
+ erts_runq_lock(rq);
+ fcalls = 0;
+ clear_sys_scheduling();
+ goto continue_check_activities_to_run;
+ }
if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
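
The branch added above makes ordinary schedulers poll for I/O themselves: each pass through erts_schedule() adds its reductions to the shared function_calls counter, and whichever scheduler pushes it past twice the context-switch budget, and wins prepare_for_sys_schedule(), runs a zero-timeout erts_check_io() before resetting the counter. A compact sketch of that trigger, assuming C11 atomics and caller-supplied stand-ins for the flag and poll operations:

    #include <stdatomic.h>

    static atomic_int calls;            /* plays the role of function_calls */

    static void maybe_poll(int reds, int threshold,
                           int (*try_enter)(void),   /* prepare_for_sys_schedule() */
                           void (*leave)(void),      /* clear_sys_scheduling() */
                           void (*poll_nowait)(void))/* zero-timeout check_io */
    {
        int fcalls = atomic_fetch_add(&calls, reds) + reds;
        if (fcalls > threshold && try_enter()) {
            poll_nowait();              /* never blocks on this path */
            atomic_store(&calls, 0);
            leave();
        }
    }
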
@@ -9497,7 +9639,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
while (1) {
erts_aint32_t exp, new;
- int run_process;
+ int run_process, not_running, exiting_on_normal_sched,
+ not_suspended, not_exiting_on_dirty_sched;
new = exp = state;
new &= psflg_band_mask;
/*
@@ -9506,29 +9649,33 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
* scheduler, and not suspended (and not in a
* state where suspend should be ignored).
*/
- run_process = (((!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS
- | ERTS_PSFLG_FREE)))
- | (((state & (ERTS_PSFLG_RUNNING
-
- | ERTS_PSFLG_FREE
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING_SYS
- | ERTS_PSFLG_EXITING))
- == ERTS_PSFLG_EXITING)
- & (!!is_normal_sched))
- )
- & ((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_EXITING
- | ERTS_PSFLG_FREE
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- != ERTS_PSFLG_SUSPENDED)
- & (!(state & ERTS_PSFLG_EXITING)
- | (!!is_normal_sched))
- );
+ not_running = !(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_FREE));
+ exiting_on_normal_sched =
+ ((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))
+ == (ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE))
+ & (!!is_normal_sched);
+
+
+ not_suspended = ((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ != ERTS_PSFLG_SUSPENDED);
+
+ not_exiting_on_dirty_sched = !(state & ERTS_PSFLG_EXITING) | (!!is_normal_sched);
+
+ run_process = (not_running | exiting_on_normal_sched)
+ & not_suspended
+ & not_exiting_on_dirty_sched;
if (run_process) {
if (state & (ERTS_PSFLG_ACTIVE_SYS
@@ -9610,6 +9757,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* Migrate to dirty scheduler... */
sunlock_sched_out_proc:
erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (IS_TRACED(p))
+ trace_schedule_in(p, state);
goto sched_out_proc;
}
}
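
The rewrite above unpacks one large bitwise expression into four named 0-or-1 predicates and combines them at the end; because every operand is normalized, bitwise & and | evaluate all terms without short-circuiting, exactly as the original expression did. A reduced illustration with made-up flag values:

    enum {
        FLG_RUNNING   = 1 << 0,
        FLG_SUSPENDED = 1 << 1,
        FLG_EXITING   = 1 << 2
    };

    static int may_run(unsigned state)
    {
        int not_running   = !(state & FLG_RUNNING);
        int not_suspended = (state & (FLG_SUSPENDED | FLG_EXITING))
                              != FLG_SUSPENDED;
        /* bitwise & on 0/1 values: all terms evaluated, branch-free */
        return not_running & not_suspended;
    }
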
@@ -9643,29 +9792,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- /* Clear tracer if it has been removed */
- if (IS_TRACED(p) && erts_is_tracer_proc_enabled(
- p, ERTS_PROC_LOCK_MAIN, &p->common)) {
-
- if (state & ERTS_PSFLG_EXITING) {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
- }
- else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
- ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
- }
- if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
- erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
- }
- }
+ if (IS_TRACED(p))
+ trace_schedule_in(p, state);
if (is_normal_sched) {
if (state & ERTS_PSFLG_RUNNING_SYS) {
if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) {
int local_only = (!!(p->flags & F_LOCAL_SIGS_ONLY)
- & !(state & ERTS_PSFLG_SUSPENDED));
+ & !(state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLGS_DIRTY_WORK)));
if (!local_only | !!(state & ERTS_PSFLG_SIG_Q)) {
int sig_reds;
/*
@@ -9823,6 +9957,50 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
+static void
+trace_schedule_in(Process *p, erts_aint32_t state)
+{
+ ASSERT(IS_TRACED(p));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCK_MAIN);
+
+ /* Clear tracer if it has been removed */
+ if (erts_is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common)) {
+
+ if (state & ERTS_PSFLG_EXITING && p->u.terminate) {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
+ }
+ else {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
+ }
+ if (IS_TRACED_FL(p, F_TRACE_CALLS))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
+ }
+
+}
+
+static void
+trace_schedule_out(Process *p, erts_aint32_t state)
+{
+ ASSERT(IS_TRACED(p));
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCK_MAIN);
+
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
+
+ if (state & ERTS_PSFLG_EXITING && p->u.terminate) {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out_exiting);
+ }
+ else {
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
+ }
+}
+
static int
notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
Eterm st_result, int normal_sched)
@@ -11889,12 +12067,91 @@ erts_set_self_exiting(Process *c_p, Eterm reason)
add2runq(enqueue, enq_prio, c_p, state, NULL);
}
-void
-erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
+static int
+erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
+{
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
+ Process *c_p = ctxt->c_p;
+ Eterm reason = ctxt->reason;
+ int code;
+ ErtsDSigSendContext ctx;
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ Eterm watcher;
+ ErtsMonitorData *mdp = NULL;
+ Eterm watched;
+
+ ASSERT(erts_monitor_is_target(mon) && mon->type == ERTS_MON_TYPE_DIST_PROC);
+
+ mdp = erts_monitor_to_data(mon);
+
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else
+ watched = c_p->common.id;
+ ASSERT(is_internal_pid(watched) || is_atom(watched));
+
+ watcher = mon->other.item;
+ ASSERT(is_external_pid(watcher));
+ dep = external_pid_dist_entry(watcher);
+ ASSERT(dep);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
+
+ code = erts_dsig_prepare(&ctx, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 0, 1);
+
+ ctx.reds = (Sint) (reds * TERM_TO_BINARY_LOOP_FACTOR);
+
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ break;
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED:
+ if (dist->connection_id != ctx.connection_id)
+ break;
+ code = erts_dsig_send_m_exit(&ctx,
+ watcher,
+ watched,
+ mdp->ref,
+ reason);
+ switch (code) {
+ case ERTS_DSIG_SEND_CONTINUE:
+ erts_set_gc_state(c_p, 0);
+ ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
+ /* fall-through */
+ case ERTS_DSIG_SEND_YIELD:
+ break;
+ case ERTS_DSIG_SEND_OK:
+ break;
+ case ERTS_DSIG_SEND_TOO_LRG:
+ erts_set_gc_state(c_p, 1);
+ break;
+ default:
+ ASSERT(! "Invalid dsig send exit monitor result");
+ break;
+ }
+ break;
+ default:
+ ASSERT(! "Invalid dsig prep exit monitor result");
+ break;
+ }
+ if (!erts_monitor_dist_delete(&mdp->origin))
+ erts_monitor_release(mon);
+ else
+ erts_monitor_release_both(mdp);
+ return reds - (ctx.reds / TERM_TO_BINARY_LOOP_FACTOR);
+}
+
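erts_proc_exit_handle_dist_monitor() above follows the new yielding-callback convention: each handler receives the remaining reduction budget, converts it to the dist-send unit by multiplying with TERM_TO_BINARY_LOOP_FACTOR, and reports back how much it consumed. A minimal driver/handler sketch of that contract (generic item type, illustrative names):

    struct item { struct item *next; };

    /* The handler gets the remaining budget and returns the amount it
     * consumed; the driver yields (returns 0) when the budget is gone,
     * leaving *list positioned for the next resume. */
    static int foreach_yielding(struct item **list,
                                long (*handle)(struct item *, long),
                                long reds)
    {
        while (*list) {
            struct item *it = *list;
            *list = it->next;
            reds -= handle(it, reds);
            if (reds <= 0)
                return 0;       /* out of budget: caller must yield */
        }
        return 1;               /* done */
    }
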
+int
+erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
{
- Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
- Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
+ Process *c_p = ctxt->c_p;
+ Eterm reason = ctxt->reason;
ErtsMonitorData *mdp = NULL;
+ int res = 1;
if (erts_monitor_is_target(mon)) {
/* We are being watched... */
@@ -11924,43 +12181,48 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
case ERTS_MON_TYPE_DIST_PROC: {
ErtsMonLnkDist *dist;
DistEntry *dep;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
Eterm watcher;
Eterm watched;
- mdp = erts_monitor_to_data(mon);
-
- if (mon->flags & ERTS_ML_FLG_NAME)
- watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
- else
- watched = c_p->common.id;
- ASSERT(is_internal_pid(watched) || is_atom(watched));
+ if (is_immed(reason)) {
+ mdp = erts_monitor_to_data(mon);
- watcher = mon->other.item;
- ASSERT(is_external_pid(watcher));
- dep = external_pid_dist_entry(watcher);
- ASSERT(dep);
- dist = ((ErtsMonitorDataExtended *) mdp)->dist;
- ASSERT(dist);
- code = erts_dsig_prepare(&dsd, dep, NULL, 0,
- ERTS_DSP_NO_LOCK, 0, 0);
- switch (code) {
- case ERTS_DSIG_PREP_CONNECTED:
- case ERTS_DSIG_PREP_PENDING:
- if (dist->connection_id == dsd.connection_id) {
- code = erts_dsig_send_m_exit(&dsd,
- watcher,
- watched,
- mdp->ref,
- reason);
- ASSERT(code == ERTS_DSIG_SEND_OK);
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else
+ watched = c_p->common.id;
+ ASSERT(is_internal_pid(watched) || is_atom(watched));
+
+ watcher = mon->other.item;
+ ASSERT(is_external_pid(watcher));
+ dep = external_pid_dist_entry(watcher);
+ ASSERT(dep);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
+ code = erts_dsig_prepare(&ctx, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 1, 1, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == ctx.connection_id) {
+ code = erts_dsig_send_m_exit(&ctx,
+ watcher,
+ watched,
+ mdp->ref,
+ reason);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ default:
+ break;
}
- default:
- break;
+ if (!erts_monitor_dist_delete(&mdp->origin))
+ mdp = NULL;
+ } else {
+ erts_monitor_tree_insert(&ctxt->dist_monitors, mon);
+ return 1;
}
- if (!erts_monitor_dist_delete(&mdp->origin))
- mdp = NULL;
break;
}
default:
@@ -12002,7 +12264,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
case ERTS_MON_TYPE_DIST_PROC: {
ErtsMonLnkDist *dist;
DistEntry *dep;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
Eterm watched;
@@ -12019,17 +12281,16 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
ASSERT(is_external_pid(watched));
dep = external_pid_dist_entry(watched);
}
- code = erts_dsig_prepare(&dsd, dep, NULL, 0,
- ERTS_DSP_NO_LOCK, 0, 0);
+ code = erts_dsig_prepare(&ctx, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 1, 1, 0);
switch (code) {
case ERTS_DSIG_PREP_CONNECTED:
case ERTS_DSIG_PREP_PENDING:
- if (dist->connection_id == dsd.connection_id) {
- code = erts_dsig_send_demonitor(&dsd,
+ if (dist->connection_id == ctx.connection_id) {
+ code = erts_dsig_send_demonitor(&ctx,
c_p->common.id,
watched,
- mdp->ref,
- 1);
+ mdp->ref);
ASSERT(code == ERTS_DSIG_SEND_OK);
}
default:
@@ -12037,6 +12298,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
}
if (!erts_monitor_dist_delete(&mdp->target))
mdp = NULL;
+ res = 100;
break;
}
default:
@@ -12049,11 +12311,84 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
erts_monitor_release_both(mdp);
else if (mon)
erts_monitor_release(mon);
+ return res;
}
-void
-erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
+static int
+erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
+{
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
+ Process *c_p = ctxt->c_p;
+ Eterm reason = ctxt->reason;
+ int code;
+ ErtsDSigSendContext ctx;
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ ErtsLink *dlnk;
+ ErtsLinkData *ldp = NULL;
+
+ ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
+ dlnk = erts_link_to_other(lnk, &ldp);
+ dist = ((ErtsLinkDataExtended *) ldp)->dist;
+
+ ASSERT(is_external_pid(lnk->other.item));
+ dep = external_pid_dist_entry(lnk->other.item);
+
+ ASSERT(dep != erts_this_dist_entry);
+
+ if (!erts_link_dist_delete(dlnk))
+ ldp = NULL;
+
+ code = erts_dsig_prepare(&ctx, dep, c_p, ERTS_PROC_LOCK_MAIN,
+ ERTS_DSP_NO_LOCK, 0, 0, 0);
+
+ ctx.reds = (Sint) (reds * TERM_TO_BINARY_LOOP_FACTOR);
+
+ switch (code) {
+ case ERTS_DSIG_PREP_NOT_ALIVE:
+ case ERTS_DSIG_PREP_NOT_CONNECTED:
+ break;
+ case ERTS_DSIG_PREP_PENDING:
+ case ERTS_DSIG_PREP_CONNECTED:
+ if (dist->connection_id != ctx.connection_id)
+ break;
+ code = erts_dsig_send_exit_tt(&ctx,
+ c_p->common.id,
+ lnk->other.item,
+ reason,
+ SEQ_TRACE_TOKEN(c_p));
+ switch (code) {
+ case ERTS_DSIG_SEND_CONTINUE:
+ erts_set_gc_state(c_p, 0);
+ ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
+ /* fall-through */
+ case ERTS_DSIG_SEND_YIELD:
+ break;
+ case ERTS_DSIG_SEND_OK:
+ break;
+ case ERTS_DSIG_SEND_TOO_LRG:
+ erts_set_gc_state(c_p, 1);
+ break;
+ default:
+ ASSERT(! "Invalid dsig send exit monitor result");
+ break;
+ }
+ break;
+ default:
+ ASSERT(! "Invalid dsig prep exit monitor result");
+ break;
+ }
+ if (ldp)
+ erts_link_release_both(ldp);
+ else if (lnk)
+ erts_link_release(lnk);
+ return reds - (ctx.reds / TERM_TO_BINARY_LOOP_FACTOR);
+}
+
+int
+erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt, Sint reds)
{
+ ErtsProcExitContext *ctxt = (ErtsProcExitContext *) vctxt;
Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
ErtsLinkData *ldp = NULL;
@@ -12084,32 +12419,40 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
DistEntry *dep;
ErtsMonLnkDist *dist;
ErtsLink *dlnk;
- ErtsDSigData dsd;
+ ErtsDSigSendContext ctx;
int code;
- dlnk = erts_link_to_other(lnk, &ldp);
- dist = ((ErtsLinkDataExtended *) ldp)->dist;
+ if (is_immed(reason)) {
+ dlnk = erts_link_to_other(lnk, &ldp);
+ dist = ((ErtsLinkDataExtended *) ldp)->dist;
- ASSERT(is_external_pid(lnk->other.item));
- dep = external_pid_dist_entry(lnk->other.item);
+ ASSERT(is_external_pid(lnk->other.item));
+ dep = external_pid_dist_entry(lnk->other.item);
- ASSERT(dep != erts_this_dist_entry);
+ ASSERT(dep != erts_this_dist_entry);
- if (!erts_link_dist_delete(dlnk))
- ldp = NULL;
+ if (!erts_link_dist_delete(dlnk))
+ ldp = NULL;
- code = erts_dsig_prepare(&dsd, dep, c_p, 0, ERTS_DSP_NO_LOCK, 0, 0);
- switch (code) {
- case ERTS_DSIG_PREP_CONNECTED:
- case ERTS_DSIG_PREP_PENDING:
- if (dist->connection_id == dsd.connection_id) {
- code = erts_dsig_send_exit_tt(&dsd,
- c_p->common.id,
- lnk->other.item,
- reason,
- SEQ_TRACE_TOKEN(c_p));
- ASSERT(code == ERTS_DSIG_SEND_OK);
+ code = erts_dsig_prepare(&ctx, dep, c_p, 0, ERTS_DSP_NO_LOCK, 1, 1, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == ctx.connection_id) {
+ code = erts_dsig_send_exit_tt(&ctx,
+ c_p->common.id,
+ lnk->other.item,
+ reason,
+ SEQ_TRACE_TOKEN(c_p));
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ break;
+ default:
+ break;
}
+ } else {
+ erts_link_tree_insert(&ctxt->dist_links, lnk);
+ return 1;
}
break;
}
@@ -12122,6 +12465,7 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
erts_link_release_both(ldp);
else if (lnk)
erts_link_release(lnk);
+ return 1;
}
/* this function finishes a process and propagates exit messages - called
@@ -12155,11 +12499,8 @@ erts_do_exit_process(Process* p, Eterm reason)
set_self_exiting(p, reason, NULL, NULL, NULL);
- if (IS_TRACED(p)) {
- if (IS_TRACED_FL(p, F_TRACE_CALLS))
- erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
-
- }
+ if (IS_TRACED_FL(p, F_TRACE_CALLS))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
erts_trace_check_exiting(p->common.id);
@@ -12174,288 +12515,444 @@ erts_do_exit_process(Process* p, Eterm reason)
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ if (IS_TRACED_FL(p, F_TRACE_PROCS))
trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason);
-
/*
* p->u.initial of this process can *not* be used anymore;
* will be overwritten by misc termination data.
*/
p->u.terminate = NULL;
+ BUMP_REDS(p, 100);
+
erts_continue_exit_process(p);
}
-void
-erts_continue_exit_process(Process *p)
-{
+enum continue_exit_phase {
+ ERTS_CONTINUE_EXIT_TIMERS,
+ ERTS_CONTINUE_EXIT_BLCKD_MSHED,
+ ERTS_CONTINUE_EXIT_BLCKD_NMSHED,
+ ERTS_CONTINUE_EXIT_USING_DB,
+ ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS,
+ ERTS_CONTINUE_EXIT_FREE,
+ ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS_AFTER,
+ ERTS_CONTINUE_EXIT_LINKS,
+ ERTS_CONTINUE_EXIT_MONITORS,
+ ERTS_CONTINUE_EXIT_LT_MONITORS,
+ ERTS_CONTINUE_EXIT_HANDLE_PROC_SIG,
+ ERTS_CONTINUE_EXIT_DIST_LINKS,
+ ERTS_CONTINUE_EXIT_DIST_MONITORS,
+ ERTS_CONTINUE_EXIT_DONE,
+};
+
+struct continue_exit_state {
+ enum continue_exit_phase phase;
ErtsLink *links;
ErtsMonitor *monitors;
ErtsMonitor *lt_monitors;
+ Eterm reason;
+ ErtsProcExitContext pectxt;
+ DistEntry *dep;
+ void *yield_state;
+};
+
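The struct above turns erts_continue_exit_process() into a resumable state machine: the phase enum records how far teardown got, and the function re-enters through a switch whose cases fall through, so each call continues exactly where the last yield stopped (the diff keeps the state on the stack and copies it to the heap only on the first yield). A reduced model of the control flow, assuming an already-allocated state:

    enum phase { PH_TIMERS, PH_LINKS, PH_DONE };
    struct exit_state { enum phase phase; };

    /* Returns 1 when finished; 0 means the reduction budget ran out and
     * the caller must reschedule and call again with the same state. */
    static int continue_exit(struct exit_state *st, int reds)
    {
        switch (st->phase) {
        case PH_TIMERS:
            reds -= 10;                 /* ...cancel timers... */
            st->phase = PH_LINKS;
            if (reds <= 0) return 0;    /* yield; resume here later */
            /* fall through */
        case PH_LINKS:
            reds -= 10;                 /* ...tear down links... */
            st->phase = PH_DONE;
            /* fall through */
        case PH_DONE:
            break;
        }
        return 1;
    }
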
+void
+erts_continue_exit_process(Process *p)
+{
+ struct continue_exit_state static_state, *trap_state = &static_state;
ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
- Eterm reason = p->fvalue;
- DistEntry *dep = NULL;
erts_aint32_t state;
int delay_del_proc = 0;
- ErtsProcExitContext pectxt;
-
+ Sint reds = ERTS_BIF_REDS_LEFT(p);
#ifdef DEBUG
int yield_allowed = 1;
#endif
+ if (p->u.terminate) {
+ trap_state = p->u.terminate;
+ } else {
+ trap_state->phase = ERTS_CONTINUE_EXIT_TIMERS;
+ trap_state->reason = p->fvalue;
+ trap_state->dep = NULL;
+ trap_state->yield_state = NULL;
+ }
+
ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
ASSERT(ERTS_PROC_IS_EXITING(p));
ASSERT(erts_proc_read_refc(p) > 0);
- if (p->bif_timers) {
- if (erts_cancel_bif_timers(p, &p->bif_timers, &p->u.terminate)) {
- ASSERT(erts_proc_read_refc(p) > 0);
- goto yield;
- }
- ASSERT(erts_proc_read_refc(p) > 0);
- p->bif_timers = NULL;
- }
-
- if (p->flags & F_SCHDLR_ONLN_WAITQ)
- abort_sched_onln_chng_waitq(p);
-
- if (p->flags & F_HAVE_BLCKD_MSCHED) {
- ErtsSchedSuspendResult ssr;
- ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 0, 1);
- switch (ssr) {
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- goto yield;
- case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE:
- case ERTS_SCHDLR_SSPND_YIELD_DONE:
- p->flags &= ~F_HAVE_BLCKD_MSCHED;
- break;
- case ERTS_SCHDLR_SSPND_EINVAL:
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
- __FILE__, __LINE__, (int) ssr);
- }
- }
- if (p->flags & F_HAVE_BLCKD_NMSCHED) {
- ErtsSchedSuspendResult ssr;
- ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 1, 1);
- switch (ssr) {
- case ERTS_SCHDLR_SSPND_YIELD_RESTART:
- goto yield;
- case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_MSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED:
- case ERTS_SCHDLR_SSPND_DONE:
- case ERTS_SCHDLR_SSPND_YIELD_DONE:
- p->flags &= ~F_HAVE_BLCKD_MSCHED;
- break;
- case ERTS_SCHDLR_SSPND_EINVAL:
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
- __FILE__, __LINE__, (int) ssr);
- }
- }
+restart:
+ switch (trap_state->phase) {
+ case ERTS_CONTINUE_EXIT_TIMERS:
+ if (p->bif_timers) {
+ reds = erts_cancel_bif_timers(p, &p->bif_timers, &trap_state->yield_state, reds);
+ if (reds <= 0) goto yield;
+ p->bif_timers = NULL;
+ }
- if (p->flags & F_USING_DB) {
- if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN))
- goto yield;
- p->flags &= ~F_USING_DB;
- }
+ if (p->flags & F_SCHDLR_ONLN_WAITQ) {
+ abort_sched_onln_chng_waitq(p);
+ reds -= 100;
+ }
- erts_set_gc_state(p, 1);
- state = erts_atomic32_read_acqb(&p->state);
- if ((state & ERTS_PSFLG_SYS_TASKS) || p->dirty_sys_tasks) {
- if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
- goto yield;
- }
+ trap_state->phase = ERTS_CONTINUE_EXIT_BLCKD_MSHED;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_BLCKD_MSHED:
+
+ if (p->flags & F_HAVE_BLCKD_MSCHED) {
+ ErtsSchedSuspendResult ssr;
+ ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 0, 1);
+ switch (ssr) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ p->flags &= ~F_HAVE_BLCKD_MSCHED;
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
+ __FILE__, __LINE__, (int) ssr);
+ }
+ reds -= 100;
+ }
-#ifdef DEBUG
- erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
- ASSERT(p->dirty_sys_tasks == NULL);
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-#endif
+ trap_state->phase = ERTS_CONTINUE_EXIT_BLCKD_NMSHED;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_BLCKD_NMSHED:
+
+ if (p->flags & F_HAVE_BLCKD_NMSCHED) {
+ ErtsSchedSuspendResult ssr;
+ ssr = erts_block_multi_scheduling(p, ERTS_PROC_LOCK_MAIN, 0, 1, 1);
+ switch (ssr) {
+ case ERTS_SCHDLR_SSPND_DONE:
+ case ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED:
+ case ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED:
+ p->flags &= ~F_HAVE_BLCKD_MSCHED;
+ break;
+ default:
+ erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error: %d\n",
+ __FILE__, __LINE__, (int) ssr);
+ }
+ reds -= 100;
+ }
- if (p->flags & F_USING_DDLL) {
- erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
- p->flags &= ~F_USING_DDLL;
- }
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_USING_DB;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_USING_DB:
- /*
- * The registered name *should* be the last "erlang resource" to
- * cleanup.
- */
- if (p->common.u.alive.reg) {
- (void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
- ASSERT(!p->common.u.alive.reg);
- }
+ if (p->flags & F_USING_DB) {
+ if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN, &trap_state->yield_state))
+ goto yield;
+ p->flags &= ~F_USING_DB;
+ }
- if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, curr_locks, am_out_exited);
+ erts_set_gc_state(p, 1);
- erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- curr_locks = ERTS_PROC_LOCKS_ALL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS;
+ case ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS:
+
+ state = erts_atomic32_read_acqb(&p->state);
+ if ((state & ERTS_PSFLG_SYS_TASKS) || p->dirty_sys_tasks) {
+ reds -= cleanup_sys_tasks(p, state, reds);
+ if (reds <= 0) goto yield;
+ }
+
+ trap_state->phase = ERTS_CONTINUE_EXIT_FREE;
+ case ERTS_CONTINUE_EXIT_FREE:
- /*
- * From this point on we are no longer allowed to yield
- * this process.
- */
#ifdef DEBUG
- yield_allowed = 0;
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
+ ASSERT(p->dirty_sys_tasks == NULL);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#endif
- /*
- * Note! The monitor and link fields will be overwritten
- * by erts_ptab_delete_element() below.
- */
- links = ERTS_P_LINKS(p);
- monitors = ERTS_P_MONITORS(p);
- lt_monitors = ERTS_P_LT_MONITORS(p);
+ if (p->flags & F_USING_DDLL) {
+ erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
+ p->flags &= ~F_USING_DDLL;
+ }
- {
- /* Do *not* use erts_get_runq_proc() */
- ErtsRunQueue *rq;
- rq = erts_get_runq_current(erts_proc_sched_data(p));
+ /*
+ * The registered name *should* be the last "erlang resource" to
+ * cleanup.
+ */
+ if (p->common.u.alive.reg) {
+ (void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
+ ASSERT(!p->common.u.alive.reg);
+ }
- erts_runq_lock(rq);
+ erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ curr_locks = ERTS_PROC_LOCKS_ALL;
- ASSERT(p->scheduler_data);
- ASSERT(p->scheduler_data->current_process == p);
- ASSERT(p->scheduler_data->free_process == NULL);
+ /*
+ * Note! The monitor and link fields will be overwritten
+ * by erts_ptab_delete_element() below.
+ */
+ trap_state->links = ERTS_P_LINKS(p);
+ trap_state->monitors = ERTS_P_MONITORS(p);
+ trap_state->lt_monitors = ERTS_P_LT_MONITORS(p);
- p->scheduler_data->current_process = NULL;
- p->scheduler_data->free_process = p;
+ {
+ /* Do *not* use erts_get_runq_proc() */
+ ErtsRunQueue *rq;
+ rq = erts_get_runq_current(erts_proc_sched_data(p));
- /* Time of death! */
- erts_ptab_delete_element(&erts_proc, &p->common);
+ erts_runq_lock(rq);
- erts_runq_unlock(rq);
- }
+ ASSERT(p->scheduler_data);
+ ASSERT(p->scheduler_data->current_process == p);
+ ASSERT(p->scheduler_data->free_process == NULL);
- /*
- * All "erlang resources" have to be deallocated before this point,
- * e.g. registered name, so monitoring and linked processes can
- * be sure that all interesting resources have been deallocated
- * when the monitors and/or links hit.
- */
+ /* Time of death! */
+ erts_ptab_delete_element(&erts_proc, &p->common);
- {
- /* Inactivate and notify free */
- erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state);
- int refc_inced = 0;
- while (1) {
- n = e = a;
- ASSERT(a & ERTS_PSFLG_EXITING);
- n |= ERTS_PSFLG_FREE;
- n &= ~(ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS);
- if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
- erts_proc_inc_refc(p);
- refc_inced = 1;
- }
- a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
- if (a == e)
- break;
- }
+ erts_runq_unlock(rq);
+ }
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- p->flags |= F_DELAYED_DEL_PROC;
- delay_del_proc = 1;
- /*
- * The dirty scheduler decrease refc
- * when done with the process...
- */
- }
+ /*
+ * All "erlang resources" have to be deallocated before this point,
+ * e.g. registered name, so monitoring and linked processes can
+ * be sure that all interesting resources have been deallocated
+ * when the monitors and/or links hit.
+ */
- if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
- erts_proc_dec_refc(p);
- }
+ {
+ /* Inactivate and notify free */
+ erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state);
+ int refc_inced = 0;
+ while (1) {
+ n = e = a;
+ ASSERT(a & ERTS_PSFLG_EXITING);
+ n |= ERTS_PSFLG_FREE;
+ if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
+ erts_proc_inc_refc(p);
+ refc_inced = 1;
+ }
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ }
- dep = ((p->flags & F_DISTRIBUTION)
- ? ERTS_PROC_SET_DIST_ENTRY(p, NULL)
- : NULL);
+ if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
+ erts_proc_dec_refc(p);
+ }
+ trap_state->dep = ((p->flags & F_DISTRIBUTION)
+ ? ERTS_PROC_SET_DIST_ENTRY(p, NULL)
+ : NULL);
+
+ reds -= 50;
- /*
- * It might show up signal prio elevation tasks until we
- * have entered free state. Cleanup such tasks now.
- */
- state = erts_atomic32_read_acqb(&p->state);
- if (!(state & ERTS_PSFLG_SYS_TASKS))
- erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- else {
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ curr_locks = ERTS_PROC_LOCK_MAIN;
+ trap_state->phase = ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS_AFTER;
+ case ERTS_CONTINUE_EXIT_CLEAN_SYS_TASKS_AFTER:
+ /*
+ * Signal prio elevation tasks might show up until we have
+ * entered the free state. Clean up such tasks now.
+ */
- do {
- (void) cleanup_sys_tasks(p, state, CONTEXT_REDS);
- state = erts_atomic32_read_acqb(&p->state);
- } while (state & ERTS_PSFLG_SYS_TASKS);
-
- erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- }
+ state = erts_atomic32_read_acqb(&p->state);
+ if ((state & ERTS_PSFLG_SYS_TASKS) || p->dirty_sys_tasks) {
+ reds -= cleanup_sys_tasks(p, state, reds);
+ if (reds <= 0) goto yield;
+ }
+
+ /* Needs to be unlocked for erts_do_net_exits to work?!? */
+ // erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
#ifdef DEBUG
- erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- ASSERT(p->sys_task_qs == NULL);
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ ASSERT(p->sys_task_qs == NULL);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#endif
- if (dep) {
- erts_do_net_exits(dep, (reason == am_kill) ? am_killed : reason);
- erts_deref_dist_entry(dep);
- }
+ if (trap_state->dep) {
+ erts_do_net_exits(trap_state->dep,
+ (trap_state->reason == am_kill) ? am_killed : trap_state->reason);
+ erts_deref_dist_entry(trap_state->dep);
+ }
- pectxt.c_p = p;
- pectxt.reason = reason;
+ trap_state->pectxt.c_p = p;
+ trap_state->pectxt.reason = trap_state->reason;
+ trap_state->pectxt.dist_links = NULL;
+ trap_state->pectxt.dist_monitors = NULL;
+ trap_state->pectxt.dist_state = NIL;
+
+ erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
+
+ erts_proc_sig_fetch(p);
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_LINKS;
+ if (reds <= 0) goto yield;
+ case ERTS_CONTINUE_EXIT_LINKS:
+
+ reds = erts_link_tree_foreach_delete_yielding(
+ &trap_state->links,
+ erts_proc_exit_handle_link,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0)
+ goto yield;
+
+ ASSERT(!trap_state->links);
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_MONITORS;
+ case ERTS_CONTINUE_EXIT_MONITORS:
+
+ reds = erts_monitor_tree_foreach_delete_yielding(
+ &trap_state->monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0)
+ goto yield;
+
+ ASSERT(!trap_state->monitors);
+ trap_state->yield_state = NULL;
+ trap_state->phase = ERTS_CONTINUE_EXIT_LT_MONITORS;
+ case ERTS_CONTINUE_EXIT_LT_MONITORS:
+
+ reds = erts_monitor_list_foreach_delete_yielding(
+ &trap_state->lt_monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0)
+ goto yield;
+
+ ASSERT(!trap_state->lt_monitors);
+ trap_state->phase = ERTS_CONTINUE_EXIT_HANDLE_PROC_SIG;
+ case ERTS_CONTINUE_EXIT_HANDLE_PROC_SIG: {
+ Sint r = reds;
+
+ if (!erts_proc_sig_handle_exit(p, &r))
+ goto yield;
+
+ reds -= r;
+
+ trap_state->phase = ERTS_CONTINUE_EXIT_DIST_LINKS;
+ }
+ case ERTS_CONTINUE_EXIT_DIST_LINKS: {
+
+ continue_dist_send:
+ if (is_not_nil(trap_state->pectxt.dist_state)) {
+ Binary* bin = erts_magic_ref2bin(trap_state->pectxt.dist_state);
+ ErtsDSigSendContext* ctx = (ErtsDSigSendContext*) ERTS_MAGIC_BIN_DATA(bin);
+ Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * TERM_TO_BINARY_LOOP_FACTOR);
+ int result;
+
+ ctx->reds = initial_reds;
+ result = erts_dsig_send(ctx);
+
+ /* erts_dsig_send bumps reductions on the process in the ctx */
+ reds = ERTS_BIF_REDS_LEFT(p);
+
+ switch (result) {
+ case ERTS_DSIG_SEND_OK:
+ case ERTS_DSIG_SEND_TOO_LRG: /*SEND_SYSTEM_LIMIT*/
+ case ERTS_DSIG_SEND_YIELD: /*SEND_YIELD_RETURN*/
+ break;
+ case ERTS_DSIG_SEND_CONTINUE: { /*SEND_YIELD_CONTINUE*/
+ goto yield;
+ }
+ }
+ erts_set_gc_state(p, 1);
+ trap_state->pectxt.dist_state = NIL;
+ if (reds <= 0)
+ goto yield;
+ goto restart;
+ }
- erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
+ reds = erts_link_tree_foreach_delete_yielding(
+ &trap_state->pectxt.dist_links,
+ erts_proc_exit_handle_dist_link,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0 || is_not_nil(trap_state->pectxt.dist_state))
+ goto yield;
+ trap_state->phase = ERTS_CONTINUE_EXIT_DIST_MONITORS;
+ }
+ case ERTS_CONTINUE_EXIT_DIST_MONITORS: {
+
+ if (is_not_nil(trap_state->pectxt.dist_state))
+ goto continue_dist_send;
+
+ reds = erts_monitor_tree_foreach_delete_yielding(
+ &trap_state->pectxt.dist_monitors,
+ erts_proc_exit_handle_dist_monitor,
+ (void *) &trap_state->pectxt,
+ &trap_state->yield_state,
+ reds);
+ if (reds <= 0 || is_not_nil(trap_state->pectxt.dist_state))
+ goto yield;
+
+ trap_state->phase = ERTS_CONTINUE_EXIT_DONE;
+ }
+ case ERTS_CONTINUE_EXIT_DONE: {
+ erts_aint_t state;
+ /*
+ * From this point on we are no longer allowed to yield
+ * this process.
+ */
- erts_proc_sig_fetch(p);
+#ifdef DEBUG
+ yield_allowed = 0;
+#endif
- erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+ /* Set state to not active as we don't want this process
+ to be scheduled in again after this. */
+ state = erts_atomic32_read_band_relb(&p->state,
+ ~(ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS));
- if (links) {
- erts_link_tree_foreach_delete(&links,
- erts_proc_exit_handle_link,
- (void *) &pectxt);
- ASSERT(!links);
- }
+ ASSERT(p->scheduler_data);
+ ASSERT(p->scheduler_data->current_process == p);
+ ASSERT(p->scheduler_data->free_process == NULL);
- if (monitors) {
- erts_monitor_tree_foreach_delete(&monitors,
- erts_proc_exit_handle_monitor,
- (void *) &pectxt);
- ASSERT(!monitors);
- }
+ p->scheduler_data->current_process = NULL;
+ p->scheduler_data->free_process = p;
- if (lt_monitors) {
- erts_monitor_list_foreach_delete(&lt_monitors,
- erts_proc_exit_handle_monitor,
- (void *) &pectxt);
- ASSERT(!lt_monitors);
- }
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ p->flags |= F_DELAYED_DEL_PROC;
+ delay_del_proc = 1;
+ /*
+ * The dirty scheduler decreases refc
+ * when done with the process...
+ */
+ }
- /*
- * erts_proc_sig_handle_exit() implements yielding.
- * However, this function cannot handle it yet... loop
- * until done...
- */
- while (!0) {
- int reds = CONTEXT_REDS;
- if (erts_proc_sig_handle_exit(p, &reds))
- break;
+ erts_schedule_thr_prgr_later_cleanup_op(
+ (void (*)(void*))erts_proc_dec_refc,
+ (void *) &p->common,
+ &p->common.u.release,
+ sizeof(Process));
+
+ break;
+ }
}
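
Instead of freeing the process outright, the ERTS_CONTINUE_EXIT_DONE phase above schedules the final refc decrement as a thread-progress later op, so any scheduler still holding a stale pointer passes a progress point before the memory can go away. A toy epoch model of that idea, purely illustrative and much simpler than the real thread-progress machinery:

    #include <stdatomic.h>
    #include <stdlib.h>

    #define NTHREADS 4
    static atomic_long thread_epoch[NTHREADS];  /* bumped at safe points */

    struct deferred { void *obj; long retired_at; };

    static long min_epoch(void)
    {
        long m = atomic_load(&thread_epoch[0]);
        for (int i = 1; i < NTHREADS; i++) {
            long e = atomic_load(&thread_epoch[i]);
            if (e < m) m = e;
        }
        return m;
    }

    /* Reclaim only after every thread has passed the retirement epoch,
     * so no thread can still be dereferencing the object. */
    static int try_reclaim(struct deferred *d)
    {
        if (min_epoch() <= d->retired_at)
            return 0;
        free(d->obj);
        return 1;
    }
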
+ if (trap_state != &static_state) {
+ erts_free(ERTS_ALC_T_CONT_EXIT_TRAP, trap_state);
+ p->u.terminate = NULL;
+ }
+
ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, curr_locks, am_out_exited);
+
erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN);
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
@@ -12467,15 +12964,30 @@ erts_continue_exit_process(Process *p)
yield:
-#ifdef DEBUG
ASSERT(yield_allowed);
-#endif
ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
+ ASSERT(erts_proc_read_refc(p) > 0);
+
+ if (trap_state == &static_state) {
+ trap_state = erts_alloc(ERTS_ALC_T_CONT_EXIT_TRAP, sizeof(*trap_state));
+ sys_memcpy(trap_state, &static_state, sizeof(*trap_state));
+ p->u.terminate = trap_state;
+ }
+
+ ASSERT(p->scheduler_data);
+ ASSERT(p->scheduler_data->current_process == p);
+ ASSERT(p->scheduler_data->free_process == NULL);
+
+ if (trap_state->phase >= ERTS_CONTINUE_EXIT_FREE) {
+ p->scheduler_data->current_process = NULL;
+ p->scheduler_data->free_process = p;
+ }
p->i = (BeamInstr *) beam_continue_exit;
+ /* Why is this lock taken??? */
if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) {
erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
curr_locks |= ERTS_PROC_LOCK_STATUS;