author     Rickard Green <[email protected]>   2017-01-02 15:53:01 +0100
committer  Rickard Green <[email protected]>   2017-01-02 15:53:01 +0100
commit     ae1c4b4dd1a4333727adb6875340e556a403a7fc (patch)
tree       ac5e193c7e12e5e0d0c190775029b6c2d3dbfad6
parent     e6f982d7374e148be51865a781af1f1982667af9 (diff)
parent     9cb4770469218f65dbaec6c71d12b4aa722ac791 (diff)
Merge branch 'maint'
* maint:
  Multi scheduling block bug-fixes
  Fix VM global GC info for dirty schedulers
  Leave dirty work in dirty run-queues on multi scheduling block
  Fix premature removal of process struct
  Fix crash due to GC of node entry on dirty scheduler
-rw-r--r--  erts/emulator/beam/erl_gc.c          39
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c     2
-rw-r--r--  erts/emulator/beam/erl_lock_check.c   1
-rw-r--r--  erts/emulator/beam/erl_process.c    229
-rw-r--r--  erts/emulator/beam/erl_process.h      2
5 files changed, 179 insertions(+), 94 deletions(-)
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 28b2b02914..818f89f0e3 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -176,6 +176,13 @@ typedef struct {
erts_smp_atomic32_t refc;
} ErtsGCInfoReq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static struct {
+ erts_mtx_t mtx;
+ ErtsGCInfo info;
+} dirty_gc;
+#endif
+
static ERTS_INLINE int
gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
{
@@ -259,6 +266,11 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ init_gc_info(&dirty_gc.info);
+#endif
+
init_gcireq_alloc();
}
@@ -735,8 +747,19 @@ do_major_collection:
monitor_large_heap(p);
}
- esdp->gc_info.garbage_cols++;
- esdp->gc_info.reclaimed += reclaimed_now;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ dirty_gc.info.garbage_cols++;
+ dirty_gc.info.reclaimed += reclaimed_now;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+ else
+#endif
+ {
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+ }
FLAGS(p) &= ~F_FORCE_GC;
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -3009,6 +3032,18 @@ reply_gc_info(void *vgcirp)
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /*
+ * Add dirty schedulers info on requesting
+ * schedulers info
+ */
+ if (gcirp->req_sched == esdp->no) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ reclaimed += dirty_gc.info.reclaimed;
+ garbage_cols += dirty_gc.info.garbage_cols;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+#endif
sz = 0;
hpp = NULL;
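
The erl_gc.c hunks above make dirty schedulers account their GC work in a single shared, mutex-protected counter pair instead of per-scheduler counters, and let the scheduler that answers a gc_info request fold the shared totals in exactly once. A minimal sketch of that pattern, assuming plain pthreads and simplified stand-in types (gc_info_t, account_gc and report_totals are illustrative names, not the real ERTS API):

#include <pthread.h>

/* Simplified stand-in for ErtsGCInfo and the dirty_gc struct above. */
typedef struct { unsigned long garbage_cols, reclaimed; } gc_info_t;

static struct {
    pthread_mutex_t mtx;   /* all dirty schedulers serialize on this lock */
    gc_info_t info;        /* shared totals for all dirty schedulers */
} dirty_gc = { PTHREAD_MUTEX_INITIALIZER, { 0, 0 } };

/* End of a collection: a dirty scheduler updates the shared counters under
 * the mutex; a normal scheduler keeps using its own per-scheduler info. */
static void account_gc(gc_info_t *own, int is_dirty, unsigned long reclaimed_now)
{
    if (is_dirty) {
        pthread_mutex_lock(&dirty_gc.mtx);
        dirty_gc.info.garbage_cols++;
        dirty_gc.info.reclaimed += reclaimed_now;
        pthread_mutex_unlock(&dirty_gc.mtx);
    } else {
        own->garbage_cols++;
        own->reclaimed += reclaimed_now;
    }
}

/* The scheduler handling the statistics request adds the dirty totals once,
 * mirroring the req_sched check in reply_gc_info() above. */
static void report_totals(const gc_info_t *own, unsigned long *gcs, unsigned long *recl)
{
    *gcs  = own->garbage_cols;
    *recl = own->reclaimed;
    pthread_mutex_lock(&dirty_gc.mtx);
    *gcs  += dirty_gc.info.garbage_cols;
    *recl += dirty_gc.info.reclaimed;
    pthread_mutex_unlock(&dirty_gc.mtx);
}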
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index d29d079fc5..38bebc7576 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -2666,7 +2666,7 @@ erts_start_timer_callback(ErtsMonotonicTime tmo,
tmo);
twt = tmo < ERTS_TIMER_WHEEL_MSEC;
- if (esdp)
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
start_callback_timer(esdp,
twt,
timeout_pos,
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 356f5ca71e..5f2a687f74 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -127,6 +127,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "run_queue", "address" },
#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
+ { "dirty_gc_info", NULL },
#endif
{ "process_table", NULL },
{ "cpu_info", NULL },
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 4fb0f9e975..a6fdd67088 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2059,6 +2059,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -6233,6 +6234,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
int queue;
erts_aint32_t dact, max_qbit;
+ /* Do not enqueue free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
/* Termination should be done on an ordinary scheduler */
if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6425,6 +6432,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
/*
* schedule_out_process() return with c_rq locked.
+ *
+ * Return non-zero value if caller should decrease
+ * reference count on the process when done with it...
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6479,12 +6489,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 0;
-
+#if !defined(ERTS_SMP)
+ /* Decrement refc if process struct is free... */
+ return !!(n & ERTS_PSFLG_FREE);
+#else
+ /* Decrement refc if scheduled out from dirty scheduler... */
+ return !is_normal_sched;
+#endif
}
else {
Process* sched_p;
+ ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6500,11 +6516,14 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(runq);
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
+
/* Enqueue the process */
enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
- return 1;
+ return 0;
erts_smp_runq_unlock(runq);
@@ -6512,9 +6531,15 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 1;
+ /*
+ * Decrement refc if process is scheduled out by a
+ * dirty scheduler, and we have not just scheduled
+ * the process using the ordinary process struct
+ * on a dirty run-queue again...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
}
-
}
static ERTS_INLINE void
@@ -6529,8 +6554,17 @@ add2runq(int enqueue, erts_aint32_t prio,
if (runq) {
Process *sched_p;
- if (enqueue > 0)
+ if (enqueue > 0) {
sched_p = proc;
+ /*
+ * Refc on process struct (i.e. true struct,
+ * not proxy-struct) increased while in a
+ * dirty run-queue or executing on a dirty
+ * scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
else {
Process *pxy;
@@ -7054,12 +7088,18 @@ typedef struct {
} ErtsSchdlrSspndResume;
static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
if (p) {
resume_process(p, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
}
}
@@ -7068,17 +7108,20 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
ErtsSchdlrSspndResume *resume)
{
if (is_internal_pid(resume->onln.chngr)) {
- schdlr_sspnd_resume_proc(resume->onln.chngr);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
resume->onln.chngr = NIL;
}
if (is_internal_pid(resume->onln.nxt)) {
- schdlr_sspnd_resume_proc(resume->onln.nxt);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
resume->onln.nxt = NIL;
}
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(plp->pid);
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
proclist_destroy(plp);
}
}
@@ -7130,16 +7173,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (sched_type != ERTS_SCHED_NORMAL)
erts_smp_runq_unlock(esdp->run_queue);
- }
else {
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7152,8 +7187,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
sched_wall_time_change(esdp, 0);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
@@ -7197,15 +7232,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
/* resume processes that initiated the multi scheduling block... */
plp = msb[i]->chngq;
- while (plp) {
- erts_proclist_store_last(&msb[i]->blckrs,
- proclist_copy(plp));
- plp = plp->next;
- }
- if (end_plp)
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
end_plp->next = resume.msb.chngrs;
- resume.msb.chngrs = msb[i]->chngq;
- msb[i]->chngq = NULL;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
}
}
}
@@ -7269,24 +7307,14 @@ suspend_scheduler(ErtsSchedulerData *esdp)
while (1) {
ErtsMonotonicTime current_time;
- erts_aint32_t qmask;
erts_aint32_t flgs;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
-
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (qmask) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
+ if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
- }
else {
+ erts_aint32_t qmask;
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -7414,12 +7442,23 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == schdlr_sspnd.active) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
-
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd.online == schdlr_sspnd.active) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
ASSERT((sched_type == ERTS_SCHED_NORMAL
? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
@@ -7552,7 +7591,7 @@ abort_sched_onln_chng_waitq(Process *p)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
if (is_internal_pid(resume))
- schdlr_sspnd_resume_proc(resume);
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
}
ErtsSchedSuspendResult
@@ -7950,21 +7989,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
else { /* ------ UNBLOCK ------ */
if (p->flags & have_blckd_flg) {
- ErtsProcList *plps[2];
+ ErtsProcList **plpps[3] = {0};
ErtsProcList *plp;
- int limit = 0;
- plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
- if (all)
- plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
- for (ix = 0; ix < limit; ix++) {
- plp = plps[ix];
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
while (plp) {
ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(msbp->blckrs, plp);
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&msbp->blckrs, tmp_plp);
+ erts_proclist_remove(plpps[ix], tmp_plp);
proclist_destroy(tmp_plp);
if (!all)
break;
@@ -9587,40 +9625,45 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
esdp->reductions += reds;
- /* schedule_out_process() returns with rq locked! */
- schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
- proxy_p = NULL;
+ {
+ int dec_refc;
- ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
- esdp->current_process = NULL;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+
+ esdp->current_process = NULL;
#ifdef ERTS_SMP
- p->scheduler_data = NULL;
+ p->scheduler_data = NULL;
#endif
- erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_STATUS
- | ERTS_PROC_LOCK_TRACE));
+ erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
- if (state & ERTS_PSFLG_FREE) {
- if (!is_normal_sched) {
- ASSERT(p->flags & F_DELAYED_DEL_PROC);
- erts_proc_dec_refc(p);
- }
- else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- erts_proc_dec_refc(p);
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
#endif
- }
- }
+
+ if (dec_refc)
+ erts_proc_dec_refc(p);
+ }
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
@@ -9896,8 +9939,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9929,6 +9976,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE)))
#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING))
@@ -9961,6 +10009,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* free and not queued by proxy */
erts_proc_dec_refc(p);
}
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
@@ -13094,15 +13144,14 @@ erts_continue_exit_process(Process *p)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
delay_del_proc = 1;
/*
- * The dirty scheduler will also decrease
- * refc when done...
+ * The dirty scheduler decrease refc
+ * when done with the process...
*/
- erts_proc_inc_refc(p);
}
#endif
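
The erl_process.c changes above all enforce one lifetime rule: the real process struct (not a proxy) holds an extra reference for as long as it sits in a dirty run-queue or is being executed by a dirty scheduler, so that exit handling on a normal scheduler cannot free the struct while a dirty scheduler still uses it. A rough sketch of that rule, with hypothetical names (proc_t, dirty_enqueue and dirty_schedule_out are illustrative, not the real ERTS functions):

#include <stdlib.h>

/* Hypothetical, simplified process struct with an explicit refcount. */
typedef struct { int refc; } proc_t;

static void proc_inc_refc(proc_t *p) { p->refc++; }

static void proc_dec_refc(proc_t *p)
{
    if (--p->refc == 0)
        free(p);                   /* the struct may go away here */
}

/* Entering a dirty run-queue pins the struct with an extra reference,
 * as in the add2runq() and schedule_out_process() hunks above. */
static void dirty_enqueue(proc_t *p)
{
    proc_inc_refc(p);
    /* ... insert p into the dirty run-queue ... */
}

/* A dirty scheduler that is done with the process drops that reference,
 * unless it just put the same struct back on a dirty run-queue. */
static void dirty_schedule_out(proc_t *p, int requeued_on_dirty_rq)
{
    if (!requeued_on_dirty_rq)
        proc_dec_refc(p);
}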
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 1c4b9f149d..7193b7d1db 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1681,7 +1681,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
return NULL;
else {
ErtsProcList *res = *list;
- if (res == *list)
+ if (res->next == *list)
*list = NULL;
else
*list = res->next;
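
The erl_process.h hunk fixes erts_proclist_fetch_first(): on the circular process list the head must be cleared only when the fetched element was the sole entry, i.e. when its next pointer loops back to the head, not unconditionally as the old check did. A minimal sketch of the corrected logic on a singly linked circular list (illustrative only; the real ErtsProcList is doubly linked and needs no relinking walk):

#include <stddef.h>

/* Simplified circular list node. */
typedef struct node { struct node *next; } node_t;

/* Remove and return the first element, or NULL if the list is empty. */
static node_t *fetch_first(node_t **list)
{
    node_t *res = *list;
    if (!res)
        return NULL;
    if (res->next == *list) {      /* only element left: list becomes empty */
        *list = NULL;
    } else {
        node_t *last = res;
        while (last->next != res)  /* find the last node to close the ring */
            last = last->next;
        last->next = res->next;
        *list = res->next;
    }
    res->next = res;               /* detach the fetched element */
    return res;
}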