Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--  erts/emulator/beam/erl_process.c  257
1 file changed, 130 insertions(+), 127 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 2704359a8f..bbdcf79d00 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
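For reference, the barrier suffixes introduced by this change map roughly
onto C11 memory orders. A minimal sketch of the convention, using C11
atomics as an analogy (ERTS implements its own atomics; the names below
are illustrative, not the ERTS implementation):

    /* _nob  - no barrier      (~ memory_order_relaxed)
     * _acqb - acquire barrier (~ memory_order_acquire)
     * _relb - release barrier (~ memory_order_release)
     * _mb   - full barrier    (~ memory_order_seq_cst)
     * Ops named read_bor/read_band return the PREVIOUS value. */
    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t aux_work;

    static int32_t read_bor_nob(int32_t mask)
    {
        /* analogue of erts_smp_atomic32_read_bor_nob() */
        return atomic_fetch_or_explicit(&aux_work, mask,
                                        memory_order_relaxed);
    }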
@@ -134,15 +134,15 @@ int erts_disable_proc_not_running_opt;
#ifndef DEBUG
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic32_set(&schdlr_sspnd.changing, (VAL))
+ erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL))
#else
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
do { \
erts_aint32_t old_val__; \
- old_val__ = erts_smp_atomic32_xchg(&schdlr_sspnd.changing, \
- (VAL)); \
+ old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.changing, \
+ (VAL)); \
ASSERT(old_val__ == (OLD_VAL)); \
} while (0)
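The debug branch of the macro exchanges in the new value so the previous
one can be asserted. A self-contained C11 sketch of that shape (function
and variable names invented for illustration):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t changing;

    /* Exchange in the new value, then assert the old value matched
     * what the caller expected to be there. */
    static void chng_set(int32_t val, int32_t old_expected)
    {
        int32_t old = atomic_exchange_explicit(&changing, val,
                                               memory_order_relaxed);
        assert(old == old_expected);
        (void)old; /* silence unused warnings in NDEBUG builds */
    }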
@@ -158,7 +158,7 @@ static struct {
erts_smp_atomic32_t changing;
erts_smp_atomic32_t active;
struct {
- erts_smp_atomic32_t ongoing;
+ int ongoing;
long wait_active;
ErtsProcList *procs;
} msb; /* Multi Scheduling Block */
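The msb.ongoing field is demoted from an atomic to a plain int because it
is only ever accessed while schdlr_sspnd.mtx is held, so the mutex already
provides atomicity and ordering. A sketch of the rationale, with invented
names and pthreads standing in for the ERTS mutex:

    #include <pthread.h>

    static pthread_mutex_t sspnd_mtx = PTHREAD_MUTEX_INITIALIZER;
    static int msb_ongoing; /* protected by sspnd_mtx */

    static int read_ongoing(void)
    {
        int val;
        pthread_mutex_lock(&sspnd_mtx);
        val = msb_ongoing; /* plain load is safe under the lock */
        pthread_mutex_unlock(&sspnd_mtx);
        return val;
    }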
@@ -410,7 +410,7 @@ erts_init_process(int ncpu)
init_proclist_alloc();
- erts_smp_atomic32_init(&process_count, 0);
+ erts_smp_atomic32_init_nob(&process_count, 0);
if (erts_use_r9_pids_ports) {
proc_bits = ERTS_R9_PROC_BITS;
@@ -694,8 +694,8 @@ erts_smp_schedule_misc_aux_work(int ignore_self,
erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- aux_work = erts_smp_atomic32_bor(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_MISC);
+ aux_work = erts_smp_atomic32_read_bor_nob(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_MISC);
if ((aux_work & ERTS_SSI_AUX_WORK_MISC) == 0)
erts_sched_poke(ssi);
}
@@ -711,8 +711,8 @@ erts_smp_notify_check_children_needed(void)
erts_aint32_t aux_work;
ErtsSchedulerSleepInfo *ssi;
ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
- aux_work = erts_smp_atomic32_bor(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work = erts_smp_atomic32_read_bor_nob(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
erts_sched_poke(ssi);
}
@@ -727,15 +727,15 @@ blockable_aux_work(ErtsSchedulerData *esdp,
{
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
if (aux_work & ERTS_SSI_AUX_WORK_MISC) {
- aux_work = erts_smp_atomic32_band(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_MISC);
+ aux_work = erts_smp_atomic32_read_band_nob(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_MISC);
aux_work &= ~ERTS_SSI_AUX_WORK_MISC;
handle_misc_aux_work(esdp);
}
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
- aux_work = erts_smp_atomic32_band(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work = erts_smp_atomic32_read_band_nob(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
erts_check_children();
}
@@ -815,7 +815,7 @@ erts_active_schedulers(void)
static ERTS_INLINE void
clear_sys_scheduling(void)
{
- erts_smp_atomic32_set_relb(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set_mb(&doing_sys_schedule, 0);
}
static ERTS_INLINE int
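Here the store is strengthened from release (_relb) to a full barrier
(_mb), so it is also ordered against the loads that follow it. A rough
C11 analogy (seq_cst is the closest portable equivalent; the ERTS _mb
implies a full hardware barrier):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t doing_sys_schedule;

    static void clear_sys_sched(void)
    {
        atomic_store_explicit(&doing_sys_schedule, 0,
                              memory_order_seq_cst);
    }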
@@ -882,42 +882,43 @@ sched_active(Uint no, ErtsRunQueue *rq)
static ERTS_INLINE int
ongoing_multi_scheduling_block(void)
{
- return erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing) != 0;
+ ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx));
+ return schdlr_sspnd.msb.ongoing;
}
static ERTS_INLINE void
empty_runq(ErtsRunQueue *rq)
{
- erts_aint32_t oifls = erts_smp_atomic32_band(&rq->info_flags,
- ~ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_read_band_nob(&rq->info_flags,
+ ~ERTS_RUNQ_IFLG_NONEMPTY);
if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
- erts_smp_atomic32_inc(&no_empty_run_queues);
+ erts_smp_atomic32_inc_relb(&no_empty_run_queues);
}
}
static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
- erts_aint32_t oifls = erts_smp_atomic32_bor(&rq->info_flags,
- ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_read_bor_nob(&rq->info_flags,
+ ERTS_RUNQ_IFLG_NONEMPTY);
if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
- erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
- erts_smp_atomic32_dec(&no_empty_run_queues);
+ erts_smp_atomic32_dec_relb(&no_empty_run_queues);
}
}
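In empty_runq()/non_empty_runq() the counter updates become release
operations (_relb): the run-queue flag change must be visible before the
new count is. A minimal C11 analogy of the increment side:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t no_empty_run_queues;

    /* Release ordering publishes all prior stores (the flag update)
     * before the bumped count can be observed. */
    static void mark_runq_empty(void)
    {
        atomic_fetch_add_explicit(&no_empty_run_queues, 1,
                                  memory_order_release);
    }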
@@ -930,7 +931,7 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t xflgs = 0;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -947,7 +948,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
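Generic form of the retry loops above: a CAS with acquire ordering on
success (the _acqb suffix), feeding the observed value back in as the
next expected value. A C11 sketch, not the ERTS primitive itself:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t ssi_flags;

    static int32_t prep_spin_wait(int32_t nflgs, int32_t xflgs)
    {
        while (!atomic_compare_exchange_weak_explicit(
                   &ssi_flags, &xflgs, nflgs,
                   memory_order_acquire, memory_order_acquire))
            ; /* xflgs was refreshed by the failed CAS; retry */
        return nflgs;
    }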
@@ -989,7 +990,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
@@ -1049,7 +1050,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
tse_blockable_aux_work:
aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -1059,7 +1060,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -1092,7 +1093,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
goto tse_blockable_aux_work;
@@ -1104,7 +1105,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
erts_smp_runq_lock(rq);
sched_active(esdp->no, rq);
@@ -1136,12 +1137,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sys_aux_work:
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -1239,7 +1240,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sys_locked_woken:
clear_sys_scheduling();
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
sched_active_sys(esdp->no, rq);
}
}
@@ -1255,7 +1256,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
erts_aint32_t nflgs = 0;
erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
@@ -1298,7 +1299,6 @@ wake_scheduler(ErtsRunQueue *rq, int incq, int one)
erts_smp_spin_unlock(&sl->lock);
- ERTS_THR_MEMORY_BARRIER;
flgs = ssi_flags_set_wake(ssi);
erts_sched_finish_poke(ssi, flgs);
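The standalone ERTS_THR_MEMORY_BARRIER can be dropped because
ssi_flags_set_wake() now uses a release CAS (_relb), which already orders
every prior store before the flag transition becomes visible to the woken
thread. A hedged C11 analogy (names invented):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t flags;

    static int32_t set_wake(int32_t expect)
    {
        int32_t old = expect;
        /* release on success replaces the explicit barrier */
        atomic_compare_exchange_strong_explicit(&flags, &old, 0,
                                                memory_order_release,
                                                memory_order_relaxed);
        return old; /* caller inspects the observed flags */
    }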
@@ -1344,13 +1344,13 @@ init_no_runqs(int active, int used)
{
erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK);
no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT);
- erts_smp_atomic32_init(&balance_info.no_runqs, no_runqs);
+ erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs);
}
static ERTS_INLINE void
get_no_runqs(int *active, int *used)
{
- erts_aint32_t no_runqs = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
if (active)
*active = (int) (no_runqs & ERTS_NO_RUNQS_MASK);
if (used)
@@ -1360,11 +1360,12 @@ get_no_runqs(int *active, int *used)
static ERTS_INLINE void
set_no_used_runqs(int used)
{
- erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
- new = (used << ERTS_NO_USED_RUNQS_SHIFT) | (exp & ERTS_NO_RUNQS_MASK);
- act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+ new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT;
+ new |= exp & ERTS_NO_RUNQS_MASK;
+ act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
@@ -1374,11 +1375,12 @@ set_no_used_runqs(int used)
static ERTS_INLINE void
set_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
while (1) {
erts_aint32_t act, new;
- new = (exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT)) | active;
- act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+ new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
+ new |= active & ERTS_NO_RUNQS_MASK;
+ act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
break;
exp = act;
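balance_info.no_runqs packs the "active" and "used" counts into one
32-bit atomic; each setter rewrites its half in a relaxed (_nob) CAS
loop, preserving the other half. A sketch of the same shape (mask and
shift values are illustrative, not the real ERTS constants):

    #include <stdatomic.h>
    #include <stdint.h>

    #define RUNQS_MASK 0xff
    #define USED_SHIFT 8

    static _Atomic int32_t no_runqs;

    static void set_used(int used)
    {
        int32_t exp = atomic_load_explicit(&no_runqs,
                                           memory_order_relaxed);
        int32_t new;
        do {
            new = ((used & RUNQS_MASK) << USED_SHIFT)
                  | (exp & RUNQS_MASK); /* keep the active half */
        } while (!atomic_compare_exchange_weak_explicit(
                     &no_runqs, &exp, new,
                     memory_order_relaxed, memory_order_relaxed));
    }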
@@ -1388,13 +1390,14 @@ set_no_active_runqs(int active)
static ERTS_INLINE int
try_inc_no_active_runqs(int active)
{
- erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+ erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs);
if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active)
return 0;
if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) {
erts_aint32_t new, act;
- new = (exp & ~ERTS_NO_RUNQS_MASK) | active;
- act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+ new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT);
+ new |= active & ERTS_NO_RUNQS_MASK;
+ act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp);
if (act == exp)
return 1;
}
@@ -1410,7 +1413,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
if (crq->ix == ix)
return 0;
wrq = ERTS_RUNQ_IX(ix);
- iflgs = erts_smp_atomic32_read(&wrq->info_flags);
+ iflgs = erts_smp_atomic32_read_nob(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
if (activate) {
if (try_inc_no_active_runqs(ix+1)) {
@@ -1652,15 +1655,15 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
erts_smp_runq_lock(evac_rq);
- erts_smp_atomic32_bor(&evac_rq->scheduler->ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_bor_nob(&evac_rq->scheduler->ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
- erts_smp_atomic32_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
+ erts_smp_atomic32_read_bor_nob(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1909,7 +1912,7 @@ static ERTS_INLINE int
check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
- erts_aint32_t iflgs = erts_smp_atomic32_read(&vrq->info_flags);
+ erts_aint32_t iflgs = erts_smp_atomic32_read_nob(&vrq->info_flags);
if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY)
return try_steal_task_from_victim(rq, rq_lockedp, vrq);
else
@@ -2061,7 +2064,7 @@ check_balance(ErtsRunQueue *c_rq)
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix;
- if (erts_smp_atomic32_xchg(&balance_info.checking_balance, 1)) {
+ if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
return;
}
@@ -2069,7 +2072,7 @@ check_balance(ErtsRunQueue *c_rq)
get_no_runqs(NULL, &blnc_no_rqs);
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
return;
}
@@ -2077,7 +2080,7 @@ check_balance(ErtsRunQueue *c_rq)
if (balance_info.halftime) {
balance_info.halftime = 0;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
@@ -2111,7 +2114,7 @@ check_balance(ErtsRunQueue *c_rq)
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_runq_lock(c_rq);
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
return;
}
@@ -2456,7 +2459,7 @@ erts_fprintf(stderr, "--------------------------------\n");
set_no_active_runqs(active);
balance_info.halftime = 1;
- erts_smp_atomic32_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
/* Write migration paths and reset balance statistics in all queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -2598,7 +2601,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS,
sizeof(ErtsAlignedRunQueue) * n);
#ifdef ERTS_SMP
- erts_smp_atomic32_init(&no_empty_run_queues, 0);
+ erts_smp_atomic32_init_nob(&no_empty_run_queues, 0);
#endif
erts_no_run_queues = n;
@@ -2608,7 +2611,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
rq->ix = ix;
- erts_smp_atomic32_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_smp_atomic32_init_nob(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
/* make sure that the "extra" id corresponds to the scheduler's
* id if the esdp->no <-> ix+1 mapping changes.
@@ -2701,9 +2704,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ssi->next = NULL;
ssi->prev = NULL;
#endif
- erts_smp_atomic32_init(&ssi->flags, 0);
+ erts_smp_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
- erts_smp_atomic32_init(&ssi->aux_work, 0);
+ erts_smp_atomic32_init_nob(&ssi->aux_work, 0);
}
#endif
@@ -2747,7 +2750,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
- erts_smp_atomic32_init(&esdp->chk_cpu_bind, 0);
+ erts_smp_atomic32_init_nob(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2755,11 +2758,11 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- erts_smp_atomic32_init(&schdlr_sspnd.changing, 0);
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
- erts_smp_atomic32_init(&schdlr_sspnd.msb.ongoing, 0);
- erts_smp_atomic32_init(&schdlr_sspnd.active, no_schedulers);
+ schdlr_sspnd.msb.ongoing = 0;
+ erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers);
schdlr_sspnd.msb.procs = NULL;
init_no_runqs(no_schedulers,
erts_common_run_queue ? 1 : no_schedulers_online);
@@ -2768,7 +2771,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
- erts_smp_atomic32_init(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0);
balance_info.prev_rise.active_runqs = 0;
balance_info.prev_rise.max_len = 0;
balance_info.prev_rise.reds = 0;
@@ -2777,8 +2780,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2792,7 +2795,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
| ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- erts_smp_atomic32_init(&doing_sys_schedule, 0);
+ erts_smp_atomic32_init_nob(&doing_sys_schedule, 0);
init_misc_aux_work();
@@ -2808,7 +2811,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_no_schedulers = 1;
#endif
- erts_smp_atomic32_init(&function_calls, 0);
+ erts_smp_atomic32_init_nob(&function_calls, 0);
/* init port tasks */
erts_port_task_init();
@@ -2935,10 +2938,10 @@ int
erts_get_max_no_executing_schedulers(void)
{
#ifdef ERTS_SMP
- if (erts_smp_atomic32_read(&schdlr_sspnd.changing))
+ if (erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
return (int) erts_no_schedulers;
ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic32_read(&schdlr_sspnd.active);
+ return (int) erts_smp_atomic32_read_nob(&schdlr_sspnd.active);
#else
return 1;
#endif
@@ -2968,7 +2971,7 @@ scheduler_ix_resume_wake(Uint ix)
| ERTS_SSI_FLG_SUSPENDED);
erts_aint32_t oflgs;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, 0, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs);
if (oflgs == xflgs) {
erts_sched_finish_poke(ssi, oflgs);
break;
@@ -2987,7 +2990,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
erts_aint32_t xflgs = xpct;
do {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -3037,7 +3040,7 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING
@@ -3092,15 +3095,15 @@ suspend_scheduler(ErtsSchedulerData *esdp)
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- active_schedulers = erts_smp_atomic32_dectest(&schdlr_sspnd.active);
+ active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active);
ASSERT(active_schedulers >= 1);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
if (active_schedulers == schdlr_sspnd.msb.wait_active)
wake = 1;
if (active_schedulers == 1) {
- changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
}
}
@@ -3122,8 +3125,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
&& schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
wake = 1;
if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
}
}
@@ -3140,7 +3143,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
blockable_aux_work:
blockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -3176,13 +3179,13 @@ suspend_scheduler(ErtsSchedulerData *esdp)
| ERTS_SSI_FLG_SUSPENDED));
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
break;
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
goto blockable_aux_work;
@@ -3194,19 +3197,19 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
}
- active_schedulers = erts_smp_atomic32_inctest(&schdlr_sspnd.active);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ active_schedulers = erts_smp_atomic32_inc_read_nob(&schdlr_sspnd.active);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
&& schdlr_sspnd.online == active_schedulers) {
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
ASSERT(no <= schdlr_sspnd.online);
- ASSERT(!erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
+ ASSERT(!ongoing_multi_scheduling_block());
}
@@ -3235,7 +3238,7 @@ do { \
(RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \
(RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \
- erts_smp_atomic32_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\
+ erts_smp_atomic32_read_band_nob(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\
for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \
(RQ)->procs.prio_info[pix__].max_len = 0; \
(RQ)->procs.prio_info[pix__].reds = 0; \
@@ -3279,7 +3282,7 @@ erts_schedulers_state(Uint *total,
int res;
erts_aint32_t changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
@@ -3310,7 +3313,7 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
@@ -3385,8 +3388,8 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++) {
ErtsSchedulerSleepInfo *ssi;
ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic32_bor(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_bor_nob(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
wake_all_schedulers();
}
@@ -3433,11 +3436,11 @@ erts_set_schedulers_online(Process *p,
NULL);
ASSERT(res != ERTS_SCHDLR_SSPND_DONE
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic32_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -3456,7 +3459,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
@@ -3466,7 +3469,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
plp->next = schdlr_sspnd.msb.procs;
schdlr_sspnd.msb.procs = plp;
p->flags |= F_HAVE_BLCKD_MSCHED;
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
@@ -3477,11 +3480,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
- ASSERT(0 == erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
- erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 1);
+ ASSERT(!ongoing_multi_scheduling_block());
+ schdlr_sspnd.msb.ongoing = 1;
if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
@@ -3501,8 +3504,8 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
}
if (erts_common_run_queue) {
for (ix = 1; ix < online; ix++)
- erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
@@ -3530,7 +3533,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
- while (erts_smp_atomic32_read(&schdlr_sspnd.active)
+ while (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
!= schdlr_sspnd.msb.wait_active)
erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
@@ -3539,11 +3542,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
NULL);
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic32_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -3610,16 +3613,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
});
#endif
p->flags &= ~F_HAVE_BLCKD_MSCHED;
- erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 0);
+ schdlr_sspnd.msb.ongoing = 0;
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic32_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_read_band_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
@@ -3669,7 +3672,7 @@ void
erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
{
if (return_value == am_blocked) {
- erts_aint32_t active = erts_smp_atomic32_read(&schdlr_sspnd.active);
+ erts_aint32_t active = erts_smp_atomic32_read_nob(&schdlr_sspnd.active);
ASSERT(1 <= active && active <= 2);
ASSERT(ERTS_PROC_GET_SCHDATA(p)->no == 1);
}
@@ -3752,12 +3755,12 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.changing)
+ ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)
& ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
- erts_smp_atomic32_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (((ErtsSchedulerData *) vesdp)->no != 1)
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
}
@@ -5214,7 +5217,7 @@ Process *schedule(Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
- fcalls = (int) erts_smp_atomic32_addtest(&function_calls, reds);
+ fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds);
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
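The reduction counter switches to add_read with acquire (_acqb): bump the
counter, read the new total, and order it before the loads schedule()
performs next. A C11 analogy of erts_smp_atomic32_add_read_acqb():

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t function_calls;

    static int32_t add_read_acqb(int32_t reds)
    {
        /* fetch_add returns the previous value; add reds for the
         * new total, as the _read_ in the ERTS name implies */
        return atomic_fetch_add_explicit(&function_calls, reds,
                                         memory_order_acquire) + reds;
    }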
@@ -5349,7 +5352,7 @@ Process *schedule(Process *p, int calls)
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
|| (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
+ ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
@@ -5363,7 +5366,7 @@ Process *schedule(Process *p, int calls)
|| defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
{
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- erts_aint32_t aux_work = erts_smp_atomic32_read(&ssi->aux_work);
+ erts_aint32_t aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work);
if (aux_work) {
erts_smp_runq_unlock(rq);
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
@@ -5407,7 +5410,7 @@ Process *schedule(Process *p, int calls)
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
|| (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
+ ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
@@ -5948,7 +5951,7 @@ erts_test_next_pid(int set, Uint next)
Uint erts_process_count(void)
{
- erts_aint32_t res = erts_smp_atomic32_read(&process_count);
+ erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count);
ASSERT(res >= 0);
return (Uint) res;
}
@@ -5997,7 +6000,7 @@ alloc_process(void)
ASSERT(!process_tab[p_next]);
process_tab[p_next] = p;
- erts_smp_atomic32_inc(&process_count);
+ erts_smp_atomic32_inc_nob(&process_count);
p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
if (p->id == ERTS_INVALID_PID) {
/* Do not use the invalid pid; change serial */
@@ -6123,7 +6126,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
p->prio = PRIORITY_NORMAL;
- p->max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
+ p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
}
p->skipped = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
@@ -7580,8 +7583,8 @@ continue_exit_process(Process *p
p->status_flags = 0;
#endif
process_tab[pix] = NULL; /* Time of death! */
- ASSERT(erts_smp_atomic32_read(&process_count) > 0);
- erts_smp_atomic32_dec(&process_count);
+ ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0);
+ erts_smp_atomic32_dec_nob(&process_count);
#ifdef ERTS_SMP
erts_pix_unlock(pix_lock);