author     Rickard Green <[email protected]>  2011-05-13 14:27:41 +0200
committer  Rickard Green <[email protected]>  2011-05-13 14:27:41 +0200
commit     0c73cf3d305c4b033c6e1efa6ebd08796a1d8682
tree       4aa3a5a40d8303f1f6f01cd90cee43857371db07 /erts
parent     926795501b71ebf9ca4b22927021fa551549f9b0
parent     139fa05489a6ba3e4384e6f20ea3f943741449d5
Merge branch 'rickard/barriers/OTP-9281' into dev
* rickard/barriers/OTP-9281:
Silence warnings
Fix build with hipe on amd64
Reduce number of atomic ops
Use 32-bit atomic for port snapshot
Remove pointless erts_ports_alive variable
Ensure quick break
Ensure that all rehashing information is seen when done
Ensure that stack updates are seen when stack is released
Add needed barriers for write_concurrency tables
Homogenize memory barriers on atomics
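Most of the changes listed above follow one pattern: a flag that publishes data to other threads is cleared with a release-barrier store (erts_smp_atomic_set_relb), and readers consume the flag with an acquire-barrier read (erts_smp_atomic32_read_acqb or a cmpxchg with acquire semantics), so that everything written before the release is visible after a successful acquire. Below is a minimal illustrative sketch of that pairing in portable C11 atomics rather than the ERTS-internal erts_smp_* API; the names (is_stack_busy, stack_data, release_stack, try_acquire_stack) merely echo the erl_db_tree.c change and are not the real ERTS code.

    #include <stdatomic.h>

    /* Illustrative names; this is not the ERTS-internal API. */
    static _Atomic int is_stack_busy;   /* 1 while one thread owns the stack */
    static int stack_data;              /* published under the flag */

    /* Writer side: update the data, then clear the flag with a release
     * store, as erts_smp_atomic_set_relb() does in release_stack(). */
    void release_stack(int value)
    {
        stack_data = value;
        atomic_store_explicit(&is_stack_busy, 0, memory_order_release);
    }

    /* Reader side: claim the flag with an acquire CAS; on success, all
     * writes made before the matching release store are visible here. */
    int try_acquire_stack(int *out)
    {
        int expected = 0;
        if (atomic_compare_exchange_strong_explicit(&is_stack_busy,
                                                    &expected, 1,
                                                    memory_order_acquire,
                                                    memory_order_relaxed)) {
            *out = stack_data;
            return 1;
        }
        return 0;
    }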
Diffstat (limited to 'erts')
 erts/aclocal.m4                                    |   2
 erts/emulator/beam/atom.names                      |   1
 erts/emulator/beam/beam_emu.c                      |   2
 erts/emulator/beam/bif.c                           |  24
 erts/emulator/beam/erl_db_hash.c                   |  23
 erts/emulator/beam/erl_db_tree.c                   |   2
 erts/emulator/beam/erl_drv_thread.c                |   7
 erts/emulator/beam/erl_port_task.c                 |   3
 erts/emulator/beam/erl_process.c                   | 135
 erts/emulator/beam/global.h                        |   9
 erts/emulator/beam/io.c                            |   9
 erts/emulator/beam/safe_hash.c                     |   2
 erts/emulator/sys/common/erl_check_io.c            |   5
 erts/emulator/sys/common/erl_poll.c                |   7
 erts/emulator/sys/win32/erl_poll.c                 |   2
 erts/include/internal/libatomic_ops/ethr_atomic.h  |  26
 erts/include/internal/sparc32/atomic.h             |  29
 erts/include/internal/tile/atomic.h                |  33
18 files changed, 199 insertions(+), 122 deletions(-)
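One of the larger hunks below replaces the two separate atomics balance_info.active_runqs and balance_info.used_runqs with a single packed word, balance_info.no_runqs, so that both counts can be read in one load and updated atomically together. A rough stand-alone sketch of the packing idea, in C11 atomics with illustrative names (not the ERTS code itself):

    #include <stdatomic.h>
    #include <stdint.h>

    #define USED_SHIFT 16            /* used count in the high 16 bits */
    #define RUNQS_MASK 0xffffu       /* active count in the low 16 bits */

    static _Atomic uint32_t no_runqs;  /* illustrative stand-in */

    /* Both counts come from one load, so they are always consistent
     * with each other. */
    static void get_counts(int *active, int *used)
    {
        uint32_t v = atomic_load(&no_runqs);
        *active = (int) (v & RUNQS_MASK);
        *used   = (int) ((v >> USED_SHIFT) & RUNQS_MASK);
    }

    /* Update one half with a CAS retry loop; on failure the CAS
     * reloads the current value into exp and we try again. */
    static void set_used(int used)
    {
        uint32_t exp = atomic_load(&no_runqs);
        for (;;) {
            uint32_t new = ((uint32_t) used << USED_SHIFT) | (exp & RUNQS_MASK);
            if (atomic_compare_exchange_weak(&no_runqs, &exp, new))
                break;
        }
    }

Packing both counts into one word is what lets try_inc_no_active_runqs in the erl_process.c hunk check the used count and bump the active count with a single compare-and-swap.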
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index a1211bbf0c..f7e5c31910 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -1153,7 +1153,7 @@ case "$THR_LIB_NAME" in
 		    AO_nop_full();
 		    AO_store(&x, (AO_t) 0);
 		    z = AO_load(&x);
-		    z = AO_compare_and_swap(&x, (AO_t) 0, (AO_t) 1);
+		    z = AO_compare_and_swap_full(&x, (AO_t) 0, (AO_t) 1);
 		],
 		[ethr_have_native_atomics=yes
 		 ethr_have_libatomic_ops=yes])
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 327620772f..de76cb9680 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -76,6 +76,7 @@ atom allocator_sizes
 atom alloc_util_allocators
 atom allow_passive_connect
 atom already_loaded
+atom amd64
 atom anchored
 atom and
 atom andalso
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index d5d113200a..fb90a7d4f7 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -5325,7 +5325,7 @@ void process_main(void)
 	    ep->code[3] = (BeamInstr) OpCode(apply_bif);
 	    ep->code[4] = (BeamInstr) bif_table[i].f;
 	    /* XXX: set func info for bifs */
-	    ((BeamInstr*)ep->code + 3)[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+	    ep->fake_op_func_info_for_hipe[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
 	}
 
     return;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index ac93b10d12..68b3350d7f 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3408,6 +3408,7 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
     Eterm* dead_ports;
     int alive, dead;
     Uint32 next_ss;
+    int i;
 
     /* To get a consistent snapshot...
      * We add alive ports from start of the buffer
@@ -3419,21 +3420,18 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
     erts_smp_atomic_set(&erts_dead_ports_ptr,
 			(erts_aint_t) (port_buf + erts_max_ports));
-    next_ss = erts_smp_atomic_inctest(&erts_ports_snapshot);
+    next_ss = erts_smp_atomic32_inctest(&erts_ports_snapshot);
 
-    if (erts_smp_atomic_read(&erts_ports_alive) > 0) {
-	erts_aint_t i;
-	for (i = erts_max_ports-1; i >= 0; i--) {
-	    Port* prt = &erts_port[i];
-	    erts_smp_port_state_lock(prt);
-	    if (!(prt->status & ERTS_PORT_SFLGS_DEAD)
-		&& prt->snapshot != next_ss) {
-		ASSERT(prt->snapshot == next_ss - 1);
-		*pp++ = prt->id;
-		prt->snapshot = next_ss; /* Consumed by this snapshot */
-	    }
-	    erts_smp_port_state_unlock(prt);
+    for (i = erts_max_ports-1; i >= 0; i--) {
+	Port* prt = &erts_port[i];
+	erts_smp_port_state_lock(prt);
+	if (!(prt->status & ERTS_PORT_SFLGS_DEAD)
+	    && prt->snapshot != next_ss) {
+	    ASSERT(prt->snapshot == next_ss - 1);
+	    *pp++ = prt->id;
+	    prt->snapshot = next_ss; /* Consumed by this snapshot */
 	}
+	erts_smp_port_state_unlock(prt);
     }
 
     dead_ports = (Eterm*)erts_smp_atomic_xchg(&erts_dead_ports_ptr,
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 694348e31d..fdc82c8b88 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -105,7 +105,7 @@
 #define NSEG_2   256 /* Size of second segment table */
 #define NSEG_INC 128 /* Number of segments to grow after that */
 
-#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read(&(tb)->segtab))
+#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_acqb(&(tb)->segtab))
 #define NACTIVE(tb) ((int)erts_smp_atomic_read(&(tb)->nactive))
 #define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems))
 
@@ -122,8 +122,8 @@
  */
 static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
 {
-    Uint mask = erts_smp_atomic_read(&tb->szm);
-    Uint ix = hval & mask; 
+    Uint mask = erts_smp_atomic_read_acqb(&tb->szm);
+    Uint ix = hval & mask;
     if (ix >= erts_smp_atomic_read(&tb->nactive)) {
 	ix &= mask>>1;
 	ASSERT(ix < erts_smp_atomic_read(&tb->nactive));
@@ -668,6 +668,7 @@ int db_create_hash(Process *p, DbTable *tbl)
     else { /* coarse locking */
 	tb->locks = NULL;
     }
+    ERTS_THR_MEMORY_BARRIER;
 #endif /* ERST_SMP */
     return DB_ERROR_NONE;
 }
@@ -2349,7 +2350,7 @@ static int alloc_seg(DbTableHash *tb)
 	struct ext_segment* eseg;
 	eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
 	MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
-	erts_smp_atomic_set(&tb->segtab, (erts_aint_t) eseg->segtab);
+	erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t) eseg->segtab);
 	tb->nsegs = eseg->nsegs;
     }
     ASSERT(seg_ix < tb->nsegs);
@@ -2421,7 +2422,7 @@ static int free_seg(DbTableHash *tb, int free_records)
 	    MY_ASSERT(newtop->s.is_ext_segment);
 	    if (newtop->prev_segtab != NULL) {
 		/* Time to use a smaller segtab */
-		erts_smp_atomic_set(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
+		erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
 		tb->nsegs = seg_ix;
 		ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
 	    }
@@ -2438,7 +2439,7 @@ static int free_seg(DbTableHash *tb, int free_records)
     if (seg_ix > 0) {
 	if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
     } else {
-	erts_smp_atomic_set(&tb->segtab, (erts_aint_t)NULL);
+	erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)NULL);
     }
 #endif
     tb->nslots -= SEGSZ;
@@ -2533,9 +2534,9 @@ static void grow(DbTableHash* tb, int nactive)
     }
     erts_smp_atomic_inc(&tb->nactive);
     if (from_ix == 0) {
-	erts_smp_atomic_set(&tb->szm, szm);
+	erts_smp_atomic_set_relb(&tb->szm, szm);
     }
-    erts_smp_atomic_set(&tb->is_resizing, 0);
+    erts_smp_atomic_set_relb(&tb->is_resizing, 0);
 
     /* Finally, let's split the bucket. We try to do it in a smart way
        to keep link order and avoid unnecessary updates of next-pointers */
@@ -2567,7 +2568,7 @@ static void grow(DbTableHash* tb, int nactive)
     return;
 
 abort:
-    erts_smp_atomic_set(&tb->is_resizing, 0);
+    erts_smp_atomic_set_relb(&tb->is_resizing, 0);
 }
 
 
@@ -2611,7 +2612,7 @@ static void shrink(DbTableHash* tb, int nactive)
 
 	    erts_smp_atomic_set(&tb->nactive, src_ix);
 	    if (dst_ix == 0) {
-		erts_smp_atomic_set(&tb->szm, low_szm);
+		erts_smp_atomic_set_relb(&tb->szm, low_szm);
 	    }
 	    WUNLOCK_HASH(lck);
@@ -2625,7 +2626,7 @@ static void shrink(DbTableHash* tb, int nactive)
     }
     /*else already done */
-    erts_smp_atomic_set(&tb->is_resizing, 0);
+    erts_smp_atomic_set_relb(&tb->is_resizing, 0);
 }
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index eb77d1281a..9a0ba3a418 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -111,7 +111,7 @@ static void release_stack(DbTableTree* tb, DbTreeStack* stack)
 {
     if (stack == &tb->static_stack) {
 	ASSERT(erts_smp_atomic_read(&tb->is_stack_busy) == 1);
-	erts_smp_atomic_set(&tb->is_stack_busy, 0);
+	erts_smp_atomic_set_relb(&tb->is_stack_busy, 0);
     }
     else {
 	erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 39bbe9633b..dc578f6d2a 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -700,6 +700,13 @@ erl_drv_thread_join(ErlDrvTid tid, void **respp)
 
 extern int erts_darwin_main_thread_pipe[2];
 extern int erts_darwin_main_thread_result_pipe[2];
 
+int erl_drv_stolen_main_thread_join(ErlDrvTid tid, void **respp);
+int erl_drv_steal_main_thread(char *name,
+			      ErlDrvTid *dtid,
+			      void* (*func)(void*),
+			      void* arg,
+			      ErlDrvThreadOpts *opts);
+
 int
 erl_drv_stolen_main_thread_join(ErlDrvTid tid, void **respp)
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 1b07024ca1..326021643f 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -658,8 +658,6 @@ erts_port_task_free_port(Port *pp)
 	   when scheduled out... */
 	ErtsPortTask *ptp = port_task_alloc();
 	erts_smp_port_state_lock(pp);
-	ASSERT(erts_smp_atomic_read(&erts_ports_alive) > 0);
-	erts_smp_atomic_dec(&erts_ports_alive);
 	pp->status &= ~ERTS_PORT_SFLG_CLOSING;
 	pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
 	erts_may_save_closed_port(pp);
@@ -681,7 +679,6 @@ erts_port_task_free_port(Port *pp)
 	port_is_dequeued = 1;
     }
     erts_smp_port_state_lock(pp);
-    erts_smp_atomic_dec(&erts_ports_alive);
     pp->status &= ~ERTS_PORT_SFLG_CLOSING;
     pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
     erts_may_save_closed_port(pp);
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 05172aaefa..2704359a8f 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -166,9 +166,8 @@ static struct {
 
 static struct {
     erts_smp_mtx_t update_mtx;
-    erts_smp_atomic32_t active_runqs;
+    erts_smp_atomic32_t no_runqs;
     int last_active_runqs;
-    erts_smp_atomic32_t used_runqs;
     int forced_check_balance;
     erts_smp_atomic32_t checking_balance;
     int halftime;
@@ -965,7 +964,7 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
     erts_aint32_t flgs;
 
     do {
-	flgs = erts_smp_atomic32_read(&ssi->flags);
+	flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
 	if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
 	    != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
 	    break;
@@ -1114,7 +1113,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
     else {
 	erts_aint_t dt;
 
-	erts_smp_atomic32_set(&function_calls, 0);
+	erts_smp_atomic32_set_relb(&function_calls, 0);
 	*fcalls = 0;
 
 	sched_waiting_sys(esdp->no, rq);
@@ -1147,7 +1146,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
 		nonblockable_aux_work(esdp, ssi, aux_work);
 #endif
 
-	    flgs = erts_smp_atomic32_read(&ssi->flags);
+	    flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
 	    if (!(flgs & ERTS_SSI_FLG_WAITING)) {
 		ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
 		goto sys_woken;
@@ -1333,6 +1332,76 @@ wake_all_schedulers(void)
     }
 }
 
+#define ERTS_NO_USED_RUNQS_SHIFT 16
+#define ERTS_NO_RUNQS_MASK 0xffff
+
+#if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_NO_RUNQS_MASK
+#  error "Too large amount of schedulers allowed"
+#endif
+
+static ERTS_INLINE void
+init_no_runqs(int active, int used)
+{
+    erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK);
+    no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT);
+    erts_smp_atomic32_init(&balance_info.no_runqs, no_runqs);
+}
+
+static ERTS_INLINE void
+get_no_runqs(int *active, int *used)
+{
+    erts_aint32_t no_runqs = erts_smp_atomic32_read(&balance_info.no_runqs);
+    if (active)
+	*active = (int) (no_runqs & ERTS_NO_RUNQS_MASK);
+    if (used)
+	*used = (int) ((no_runqs >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK);
+}
+
+static ERTS_INLINE void
+set_no_used_runqs(int used)
+{
+    erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+    while (1) {
+	erts_aint32_t act, new;
+	new = (used << ERTS_NO_USED_RUNQS_SHIFT) | (exp & ERTS_NO_RUNQS_MASK);
+	act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+	if (act == exp)
+	    break;
+	exp = act;
+    }
+}
+
+static ERTS_INLINE void
+set_no_active_runqs(int active)
+{
+    erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+    while (1) {
+	erts_aint32_t act, new;
+	new = (exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT)) | active;
+	act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+	if (act == exp)
+	    break;
+	exp = act;
+    }
+}
+
+static ERTS_INLINE int
+try_inc_no_active_runqs(int active)
+{
+    erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+    if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active)
+	return 0;
+    if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) {
+	erts_aint32_t new, act;
+	new = (exp & ~ERTS_NO_RUNQS_MASK) | active;
+	act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+	if (act == exp)
+	    return 1;
+    }
+    return 0;
+}
+
+
 static ERTS_INLINE int
 chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
 {
@@ -1344,9 +1413,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
     iflgs = erts_smp_atomic32_read(&wrq->info_flags);
     if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
 	if (activate) {
-	    if (ix == erts_smp_atomic32_cmpxchg(&balance_info.active_runqs,
-						ix+1,
-						ix)) {
+	    if (try_inc_no_active_runqs(ix+1)) {
 		erts_smp_xrunq_lock(crq, wrq);
 		wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
 		erts_smp_xrunq_unlock(crq, wrq);
@@ -1363,8 +1430,9 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
 {
     int ix = crq->ix;
     int stop_ix = ix;
-    int active_ix = erts_smp_atomic32_read(&balance_info.active_runqs);
-    int balance_ix = erts_smp_atomic32_read(&balance_info.used_runqs);
+    int active_ix, balance_ix;
+
+    get_no_runqs(&active_ix, &balance_ix);
 
     if (active_ix > balance_ix)
 	active_ix = balance_ix;
@@ -1416,7 +1484,7 @@ erts_sched_notify_check_cpu_bind(void)
     int ix;
     if (erts_common_run_queue) {
 	for (ix = 0; ix < erts_no_schedulers; ix++)
-	    erts_smp_atomic32_set(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
+	    erts_smp_atomic32_set_relb(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
 	wake_all_schedulers();
     }
     else {
@@ -1871,8 +1939,7 @@ try_steal_task(ErtsRunQueue *rq)
 
     ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked);
 
-    active_rqs = erts_smp_atomic32_read(&balance_info.active_runqs);
-    blnc_rqs = erts_smp_atomic32_read(&balance_info.used_runqs);
+    get_no_runqs(&active_rqs, &blnc_rqs);
 
     if (active_rqs > blnc_rqs)
 	active_rqs = blnc_rqs;
@@ -1883,7 +1950,7 @@ try_steal_task(ErtsRunQueue *rq)
 	if (active_rqs < blnc_rqs) {
 	    int no = blnc_rqs - active_rqs;
 	    int stop_ix = vix = active_rqs + rq->ix % no;
-	    while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
+	    while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
 		res = check_possible_steal_victim(rq, &rq_locked, vix);
 		if (res)
 		    goto done;
@@ -1898,7 +1965,7 @@ try_steal_task(ErtsRunQueue *rq)
 	vix = rq->ix;
 
 	/* ... then try to steal a job from another active queue... */
-	while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
+	while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
 	    vix++;
 	    if (vix >= active_rqs)
 		vix = 0;
@@ -1999,7 +2066,7 @@ check_balance(ErtsRunQueue *c_rq)
 	return;
     }
 
-    blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
+    get_no_runqs(NULL, &blnc_no_rqs);
     if (blnc_no_rqs == 1) {
 	c_rq->check_balance_reds = INT_MAX;
 	erts_smp_atomic32_set(&balance_info.checking_balance, 0);
@@ -2038,7 +2105,8 @@ check_balance(ErtsRunQueue *c_rq)
     forced = balance_info.forced_check_balance;
     balance_info.forced_check_balance = 0;
 
-    blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
+    get_no_runqs(&current_active, &blnc_no_rqs);
+
     if (blnc_no_rqs == 1) {
 	erts_smp_mtx_unlock(&balance_info.update_mtx);
 	erts_smp_runq_lock(c_rq);
@@ -2052,8 +2120,6 @@ check_balance(ErtsRunQueue *c_rq)
     if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE)
 	balance_info.full_reds_history_index = 0;
 
-    current_active = erts_smp_atomic32_read(&balance_info.active_runqs);
-
     /* Read balance information for all run queues */
     for (qix = 0; qix < blnc_no_rqs; qix++) {
 	ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
@@ -2387,7 +2453,7 @@ erts_fprintf(stderr, "--------------------------------\n");
     }
 
     balance_info.last_active_runqs = active;
-    erts_smp_atomic32_set(&balance_info.active_runqs, active);
+    set_no_active_runqs(active);
 
     balance_info.halftime = 1;
     erts_smp_atomic32_set(&balance_info.checking_balance, 0);
@@ -2695,9 +2761,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
     erts_smp_atomic32_init(&schdlr_sspnd.msb.ongoing, 0);
     erts_smp_atomic32_init(&schdlr_sspnd.active, no_schedulers);
     schdlr_sspnd.msb.procs = NULL;
-    erts_smp_atomic32_set(&balance_info.used_runqs,
-			  erts_common_run_queue ? 1 : no_schedulers_online);
-    erts_smp_atomic32_init(&balance_info.active_runqs, no_schedulers);
+    init_no_runqs(no_schedulers,
+		  erts_common_run_queue ? 1 : no_schedulers_online);
     balance_info.last_active_runqs = no_schedulers;
     erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
     balance_info.forced_check_balance = 0;
@@ -2939,7 +3004,7 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
     erts_aint32_t flgs;
 
     do {
-	flgs = erts_smp_atomic32_read(&ssi->flags);
+	flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
 	if ((flgs & (ERTS_SSI_FLG_SLEEPING
 		     | ERTS_SSI_FLG_WAITING
 		     | ERTS_SSI_FLG_SUSPENDED))
@@ -3068,7 +3133,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
 	    wake = 0;
 	}
 
-	flgs = erts_smp_atomic32_read(&ssi->flags);
+	flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
 	if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
 	    break;
 	erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -3292,7 +3357,7 @@ erts_set_schedulers_online(Process *p,
 		    ErtsRunQueue *to_rq = ERTS_RUNQ_IX(ix % no);
 		    evacuate_run_queue(from_rq, to_rq);
 		}
-		erts_smp_atomic32_set(&balance_info.used_runqs, no);
+		set_no_used_runqs(no);
 		erts_smp_mtx_unlock(&balance_info.update_mtx);
 		erts_smp_mtx_lock(&schdlr_sspnd.mtx);
 	    }
@@ -3346,7 +3411,7 @@ erts_set_schedulers_online(Process *p,
 		for (ix = erts_no_run_queues-1; ix >= no; ix--)
 		    evacuate_run_queue(ERTS_RUNQ_IX(ix),
 				       ERTS_RUNQ_IX(ix % no));
-		erts_smp_atomic32_set(&balance_info.used_runqs, no);
+		set_no_used_runqs(no);
 		erts_smp_mtx_unlock(&balance_info.update_mtx);
 		erts_smp_mtx_lock(&schdlr_sspnd.mtx);
 		for (ix = no; ix < online; ix++) {
@@ -3443,7 +3508,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
 	else {
 	    erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
 	    erts_smp_mtx_lock(&balance_info.update_mtx);
-	    erts_smp_atomic32_set(&balance_info.used_runqs, 1);
+	    set_no_used_runqs(1);
 	    for (ix = 0; ix < online; ix++) {
 		ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
 		erts_smp_runq_lock(rq);
@@ -3580,7 +3645,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
 		evacuate_run_queue(ERTS_RUNQ_IX(ix),
 				   ERTS_RUNQ_IX(ix % online));
 
-	    erts_smp_atomic32_set(&balance_info.used_runqs, online);
+	    set_no_used_runqs(online);
 	    /* Make sure that we balance soon... */
 	    balance_info.forced_check_balance = 1;
 	    erts_smp_runq_lock(ERTS_RUNQ_IX(0));
@@ -5131,7 +5196,7 @@ Process *schedule(Process *p, int calls)
 	esdp = erts_get_scheduler_data();
 	rq = erts_get_runq_current(esdp);
 	ASSERT(esdp);
-	fcalls = (int) erts_smp_atomic32_read(&function_calls);
+	fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
 	actual_reds = reds = 0;
 	erts_smp_runq_lock(rq);
     } else {
@@ -5282,14 +5347,14 @@ Process *schedule(Process *p, int calls)
 			  | ERTS_RUNQ_FLG_CHK_CPU_BIND
 			  | ERTS_RUNQ_FLG_SUSPENDED)) {
 	    if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
-		|| (erts_smp_atomic32_read(&esdp->ssi->flags)
+		|| (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
 		    & ERTS_SSI_FLG_SUSPENDED)) {
 		ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
 		       & ERTS_SSI_FLG_SUSPENDED);
 		suspend_scheduler(esdp);
 	    }
 	    if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
-		|| erts_smp_atomic32_read(&esdp->chk_cpu_bind)) {
+		|| erts_smp_atomic32_read_acqb(&esdp->chk_cpu_bind)) {
 		erts_sched_check_cpu_bind(esdp);
 	    }
 	}
@@ -5340,7 +5405,7 @@ Process *schedule(Process *p, int calls)
 	if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
 			 | ERTS_RUNQ_FLG_SUSPENDED)) {
 	    if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
-		|| (erts_smp_atomic32_read(&esdp->ssi->flags)
+		|| (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
 		    & ERTS_SSI_FLG_SUSPENDED)) {
 		ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
 		       & ERTS_SSI_FLG_SUSPENDED);
@@ -5384,7 +5449,7 @@ Process *schedule(Process *p, int calls)
 	     * Schedule system-level activities.
 	     */
 
-	    erts_smp_atomic32_set(&function_calls, 0);
+	    erts_smp_atomic32_set_relb(&function_calls, 0);
 	    fcalls = 0;
 
 	    ASSERT(!erts_port_task_have_outstanding_io_tasks());
@@ -5426,7 +5491,7 @@ Process *schedule(Process *p, int calls)
 		if (erts_common_run_queue->waiting)
 		    wake_scheduler(erts_common_run_queue, 0, 1);
 	    }
-	    else if (erts_smp_atomic32_read(&no_empty_run_queues) != 0) {
+	    else if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
 		wake_scheduler_on_empty_runq(rq);
 		rq->wakeup_other = 0;
 	    }
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 30b0a60611..499bdd77ba 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -183,7 +183,7 @@ struct port {
 				   process to get (line oriented I/O)*/
     Uint32 status;		/* Status and type flags */
     int control_flags;		/* Flags for port_control() */
-    Uint32 snapshot;		/* Next snapshot that port should be part of */
+    erts_aint32_t snapshot;	/* Next snapshot that port should be part of */
     struct reg_proc *reg;
     ErlDrvPDL port_data_lock;
 
@@ -527,11 +527,10 @@ union erl_off_heap_ptr {
 
 /* arrays that get malloced at startup */
 extern Port* erts_port;
-extern erts_smp_atomic_t erts_ports_alive;
 
 extern Uint erts_max_ports;
 extern Uint erts_port_tab_index_mask;
-extern erts_smp_atomic_t erts_ports_snapshot;
+extern erts_smp_atomic32_t erts_ports_snapshot;
 extern erts_smp_atomic_t erts_dead_ports_ptr;
 
 ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt);
@@ -541,12 +540,12 @@ ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt);
 ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
 {
     ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck));
-    if (prt->snapshot != erts_smp_atomic_read(&erts_ports_snapshot)) {
+    if (prt->snapshot != erts_smp_atomic32_read_acqb(&erts_ports_snapshot)) {
 	/* Dead ports are added from the end of the snapshot buffer */
 	Eterm* tombstone = (Eterm*) erts_smp_atomic_addtest(&erts_dead_ports_ptr,
 							    -(erts_aint_t)sizeof(Eterm));
 	ASSERT(tombstone+1 != NULL);
-	ASSERT(prt->snapshot == (Uint32) erts_smp_atomic_read(&erts_ports_snapshot) - 1);
+	ASSERT(prt->snapshot == erts_smp_atomic32_read(&erts_ports_snapshot) - 1);
 	*tombstone = prt->id;
     }
     /*else no ongoing snapshot or port was already included or created after snapshot */
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index d9df90fe7d..df5f8b22a3 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -56,7 +56,6 @@ static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
 							 per thread basis (for BC interfaces) */
 
 Port* erts_port;			/* The port table */
-erts_smp_atomic_t erts_ports_alive;
 erts_smp_atomic_t erts_bytes_out;	/* No bytes sent out of the system */
 erts_smp_atomic_t erts_bytes_in;	/* No bytes gotten into the system */
 
@@ -193,7 +192,7 @@ typedef struct line_buf_context {
 
 static erts_smp_spinlock_t get_free_port_lck;
 static Uint last_port_num;
 static Uint port_num_mask;
-erts_smp_atomic_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */
+erts_smp_atomic32_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */
 
 
 static ERTS_INLINE void
@@ -424,10 +423,9 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
     new_name = (char*) erts_alloc(ERTS_ALC_T_PORT_NAME, sys_strlen(name)+1);
     sys_strcpy(new_name, name);
     erts_smp_runq_lock(runq);
-    erts_smp_atomic_inc(&erts_ports_alive);
     erts_smp_port_state_lock(prt);
     prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
-    prt->snapshot = (Uint32) erts_smp_atomic_read(&erts_ports_snapshot);
+    prt->snapshot = erts_smp_atomic32_read(&erts_ports_snapshot);
     old_name = prt->name;
     prt->name = new_name;
 #ifdef ERTS_SMP
@@ -1281,7 +1279,6 @@ void init_io(void)
 
     erts_smp_atomic_init(&erts_bytes_out, 0);
     erts_smp_atomic_init(&erts_bytes_in, 0);
-    erts_smp_atomic_init(&erts_ports_alive, 0);
 
     for (i = 0; i < erts_max_ports; i++) {
 	erts_port_task_init_sched(&erts_port[i].sched);
@@ -1303,7 +1300,7 @@ void init_io(void)
 	erts_port[i].port_data_lock = NULL;
     }
 
-    erts_smp_atomic_init(&erts_ports_snapshot, (erts_aint_t) 0);
+    erts_smp_atomic32_init(&erts_ports_snapshot, (erts_aint32_t) 0);
     last_port_num = 0;
     erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 21d6ce9304..3e9243c77d 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -99,7 +99,7 @@ static void rehash(SafeHash* h, int grow_limit)
 	erts_free(h->type, (void *) old_tab);
     }
     /*else already done */
-    erts_smp_atomic_set(&h->is_rehashing, 0);
+    erts_smp_atomic_set_relb(&h->is_rehashing, 0);
 }
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 218bd79584..71b374527e 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1137,6 +1137,11 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
 
  restart:
 
+#ifdef ERTS_BREAK_REQUESTED
+    if (ERTS_BREAK_REQUESTED)
+	erts_do_break_handling();
+#endif
+
     /* Figure out timeout value */
     if (do_wait) {
 	erts_time_remaining(&wait_time);
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 3ae5b8d747..f5c785d683 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -766,7 +766,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
 	    short filter;
 	    int fd = (int) ebuf[i].ident;
 
-	    switch ((int) ebuf[i].udata) {
+	    switch ((int) (long) ebuf[i].udata) {
 
 	    /*
 	     * Since we use a lazy update approach EV_DELETE will
@@ -805,7 +805,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
 			if (fd == (int) ebuf[j].ident) {
 			    ebuf[j].udata = (void *) ERTS_POLL_KQ_OP_HANDLED;
 			    if (!(ebuf[j].flags & EV_ERROR)) {
-				switch ((int) ebuf[j].udata) {
+				switch ((int) (long) ebuf[j].udata) {
 				case ERTS_POLL_KQ_OP_ADD2_W:
 				    filter = EVFILT_WRITE;
 				    goto rm_add_fb;
@@ -823,7 +823,8 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
 			    }
 			}
 		    }
 		    /* The other add succeded... */
-		    filter = (((int) ebuf[i].udata == ERTS_POLL_KQ_OP_ADD2_W)
+		    filter = ((((int) (long) ebuf[i].udata)
+			       == ERTS_POLL_KQ_OP_ADD2_W)
 			      ? EVFILT_READ
 			      : EVFILT_WRITE);
 		rm_add_fb:
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 7662f190ef..074e2e247f 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -693,6 +693,7 @@ static void *break_waiter(void *param)
 	    ResetEvent(harr[0]);
 	    erts_mtx_lock(&break_waiter_lock);
 	    erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
+	    ERTS_THR_MEMORY_BARRIER;
 	    SetEvent(break_happened_event);
 	    erts_mtx_unlock(&break_waiter_lock);
 	    break;
@@ -700,6 +701,7 @@ static void *break_waiter(void *param)
 	    ResetEvent(harr[1]);
 	    erts_mtx_lock(&break_waiter_lock);
 	    erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
+	    ERTS_THR_MEMORY_BARRIER;
 	    SetEvent(break_happened_event);
 	    erts_mtx_unlock(&break_waiter_lock);
 	    break;
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index d56693dbf8..2fc82c99a8 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -146,13 +146,13 @@ ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
 static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
 {
-#ifdef AO_HAVE_fetch_and_add
-    return ((ETHR_AINT_T__) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+#ifdef AO_HAVE_fetch_and_add_full
+    return ((ETHR_AINT_T__) AO_fetch_and_add_full(&var->counter, (AO_t) incr)) + incr;
 #else
     while (1) {
 	AO_t exp = AO_load(&var->counter);
 	AO_t new = exp + (AO_t) incr;
-	if (AO_compare_and_swap(&var->counter, exp, new))
+	if (AO_compare_and_swap_full(&var->counter, exp, new))
 	    return (ETHR_AINT_T__) new;
     }
 #endif
@@ -167,8 +167,8 @@ ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
 static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
 {
-#ifdef AO_HAVE_fetch_and_add1
-    return ((ETHR_AINT_T__) AO_fetch_and_add1(&var->counter)) + 1;
+#ifdef AO_HAVE_fetch_and_add1_full
+    return ((ETHR_AINT_T__) AO_fetch_and_add1_full(&var->counter)) + 1;
 #else
     return ETHR_NATMC_FUNC__(add_return)(var, 1);
 #endif
@@ -183,8 +183,8 @@ ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
 static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
 {
-#ifdef AO_HAVE_fetch_and_sub1
-    return ((ETHR_AINT_T__) AO_fetch_and_sub1(&var->counter)) - 1;
+#ifdef AO_HAVE_fetch_and_sub1_full
+    return ((ETHR_AINT_T__) AO_fetch_and_sub1_full(&var->counter)) - 1;
 #else
     return ETHR_NATMC_FUNC__(add_return)(var, -1);
 #endif
@@ -202,7 +202,7 @@ ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
     while (1) {
 	AO_t exp = AO_load(&var->counter);
 	AO_t new = exp & ((AO_t) mask);
-	if (AO_compare_and_swap(&var->counter, exp, new))
+	if (AO_compare_and_swap_full(&var->counter, exp, new))
 	    return (ETHR_AINT_T__) exp;
     }
 }
@@ -213,7 +213,7 @@ ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
     while (1) {
 	AO_t exp = AO_load(&var->counter);
 	AO_t new = exp | ((AO_t) mask);
-	if (AO_compare_and_swap(&var->counter, exp, new))
+	if (AO_compare_and_swap_full(&var->counter, exp, new))
 	    return (ETHR_AINT_T__) exp;
     }
 }
@@ -225,7 +225,7 @@ ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
 {
     ETHR_AINT_T__ act;
     do {
-	if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
+	if (AO_compare_and_swap_full(&var->counter, (AO_t) exp, (AO_t) new))
 	    return exp;
 	act = (ETHR_AINT_T__) AO_load(&var->counter);
     } while (act == exp);
@@ -237,7 +237,7 @@ ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
 {
     while (1) {
 	AO_t exp = AO_load(&var->counter);
-	if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
+	if (AO_compare_and_swap_full(&var->counter, exp, (AO_t) new))
 	    return (ETHR_AINT_T__) exp;
     }
 }
@@ -265,7 +265,6 @@ ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
     return ((ETHR_AINT_T__) AO_fetch_and_add1_acquire(&var->counter)) + 1;
 #else
     ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(add_return)(var, 1);
-    ETHR_MEMORY_BARRIER;
     return res;
 #endif
 }
@@ -287,7 +286,6 @@ ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
 #ifdef AO_HAVE_fetch_and_sub1_release
     return ((ETHR_AINT_T__) AO_fetch_and_sub1_release(&var->counter)) - 1;
 #else
-    ETHR_MEMORY_BARRIER;
     return ETHR_NATMC_FUNC__(dec_return)(var);
 #endif
 }
@@ -314,7 +312,6 @@ ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
     return act;
 #else
     ETHR_AINT_T__ act = ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
-    ETHR_MEMORY_BARRIER;
     return act;
 #endif
 }
@@ -333,7 +330,6 @@ ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
     } while (act == exp);
     return act;
 #else
-    ETHR_MEMORY_BARRIER;
     return ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
 #endif
 }
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 00380dbf07..16182f8b01 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -95,7 +95,7 @@ ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
 {
     ETHR_AINT_T__ old, tmp;
 
-    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
     do {
 	old = var->counter;
 	tmp = old+incr;
@@ -105,7 +105,7 @@ ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
 	    : "r"(old), "r"(&var->counter), "0"(tmp)
 	    : "memory");
     } while (__builtin_expect(old != tmp, 0));
-    __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+    __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
     return old+incr;
 }
 
@@ -144,7 +144,7 @@ ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
 {
     ETHR_AINT_T__ old, tmp;
 
-    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
     do {
 	old = var->counter;
 	tmp = old & mask;
@@ -154,7 +154,7 @@ ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
 	    : "r"(old), "r"(&var->counter), "0"(tmp)
 	    : "memory");
     } while (__builtin_expect(old != tmp, 0));
-    __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+    __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
     return old;
 }
 
@@ -163,7 +163,7 @@ ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
 {
     ETHR_AINT_T__ old, tmp;
 
-    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
     do {
 	old = var->counter;
 	tmp = old | mask;
@@ -173,7 +173,7 @@ ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
 	    : "r"(old), "r"(&var->counter), "0"(tmp)
 	    : "memory");
     } while (__builtin_expect(old != tmp, 0));
-    __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+    __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
     return old;
 }
 
@@ -182,7 +182,7 @@ ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
 {
     ETHR_AINT_T__ old, new;
 
-    __asm__ __volatile__("membar #LoadLoad|#StoreLoad");
+    __asm__ __volatile__("membar #LoadLoad|#StoreLoad" : : : "memory");
     do {
 	old = var->counter;
 	new = val;
@@ -192,20 +192,20 @@ ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
 	    : "r"(old), "r"(&var->counter), "0"(new)
 	    : "memory");
     } while (__builtin_expect(old != new, 0));
-    __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+    __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
     return old;
 }
 
 static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
 {
-    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
+    __asm__ __volatile__("membar #LoadLoad|#StoreLoad\n" : : : "memory");
     __asm__ __volatile__(
 	ETHR_CAS__ " [%2], %1, %0"
 	: "=&r"(new)
 	: "r"(old), "r"(&var->counter), "0"(new)
 	: "memory");
-    __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+    __asm__ __volatile__("membar #StoreLoad|#StoreStore" : : : "memory");
     return new;
 }
 
@@ -213,13 +213,11 @@ ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__
  * Atomic ops with at least specified barriers.
  */
 
-/* TODO: relax acquire barriers */
-
 static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
 {
     ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
-    __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore" : : : "memory");
+    __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
     return res;
 }
 
@@ -234,21 +232,18 @@ static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
 {
     ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
-    __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
     return res;
 }
 
 static ETHR_INLINE void
 ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
 {
-    __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
     ETHR_NATMC_FUNC__(dec)(var);
 }
 
 static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
 {
-    __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
     return ETHR_NATMC_FUNC__(dec_return)(var);
 }
 
@@ -256,14 +251,12 @@ static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
 {
     ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
-    __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
     return res;
 }
 
 static ETHR_INLINE ETHR_AINT_T__
 ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
 {
-    __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
     return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
 }
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 48e4c0c6c8..0c7b597a6b 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -65,25 +65,35 @@ ethr_native_atomic32_read(ethr_native_atomic32_t *var)
 static ETHR_INLINE void
 ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
 {
+    ETHR_MEMORY_BARRIER;
     atomic_add(&var->counter, incr);
+    ETHR_MEMORY_BARRIER;
 }
 
 static ETHR_INLINE void
 ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
 {
+    ETHR_MEMORY_BARRIER;
     atomic_increment(&var->counter);
+    ETHR_MEMORY_BARRIER;
 }
 
 static ETHR_INLINE void
 ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
 {
+    ETHR_MEMORY_BARRIER;
    atomic_decrement(&var->counter);
+    ETHR_MEMORY_BARRIER;
 }
 
 static ETHR_INLINE ethr_sint32_t
 ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
 {
-    return atomic_exchange_and_add(&var->counter, incr) + incr;
+    ethr_sint32_t res;
+    ETHR_MEMORY_BARRIER;
+    res = atomic_exchange_and_add(&var->counter, incr) + incr;
+    ETHR_MEMORY_BARRIER;
+    return res;
 }
 
 static ETHR_INLINE ethr_sint32_t
@@ -101,18 +111,27 @@ ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
 static ETHR_INLINE ethr_sint32_t
 ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
 {
-    return atomic_and_val(&var->counter, mask);
+    ethr_sint32_t res;
+    ETHR_MEMORY_BARRIER;
+    res = atomic_and_val(&var->counter, mask);
+    ETHR_MEMORY_BARRIER;
+    return res;
 }
 
 static ETHR_INLINE ethr_sint32_t
 ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
 {
-    return atomic_or_val(&var->counter, mask);
+    ethr_sint32_t res;
+    ETHR_MEMORY_BARRIER;
+    res = atomic_or_val(&var->counter, mask);
+    ETHR_MEMORY_BARRIER;
+    return res;
 }
 
 static ETHR_INLINE ethr_sint32_t
 ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
 {
+    ETHR_MEMORY_BARRIER;
     return atomic_exchange_acq(&var->counter, val);
 }
 
@@ -121,6 +140,7 @@ ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
 			     ethr_sint32_t new,
 			     ethr_sint32_t expected)
 {
+    ETHR_MEMORY_BARRIER;
     return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
 }
 
@@ -139,9 +159,7 @@ ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
 static ETHR_INLINE ethr_sint32_t
 ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
 {
-    ethr_sint32_t res = ethr_native_atomic32_inc_return(var);
-    ETHR_MEMORY_BARRIER;
-    return res;
+    return ethr_native_atomic32_inc_return(var);
 }
 
 static ETHR_INLINE void
@@ -154,14 +172,12 @@ ethr_native_atomic32_set_relb(ethr_native_atomic32_t *var, ethr_sint32_t val)
 static ETHR_INLINE void
 ethr_native_atomic32_dec_relb(ethr_native_atomic32_t *var)
 {
-    ETHR_MEMORY_BARRIER;
     ethr_native_atomic32_dec(var);
 }
 
 static ETHR_INLINE ethr_sint32_t
 ethr_native_atomic32_dec_return_relb(ethr_native_atomic32_t *var)
 {
-    ETHR_MEMORY_BARRIER;
     return ethr_native_atomic32_dec_return(var);
 }
 
@@ -178,7 +194,6 @@ ethr_native_atomic32_cmpxchg_relb(ethr_native_atomic32_t *var,
 				  ethr_sint32_t new,
 				  ethr_sint32_t exp)
 {
-    ETHR_MEMORY_BARRIER;
     return ethr_native_atomic32_cmpxchg(var, new, exp);
 }
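The tile/atomic.h hunk above illustrates the general recipe for a platform whose native atomic ops carry no ordering guarantees: wrap the plain operation in full memory barriers to obtain a fully ordered op, then drop the redundant barrier from the _acqb/_relb variants wherever the native op (TILE's *_acq primitives) or the remaining barrier already provides the required ordering. A hypothetical C11 equivalent, with ETHR_MEMORY_BARRIER approximated by a seq_cst fence and an illustrative counter variable:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic int32_t counter;  /* illustrative stand-in */

    /* Fully ordered add_return: bracket a relaxed native op with full
     * barriers (seq_cst fences stand in for ETHR_MEMORY_BARRIER). */
    static int32_t add_return_full(int32_t incr)
    {
        int32_t res;
        atomic_thread_fence(memory_order_seq_cst);
        res = atomic_fetch_add_explicit(&counter, incr,
                                        memory_order_relaxed) + incr;
        atomic_thread_fence(memory_order_seq_cst);
        return res;
    }

    /* Acquire-only variant: when the native op already behaves as an
     * acquire (as TILE's atomic_exchange_acq does), no extra trailing
     * barrier is needed, which is exactly why the hunk above removes
     * the barriers from the _acqb functions. */
    static int32_t xchg_acqb(int32_t val)
    {
        return atomic_exchange_explicit(&counter, val, memory_order_acquire);
    }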