Diffstat (limited to 'erts/emulator/beam')
35 files changed, 1553 insertions, 1275 deletions
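The bulk of this diff replaces the old, implicitly full-barrier erts_smp_atomic operations with explicitly suffixed variants: _nob (no barrier), _acqb (acquire barrier), _relb (release barrier), _rb/_wb (read/write barrier) and _mb (full barrier), and renames the combined operations accordingly (inctest becomes inc_read, bor becomes read_bor, and so on). As a rough guide to what the suffixes mean, the standalone C11 sketch below (not ERTS code; every name in it is invented for illustration) shows the same ordering levels on a statistics counter and a published flag, the two patterns that dominate the changes below. Build with e.g. cc -std=c11 -pthread.

/*
 * Illustrative sketch only (not ERTS code): the ordering levels that the
 * erts_smp_atomic_*_nob / _relb / _acqb suffixes distinguish, expressed
 * with C11 <stdatomic.h>.  The names stats_counter, data and data_ready
 * are made up for this example.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_long stats_counter = 0; /* pure statistic, like atom_put_ops        */
static long        data          = 0; /* ordinary (non-atomic) payload            */
static atomic_int  data_ready    = 0; /* published flag, like dist_cmd_scheduled  */

static void *producer(void *arg)
{
    (void)arg;
    /* "_nob": no ordering needed; the value is only ever read for reporting. */
    atomic_fetch_add_explicit(&stats_counter, 1, memory_order_relaxed);

    data = 42; /* plain store that must become visible before the flag is seen */
    /* "_relb": release barrier orders the store to data before the flag store. */
    atomic_store_explicit(&data_ready, 1, memory_order_release);
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    /* "_acqb": acquire barrier pairs with the release store above. */
    while (!atomic_load_explicit(&data_ready, memory_order_acquire))
        ; /* spin until published */
    printf("data = %ld, ops = %ld\n", data,
           atomic_load_explicit(&stats_counter, memory_order_relaxed));
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    pthread_create(&c, NULL, consumer, NULL);
    pthread_create(&p, NULL, producer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}

The relaxed counter corresponds to the statistics counters switched to _nob in the diff (atom_put_ops, bits_bufs_size, tot_link_lh_size), while the release/acquire pair mirrors the dist_cmd_scheduled and fixdel handshakes; operations that still need a full barrier keep an _mb suffix.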
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c index b97705ed96..d7c7f117cf 100644 --- a/erts/emulator/beam/atom.c +++ b/erts/emulator/beam/atom.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2010. All Rights Reserved. + * Copyright Ericsson AB 1996-2011. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -75,7 +75,7 @@ void atom_info(int to, void *to_arg) index_info(to, to_arg, &erts_atom_table); #ifdef ERTS_ATOM_PUT_OPS_STAT erts_print(to, to_arg, "atom_put_ops: %ld\n", - erts_smp_atomic_read(&atom_put_ops)); + erts_smp_atomic_read_nob(&atom_put_ops)); #endif if (lock) @@ -213,7 +213,7 @@ am_atom_put(const char* name, int len) len = MAX_ATOM_LENGTH; } #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_inc(&atom_put_ops); + erts_smp_atomic_inc_nob(&atom_put_ops); #endif a.len = len; a.name = (byte*)name; @@ -309,7 +309,7 @@ init_atom_table(void) rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_init(&atom_put_ops, 0); + erts_smp_atomic_init_nob(&atom_put_ops, 0); #endif erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab"); diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 31910888d1..773baad01f 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2000-2010. All Rights Reserved. + * Copyright Ericsson AB 2000-2011. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -408,7 +408,7 @@ erts_is_count_break(BeamInstr *pc, Sint *count_ret) { if (bdc) { if (count_ret) { - *count_ret = (Sint) erts_smp_atomic_read(&bdc->acount); + *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount); } return !0; } @@ -958,17 +958,17 @@ static int set_function_break(Module *modp, BeamInstr *pc, int bif, if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { if (count_op == erts_break_stop) { - count = erts_smp_atomic_read(&bdc->acount); + count = erts_smp_atomic_read_nob(&bdc->acount); if (count >= 0) { while(1) { - res = erts_smp_atomic_cmpxchg(&bdc->acount, -count - 1, count); + res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count); if ((res == count) || count < 0) break; count = res; } } } else { /* Reset call counter */ - erts_smp_atomic_set(&bdc->acount, 0); + erts_smp_atomic_set_nob(&bdc->acount, 0); } } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { @@ -1097,7 +1097,7 @@ static int set_function_break(Module *modp, BeamInstr *pc, int bif, } } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { BpDataCount *bdc = (BpDataCount *) bd; - erts_smp_atomic_init(&bdc->acount, 0); + erts_smp_atomic_init_nob(&bdc->acount, 0); } if (bif == BREAK_IS_ERL) { diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h index bd8a7249a7..2ec5818688 100644 --- a/erts/emulator/beam/beam_bp.h +++ b/erts/emulator/beam/beam_bp.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2000-2010. All Rights Reserved. + * Copyright Ericsson AB 2000-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -165,8 +165,8 @@ do { \ bdc = (BpDataCount *) bdc->next; \ ASSERT(bdc); \ bds[ix] = (BpData *) bdc; \ - count = erts_smp_atomic_read(&bdc->acount); \ - if (count >= 0) erts_smp_atomic_inc(&bdc->acount); \ + count = erts_smp_atomic_read_nob(&bdc->acount); \ + if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \ *(instr_result) = bdc->orig_instr; \ } while (0) diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 68b3350d7f..98dde066fc 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -811,7 +811,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1) so.min_heap_size = H_MIN_SIZE; so.min_vheap_size = BIN_VH_MIN_SIZE; so.priority = PRIORITY_NORMAL; - so.max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs); + so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); so.scheduler = 0; /* @@ -3417,10 +3417,10 @@ BIF_RETTYPE ports_0(BIF_ALIST_0) erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */ - erts_smp_atomic_set(&erts_dead_ports_ptr, - (erts_aint_t) (port_buf + erts_max_ports)); + erts_smp_atomic_set_nob(&erts_dead_ports_ptr, + (erts_aint_t) (port_buf + erts_max_ports)); - next_ss = erts_smp_atomic32_inctest(&erts_ports_snapshot); + next_ss = erts_smp_atomic32_inc_read_relb(&erts_ports_snapshot); for (i = erts_max_ports-1; i >= 0; i--) { Port* prt = &erts_port[i]; @@ -3434,8 +3434,8 @@ BIF_RETTYPE ports_0(BIF_ALIST_0) erts_smp_port_state_unlock(prt); } - dead_ports = (Eterm*)erts_smp_atomic_xchg(&erts_dead_ports_ptr, - (erts_aint_t) NULL); + dead_ports = (Eterm*)erts_smp_atomic_xchg_nob(&erts_dead_ports_ptr, + (erts_aint_t) NULL); erts_smp_mtx_unlock(&ports_snapshot_mtx); ASSERT(pp <= dead_ports); @@ -3942,8 +3942,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n); - oval = (Uint) erts_smp_atomic32_xchg(&erts_max_gen_gcs, - (erts_aint32_t) nval); + oval = (Uint) erts_smp_atomic32_xchg_nob(&erts_max_gen_gcs, + (erts_aint32_t) nval); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_min_heap_size) { int oval = H_MIN_SIZE; @@ -4286,7 +4286,7 @@ void erts_init_bif(void) erts_smp_spinlock_init(&make_ref_lock, "make_ref"); erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot"); - erts_smp_atomic_init(&erts_dead_ports_ptr, (erts_aint_t) NULL); + erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL); /* * bif_return_trap/1 is a hidden BIF that bifs that need to diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index b8889e6206..432b3d0780 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -626,7 +626,7 @@ bin_check(void) erts_printf("%p orig_size: %bpd, norefs = %bpd\n", bp->val, bp->val->orig_size, - erts_smp_atomic_read(&bp->val->refc)); + erts_smp_atomic_read_nob(&bp->val->refc)); } } if (printed) { @@ -650,7 +650,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) char dumpnamebuf[MAXPATHLEN]; char* dumpname; - if (ERTS_IS_CRASH_DUMPING) + if (ERTS_SOMEONE_IS_CRASH_DUMPING) return; /* Wait for all threads to block. If all threads haven't blocked @@ -667,7 +667,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) /* Allow us to pass certain places without locking... 
*/ #ifdef ERTS_SMP - erts_smp_atomic_inc(&erts_writing_erl_crash_dump); + erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1); + erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1); #else erts_writing_erl_crash_dump = 1; #endif diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index b1cdd0660a..ad042ec088 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -128,8 +128,8 @@ delete_cache(ErtsAtomCache *cache) { if (cache) { erts_free(ERTS_ALC_T_DCACHE, (void *) cache); - ASSERT(erts_smp_atomic_read(&no_caches) > 0); - erts_smp_atomic_dec(&no_caches); + ASSERT(erts_smp_atomic_read_nob(&no_caches) > 0); + erts_smp_atomic_dec_nob(&no_caches); } } @@ -147,7 +147,7 @@ create_cache(DistEntry *dep) dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE, sizeof(ErtsAtomCache)); - erts_smp_atomic_inc(&no_caches); + erts_smp_atomic_inc_nob(&no_caches); for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) { cp->in_arr[i] = THE_NON_VALUE; cp->out_arr[i] = THE_NON_VALUE; @@ -156,7 +156,7 @@ create_cache(DistEntry *dep) Uint erts_dist_cache_size(void) { - return (Uint) erts_smp_atomic_read(&no_caches)*sizeof(ErtsAtomCache); + return (Uint) erts_smp_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache); } static ErtsProcList * @@ -444,7 +444,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) ErtsMonitor *monitors; Uint32 flags; - erts_smp_atomic_set(&dep->dist_cmd_scheduled, 1); + erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 1); erts_smp_de_rwlock(dep); ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid) @@ -510,7 +510,7 @@ void init_dist(void) { init_nodes_monitors(); - erts_smp_atomic_init(&no_caches, 0); + erts_smp_atomic_init_nob(&no_caches, 0); /* Lookup/Install all references to trap functions */ dsend2_trap = trap_function(am_dsend,2); @@ -596,7 +596,7 @@ static void clear_dist_entry(DistEntry *dep) suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL); erts_smp_mtx_unlock(&dep->qlock); - erts_smp_atomic_set(&dep->dist_cmd_scheduled, 0); + erts_smp_atomic_set_nob(&dep->dist_cmd_scheduled, 0); dep->send = NULL; erts_smp_de_rwunlock(dep); @@ -1775,7 +1775,7 @@ erts_dist_command(Port *prt, int reds_limit) erts_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be removed if port command fails */ - erts_smp_atomic_xchg(&dep->dist_cmd_scheduled, 0); + erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0); erts_smp_de_rlock(dep); flags = dep->flags; diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index 695a4fc3fe..845151c895 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2010. All Rights Reserved. + * Copyright Ericsson AB 1996-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -203,7 +203,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) id = dep->cid; } - if (!erts_smp_atomic_xchg(&dep->dist_cmd_scheduled, 1)) { + if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) { (void) erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD, diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index 9631fb50db..d714eacd06 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -369,7 +369,7 @@ BIF_RETTYPE erl_ddll_try_load_3(Process *p, Eterm path_term, if (!(prt->status & FREE_PORT_FLAGS) && prt->drv_ptr->handle == dh) { #if DDLL_SMP - erts_smp_atomic_inc(&prt->refc); + erts_smp_atomic_inc_nob(&prt->refc); /* Extremely rare spinlock */ while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { erts_smp_port_state_unlock(prt); @@ -597,7 +597,7 @@ done: if (!(prt->status & FREE_PORT_FLAGS) && prt->drv_ptr->handle == dh) { #if DDLL_SMP - erts_smp_atomic_inc(&prt->refc); + erts_smp_atomic_inc_nob(&prt->refc); /* Extremely rare spinlock */ while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { erts_smp_port_state_unlock(prt); @@ -1054,7 +1054,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) if (!(prt->status & FREE_PORT_FLAGS) && prt->drv_ptr->handle == dh) { #if DDLL_SMP - erts_smp_atomic_inc(&prt->refc); + erts_smp_atomic_inc_nob(&prt->refc); while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { erts_smp_port_state_unlock(prt); erts_smp_port_state_lock(prt); @@ -1602,7 +1602,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) erts_sys_ddll_close(dh->handle); return ERL_DE_LOAD_ERROR_BAD_NAME; } - erts_smp_atomic_init(&(dh->refc), (erts_aint_t) 0); + erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); dh->port_count = 0; dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1); sys_strcpy(dh->full_path, path); diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index f264bf44df..6a74596f76 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -135,7 +135,7 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh) if (szp) *szp += 4+2; if (hpp) { - Uint refc = (Uint) erts_smp_atomic_read(&pb->val->refc); + Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc); tuple = TUPLE3(*hpp, val, orig_size, make_small(refc)); res = CONS(*hpp + 4, tuple, res); *hpp += 4+2; @@ -2026,7 +2026,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = TUPLE2(hp, am_sequential_tracer, val); BIF_RET(res); } else if (BIF_ARG_1 == am_garbage_collection){ - Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs); + Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); Eterm tup; hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2); @@ -2041,7 +2041,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); } else if (BIF_ARG_1 == am_fullsweep_after){ - Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs); + Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_fullsweep_after, make_small(val)); BIF_RET(res); @@ -2545,6 +2545,70 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) hp = hsz ? 
HAlloc(BIF_P, hsz) : NULL; res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit); BIF_RET(res); + } else if (ERTS_IS_ATOM_STR("print_ethread_info", BIF_ARG_1)) { + int i; + char **str; +#ifdef ETHR_NATIVE_ATOMIC32_IMPL + erts_printf("32-bit native atomics: %s\n", + ETHR_NATIVE_ATOMIC32_IMPL); + str = ethr_native_atomic32_ops(); + for (i = 0; str[i]; i++) + erts_printf("ethr_native_atomic32_%s()\n", str[i]); +#endif +#ifdef ETHR_NATIVE_ATOMIC64_IMPL + erts_printf("64-bit native atomics: %s\n", + ETHR_NATIVE_ATOMIC64_IMPL); + str = ethr_native_atomic64_ops(); + for (i = 0; str[i]; i++) + erts_printf("ethr_native_atomic64_%s()\n", str[i]); +#endif +#ifdef ETHR_NATIVE_DW_ATOMIC_IMPL + if (ethr_have_native_dw_atomic()) { + erts_printf("Double word native atomics: %s\n", + ETHR_NATIVE_DW_ATOMIC_IMPL); + str = ethr_native_dw_atomic_ops(); + for (i = 0; str[i]; i++) + erts_printf("ethr_native_dw_atomic_%s()\n", str[i]); + str = ethr_native_su_dw_atomic_ops(); + for (i = 0; str[i]; i++) + erts_printf("ethr_native_su_dw_atomic_%s()\n", str[i]); + } +#endif +#ifdef ETHR_NATIVE_SPINLOCK_IMPL + erts_printf("Native spin-locks: %s\n", ETHR_NATIVE_SPINLOCK_IMPL); +#endif +#ifdef ETHR_NATIVE_RWSPINLOCK_IMPL + erts_printf("Native rwspin-locks: %s\n", ETHR_NATIVE_RWSPINLOCK_IMPL); +#endif +#ifdef ETHR_X86_RUNTIME_CONF_HAVE_SSE2__ + erts_printf("SSE2 support: %s\n", (ETHR_X86_RUNTIME_CONF_HAVE_SSE2__ + ? "yes" : "no")); +#endif +#ifdef ETHR_X86_OUT_OF_ORDER + erts_printf("x86" +#ifdef ARCH_64 + "_64" +#endif + " out of order\n"); +#endif +#ifdef ETHR_SPARC_TSO + erts_printf("Sparc TSO\n"); +#endif +#ifdef ETHR_SPARC_PSO + erts_printf("Sparc PSO\n"); +#endif +#ifdef ETHR_SPARC_RMO + erts_printf("Sparc RMO\n"); +#endif +#if defined(ETHR_PPC_HAVE_LWSYNC) + erts_printf("Have lwsync instruction: yes\n"); +#elif defined(ETHR_PPC_HAVE_NO_LWSYNC) + erts_printf("Have lwsync instruction: no\n"); +#elif defined(ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__) + erts_printf("Have lwsync instruction: %s (runtime test)\n", + ETHR_PPC_RUNTIME_CONF_HAVE_LWSYNC__ ? "yes" : "no"); +#endif + BIF_RET(am_true); } BIF_ERROR(BIF_P, BADARG); @@ -2845,7 +2909,7 @@ fun_info_2(Process* p, Eterm fun, Eterm what) } break; case am_refc: - val = erts_make_integer(erts_smp_atomic_read(&funp->fe->refc), p); + val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p); hp = HAlloc(p, 3); break; case am_arity: @@ -3065,8 +3129,8 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1) Eterm r1, r2; Eterm in, out; Uint hsz = 9; - Uint bytes_in = (Uint) erts_smp_atomic_read(&erts_bytes_in); - Uint bytes_out = (Uint) erts_smp_atomic_read(&erts_bytes_out); + Uint bytes_in = (Uint) erts_smp_atomic_read_nob(&erts_bytes_in); + Uint bytes_out = (Uint) erts_smp_atomic_read_nob(&erts_bytes_out); (void) erts_bld_uint(NULL, &hsz, bytes_in); (void) erts_bld_uint(NULL, &hsz, bytes_out); @@ -3139,7 +3203,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) * NOTE: Only supposed to be used for testing, and debugging. 
*/ - if (!erts_smp_atomic_read(&available_internal_state)) { + if (!erts_smp_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -3437,7 +3501,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1) && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) { erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true); - erts_aint_t prev_on = erts_smp_atomic_xchg(&available_internal_state, on); + erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on); if (on) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Process %T ", BIF_P->id); @@ -3453,7 +3517,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(prev_on ? am_true : am_false); } - if (!erts_smp_atomic_read(&available_internal_state)) { + if (!erts_smp_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -3634,14 +3698,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) { /* Used by hipe test suites */ - erts_aint_t flag = erts_smp_atomic_read(&hipe_test_reschedule_flag); + erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag); if (!flag && BIF_ARG_2 != am_false) { - erts_smp_atomic_set(&hipe_test_reschedule_flag, 1); + erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - erts_smp_atomic_set(&hipe_test_reschedule_flag, !flag); + erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag); BIF_RET(NIL); } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) { @@ -3951,8 +4015,8 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1) void erts_bif_info_init(void) { - erts_smp_atomic_init(&available_internal_state, 0); - erts_smp_atomic_init(&hipe_test_reschedule_flag, 0); + erts_smp_atomic_init_nob(&available_internal_state, 0); + erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0); process_info_init(); } diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index e56084b9cb..326a5c136b 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -76,14 +76,12 @@ struct erl_bits_state ErlBitsState; #define byte_buf (ErlBitsState.byte_buf_) #define byte_buf_len (ErlBitsState.byte_buf_len_) -#ifdef ERTS_SMP static erts_smp_atomic_t bits_bufs_size; -#endif Uint erts_bits_bufs_size(void) { - return 0; + return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size); } #if !defined(ERTS_SMP) @@ -109,8 +107,8 @@ erts_bits_destroy_state(ERL_BITS_PROTO_0) void erts_init_bits(void) { + erts_smp_atomic_init_nob(&bits_bufs_size, 0); #if defined(ERTS_SMP) - erts_smp_atomic_init(&bits_bufs_size, 0); /* erl_process.c calls erts_bits_init_state() on all state instances */ #else ERL_BITS_DECLARE_STATEP; @@ -713,9 +711,7 @@ static void ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need)) { if (byte_buf_len < need) { -#ifdef ERTS_SMP - erts_smp_atomic_add(&bits_bufs_size, need - byte_buf_len); -#endif + erts_smp_atomic_add_nob(&bits_bufs_size, need - byte_buf_len); byte_buf_len = need; byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len); } diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c index bcf8bcf270..03c0ef904a 100644 --- a/erts/emulator/beam/erl_cpu_topology.c +++ b/erts/emulator/beam/erl_cpu_topology.c @@ -487,7 
+487,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp) /* Make sure we check if we should bind to a cpu or not... */ if (esdp->run_queue->flags & ERTS_RUNQ_FLG_SHARED_RUNQ) - erts_smp_atomic32_set(&esdp->chk_cpu_bind, 1); + erts_smp_atomic32_set_nob(&esdp->chk_cpu_bind, 1); else esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND; } @@ -503,7 +503,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_cpu_groups_callback_call_t *cgcc; #ifdef ERTS_SMP if (erts_common_run_queue) - erts_smp_atomic32_set(&esdp->chk_cpu_bind, 0); + erts_smp_atomic32_set_nob(&esdp->chk_cpu_bind, 0); else { esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND; } diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index e9bdeb35ef..0327850cb9 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -224,21 +224,21 @@ static void free_dbtable(DbTable* tb) { #ifdef HARDDEBUG - if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) { + if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n", - erts_smp_atomic_read(&tb->common.memory_size)-sizeof(DbTable), + erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), tb->common.fixations); } erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n", tb->common.id); erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n", - erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size)); + erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size)); print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab); erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n", - erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size)); + erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size)); print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab); #endif #ifdef ERTS_SMP @@ -248,6 +248,7 @@ free_dbtable(DbTable* tb) ASSERT(is_immed(tb->common.heir_data)); erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable)); ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable)); + ERTS_THR_MEMORY_BARRIER; } #ifdef ERTS_SMP @@ -1417,12 +1418,12 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) { DbTable init_tb; - erts_smp_atomic_init(&init_tb.common.memory_size, 0); + erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0); tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable)); - erts_smp_atomic_init(&tb->common.memory_size, - erts_smp_atomic_read(&init_tb.common.memory_size)); + erts_smp_atomic_init_nob(&tb->common.memory_size, + erts_smp_atomic_read_nob(&init_tb.common.memory_size)); } tb->common.meth = meth; @@ -1439,7 +1440,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) tb->common.owner = BIF_P->id; set_heir(BIF_P, tb, heir, heir_data); - erts_smp_atomic_init(&tb->common.nitems, 0); + erts_smp_atomic_init_nob(&tb->common.nitems, 0); tb->common.fixations = NULL; tb->common.compress = is_compressed; @@ -1505,9 +1506,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) BIF_ARG_1, BIF_ARG_2, ret, BIF_P->id, BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]); erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n", - erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size)); + erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size)); erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n", - 
erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size)); + erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size)); #endif UseTmpHeap(3,BIF_P); @@ -1996,7 +1997,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) { BIF_ERROR(BIF_P, BADARG); } - nitems = erts_smp_atomic_read(&tb->common.nitems); + nitems = erts_smp_atomic_read_nob(&tb->common.nitems); tb->common.meth->db_delete_all_objects(BIF_P, tb); db_unlock(tb, LCK_WRITE); BIF_RET(erts_make_integer(nitems,BIF_P)); @@ -2790,7 +2791,7 @@ void init_db(void) } #endif - erts_smp_atomic_init(&erts_ets_misc_mem_size, 0); + erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0); db_initialize_util(); if (user_requested_db_max_tabs < DB_DEF_MAX_TABS) @@ -2832,13 +2833,13 @@ void init_db(void) /*TT*/ /* Create meta table invertion. */ - erts_smp_atomic_init(&init_tb.common.memory_size, 0); + erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0); meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable)); - erts_smp_atomic_init(&meta_pid_to_tab->common.memory_size, - erts_smp_atomic_read(&init_tb.common.memory_size)); + erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size, + erts_smp_atomic_read_nob(&init_tb.common.memory_size)); meta_pid_to_tab->common.id = NIL; meta_pid_to_tab->common.the_name = am_true; @@ -2851,7 +2852,7 @@ void init_db(void) #endif meta_pid_to_tab->common.keypos = 1; meta_pid_to_tab->common.owner = NIL; - erts_smp_atomic_init(&meta_pid_to_tab->common.nitems, 0); + erts_smp_atomic_init_nob(&meta_pid_to_tab->common.nitems, 0); meta_pid_to_tab->common.slot = -1; meta_pid_to_tab->common.meth = &db_hash; meta_pid_to_tab->common.compress = 0; @@ -2864,13 +2865,13 @@ void init_db(void) erl_exit(1,"Unable to create ets metadata tables."); } - erts_smp_atomic_set(&init_tb.common.memory_size, 0); + erts_smp_atomic_set_nob(&init_tb.common.memory_size, 0); meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable)); - erts_smp_atomic_init(&meta_pid_to_fixed_tab->common.memory_size, - erts_smp_atomic_read(&init_tb.common.memory_size)); + erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size, + erts_smp_atomic_read_nob(&init_tb.common.memory_size)); meta_pid_to_fixed_tab->common.id = NIL; meta_pid_to_fixed_tab->common.the_name = am_true; @@ -2883,7 +2884,7 @@ void init_db(void) #endif meta_pid_to_fixed_tab->common.keypos = 1; meta_pid_to_fixed_tab->common.owner = NIL; - erts_smp_atomic_init(&meta_pid_to_fixed_tab->common.nitems, 0); + erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.nitems, 0); meta_pid_to_fixed_tab->common.slot = -1; meta_pid_to_fixed_tab->common.meth = &db_hash; meta_pid_to_fixed_tab->common.compress = 0; @@ -3422,7 +3423,7 @@ static void unfix_table_locked(Process* p, DbTable* tb, unlocked: if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status) - && erts_smp_atomic_read(&tb->hash.fixdel) != (erts_aint_t)NULL) { + && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) { #ifdef ERTS_SMP if (*kind_p == LCK_READ && tb->common.is_thread_safe) { /* Must have write lock while purging pseudo-deleted (OTP-8166) */ @@ -3607,7 +3608,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) Eterm ret = THE_NON_VALUE; if (What == am_size) { - ret = make_small(erts_smp_atomic_read(&tb->common.nitems)); + ret = 
make_small(erts_smp_atomic_read_nob(&tb->common.nitems)); } else if (What == am_type) { if (tb->common.status & DB_SET) { ret = am_set; @@ -3620,7 +3621,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) ret = am_bag; } } else if (What == am_memory) { - Uint words = (Uint) ((erts_smp_atomic_read(&tb->common.memory_size) + Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint)); @@ -3714,7 +3715,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) std_dev_exp = make_float(hp); PUT_DOUBLE(f, hp); hp += FLOAT_SIZE_OBJECT; - ret = TUPLE7(hp, make_small(erts_smp_atomic_read(&tb->hash.nactive)), + ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)), avg, std_dev_real, std_dev_exp, make_small(stats.min_chain_len), make_small(stats.max_chain_len), @@ -3734,9 +3735,9 @@ static void print_table(int to, void *to_arg, int show, DbTable* tb) tb->common.meth->db_print(to, to_arg, show, tb); - erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read(&tb->common.nitems)); + erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems)); erts_print(to, to_arg, "Words: %bpu\n", - (UWord) ((erts_smp_atomic_read(&tb->common.memory_size) + (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint))); @@ -3762,8 +3763,9 @@ void db_info(int to, void *to_arg, int show) /* Called by break handler */ Uint erts_get_ets_misc_mem_size(void) { + ERTS_THR_MEMORY_BARRIER; /* Memory not allocated in ets_alloc */ - return (Uint) erts_smp_atomic_read(&erts_ets_misc_mem_size); + return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size); } /* SMP Note: May only be used when system is locked */ diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index e0bdebcb01..2e5deaf338 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2010. All Rights Reserved. + * Copyright Ericsson AB 1996-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -86,11 +86,11 @@ do { \ erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \ - ((erts_aint_t) (FREE_SZ))); \ ASSERT((TAB)); \ - erts_smp_atomic_add(&(TAB)->common.memory_size, sz__); \ + erts_smp_atomic_add_nob(&(TAB)->common.memory_size, sz__); \ } while (0) #define ERTS_ETS_MISC_MEM_ADD(SZ) \ - erts_smp_atomic_add(&erts_ets_misc_mem_size, (SZ)); + erts_smp_atomic_add_nob(&erts_ets_misc_mem_size, (SZ)); ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type, DbTable *tab, @@ -227,7 +227,7 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size) ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0); ASSERT(((void *) tab) != ptr - || erts_smp_atomic_read(&tab->common.memory_size) == 0); + || erts_smp_atomic_read_nob(&tab->common.memory_size) == 0); erts_free(type, ptr); } diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index fdc82c8b88..e3380a57b2 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -105,9 +105,13 @@ #define NSEG_2 256 /* Size of second segment table */ #define NSEG_INC 128 /* Number of segments to grow after that */ -#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_acqb(&(tb)->segtab)) -#define NACTIVE(tb) ((int)erts_smp_atomic_read(&(tb)->nactive)) -#define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems)) +#ifdef ETHR_ORDERED_READ_DEPEND +#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_nob(&(tb)->segtab)) +#else +#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_rb(&(tb)->segtab)) +#endif +#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) +#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) #define BUCKET(tb, i) SEGTAB(tb)[(i) >> SEGSZ_EXP]->buckets[(i) & SEGSZ_MASK] @@ -123,10 +127,10 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval) { Uint mask = erts_smp_atomic_read_acqb(&tb->szm); - Uint ix = hval & mask; - if (ix >= erts_smp_atomic_read(&tb->nactive)) { + Uint ix = hval & mask; + if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) { ix &= mask>>1; - ASSERT(ix < erts_smp_atomic_read(&tb->nactive)); + ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive)); } return ix; } @@ -141,14 +145,14 @@ static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix) (DbTable *) tb, sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion)); - fixd->slot = ix; - was_next = erts_smp_atomic_read(&tb->fixdel); + fixd->slot = ix; + was_next = erts_smp_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic insertion in linked list: */ exp_next = was_next; fixd->next = (FixedDeletion*) exp_next; - was_next = erts_smp_atomic_cmpxchg(&tb->fixdel, - (erts_aint_t) fixd, - exp_next); + was_next = erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + (erts_aint_t) fixd, + exp_next); }while (was_next != exp_next); } @@ -540,22 +544,24 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) { /*int tries = 0;*/ DEBUG_WAIT(); - if (erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel, - (erts_aint_t)NULL) != (erts_aint_t)NULL) { + if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + (erts_aint_t) fixdel, + (erts_aint_t) NULL) != (erts_aint_t) NULL) { /* Oboy, must join lists */ FixedDeletion* last = fixdel; erts_aint_t was_tail; erts_aint_t exp_tail; - while (last->next != NULL) last = last->next; - was_tail = erts_smp_atomic_read(&tb->fixdel); + while (last->next != NULL) last = 
last->next; + was_tail = erts_smp_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic list insertion */ exp_tail = was_tail; last->next = (FixedDeletion*) exp_tail; /*++tries;*/ DEBUG_WAIT(); - was_tail = erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel, - exp_tail); + was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + (erts_aint_t) fixdel, + exp_tail); }while (was_tail != exp_tail); } /*erts_fprintf(stderr,"erl_db_hash: restore_fixdel tries=%d\r\n", tries);*/ @@ -572,7 +578,8 @@ void db_unfix_table_hash(DbTableHash *tb) || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock) && !tb->common.is_thread_safe)); restart: - fixdel = (FixedDeletion*) erts_smp_atomic_xchg(&tb->fixdel, (erts_aint_t)NULL); + fixdel = (FixedDeletion*) erts_smp_atomic_xchg_acqb(&tb->fixdel, + (erts_aint_t) NULL); while (fixdel != NULL) { FixedDeletion *fx = fixdel; int ix = fx->slot; @@ -639,14 +646,14 @@ int db_create_hash(Process *p, DbTable *tbl) { DbTableHash *tb = &tbl->hash; - erts_smp_atomic_init(&tb->szm, SEGSZ_MASK); - erts_smp_atomic_init(&tb->nactive, SEGSZ); - erts_smp_atomic_init(&tb->fixdel, (erts_aint_t)NULL); - erts_smp_atomic_init(&tb->segtab, (erts_aint_t) alloc_ext_seg(tb,0,NULL)->segtab); + erts_smp_atomic_init_nob(&tb->szm, SEGSZ_MASK); + erts_smp_atomic_init_nob(&tb->nactive, SEGSZ); + erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); + erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t) alloc_ext_seg(tb,0,NULL)->segtab); tb->nsegs = NSEG_1; tb->nslots = SEGSZ; - erts_smp_atomic_init(&tb->is_resizing, 0); + erts_smp_atomic_init_nob(&tb->is_resizing, 0); #ifdef ERTS_SMP if (tb->common.type & DB_FINE_LOCKED) { erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; @@ -663,7 +670,7 @@ int db_create_hash(Process *p, DbTable *tbl) /* This important property is needed to guarantee that the buckets * involved in a grow/shrink operation it protected by the same lock: */ - ASSERT(erts_smp_atomic_read(&tb->nactive) % DB_HASH_LOCK_CNT == 0); + ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); } else { /* coarse locking */ tb->locks = NULL; @@ -783,7 +790,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) if (tb->common.status & DB_SET) { HashDbTerm* bnext = b->next; if (b->hvalue == INVALID_HASH) { - erts_smp_atomic_inc(&tb->common.nitems); + erts_smp_atomic_inc_nob(&tb->common.nitems); } else if (key_clash_fail) { ret = DB_ERROR_BADKEY; @@ -811,7 +818,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) do { if (db_eq(&tb->common,obj,&q->dbterm)) { if (q->hvalue == INVALID_HASH) { - erts_smp_atomic_inc(&tb->common.nitems); + erts_smp_atomic_inc_nob(&tb->common.nitems); q->hvalue = hval; if (q != b) { /* must move to preserve key insertion order */ *qp = q->next; @@ -832,7 +839,7 @@ Lnew: q->hvalue = hval; q->next = b; *bp = q; - nitems = erts_smp_atomic_inctest(&tb->common.nitems); + nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); WUNLOCK_HASH(lck); { int nactive = NACTIVE(tb); @@ -1069,7 +1076,7 @@ int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value) EQ(value, b->dbterm.tpl[2])) { *bp = b->next; free_term(tb, b); - erts_smp_atomic_dec(&tb->common.nitems); + erts_smp_atomic_dec_nob(&tb->common.nitems); b = *bp; break; } @@ -1128,7 +1135,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add(&tb->common.nitems, nitems_diff); + erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1187,7 +1194,7 @@ 
static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add(&tb->common.nitems, nitems_diff); + erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1798,7 +1805,7 @@ static int db_select_delete_hash(Process *p, free_term(tb, del); did_erase = 1; } - erts_smp_atomic_dec(&tb->common.nitems); + erts_smp_atomic_dec_nob(&tb->common.nitems); ++got; } --num_left; @@ -1909,7 +1916,7 @@ static int db_select_delete_continue_hash(Process *p, free_term(tb, del); did_erase = 1; } - erts_smp_atomic_dec(&tb->common.nitems); + erts_smp_atomic_dec_nob(&tb->common.nitems); ++got; } @@ -2064,7 +2071,7 @@ int db_mark_all_deleted_hash(DbTable *tbl) }while(list != NULL); } } - erts_smp_atomic_set(&tb->common.nitems, 0); + erts_smp_atomic_set_nob(&tb->common.nitems, 0); return DB_ERROR_NONE; } @@ -2115,7 +2122,7 @@ static int db_free_table_continue_hash(DbTable *tbl) { DbTableHash *tb = &tbl->hash; int done; - FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read(&tb->fixdel); + FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel); ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb)); done = 0; @@ -2129,11 +2136,11 @@ static int db_free_table_continue_hash(DbTable *tbl) sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion)); if (++done >= 2*DELETE_RECORD_LIMIT) { - erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)fixdel); + erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); return 0; /* Not done */ } } - erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)NULL); + erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); done /= 2; while(tb->nslots != 0) { @@ -2157,7 +2164,7 @@ static int db_free_table_continue_hash(DbTable *tbl) tb->locks = NULL; } #endif - ASSERT(erts_smp_atomic_read(&tb->common.memory_size) == sizeof(DbTable)); + ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); return 1; /* Done */ } @@ -2350,7 +2357,7 @@ static int alloc_seg(DbTableHash *tb) struct ext_segment* eseg; eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1]; MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment); - erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t) eseg->segtab); + erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) eseg->segtab); tb->nsegs = eseg->nsegs; } ASSERT(seg_ix < tb->nsegs); @@ -2422,7 +2429,7 @@ static int free_seg(DbTableHash *tb, int free_records) MY_ASSERT(newtop->s.is_ext_segment); if (newtop->prev_segtab != NULL) { /* Time to use a smaller segtab */ - erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)newtop->prev_segtab); + erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t)newtop->prev_segtab); tb->nsegs = seg_ix; ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs); } @@ -2439,7 +2446,7 @@ static int free_seg(DbTableHash *tb, int free_records) if (seg_ix > 0) { if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL; } else { - erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)NULL); + erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t)NULL); } #endif tb->nslots -= SEGSZ; @@ -2500,7 +2507,7 @@ static void grow(DbTableHash* tb, int nactive) int from_ix; int szm; - if (erts_smp_atomic_xchg(&tb->is_resizing, 1)) { + if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) { return; /* already in progress */ } if (NACTIVE(tb) != nactive) { @@ -2515,7 +2522,7 @@ static void grow(DbTableHash* tb, int nactive) } ASSERT(nactive < tb->nslots); - szm = erts_smp_atomic_read(&tb->szm); + szm = erts_smp_atomic_read_nob(&tb->szm); if (nactive <= szm) { 
from_ix = nactive & (szm >> 1); } else { @@ -2532,7 +2539,7 @@ static void grow(DbTableHash* tb, int nactive) WUNLOCK_HASH(lck); goto abort; } - erts_smp_atomic_inc(&tb->nactive); + erts_smp_atomic_inc_nob(&tb->nactive); if (from_ix == 0) { erts_smp_atomic_set_relb(&tb->szm, szm); } @@ -2577,13 +2584,13 @@ abort: */ static void shrink(DbTableHash* tb, int nactive) { - if (erts_smp_atomic_xchg(&tb->is_resizing, 1)) { + if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) { return; /* already in progress */ } if (NACTIVE(tb) == nactive) { erts_smp_rwmtx_t* lck; int src_ix = nactive - 1; - int low_szm = erts_smp_atomic_read(&tb->szm) >> 1; + int low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; int dst_ix = src_ix & low_szm; ASSERT(dst_ix < src_ix); @@ -2610,7 +2617,7 @@ static void shrink(DbTableHash* tb, int nactive) *dst_bp = *src_bp; *src_bp = NULL; - erts_smp_atomic_set(&tb->nactive, src_ix); + erts_smp_atomic_set_nob(&tb->nactive, src_ix); if (dst_ix == 0) { erts_smp_atomic_set_relb(&tb->szm, low_szm); } @@ -2746,7 +2753,7 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl) } else { db_free_table_hash(tbl); db_create_hash(p, tbl); - erts_smp_atomic_set(&tbl->hash.common.nitems, 0); + erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0); } return 0; } diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index 9a0ba3a418..c6f0d80e32 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -49,7 +49,7 @@ #include "erl_db_tree.h" #define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos)) -#define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems)) +#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) /* ** A stack of this size is enough for an AVL tree with more than @@ -84,7 +84,7 @@ */ static DbTreeStack* get_static_stack(DbTableTree* tb) { - if (!erts_smp_atomic_xchg(&tb->is_stack_busy, 1)) { + if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { return &tb->static_stack; } return NULL; @@ -96,7 +96,7 @@ static DbTreeStack* get_static_stack(DbTableTree* tb) static DbTreeStack* get_any_stack(DbTableTree* tb) { DbTreeStack* stack; - if (!erts_smp_atomic_xchg(&tb->is_stack_busy, 1)) { + if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { return &tb->static_stack; } stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb, @@ -110,7 +110,7 @@ static DbTreeStack* get_any_stack(DbTableTree* tb) static void release_stack(DbTableTree* tb, DbTreeStack* stack) { if (stack == &tb->static_stack) { - ASSERT(erts_smp_atomic_read(&tb->is_stack_busy) == 1); + ASSERT(erts_smp_atomic_read_nob(&tb->is_stack_busy) == 1); erts_smp_atomic_set_relb(&tb->is_stack_busy, 0); } else { @@ -478,7 +478,7 @@ int db_create_tree(Process *p, DbTable *tbl) sizeof(TreeDbTerm *) * STACK_NEED); tb->static_stack.pos = 0; tb->static_stack.slot = 0; - erts_smp_atomic_init(&tb->is_stack_busy, 0); + erts_smp_atomic_init_nob(&tb->is_stack_busy, 0); tb->deletion = 0; return DB_ERROR_NONE; } @@ -613,8 +613,8 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail) for (;;) if (!*this) { /* Found our place */ state = 1; - if (erts_smp_atomic_inctest(&tb->common.nitems) >= TREE_MAX_ELEMENTS) { - erts_smp_atomic_dec(&tb->common.nitems); + if (erts_smp_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) { + erts_smp_atomic_dec_nob(&tb->common.nitems); return DB_ERROR_SYSRES; } *this = new_dbterm(tb, obj); @@ -1583,7 +1583,7 @@ static int db_select_delete_continue_tree(Process *p, sc.max = 1000; sc.keypos 
= tb->common.keypos; - ASSERT(!erts_smp_atomic_read(&tb->is_stack_busy)); + ASSERT(!erts_smp_atomic_read_nob(&tb->is_stack_busy)); traverse_backwards(tb, &tb->static_stack, lastkey, NULL, &doit_select_delete, &sc); BUMP_REDS(p, 1000 - sc.max); @@ -1774,7 +1774,7 @@ static int db_free_table_continue_tree(DbTable *tbl) (DbTable *) tb, (void *) tb->static_stack.array, sizeof(TreeDbTerm *) * STACK_NEED); - ASSERT(erts_smp_atomic_read(&tb->common.memory_size) + ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); } return result; @@ -1784,7 +1784,7 @@ static int db_delete_all_objects_tree(Process* p, DbTable* tbl) { db_free_table_tree(tbl); db_create_tree(p, tbl); - erts_smp_atomic_set(&tbl->tree.common.nitems, 0); + erts_smp_atomic_set_nob(&tbl->tree.common.nitems, 0); return 0; } @@ -1866,7 +1866,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, tstack[tpos++] = this; state = delsub(this); } - erts_smp_atomic_dec(&tb->common.nitems); + erts_smp_atomic_dec_nob(&tb->common.nitems); break; } } @@ -1933,7 +1933,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, tstack[tpos++] = this; state = delsub(this); } - erts_smp_atomic_dec(&tb->common.nitems); + erts_smp_atomic_dec_nob(&tb->common.nitems); break; } } diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index e5be1f253a..7dfbb2ed02 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -491,7 +491,7 @@ erts_match_set_release_result(Process* c_p) /* The trace control word. */ -static erts_smp_atomic_t trace_control_word; +static erts_smp_atomic32_t trace_control_word; /* This needs to be here, before the bif table... */ @@ -911,7 +911,7 @@ static void db_free_tmp_uncompressed(DbTerm* obj); BIF_RETTYPE db_get_trace_control_word_0(Process *p) { - Uint32 tcw = (Uint32) erts_smp_atomic_read(&trace_control_word); + Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word); BIF_RET(erts_make_integer((Uint) tcw, p)); } @@ -924,7 +924,8 @@ BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new) if (val != ((Uint32)val)) BIF_ERROR(p, BADARG); - old_tcw = (Uint32) erts_smp_atomic_xchg(&trace_control_word, (erts_aint_t) val); + old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word, + (erts_aint32_t) val); BIF_RET(erts_make_integer((Uint) old_tcw, p)); } @@ -1249,7 +1250,7 @@ void db_initialize_util(void){ sizeof(DMCGuardBif), (int (*)(const void *, const void *)) &cmp_guard_bif); match_pseudo_process_init(); - erts_smp_atomic_init(&trace_control_word, 0); + erts_smp_atomic32_init_nob(&trace_control_word, 0); } diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index e5fd2547a0..eaa5f9eefc 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -69,7 +69,8 @@ static void erl_init(int ncpu); #define ERTS_MIN_COMPAT_REL 7 #ifdef ERTS_SMP -erts_smp_atomic_t erts_writing_erl_crash_dump; +erts_smp_atomic32_t erts_writing_erl_crash_dump; +erts_tsd_key_t erts_is_crash_dumping_key; #else volatile int erts_writing_erl_crash_dump = 0; #endif @@ -323,7 +324,7 @@ init_shared_memory(int argc, char **argv) #endif global_gen_gcs = 0; - global_max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs); + global_max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); global_gc_flags = erts_default_process_flags; erts_global_offheap.mso = NULL; @@ -646,12 +647,14 @@ early_init(int *argc, char **argv) /* erts_lc_init(); #endif #ifdef ERTS_SMP - 
erts_smp_atomic_init(&erts_writing_erl_crash_dump, 0L); + erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L); + erts_tsd_key_create(&erts_is_crash_dumping_key); #else erts_writing_erl_crash_dump = 0; #endif - erts_smp_atomic32_init(&erts_max_gen_gcs, (erts_aint32_t) ((Uint16) -1)); + erts_smp_atomic32_init_nob(&erts_max_gen_gcs, + (erts_aint32_t) ((Uint16) -1)); erts_pre_init_process(); #if defined(USE_THREADS) && !defined(ERTS_SMP) @@ -858,7 +861,8 @@ erl_start(int argc, char **argv) envbufsz = sizeof(envbuf); if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) { Uint16 max_gen_gcs = atoi(envbuf); - erts_smp_atomic32_set(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs); + erts_smp_atomic32_set_nob(&erts_max_gen_gcs, + (erts_aint32_t) max_gen_gcs); } envbufsz = sizeof(envbuf); diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c index 9751b5d77c..47597f302b 100644 --- a/erts/emulator/beam/erl_monitors.c +++ b/erts/emulator/beam/erl_monitors.c @@ -125,7 +125,7 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, Eterm pid, Eterm name) } else { n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH, mon_size*sizeof(Uint)); - erts_smp_atomic_add(&tot_link_lh_size, mon_size*sizeof(Uint)); + erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint)); } hp = n->heap; @@ -156,7 +156,7 @@ static ErtsLink *create_link(Uint type, Eterm pid) } else { n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH, lnk_size*sizeof(Uint)); - erts_smp_atomic_add(&tot_link_lh_size, lnk_size*sizeof(Uint)); + erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint)); } hp = n->heap; @@ -191,13 +191,13 @@ static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid) void erts_init_monitors(void) { - erts_smp_atomic_init(&tot_link_lh_size, 0); + erts_smp_atomic_init_nob(&tot_link_lh_size, 0); } Uint erts_tot_link_lh_size(void) { - return (Uint) erts_smp_atomic_read(&tot_link_lh_size); + return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size); } void erts_destroy_monitor(ErtsMonitor *mon) @@ -222,7 +222,7 @@ void erts_destroy_monitor(ErtsMonitor *mon) erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon); } else { erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon); - erts_smp_atomic_add(&tot_link_lh_size, -1*mon_size*sizeof(Uint)); + erts_smp_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint)); } } @@ -244,7 +244,7 @@ void erts_destroy_link(ErtsLink *lnk) erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk); } else { erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk); - erts_smp_atomic_add(&tot_link_lh_size, -1*lnk_size*sizeof(Uint)); + erts_smp_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint)); } } diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index 6daa127d23..af3873995e 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2001-2010. All Rights Reserved. + * Copyright Ericsson AB 2001-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -118,7 +118,7 @@ dist_table_alloc(void *dep_tmpl) dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - erts_smp_atomic_init(&dep->dist_cmd_scheduled, 0); + erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0); erts_port_task_handle_init(&dep->dist_cmd); dep->send = NULL; dep->cache = NULL; @@ -767,7 +767,7 @@ void erts_init_node_tables(void) erts_this_dist_entry->finalized_out_queue.first = NULL; erts_this_dist_entry->finalized_out_queue.last = NULL; - erts_smp_atomic_init(&erts_this_dist_entry->dist_cmd_scheduled, 0); + erts_smp_atomic_init_nob(&erts_this_dist_entry->dist_cmd_scheduled, 0); erts_port_task_handle_init(&erts_this_dist_entry->dist_cmd); erts_this_dist_entry->send = NULL; erts_this_dist_entry->cache = NULL; diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index e6b55c45e4..6aa5161b08 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -121,7 +121,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_taskq, static ERTS_INLINE ErtsPortTask * handle2task(ErtsPortTaskHandle *pthp) { - return (ErtsPortTask *) erts_smp_atomic_read(pthp); + return (ErtsPortTask *) erts_smp_atomic_read_nob(pthp); } static ERTS_INLINE void @@ -129,7 +129,7 @@ reset_handle(ErtsPortTask *ptp) { if (ptp->handle) { ASSERT(ptp == handle2task(ptp->handle)); - erts_smp_atomic_set(ptp->handle, (erts_aint_t) NULL); + erts_smp_atomic_set_nob(ptp->handle, (erts_aint_t) NULL); } } @@ -138,7 +138,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp) { ptp->handle = pthp; if (pthp) { - erts_smp_atomic_set(pthp, (erts_aint_t) ptp); + erts_smp_atomic_set_nob(pthp, (erts_aint_t) ptp); ASSERT(ptp == handle2task(ptp->handle)); } } @@ -479,8 +479,8 @@ erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp) case ERTS_PORT_TASK_INPUT: case ERTS_PORT_TASK_OUTPUT: case ERTS_PORT_TASK_EVENT: - ASSERT(erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) > 0); - erts_smp_atomic_dec(&erts_port_task_outstanding_io_tasks); + ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0); + erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks); break; default: break; @@ -568,7 +568,7 @@ erts_port_task_schedule(Eterm id, ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); if (xrunq) { /* Port emigrated ... */ - erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq); + erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); erts_smp_runq_unlock(runq); runq = xrunq; } @@ -594,7 +594,7 @@ erts_port_task_schedule(Eterm id, case ERTS_PORT_TASK_INPUT: case ERTS_PORT_TASK_OUTPUT: case ERTS_PORT_TASK_EVENT: - erts_smp_atomic_inc(&erts_port_task_outstanding_io_tasks); + erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); /* Fall through... 
*/ default: enqueue_task(pp->sched.taskq, ptp); @@ -662,7 +662,7 @@ erts_port_task_free_port(Port *pp) pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED; erts_may_save_closed_port(pp); erts_smp_port_state_unlock(pp); - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 1); + ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1); ptp->type = ERTS_PORT_TASK_FREE; ptp->event = (ErlDrvEvent) -1; ptp->event_data = NULL; @@ -684,9 +684,9 @@ erts_port_task_free_port(Port *pp) erts_may_save_closed_port(pp); erts_smp_port_state_unlock(pp); #ifdef ERTS_SMP - erts_smp_atomic_dec(&pp->refc); /* Not alive */ + erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */ #endif - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 0); /* Lock */ + ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ handle_remaining_tasks(runq, pp); /* May release runq lock */ ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first)); pp->sched.taskq = NULL; @@ -724,7 +724,7 @@ resume_after_block(void *vd) ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd; erts_smp_runq_lock(d->runq); if (d->resp) - *d->resp = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) + *d->resp = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) != (erts_aint_t) 0); } @@ -832,8 +832,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) ASSERT(!ptqp->first && (!pp->sched.taskq || !pp->sched.taskq->first)); #ifdef ERTS_SMP - erts_smp_atomic_dec(&pp->refc); /* Not alive */ - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&pp->refc) > 0); /* Lock */ + erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */ + ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ #else erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE); #endif @@ -906,14 +906,16 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_unblock_fpe(fpe_was_unmasked); if (io_tasks_executed) { - ASSERT(erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) >= io_tasks_executed); - erts_smp_atomic_add(&erts_port_task_outstanding_io_tasks, -1*io_tasks_executed); + ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) + >= io_tasks_executed); + erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks, + -1*io_tasks_executed); } *curr_port_pp = NULL; #ifdef ERTS_SMP - ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read(&pp->run_queue)); + ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); #endif if (!pp->sched.taskq) { @@ -940,7 +942,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) } else { /* Port emigrated ... 
*/ - erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq); + erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); enqueue_port(xrunq, pp); ASSERT(pp->sched.exe_taskq); pp->sched.exe_taskq = NULL; @@ -951,7 +953,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) port_was_enqueued = 1; } - res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) + res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) != (erts_aint_t) 0); ERTS_PT_CHK_PRES_PORTQ(runq, pp); @@ -972,13 +974,13 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) { erts_aint_t refc; erts_smp_mtx_unlock(pp->lock); - refc = erts_smp_atomic_dectest(&pp->refc); + refc = erts_smp_atomic_dec_read_nob(&pp->refc); ASSERT(refc >= 0); if (refc == 0) { erts_smp_runq_unlock(runq); erts_port_cleanup(pp); /* Might aquire runq lock */ erts_smp_runq_lock(runq); - res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) + res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) != (erts_aint_t) 0); } } @@ -1107,12 +1109,12 @@ erts_port_migrate(Port *prt, int *prt_locked, /* Refuse to migrate to a suspended run queue */ if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED) return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED; - if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue)) + if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue)) return ERTS_MIGRATE_FAILED_RUNQ_CHANGED; if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt)) return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ; dequeue_port(from_rq, prt); - erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) to_rq); + erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) to_rq); enqueue_port(to_rq, prt); return ERTS_MIGRATE_SUCCESS; } @@ -1125,7 +1127,8 @@ erts_port_migrate(Port *prt, int *prt_locked, void erts_port_task_init(void) { - erts_smp_atomic_init(&erts_port_task_outstanding_io_tasks, (erts_aint_t) 0); + erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks, + (erts_aint_t) 0); init_port_task_alloc(); init_port_taskq_alloc(); } diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index 3e2c5f07ab..d7104e1143 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -79,13 +79,13 @@ ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void); ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp) { - erts_smp_atomic_init(pthp, (erts_aint_t) NULL); + erts_smp_atomic_init_nob(pthp, (erts_aint_t) NULL); } ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp) { - return ((void *) erts_smp_atomic_read(pthp)) != NULL; + return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL; } ERTS_GLB_INLINE void @@ -102,8 +102,8 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp) ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void) { - ERTS_THR_MEMORY_BARRIER; - return erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != 0; + return (erts_smp_atomic_read_acqb(&erts_port_task_outstanding_io_tasks) + != 0); } #endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index ba3b32dd97..9574435415 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -134,15 +134,15 @@ int erts_disable_proc_not_running_opt; #ifndef DEBUG #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \ - erts_smp_atomic32_set(&schdlr_sspnd.changing, (VAL)) + erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, (VAL)) 
#else #define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \ do { \ erts_aint32_t old_val__; \ - old_val__ = erts_smp_atomic32_xchg(&schdlr_sspnd.changing, \ - (VAL)); \ + old_val__ = erts_smp_atomic32_xchg_nob(&schdlr_sspnd.changing, \ + (VAL)); \ ASSERT(old_val__ == (OLD_VAL)); \ } while (0) @@ -158,7 +158,7 @@ static struct { erts_smp_atomic32_t changing; erts_smp_atomic32_t active; struct { - erts_smp_atomic32_t ongoing; + int ongoing; long wait_active; ErtsProcList *procs; } msb; /* Multi Scheduling Block */ @@ -410,7 +410,7 @@ erts_init_process(int ncpu) init_proclist_alloc(); - erts_smp_atomic32_init(&process_count, 0); + erts_smp_atomic32_init_nob(&process_count, 0); if (erts_use_r9_pids_ports) { proc_bits = ERTS_R9_PROC_BITS; @@ -694,8 +694,8 @@ erts_smp_schedule_misc_aux_work(int ignore_self, erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx); ssi = ERTS_SCHED_SLEEP_INFO_IX(ix); - aux_work = erts_smp_atomic32_bor(&ssi->aux_work, - ERTS_SSI_AUX_WORK_MISC); + aux_work = erts_smp_atomic32_read_bor_nob(&ssi->aux_work, + ERTS_SSI_AUX_WORK_MISC); if ((aux_work & ERTS_SSI_AUX_WORK_MISC) == 0) erts_sched_poke(ssi); } @@ -711,8 +711,8 @@ erts_smp_notify_check_children_needed(void) erts_aint32_t aux_work; ErtsSchedulerSleepInfo *ssi; ssi = ERTS_SCHED_SLEEP_INFO_IX(i); - aux_work = erts_smp_atomic32_bor(&ssi->aux_work, - ERTS_SSI_AUX_WORK_CHECK_CHILDREN); + aux_work = erts_smp_atomic32_read_bor_nob(&ssi->aux_work, + ERTS_SSI_AUX_WORK_CHECK_CHILDREN); if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN)) erts_sched_poke(ssi); } @@ -727,15 +727,15 @@ blockable_aux_work(ErtsSchedulerData *esdp, { if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) { if (aux_work & ERTS_SSI_AUX_WORK_MISC) { - aux_work = erts_smp_atomic32_band(&ssi->aux_work, - ~ERTS_SSI_AUX_WORK_MISC); + aux_work = erts_smp_atomic32_read_band_nob(&ssi->aux_work, + ~ERTS_SSI_AUX_WORK_MISC); aux_work &= ~ERTS_SSI_AUX_WORK_MISC; handle_misc_aux_work(esdp); } #ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) { - aux_work = erts_smp_atomic32_band(&ssi->aux_work, - ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN); + aux_work = erts_smp_atomic32_band_nob(&ssi->aux_work, + ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN); aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN; erts_check_children(); } @@ -815,7 +815,7 @@ erts_active_schedulers(void) static ERTS_INLINE void clear_sys_scheduling(void) { - erts_smp_atomic32_set_relb(&doing_sys_schedule, 0); + erts_smp_atomic32_set_mb(&doing_sys_schedule, 0); } static ERTS_INLINE int @@ -882,42 +882,43 @@ sched_active(Uint no, ErtsRunQueue *rq) static int ERTS_INLINE ongoing_multi_scheduling_block(void) { - return erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing) != 0; + ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx)); + return schdlr_sspnd.msb.ongoing; } static ERTS_INLINE void empty_runq(ErtsRunQueue *rq) { - erts_aint32_t oifls = erts_smp_atomic32_band(&rq->info_flags, - ~ERTS_RUNQ_IFLG_NONEMPTY); + erts_aint32_t oifls = erts_smp_atomic32_read_band_nob(&rq->info_flags, + ~ERTS_RUNQ_IFLG_NONEMPTY); if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues); + erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time no_empty_run_queues may have * been increased twice for a specific run queue. 
*/ ASSERT(0 <= empty && empty < 2*erts_no_run_queues); #endif - erts_smp_atomic32_inc(&no_empty_run_queues); + erts_smp_atomic32_inc_relb(&no_empty_run_queues); } } static ERTS_INLINE void non_empty_runq(ErtsRunQueue *rq) { - erts_aint32_t oifls = erts_smp_atomic32_bor(&rq->info_flags, - ERTS_RUNQ_IFLG_NONEMPTY); + erts_aint32_t oifls = erts_smp_atomic32_read_bor_nob(&rq->info_flags, + ERTS_RUNQ_IFLG_NONEMPTY); if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues); + erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time no_empty_run_queues may have * been increased twice for a specific run queue. */ ASSERT(0 < empty && empty <= 2*erts_no_run_queues); #endif - erts_smp_atomic32_dec(&no_empty_run_queues); + erts_smp_atomic32_dec_relb(&no_empty_run_queues); } } @@ -930,7 +931,7 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi) erts_aint32_t xflgs = 0; do { - oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs); + oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -947,7 +948,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi) erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING; do { - oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs); + oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -989,7 +990,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type) erts_tse_reset(ssi->event); while (1) { - oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs); + oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) @@ -1049,7 +1050,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) tse_wait: #ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK - aux_work = erts_smp_atomic32_read(&ssi->aux_work); + aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); tse_blockable_aux_work: aux_work = blockable_aux_work(esdp, ssi, aux_work); #endif @@ -1059,7 +1060,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) #ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK #ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK - aux_work = erts_smp_atomic32_read(&ssi->aux_work); + aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); #endif nonblockable_aux_work(esdp, ssi, aux_work); #endif @@ -1092,7 +1093,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } #ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK - aux_work = erts_smp_atomic32_read(&ssi->aux_work); + aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) { erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL); goto tse_blockable_aux_work; @@ -1104,7 +1105,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL); if (flgs & ~ERTS_SSI_FLG_SUSPENDED) - erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); erts_smp_runq_lock(rq); sched_active(esdp->no, rq); @@ -1136,12 +1137,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sys_aux_work: #ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK - aux_work = erts_smp_atomic32_read(&ssi->aux_work); + aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); 
aux_work = blockable_aux_work(esdp, ssi, aux_work); #endif #ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK #ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK - aux_work = erts_smp_atomic32_read(&ssi->aux_work); + aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); #endif nonblockable_aux_work(esdp, ssi, aux_work); #endif @@ -1232,7 +1233,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sys_locked_woken: clear_sys_scheduling(); if (flgs & ~ERTS_SSI_FLG_SUSPENDED) - erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_smp_atomic32_read_band_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); sched_active_sys(esdp->no, rq); } } @@ -1248,7 +1249,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi) erts_aint32_t nflgs = 0; erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING; while (1) { - oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs); + oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return oflgs; nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED; @@ -1291,7 +1292,6 @@ wake_scheduler(ErtsRunQueue *rq, int incq, int one) erts_smp_spin_unlock(&sl->lock); - ERTS_THR_MEMORY_BARRIER; flgs = ssi_flags_set_wake(ssi); erts_sched_finish_poke(ssi, flgs); @@ -1337,13 +1337,13 @@ init_no_runqs(int active, int used) { erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK); no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT); - erts_smp_atomic32_init(&balance_info.no_runqs, no_runqs); + erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs); } static ERTS_INLINE void get_no_runqs(int *active, int *used) { - erts_aint32_t no_runqs = erts_smp_atomic32_read(&balance_info.no_runqs); + erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs); if (active) *active = (int) (no_runqs & ERTS_NO_RUNQS_MASK); if (used) @@ -1353,11 +1353,12 @@ get_no_runqs(int *active, int *used) static ERTS_INLINE void set_no_used_runqs(int used) { - erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs); + erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, new; - new = (used << ERTS_NO_USED_RUNQS_SHIFT) | (exp & ERTS_NO_RUNQS_MASK); - act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp); + new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT; + new |= exp & ERTS_NO_RUNQS_MASK; + act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -1367,11 +1368,12 @@ set_no_used_runqs(int used) static ERTS_INLINE void set_no_active_runqs(int active) { - erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs); + erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, new; - new = (exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT)) | active; - act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp); + new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); + new |= active & ERTS_NO_RUNQS_MASK; + act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -1381,13 +1383,14 @@ set_no_active_runqs(int active) static ERTS_INLINE int try_inc_no_active_runqs(int active) { - erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs); + erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active) return 0; if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) { 
erts_aint32_t new, act; - new = (exp & ~ERTS_NO_RUNQS_MASK) | active; - act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp); + new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); + new |= active & ERTS_NO_RUNQS_MASK; + act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) return 1; } @@ -1403,7 +1406,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate) if (crq->ix == ix) return 0; wrq = ERTS_RUNQ_IX(ix); - iflgs = erts_smp_atomic32_read(&wrq->info_flags); + iflgs = erts_smp_atomic32_read_nob(&wrq->info_flags); if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) { if (activate) { if (try_inc_no_active_runqs(ix+1)) { @@ -1645,15 +1648,15 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq) erts_smp_runq_lock(evac_rq); - erts_smp_atomic32_bor(&evac_rq->scheduler->ssi->flags, - ERTS_SSI_FLG_SUSPENDED); + erts_smp_atomic32_read_bor_nob(&evac_rq->scheduler->ssi->flags, + ERTS_SSI_FLG_SUSPENDED); evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK; evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK | ERTS_RUNQ_FLGS_EVACUATE_QMASK | ERTS_RUNQ_FLG_SUSPENDED); - erts_smp_atomic32_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED); + erts_smp_atomic32_read_bor_nob(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED); /* * Need to set up evacuation paths first since we * may release the run queue lock on evac_rq @@ -1902,7 +1905,7 @@ static ERTS_INLINE int check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix) { ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix); - erts_aint32_t iflgs = erts_smp_atomic32_read(&vrq->info_flags); + erts_aint32_t iflgs = erts_smp_atomic32_read_nob(&vrq->info_flags); if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY) return try_steal_task_from_victim(rq, rq_lockedp, vrq); else @@ -2054,7 +2057,7 @@ check_balance(ErtsRunQueue *c_rq) int forced, active, current_active, oowc, half_full_scheds, full_scheds, mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix; - if (erts_smp_atomic32_xchg(&balance_info.checking_balance, 1)) { + if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { c_rq->check_balance_reds = INT_MAX; return; } @@ -2062,7 +2065,7 @@ check_balance(ErtsRunQueue *c_rq) get_no_runqs(NULL, &blnc_no_rqs); if (blnc_no_rqs == 1) { c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set(&balance_info.checking_balance, 0); + erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); return; } @@ -2070,7 +2073,7 @@ check_balance(ErtsRunQueue *c_rq) if (balance_info.halftime) { balance_info.halftime = 0; - erts_smp_atomic32_set(&balance_info.checking_balance, 0); + erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); ERTS_FOREACH_RUNQ(rq, { if (rq->waiting) @@ -2104,7 +2107,7 @@ check_balance(ErtsRunQueue *c_rq) erts_smp_mtx_unlock(&balance_info.update_mtx); erts_smp_runq_lock(c_rq); c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set(&balance_info.checking_balance, 0); + erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); return; } @@ -2449,7 +2452,7 @@ erts_fprintf(stderr, "--------------------------------\n"); set_no_active_runqs(active); balance_info.halftime = 1; - erts_smp_atomic32_set(&balance_info.checking_balance, 0); + erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); /* Write migration paths and reset balance statistics in all queues */ for (qix = 0; qix < blnc_no_rqs; qix++) { @@ -2591,7 +2594,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, 
sizeof(ErtsAlignedRunQueue) * n); #ifdef ERTS_SMP - erts_smp_atomic32_init(&no_empty_run_queues, 0); + erts_smp_atomic32_init_nob(&no_empty_run_queues, 0); #endif erts_no_run_queues = n; @@ -2601,7 +2604,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); rq->ix = ix; - erts_smp_atomic32_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY); + erts_smp_atomic32_init_nob(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY); /* make sure that the "extra" id correponds to the schedulers * id if the esdp->no <-> ix+1 mapping change. @@ -2694,9 +2697,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) ssi->next = NULL; ssi->prev = NULL; #endif - erts_smp_atomic32_init(&ssi->flags, 0); + erts_smp_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_thread_func */ - erts_smp_atomic32_init(&ssi->aux_work, 0); + erts_smp_atomic32_init_nob(&ssi->aux_work, 0); } #endif @@ -2740,7 +2743,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) } #ifdef ERTS_SMP - erts_smp_atomic32_init(&esdp->chk_cpu_bind, 0); + erts_smp_atomic32_init_nob(&esdp->chk_cpu_bind, 0); #endif } @@ -2748,11 +2751,11 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd"); erts_smp_cnd_init(&schdlr_sspnd.cnd); - erts_smp_atomic32_init(&schdlr_sspnd.changing, 0); + erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0); schdlr_sspnd.online = no_schedulers_online; schdlr_sspnd.curr_online = no_schedulers; - erts_smp_atomic32_init(&schdlr_sspnd.msb.ongoing, 0); - erts_smp_atomic32_init(&schdlr_sspnd.active, no_schedulers); + schdlr_sspnd.msb.ongoing = 0; + erts_smp_atomic32_init_nob(&schdlr_sspnd.active, no_schedulers); schdlr_sspnd.msb.procs = NULL; init_no_runqs(no_schedulers, erts_common_run_queue ? 
1 : no_schedulers_online); @@ -2761,7 +2764,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) balance_info.forced_check_balance = 0; balance_info.halftime = 1; balance_info.full_reds_history_index = 0; - erts_smp_atomic32_init(&balance_info.checking_balance, 0); + erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0); balance_info.prev_rise.active_runqs = 0; balance_info.prev_rise.max_len = 0; balance_info.prev_rise.reds = 0; @@ -2770,8 +2773,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) if (no_schedulers_online < no_schedulers) { if (erts_common_run_queue) { for (ix = no_schedulers_online; ix < no_schedulers; ix++) - erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags, - ERTS_SSI_FLG_SUSPENDED); + erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags, + ERTS_SSI_FLG_SUSPENDED); } else { for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++) @@ -2785,7 +2788,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN | ERTS_SCHDLR_SSPND_CHNG_WAITER), 0); - erts_smp_atomic32_init(&doing_sys_schedule, 0); + erts_smp_atomic32_init_nob(&doing_sys_schedule, 0); init_misc_aux_work(); @@ -2801,7 +2804,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online) erts_no_schedulers = 1; #endif - erts_smp_atomic32_init(&function_calls, 0); + erts_smp_atomic32_init_nob(&function_calls, 0); /* init port tasks */ erts_port_task_init(); @@ -2928,10 +2931,10 @@ int erts_get_max_no_executing_schedulers(void) { #ifdef ERTS_SMP - if (erts_smp_atomic32_read(&schdlr_sspnd.changing)) + if (erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)) return (int) erts_no_schedulers; ERTS_THR_MEMORY_BARRIER; - return (int) erts_smp_atomic32_read(&schdlr_sspnd.active); + return (int) erts_smp_atomic32_read_nob(&schdlr_sspnd.active); #else return 1; #endif @@ -2961,7 +2964,7 @@ scheduler_ix_resume_wake(Uint ix) | ERTS_SSI_FLG_SUSPENDED); erts_aint32_t oflgs; do { - oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, 0, xflgs); + oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs); if (oflgs == xflgs) { erts_sched_finish_poke(ssi, oflgs); break; @@ -2980,7 +2983,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct) erts_aint32_t xflgs = xpct; do { - oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs); + oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -3030,7 +3033,7 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) erts_tse_reset(ssi->event); while (1) { - oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs); + oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING @@ -3085,15 +3088,15 @@ suspend_scheduler(ErtsSchedulerData *esdp) flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED); if (flgs & ERTS_SSI_FLG_SUSPENDED) { - active_schedulers = erts_smp_atomic32_dectest(&schdlr_sspnd.active); + active_schedulers = erts_smp_atomic32_dec_read_nob(&schdlr_sspnd.active); ASSERT(active_schedulers >= 1); - changing = erts_smp_atomic32_read(&schdlr_sspnd.changing); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) { if (active_schedulers == schdlr_sspnd.msb.wait_active) wake = 1; if (active_schedulers == 1) { - changing = 
erts_smp_atomic32_band(&schdlr_sspnd.changing, - ~ERTS_SCHDLR_SSPND_CHNG_MSB); + changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_MSB); changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB; } } @@ -3115,8 +3118,8 @@ suspend_scheduler(ErtsSchedulerData *esdp) && schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) wake = 1; if (schdlr_sspnd.online == schdlr_sspnd.curr_online) { - changing = erts_smp_atomic32_band(&schdlr_sspnd.changing, - ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN; } } @@ -3133,7 +3136,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) #ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK - aux_work = erts_smp_atomic32_read(&ssi->aux_work); + aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); blockable_aux_work: blockable_aux_work(esdp, ssi, aux_work); #endif @@ -3169,13 +3172,13 @@ suspend_scheduler(ErtsSchedulerData *esdp) | ERTS_SSI_FLG_SUSPENDED)); if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) break; - changing = erts_smp_atomic32_read(&schdlr_sspnd.changing); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER) break; #ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK - aux_work = erts_smp_atomic32_read(&ssi->aux_work); + aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) { erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL); goto blockable_aux_work; @@ -3187,19 +3190,19 @@ suspend_scheduler(ErtsSchedulerData *esdp) erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL); erts_smp_mtx_lock(&schdlr_sspnd.mtx); - changing = erts_smp_atomic32_read(&schdlr_sspnd.changing); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); } - active_schedulers = erts_smp_atomic32_inctest(&schdlr_sspnd.active); - changing = erts_smp_atomic32_read(&schdlr_sspnd.changing); + active_schedulers = erts_smp_atomic32_inc_read_nob(&schdlr_sspnd.active); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB) && schdlr_sspnd.online == active_schedulers) { - erts_smp_atomic32_band(&schdlr_sspnd.changing, - ~ERTS_SCHDLR_SSPND_CHNG_MSB); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_MSB); } ASSERT(no <= schdlr_sspnd.online); - ASSERT(!erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing)); + ASSERT(!ongoing_multi_scheduling_block()); } @@ -3228,7 +3231,7 @@ do { \ (RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \ (RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \ - erts_smp_atomic32_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\ + erts_smp_atomic32_read_band_nob(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\ for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \ (RQ)->procs.prio_info[pix__].max_len = 0; \ (RQ)->procs.prio_info[pix__].reds = 0; \ @@ -3272,7 +3275,7 @@ erts_schedulers_state(Uint *total, int res; erts_aint32_t changing; erts_smp_mtx_lock(&schdlr_sspnd.mtx); - changing = erts_smp_atomic32_read(&schdlr_sspnd.changing); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)) res = ERTS_SCHDLR_SSPND_YIELD_RESTART; else { @@ -3303,7 +3306,7 @@ erts_set_schedulers_online(Process *p, have_unlocked_plocks = 0; no = (int) new_no; - changing = erts_smp_atomic32_read(&schdlr_sspnd.changing); + changing = 
erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) { res = ERTS_SCHDLR_SSPND_YIELD_RESTART; } @@ -3378,8 +3381,8 @@ erts_set_schedulers_online(Process *p, for (ix = no; ix < online; ix++) { ErtsSchedulerSleepInfo *ssi; ssi = ERTS_SCHED_SLEEP_INFO_IX(ix); - erts_smp_atomic32_bor(&ssi->flags, - ERTS_SSI_FLG_SUSPENDED); + erts_smp_atomic32_read_bor_nob(&ssi->flags, + ERTS_SSI_FLG_SUSPENDED); } wake_all_schedulers(); } @@ -3426,11 +3429,11 @@ erts_set_schedulers_online(Process *p, NULL); ASSERT(res != ERTS_SCHDLR_SSPND_DONE ? (ERTS_SCHDLR_SSPND_CHNG_WAITER - & erts_smp_atomic32_read(&schdlr_sspnd.changing)) + & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)) : (ERTS_SCHDLR_SSPND_CHNG_WAITER - == erts_smp_atomic32_read(&schdlr_sspnd.changing))); - erts_smp_atomic32_band(&schdlr_sspnd.changing, - ~ERTS_SCHDLR_SSPND_CHNG_WAITER); + == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_WAITER); } } @@ -3449,7 +3452,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) ErtsProcList *plp; erts_smp_mtx_lock(&schdlr_sspnd.mtx); - changing = erts_smp_atomic32_read(&schdlr_sspnd.changing); + changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) { res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */ } @@ -3459,7 +3462,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) plp->next = schdlr_sspnd.msb.procs; schdlr_sspnd.msb.procs = plp; p->flags |= F_HAVE_BLCKD_MSCHED; - ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); ASSERT(p->scheduler_data->no == 1); res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; } @@ -3470,11 +3473,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) have_unlocked_plocks = 1; erts_smp_proc_unlock(p, plocks); } - ASSERT(0 == erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing)); - erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 1); + ASSERT(!ongoing_multi_scheduling_block()); + schdlr_sspnd.msb.ongoing = 1; if (online == 1) { res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; - ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); ASSERT(p->scheduler_data->no == 1); } else { @@ -3494,8 +3497,8 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) } if (erts_common_run_queue) { for (ix = 1; ix < online; ix++) - erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags, - ERTS_SSI_FLG_SUSPENDED); + erts_smp_atomic32_read_bor_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags, + ERTS_SSI_FLG_SUSPENDED); wake_all_schedulers(); } else { @@ -3523,7 +3526,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) susp_sched_prep_block, susp_sched_resume_block, NULL); - while (erts_smp_atomic32_read(&schdlr_sspnd.active) + while (erts_smp_atomic32_read_nob(&schdlr_sspnd.active) != schdlr_sspnd.msb.wait_active) erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx); erts_smp_activity_end(ERTS_ACTIVITY_WAIT, @@ -3532,11 +3535,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) NULL); ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED ? 
(ERTS_SCHDLR_SSPND_CHNG_WAITER - & erts_smp_atomic32_read(&schdlr_sspnd.changing)) + & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)) : (ERTS_SCHDLR_SSPND_CHNG_WAITER - == erts_smp_atomic32_read(&schdlr_sspnd.changing))); - erts_smp_atomic32_band(&schdlr_sspnd.changing, - ~ERTS_SCHDLR_SSPND_CHNG_WAITER); + == erts_smp_atomic32_read_nob(&schdlr_sspnd.changing))); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_WAITER); } plp = proclist_create(p); plp->next = schdlr_sspnd.msb.procs; @@ -3603,16 +3606,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) }); #endif p->flags &= ~F_HAVE_BLCKD_MSCHED; - erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 0); + schdlr_sspnd.msb.ongoing = 0; if (schdlr_sspnd.online == 1) { /* No schedulers to resume */ - ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1); + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB); } else if (erts_common_run_queue) { for (ix = 1; ix < schdlr_sspnd.online; ix++) - erts_smp_atomic32_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags, - ~ERTS_SSI_FLG_SUSPENDED); + erts_smp_atomic32_read_band_nob(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags, + ~ERTS_SSI_FLG_SUSPENDED); wake_all_schedulers(); } else { @@ -3662,7 +3665,7 @@ void erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value) { if (return_value == am_blocked) { - erts_aint32_t active = erts_smp_atomic32_read(&schdlr_sspnd.active); + erts_aint32_t active = erts_smp_atomic32_read_nob(&schdlr_sspnd.active); ASSERT(1 <= active && active <= 2); ASSERT(ERTS_PROC_GET_SCHDATA(p)->no == 1); } @@ -3745,12 +3748,12 @@ sched_thread_func(void *vesdp) erts_thread_init_float(); erts_smp_mtx_lock(&schdlr_sspnd.mtx); - ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.changing) + ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.changing) & ERTS_SCHDLR_SSPND_CHNG_ONLN); if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) { - erts_smp_atomic32_band(&schdlr_sspnd.changing, - ~ERTS_SCHDLR_SSPND_CHNG_ONLN); + erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + ~ERTS_SCHDLR_SSPND_CHNG_ONLN); if (((ErtsSchedulerData *) vesdp)->no != 1) erts_smp_cnd_signal(&schdlr_sspnd.cnd); } @@ -5207,7 +5210,7 @@ Process *schedule(Process *p, int calls) reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST; esdp->virtual_reds = 0; - fcalls = (int) erts_smp_atomic32_addtest(&function_calls, reds); + fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds); ASSERT(esdp && esdp == erts_get_scheduler_data()); rq = erts_get_runq_current(esdp); @@ -5342,7 +5345,7 @@ Process *schedule(Process *p, int calls) if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED) || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) & ERTS_SSI_FLG_SUSPENDED)) { - ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags) + ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags) & ERTS_SSI_FLG_SUSPENDED); suspend_scheduler(esdp); } @@ -5356,7 +5359,7 @@ Process *schedule(Process *p, int calls) || defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) { ErtsSchedulerSleepInfo *ssi = esdp->ssi; - erts_aint32_t aux_work = erts_smp_atomic32_read(&ssi->aux_work); + erts_aint32_t aux_work = erts_smp_atomic32_read_nob(&ssi->aux_work); if (aux_work) { erts_smp_runq_unlock(rq); #ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK @@ -5400,7 +5403,7 @@ Process *schedule(Process *p, int calls) if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED) || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) & ERTS_SSI_FLG_SUSPENDED)) { - 
ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags) + ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags) & ERTS_SSI_FLG_SUSPENDED); non_empty_runq(rq); goto continue_check_activities_to_run; @@ -5944,7 +5947,7 @@ erts_test_next_pid(int set, Uint next) Uint erts_process_count(void) { - erts_aint32_t res = erts_smp_atomic32_read(&process_count); + erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count); ASSERT(res >= 0); return (Uint) res; } @@ -5993,7 +5996,7 @@ alloc_process(void) ASSERT(!process_tab[p_next]); process_tab[p_next] = p; - erts_smp_atomic32_inc(&process_count); + erts_smp_atomic32_inc_nob(&process_count); p->id = make_internal_pid(p_serial << p_serial_shift | p_next); if (p->id == ERTS_INVALID_PID) { /* Do not use the invalid pid; change serial */ @@ -6119,7 +6122,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->min_heap_size = H_MIN_SIZE; p->min_vheap_size = BIN_VH_MIN_SIZE; p->prio = PRIORITY_NORMAL; - p->max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs); + p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); } p->skipped = 0; ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0)); @@ -7576,8 +7579,8 @@ continue_exit_process(Process *p p->status_flags = 0; #endif process_tab[pix] = NULL; /* Time of death! */ - ASSERT(erts_smp_atomic32_read(&process_count) > 0); - erts_smp_atomic32_dec(&process_count); + ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0); + erts_smp_atomic32_dec_nob(&process_count); #ifdef ERTS_SMP erts_pix_unlock(pix_lock); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 296acc7367..739aef3130 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -1599,11 +1599,11 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) { erts_aint32_t flags; ERTS_THR_MEMORY_BARRIER; - flags = erts_smp_atomic32_read(&ssi->flags); + flags = erts_smp_atomic32_read_nob(&ssi->flags); ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING) || (flags & ERTS_SSI_FLG_WAITING)); if (flags & ERTS_SSI_FLG_SLEEPING) { - flags = erts_smp_atomic32_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); + flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); erts_sched_finish_poke(ssi, flags); } } diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index 5410bcd495..3550f1396c 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -350,7 +350,7 @@ heap_dump(int to, void *to_arg, Eterm x) ProcBin* pb = (ProcBin *) binary_val(x); Binary* val = pb->val; - if (erts_smp_atomic_xchg(&val->refc, 0) != 0) { + if (erts_smp_atomic_xchg_nob(&val->refc, 0) != 0) { val->flags = (UWord) all_binaries; all_binaries = val; } diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index 72560aa124..83379d7352 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2007-2010. All Rights Reserved. + * Copyright Ericsson AB 2007-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -316,7 +316,7 @@ try_aquire(erts_proc_lock_t *lck, erts_tse_t *wtr) break; } wflg = lock << ERTS_PROC_LOCK_WAITER_SHIFT; - old_lflgs = ERTS_PROC_LOCK_FLGS_BOR_(lck, wflg | lock); + old_lflgs = ERTS_PROC_LOCK_FLGS_BOR_ACQB_(lck, wflg | lock); if (old_lflgs & lock) { /* Didn't get the lock */ goto enqueue; @@ -413,7 +413,7 @@ transfer_locks(Process *p, do { erts_tse_t *tmp = wake; wake = wake->next; - erts_atomic32_set(&tmp->uaflgs, 0); + erts_atomic32_set_nob(&tmp->uaflgs, 0); erts_tse_set(tmp); } while (wake); @@ -509,14 +509,14 @@ wait_for_locks(Process *p, ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0); - erts_atomic32_set(&wtr->uaflgs, 1); + erts_atomic32_set_nob(&wtr->uaflgs, 1); erts_pix_unlock(pix_lock); while (1) { int res; erts_tse_reset(wtr); - if (erts_atomic32_read(&wtr->uaflgs) == 0) + if (erts_atomic32_read_nob(&wtr->uaflgs) == 0) break; /* @@ -955,7 +955,8 @@ erts_proc_lock_init(Process *p) { /* We always start with all locks locked */ #if ERTS_PROC_LOCK_ATOMIC_IMPL - erts_smp_atomic32_init(&p->lock.flags, (erts_aint32_t) ERTS_PROC_LOCKS_ALL); + erts_smp_atomic32_init_nob(&p->lock.flags, + (erts_aint32_t) ERTS_PROC_LOCKS_ALL); #else p->lock.flags = ERTS_PROC_LOCKS_ALL; #endif @@ -974,7 +975,7 @@ erts_proc_lock_init(Process *p) { int i; for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) - erts_smp_atomic32_init(&p->lock.locked[i], (erts_aint32_t) 1); + erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); } #endif } diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 355179f084..cd3b2182fd 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2007-2010. All Rights Reserved. + * Copyright Ericsson AB 2007-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -41,10 +41,10 @@ #define ERTS_PROC_LOCK_SPINLOCK_IMPL 0 #define ERTS_PROC_LOCK_MUTEX_IMPL 0 -#if defined(ETHR_HAVE_OPTIMIZED_ATOMIC_OPS) +#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS) # undef ERTS_PROC_LOCK_ATOMIC_IMPL # define ERTS_PROC_LOCK_ATOMIC_IMPL 1 -#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCK) +#elif defined(ETHR_HAVE_NATIVE_SPINLOCKS) # undef ERTS_PROC_LOCK_SPINLOCK_IMPL # define ERTS_PROC_LOCK_SPINLOCK_IMPL 1 #else @@ -270,9 +270,11 @@ typedef struct { #if ERTS_PROC_LOCK_ATOMIC_IMPL #define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_band(&(L)->flags, (erts_aint32_t) (MSK))) -#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_bor(&(L)->flags, (erts_aint32_t) (MSK))) + ((ErtsProcLocks) erts_smp_atomic32_read_band_nob(&(L)->flags, \ + (erts_aint32_t) (MSK))) +#define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) \ + ((ErtsProcLocks) erts_smp_atomic32_read_bor_acqb(&(L)->flags, \ + (erts_aint32_t) (MSK))) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \ ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \ (erts_aint32_t) (NEW), \ @@ -282,7 +284,7 @@ typedef struct { (erts_aint32_t) (NEW), \ (erts_aint32_t) (EXPECTED))) #define ERTS_PROC_LOCK_FLGS_READ_(L) \ - ((ErtsProcLocks) erts_smp_atomic32_read(&(L)->flags)) + ((ErtsProcLocks) erts_smp_atomic32_read_nob(&(L)->flags)) #else /* no opt atomic ops */ @@ -325,7 +327,7 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new, #endif #define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) erts_proc_lock_flags_band((L), (MSK)) -#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) erts_proc_lock_flags_bor((L), (MSK)) +#define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) erts_proc_lock_flags_bor((L), (MSK)) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \ erts_proc_lock_flags_cmpxchg((L), (NEW), (EXPECTED)) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \ @@ -623,11 +625,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) if (locks & lock) { erts_aint32_t lock_count; if (locked) { - lock_count = erts_smp_atomic32_inctest(&p->lock.locked[i]); + lock_count = erts_smp_atomic32_inc_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 1); } else { - lock_count = erts_smp_atomic32_dectest(&p->lock.locked[i]); + lock_count = erts_smp_atomic32_dec_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 0); } } diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h index 287327bfe1..a89ddfbcc1 100644 --- a/erts/emulator/beam/erl_smp.h +++ b/erts/emulator/beam/erl_smp.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2005-2010. All Rights Reserved. + * Copyright Ericsson AB 2005-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -54,8 +54,9 @@ typedef erts_cnd_t erts_smp_cnd_t; typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t; typedef erts_rwmtx_t erts_smp_rwmtx_t; typedef erts_tsd_key_t erts_smp_tsd_key_t; -typedef erts_atomic_t erts_smp_atomic_t; -typedef erts_atomic32_t erts_smp_atomic32_t; +#define erts_smp_dw_atomic_t erts_dw_atomic_t +#define erts_smp_atomic_t erts_atomic_t +#define erts_smp_atomic32_t erts_atomic32_t typedef erts_spinlock_t erts_smp_spinlock_t; typedef erts_rwlock_t erts_smp_rwlock_t; void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */ @@ -83,8 +84,9 @@ typedef struct { } erts_smp_rwmtx_opt_t; typedef int erts_smp_rwmtx_t; typedef int erts_smp_tsd_key_t; -typedef SWord erts_smp_atomic_t; -typedef Uint32 erts_smp_atomic32_t; +#define erts_smp_dw_atomic_t erts_no_dw_atomic_t +#define erts_smp_atomic_t erts_no_atomic_t +#define erts_smp_atomic32_t erts_no_atomic32_t #if __GNUC__ > 2 typedef struct { } erts_smp_spinlock_t; typedef struct { } erts_smp_rwlock_t; @@ -160,82 +162,6 @@ ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx); ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx); ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_atomic_init(erts_smp_atomic_t *var, - erts_aint_t i); -ERTS_GLB_INLINE void erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read(erts_smp_atomic_t *var); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_inctest(erts_smp_atomic_t *incp); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest(erts_smp_atomic_t *decp); -ERTS_GLB_INLINE void erts_smp_atomic_inc(erts_smp_atomic_t *incp); -ERTS_GLB_INLINE void erts_smp_atomic_dec(erts_smp_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_addtest(erts_smp_atomic_t *addp, - erts_aint_t i); -ERTS_GLB_INLINE void erts_smp_atomic_add(erts_smp_atomic_t *addp, - erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, - erts_aint_t new); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t expected); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_bor(erts_smp_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_band(erts_smp_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read_acqb(erts_smp_atomic_t *var); -ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, - erts_aint_t i); -ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp); -ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp); -ERTS_GLB_INLINE void -erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i); -ERTS_GLB_INLINE void -erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_read(erts_smp_atomic32_t *var); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp); -ERTS_GLB_INLINE erts_aint32_t 
-erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp); -ERTS_GLB_INLINE void -erts_smp_atomic32_inc(erts_smp_atomic32_t *incp); -ERTS_GLB_INLINE void -erts_smp_atomic32_dec(erts_smp_atomic32_t *decp); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i); -ERTS_GLB_INLINE void -erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var); -ERTS_GLB_INLINE void -erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i); -ERTS_GLB_INLINE void -erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp); -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp); ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra); @@ -279,6 +205,429 @@ ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how, ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */ +/* + * Functions implementing atomic operations with no (nob), + * full (mb), acquire (acqb), release (relb), read (rb), and + * write (wb) memory barriers. + * + * If SMP support has been disabled, they are mapped to functions + * that perform the same operation, but aren't atomic and don't + * imply memory barriers.
+ */ + +#ifdef ERTS_SMP + +/* Double word size atomics */ + +#define erts_smp_dw_atomic_init_nob erts_dw_atomic_init_nob +#define erts_smp_dw_atomic_set_nob erts_dw_atomic_set_nob +#define erts_smp_dw_atomic_read_nob erts_dw_atomic_read_nob +#define erts_smp_dw_atomic_cmpxchg_nob erts_dw_atomic_cmpxchg_nob + +#define erts_smp_dw_atomic_init_mb erts_dw_atomic_init_mb +#define erts_smp_dw_atomic_set_mb erts_dw_atomic_set_mb +#define erts_smp_dw_atomic_read_mb erts_dw_atomic_read_mb +#define erts_smp_dw_atomic_cmpxchg_mb erts_dw_atomic_cmpxchg_mb + +#define erts_smp_dw_atomic_init_acqb erts_dw_atomic_init_acqb +#define erts_smp_dw_atomic_set_acqb erts_dw_atomic_set_acqb +#define erts_smp_dw_atomic_read_acqb erts_dw_atomic_read_acqb +#define erts_smp_dw_atomic_cmpxchg_acqb erts_dw_atomic_cmpxchg_acqb + +#define erts_smp_dw_atomic_init_relb erts_dw_atomic_init_relb +#define erts_smp_dw_atomic_set_relb erts_dw_atomic_set_relb +#define erts_smp_dw_atomic_read_relb erts_dw_atomic_read_relb +#define erts_smp_dw_atomic_cmpxchg_relb erts_dw_atomic_cmpxchg_relb + +#define erts_smp_dw_atomic_init_rb erts_dw_atomic_init_rb +#define erts_smp_dw_atomic_set_rb erts_dw_atomic_set_rb +#define erts_smp_dw_atomic_read_rb erts_dw_atomic_read_rb +#define erts_smp_dw_atomic_cmpxchg_rb erts_dw_atomic_cmpxchg_rb + +#define erts_smp_dw_atomic_init_wb erts_dw_atomic_init_wb +#define erts_smp_dw_atomic_set_wb erts_dw_atomic_set_wb +#define erts_smp_dw_atomic_read_wb erts_dw_atomic_read_wb +#define erts_smp_dw_atomic_cmpxchg_wb erts_dw_atomic_cmpxchg_wb + +/* Word size atomics */ + +#define erts_smp_atomic_init_nob erts_atomic_init_nob +#define erts_smp_atomic_set_nob erts_atomic_set_nob +#define erts_smp_atomic_read_nob erts_atomic_read_nob +#define erts_smp_atomic_inc_read_nob erts_atomic_inc_read_nob +#define erts_smp_atomic_dec_read_nob erts_atomic_dec_read_nob +#define erts_smp_atomic_inc_nob erts_atomic_inc_nob +#define erts_smp_atomic_dec_nob erts_atomic_dec_nob +#define erts_smp_atomic_add_read_nob erts_atomic_add_read_nob +#define erts_smp_atomic_add_nob erts_atomic_add_nob +#define erts_smp_atomic_read_bor_nob erts_atomic_read_bor_nob +#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob +#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob +#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob + +#define erts_smp_atomic_init_mb erts_atomic_init_mb +#define erts_smp_atomic_set_mb erts_atomic_set_mb +#define erts_smp_atomic_read_mb erts_atomic_read_mb +#define erts_smp_atomic_inc_read_mb erts_atomic_inc_read_mb +#define erts_smp_atomic_dec_read_mb erts_atomic_dec_read_mb +#define erts_smp_atomic_inc_mb erts_atomic_inc_mb +#define erts_smp_atomic_dec_mb erts_atomic_dec_mb +#define erts_smp_atomic_add_read_mb erts_atomic_add_read_mb +#define erts_smp_atomic_add_mb erts_atomic_add_mb +#define erts_smp_atomic_read_bor_mb erts_atomic_read_bor_mb +#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb +#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb +#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb + +#define erts_smp_atomic_init_acqb erts_atomic_init_acqb +#define erts_smp_atomic_set_acqb erts_atomic_set_acqb +#define erts_smp_atomic_read_acqb erts_atomic_read_acqb +#define erts_smp_atomic_inc_read_acqb erts_atomic_inc_read_acqb +#define erts_smp_atomic_dec_read_acqb erts_atomic_dec_read_acqb +#define erts_smp_atomic_inc_acqb erts_atomic_inc_acqb +#define erts_smp_atomic_dec_acqb erts_atomic_dec_acqb +#define erts_smp_atomic_add_read_acqb erts_atomic_add_read_acqb +#define 
erts_smp_atomic_add_acqb erts_atomic_add_acqb +#define erts_smp_atomic_read_bor_acqb erts_atomic_read_bor_acqb +#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb +#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb +#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb + +#define erts_smp_atomic_init_relb erts_atomic_init_relb +#define erts_smp_atomic_set_relb erts_atomic_set_relb +#define erts_smp_atomic_read_relb erts_atomic_read_relb +#define erts_smp_atomic_inc_read_relb erts_atomic_inc_read_relb +#define erts_smp_atomic_dec_read_relb erts_atomic_dec_read_relb +#define erts_smp_atomic_inc_relb erts_atomic_inc_relb +#define erts_smp_atomic_dec_relb erts_atomic_dec_relb +#define erts_smp_atomic_add_read_relb erts_atomic_add_read_relb +#define erts_smp_atomic_add_relb erts_atomic_add_relb +#define erts_smp_atomic_read_bor_relb erts_atomic_read_bor_relb +#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb +#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb +#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb + +#define erts_smp_atomic_init_rb erts_atomic_init_rb +#define erts_smp_atomic_set_rb erts_atomic_set_rb +#define erts_smp_atomic_read_rb erts_atomic_read_rb +#define erts_smp_atomic_inc_read_rb erts_atomic_inc_read_rb +#define erts_smp_atomic_dec_read_rb erts_atomic_dec_read_rb +#define erts_smp_atomic_inc_rb erts_atomic_inc_rb +#define erts_smp_atomic_dec_rb erts_atomic_dec_rb +#define erts_smp_atomic_add_read_rb erts_atomic_add_read_rb +#define erts_smp_atomic_add_rb erts_atomic_add_rb +#define erts_smp_atomic_read_bor_rb erts_atomic_read_bor_rb +#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb +#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb +#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb + +#define erts_smp_atomic_init_wb erts_atomic_init_wb +#define erts_smp_atomic_set_wb erts_atomic_set_wb +#define erts_smp_atomic_read_wb erts_atomic_read_wb +#define erts_smp_atomic_inc_read_wb erts_atomic_inc_read_wb +#define erts_smp_atomic_dec_read_wb erts_atomic_dec_read_wb +#define erts_smp_atomic_inc_wb erts_atomic_inc_wb +#define erts_smp_atomic_dec_wb erts_atomic_dec_wb +#define erts_smp_atomic_add_read_wb erts_atomic_add_read_wb +#define erts_smp_atomic_add_wb erts_atomic_add_wb +#define erts_smp_atomic_read_bor_wb erts_atomic_read_bor_wb +#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb +#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb +#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb + +/* 32-bit atomics */ + +#define erts_smp_atomic32_init_nob erts_atomic32_init_nob +#define erts_smp_atomic32_set_nob erts_atomic32_set_nob +#define erts_smp_atomic32_read_nob erts_atomic32_read_nob +#define erts_smp_atomic32_inc_read_nob erts_atomic32_inc_read_nob +#define erts_smp_atomic32_dec_read_nob erts_atomic32_dec_read_nob +#define erts_smp_atomic32_inc_nob erts_atomic32_inc_nob +#define erts_smp_atomic32_dec_nob erts_atomic32_dec_nob +#define erts_smp_atomic32_add_read_nob erts_atomic32_add_read_nob +#define erts_smp_atomic32_add_nob erts_atomic32_add_nob +#define erts_smp_atomic32_read_bor_nob erts_atomic32_read_bor_nob +#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob +#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob +#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob + +#define erts_smp_atomic32_init_mb erts_atomic32_init_mb +#define erts_smp_atomic32_set_mb erts_atomic32_set_mb +#define erts_smp_atomic32_read_mb erts_atomic32_read_mb 
+#define erts_smp_atomic32_inc_read_mb erts_atomic32_inc_read_mb +#define erts_smp_atomic32_dec_read_mb erts_atomic32_dec_read_mb +#define erts_smp_atomic32_inc_mb erts_atomic32_inc_mb +#define erts_smp_atomic32_dec_mb erts_atomic32_dec_mb +#define erts_smp_atomic32_add_read_mb erts_atomic32_add_read_mb +#define erts_smp_atomic32_add_mb erts_atomic32_add_mb +#define erts_smp_atomic32_read_bor_mb erts_atomic32_read_bor_mb +#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb +#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb +#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb + +#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb +#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb +#define erts_smp_atomic32_read_acqb erts_atomic32_read_acqb +#define erts_smp_atomic32_inc_read_acqb erts_atomic32_inc_read_acqb +#define erts_smp_atomic32_dec_read_acqb erts_atomic32_dec_read_acqb +#define erts_smp_atomic32_inc_acqb erts_atomic32_inc_acqb +#define erts_smp_atomic32_dec_acqb erts_atomic32_dec_acqb +#define erts_smp_atomic32_add_read_acqb erts_atomic32_add_read_acqb +#define erts_smp_atomic32_add_acqb erts_atomic32_add_acqb +#define erts_smp_atomic32_read_bor_acqb erts_atomic32_read_bor_acqb +#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb +#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb +#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb + +#define erts_smp_atomic32_init_relb erts_atomic32_init_relb +#define erts_smp_atomic32_set_relb erts_atomic32_set_relb +#define erts_smp_atomic32_read_relb erts_atomic32_read_relb +#define erts_smp_atomic32_inc_read_relb erts_atomic32_inc_read_relb +#define erts_smp_atomic32_dec_read_relb erts_atomic32_dec_read_relb +#define erts_smp_atomic32_inc_relb erts_atomic32_inc_relb +#define erts_smp_atomic32_dec_relb erts_atomic32_dec_relb +#define erts_smp_atomic32_add_read_relb erts_atomic32_add_read_relb +#define erts_smp_atomic32_add_relb erts_atomic32_add_relb +#define erts_smp_atomic32_read_bor_relb erts_atomic32_read_bor_relb +#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb +#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb +#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb + +#define erts_smp_atomic32_init_rb erts_atomic32_init_rb +#define erts_smp_atomic32_set_rb erts_atomic32_set_rb +#define erts_smp_atomic32_read_rb erts_atomic32_read_rb +#define erts_smp_atomic32_inc_read_rb erts_atomic32_inc_read_rb +#define erts_smp_atomic32_dec_read_rb erts_atomic32_dec_read_rb +#define erts_smp_atomic32_inc_rb erts_atomic32_inc_rb +#define erts_smp_atomic32_dec_rb erts_atomic32_dec_rb +#define erts_smp_atomic32_add_read_rb erts_atomic32_add_read_rb +#define erts_smp_atomic32_add_rb erts_atomic32_add_rb +#define erts_smp_atomic32_read_bor_rb erts_atomic32_read_bor_rb +#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb +#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb +#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb + +#define erts_smp_atomic32_init_wb erts_atomic32_init_wb +#define erts_smp_atomic32_set_wb erts_atomic32_set_wb +#define erts_smp_atomic32_read_wb erts_atomic32_read_wb +#define erts_smp_atomic32_inc_read_wb erts_atomic32_inc_read_wb +#define erts_smp_atomic32_dec_read_wb erts_atomic32_dec_read_wb +#define erts_smp_atomic32_inc_wb erts_atomic32_inc_wb +#define erts_smp_atomic32_dec_wb erts_atomic32_dec_wb +#define erts_smp_atomic32_add_read_wb erts_atomic32_add_read_wb +#define 
erts_smp_atomic32_add_wb erts_atomic32_add_wb +#define erts_smp_atomic32_read_bor_wb erts_atomic32_read_bor_wb +#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb +#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb +#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb + +#else /* !ERTS_SMP */ + +/* Double word size atomics */ + +#define erts_smp_dw_atomic_init_nob erts_no_dw_atomic_set +#define erts_smp_dw_atomic_set_nob erts_no_dw_atomic_set +#define erts_smp_dw_atomic_read_nob erts_no_dw_atomic_read +#define erts_smp_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg + +#define erts_smp_dw_atomic_init_mb erts_no_dw_atomic_init +#define erts_smp_dw_atomic_set_mb erts_no_dw_atomic_set +#define erts_smp_dw_atomic_read_mb erts_no_dw_atomic_read +#define erts_smp_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg + +#define erts_smp_dw_atomic_init_acqb erts_no_dw_atomic_init +#define erts_smp_dw_atomic_set_acqb erts_no_dw_atomic_set +#define erts_smp_dw_atomic_read_acqb erts_no_dw_atomic_read +#define erts_smp_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg + +#define erts_smp_dw_atomic_init_relb erts_no_dw_atomic_init +#define erts_smp_dw_atomic_set_relb erts_no_dw_atomic_set +#define erts_smp_dw_atomic_read_relb erts_no_dw_atomic_read +#define erts_smp_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg + +#define erts_smp_dw_atomic_init_rb erts_no_dw_atomic_init +#define erts_smp_dw_atomic_set_rb erts_no_dw_atomic_set +#define erts_smp_dw_atomic_read_rb erts_no_dw_atomic_read +#define erts_smp_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg + +#define erts_smp_dw_atomic_init_wb erts_no_dw_atomic_init +#define erts_smp_dw_atomic_set_wb erts_no_dw_atomic_set +#define erts_smp_dw_atomic_read_wb erts_no_dw_atomic_read +#define erts_smp_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg + +/* Word size atomics */ + +#define erts_smp_atomic_init_nob erts_no_atomic_set +#define erts_smp_atomic_set_nob erts_no_atomic_set +#define erts_smp_atomic_read_nob erts_no_atomic_read +#define erts_smp_atomic_inc_read_nob erts_no_atomic_inc_read +#define erts_smp_atomic_dec_read_nob erts_no_atomic_dec_read +#define erts_smp_atomic_inc_nob erts_no_atomic_inc +#define erts_smp_atomic_dec_nob erts_no_atomic_dec +#define erts_smp_atomic_add_read_nob erts_no_atomic_add_read +#define erts_smp_atomic_add_nob erts_no_atomic_add +#define erts_smp_atomic_read_bor_nob erts_no_atomic_read_bor +#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band +#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg +#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg + +#define erts_smp_atomic_init_mb erts_no_atomic_set +#define erts_smp_atomic_set_mb erts_no_atomic_set +#define erts_smp_atomic_read_mb erts_no_atomic_read +#define erts_smp_atomic_inc_read_mb erts_no_atomic_inc_read +#define erts_smp_atomic_dec_read_mb erts_no_atomic_dec_read +#define erts_smp_atomic_inc_mb erts_no_atomic_inc +#define erts_smp_atomic_dec_mb erts_no_atomic_dec +#define erts_smp_atomic_add_read_mb erts_no_atomic_add_read +#define erts_smp_atomic_add_mb erts_no_atomic_add +#define erts_smp_atomic_read_bor_mb erts_no_atomic_read_bor +#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band +#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg +#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg + +#define erts_smp_atomic_init_acqb erts_no_atomic_set +#define erts_smp_atomic_set_acqb erts_no_atomic_set +#define erts_smp_atomic_read_acqb erts_no_atomic_read +#define erts_smp_atomic_inc_read_acqb erts_no_atomic_inc_read 
+#define erts_smp_atomic_dec_read_acqb erts_no_atomic_dec_read +#define erts_smp_atomic_inc_acqb erts_no_atomic_inc +#define erts_smp_atomic_dec_acqb erts_no_atomic_dec +#define erts_smp_atomic_add_read_acqb erts_no_atomic_add_read +#define erts_smp_atomic_add_acqb erts_no_atomic_add +#define erts_smp_atomic_read_bor_acqb erts_no_atomic_read_bor +#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band +#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg +#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg + +#define erts_smp_atomic_init_relb erts_no_atomic_set +#define erts_smp_atomic_set_relb erts_no_atomic_set +#define erts_smp_atomic_read_relb erts_no_atomic_read +#define erts_smp_atomic_inc_read_relb erts_no_atomic_inc_read +#define erts_smp_atomic_dec_read_relb erts_no_atomic_dec_read +#define erts_smp_atomic_inc_relb erts_no_atomic_inc +#define erts_smp_atomic_dec_relb erts_no_atomic_dec +#define erts_smp_atomic_add_read_relb erts_no_atomic_add_read +#define erts_smp_atomic_add_relb erts_no_atomic_add +#define erts_smp_atomic_read_bor_relb erts_no_atomic_read_bor +#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band +#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg +#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg + +#define erts_smp_atomic_init_rb erts_no_atomic_set +#define erts_smp_atomic_set_rb erts_no_atomic_set +#define erts_smp_atomic_read_rb erts_no_atomic_read +#define erts_smp_atomic_inc_read_rb erts_no_atomic_inc_read +#define erts_smp_atomic_dec_read_rb erts_no_atomic_dec_read +#define erts_smp_atomic_inc_rb erts_no_atomic_inc +#define erts_smp_atomic_dec_rb erts_no_atomic_dec +#define erts_smp_atomic_add_read_rb erts_no_atomic_add_read +#define erts_smp_atomic_add_rb erts_no_atomic_add +#define erts_smp_atomic_read_bor_rb erts_no_atomic_read_bor +#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band +#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg +#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg + +#define erts_smp_atomic_init_wb erts_no_atomic_set +#define erts_smp_atomic_set_wb erts_no_atomic_set +#define erts_smp_atomic_read_wb erts_no_atomic_read +#define erts_smp_atomic_inc_read_wb erts_no_atomic_inc_read +#define erts_smp_atomic_dec_read_wb erts_no_atomic_dec_read +#define erts_smp_atomic_inc_wb erts_no_atomic_inc +#define erts_smp_atomic_dec_wb erts_no_atomic_dec +#define erts_smp_atomic_add_read_wb erts_no_atomic_add_read +#define erts_smp_atomic_add_wb erts_no_atomic_add +#define erts_smp_atomic_read_bor_wb erts_no_atomic_read_bor +#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band +#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg +#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg + +/* 32-bit atomics */ + +#define erts_smp_atomic32_init_nob erts_no_atomic32_set +#define erts_smp_atomic32_set_nob erts_no_atomic32_set +#define erts_smp_atomic32_read_nob erts_no_atomic32_read +#define erts_smp_atomic32_inc_read_nob erts_no_atomic32_inc_read +#define erts_smp_atomic32_dec_read_nob erts_no_atomic32_dec_read +#define erts_smp_atomic32_inc_nob erts_no_atomic32_inc +#define erts_smp_atomic32_dec_nob erts_no_atomic32_dec +#define erts_smp_atomic32_add_read_nob erts_no_atomic32_add_read +#define erts_smp_atomic32_add_nob erts_no_atomic32_add +#define erts_smp_atomic32_read_bor_nob erts_no_atomic32_read_bor +#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band +#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg +#define erts_smp_atomic32_cmpxchg_nob 
erts_no_atomic32_cmpxchg + +#define erts_smp_atomic32_init_mb erts_no_atomic32_set +#define erts_smp_atomic32_set_mb erts_no_atomic32_set +#define erts_smp_atomic32_read_mb erts_no_atomic32_read +#define erts_smp_atomic32_inc_read_mb erts_no_atomic32_inc_read +#define erts_smp_atomic32_dec_read_mb erts_no_atomic32_dec_read +#define erts_smp_atomic32_inc_mb erts_no_atomic32_inc +#define erts_smp_atomic32_dec_mb erts_no_atomic32_dec +#define erts_smp_atomic32_add_read_mb erts_no_atomic32_add_read +#define erts_smp_atomic32_add_mb erts_no_atomic32_add +#define erts_smp_atomic32_read_bor_mb erts_no_atomic32_read_bor +#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band +#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg +#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg + +#define erts_smp_atomic32_init_acqb erts_no_atomic32_set +#define erts_smp_atomic32_set_acqb erts_no_atomic32_set +#define erts_smp_atomic32_read_acqb erts_no_atomic32_read +#define erts_smp_atomic32_inc_read_acqb erts_no_atomic32_inc_read +#define erts_smp_atomic32_dec_read_acqb erts_no_atomic32_dec_read +#define erts_smp_atomic32_inc_acqb erts_no_atomic32_inc +#define erts_smp_atomic32_dec_acqb erts_no_atomic32_dec +#define erts_smp_atomic32_add_read_acqb erts_no_atomic32_add_read +#define erts_smp_atomic32_add_acqb erts_no_atomic32_add +#define erts_smp_atomic32_read_bor_acqb erts_no_atomic32_read_bor +#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band +#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg +#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg + +#define erts_smp_atomic32_init_relb erts_no_atomic32_set +#define erts_smp_atomic32_set_relb erts_no_atomic32_set +#define erts_smp_atomic32_read_relb erts_no_atomic32_read +#define erts_smp_atomic32_inc_read_relb erts_no_atomic32_inc_read +#define erts_smp_atomic32_dec_read_relb erts_no_atomic32_dec_read +#define erts_smp_atomic32_inc_relb erts_no_atomic32_inc +#define erts_smp_atomic32_dec_relb erts_no_atomic32_dec +#define erts_smp_atomic32_add_read_relb erts_no_atomic32_add_read +#define erts_smp_atomic32_add_relb erts_no_atomic32_add +#define erts_smp_atomic32_read_bor_relb erts_no_atomic32_read_bor +#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band +#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg +#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg + +#define erts_smp_atomic32_init_rb erts_no_atomic32_set +#define erts_smp_atomic32_set_rb erts_no_atomic32_set +#define erts_smp_atomic32_read_rb erts_no_atomic32_read +#define erts_smp_atomic32_inc_read_rb erts_no_atomic32_inc_read +#define erts_smp_atomic32_dec_read_rb erts_no_atomic32_dec_read +#define erts_smp_atomic32_inc_rb erts_no_atomic32_inc +#define erts_smp_atomic32_dec_rb erts_no_atomic32_dec +#define erts_smp_atomic32_add_read_rb erts_no_atomic32_add_read +#define erts_smp_atomic32_add_rb erts_no_atomic32_add +#define erts_smp_atomic32_read_bor_rb erts_no_atomic32_read_bor +#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band +#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg +#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg + +#define erts_smp_atomic32_init_wb erts_no_atomic32_set +#define erts_smp_atomic32_set_wb erts_no_atomic32_set +#define erts_smp_atomic32_read_wb erts_no_atomic32_read +#define erts_smp_atomic32_inc_read_wb erts_no_atomic32_inc_read +#define erts_smp_atomic32_dec_read_wb erts_no_atomic32_dec_read +#define erts_smp_atomic32_inc_wb 
erts_no_atomic32_inc +#define erts_smp_atomic32_dec_wb erts_no_atomic32_dec +#define erts_smp_atomic32_add_read_wb erts_no_atomic32_add_read +#define erts_smp_atomic32_add_wb erts_no_atomic32_add +#define erts_smp_atomic32_read_bor_wb erts_no_atomic32_read_bor +#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band +#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg +#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg + +#endif /* !ERTS_SMP */ #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -655,434 +1004,6 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx) } ERTS_GLB_INLINE void -erts_smp_atomic_init(erts_smp_atomic_t *var, erts_aint_t i) -{ -#ifdef ERTS_SMP - erts_atomic_init(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i) -{ -#ifdef ERTS_SMP - erts_atomic_set(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_read(erts_smp_atomic_t *var) -{ -#ifdef ERTS_SMP - return erts_atomic_read(var); -#else - return *var; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_inctest(erts_smp_atomic_t *incp) -{ -#ifdef ERTS_SMP - return erts_atomic_inctest(incp); -#else - return ++(*incp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_dectest(erts_smp_atomic_t *decp) -{ -#ifdef ERTS_SMP - return erts_atomic_dectest(decp); -#else - return --(*decp); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic_inc(erts_smp_atomic_t *incp) -{ -#ifdef ERTS_SMP - erts_atomic_inc(incp); -#else - ++(*incp); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic_dec(erts_smp_atomic_t *decp) -{ -#ifdef ERTS_SMP - erts_atomic_dec(decp); -#else - --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_addtest(erts_smp_atomic_t *addp, erts_aint_t i) -{ -#ifdef ERTS_SMP - return erts_atomic_addtest(addp, i); -#else - return *addp += i; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic_add(erts_smp_atomic_t *addp, erts_aint_t i) -{ -#ifdef ERTS_SMP - erts_atomic_add(addp, i); -#else - *addp += i; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, erts_aint_t new) -{ -#ifdef ERTS_SMP - return erts_atomic_xchg(xchgp, new); -#else - erts_aint_t old; - old = *xchgp; - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t expected) -{ -#ifdef ERTS_SMP - return erts_atomic_cmpxchg(xchgp, new, expected); -#else - erts_aint_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_bor(erts_smp_atomic_t *var, erts_aint_t mask) -{ -#ifdef ERTS_SMP - return erts_atomic_bor(var, mask); -#else - erts_aint_t old; - old = *var; - *var |= mask; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_band(erts_smp_atomic_t *var, erts_aint_t mask) -{ -#ifdef ERTS_SMP - return erts_atomic_band(var, mask); -#else - erts_aint_t old; - old = *var; - *var &= mask; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_read_acqb(erts_smp_atomic_t *var) -{ -#ifdef ERTS_SMP - return erts_atomic_read_acqb(var); -#else - return *var; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic_set_relb(erts_smp_atomic_t *var, erts_aint_t i) -{ -#ifdef ERTS_SMP - erts_atomic_set_relb(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp) -{ -#ifdef ERTS_SMP - erts_atomic_dec_relb(decp); -#else - 
--(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp) -{ -#ifdef ERTS_SMP - return erts_atomic_dectest_relb(decp); -#else - return --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp) -{ -#ifdef ERTS_SMP - return erts_atomic_cmpxchg_acqb(xchgp, new, exp); -#else - erts_aint_t old = *xchgp; - if (old == exp) - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp) -{ -#ifdef ERTS_SMP - return erts_atomic_cmpxchg_relb(xchgp, new, exp); -#else - erts_aint_t old = *xchgp; - if (old == exp) - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i) -{ -#ifdef ERTS_SMP - erts_atomic32_init(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i) -{ -#ifdef ERTS_SMP - erts_atomic32_set(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_read(erts_smp_atomic32_t *var) -{ -#ifdef ERTS_SMP - return erts_atomic32_read(var); -#else - return *var; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp) -{ -#ifdef ERTS_SMP - return erts_atomic32_inctest(incp); -#else - return ++(*incp); -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp) -{ -#ifdef ERTS_SMP - return erts_atomic32_dectest(decp); -#else - return --(*decp); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic32_inc(erts_smp_atomic32_t *incp) -{ -#ifdef ERTS_SMP - erts_atomic32_inc(incp); -#else - ++(*incp); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic32_dec(erts_smp_atomic32_t *decp) -{ -#ifdef ERTS_SMP - erts_atomic32_dec(decp); -#else - --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i) -{ -#ifdef ERTS_SMP - return erts_atomic32_addtest(addp, i); -#else - return *addp += i; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i) -{ -#ifdef ERTS_SMP - erts_atomic32_add(addp, i); -#else - *addp += i; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new) -{ -#ifdef ERTS_SMP - return erts_atomic32_xchg(xchgp, new); -#else - erts_aint32_t old; - old = *xchgp; - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected) -{ -#ifdef ERTS_SMP - return erts_atomic32_cmpxchg(xchgp, new, expected); -#else - erts_aint32_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask) -{ -#ifdef ERTS_SMP - return erts_atomic32_bor(var, mask); -#else - erts_aint32_t old; - old = *var; - *var |= mask; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask) -{ -#ifdef ERTS_SMP - return erts_atomic32_band(var, mask); -#else - erts_aint32_t old; - old = *var; - *var &= mask; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var) -{ -#ifdef ERTS_SMP - return erts_atomic32_read_acqb(var); -#else - 
return *var; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i) -{ -#ifdef ERTS_SMP - erts_atomic32_set_relb(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp) -{ -#ifdef ERTS_SMP - erts_atomic32_dec_relb(decp); -#else - --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp) -{ -#ifdef ERTS_SMP - return erts_atomic32_dectest_relb(decp); -#else - return --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp) -{ -#ifdef ERTS_SMP - return erts_atomic32_cmpxchg_acqb(xchgp, new, exp); -#else - erts_aint32_t old = *xchgp; - if (old == exp) - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp) -{ -#ifdef ERTS_SMP - return erts_atomic32_cmpxchg_relb(xchgp, new, exp); -#else - erts_aint32_t old = *xchgp; - if (old == exp) - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra) { #ifdef ERTS_SMP @@ -1308,3 +1229,37 @@ erts_smp_thr_sigwait(const sigset_t *set, int *sig) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ #endif /* ERL_SMP_H */ + +#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS + +/* Deprecated functions to replace */ + +#undef erts_smp_atomic_init +#undef erts_smp_atomic_set +#undef erts_smp_atomic_read +#undef erts_smp_atomic_inctest +#undef erts_smp_atomic_dectest +#undef erts_smp_atomic_inc +#undef erts_smp_atomic_dec +#undef erts_smp_atomic_addtest +#undef erts_smp_atomic_add +#undef erts_smp_atomic_xchg +#undef erts_smp_atomic_cmpxchg +#undef erts_smp_atomic_bor +#undef erts_smp_atomic_band + +#undef erts_smp_atomic32_init +#undef erts_smp_atomic32_set +#undef erts_smp_atomic32_read +#undef erts_smp_atomic32_inctest +#undef erts_smp_atomic32_dectest +#undef erts_smp_atomic32_inc +#undef erts_smp_atomic32_dec +#undef erts_smp_atomic32_addtest +#undef erts_smp_atomic32_add +#undef erts_smp_atomic32_xchg +#undef erts_smp_atomic32_cmpxchg +#undef erts_smp_atomic32_bor +#undef erts_smp_atomic32_band + +#endif diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h index a0eda61ba5..12eaf39ec7 100644 --- a/erts/emulator/beam/erl_threads.h +++ b/erts/emulator/beam/erl_threads.h @@ -28,6 +28,11 @@ #define ERTS_SPIN_BODY ETHR_SPIN_BODY #include "sys.h" + +typedef struct { SWord sint[2]; } erts_no_dw_atomic_t; +typedef SWord erts_no_atomic_t; +typedef Sint32 erts_no_atomic32_t; + #ifdef USE_THREADS #define ETHR_TRY_INLINE_FUNCS @@ -99,10 +104,12 @@ typedef ethr_rwmutex_opt erts_rwmtx_opt_t; typedef ethr_tsd_key erts_tsd_key_t; typedef ethr_ts_event erts_tse_t; -typedef ethr_sint_t erts_aint_t; -typedef ethr_atomic_t erts_atomic_t; -typedef ethr_sint32_t erts_aint32_t; -typedef ethr_atomic32_t erts_atomic32_t; +#define erts_dw_aint_t ethr_dw_sint_t +#define erts_dw_atomic_t ethr_dw_atomic_t +#define erts_aint_t ethr_sint_t +#define erts_atomic_t ethr_atomic_t +#define erts_aint32_t ethr_sint32_t +#define erts_atomic32_t ethr_atomic32_t /* spinlock */ typedef struct { @@ -164,10 +171,12 @@ typedef struct { typedef int erts_rwmtx_t; typedef int erts_tsd_key_t; typedef int erts_tse_t; -typedef SWord erts_aint_t; -typedef SWord erts_atomic_t; -typedef SWord erts_aint32_t; -typedef SWord erts_atomic32_t; 
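The erts_no_dw_atomic_t type introduced above (a plain two-SWord struct) is what the double-word operations act on when threads are disabled; the set/read/cmpxchg fallbacks themselves are defined further down in this patch. A minimal sketch of the intended calling pattern, assuming a hypothetical helper that updates only the first word:

    /* Retry loop in the style double-word atomics require; with the
     * non-threaded fallbacks the cmpxchg succeeds on the first attempt,
     * but the calling pattern stays the same. */

    static void dw_add_to_first_word(erts_no_dw_atomic_t *var, SWord inc)
    {
        erts_no_dw_atomic_t old_val, new_val;

        erts_no_dw_atomic_read(var, &old_val);
        do {
            new_val.sint[0] = old_val.sint[0] + inc;  /* update first word */
            new_val.sint[1] = old_val.sint[1];        /* keep second word  */
        } while (!erts_no_dw_atomic_cmpxchg(var, &new_val, &old_val));
        /* on failure the cmpxchg refreshes old_val with the value it
         * observed, so the next iteration recomputes new_val from the
         * current contents */
    }
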
+#define erts_dw_aint_t erts_no_dw_atomic_t +#define erts_dw_atomic_t erts_no_dw_atomic_t +#define erts_aint_t SWord +#define erts_atomic_t erts_no_atomic_t +#define erts_aint32_t Sint32 +#define erts_atomic32_t erts_no_atomic32_t #if __GNUC__ > 2 typedef struct { } erts_spinlock_t; typedef struct { } erts_rwlock_t; @@ -247,65 +256,51 @@ ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx); -ERTS_GLB_INLINE void erts_atomic_init(erts_atomic_t *var, erts_aint_t i); -ERTS_GLB_INLINE void erts_atomic_set(erts_atomic_t *var, erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t erts_atomic_read(erts_atomic_t *var); -ERTS_GLB_INLINE erts_aint_t erts_atomic_inctest(erts_atomic_t *incp); -ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest(erts_atomic_t *decp); -ERTS_GLB_INLINE void erts_atomic_inc(erts_atomic_t *incp); -ERTS_GLB_INLINE void erts_atomic_dec(erts_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_atomic_addtest(erts_atomic_t *addp, - erts_aint_t i); -ERTS_GLB_INLINE void erts_atomic_add(erts_atomic_t *addp, erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t erts_atomic_xchg(erts_atomic_t *xchgp, - erts_aint_t new); -ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg(erts_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t expected); -ERTS_GLB_INLINE erts_aint_t erts_atomic_bor(erts_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_atomic_band(erts_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_atomic_read_acqb(erts_atomic_t *var); -ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i); -ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest_relb(erts_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp); -ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp); -ERTS_GLB_INLINE void erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i); -ERTS_GLB_INLINE void erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read(erts_atomic32_t *var); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_inctest(erts_atomic32_t *incp); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest(erts_atomic32_t *decp); -ERTS_GLB_INLINE void erts_atomic32_inc(erts_atomic32_t *incp); -ERTS_GLB_INLINE void erts_atomic32_dec(erts_atomic32_t *decp); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_addtest(erts_atomic32_t *addp, - erts_aint32_t i); -ERTS_GLB_INLINE void erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_xchg(erts_atomic32_t *xchgp, - erts_aint32_t new); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg(erts_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_bor(erts_atomic32_t *var, - erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_band(erts_atomic32_t *var, - erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read_acqb(erts_atomic32_t *var); -ERTS_GLB_INLINE void erts_atomic32_set_relb(erts_atomic32_t *var, - erts_aint32_t i); -ERTS_GLB_INLINE void erts_atomic32_dec_relb(erts_atomic32_t *decp); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest_relb(erts_atomic32_t *decp); 
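The prototypes removed here are the old erts_atomic_* inline functions; under the new scheme the read-and-modify forms are renamed (inctest becomes inc_read, dectest becomes dec_read, addtest becomes add_read, bor/band become read_bor/read_band) and gain an explicit barrier suffix. A minimal migration sketch for one call site (the counter and helper are hypothetical):

    /* Migrating a hypothetical call site from the deprecated name to the
     * explicit-barrier form; _nob is the drop-in replacement, a stronger
     * suffix is chosen only where the call site needs the ordering. */

    static erts_atomic_t pending;            /* hypothetical counter */

    static erts_aint_t pending_inc(void)
    {
        /* was: return erts_atomic_inctest(&pending); */
        return erts_atomic_inc_read_nob(&pending);
    }
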
-ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp); -ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp); + +ERTS_GLB_INLINE void erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val); +ERTS_GLB_INLINE void erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val); +ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var, + erts_no_dw_atomic_t *val, + erts_no_dw_atomic_t *old_val); +ERTS_GLB_INLINE void erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read(erts_no_atomic_t *var); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_inc_read(erts_no_atomic_t *incp); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_dec_read(erts_no_atomic_t *decp); +ERTS_GLB_INLINE void erts_no_atomic_inc(erts_no_atomic_t *incp); +ERTS_GLB_INLINE void erts_no_atomic_dec(erts_no_atomic_t *decp); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_add_read(erts_no_atomic_t *addp, + erts_aint_t i); +ERTS_GLB_INLINE void erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bor(erts_no_atomic_t *var, + erts_aint_t mask); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_band(erts_no_atomic_t *var, + erts_aint_t mask); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp, + erts_aint_t new); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, + erts_aint_t new, + erts_aint_t expected); +ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var, + erts_aint32_t i); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_inc_read(erts_no_atomic32_t *incp); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_dec_read(erts_no_atomic32_t *decp); +ERTS_GLB_INLINE void erts_no_atomic32_inc(erts_no_atomic32_t *incp); +ERTS_GLB_INLINE void erts_no_atomic32_dec(erts_no_atomic32_t *decp); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_add_read(erts_no_atomic32_t *addp, + erts_aint32_t i); +ERTS_GLB_INLINE void erts_no_atomic32_add(erts_no_atomic32_t *addp, + erts_aint32_t i); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bor(erts_no_atomic32_t *var, + erts_aint32_t mask); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_band(erts_no_atomic32_t *var, + erts_aint32_t mask); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, + erts_aint32_t new); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, + erts_aint32_t new, + erts_aint32_t expected); + ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra, @@ -362,6 +357,430 @@ ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set, ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #endif /* #ifdef HAVE_ETHR_SIG_FUNCS */ +/* + * Functions implementing atomic operations with with no (nob), + * full (mb), acquire (acqb), release (relb), read (rb), and + * write (wb) memory barriers. + * + * If thread support has been disabled, they are mapped to + * functions that performs the same operation, but aren't atomic + * and don't imply memory barriers. 
+ */ + +#ifdef USE_THREADS + +/* Double word size atomics */ + +#define erts_dw_atomic_init_nob ethr_dw_atomic_init +#define erts_dw_atomic_set_nob ethr_dw_atomic_set +#define erts_dw_atomic_read_nob ethr_dw_atomic_read +#define erts_dw_atomic_cmpxchg_nob ethr_dw_atomic_cmpxchg + +#define erts_dw_atomic_init_mb ethr_dw_atomic_init_mb +#define erts_dw_atomic_set_mb ethr_dw_atomic_set_mb +#define erts_dw_atomic_read_mb ethr_dw_atomic_read_mb +#define erts_dw_atomic_cmpxchg_mb ethr_dw_atomic_cmpxchg_mb + +#define erts_dw_atomic_init_acqb ethr_dw_atomic_init_acqb +#define erts_dw_atomic_set_acqb ethr_dw_atomic_set_acqb +#define erts_dw_atomic_read_acqb ethr_dw_atomic_read_acqb +#define erts_dw_atomic_cmpxchg_acqb ethr_dw_atomic_cmpxchg_acqb + +#define erts_dw_atomic_init_relb ethr_dw_atomic_init_relb +#define erts_dw_atomic_set_relb ethr_dw_atomic_set_relb +#define erts_dw_atomic_read_relb ethr_dw_atomic_read_relb +#define erts_dw_atomic_cmpxchg_relb ethr_dw_atomic_cmpxchg_relb + +#define erts_dw_atomic_init_rb ethr_dw_atomic_init_rb +#define erts_dw_atomic_set_rb ethr_dw_atomic_set_rb +#define erts_dw_atomic_read_rb ethr_dw_atomic_read_rb +#define erts_dw_atomic_cmpxchg_rb ethr_dw_atomic_cmpxchg_rb + +#define erts_dw_atomic_init_wb ethr_dw_atomic_init_wb +#define erts_dw_atomic_set_wb ethr_dw_atomic_set_wb +#define erts_dw_atomic_read_wb ethr_dw_atomic_read_wb +#define erts_dw_atomic_cmpxchg_wb ethr_dw_atomic_cmpxchg_wb + +/* Word size atomics */ + +#define erts_atomic_init_nob ethr_atomic_init +#define erts_atomic_set_nob ethr_atomic_set +#define erts_atomic_read_nob ethr_atomic_read +#define erts_atomic_inc_read_nob ethr_atomic_inc_read +#define erts_atomic_dec_read_nob ethr_atomic_dec_read +#define erts_atomic_inc_nob ethr_atomic_inc +#define erts_atomic_dec_nob ethr_atomic_dec +#define erts_atomic_add_read_nob ethr_atomic_add_read +#define erts_atomic_add_nob ethr_atomic_add +#define erts_atomic_read_bor_nob ethr_atomic_read_bor +#define erts_atomic_read_band_nob ethr_atomic_read_band +#define erts_atomic_xchg_nob ethr_atomic_xchg +#define erts_atomic_cmpxchg_nob ethr_atomic_cmpxchg + +#define erts_atomic_init_mb ethr_atomic_init_mb +#define erts_atomic_set_mb ethr_atomic_set_mb +#define erts_atomic_read_mb ethr_atomic_read_mb +#define erts_atomic_inc_read_mb ethr_atomic_inc_read_mb +#define erts_atomic_dec_read_mb ethr_atomic_dec_read_mb +#define erts_atomic_inc_mb ethr_atomic_inc_mb +#define erts_atomic_dec_mb ethr_atomic_dec_mb +#define erts_atomic_add_read_mb ethr_atomic_add_read_mb +#define erts_atomic_add_mb ethr_atomic_add_mb +#define erts_atomic_read_bor_mb ethr_atomic_read_bor_mb +#define erts_atomic_read_band_mb ethr_atomic_read_band_mb +#define erts_atomic_xchg_mb ethr_atomic_xchg_mb +#define erts_atomic_cmpxchg_mb ethr_atomic_cmpxchg_mb + +#define erts_atomic_init_acqb ethr_atomic_init_acqb +#define erts_atomic_set_acqb ethr_atomic_set_acqb +#define erts_atomic_read_acqb ethr_atomic_read_acqb +#define erts_atomic_inc_read_acqb ethr_atomic_inc_read_acqb +#define erts_atomic_dec_read_acqb ethr_atomic_dec_read_acqb +#define erts_atomic_inc_acqb ethr_atomic_inc_acqb +#define erts_atomic_dec_acqb ethr_atomic_dec_acqb +#define erts_atomic_add_read_acqb ethr_atomic_add_read_acqb +#define erts_atomic_add_acqb ethr_atomic_add_acqb +#define erts_atomic_read_bor_acqb ethr_atomic_read_bor_acqb +#define erts_atomic_read_band_acqb ethr_atomic_read_band_acqb +#define erts_atomic_xchg_acqb ethr_atomic_xchg_acqb +#define erts_atomic_cmpxchg_acqb ethr_atomic_cmpxchg_acqb + +#define 
erts_atomic_init_relb ethr_atomic_init_relb +#define erts_atomic_set_relb ethr_atomic_set_relb +#define erts_atomic_read_relb ethr_atomic_read_relb +#define erts_atomic_inc_read_relb ethr_atomic_inc_read_relb +#define erts_atomic_dec_read_relb ethr_atomic_dec_read_relb +#define erts_atomic_inc_relb ethr_atomic_inc_relb +#define erts_atomic_dec_relb ethr_atomic_dec_relb +#define erts_atomic_add_read_relb ethr_atomic_add_read_relb +#define erts_atomic_add_relb ethr_atomic_add_relb +#define erts_atomic_read_bor_relb ethr_atomic_read_bor_relb +#define erts_atomic_read_band_relb ethr_atomic_read_band_relb +#define erts_atomic_xchg_relb ethr_atomic_xchg_relb +#define erts_atomic_cmpxchg_relb ethr_atomic_cmpxchg_relb + +#define erts_atomic_init_rb ethr_atomic_init_rb +#define erts_atomic_set_rb ethr_atomic_set_rb +#define erts_atomic_read_rb ethr_atomic_read_rb +#define erts_atomic_inc_read_rb ethr_atomic_inc_read_rb +#define erts_atomic_dec_read_rb ethr_atomic_dec_read_rb +#define erts_atomic_inc_rb ethr_atomic_inc_rb +#define erts_atomic_dec_rb ethr_atomic_dec_rb +#define erts_atomic_add_read_rb ethr_atomic_add_read_rb +#define erts_atomic_add_rb ethr_atomic_add_rb +#define erts_atomic_read_bor_rb ethr_atomic_read_bor_rb +#define erts_atomic_read_band_rb ethr_atomic_read_band_rb +#define erts_atomic_xchg_rb ethr_atomic_xchg_rb +#define erts_atomic_cmpxchg_rb ethr_atomic_cmpxchg_rb + +#define erts_atomic_init_wb ethr_atomic_init_wb +#define erts_atomic_set_wb ethr_atomic_set_wb +#define erts_atomic_read_wb ethr_atomic_read_wb +#define erts_atomic_inc_read_wb ethr_atomic_inc_read_wb +#define erts_atomic_dec_read_wb ethr_atomic_dec_read_wb +#define erts_atomic_inc_wb ethr_atomic_inc_wb +#define erts_atomic_dec_wb ethr_atomic_dec_wb +#define erts_atomic_add_read_wb ethr_atomic_add_read_wb +#define erts_atomic_add_wb ethr_atomic_add_wb +#define erts_atomic_read_bor_wb ethr_atomic_read_bor_wb +#define erts_atomic_read_band_wb ethr_atomic_read_band_wb +#define erts_atomic_xchg_wb ethr_atomic_xchg_wb +#define erts_atomic_cmpxchg_wb ethr_atomic_cmpxchg_wb + +/* 32-bit atomics */ + +#define erts_atomic32_init_nob ethr_atomic32_init +#define erts_atomic32_set_nob ethr_atomic32_set +#define erts_atomic32_read_nob ethr_atomic32_read +#define erts_atomic32_inc_read_nob ethr_atomic32_inc_read +#define erts_atomic32_dec_read_nob ethr_atomic32_dec_read +#define erts_atomic32_inc_nob ethr_atomic32_inc +#define erts_atomic32_dec_nob ethr_atomic32_dec +#define erts_atomic32_add_read_nob ethr_atomic32_add_read +#define erts_atomic32_add_nob ethr_atomic32_add +#define erts_atomic32_read_bor_nob ethr_atomic32_read_bor +#define erts_atomic32_read_band_nob ethr_atomic32_read_band +#define erts_atomic32_xchg_nob ethr_atomic32_xchg +#define erts_atomic32_cmpxchg_nob ethr_atomic32_cmpxchg + +#define erts_atomic32_init_mb ethr_atomic32_init_mb +#define erts_atomic32_set_mb ethr_atomic32_set_mb +#define erts_atomic32_read_mb ethr_atomic32_read_mb +#define erts_atomic32_inc_read_mb ethr_atomic32_inc_read_mb +#define erts_atomic32_dec_read_mb ethr_atomic32_dec_read_mb +#define erts_atomic32_inc_mb ethr_atomic32_inc_mb +#define erts_atomic32_dec_mb ethr_atomic32_dec_mb +#define erts_atomic32_add_read_mb ethr_atomic32_add_read_mb +#define erts_atomic32_add_mb ethr_atomic32_add_mb +#define erts_atomic32_read_bor_mb ethr_atomic32_read_bor_mb +#define erts_atomic32_read_band_mb ethr_atomic32_read_band_mb +#define erts_atomic32_xchg_mb ethr_atomic32_xchg_mb +#define erts_atomic32_cmpxchg_mb ethr_atomic32_cmpxchg_mb + +#define 
erts_atomic32_init_acqb ethr_atomic32_init_acqb +#define erts_atomic32_set_acqb ethr_atomic32_set_acqb +#define erts_atomic32_read_acqb ethr_atomic32_read_acqb +#define erts_atomic32_inc_read_acqb ethr_atomic32_inc_read_acqb +#define erts_atomic32_dec_read_acqb ethr_atomic32_dec_read_acqb +#define erts_atomic32_inc_acqb ethr_atomic32_inc_acqb +#define erts_atomic32_dec_acqb ethr_atomic32_dec_acqb +#define erts_atomic32_add_read_acqb ethr_atomic32_add_read_acqb +#define erts_atomic32_add_acqb ethr_atomic32_add_acqb +#define erts_atomic32_read_bor_acqb ethr_atomic32_read_bor_acqb +#define erts_atomic32_read_band_acqb ethr_atomic32_read_band_acqb +#define erts_atomic32_xchg_acqb ethr_atomic32_xchg_acqb +#define erts_atomic32_cmpxchg_acqb ethr_atomic32_cmpxchg_acqb + +#define erts_atomic32_init_relb ethr_atomic32_init_relb +#define erts_atomic32_set_relb ethr_atomic32_set_relb +#define erts_atomic32_read_relb ethr_atomic32_read_relb +#define erts_atomic32_inc_read_relb ethr_atomic32_inc_read_relb +#define erts_atomic32_dec_read_relb ethr_atomic32_dec_read_relb +#define erts_atomic32_inc_relb ethr_atomic32_inc_relb +#define erts_atomic32_dec_relb ethr_atomic32_dec_relb +#define erts_atomic32_add_read_relb ethr_atomic32_add_read_relb +#define erts_atomic32_add_relb ethr_atomic32_add_relb +#define erts_atomic32_read_bor_relb ethr_atomic32_read_bor_relb +#define erts_atomic32_read_band_relb ethr_atomic32_read_band_relb +#define erts_atomic32_xchg_relb ethr_atomic32_xchg_relb +#define erts_atomic32_cmpxchg_relb ethr_atomic32_cmpxchg_relb + +#define erts_atomic32_init_rb ethr_atomic32_init_rb +#define erts_atomic32_set_rb ethr_atomic32_set_rb +#define erts_atomic32_read_rb ethr_atomic32_read_rb +#define erts_atomic32_inc_read_rb ethr_atomic32_inc_read_rb +#define erts_atomic32_dec_read_rb ethr_atomic32_dec_read_rb +#define erts_atomic32_inc_rb ethr_atomic32_inc_rb +#define erts_atomic32_dec_rb ethr_atomic32_dec_rb +#define erts_atomic32_add_read_rb ethr_atomic32_add_read_rb +#define erts_atomic32_add_rb ethr_atomic32_add_rb +#define erts_atomic32_read_bor_rb ethr_atomic32_read_bor_rb +#define erts_atomic32_read_band_rb ethr_atomic32_read_band_rb +#define erts_atomic32_xchg_rb ethr_atomic32_xchg_rb +#define erts_atomic32_cmpxchg_rb ethr_atomic32_cmpxchg_rb + +#define erts_atomic32_init_wb ethr_atomic32_init_wb +#define erts_atomic32_set_wb ethr_atomic32_set_wb +#define erts_atomic32_read_wb ethr_atomic32_read_wb +#define erts_atomic32_inc_read_wb ethr_atomic32_inc_read_wb +#define erts_atomic32_dec_read_wb ethr_atomic32_dec_read_wb +#define erts_atomic32_inc_wb ethr_atomic32_inc_wb +#define erts_atomic32_dec_wb ethr_atomic32_dec_wb +#define erts_atomic32_add_read_wb ethr_atomic32_add_read_wb +#define erts_atomic32_add_wb ethr_atomic32_add_wb +#define erts_atomic32_read_bor_wb ethr_atomic32_read_bor_wb +#define erts_atomic32_read_band_wb ethr_atomic32_read_band_wb +#define erts_atomic32_xchg_wb ethr_atomic32_xchg_wb +#define erts_atomic32_cmpxchg_wb ethr_atomic32_cmpxchg_wb + +#else /* !USE_THREADS */ + +/* Double word size atomics */ + +#define erts_dw_atomic_init_nob erts_no_dw_atomic_set +#define erts_dw_atomic_set_nob erts_no_dw_atomic_set +#define erts_dw_atomic_read_nob erts_no_dw_atomic_read +#define erts_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg + +#define erts_dw_atomic_init_mb erts_no_dw_atomic_init +#define erts_dw_atomic_set_mb erts_no_dw_atomic_set +#define erts_dw_atomic_read_mb erts_no_dw_atomic_read +#define erts_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg + +#define 
erts_dw_atomic_init_acqb erts_no_dw_atomic_init +#define erts_dw_atomic_set_acqb erts_no_dw_atomic_set +#define erts_dw_atomic_read_acqb erts_no_dw_atomic_read +#define erts_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg + +#define erts_dw_atomic_init_relb erts_no_dw_atomic_init +#define erts_dw_atomic_set_relb erts_no_dw_atomic_set +#define erts_dw_atomic_read_relb erts_no_dw_atomic_read +#define erts_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg + +#define erts_dw_atomic_init_rb erts_no_dw_atomic_init +#define erts_dw_atomic_set_rb erts_no_dw_atomic_set +#define erts_dw_atomic_read_rb erts_no_dw_atomic_read +#define erts_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg + +#define erts_dw_atomic_init_wb erts_no_dw_atomic_init +#define erts_dw_atomic_set_wb erts_no_dw_atomic_set +#define erts_dw_atomic_read_wb erts_no_dw_atomic_read +#define erts_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg + +/* Word size atomics */ + +#define erts_atomic_init_nob erts_no_atomic_set +#define erts_atomic_set_nob erts_no_atomic_set +#define erts_atomic_read_nob erts_no_atomic_read +#define erts_atomic_inc_read_nob erts_no_atomic_inc_read +#define erts_atomic_dec_read_nob erts_no_atomic_dec_read +#define erts_atomic_inc_nob erts_no_atomic_inc +#define erts_atomic_dec_nob erts_no_atomic_dec +#define erts_atomic_add_read_nob erts_no_atomic_add_read +#define erts_atomic_add_nob erts_no_atomic_add +#define erts_atomic_read_bor_nob erts_no_atomic_read_bor +#define erts_atomic_read_band_nob erts_no_atomic_read_band +#define erts_atomic_xchg_nob erts_no_atomic_xchg +#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg + +#define erts_atomic_init_mb erts_no_atomic_set +#define erts_atomic_set_mb erts_no_atomic_set +#define erts_atomic_read_mb erts_no_atomic_read +#define erts_atomic_inc_read_mb erts_no_atomic_inc_read +#define erts_atomic_dec_read_mb erts_no_atomic_dec_read +#define erts_atomic_inc_mb erts_no_atomic_inc +#define erts_atomic_dec_mb erts_no_atomic_dec +#define erts_atomic_add_read_mb erts_no_atomic_add_read +#define erts_atomic_add_mb erts_no_atomic_add +#define erts_atomic_read_bor_mb erts_no_atomic_read_bor +#define erts_atomic_read_band_mb erts_no_atomic_read_band +#define erts_atomic_xchg_mb erts_no_atomic_xchg +#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg + +#define erts_atomic_init_acqb erts_no_atomic_set +#define erts_atomic_set_acqb erts_no_atomic_set +#define erts_atomic_read_acqb erts_no_atomic_read +#define erts_atomic_inc_read_acqb erts_no_atomic_inc_read +#define erts_atomic_dec_read_acqb erts_no_atomic_dec_read +#define erts_atomic_inc_acqb erts_no_atomic_inc +#define erts_atomic_dec_acqb erts_no_atomic_dec +#define erts_atomic_add_read_acqb erts_no_atomic_add_read +#define erts_atomic_add_acqb erts_no_atomic_add +#define erts_atomic_read_bor_acqb erts_no_atomic_read_bor +#define erts_atomic_read_band_acqb erts_no_atomic_read_band +#define erts_atomic_xchg_acqb erts_no_atomic_xchg +#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg + +#define erts_atomic_init_relb erts_no_atomic_set +#define erts_atomic_set_relb erts_no_atomic_set +#define erts_atomic_read_relb erts_no_atomic_read +#define erts_atomic_inc_read_relb erts_no_atomic_inc_read +#define erts_atomic_dec_read_relb erts_no_atomic_dec_read +#define erts_atomic_inc_relb erts_no_atomic_inc +#define erts_atomic_dec_relb erts_no_atomic_dec +#define erts_atomic_add_read_relb erts_no_atomic_add_read +#define erts_atomic_add_relb erts_no_atomic_add +#define erts_atomic_read_bor_relb erts_no_atomic_read_bor +#define 
erts_atomic_read_band_relb erts_no_atomic_read_band +#define erts_atomic_xchg_relb erts_no_atomic_xchg +#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg + +#define erts_atomic_init_rb erts_no_atomic_set +#define erts_atomic_set_rb erts_no_atomic_set +#define erts_atomic_read_rb erts_no_atomic_read +#define erts_atomic_inc_read_rb erts_no_atomic_inc_read +#define erts_atomic_dec_read_rb erts_no_atomic_dec_read +#define erts_atomic_inc_rb erts_no_atomic_inc +#define erts_atomic_dec_rb erts_no_atomic_dec +#define erts_atomic_add_read_rb erts_no_atomic_add_read +#define erts_atomic_add_rb erts_no_atomic_add +#define erts_atomic_read_bor_rb erts_no_atomic_read_bor +#define erts_atomic_read_band_rb erts_no_atomic_read_band +#define erts_atomic_xchg_rb erts_no_atomic_xchg +#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg + +#define erts_atomic_init_wb erts_no_atomic_set +#define erts_atomic_set_wb erts_no_atomic_set +#define erts_atomic_read_wb erts_no_atomic_read +#define erts_atomic_inc_read_wb erts_no_atomic_inc_read +#define erts_atomic_dec_read_wb erts_no_atomic_dec_read +#define erts_atomic_inc_wb erts_no_atomic_inc +#define erts_atomic_dec_wb erts_no_atomic_dec +#define erts_atomic_add_read_wb erts_no_atomic_add_read +#define erts_atomic_add_wb erts_no_atomic_add +#define erts_atomic_read_bor_wb erts_no_atomic_read_bor +#define erts_atomic_read_band_wb erts_no_atomic_read_band +#define erts_atomic_xchg_wb erts_no_atomic_xchg +#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg + +/* 32-bit atomics */ + +#define erts_atomic32_init_nob erts_no_atomic32_set +#define erts_atomic32_set_nob erts_no_atomic32_set +#define erts_atomic32_read_nob erts_no_atomic32_read +#define erts_atomic32_inc_read_nob erts_no_atomic32_inc_read +#define erts_atomic32_dec_read_nob erts_no_atomic32_dec_read +#define erts_atomic32_inc_nob erts_no_atomic32_inc +#define erts_atomic32_dec_nob erts_no_atomic32_dec +#define erts_atomic32_add_read_nob erts_no_atomic32_add_read +#define erts_atomic32_add_nob erts_no_atomic32_add +#define erts_atomic32_read_bor_nob erts_no_atomic32_read_bor +#define erts_atomic32_read_band_nob erts_no_atomic32_read_band +#define erts_atomic32_xchg_nob erts_no_atomic32_xchg +#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg + +#define erts_atomic32_init_mb erts_no_atomic32_set +#define erts_atomic32_set_mb erts_no_atomic32_set +#define erts_atomic32_read_mb erts_no_atomic32_read +#define erts_atomic32_inc_read_mb erts_no_atomic32_inc_read +#define erts_atomic32_dec_read_mb erts_no_atomic32_dec_read +#define erts_atomic32_inc_mb erts_no_atomic32_inc +#define erts_atomic32_dec_mb erts_no_atomic32_dec +#define erts_atomic32_add_read_mb erts_no_atomic32_add_read +#define erts_atomic32_add_mb erts_no_atomic32_add +#define erts_atomic32_read_bor_mb erts_no_atomic32_read_bor +#define erts_atomic32_read_band_mb erts_no_atomic32_read_band +#define erts_atomic32_xchg_mb erts_no_atomic32_xchg +#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg + +#define erts_atomic32_init_acqb erts_no_atomic32_set +#define erts_atomic32_set_acqb erts_no_atomic32_set +#define erts_atomic32_read_acqb erts_no_atomic32_read +#define erts_atomic32_inc_read_acqb erts_no_atomic32_inc_read +#define erts_atomic32_dec_read_acqb erts_no_atomic32_dec_read +#define erts_atomic32_inc_acqb erts_no_atomic32_inc +#define erts_atomic32_dec_acqb erts_no_atomic32_dec +#define erts_atomic32_add_read_acqb erts_no_atomic32_add_read +#define erts_atomic32_add_acqb erts_no_atomic32_add +#define 
erts_atomic32_read_bor_acqb erts_no_atomic32_read_bor +#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band +#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg +#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg + +#define erts_atomic32_init_relb erts_no_atomic32_set +#define erts_atomic32_set_relb erts_no_atomic32_set +#define erts_atomic32_read_relb erts_no_atomic32_read +#define erts_atomic32_inc_read_relb erts_no_atomic32_inc_read +#define erts_atomic32_dec_read_relb erts_no_atomic32_dec_read +#define erts_atomic32_inc_relb erts_no_atomic32_inc +#define erts_atomic32_dec_relb erts_no_atomic32_dec +#define erts_atomic32_add_read_relb erts_no_atomic32_add_read +#define erts_atomic32_add_relb erts_no_atomic32_add +#define erts_atomic32_read_bor_relb erts_no_atomic32_read_bor +#define erts_atomic32_read_band_relb erts_no_atomic32_read_band +#define erts_atomic32_xchg_relb erts_no_atomic32_xchg +#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg + +#define erts_atomic32_init_rb erts_no_atomic32_set +#define erts_atomic32_set_rb erts_no_atomic32_set +#define erts_atomic32_read_rb erts_no_atomic32_read +#define erts_atomic32_inc_read_rb erts_no_atomic32_inc_read +#define erts_atomic32_dec_read_rb erts_no_atomic32_dec_read +#define erts_atomic32_inc_rb erts_no_atomic32_inc +#define erts_atomic32_dec_rb erts_no_atomic32_dec +#define erts_atomic32_add_read_rb erts_no_atomic32_add_read +#define erts_atomic32_add_rb erts_no_atomic32_add +#define erts_atomic32_read_bor_rb erts_no_atomic32_read_bor +#define erts_atomic32_read_band_rb erts_no_atomic32_read_band +#define erts_atomic32_xchg_rb erts_no_atomic32_xchg +#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg + +#define erts_atomic32_init_wb erts_no_atomic32_set +#define erts_atomic32_set_wb erts_no_atomic32_set +#define erts_atomic32_read_wb erts_no_atomic32_read +#define erts_atomic32_inc_read_wb erts_no_atomic32_inc_read +#define erts_atomic32_dec_read_wb erts_no_atomic32_dec_read +#define erts_atomic32_inc_wb erts_no_atomic32_inc +#define erts_atomic32_dec_wb erts_no_atomic32_dec +#define erts_atomic32_add_read_wb erts_no_atomic32_add_read +#define erts_atomic32_add_wb erts_no_atomic32_add +#define erts_atomic32_read_bor_wb erts_no_atomic32_read_bor +#define erts_atomic32_read_band_wb erts_no_atomic32_read_band +#define erts_atomic32_xchg_wb erts_no_atomic32_xchg +#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg + +#endif /* !USE_THREADS */ + #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void @@ -998,428 +1417,206 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx) #endif } +/* No atomic ops */ + ERTS_GLB_INLINE void -erts_atomic_init(erts_atomic_t *var, erts_aint_t i) +erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val) { -#ifdef USE_THREADS - ethr_atomic_init(var, i); -#else - *var = i; -#endif + var->sint[0] = val->sint[0]; + var->sint[1] = val->sint[1]; } ERTS_GLB_INLINE void -erts_atomic_set(erts_atomic_t *var, erts_aint_t i) +erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val) +{ + val->sint[0] = var->sint[0]; + val->sint[1] = var->sint[1]; +} + +ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var, + erts_no_dw_atomic_t *new_val, + erts_no_dw_atomic_t *old_val) +{ + if (var->sint[0] != old_val->sint[0] || var->sint[1] != old_val->sint[1]) { + erts_no_dw_atomic_read(var, old_val); + return 0; + } + else { + erts_no_dw_atomic_set(var, new_val); + return !0; + } +} + +ERTS_GLB_INLINE void +erts_no_atomic_set(erts_no_atomic_t 
*var, erts_aint_t i) { -#ifdef USE_THREADS - ethr_atomic_set(var, i); -#else *var = i; -#endif } ERTS_GLB_INLINE erts_aint_t -erts_atomic_read(erts_atomic_t *var) +erts_no_atomic_read(erts_no_atomic_t *var) { -#ifdef USE_THREADS - return ethr_atomic_read(var); -#else return *var; -#endif } ERTS_GLB_INLINE erts_aint_t -erts_atomic_inctest(erts_atomic_t *incp) +erts_no_atomic_inc_read(erts_no_atomic_t *incp) { -#ifdef USE_THREADS - return ethr_atomic_inc_read(incp); -#else return ++(*incp); -#endif } ERTS_GLB_INLINE erts_aint_t -erts_atomic_dectest(erts_atomic_t *decp) +erts_no_atomic_dec_read(erts_no_atomic_t *decp) { -#ifdef USE_THREADS - return ethr_atomic_dec_read(decp); -#else return --(*decp); -#endif } ERTS_GLB_INLINE void -erts_atomic_inc(erts_atomic_t *incp) +erts_no_atomic_inc(erts_no_atomic_t *incp) { -#ifdef USE_THREADS - ethr_atomic_inc(incp); -#else ++(*incp); -#endif } ERTS_GLB_INLINE void -erts_atomic_dec(erts_atomic_t *decp) +erts_no_atomic_dec(erts_no_atomic_t *decp) { -#ifdef USE_THREADS - ethr_atomic_dec(decp); -#else --(*decp); -#endif } ERTS_GLB_INLINE erts_aint_t -erts_atomic_addtest(erts_atomic_t *addp, erts_aint_t i) +erts_no_atomic_add_read(erts_no_atomic_t *addp, erts_aint_t i) { -#ifdef USE_THREADS - return ethr_atomic_add_read(addp, i); -#else return *addp += i; -#endif } ERTS_GLB_INLINE void -erts_atomic_add(erts_atomic_t *addp, erts_aint_t i) +erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i) { -#ifdef USE_THREADS - ethr_atomic_add(addp, i); -#else *addp += i; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_atomic_xchg(erts_atomic_t *xchgp, erts_aint_t new) -{ -#ifdef USE_THREADS - return ethr_atomic_xchg(xchgp, new); -#else - erts_aint_t old = *xchgp; - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_atomic_cmpxchg(erts_atomic_t *xchgp, erts_aint_t new, erts_aint_t expected) -{ -#ifdef USE_THREADS - return ethr_atomic_cmpxchg(xchgp, new, expected); -#else - erts_aint_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -#endif } ERTS_GLB_INLINE erts_aint_t -erts_atomic_bor(erts_atomic_t *var, erts_aint_t mask) +erts_no_atomic_read_bor(erts_no_atomic_t *var, erts_aint_t mask) { -#ifdef USE_THREADS - return ethr_atomic_read_bor(var, mask); -#else erts_aint_t old; old = *var; *var |= mask; return old; -#endif } ERTS_GLB_INLINE erts_aint_t -erts_atomic_band(erts_atomic_t *var, erts_aint_t mask) +erts_no_atomic_read_band(erts_no_atomic_t *var, erts_aint_t mask) { -#ifdef USE_THREADS - return ethr_atomic_read_band(var, mask); -#else erts_aint_t old; old = *var; *var &= mask; return old; -#endif } ERTS_GLB_INLINE erts_aint_t -erts_atomic_read_acqb(erts_atomic_t *var) +erts_no_atomic_xchg(erts_no_atomic_t *xchgp, erts_aint_t new) { -#ifdef USE_THREADS - return ethr_atomic_read_acqb(var); -#else - return *var; -#endif -} - -ERTS_GLB_INLINE void -erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i) -{ -#ifdef USE_THREADS - ethr_atomic_set_relb(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE void -erts_atomic_dec_relb(erts_atomic_t *decp) -{ -#ifdef USE_THREADS - ethr_atomic_dec_relb(decp); -#else - --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_atomic_dectest_relb(erts_atomic_t *decp) -{ -#ifdef USE_THREADS - return ethr_atomic_dec_read_relb(decp); -#else - return --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp) -{ -#ifdef USE_THREADS - return ethr_atomic_cmpxchg_acqb(xchgp, new, exp); -#else erts_aint_t 
old = *xchgp; - if (old == exp) - *xchgp = new; + *xchgp = new; return old; -#endif } -ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t exp) +ERTS_GLB_INLINE erts_aint_t +erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, + erts_aint_t new, + erts_aint_t expected) { -#ifdef USE_THREADS - return ethr_atomic_cmpxchg_relb(xchgp, new, exp); -#else erts_aint_t old = *xchgp; - if (old == exp) + if (old == expected) *xchgp = new; return old; -#endif } /* atomic32 */ ERTS_GLB_INLINE void -erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i) -{ -#ifdef USE_THREADS - ethr_atomic32_init(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE void -erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i) +erts_no_atomic32_set(erts_no_atomic32_t *var, erts_aint32_t i) { -#ifdef USE_THREADS - ethr_atomic32_set(var, i); -#else *var = i; -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_read(erts_atomic32_t *var) +erts_no_atomic32_read(erts_no_atomic32_t *var) { -#ifdef USE_THREADS - return ethr_atomic32_read(var); -#else return *var; -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_inctest(erts_atomic32_t *incp) +erts_no_atomic32_inc_read(erts_no_atomic32_t *incp) { -#ifdef USE_THREADS - return ethr_atomic32_inc_read(incp); -#else return ++(*incp); -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_dectest(erts_atomic32_t *decp) +erts_no_atomic32_dec_read(erts_no_atomic32_t *decp) { -#ifdef USE_THREADS - return ethr_atomic32_dec_read(decp); -#else return --(*decp); -#endif } ERTS_GLB_INLINE void -erts_atomic32_inc(erts_atomic32_t *incp) +erts_no_atomic32_inc(erts_no_atomic32_t *incp) { -#ifdef USE_THREADS - ethr_atomic32_inc(incp); -#else ++(*incp); -#endif } ERTS_GLB_INLINE void -erts_atomic32_dec(erts_atomic32_t *decp) +erts_no_atomic32_dec(erts_no_atomic32_t *decp) { -#ifdef USE_THREADS - ethr_atomic32_dec(decp); -#else --(*decp); -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_addtest(erts_atomic32_t *addp, erts_aint32_t i) +erts_no_atomic32_add_read(erts_no_atomic32_t *addp, erts_aint32_t i) { -#ifdef USE_THREADS - return ethr_atomic32_add_read(addp, i); -#else return *addp += i; -#endif } ERTS_GLB_INLINE void -erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i) +erts_no_atomic32_add(erts_no_atomic32_t *addp, erts_aint32_t i) { -#ifdef USE_THREADS - ethr_atomic32_add(addp, i); -#else *addp += i; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_xchg(erts_atomic32_t *xchgp, erts_aint32_t new) -{ -#ifdef USE_THREADS - return ethr_atomic32_xchg(xchgp, new); -#else - erts_aint32_t old = *xchgp; - *xchgp = new; - return old; -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_cmpxchg(erts_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected) -{ -#ifdef USE_THREADS - return ethr_atomic32_cmpxchg(xchgp, new, expected); -#else - erts_aint32_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_bor(erts_atomic32_t *var, erts_aint32_t mask) +erts_no_atomic32_read_bor(erts_no_atomic32_t *var, erts_aint32_t mask) { -#ifdef USE_THREADS - return ethr_atomic32_read_bor(var, mask); -#else erts_aint32_t old; old = *var; *var |= mask; return old; -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_band(erts_atomic32_t *var, erts_aint32_t mask) +erts_no_atomic32_read_band(erts_no_atomic32_t *var, erts_aint32_t mask) { -#ifdef USE_THREADS - return ethr_atomic32_read_band(var, mask); -#else erts_aint32_t old; old = *var; *var &= 
mask; return old; -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_read_acqb(erts_atomic32_t *var) -{ -#ifdef USE_THREADS - return ethr_atomic32_read_acqb(var); -#else - return *var; -#endif -} - -ERTS_GLB_INLINE void -erts_atomic32_set_relb(erts_atomic32_t *var, erts_aint32_t i) -{ -#ifdef USE_THREADS - ethr_atomic32_set_relb(var, i); -#else - *var = i; -#endif -} - -ERTS_GLB_INLINE void -erts_atomic32_dec_relb(erts_atomic32_t *decp) +erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, erts_aint32_t new) { -#ifdef USE_THREADS - ethr_atomic32_dec_relb(decp); -#else - --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_dectest_relb(erts_atomic32_t *decp) -{ -#ifdef USE_THREADS - return ethr_atomic32_dec_read_relb(decp); -#else - return --(*decp); -#endif -} - -ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp) -{ -#ifdef USE_THREADS - return ethr_atomic32_cmpxchg_acqb(xchgp, new, exp); -#else erts_aint32_t old = *xchgp; - if (old == exp) - *xchgp = new; + *xchgp = new; return old; -#endif } ERTS_GLB_INLINE erts_aint32_t -erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t exp) +erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, + erts_aint32_t new, + erts_aint32_t expected) { -#ifdef USE_THREADS - return ethr_atomic32_cmpxchg_relb(xchgp, new, exp); -#else erts_aint32_t old = *xchgp; - if (old == exp) + if (old == expected) *xchgp = new; return old; -#endif } /* spinlock */ @@ -1892,3 +2089,37 @@ erts_thr_sigwait(const sigset_t *set, int *sig) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ #endif /* #ifndef ERL_THREAD_H__ */ + +#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS + +/* Deprecated functions to replace */ + +#undef erts_atomic_init +#undef erts_atomic_set +#undef erts_atomic_read +#undef erts_atomic_inctest +#undef erts_atomic_dectest +#undef erts_atomic_inc +#undef erts_atomic_dec +#undef erts_atomic_addtest +#undef erts_atomic_add +#undef erts_atomic_xchg +#undef erts_atomic_cmpxchg +#undef erts_atomic_bor +#undef erts_atomic_band + +#undef erts_atomic32_init +#undef erts_atomic32_set +#undef erts_atomic32_read +#undef erts_atomic32_inctest +#undef erts_atomic32_dectest +#undef erts_atomic32_inc +#undef erts_atomic32_dec +#undef erts_atomic32_addtest +#undef erts_atomic32_add +#undef erts_atomic32_xchg +#undef erts_atomic32_cmpxchg +#undef erts_atomic32_bor +#undef erts_atomic32_band + +#endif diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h index d0ad73cd81..7a09d30ff6 100644 --- a/erts/emulator/beam/erl_time.h +++ b/erts/emulator/beam/erl_time.h @@ -85,8 +85,8 @@ ERTS_GLB_INLINE void erts_do_time_add(long); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_GLB_INLINE erts_aint_t erts_do_time_read_and_reset(void) { return erts_smp_atomic_xchg(&do_time, 0L); } -ERTS_GLB_INLINE void erts_do_time_add(long elapsed) { erts_smp_atomic_add(&do_time, elapsed); } +ERTS_GLB_INLINE erts_aint_t erts_do_time_read_and_reset(void) { return erts_smp_atomic_xchg_acqb(&do_time, 0L); } +ERTS_GLB_INLINE void erts_do_time_add(long elapsed) { erts_smp_atomic_add_relb(&do_time, elapsed); } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 499bdd77ba..249df54015 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -200,10 +200,10 @@ erts_port_runq(Port *prt) { #ifdef ERTS_SMP ErtsRunQueue *rq1, *rq2; - rq1 = (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue); + rq1 = 
(ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
     while (1) {
 	erts_smp_runq_lock(rq1);
-	rq2 = (ErtsRunQueue *) erts_smp_atomic_read(&prt->run_queue);
+	rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
 	if (rq1 == rq2)
 	    return rq1;
 	erts_smp_runq_unlock(rq1);
@@ -542,10 +542,11 @@ ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
     ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck));
     if (prt->snapshot != erts_smp_atomic32_read_acqb(&erts_ports_snapshot)) {
 	/* Dead ports are added from the end of the snapshot buffer */
-	Eterm* tombstone = (Eterm*) erts_smp_atomic_addtest(&erts_dead_ports_ptr,
-							    -(erts_aint_t)sizeof(Eterm));
+	Eterm* tombstone;
+	tombstone = (Eterm*) erts_smp_atomic_add_read_nob(&erts_dead_ports_ptr,
+							  -(erts_aint_t)sizeof(Eterm));
 	ASSERT(tombstone+1 != NULL);
-	ASSERT(prt->snapshot == erts_smp_atomic32_read(&erts_ports_snapshot) - 1);
+	ASSERT(prt->snapshot == erts_smp_atomic_read_nob(&erts_ports_snapshot) - 1);
 	*tombstone = prt->id;
     }
     /*else no ongoing snapshot or port was already included or created after snapshot */
@@ -1200,11 +1201,11 @@ erts_smp_port_trylock(Port *prt)
 #ifdef ERTS_SMP
     int res;
-    ASSERT(erts_smp_atomic_read(&prt->refc) > 0);
-    erts_smp_atomic_inc(&prt->refc);
+    ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
+    erts_smp_atomic_inc_nob(&prt->refc);
     res = erts_smp_mtx_trylock(prt->lock);
     if (res == EBUSY) {
-	erts_smp_atomic_dec(&prt->refc);
+	erts_smp_atomic_dec_nob(&prt->refc);
     }
     return res;
@@ -1217,8 +1218,8 @@ ERTS_GLB_INLINE void
 erts_smp_port_lock(Port *prt)
 {
 #ifdef ERTS_SMP
-    ASSERT(erts_smp_atomic_read(&prt->refc) > 0);
-    erts_smp_atomic_inc(&prt->refc);
+    ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
+    erts_smp_atomic_inc_nob(&prt->refc);
     erts_smp_mtx_lock(prt->lock);
 #endif
 }
@@ -1229,7 +1230,7 @@ erts_smp_port_unlock(Port *prt)
 #ifdef ERTS_SMP
     erts_aint_t refc;
     erts_smp_mtx_unlock(prt->lock);
-    refc = erts_smp_atomic_dectest(&prt->refc);
+    refc = erts_smp_atomic_dec_read_nob(&prt->refc);
     ASSERT(refc >= 0);
     if (refc == 0)
 	erts_port_cleanup(prt);
@@ -1298,7 +1299,7 @@ erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs
     }
 #ifdef ERTS_SMP
     else {
-	erts_smp_atomic_inc(&prt->refc);
+	erts_smp_atomic_inc_nob(&prt->refc);
 	erts_smp_port_state_unlock(prt);
 	if (no_proc_locks)
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index df5f8b22a3..151c776a3d 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -244,8 +244,8 @@ get_free_port(void)
 	}
 	port->status = ERTS_PORT_SFLG_INITIALIZING;
 #ifdef ERTS_SMP
-	ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&port->refc) == 0);
-	erts_smp_atomic_set(&port->refc, 2); /* Port alive + lock */
+	ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 0);
+	erts_smp_atomic_set_nob(&port->refc, 2); /* Port alive + lock */
 #endif
 	erts_smp_port_state_unlock(port);
 	return num & port_num_mask;
@@ -327,7 +327,7 @@ port_cleanup(Port *prt)
 #ifdef ERTS_SMP
     ASSERT(prt->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
-    ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&prt->refc) == 0);
+    ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&prt->refc) == 0);
     port_specific = (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK);
@@ -425,11 +425,11 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
     erts_smp_runq_lock(runq);
     erts_smp_port_state_lock(prt);
     prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
-    prt->snapshot = erts_smp_atomic32_read(&erts_ports_snapshot);
+    prt->snapshot = erts_smp_atomic32_read_nob(&erts_ports_snapshot);
     old_name = prt->name;
     prt->name = new_name;
 #ifdef ERTS_SMP
-    erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) runq);
+    erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
 #endif
     ASSERT(!prt->drv_ptr);
     prt->drv_ptr = driver;
@@ -590,8 +590,8 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
 	erts_smp_port_state_lock(port);
 	port->status = ERTS_PORT_SFLG_FREE;
 #ifdef ERTS_SMP
-	ERTS_SMP_LC_ASSERT(erts_smp_atomic_read(&port->refc) == 2);
-	erts_smp_atomic_set(&port->refc, 0);
+	ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 2);
+	erts_smp_atomic_set_nob(&port->refc, 0);
 #endif
 	erts_smp_port_state_unlock(port);
 	return -3;
@@ -1206,7 +1206,7 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
 	}
     }
     p->bytes_out += size;
-    erts_smp_atomic_add(&erts_bytes_out, size);
+    erts_smp_atomic_add_nob(&erts_bytes_out, size);
 #ifdef ERTS_SMP
     if (p->xports)
@@ -1277,13 +1277,13 @@ void init_io(void)
     erts_port = (Port *) erts_alloc(ERTS_ALC_T_PORT_TABLE,
 				    erts_max_ports * sizeof(Port));
-    erts_smp_atomic_init(&erts_bytes_out, 0);
-    erts_smp_atomic_init(&erts_bytes_in, 0);
+    erts_smp_atomic_init_nob(&erts_bytes_out, 0);
+    erts_smp_atomic_init_nob(&erts_bytes_in, 0);
     for (i = 0; i < erts_max_ports; i++) {
 	erts_port_task_init_sched(&erts_port[i].sched);
 #ifdef ERTS_SMP
-	erts_smp_atomic_init(&erts_port[i].refc, 0);
+	erts_smp_atomic_init_nob(&erts_port[i].refc, 0);
 	erts_port[i].lock = NULL;
 	erts_port[i].xports = NULL;
 	erts_smp_spinlock_init_x(&erts_port[i].state_lck, "port_state", make_small(i));
@@ -1300,7 +1300,7 @@ void init_io(void)
 	erts_port[i].port_data_lock = NULL;
     }
-    erts_smp_atomic32_init(&erts_ports_snapshot, (erts_aint32_t) 0);
+    erts_smp_atomic32_init_nob(&erts_ports_snapshot, (erts_aint32_t) 0);
     last_port_num = 0;
     erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
@@ -3253,7 +3253,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, int hlen,
 	return 0;
     prt->bytes_in += (hlen + len);
-    erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
+    erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
     if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
 	return erts_net_message(prt,
 				prt->dist_entry,
@@ -3288,7 +3288,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, int hlen, char* buf, int len)
 	return 0;
     prt->bytes_in += (hlen + len);
-    erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
+    erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
     if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
 	if (len == 0)
 	    return erts_net_message(prt,
@@ -3365,7 +3365,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, int hlen, ErlIOVec* vec, int skip)
     /* XXX handle distribution !!! */
     prt->bytes_in += (hlen + size);
-    erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + size));
+    erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + size));
     deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov,
 			n, size);
     return 0;
 }
@@ -3539,13 +3539,13 @@ pdl_init(void)
 static ERTS_INLINE void
 pdl_init_refc(ErlDrvPDL pdl)
 {
-    erts_atomic_init(&pdl->refc, 1);
+    erts_atomic_init_nob(&pdl->refc, 1);
 }
 static ERTS_INLINE ErlDrvSInt
 pdl_read_refc(ErlDrvPDL pdl)
 {
-    erts_aint_t refc = erts_atomic_read(&pdl->refc);
+    erts_aint_t refc = erts_atomic_read_nob(&pdl->refc);
     ERTS_LC_ASSERT(refc >= 0);
     return (ErlDrvSInt) refc;
 }
@@ -3553,14 +3553,14 @@ pdl_read_refc(ErlDrvPDL pdl)
 static ERTS_INLINE void
 pdl_inc_refc(ErlDrvPDL pdl)
 {
-    erts_atomic_inc(&pdl->refc);
+    erts_atomic_inc_nob(&pdl->refc);
     ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 1);
 }
 static ERTS_INLINE ErlDrvSInt
 pdl_inctest_refc(ErlDrvPDL pdl)
 {
-    erts_aint_t refc = erts_atomic_inctest(&pdl->refc);
+    erts_aint_t refc = erts_atomic_inc_read_nob(&pdl->refc);
     ERTS_LC_ASSERT(refc > 1);
     return (ErlDrvSInt) refc;
 }
@@ -3569,7 +3569,7 @@ pdl_inctest_refc(ErlDrvPDL pdl)
 static ERTS_INLINE void
 pdl_dec_refc(ErlDrvPDL pdl)
 {
-    erts_atomic_dec(&pdl->refc);
+    erts_atomic_dec_nob(&pdl->refc);
     ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 0);
 }
 #endif
@@ -3577,7 +3577,7 @@ pdl_dec_refc(ErlDrvPDL pdl)
 static ERTS_INLINE ErlDrvSInt
 pdl_dectest_refc(ErlDrvPDL pdl)
 {
-    erts_aint_t refc = erts_atomic_dectest(&pdl->refc);
+    erts_aint_t refc = erts_atomic_dec_read_nob(&pdl->refc);
     ERTS_LC_ASSERT(refc >= 0);
     return (ErlDrvSInt) refc;
 }
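The hunks above all follow one pattern: every erts_smp_atomic_* call gains an explicit memory-order suffix (_nob for "no barrier", _acqb for acquire, _mb for a full barrier), and the old *_inctest/_dectest/_addtest calls, which return the updated value, become *_inc_read/_dec_read/_add_read variants. The sketch below is a rough C11 analogue of the port reference-count pattern from the erts_smp_port_trylock/erts_smp_port_unlock hunks, assuming the usual reading of _nob as relaxed ordering; fake_port_t and the fake_port_* functions are invented for the illustration and are not ERTS APIs.

/* Illustrative sketch only, not ERTS code: a C11 analogue of the refc
 * pattern in the port lock/unlock hunks above.  The _nob operations are
 * modelled here with relaxed atomics, and the *_dec_read style call is
 * modelled with fetch_sub plus an adjustment to get the new value. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    atomic_long refc;         /* plays the role of prt->refc */
    pthread_mutex_t lock;     /* plays the role of prt->lock */
} fake_port_t;

/* Like erts_smp_port_trylock(): bump the refc first, then try the lock,
 * and drop the refc again if the lock turned out to be busy. */
static bool fake_port_trylock(fake_port_t *p)
{
    assert(atomic_load_explicit(&p->refc, memory_order_relaxed) > 0);
    atomic_fetch_add_explicit(&p->refc, 1, memory_order_relaxed);
    if (pthread_mutex_trylock(&p->lock) != 0) {
        atomic_fetch_sub_explicit(&p->refc, 1, memory_order_relaxed);
        return false;
    }
    return true;
}

/* Like erts_smp_port_unlock(): release the lock, then decrement and read
 * the refc in one atomic step; whoever sees zero runs the cleanup. */
static void fake_port_unlock(fake_port_t *p, void (*cleanup)(fake_port_t *))
{
    long refc;
    pthread_mutex_unlock(&p->lock);
    refc = atomic_fetch_sub_explicit(&p->refc, 1, memory_order_relaxed) - 1;
    assert(refc >= 0);
    if (refc == 0)
        cleanup(p);
}

The point of the dec-and-read form is that the decrementing thread learns atomically whether it dropped the last reference and therefore must run the cleanup, which is exactly how erts_port_cleanup() is triggered in the hunk above.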
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 4c54e19cdb..3326e5cc2a 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -61,7 +61,7 @@ static ERTS_INLINE int align_up_pow2(int val)
  */
 static void rehash(SafeHash* h, int grow_limit)
 {
-    if (erts_smp_atomic_xchg(&h->is_rehashing, 1) != 0) {
+    if (erts_smp_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) {
 	return; /* already in progress */
     }
     if (h->grow_limit == grow_limit) {
@@ -166,8 +166,8 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size,
     h->name = name;
     h->fun = fun;
     set_size(h,size);
-    erts_smp_atomic_init(&h->is_rehashing, 0);
-    erts_smp_atomic_init(&h->nitems, 0);
+    erts_smp_atomic_init_nob(&h->is_rehashing, 0);
+    erts_smp_atomic_init_nob(&h->nitems, 0);
     for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
 	erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
     }
@@ -222,7 +222,7 @@ void* safe_hash_put(SafeHash* h, void* tmpl)
     *head = b;
     grow_limit = h->grow_limit;
     erts_smp_mtx_unlock(lock);
-    if (erts_smp_atomic_inctest(&h->nitems) > grow_limit) {
+    if (erts_smp_atomic_inc_read_nob(&h->nitems) > grow_limit) {
 	rehash(h, grow_limit);
     }
     return (void*) b;
@@ -245,7 +245,7 @@ void* safe_hash_erase(SafeHash* h, void* tmpl)
 	if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
 	    *prevp = b->next;
 	    erts_smp_mtx_unlock(lock);
-	    erts_smp_atomic_dec(&h->nitems);
+	    erts_smp_atomic_dec_nob(&h->nitems);
 	    h->fun.free((void*)b);
 	    return tmpl;
 	}
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index e64c43de6e..669a601b35 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -340,7 +340,8 @@ int erts_send_warning_to_logger_str_nogl(char *);
 #ifdef ERTS_WANT_BREAK_HANDLING
 # ifdef ERTS_SMP
 extern erts_smp_atomic32_t erts_break_requested;
-# define ERTS_BREAK_REQUESTED ((int) erts_smp_atomic32_read(&erts_break_requested))
+# define ERTS_BREAK_REQUESTED \
+  ((int) erts_smp_atomic32_read_nob(&erts_break_requested))
 # else
 extern volatile int erts_break_requested;
 # define ERTS_BREAK_REQUESTED erts_break_requested
@@ -354,7 +355,7 @@ void erts_do_break_handling(void);
 # else
 # ifdef ERTS_SMP
 extern erts_smp_atomic32_t erts_got_sigusr1;
-# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read(&erts_got_sigusr1))
+# define ERTS_GOT_SIGUSR1 ((int) erts_smp_atomic32_read_mb(&erts_got_sigusr1))
 # else
 extern volatile int erts_got_sigusr1;
 # define ERTS_GOT_SIGUSR1 erts_got_sigusr1
@@ -363,11 +364,15 @@ extern volatile int erts_got_sigusr1;
 #endif
 #ifdef ERTS_SMP
-extern erts_smp_atomic_t erts_writing_erl_crash_dump;
+extern erts_smp_atomic32_t erts_writing_erl_crash_dump;
+extern erts_tsd_key_t erts_is_crash_dumping_key;
+#define ERTS_SOMEONE_IS_CRASH_DUMPING \
+  ((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump))
 #define ERTS_IS_CRASH_DUMPING \
-  ((int) erts_smp_atomic_read(&erts_writing_erl_crash_dump))
+  ((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key))
 #else
 extern volatile int erts_writing_erl_crash_dump;
+#define ERTS_SOMEONE_IS_CRASH_DUMPING erts_writing_erl_crash_dump
 #define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump
 #endif
@@ -877,7 +882,7 @@ ERTS_GLB_INLINE int
 erts_smp_pending_system_block(void)
 {
 #ifdef ERTS_SMP
-    return (int) erts_smp_atomic32_read(&erts_system_block_state.do_block);
+    return (int) erts_smp_atomic32_read_nob(&erts_system_block_state.do_block);
 #else
     return 0;
 #endif
@@ -913,7 +918,7 @@ erts_smp_set_activity(erts_activity_t old_activity,
 	case ERTS_ACTIVITY_UNDEFINED:
 	    break;
 	case ERTS_ACTIVITY_WAIT:
-	    erts_smp_atomic32_dec(&erts_system_block_state.in_activity.wait);
+	    erts_smp_atomic32_dec_acqb(&erts_system_block_state.in_activity.wait);
 	    if (locked) {
 		/* You are not allowed to leave activity waiting
 		 * without supplying the possibility to block
@@ -924,10 +929,10 @@ erts_smp_set_activity(erts_activity_t old_activity,
 	    }
 	    break;
 	case ERTS_ACTIVITY_GC:
-	    erts_smp_atomic32_dec(&erts_system_block_state.in_activity.gc);
+	    erts_smp_atomic32_dec_acqb(&erts_system_block_state.in_activity.gc);
 	    break;
 	case ERTS_ACTIVITY_IO:
-	    erts_smp_atomic32_dec(&erts_system_block_state.in_activity.io);
+	    erts_smp_atomic32_dec_acqb(&erts_system_block_state.in_activity.io);
 	    break;
 	default:
 	    erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
@@ -943,13 +948,13 @@ erts_smp_set_activity(erts_activity_t old_activity,
 	case ERTS_ACTIVITY_UNDEFINED:
 	    break;
 	case ERTS_ACTIVITY_WAIT:
-	    erts_smp_atomic32_inc(&erts_system_block_state.in_activity.wait);
+	    erts_smp_atomic32_inc_mb(&erts_system_block_state.in_activity.wait);
 	    break;
 	case ERTS_ACTIVITY_GC:
-	    erts_smp_atomic32_inc(&erts_system_block_state.in_activity.gc);
+	    erts_smp_atomic32_inc_mb(&erts_system_block_state.in_activity.gc);
 	    break;
 	case ERTS_ACTIVITY_IO:
-	    erts_smp_atomic32_inc(&erts_system_block_state.in_activity.io);
+	    erts_smp_atomic32_inc_mb(&erts_system_block_state.in_activity.io);
 	    break;
 	default:
 	    erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
@@ -1001,27 +1006,27 @@ ERTS_GLB_INLINE erts_aint_t erts_refc_read(erts_refc_t *refcp,
 ERTS_GLB_INLINE void
 erts_refc_init(erts_refc_t *refcp, erts_aint_t val)
 {
-    erts_smp_atomic_init((erts_smp_atomic_t *) refcp, val);
+    erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val);
 }
 ERTS_GLB_INLINE void
 erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
 {
 #ifdef ERTS_REFC_DEBUG
-    erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+    erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
     if (val < min_val)
 	erl_exit(ERTS_ABORT_EXIT,
 		 "erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
 		 val, min_val);
 #else
-    erts_smp_atomic_inc((erts_smp_atomic_t *) refcp);
+    erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp);
 #endif
 }
 ERTS_GLB_INLINE erts_aint_t
 erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
 {
-    erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+    erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp);
 #ifdef ERTS_REFC_DEBUG
     if (val < min_val)
 	erl_exit(ERTS_ABORT_EXIT,
@@ -1035,20 +1040,20 @@ ERTS_GLB_INLINE void
 erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val)
 {
 #ifdef ERTS_REFC_DEBUG
-    erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+    erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
     if (val < min_val)
 	erl_exit(ERTS_ABORT_EXIT,
 		 "erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
 		 val, min_val);
 #else
-    erts_smp_atomic_dec((erts_smp_atomic_t *) refcp);
+    erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp);
 #endif
 }
 ERTS_GLB_INLINE erts_aint_t
 erts_refc_dectest(erts_refc_t *refcp, erts_aint_t min_val)
 {
-    erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+    erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp);
 #ifdef ERTS_REFC_DEBUG
     if (val < min_val)
 	erl_exit(ERTS_ABORT_EXIT,
@@ -1062,20 +1067,20 @@ ERTS_GLB_INLINE void
 erts_refc_add(erts_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
 {
 #ifdef ERTS_REFC_DEBUG
-    erts_aint_t val = erts_smp_atomic_addtest((erts_smp_atomic_t *) refcp, diff);
+    erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff);
     if (val < min_val)
 	erl_exit(ERTS_ABORT_EXIT,
 		 "erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
 		 diff, val, min_val);
 #else
-    erts_smp_atomic_add((erts_smp_atomic_t *) refcp, diff);
+    erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff);
 #endif
 }
 ERTS_GLB_INLINE erts_aint_t
 erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
 {
-    erts_aint_t val = erts_smp_atomic_read((erts_smp_atomic_t *) refcp);
+    erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp);
 #ifdef ERTS_REFC_DEBUG
     if (val < min_val)
 	erl_exit(ERTS_ABORT_EXIT,
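The sys.h hunk above splits the crash-dump test in two: ERTS_SOMEONE_IS_CRASH_DUMPING reads the global erts_writing_erl_crash_dump flag with a full-barrier read, while ERTS_IS_CRASH_DUMPING now answers the narrower question of whether this particular thread is the one writing the dump, via a thread-specific-data key. A portable sketch of that shape, using C11 atomics and POSIX TSD instead of the erts_smp_atomic32/erts_tsd wrappers, might look as follows; every name in it is invented for the illustration, and key creation is left out.

/* Illustrative sketch only, not ERTS code.  The key would have to be
 * created once at startup with pthread_key_create(). */
#include <pthread.h>
#include <stdatomic.h>

static atomic_int writing_crash_dump;      /* "someone is dumping" flag   */
static pthread_key_t is_crash_dumping_key; /* "this thread is dumping"    */

/* Analogue of ERTS_SOMEONE_IS_CRASH_DUMPING: any thread may ask whether a
 * crash dump is in progress; the default seq_cst load plays the role of
 * the full-barrier (_mb) read. */
static int someone_is_crash_dumping(void)
{
    return atomic_load(&writing_crash_dump);
}

/* Analogue of ERTS_IS_CRASH_DUMPING: only the dumping thread ever sets
 * its own thread-specific flag, so no atomic read is needed here. */
static int this_thread_is_crash_dumping(void)
{
    return pthread_getspecific(is_crash_dumping_key) != NULL;
}

/* The thread that starts writing the dump would set both flags first. */
static void begin_crash_dump(void)
{
    atomic_store(&writing_crash_dump, 1);
    pthread_setspecific(is_crash_dumping_key, (void *) 1);
}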
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index a00faff912..8fa8c1cfe0 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -108,9 +108,9 @@ static ErlTimer *tiw_min_ptr;
 static int itime; /* Constant after init */
 erts_smp_atomic_t do_time; /* set at clock interrupt */
-static ERTS_INLINE erts_aint_t do_time_read(void) { return erts_smp_atomic_read(&do_time); }
+static ERTS_INLINE erts_aint_t do_time_read(void) { return erts_smp_atomic_read_acqb(&do_time); }
 static ERTS_INLINE erts_aint_t do_time_update(void) { return do_time_read(); }
-static ERTS_INLINE void do_time_init(void) { erts_smp_atomic_init(&do_time, 0L); }
+static ERTS_INLINE void do_time_init(void) { erts_smp_atomic_init_nob(&do_time, 0L); }
 /* get the time (in units of itime) to the next timeout,
    or -1 if there are no timeouts */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index a17de717bc..3f6accba2d 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3689,14 +3689,16 @@ threads_not_under_control(void)
 {
     erts_aint32_t res = system_block_state.threads_to_block;
+    ERTS_THR_MEMORY_BARRIER;
+
     /* Waiting is always an allowed activity... */
-    res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.wait);
+    res -= erts_smp_atomic32_read_nob(&erts_system_block_state.in_activity.wait);
     if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_GC)
-	res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.gc);
+	res -= erts_smp_atomic32_read_nob(&erts_system_block_state.in_activity.gc);
     if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_IO)
-	res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.io);
+	res -= erts_smp_atomic32_read_nob(&erts_system_block_state.in_activity.io);
     if (res < 0) {
 	ASSERT(0);
@@ -3756,7 +3758,7 @@ erts_block_system(Uint32 allowed_activities)
     }
     else {
-	erts_smp_atomic32_inc(&erts_system_block_state.do_block);
+	erts_smp_atomic32_inc_nob(&erts_system_block_state.do_block);
 	/* Someone else might be waiting for us to block... */
 	if (do_block) {
@@ -3808,11 +3810,11 @@ erts_emergency_block_system(long timeout, Uint32 allowed_activities)
     another_blocker = erts_smp_pending_system_block();
     system_block_state.emergency = 1;
-    erts_smp_atomic32_inc(&erts_system_block_state.do_block);
+    erts_smp_atomic32_inc_nob(&erts_system_block_state.do_block);
     if (another_blocker) {
 	if (is_blocker()) {
-	    erts_smp_atomic32_dec(&erts_system_block_state.do_block);
+	    erts_smp_atomic32_dec_nob(&erts_system_block_state.do_block);
 	    res = 0;
 	    goto done;
 	}
@@ -3869,7 +3871,7 @@ erts_release_system(void)
     if (system_block_state.recursive_block)
 	system_block_state.recursive_block--;
     else {
-	do_block = erts_smp_atomic32_dectest(&erts_system_block_state.do_block);
+	do_block = erts_smp_atomic32_dec_read_nob(&erts_system_block_state.do_block);
 	system_block_state.have_blocker = 0;
 	if (is_blockable_thread())
 	    system_block_state.threads_to_block++;
@@ -4004,10 +4006,10 @@ erts_system_block_init(void)
     /* Global state... */
-    erts_smp_atomic32_init(&erts_system_block_state.do_block, 0);
-    erts_smp_atomic32_init(&erts_system_block_state.in_activity.wait, 0);
-    erts_smp_atomic32_init(&erts_system_block_state.in_activity.gc, 0);
-    erts_smp_atomic32_init(&erts_system_block_state.in_activity.io, 0);
+    erts_smp_atomic32_init_nob(&erts_system_block_state.do_block, 0);
+    erts_smp_atomic32_init_nob(&erts_system_block_state.in_activity.wait, 0);
+    erts_smp_atomic32_init_nob(&erts_system_block_state.in_activity.gc, 0);
+    erts_smp_atomic32_init_nob(&erts_system_block_state.in_activity.io, 0);
     /* Make sure blockable threads unregister when exiting... */
     erts_smp_install_exit_handler(erts_unregister_blockable_thread);
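The utils.c hunks keep the per-activity counters but make the ordering explicit: entering an activity uses a fully ordered increment (_mb), leaving uses an acquire-ordered decrement (_acqb), and the blocking thread now issues ERTS_THR_MEMORY_BARRIER before summing the counters with plain _nob reads in threads_not_under_control(). The sketch below mirrors that shape in C11; the mapping of _mb, _acqb and _nob onto seq_cst, acquire and relaxed is an interpretation, and all names are invented for the example.

/* Illustrative sketch only, not ERTS code. */
#include <stdatomic.h>

static atomic_int in_wait, in_gc, in_io;

/* Entering an activity: fully ordered increment
 * (cf. erts_smp_atomic32_inc_mb in the hunk above). */
static void enter_activity(atomic_int *c)
{
    atomic_fetch_add_explicit(c, 1, memory_order_seq_cst);
}

/* Leaving an activity: acquire-ordered decrement
 * (cf. erts_smp_atomic32_dec_acqb in the hunk above). */
static void leave_activity(atomic_int *c)
{
    atomic_fetch_sub_explicit(c, 1, memory_order_acquire);
}

/* The blocking thread sums the counters with plain relaxed reads, but
 * only after a full fence, mirroring the ERTS_THR_MEMORY_BARRIER added
 * at the top of threads_not_under_control(). */
static int threads_in_allowed_activities(void)
{
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&in_wait, memory_order_relaxed)
         + atomic_load_explicit(&in_gc, memory_order_relaxed)
         + atomic_load_explicit(&in_io, memory_order_relaxed);
}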