author    Rickard Green <[email protected]>    2010-12-16 13:14:31 +0100
committer Rickard Green <[email protected]>    2010-12-16 13:14:31 +0100
commit    2bfd5a25adfc87d7f83663e313d619050fdbd26c (patch)
tree      8ddb457775674d595125fc1cfe290ca1c64b8935
parent    f7eaaee444b6d785bd2a9dbb86e96e6db0c04b4d (diff)
parent    e22880b24b64a4d033b05d41406187fd313ac99e (diff)
download  otp-2bfd5a25adfc87d7f83663e313d619050fdbd26c.tar.gz
          otp-2bfd5a25adfc87d7f83663e313d619050fdbd26c.tar.bz2
          otp-2bfd5a25adfc87d7f83663e313d619050fdbd26c.zip
Merge branch 'rickard/atomic-type/OTP-8974' into dev
* rickard/atomic-type/OTP-8974:
  Use 32-bit atomics for system block
  Use 32-bit atomics for misc scheduling specific information
  Use 32-bit atomic for uaflgs in thread specific events
  Use 32-bit atomics for process lock flags
  Add 32-bit atomics to emulator APIs
  Use new atomic types in emulator
  Use 32-bit atomics for ethr_thr_create
  Use 32-bit atomics for mutex and rwmutex flags
  Use 32-bit atomics for events
  Add support for 32-bit atomics
  Move atomic API into own files
  Add support for 64-bit atomics on Windows
  Remove unused ethread time functionality
  Introduce ethr_sint_t and use it for atomics
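The recurring pattern in the diff below is the replacement of word-sized
long/erts_smp_atomic_t state with the new explicitly sized types: erts_aint_t
(pointer-sized) where addresses are stored atomically, and
erts_smp_atomic32_t/erts_aint32_t for flags and counters that fit in 32 bits.
A minimal sketch of the new API, using only operation names that appear at
call sites in this diff (the flag values and the sketch function itself are
illustrative, not part of the commit):

    #include "erl_smp.h"  /* erts_smp_atomic32_t, erts_aint32_t, as in the
                             emulator sources changed by this commit */

    static erts_smp_atomic32_t flags;  /* hypothetical 32-bit flag word */

    static void atomic32_sketch(void)
    {
        erts_aint32_t old;

        erts_smp_atomic32_init(&flags, 0);
        erts_smp_atomic32_set(&flags, 1);

        /* bor/band/xchg return the previous value */
        old = erts_smp_atomic32_bor(&flags, 2);
        old = erts_smp_atomic32_band(&flags, ~((erts_aint32_t) 2));
        old = erts_smp_atomic32_xchg(&flags, 0);

        /* cmpxchg(var, new, expected) returns the value actually seen;
           the swap succeeded iff that equals the expected value */
        old = erts_smp_atomic32_cmpxchg(&flags, 1, 0);
        if (old == 0) {
            /* ... won the race ... */
        }

        if (erts_smp_atomic32_read(&flags) > 0)
            erts_smp_atomic32_dec(&flags);

        (void) old;
    }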
-rw-r--r--  erts/aclocal.m4 | 141
-rw-r--r--  erts/emulator/beam/beam_bp.c | 4
-rw-r--r--  erts/emulator/beam/beam_bp.h | 2
-rw-r--r--  erts/emulator/beam/bif.c | 16
-rw-r--r--  erts/emulator/beam/break.c | 8
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 16
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 4
-rw-r--r--  erts/emulator/beam/erl_db.c | 4
-rw-r--r--  erts/emulator/beam/erl_db.h | 3
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 38
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 2
-rw-r--r--  erts/emulator/beam/erl_driver.h | 53
-rw-r--r--  erts/emulator/beam/erl_fun.c | 8
-rw-r--r--  erts/emulator/beam/erl_gc.c | 2
-rw-r--r--  erts/emulator/beam/erl_init.c | 8
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 14
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 10
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 25
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 4
-rw-r--r--  erts/emulator/beam/erl_process.c | 451
-rw-r--r--  erts/emulator/beam/erl_process.h | 30
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 12
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 30
-rw-r--r--  erts/emulator/beam/erl_smp.h | 401
-rw-r--r--  erts/emulator/beam/erl_threads.h | 386
-rw-r--r--  erts/emulator/beam/global.h | 26
-rw-r--r--  erts/emulator/beam/io.c | 58
-rw-r--r--  erts/emulator/beam/sys.h | 72
-rw-r--r--  erts/emulator/beam/time.c | 55
-rw-r--r--  erts/emulator/beam/utils.c | 26
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 32
-rw-r--r--  erts/emulator/sys/unix/sys.c | 4
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 28
-rw-r--r--  erts/emulator/sys/win32/erl_win_dyn_driver.h | 12
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c | 6
-rw-r--r--  erts/include/internal/ethr_atomics.h | 726
-rw-r--r--  erts/include/internal/ethr_mutex.h | 78
-rw-r--r--  erts/include/internal/ethr_optimized_fallbacks.h | 74
-rw-r--r--  erts/include/internal/ethread.h | 376
-rw-r--r--  erts/include/internal/ethread_header_config.h.in | 36
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h | 222
-rw-r--r--  erts/include/internal/gcc/ethread.h | 10
-rw-r--r--  erts/include/internal/i386/atomic.h | 190
-rw-r--r--  erts/include/internal/i386/ethread.h | 7
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h | 200
-rw-r--r--  erts/include/internal/ppc32/atomic.h | 94
-rw-r--r--  erts/include/internal/pthread/ethr_event.h | 54
-rw-r--r--  erts/include/internal/sparc32/atomic.h | 172
-rw-r--r--  erts/include/internal/sparc32/ethread.h | 7
-rw-r--r--  erts/include/internal/tile/atomic.h | 104
-rw-r--r--  erts/include/internal/win/ethr_atomic.h | 415
-rw-r--r--  erts/include/internal/win/ethr_event.h | 16
-rw-r--r--  erts/include/internal/win/ethread.h | 6
-rw-r--r--  erts/lib_src/Makefile.in | 7
-rw-r--r--  erts/lib_src/common/ethr_atomics.c | 402
-rw-r--r--  erts/lib_src/common/ethr_aux.c | 202
-rw-r--r--  erts/lib_src/common/ethr_mutex.c | 327
-rw-r--r--  erts/lib_src/pthread/ethr_event.c | 34
-rw-r--r--  erts/lib_src/pthread/ethread.c | 40
-rw-r--r--  erts/lib_src/win/ethr_event.c | 7
-rw-r--r--  erts/lib_src/win/ethread.c | 55
-rw-r--r--  erts/test/ethread_SUITE.erl | 13
-rw-r--r--  erts/test/ethread_SUITE_data/ethread_tests.c | 65
65 files changed, 3854 insertions(+), 2088 deletions(-)
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 443d8622bf..a1211bbf0c 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -747,9 +747,124 @@ case "$THR_LIB_NAME" in
if test $found_win32_winnt = no; then
AC_MSG_ERROR([-D_WIN32_WINNT missing in CPPFLAGS])
fi
- ethr_have_native_atomics=yes
- ethr_have_native_spinlock=yes
+
AC_DEFINE(ETHR_WIN32_THREADS, 1, [Define if you have win32 threads])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedCompareExchange64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedCompareExchange64(var, (__int64) 1, (__int64) 0);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64, 1, [Define if you have _InterlockedCompareExchange64()])
+
+ AC_CHECK_SIZEOF(void *)
+ case "$ac_cv_sizeof_void_p-$have_ilckd" in
+ 8-no)
+ ethr_have_native_atomics=no
+ ethr_have_native_spinlock=no;;
+ *)
+ ethr_have_native_atomics=yes
+ ethr_have_native_spinlock=yes;;
+ esac
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedDecrement64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedDecrement64(var);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDDECREMENT64, 1, [Define if you have _InterlockedDecrement64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedIncrement64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedIncrement64(var);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDINCREMENT64, 1, [Define if you have _InterlockedIncrement64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedExchangeAdd64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedExchangeAdd64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDEXCHANGEADD64, 1, [Define if you have _InterlockedExchangeAdd64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedExchange64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedExchange64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDEXCHANGE64, 1, [Define if you have _InterlockedExchange64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedAnd64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedAnd64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDAND64, 1, [Define if you have _InterlockedAnd64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedOr64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedOr64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDOR64, 1, [Define if you have _InterlockedOr64()])
+
;;
pthread)
@@ -1087,6 +1202,28 @@ fi
AC_CHECK_SIZEOF(void *)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF_PTR, $ac_cv_sizeof_void_p, [Define to the size of pointers])
+AC_CHECK_SIZEOF(int)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_INT, $ac_cv_sizeof_int, [Define to the size of int])
+AC_CHECK_SIZEOF(long)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_LONG, $ac_cv_sizeof_long, [Define to the size of long])
+AC_CHECK_SIZEOF(long long)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_LONG_LONG, $ac_cv_sizeof_long_long, [Define to the size of long long])
+AC_CHECK_SIZEOF(__int64)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF___INT64, $ac_cv_sizeof___int64, [Define to the size of __int64])
+
+
+case X$erl_xcomp_bigendian in
+ X) ;;
+ Xyes|Xno) ac_cv_c_bigendian=$erl_xcomp_bigendian;;
+ *) AC_MSG_ERROR([Bad erl_xcomp_bigendian value: $erl_xcomp_bigendian]);;
+esac
+
+AC_C_BIGENDIAN
+
+if test "$ac_cv_c_bigendian" = "yes"; then
+ AC_DEFINE(ETHR_BIGENDIAN, 1, [Define if bigendian])
+fi
+
AC_ARG_ENABLE(native-ethr-impls,
AS_HELP_STRING([--disable-native-ethr-impls],
[disable native ethread implementations]),
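Each AC_TRY_LINK probe above boils down to a link test: autoconf compiles the
includes plus the body wrapped in a main() and, only if the program links,
the corresponding ETHR_HAVE__INTERLOCKED*64 macro is defined. For
_InterlockedCompareExchange64() the generated test is roughly the following
(a sketch; the exact autoconf boilerplate differs):

    #define WIN32_LEAN_AND_MEAN
    #include <windows.h>

    int main(void)
    {
        /* only linkage matters: the probe program is never run, so the
           uninitialized pointer is harmless here */
        volatile __int64 *var;
        _InterlockedCompareExchange64(var, (__int64) 1, (__int64) 0);
        return 0;
    }

The 8-no case in the sizeof(void *) switch then encodes the key decision: on
64-bit Windows a pointer-sized atomic needs the 64-bit intrinsics, so native
atomics and spinlocks are disabled when _InterlockedCompareExchange64() is
unavailable.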
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 682f31b83f..31910888d1 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -950,8 +950,8 @@ static int set_function_break(Module *modp, BeamInstr *pc, int bif,
MatchSetUnref(old_match_spec);
} else {
BpDataCount *bdc = (BpDataCount *) bd;
- long count = 0;
- long res = 0;
+ erts_aint_t count = 0;
+ erts_aint_t res = 0;
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index ebc171078d..bd8a7249a7 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -157,7 +157,7 @@ do { \
BpData **bds = (BpData **) (pc)[-4]; \
BpDataCount *bdc = NULL; \
Uint ix = bp_sched2ix_proc( (p) ); \
- long count = 0; \
+ erts_aint_t count = 0; \
\
ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
ASSERT(bds); \
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 6e9755ad48..d4a43f6e5f 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -813,7 +813,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
so.min_heap_size = H_MIN_SIZE;
so.min_vheap_size = BIN_VH_MIN_SIZE;
so.priority = PRIORITY_NORMAL;
- so.max_gen_gcs = (Uint16) erts_smp_atomic_read(&erts_max_gen_gcs);
+ so.max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
so.scheduler = 0;
/*
@@ -3269,12 +3269,13 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */
- erts_smp_atomic_set(&erts_dead_ports_ptr, (long) (port_buf + erts_max_ports));
+ erts_smp_atomic_set(&erts_dead_ports_ptr,
+ (erts_aint_t) (port_buf + erts_max_ports));
next_ss = erts_smp_atomic_inctest(&erts_ports_snapshot);
if (erts_smp_atomic_read(&erts_ports_alive) > 0) {
- long i;
+ erts_aint_t i;
for (i = erts_max_ports-1; i >= 0; i--) {
Port* prt = &erts_port[i];
erts_smp_port_state_lock(prt);
@@ -3289,7 +3290,7 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
}
dead_ports = (Eterm*)erts_smp_atomic_xchg(&erts_dead_ports_ptr,
- (long)NULL);
+ (erts_aint_t) NULL);
erts_smp_mtx_unlock(&ports_snapshot_mtx);
ASSERT(pp <= dead_ports);
@@ -3300,7 +3301,7 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
ASSERT((alive+dead) <= erts_max_ports);
if (alive+dead > 0) {
- long i;
+ erts_aint_t i;
Eterm *hp = HAlloc(BIF_P, (alive+dead)*2);
for (i = 0; i < alive; i++) {
@@ -3796,7 +3797,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n);
- oval = (Uint) erts_smp_atomic_xchg(&erts_max_gen_gcs, (long) nval);
+ oval = (Uint) erts_smp_atomic32_xchg(&erts_max_gen_gcs,
+ (erts_aint32_t) nval);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_heap_size) {
int oval = H_MIN_SIZE;
@@ -4139,7 +4141,7 @@ void erts_init_bif(void)
erts_smp_spinlock_init(&make_ref_lock, "make_ref");
erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init(&erts_dead_ports_ptr, (long)NULL);
+ erts_smp_atomic_init(&erts_dead_ports_ptr, (erts_aint_t) NULL);
/*
* bif_return_trap/1 is a hidden BIF that bifs that need to
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index f339e19761..fd2ec91e65 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -98,7 +98,7 @@ process_killer(void)
switch(j) {
case 'k':
if (rp->status == P_WAITING) {
- Uint32 rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
erts_smp_proc_inc_refc(rp);
erts_smp_proc_lock(rp, rp_locks);
(void) erts_send_exit_signal(NULL,
@@ -624,9 +624,9 @@ bin_check(void)
erts_printf("Process %T holding binary data \n", rp->id);
printed = 1;
}
- erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
- (unsigned long)bp->val,
- (long)bp->val->orig_size,
+ erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
+ bp->val,
+ bp->val->orig_size,
erts_smp_atomic_read(&bp->val->refc));
}
}
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 2c2e283f65..c9cdcb87a6 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1193,7 +1193,7 @@ int erts_ddll_driver_ok(DE_Handle *dh)
static void ddll_no_more_references(void *vdh)
{
DE_Handle *dh = (DE_Handle *) vdh;
- int x;
+ erts_aint_t x;
lock_drv_list();
@@ -1604,7 +1604,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
erts_sys_ddll_close(dh->handle);
return ERL_DE_LOAD_ERROR_BAD_NAME;
}
- erts_smp_atomic_init(&(dh->refc), (long) 0);
+ erts_smp_atomic_init(&(dh->refc), (erts_aint_t) 0);
dh->port_count = 0;
dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
sys_strcpy(dh->full_path, path);
@@ -1672,7 +1672,7 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
dh->handle = NULL;
dh->procs = NULL;
dh->port_count = 0;
- erts_refc_init(&(dh->refc), (long) 0);
+ erts_refc_init(&(dh->refc), (erts_aint_t) 0);
dh->status = -1;
dh->reload_full_path = NULL;
dh->reload_driver_name = NULL;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 75d8db880c..4a717d7271 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2020,7 +2020,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
} else if (BIF_ARG_1 == am_garbage_collection){
- Uint val = (Uint) erts_smp_atomic_read(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
Eterm tup;
hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2);
@@ -2035,7 +2035,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
} else if (BIF_ARG_1 == am_fullsweep_after){
- Uint val = (Uint) erts_smp_atomic_read(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_fullsweep_after, make_small(val));
BIF_RET(res);
@@ -3430,8 +3430,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
*/
if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
&& (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
- long on = (long) (BIF_ARG_2 == am_true);
- long prev_on = erts_smp_atomic_xchg(&available_internal_state, on);
+ erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
+ erts_aint_t prev_on = erts_smp_atomic_xchg(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
@@ -3628,7 +3628,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
/* Used by hipe test suites */
- long flag = erts_smp_atomic_read(&hipe_test_reschedule_flag);
+ erts_aint_t flag = erts_smp_atomic_read(&hipe_test_reschedule_flag);
if (!flag && BIF_ARG_2 != am_false) {
erts_smp_atomic_set(&hipe_test_reschedule_flag, 1);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
@@ -3703,7 +3703,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
#ifdef ERTS_ENABLE_LOCK_COUNT
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- unsigned long tries = 0, colls = 0;
+ Uint tries = 0, colls = 0;
unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
unsigned int line = 0;
@@ -3716,8 +3716,8 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
* [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
*/
- tries = (unsigned long) ethr_atomic_read(&stats->tries);
- colls = (unsigned long) ethr_atomic_read(&stats->colls);
+ tries = (Uint) ethr_atomic_read(&stats->tries);
+ colls = (Uint) ethr_atomic_read(&stats->colls);
line = stats->line;
timer_s = stats->timer.s;
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index db95c4a5d4..8a6b4d8d6c 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -487,7 +487,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
/* Make sure we check if we should bind to a cpu or not... */
if (esdp->run_queue->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- erts_smp_atomic_set(&esdp->chk_cpu_bind, 1);
+ erts_smp_atomic32_set(&esdp->chk_cpu_bind, 1);
else
esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
}
@@ -503,7 +503,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_callback_call_t *cgcc;
#ifdef ERTS_SMP
if (erts_common_run_queue)
- erts_smp_atomic_set(&esdp->chk_cpu_bind, 0);
+ erts_smp_atomic32_set(&esdp->chk_cpu_bind, 0);
else {
esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
}
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 8577354d27..65565cf3c7 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -3259,7 +3259,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
pp = &(*pp)->next) {
if ((*pp)->pid == pid) {
DbFixation* fix = *pp;
- long diff = -(long)fix->counter;
+ erts_aint_t diff = -((erts_aint_t) fix->counter);
erts_refc_add(&tb->common.fixref,diff,0);
*pp = fix->next;
erts_db_free(ERTS_ALC_T_DB_FIXATION,
@@ -3415,7 +3415,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
- && erts_smp_atomic_read(&tb->hash.fixdel) != (long)NULL) {
+ && erts_smp_atomic_read(&tb->hash.fixdel) != (erts_aint_t)NULL) {
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index cb2da603f0..e0bdebcb01 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -83,7 +83,8 @@ Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
#define ERTS_DB_ALC_MEM_UPDATE_(TAB, FREE_SZ, ALLOC_SZ) \
do { \
- long sz__ = ((long) (ALLOC_SZ)) - ((long) (FREE_SZ)); \
+ erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \
+ - ((erts_aint_t) (FREE_SZ))); \
ASSERT((TAB)); \
erts_smp_atomic_add(&(TAB)->common.memory_size, sz__); \
} while (0)
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 14ee63100a..1e50fee554 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -135,8 +135,8 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
*/
static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
{
- long was_next;
- long exp_next;
+ erts_aint_t was_next;
+ erts_aint_t exp_next;
FixedDeletion* fixd = (FixedDeletion*) erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
(DbTable *) tb,
sizeof(FixedDeletion));
@@ -146,7 +146,9 @@ static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
do { /* Lockless atomic insertion in linked list: */
exp_next = was_next;
fixd->next = (FixedDeletion*) exp_next;
- was_next = erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixd, exp_next);
+ was_next = erts_smp_atomic_cmpxchg(&tb->fixdel,
+ (erts_aint_t) fixd,
+ exp_next);
}while (was_next != exp_next);
}
@@ -541,12 +543,12 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
{
/*int tries = 0;*/
DEBUG_WAIT();
- if (erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixdel,
- (long)NULL) != (long)NULL) {
+ if (erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel,
+ (erts_aint_t)NULL) != (erts_aint_t)NULL) {
/* Oboy, must join lists */
FixedDeletion* last = fixdel;
- long was_tail;
- long exp_tail;
+ erts_aint_t was_tail;
+ erts_aint_t exp_tail;
while (last->next != NULL) last = last->next;
was_tail = erts_smp_atomic_read(&tb->fixdel);
@@ -555,7 +557,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
last->next = (FixedDeletion*) exp_tail;
/*++tries;*/
DEBUG_WAIT();
- was_tail = erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixdel,
+ was_tail = erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel,
exp_tail);
}while (was_tail != exp_tail);
}
@@ -573,7 +575,7 @@ void db_unfix_table_hash(DbTableHash *tb)
|| (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
&& !tb->common.is_thread_safe));
restart:
- fixdel = (FixedDeletion*) erts_smp_atomic_xchg(&tb->fixdel, (long)NULL);
+ fixdel = (FixedDeletion*) erts_smp_atomic_xchg(&tb->fixdel, (erts_aint_t)NULL);
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
int ix = fx->slot;
@@ -642,8 +644,8 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init(&tb->szm, SEGSZ_MASK);
erts_smp_atomic_init(&tb->nactive, SEGSZ);
- erts_smp_atomic_init(&tb->fixdel, (long)NULL);
- erts_smp_atomic_init(&tb->segtab, (long) alloc_ext_seg(tb,0,NULL)->segtab);
+ erts_smp_atomic_init(&tb->fixdel, (erts_aint_t)NULL);
+ erts_smp_atomic_init(&tb->segtab, (erts_aint_t) alloc_ext_seg(tb,0,NULL)->segtab);
tb->nsegs = NSEG_1;
tb->nslots = SEGSZ;
@@ -1715,9 +1717,9 @@ static int db_select_delete_hash(Process *p,
Eterm mpb;
Eterm egot;
#ifdef ERTS_SMP
- int fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
+ erts_aint_t fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
#else
- int fixated_by_me = 0;
+ erts_aint_t fixated_by_me = 0;
#endif
erts_smp_rwmtx_t* lck;
@@ -2124,11 +2126,11 @@ static int db_free_table_continue_hash(DbTable *tbl)
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
if (++done >= 2*DELETE_RECORD_LIMIT) {
- erts_smp_atomic_set(&tb->fixdel, (long)fixdel);
+ erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)fixdel);
return 0; /* Not done */
}
}
- erts_smp_atomic_set(&tb->fixdel, (long)NULL);
+ erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)NULL);
done /= 2;
while(tb->nslots != 0) {
@@ -2345,7 +2347,7 @@ static int alloc_seg(DbTableHash *tb)
struct ext_segment* eseg;
eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- erts_smp_atomic_set(&tb->segtab, (long) eseg->segtab);
+ erts_smp_atomic_set(&tb->segtab, (erts_aint_t) eseg->segtab);
tb->nsegs = eseg->nsegs;
}
ASSERT(seg_ix < tb->nsegs);
@@ -2417,7 +2419,7 @@ static int free_seg(DbTableHash *tb, int free_records)
MY_ASSERT(newtop->s.is_ext_segment);
if (newtop->prev_segtab != NULL) {
/* Time to use a smaller segtab */
- erts_smp_atomic_set(&tb->segtab, (long)newtop->prev_segtab);
+ erts_smp_atomic_set(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
tb->nsegs = seg_ix;
ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
}
@@ -2434,7 +2436,7 @@ static int free_seg(DbTableHash *tb, int free_records)
if (seg_ix > 0) {
if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
} else {
- erts_smp_atomic_set(&tb->segtab, (long)NULL);
+ erts_smp_atomic_set(&tb->segtab, (erts_aint_t)NULL);
}
#endif
tb->nslots -= SEGSZ;
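The fixdel hunks above all feed the same lock-free structure:
add_fixed_deletion() pushes a node onto the front of the list by retrying a
compare-and-swap on the head until no other thread has moved it. The shape of
that loop, restated with comments (types and calls as in the diff; the read
of the initial head is taken from the surrounding function):

    erts_aint_t was_next = erts_smp_atomic_read(&tb->fixdel);
    erts_aint_t exp_next;
    do {
        exp_next = was_next;                      /* head we expect       */
        fixd->next = (FixedDeletion *) exp_next;  /* link new node first  */
        was_next = erts_smp_atomic_cmpxchg(&tb->fixdel,
                                           (erts_aint_t) fixd, /* new     */
                                           exp_next);          /* expected */
    } while (was_next != exp_next);               /* head moved: retry    */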
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index e773361619..2852fb93fe 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -915,7 +915,7 @@ BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new)
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
- old_tcw = (Uint32) erts_smp_atomic_xchg(&trace_control_word, (long) val);
+ old_tcw = (Uint32) erts_smp_atomic_xchg(&trace_control_word, (erts_aint_t) val);
BIF_RET(erts_make_integer((Uint) old_tcw, p));
}
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 9733c0e5b5..13a73e01bb 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -150,6 +150,27 @@ typedef struct {
#define ERL_DRV_FLAG_SOFT_BUSY (1 << 1)
/*
+ * Integer types
+ */
+
+typedef unsigned long ErlDrvTermData;
+typedef unsigned long ErlDrvUInt;
+typedef signed long ErlDrvSInt;
+
+#if defined(__WIN32__)
+typedef unsigned __int64 ErlDrvUInt64;
+typedef __int64 ErlDrvSInt64;
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlDrvUInt64;
+typedef long ErlDrvSInt64;
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlDrvUInt64;
+typedef long long ErlDrvSInt64;
+#else
+#error No 64-bit integer type
+#endif
+
+/*
* A binary as seen in a driver. Note that a binary should never be
* altered by the driver when it has been sent to Erlang.
*/
@@ -179,26 +200,6 @@ struct erl_drv_event_data {
#endif
typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
-/*
- * Used in monitors...
- */
-typedef unsigned long ErlDrvTermData;
-typedef unsigned long ErlDrvUInt;
-typedef signed long ErlDrvSInt;
-
-#if defined(__WIN32__)
-typedef unsigned __int64 ErlDrvUInt64;
-typedef __int64 ErlDrvSInt64;
-#elif SIZEOF_LONG == 8
-typedef unsigned long ErlDrvUInt64;
-typedef long ErlDrvSInt64;
-#elif SIZEOF_LONG_LONG == 8
-typedef unsigned long long ErlDrvUInt64;
-typedef long long ErlDrvSInt64;
-#else
-#error No 64-bit integer type
-#endif
-
/*
* A driver monitor
*/
@@ -394,9 +395,9 @@ EXTERN int driver_exit (ErlDrvPort port, int err);
EXTERN ErlDrvPDL driver_pdl_create(ErlDrvPort);
EXTERN void driver_pdl_lock(ErlDrvPDL);
EXTERN void driver_pdl_unlock(ErlDrvPDL);
-EXTERN long driver_pdl_get_refc(ErlDrvPDL);
-EXTERN long driver_pdl_inc_refc(ErlDrvPDL);
-EXTERN long driver_pdl_dec_refc(ErlDrvPDL);
+EXTERN ErlDrvSInt driver_pdl_get_refc(ErlDrvPDL);
+EXTERN ErlDrvSInt driver_pdl_inc_refc(ErlDrvPDL);
+EXTERN ErlDrvSInt driver_pdl_dec_refc(ErlDrvPDL);
/*
* Process monitors
@@ -432,9 +433,9 @@ EXTERN ErlDrvBinary* driver_realloc_binary(ErlDrvBinary *bin, int size);
EXTERN void driver_free_binary(ErlDrvBinary *bin);
/* Referenc count on driver binaries */
-EXTERN long driver_binary_get_refc(ErlDrvBinary *dbp);
-EXTERN long driver_binary_inc_refc(ErlDrvBinary *dbp);
-EXTERN long driver_binary_dec_refc(ErlDrvBinary *dbp);
+EXTERN ErlDrvSInt driver_binary_get_refc(ErlDrvBinary *dbp);
+EXTERN ErlDrvSInt driver_binary_inc_refc(ErlDrvBinary *dbp);
+EXTERN ErlDrvSInt driver_binary_dec_refc(ErlDrvBinary *dbp);
/* Allocation interface */
EXTERN void *driver_alloc(size_t size);
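For driver authors the visible consequence of this header change is twofold:
the integer typedefs are now grouped in one place earlier in the header, and
the reference-count accessors return ErlDrvSInt (pointer-sized) instead of
long. A hypothetical call site, assuming only declarations from erl_driver.h
(driver_alloc_binary() is part of the standard driver API, not shown in this
diff):

    #include "erl_driver.h"

    static void binary_refc_sketch(void)
    {
        ErlDrvBinary *bin = driver_alloc_binary(64);
        if (bin != NULL) {
            ErlDrvSInt refc = driver_binary_get_refc(bin); /* was: long */
            if (refc == 1)
                driver_free_binary(bin); /* drops the last reference */
        }
    }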
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 84869f12d6..88947b5536 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -97,7 +97,7 @@ erts_put_fun_entry(Eterm mod, int uniq, int index)
{
ErlFunEntry template;
ErlFunEntry* fe;
- long refc;
+ erts_aint_t refc;
ASSERT(is_atom(mod));
template.old_uniq = uniq;
template.old_index = index;
@@ -119,7 +119,7 @@ erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
{
ErlFunEntry template;
ErlFunEntry* fe;
- long refc;
+ erts_aint_t refc;
ASSERT(is_atom(mod));
template.old_uniq = old_uniq;
@@ -157,7 +157,7 @@ erts_get_fun_entry(Eterm mod, int uniq, int index)
erts_fun_read_lock();
ret = (ErlFunEntry *) hash_get(&erts_fun_table, (void*) &template);
if (ret) {
- long refc = erts_refc_inctest(&ret->refc, 1);
+ erts_aint_t refc = erts_refc_inctest(&ret->refc, 1);
if (refc < 2) /* Pending delete */
erts_refc_inc(&ret->refc, 1);
}
@@ -257,7 +257,7 @@ erts_dump_fun_entries(int to, void *to_arg)
#ifdef HIPE
erts_print(to, to_arg, "Native_address: %p\n", fe->native_address);
#endif
- erts_print(to, to_arg, "Refc: %d\n", erts_refc_read(&fe->refc, 1));
+ erts_print(to, to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1));
b = b->next;
}
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 0f4d2a2ef9..2aa932e7d1 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2471,7 +2471,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
old = 0;
for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next) {
- long refc;
+ erts_aint_t refc;
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
refc = erts_refc_read(&u.pb->val->refc, 1);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 464ee750f7..f4e0717d30 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -100,7 +100,7 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace
int erts_async_max_threads; /* number of threads for async support */
int erts_async_thread_suggested_stack_size;
-erts_smp_atomic_t erts_max_gen_gcs;
+erts_smp_atomic32_t erts_max_gen_gcs;
Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
am_info or am_warning, am_error is
@@ -323,7 +323,7 @@ init_shared_memory(int argc, char **argv)
#endif
global_gen_gcs = 0;
- global_max_gen_gcs = erts_smp_atomic_read(&erts_max_gen_gcs);
+ global_max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
global_gc_flags = erts_default_process_flags;
erts_global_offheap.mso = NULL;
@@ -651,7 +651,7 @@ early_init(int *argc, char **argv) /*
erts_writing_erl_crash_dump = 0;
#endif
- erts_smp_atomic_init(&erts_max_gen_gcs, (long)((Uint16) -1));
+ erts_smp_atomic32_init(&erts_max_gen_gcs, (erts_aint32_t) ((Uint16) -1));
erts_pre_init_process();
#if defined(USE_THREADS) && !defined(ERTS_SMP)
@@ -856,7 +856,7 @@ erl_start(int argc, char **argv)
envbufsz = sizeof(envbuf);
if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
Uint16 max_gen_gcs = atoi(envbuf);
- erts_smp_atomic_set(&erts_max_gen_gcs, (long) max_gen_gcs);
+ erts_smp_atomic32_set(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs);
}
envbufsz = sizeof(envbuf);
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 04c7dbd2ec..7a6aaa6bbe 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -978,10 +978,10 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
/* We only force busy if a lock order violation would occur
and when on an even millisecond. */
{
- erts_thr_timeval_t time;
- erts_thr_time_now(&time);
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
- if ((time.tv_nsec / 1000000) & 1)
+ if ((tv.tv_usec / 1000) & 1)
return 0;
}
#endif
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 239773f366..a36c53560e 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -159,7 +159,7 @@ static char* lock_opt(Uint16 flag) {
}
static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
- long int colls, tries, w_state, r_state;
+ erts_aint_t colls, tries, w_state, r_state;
erts_lcnt_lock_stats_t *stats = NULL;
char *type;
@@ -385,7 +385,7 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
/* lock */
void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- long r_state = 0, w_state = 0;
+ erts_aint_t r_state = 0, w_state = 0;
erts_lcnt_thread_data_t *eltd;
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
@@ -418,7 +418,7 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
}
void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- long w_state;
+ erts_aint_t w_state;
erts_lcnt_thread_data_t *eltd;
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
@@ -471,7 +471,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
erts_lcnt_time_t time_wait;
erts_lcnt_lock_stats_t *stats;
#ifdef DEBUG
- long flowstate;
+ erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
@@ -516,8 +516,8 @@ void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
#ifdef DEBUG
- long w_state;
- long flowstate;
+ erts_aint_t w_state;
+ erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
#ifdef DEBUG
@@ -552,7 +552,7 @@ void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
/* Determine lock_state via res instead of state */
#ifdef DEBUG
- long flowstate;
+ erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
if (res != EBUSY) {
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 8cdda395df..6daa127d23 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -235,7 +235,7 @@ erts_sysname_to_connected_dist_entry(Eterm sysname)
erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
if (res_dep) {
- long refc = erts_refc_inctest(&res_dep->refc, 1);
+ erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1);
if (refc < 2) /* Pending delete */
erts_refc_inc(&res_dep->refc, 1);
}
@@ -257,7 +257,7 @@ DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
{
DistEntry *res;
DistEntry de;
- long refc;
+ erts_aint_t refc;
res = erts_find_dist_entry(sysname);
if (res)
return res;
@@ -279,7 +279,7 @@ DistEntry *erts_find_dist_entry(Eterm sysname)
erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
res = hash_get(&erts_dist_table, (void *) &de);
if (res) {
- long refc = erts_refc_inctest(&res->refc, 1);
+ erts_aint_t refc = erts_refc_inctest(&res->refc, 1);
if (refc < 2) /* Pending delete */
erts_refc_inc(&res->refc, 1);
}
@@ -586,7 +586,7 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
res = hash_get(&erts_node_table, (void *) &ne);
if (res && res != erts_this_node) {
- long refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
@@ -598,7 +598,7 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
if (res != erts_this_node) {
- long refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index c10724b951..1b07024ca1 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -129,7 +129,7 @@ reset_handle(ErtsPortTask *ptp)
{
if (ptp->handle) {
ASSERT(ptp == handle2task(ptp->handle));
- erts_smp_atomic_set(ptp->handle, (long) NULL);
+ erts_smp_atomic_set(ptp->handle, (erts_aint_t) NULL);
}
}
@@ -138,7 +138,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
ptp->handle = pthp;
if (pthp) {
- erts_smp_atomic_set(pthp, (long) ptp);
+ erts_smp_atomic_set(pthp, (erts_aint_t) ptp);
ASSERT(ptp == handle2task(ptp->handle));
}
}
@@ -568,7 +568,7 @@ erts_port_task_schedule(Eterm id,
ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
if (xrunq) {
/* Port emigrated ... */
- erts_smp_atomic_set(&pp->run_queue, (long) xrunq);
+ erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = xrunq;
}
@@ -727,7 +727,8 @@ resume_after_block(void *vd)
ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
erts_smp_runq_lock(d->runq);
if (d->resp)
- *d->resp = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+ *d->resp = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ != (erts_aint_t) 0);
}
/*
@@ -748,7 +749,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
ErtsPortTask *ptp;
int res = 0;
int reds = ERTS_PORT_REDS_EXECUTE;
- long io_tasks_executed = 0;
+ erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
ErtsPortTaskExeBlockData blk_data = {runq, NULL};
@@ -942,7 +943,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
else {
/* Port emigrated ... */
- erts_smp_atomic_set(&pp->run_queue, (long) xrunq);
+ erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq);
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
@@ -953,7 +954,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
port_was_enqueued = 1;
}
- res = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+ res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ != (erts_aint_t) 0);
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
@@ -971,7 +973,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_port_release(pp);
#else
{
- long refc;
+ erts_aint_t refc;
erts_smp_mtx_unlock(pp->lock);
refc = erts_smp_atomic_dectest(&pp->refc);
ASSERT(refc >= 0);
@@ -979,7 +981,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_unlock(runq);
erts_port_cleanup(pp); /* Might aquire runq lock */
erts_smp_runq_lock(runq);
- res = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+ res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ != (erts_aint_t) 0);
}
}
#endif
@@ -1112,7 +1115,7 @@ erts_port_migrate(Port *prt, int *prt_locked,
if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt))
return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
dequeue_port(from_rq, prt);
- erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
+ erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) to_rq);
enqueue_port(to_rq, prt);
return ERTS_MIGRATE_SUCCESS;
}
@@ -1125,7 +1128,7 @@ erts_port_migrate(Port *prt, int *prt_locked,
void
erts_port_task_init(void)
{
- erts_smp_atomic_init(&erts_port_task_outstanding_io_tasks, (long) 0);
+ erts_smp_atomic_init(&erts_port_task_outstanding_io_tasks, (erts_aint_t) 0);
init_port_task_alloc();
init_port_taskq_alloc();
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index f12d02da0c..714b4ea7dd 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -79,7 +79,7 @@ ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
ERTS_GLB_INLINE void
erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
{
- erts_smp_atomic_init(pthp, (long) NULL);
+ erts_smp_atomic_init(pthp, (erts_aint_t) NULL);
}
ERTS_GLB_INLINE int
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index fc950af8ce..77ee1d6ac5 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -127,21 +127,22 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
int erts_disable_proc_not_running_opt;
-#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((long) 1) << 0)
-#define ERTS_SCHDLR_SSPND_CHNG_MSB (((long) 1) << 1)
-#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((long) 1) << 2)
+#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((erts_aint32_t) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
+#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
#ifndef DEBUG
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic_set(&schdlr_sspnd.changing, (VAL))
+ erts_smp_atomic32_set(&schdlr_sspnd.changing, (VAL))
#else
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
do { \
- long old_val__ = erts_smp_atomic_xchg(&schdlr_sspnd.changing, \
- (VAL)); \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg(&schdlr_sspnd.changing, \
+ (VAL)); \
ASSERT(old_val__ == (OLD_VAL)); \
} while (0)
@@ -154,10 +155,10 @@ static struct {
int online;
int curr_online;
int wait_curr_online;
- erts_smp_atomic_t changing;
- erts_smp_atomic_t active;
+ erts_smp_atomic32_t changing;
+ erts_smp_atomic32_t active;
struct {
- erts_smp_atomic_t ongoing;
+ erts_smp_atomic32_t ongoing;
long wait_active;
ErtsProcList *procs;
} msb; /* Multi Scheduling Block */
@@ -165,11 +166,11 @@ static struct {
static struct {
erts_smp_mtx_t update_mtx;
- erts_smp_atomic_t active_runqs;
+ erts_smp_atomic32_t active_runqs;
int last_active_runqs;
- erts_smp_atomic_t used_runqs;
+ erts_smp_atomic32_t used_runqs;
int forced_check_balance;
- erts_smp_atomic_t checking_balance;
+ erts_smp_atomic32_t checking_balance;
int halftime;
int full_reds_history_index;
struct {
@@ -199,11 +200,11 @@ static erts_tsd_key_t sched_data_key;
static erts_smp_mtx_t proc_tab_mtx;
-static erts_smp_atomic_t function_calls;
+static erts_smp_atomic32_t function_calls;
#ifdef ERTS_SMP
-static erts_smp_atomic_t doing_sys_schedule;
-static erts_smp_atomic_t no_empty_run_queues;
+static erts_smp_atomic32_t doing_sys_schedule;
+static erts_smp_atomic32_t no_empty_run_queues;
#else /* !ERTS_SMP */
ErtsSchedulerData *erts_scheduler_data;
#endif
@@ -247,7 +248,10 @@ Uint erts_num_active_procs;
Process** erts_active_procs;
#endif
-static erts_smp_atomic_t process_count;
+#if ERTS_MAX_PROCESSES > 0x7fffffff
+#error "Need to store process_count in another type"
+#endif
+static erts_smp_atomic32_t process_count;
typedef struct ErtsTermProcElement_ ErtsTermProcElement;
struct ErtsTermProcElement_ {
@@ -407,7 +411,7 @@ erts_init_process(int ncpu)
init_proclist_alloc();
- erts_smp_atomic_init(&process_count, 0);
+ erts_smp_atomic32_init(&process_count, 0);
if (erts_use_r9_pids_ports) {
proc_bits = ERTS_R9_PROC_BITS;
@@ -568,7 +572,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
#ifdef ERTS_SMP
void
-erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
{
switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
case ERTS_SSI_FLG_POLL_SLEEPING:
@@ -593,11 +597,11 @@ erts_smp_notify_check_children_needed(void)
int i;
for (i = 0; i < erts_no_schedulers; i++) {
- long aux_work;
+ erts_aint32_t aux_work;
ErtsSchedulerSleepInfo *ssi;
ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
- aux_work = erts_smp_atomic_bor(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work = erts_smp_atomic32_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
erts_sched_poke(ssi);
}
@@ -605,16 +609,16 @@ erts_smp_notify_check_children_needed(void)
#endif
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
-static ERTS_INLINE long
+static ERTS_INLINE erts_aint32_t
blockable_aux_work(ErtsSchedulerData *esdp,
ErtsSchedulerSleepInfo *ssi,
- long aux_work)
+ erts_aint32_t aux_work)
{
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
- aux_work = erts_smp_atomic_band(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work = erts_smp_atomic32_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
erts_check_children();
}
@@ -626,10 +630,10 @@ blockable_aux_work(ErtsSchedulerData *esdp,
#endif
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-static ERTS_INLINE long
+static ERTS_INLINE erts_aint32_t
nonblockable_aux_work(ErtsSchedulerData *esdp,
ErtsSchedulerSleepInfo *ssi,
- long aux_work)
+ erts_aint32_t aux_work)
{
if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
@@ -694,10 +698,10 @@ prepare_for_sys_schedule(void)
{
#ifdef ERTS_SMP
while (!erts_port_task_have_outstanding_io_tasks()
- && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ && !erts_smp_atomic32_xchg(&doing_sys_schedule, 1)) {
if (!erts_port_task_have_outstanding_io_tasks())
return 1;
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
}
return 0;
#else
@@ -745,53 +749,55 @@ sched_active(Uint no, ErtsRunQueue *rq)
static int ERTS_INLINE
ongoing_multi_scheduling_block(void)
{
- return erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing) != 0;
+ return erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing) != 0;
}
static ERTS_INLINE void
empty_runq(ErtsRunQueue *rq)
{
- long oifls = erts_smp_atomic_band(&rq->info_flags, ~ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_band(&rq->info_flags,
+ ~ERTS_RUNQ_IFLG_NONEMPTY);
if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
- long empty = erts_smp_atomic_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
- erts_smp_atomic_inc(&no_empty_run_queues);
+ erts_smp_atomic32_inc(&no_empty_run_queues);
}
}
static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
- long oifls = erts_smp_atomic_bor(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_bor(&rq->info_flags,
+ ERTS_RUNQ_IFLG_NONEMPTY);
if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
- long empty = erts_smp_atomic_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
- erts_smp_atomic_dec(&no_empty_run_queues);
+ erts_smp_atomic32_dec(&no_empty_run_queues);
}
}
-static long
+static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
- long xflgs = 0;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t xflgs = 0;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -799,16 +805,16 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
return oflgs;
}
-static long
+static erts_aint32_t
sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
- long xflgs = ERTS_SSI_FLG_WAITING;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -817,15 +823,15 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
return oflgs;
}
-static long
+static erts_aint32_t
sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
{
- long until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
int sc = spincount;
- long flgs;
+ erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
!= (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
break;
@@ -839,18 +845,18 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
return flgs;
}
-static long
-sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
+static erts_aint32_t
+sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
{
- long oflgs;
- long nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
- long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
+ erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
@@ -867,14 +873,14 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
!= ERTS_SSI_FLG_WAITING)
static void
-scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
+scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
int spincount;
- long flgs;
+ erts_aint32_t flgs;
#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
|| defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
- long aux_work;
+ erts_aint32_t aux_work;
#endif
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -910,7 +916,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
tse_blockable_aux_work:
aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -920,7 +926,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -953,7 +959,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
goto tse_blockable_aux_work;
@@ -965,16 +971,16 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
erts_smp_runq_lock(rq);
sched_active(esdp->no, rq);
}
else {
- long dt;
+ erts_aint_t dt;
- erts_smp_atomic_set(&function_calls, 0);
+ erts_smp_atomic32_set(&function_calls, 0);
*fcalls = 0;
sched_waiting_sys(esdp->no, rq);
@@ -997,17 +1003,17 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sys_aux_work:
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
goto sys_woken;
@@ -1025,7 +1031,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* call erl_sys_schedule() until it is handled.
*/
if (erts_port_task_have_outstanding_io_tasks()) {
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
/*
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
@@ -1044,7 +1050,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* sleep in erl_sys_schedule().
*/
if (erts_port_task_have_outstanding_io_tasks()) {
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
/*
* Got to check that we still got I/O tasks; otherwise
@@ -1098,9 +1104,9 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sys_woken:
erts_smp_runq_lock(rq);
sys_locked_woken:
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
sched_active_sys(esdp->no, rq);
}
}
@@ -1108,15 +1114,15 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
}
-static ERTS_INLINE long
+static ERTS_INLINE erts_aint32_t
ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
{
/* reset all flags but suspended */
- long oflgs;
- long nflgs = 0;
- long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = 0;
+ erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
while (1) {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
@@ -1148,7 +1154,7 @@ wake_scheduler(ErtsRunQueue *rq, int incq, int one)
if (!ssi)
erts_smp_spin_unlock(&sl->lock);
else if (one) {
- long flgs;
+ erts_aint32_t flgs;
if (ssi->prev)
ssi->prev->next = ssi->next;
else {
@@ -1195,15 +1201,17 @@ wake_all_schedulers(void)
static ERTS_INLINE int
chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
{
- long iflgs;
+ erts_aint32_t iflgs;
ErtsRunQueue *wrq;
if (crq->ix == ix)
return 0;
wrq = ERTS_RUNQ_IX(ix);
- iflgs = erts_smp_atomic_read(&wrq->info_flags);
+ iflgs = erts_smp_atomic32_read(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
if (activate) {
- if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ if (ix == erts_smp_atomic32_cmpxchg(&balance_info.active_runqs,
+ ix+1,
+ ix)) {
erts_smp_xrunq_lock(crq, wrq);
wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
erts_smp_xrunq_unlock(crq, wrq);
@@ -1220,8 +1228,8 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
{
int ix = crq->ix;
int stop_ix = ix;
- int active_ix = erts_smp_atomic_read(&balance_info.active_runqs);
- int balance_ix = erts_smp_atomic_read(&balance_info.used_runqs);
+ int active_ix = erts_smp_atomic32_read(&balance_info.active_runqs);
+ int balance_ix = erts_smp_atomic32_read(&balance_info.used_runqs);
if (active_ix > balance_ix)
active_ix = balance_ix;
@@ -1273,7 +1281,7 @@ erts_sched_notify_check_cpu_bind(void)
int ix;
if (erts_common_run_queue) {
for (ix = 0; ix < erts_no_schedulers; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
+ erts_smp_atomic32_set(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
wake_all_schedulers();
}
else {
@@ -1441,14 +1449,15 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
erts_smp_runq_lock(evac_rq);
- erts_smp_atomic_bor(&evac_rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&evac_rq->scheduler->ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
- erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
+ erts_smp_atomic32_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1697,7 +1706,7 @@ static ERTS_INLINE int
check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
- long iflgs = erts_smp_atomic_read(&vrq->info_flags);
+ erts_aint32_t iflgs = erts_smp_atomic32_read(&vrq->info_flags);
if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY)
return try_steal_task_from_victim(rq, rq_lockedp, vrq);
else
@@ -1727,8 +1736,8 @@ try_steal_task(ErtsRunQueue *rq)
ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked);
- active_rqs = erts_smp_atomic_read(&balance_info.active_runqs);
- blnc_rqs = erts_smp_atomic_read(&balance_info.used_runqs);
+ active_rqs = erts_smp_atomic32_read(&balance_info.active_runqs);
+ blnc_rqs = erts_smp_atomic32_read(&balance_info.used_runqs);
if (active_rqs > blnc_rqs)
active_rqs = blnc_rqs;
@@ -1739,7 +1748,7 @@ try_steal_task(ErtsRunQueue *rq)
if (active_rqs < blnc_rqs) {
int no = blnc_rqs - active_rqs;
int stop_ix = vix = active_rqs + rq->ix % no;
- while (erts_smp_atomic_read(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
res = check_possible_steal_victim(rq, &rq_locked, vix);
if (res)
goto done;
@@ -1754,7 +1763,7 @@ try_steal_task(ErtsRunQueue *rq)
vix = rq->ix;
/* ... then try to steal a job from another active queue... */
- while (erts_smp_atomic_read(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
vix++;
if (vix >= active_rqs)
vix = 0;
@@ -1850,15 +1859,15 @@ check_balance(ErtsRunQueue *c_rq)
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix;
- if (erts_smp_atomic_xchg(&balance_info.checking_balance, 1)) {
+ if (erts_smp_atomic32_xchg(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
return;
}
- blnc_no_rqs = (int) erts_smp_atomic_read(&balance_info.used_runqs);
+ blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
return;
}
@@ -1866,7 +1875,7 @@ check_balance(ErtsRunQueue *c_rq)
if (balance_info.halftime) {
balance_info.halftime = 0;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
@@ -1894,12 +1903,12 @@ check_balance(ErtsRunQueue *c_rq)
forced = balance_info.forced_check_balance;
balance_info.forced_check_balance = 0;
- blnc_no_rqs = (int) erts_smp_atomic_read(&balance_info.used_runqs);
+ blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
if (blnc_no_rqs == 1) {
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_runq_lock(c_rq);
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
return;
}
@@ -1908,7 +1917,7 @@ check_balance(ErtsRunQueue *c_rq)
if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE)
balance_info.full_reds_history_index = 0;
- current_active = erts_smp_atomic_read(&balance_info.active_runqs);
+ current_active = erts_smp_atomic32_read(&balance_info.active_runqs);
/* Read balance information for all run queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -2243,10 +2252,10 @@ erts_fprintf(stderr, "--------------------------------\n");
}
balance_info.last_active_runqs = active;
- erts_smp_atomic_set(&balance_info.active_runqs, active);
+ erts_smp_atomic32_set(&balance_info.active_runqs, active);
balance_info.halftime = 1;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
/* Write migration paths and reset balance statistics in all queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -2395,7 +2404,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ASSERT((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) == 0);
#ifdef ERTS_SMP
- erts_smp_atomic_init(&no_empty_run_queues, 0);
+ erts_smp_atomic32_init(&no_empty_run_queues, 0);
#endif
erts_no_run_queues = n;
@@ -2405,7 +2414,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
rq->ix = ix;
- erts_smp_atomic_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_smp_atomic32_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
/* make sure that the "extra" id corresponds to the scheduler's
* id if the esdp->no <-> ix+1 mapping changes.
@@ -2502,9 +2511,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ssi->next = NULL;
ssi->prev = NULL;
#endif
- erts_smp_atomic_init(&ssi->flags, 0);
+ erts_smp_atomic32_init(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
- erts_smp_atomic_init(&ssi->aux_work, 0);
+ erts_smp_atomic32_init(&ssi->aux_work, 0);
}
#endif
@@ -2555,7 +2564,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
- erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
+ erts_smp_atomic32_init(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2563,21 +2572,21 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- erts_smp_atomic_init(&schdlr_sspnd.changing, 0);
+ erts_smp_atomic32_init(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
- erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
- erts_smp_atomic_init(&schdlr_sspnd.active, no_schedulers);
+ erts_smp_atomic32_init(&schdlr_sspnd.msb.ongoing, 0);
+ erts_smp_atomic32_init(&schdlr_sspnd.active, no_schedulers);
schdlr_sspnd.msb.procs = NULL;
- erts_smp_atomic_set(&balance_info.used_runqs,
- erts_common_run_queue ? 1 : no_schedulers_online);
- erts_smp_atomic_init(&balance_info.active_runqs, no_schedulers);
+ erts_smp_atomic32_set(&balance_info.used_runqs,
+ erts_common_run_queue ? 1 : no_schedulers_online);
+ erts_smp_atomic32_init(&balance_info.active_runqs, no_schedulers);
balance_info.last_active_runqs = no_schedulers;
erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
- erts_smp_atomic_init(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_init(&balance_info.checking_balance, 0);
balance_info.prev_rise.active_runqs = 0;
balance_info.prev_rise.max_len = 0;
balance_info.prev_rise.reds = 0;
@@ -2586,8 +2595,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2601,7 +2610,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
| ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- erts_smp_atomic_init(&doing_sys_schedule, 0);
+ erts_smp_atomic32_init(&doing_sys_schedule, 0);
#else /* !ERTS_SMP */
{
@@ -2615,7 +2624,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_no_schedulers = 1;
#endif
- erts_smp_atomic_init(&function_calls, 0);
+ erts_smp_atomic32_init(&function_calls, 0);
/* init port tasks */
erts_port_task_init();
@@ -2748,13 +2757,13 @@ static void
scheduler_ix_resume_wake(Uint ix)
{
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- long xflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
- long oflgs;
+ erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t oflgs;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, 0, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, 0, xflgs);
if (oflgs == xflgs) {
erts_sched_finish_poke(ssi, oflgs);
break;
@@ -2763,17 +2772,17 @@ scheduler_ix_resume_wake(Uint ix)
} while (oflgs & ERTS_SSI_FLG_SUSPENDED);
}
-static long
-sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
+static erts_aint32_t
+sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
- long xflgs = xpct;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t xflgs = xpct;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -2782,15 +2791,15 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
return oflgs;
}
-static long
+static erts_aint32_t
sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
{
int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
int sc = spincount;
- long flgs;
+ erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED))
@@ -2808,22 +2817,22 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
return flgs;
}
-static long
+static erts_aint32_t
sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
- long xflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING
@@ -2841,8 +2850,8 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
- long flgs;
- int changing;
+ erts_aint32_t flgs;
+ erts_aint32_t changing;
long no = (long) esdp->no;
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
@@ -2850,7 +2859,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
int wake = 0;
#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
|| defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
- long aux_work;
+ erts_aint32_t aux_work;
#endif
/*
@@ -2878,15 +2887,15 @@ suspend_scheduler(ErtsSchedulerData *esdp)
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ active_schedulers = erts_smp_atomic32_dectest(&schdlr_sspnd.active);
ASSERT(active_schedulers >= 1);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
if (active_schedulers == schdlr_sspnd.msb.wait_active)
wake = 1;
if (active_schedulers == 1) {
- changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
}
}
@@ -2908,8 +2917,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
&& schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
wake = 1;
if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
}
}
@@ -2919,29 +2928,30 @@ suspend_scheduler(ErtsSchedulerData *esdp)
wake = 0;
}
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
blockable_aux_work:
blockable_aux_work(esdp, ssi, aux_work);
#endif
erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
while (1) {
- long flgs;
+ erts_aint32_t flgs;
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
- flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
if (flgs == (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED)) {
@@ -2961,13 +2971,13 @@ suspend_scheduler(ErtsSchedulerData *esdp)
| ERTS_SSI_FLG_SUSPENDED));
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
break;
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
goto blockable_aux_work;
@@ -2979,19 +2989,19 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
}
- active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ active_schedulers = erts_smp_atomic32_inctest(&schdlr_sspnd.active);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
&& schdlr_sspnd.online == active_schedulers) {
- erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
ASSERT(no <= schdlr_sspnd.online);
- ASSERT(!erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
+ ASSERT(!erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
}
@@ -3020,7 +3030,7 @@ do { \
(RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \
(RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \
- erts_smp_atomic_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED); \
+ erts_smp_atomic32_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\
for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \
(RQ)->procs.prio_info[pix__].max_len = 0; \
(RQ)->procs.prio_info[pix__].reds = 0; \
@@ -3062,9 +3072,9 @@ erts_schedulers_state(Uint *total,
int yield_allowed)
{
int res;
- long changing;
+ erts_aint32_t changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
@@ -3085,7 +3095,7 @@ erts_set_schedulers_online(Process *p,
Sint *old_no)
{
int ix, res, no, have_unlocked_plocks;
- long changing;
+ erts_aint32_t changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -3095,7 +3105,7 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
@@ -3142,7 +3152,7 @@ erts_set_schedulers_online(Process *p,
ErtsRunQueue *to_rq = ERTS_RUNQ_IX(ix % no);
evacuate_run_queue(from_rq, to_rq);
}
- erts_smp_atomic_set(&balance_info.used_runqs, no);
+ erts_smp_atomic32_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
@@ -3170,8 +3180,8 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++) {
ErtsSchedulerSleepInfo *ssi;
ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic_bor(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
wake_all_schedulers();
}
@@ -3196,7 +3206,7 @@ erts_set_schedulers_online(Process *p,
for (ix = erts_no_run_queues-1; ix >= no; ix--)
evacuate_run_queue(ERTS_RUNQ_IX(ix),
ERTS_RUNQ_IX(ix % no));
- erts_smp_atomic_set(&balance_info.used_runqs, no);
+ erts_smp_atomic32_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
for (ix = no; ix < online; ix++) {
@@ -3218,10 +3228,11 @@ erts_set_schedulers_online(Process *p,
NULL);
ASSERT(res != ERTS_SCHDLR_SSPND_DONE
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic_read(&schdlr_sspnd.changing)));
- erts_smp_atomic_band(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -3236,11 +3247,11 @@ ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
int ix, res, have_unlocked_plocks = 0;
- long changing;
+ erts_aint32_t changing;
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
@@ -3250,7 +3261,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
plp->next = schdlr_sspnd.msb.procs;
schdlr_sspnd.msb.procs = plp;
p->flags |= F_HAVE_BLCKD_MSCHED;
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
@@ -3261,11 +3272,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
- ASSERT(0 == erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
- erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
+ ASSERT(0 == erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
+ erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 1);
if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
@@ -3285,14 +3296,14 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
}
if (erts_common_run_queue) {
for (ix = 1; ix < online; ix++)
- erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
erts_smp_mtx_lock(&balance_info.update_mtx);
- erts_smp_atomic_set(&balance_info.used_runqs, 1);
+ erts_smp_atomic32_set(&balance_info.used_runqs, 1);
for (ix = 0; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
erts_smp_runq_lock(rq);
@@ -3314,7 +3325,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
- while (erts_smp_atomic_read(&schdlr_sspnd.active)
+ while (erts_smp_atomic32_read(&schdlr_sspnd.active)
!= schdlr_sspnd.msb.wait_active)
erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
@@ -3323,11 +3334,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
NULL);
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic_read(&schdlr_sspnd.changing)));
- erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -3394,16 +3405,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
});
#endif
p->flags &= ~F_HAVE_BLCKD_MSCHED;
- erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 0);
+ erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 0);
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
@@ -3429,7 +3440,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
evacuate_run_queue(ERTS_RUNQ_IX(ix),
ERTS_RUNQ_IX(ix % online));
- erts_smp_atomic_set(&balance_info.used_runqs, online);
+ erts_smp_atomic32_set(&balance_info.used_runqs, online);
/* Make sure that we balance soon... */
balance_info.forced_check_balance = 1;
erts_smp_runq_lock(ERTS_RUNQ_IX(0));
@@ -3453,7 +3464,7 @@ void
erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
{
if (return_value == am_blocked) {
- long active = erts_smp_atomic_read(&schdlr_sspnd.active);
+ erts_aint32_t active = erts_smp_atomic32_read(&schdlr_sspnd.active);
ASSERT(1 <= active && active <= 2);
ASSERT(ERTS_PROC_GET_SCHDATA(p)->no == 1);
}
@@ -3536,12 +3547,12 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.changing)
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.changing)
& ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
- erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (((ErtsSchedulerData *) vesdp)->no != 1)
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
}
@@ -4914,10 +4925,10 @@ Process *schedule(Process *p, int calls)
{
ErtsRunQueue *rq;
ErtsRunPrioQueue *rpq;
- long dt;
+ erts_aint_t dt;
ErtsSchedulerData *esdp;
int context_reds;
- long fcalls;
+ int fcalls;
int input_reductions;
int actual_reds;
int reds;
@@ -4940,7 +4951,7 @@ Process *schedule(Process *p, int calls)
esdp = erts_get_scheduler_data();
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
- fcalls = erts_smp_atomic_read(&function_calls);
+ fcalls = (int) erts_smp_atomic32_read(&function_calls);
actual_reds = reds = 0;
erts_smp_runq_lock(rq);
} else {
@@ -4958,7 +4969,7 @@ Process *schedule(Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
- fcalls = erts_smp_atomic_addtest(&function_calls, reds);
+ fcalls = (int) erts_smp_atomic32_addtest(&function_calls, reds);
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
@@ -5091,14 +5102,14 @@ Process *schedule(Process *p, int calls)
| ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic_read(&esdp->ssi->flags)
+ || (erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
- || erts_smp_atomic_read(&esdp->chk_cpu_bind)) {
+ || erts_smp_atomic32_read(&esdp->chk_cpu_bind)) {
erts_sched_check_cpu_bind(esdp);
}
}
@@ -5107,7 +5118,7 @@ Process *schedule(Process *p, int calls)
|| defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
{
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- long aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ erts_aint32_t aux_work = erts_smp_atomic32_read(&ssi->aux_work);
if (aux_work) {
erts_smp_runq_unlock(rq);
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
@@ -5149,9 +5160,9 @@ Process *schedule(Process *p, int calls)
if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic_read(&esdp->ssi->flags)
+ || (erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
@@ -5193,7 +5204,7 @@ Process *schedule(Process *p, int calls)
* Schedule system-level activities.
*/
- erts_smp_atomic_set(&function_calls, 0);
+ erts_smp_atomic32_set(&function_calls, 0);
fcalls = 0;
ASSERT(!erts_port_task_have_outstanding_io_tasks());
@@ -5207,7 +5218,7 @@ Process *schedule(Process *p, int calls)
if (dt) bump_timer(dt);
#ifdef ERTS_SMP
erts_smp_runq_lock(rq);
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
goto continue_check_activities_to_run;
#else
if (!runnable)
@@ -5235,7 +5246,7 @@ Process *schedule(Process *p, int calls)
if (erts_common_run_queue->waiting)
wake_scheduler(erts_common_run_queue, 0, 1);
}
- else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
+ else if (erts_smp_atomic32_read(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
}
@@ -5692,7 +5703,7 @@ erts_test_next_pid(int set, Uint next)
Uint erts_process_count(void)
{
- long res = erts_smp_atomic_read(&process_count);
+ erts_aint32_t res = erts_smp_atomic32_read(&process_count);
ASSERT(res >= 0);
return (Uint) res;
}
@@ -5741,7 +5752,7 @@ alloc_process(void)
ASSERT(!process_tab[p_next]);
process_tab[p_next] = p;
- erts_smp_atomic_inc(&process_count);
+ erts_smp_atomic32_inc(&process_count);
p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
if (p->id == ERTS_INVALID_PID) {
/* Do not use the invalid pid; change serial */
@@ -5867,7 +5878,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
p->prio = PRIORITY_NORMAL;
- p->max_gen_gcs = (Uint16) erts_smp_atomic_read(&erts_max_gen_gcs);
+ p->max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
}
p->skipped = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
@@ -7324,8 +7335,8 @@ continue_exit_process(Process *p
p->status_flags = 0;
#endif
process_tab[pix] = NULL; /* Time of death! */
- ASSERT(erts_smp_atomic_read(&process_count) > 0);
- erts_smp_atomic_dec(&process_count);
+ ASSERT(erts_smp_atomic32_read(&process_count) > 0);
+ erts_smp_atomic32_dec(&process_count);
#ifdef ERTS_SMP
erts_pix_unlock(pix_lock);
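
The suspend/resume changes above all follow the same compare-and-swap retry
pattern over the 32-bit sleep-info flags. A minimal standalone sketch of that
pattern — using C11 atomics and stand-in flag names rather than the ethread
API — for readers skimming the scheduler hunks:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FLG_SLEEPING  ((uint32_t) 1 << 0)   /* stand-ins for the  */
#define FLG_WAITING   ((uint32_t) 1 << 3)   /* ERTS_SSI_FLG_* bits */
#define FLG_SUSPENDED ((uint32_t) 1 << 4)

/* Mirrors the shape of scheduler_ix_resume_wake(): repeatedly try to
 * clear all flags; after a failed CAS the freshly observed value
 * becomes the next expected value, and we stop once SUSPENDED is gone. */
static uint32_t resume_wake(_Atomic uint32_t *flags)
{
    uint32_t expected = FLG_SLEEPING | FLG_WAITING | FLG_SUSPENDED;
    for (;;) {
        uint32_t observed = expected;
        if (atomic_compare_exchange_strong(flags, &observed, 0))
            return observed;         /* cleared; old value returned */
        if (!(observed & FLG_SUSPENDED))
            return observed;         /* resumed by someone else */
        expected = observed;         /* lost the race; retry */
    }
}

int main(void)
{
    _Atomic uint32_t flags = FLG_SLEEPING | FLG_WAITING | FLG_SUSPENDED;
    printf("old flags: 0x%x\n", (unsigned) resume_wake(&flags));
    printf("new flags: 0x%x\n", (unsigned) atomic_load(&flags));
    return 0;
}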
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index c038e57b65..8e32121a68 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -174,8 +174,8 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_UNSET_RUNQ_FLG_EVACUATE(FLGS, PRIO) \
((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO)))
-#define ERTS_RUNQ_IFLG_SUSPENDED (((long) 1) << 0)
-#define ERTS_RUNQ_IFLG_NONEMPTY (((long) 1) << 1)
+#define ERTS_RUNQ_IFLG_SUSPENDED (((erts_aint32_t) 1) << 0)
+#define ERTS_RUNQ_IFLG_NONEMPTY (((erts_aint32_t) 1) << 1)
#ifdef DEBUG
@@ -219,11 +219,11 @@ typedef enum {
ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
} ErtsMigrateResult;
-#define ERTS_SSI_FLG_SLEEPING (((long) 1) << 0)
-#define ERTS_SSI_FLG_POLL_SLEEPING (((long) 1) << 1)
-#define ERTS_SSI_FLG_TSE_SLEEPING (((long) 1) << 2)
-#define ERTS_SSI_FLG_WAITING (((long) 1) << 3)
-#define ERTS_SSI_FLG_SUSPENDED (((long) 1) << 4)
+#define ERTS_SSI_FLG_SLEEPING (((erts_aint32_t) 1) << 0)
+#define ERTS_SSI_FLG_POLL_SLEEPING (((erts_aint32_t) 1) << 1)
+#define ERTS_SSI_FLG_TSE_SLEEPING (((erts_aint32_t) 1) << 2)
+#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
+#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -242,7 +242,7 @@ typedef enum {
#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
#endif
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((long) 1) << 0)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 0)
#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
(ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
@@ -259,9 +259,9 @@ typedef struct {
struct ErtsSchedulerSleepInfo_ {
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
- erts_smp_atomic_t flags;
+ erts_smp_atomic32_t flags;
erts_tse_t *event;
- erts_smp_atomic_t aux_work;
+ erts_smp_atomic32_t aux_work;
};
/* times to reschedule low prio process before running */
@@ -311,7 +311,7 @@ typedef struct {
struct ErtsRunQueue_ {
int ix;
- erts_smp_atomic_t info_flags;
+ erts_smp_atomic32_t info_flags;
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
@@ -421,7 +421,7 @@ struct ErtsSchedulerData_ {
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
- erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
+ erts_smp_atomic32_t chk_cpu_bind; /* Only used when common run queue */
#endif
};
@@ -1555,7 +1555,7 @@ extern int erts_disable_proc_not_running_opt;
void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
#ifdef ERTS_SMP
-void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, long);
+void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1563,11 +1563,11 @@ ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
ERTS_GLB_INLINE void
erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
- long flags = erts_smp_atomic_read(&ssi->flags);
+ erts_aint32_t flags = erts_smp_atomic32_read(&ssi->flags);
ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
|| (flags & ERTS_SSI_FLG_WAITING));
if (flags & ERTS_SSI_FLG_SLEEPING) {
- flags = erts_smp_atomic_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ flags = erts_smp_atomic32_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
erts_sched_finish_poke(ssi, flags);
}
}
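
The constant rewrites in erl_process.h above are not cosmetic: on LP64
platforms a ((long) 1) << n bit is 64 bits wide, so masks built from it
silently widen past the 32-bit atomic word they are applied to. A small
illustration, with my_aint32_t standing in for erts_aint32_t:

#include <stdint.h>
#include <stdio.h>

typedef int32_t my_aint32_t;                   /* stand-in for erts_aint32_t */

#define FLG_WAITING_LONG  (((long) 1) << 3)        /* old style */
#define FLG_WAITING_32    (((my_aint32_t) 1) << 3) /* new style */

int main(void)
{
    /* On an LP64 system the long-typed mask is 8 bytes wide, while the
     * 32-bit-typed mask matches the width of the atomic word it masks. */
    printf("sizeof ~long mask:  %zu\n", sizeof(~FLG_WAITING_LONG));
    printf("sizeof ~int32 mask: %zu\n", sizeof(~FLG_WAITING_32));
    return 0;
}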
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 1bebcdb911..72560aa124 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -124,7 +124,7 @@ erts_init_proc_lock(int cpus)
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
- "pix_lock", make_small(i));
+ "pix_lock", make_small(i));
#else
erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
#endif
@@ -413,7 +413,7 @@ transfer_locks(Process *p,
do {
erts_tse_t *tmp = wake;
wake = wake->next;
- erts_atomic_set(&tmp->uaflgs, 0);
+ erts_atomic32_set(&tmp->uaflgs, 0);
erts_tse_set(tmp);
} while (wake);
@@ -509,14 +509,14 @@ wait_for_locks(Process *p,
ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
- erts_atomic_set(&wtr->uaflgs, 1);
+ erts_atomic32_set(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
while (1) {
int res;
erts_tse_reset(wtr);
- if (erts_atomic_read(&wtr->uaflgs) == 0)
+ if (erts_atomic32_read(&wtr->uaflgs) == 0)
break;
/*
@@ -955,7 +955,7 @@ erts_proc_lock_init(Process *p)
{
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic_init(&p->lock.flags, (long) ERTS_PROC_LOCKS_ALL);
+ erts_smp_atomic32_init(&p->lock.flags, (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
@@ -974,7 +974,7 @@ erts_proc_lock_init(Process *p)
{
int i;
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- erts_smp_atomic_init(&p->lock.locked[i], (long) 1);
+ erts_smp_atomic32_init(&p->lock.locked[i], (erts_aint32_t) 1);
}
#endif
}
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 4fe30c7209..355179f084 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -54,20 +54,20 @@
#define ERTS_PROC_LOCK_MAX_BIT 3
-typedef Uint32 ErtsProcLocks;
+typedef erts_aint32_t ErtsProcLocks;
typedef struct erts_proc_lock_queues_t_ erts_proc_lock_queues_t;
typedef struct erts_proc_lock_t_ {
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic_t flags;
+ erts_smp_atomic32_t flags;
#else
ErtsProcLocks flags;
#endif
erts_proc_lock_queues_t *queues;
- long refc;
+ Sint32 refc;
#ifdef ERTS_PROC_LOCK_DEBUG
- erts_smp_atomic_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
+ erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_t lcnt_main;
@@ -270,17 +270,19 @@ typedef struct {
#if ERTS_PROC_LOCK_ATOMIC_IMPL
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
+ ((ErtsProcLocks) erts_smp_atomic32_band(&(L)->flags, (erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
+ ((ErtsProcLocks) erts_smp_atomic32_bor(&(L)->flags, (erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg_acqb(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+ ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \
+ (erts_aint32_t) (NEW), \
+ (erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg_relb(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+ ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_relb(&(L)->flags, \
+ (erts_aint32_t) (NEW), \
+ (erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
- ((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
+ ((ErtsProcLocks) erts_smp_atomic32_read(&(L)->flags))
#else /* no opt atomic ops */
@@ -619,13 +621,13 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) {
ErtsProcLocks lock = ((ErtsProcLocks) 1) << i;
if (locks & lock) {
- long lock_count;
+ erts_aint32_t lock_count;
if (locked) {
- lock_count = erts_smp_atomic_inctest(&p->lock.locked[i]);
+ lock_count = erts_smp_atomic32_inctest(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 1);
}
else {
- lock_count = erts_smp_atomic_dectest(&p->lock.locked[i]);
+ lock_count = erts_smp_atomic32_dectest(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 0);
}
}
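
The _acqb/_relb macro pairs above carry memory-ordering intent: taking a
process lock wants acquire semantics and dropping it wants release semantics.
A sketch of the same idea in plain C11 atomics — try_lock and unlock are
hypothetical names for illustration, not the ERTS API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Acquire on lock: accesses after a successful CAS must not be
 * reordered before it.  Release on unlock: writes made while holding
 * the lock must be visible to the next acquirer. */
static bool try_lock(_Atomic uint32_t *flags, uint32_t want)
{
    uint32_t expected = 0;                  /* 0 == no lock bits held */
    return atomic_compare_exchange_strong_explicit(
        flags, &expected, want,
        memory_order_acquire,               /* success: acquire */
        memory_order_relaxed);              /* failure: no ordering */
}

static void unlock(_Atomic uint32_t *flags, uint32_t held)
{
    atomic_fetch_and_explicit(flags, ~held, memory_order_release);
}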
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index b41fa70476..287327bfe1 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -54,10 +54,10 @@ typedef erts_cnd_t erts_smp_cnd_t;
typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef ethr_atomic_t erts_smp_atomic_t;
+typedef erts_atomic_t erts_smp_atomic_t;
+typedef erts_atomic32_t erts_smp_atomic32_t;
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
-typedef erts_thr_timeval_t erts_smp_thr_timeval_t;
void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#else /* #ifdef ERTS_SMP */
@@ -83,7 +83,8 @@ typedef struct {
} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef long erts_smp_atomic_t;
+typedef SWord erts_smp_atomic_t;
+typedef Uint32 erts_smp_atomic32_t;
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
typedef struct { } erts_smp_rwlock_t;
@@ -92,11 +93,6 @@ typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t;
typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t;
#endif
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} erts_smp_thr_timeval_t;
-
#endif /* #ifdef ERTS_SMP */
ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id);
@@ -164,33 +160,82 @@ ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_atomic_init(erts_smp_atomic_t *var, long i);
-ERTS_GLB_INLINE void erts_smp_atomic_set(erts_smp_atomic_t *var, long i);
-ERTS_GLB_INLINE long erts_smp_atomic_read(erts_smp_atomic_t *var);
-ERTS_GLB_INLINE long erts_smp_atomic_inctest(erts_smp_atomic_t *incp);
-ERTS_GLB_INLINE long erts_smp_atomic_dectest(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE void erts_smp_atomic_init(erts_smp_atomic_t *var,
+ erts_aint_t i);
+ERTS_GLB_INLINE void erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_inctest(erts_smp_atomic_t *incp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest(erts_smp_atomic_t *decp);
ERTS_GLB_INLINE void erts_smp_atomic_inc(erts_smp_atomic_t *incp);
ERTS_GLB_INLINE void erts_smp_atomic_dec(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE long erts_smp_atomic_addtest(erts_smp_atomic_t *addp,
- long i);
-ERTS_GLB_INLINE void erts_smp_atomic_add(erts_smp_atomic_t *addp, long i);
-ERTS_GLB_INLINE long erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp,
- long new);
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
- long new,
- long expected);
-ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
-ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_addtest(erts_smp_atomic_t *addp,
+ erts_aint_t i);
+ERTS_GLB_INLINE void erts_smp_atomic_add(erts_smp_atomic_t *addp,
+ erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp,
+ erts_aint_t new);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_bor(erts_smp_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_band(erts_smp_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var,
+ erts_aint_t i);
ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE long erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
- long new,
- long exp);
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
- long new,
- long exp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read(erts_smp_atomic32_t *var);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_inc(erts_smp_atomic32_t *incp);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -221,7 +266,6 @@ ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_thr_time_now(erts_smp_thr_timeval_t *time);
ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
@@ -611,7 +655,7 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_init(erts_smp_atomic_t *var, long i)
+erts_smp_atomic_init(erts_smp_atomic_t *var, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_init(var, i);
@@ -621,7 +665,7 @@ erts_smp_atomic_init(erts_smp_atomic_t *var, long i)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_set(erts_smp_atomic_t *var, long i)
+erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_set(var, i);
@@ -630,7 +674,7 @@ erts_smp_atomic_set(erts_smp_atomic_t *var, long i)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_read(erts_smp_atomic_t *var)
{
#ifdef ERTS_SMP
@@ -640,7 +684,7 @@ erts_smp_atomic_read(erts_smp_atomic_t *var)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_inctest(erts_smp_atomic_t *incp)
{
#ifdef ERTS_SMP
@@ -650,7 +694,7 @@ erts_smp_atomic_inctest(erts_smp_atomic_t *incp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_dectest(erts_smp_atomic_t *decp)
{
#ifdef ERTS_SMP
@@ -680,8 +724,8 @@ erts_smp_atomic_dec(erts_smp_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_addtest(erts_smp_atomic_t *addp, long i)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_addtest(erts_smp_atomic_t *addp, erts_aint_t i)
{
#ifdef ERTS_SMP
return erts_atomic_addtest(addp, i);
@@ -691,7 +735,7 @@ erts_smp_atomic_addtest(erts_smp_atomic_t *addp, long i)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_add(erts_smp_atomic_t *addp, long i)
+erts_smp_atomic_add(erts_smp_atomic_t *addp, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_add(addp, i);
@@ -700,59 +744,61 @@ erts_smp_atomic_add(erts_smp_atomic_t *addp, long i)
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, long new)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, erts_aint_t new)
{
#ifdef ERTS_SMP
return erts_atomic_xchg(xchgp, new);
#else
- long old;
+ erts_aint_t old;
old = *xchgp;
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp, long new, long expected)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected)
{
#ifdef ERTS_SMP
return erts_atomic_cmpxchg(xchgp, new, expected);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == expected)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_bor(erts_smp_atomic_t *var, erts_aint_t mask)
{
#ifdef ERTS_SMP
return erts_atomic_bor(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var |= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_band(erts_smp_atomic_t *var, erts_aint_t mask)
{
#ifdef ERTS_SMP
return erts_atomic_band(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var &= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
{
#ifdef ERTS_SMP
@@ -763,7 +809,7 @@ erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
+erts_smp_atomic_set_relb(erts_smp_atomic_t *var, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_set_relb(var, i);
@@ -772,7 +818,8 @@ erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
#endif
}
-ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
+ERTS_GLB_INLINE void
+erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
{
#ifdef ERTS_SMP
erts_atomic_dec_relb(decp);
@@ -781,7 +828,7 @@ ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
{
#ifdef ERTS_SMP
@@ -791,28 +838,244 @@ erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef ERTS_SMP
return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef ERTS_SMP
return erts_atomic_cmpxchg_relb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_init(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_set(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read(erts_smp_atomic32_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_read(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_inctest(incp);
+#else
+ return ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_dectest(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_inc(erts_smp_atomic32_t *incp)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_inc(incp);
+#else
+ ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_dec(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_addtest(addp, i);
+#else
+ return *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_add(addp, i);
+#else
+ *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_xchg(xchgp, new);
+#else
+ erts_aint32_t old;
+ old = *xchgp;
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_cmpxchg(xchgp, new, expected);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_bor(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var |= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_band(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_dectest_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_cmpxchg_acqb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_cmpxchg_relb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
@@ -988,14 +1251,6 @@ erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_thr_time_now(erts_smp_thr_timeval_t *time)
-{
-#ifdef ERTS_SMP
- erts_thr_time_now(time);
-#endif
-}
-
-ERTS_GLB_INLINE void
erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp)
{
#ifdef ERTS_SMP
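
Worth noting about the fallbacks above: in a non-SMP build every
erts_smp_atomic32_* call collapses to a plain memory operation on an ordinary
integer, so the wrapper API costs nothing when there is only one scheduler
thread. A condensed sketch of that structure — MY_SMP and the my_* names are
hypothetical:

#include <stdint.h>
#include <stdio.h>

#ifdef MY_SMP
#include <stdatomic.h>
typedef _Atomic int32_t my_atomic32_t;
#else
typedef int32_t my_atomic32_t;             /* just an integer */
#endif

static int32_t my_atomic32_inctest(my_atomic32_t *incp)
{
#ifdef MY_SMP
    return atomic_fetch_add(incp, 1) + 1;  /* increment, return new value */
#else
    return ++(*incp);                      /* single-threaded: plain ++ */
#endif
}

int main(void)
{
    my_atomic32_t n = 0;
    printf("%d\n", (int) my_atomic32_inctest(&n));  /* prints 1 */
    return 0;
}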
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index a74cf79b8c..84a20b51f2 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -89,7 +89,10 @@ typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
typedef ethr_tsd_key erts_tsd_key_t;
typedef ethr_ts_event erts_tse_t;
+typedef ethr_sint_t erts_aint_t;
typedef ethr_atomic_t erts_atomic_t;
+typedef ethr_sint32_t erts_aint32_t;
+typedef ethr_atomic32_t erts_atomic32_t;
/* spinlock */
typedef struct {
@@ -113,7 +116,6 @@ typedef struct {
#endif
} erts_rwlock_t;
-typedef ethr_timeval erts_thr_timeval_t;
__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
/* implemented in erl_init.c */
@@ -152,7 +154,10 @@ typedef struct {
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
typedef int erts_tse_t;
-typedef long erts_atomic_t;
+typedef SWord erts_aint_t;
+typedef SWord erts_atomic_t;
+typedef SWord erts_aint32_t;
+typedef SWord erts_atomic32_t;
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
typedef struct { } erts_rwlock_t;
@@ -160,10 +165,6 @@ typedef struct { } erts_rwlock_t;
typedef struct { int gcc_is_buggy; } erts_spinlock_t;
typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#endif
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} erts_thr_timeval_t;
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
@@ -173,6 +174,8 @@ typedef struct {
#endif /* #ifdef USE_THREADS */
+#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
+
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
@@ -231,33 +234,65 @@ ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_atomic_init(erts_atomic_t *var, long i);
-ERTS_GLB_INLINE void erts_atomic_set(erts_atomic_t *var, long i);
-ERTS_GLB_INLINE long erts_atomic_read(erts_atomic_t *var);
-ERTS_GLB_INLINE long erts_atomic_inctest(erts_atomic_t *incp);
-ERTS_GLB_INLINE long erts_atomic_dectest(erts_atomic_t *decp);
+ERTS_GLB_INLINE void erts_atomic_init(erts_atomic_t *var, erts_aint_t i);
+ERTS_GLB_INLINE void erts_atomic_set(erts_atomic_t *var, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_read(erts_atomic_t *var);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_inctest(erts_atomic_t *incp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest(erts_atomic_t *decp);
ERTS_GLB_INLINE void erts_atomic_inc(erts_atomic_t *incp);
ERTS_GLB_INLINE void erts_atomic_dec(erts_atomic_t *decp);
-ERTS_GLB_INLINE long erts_atomic_addtest(erts_atomic_t *addp,
- long i);
-ERTS_GLB_INLINE void erts_atomic_add(erts_atomic_t *addp, long i);
-ERTS_GLB_INLINE long erts_atomic_xchg(erts_atomic_t *xchgp,
- long new);
-ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
- long new,
- long expected);
-ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_atomic_read_acqb(erts_atomic_t *var);
-ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_addtest(erts_atomic_t *addp,
+ erts_aint_t i);
+ERTS_GLB_INLINE void erts_atomic_add(erts_atomic_t *addp, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_xchg(erts_atomic_t *xchgp,
+ erts_aint_t new);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_bor(erts_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_band(erts_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_read_acqb(erts_atomic_t *var);
+ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i);
ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
-ERTS_GLB_INLINE long erts_atomic_dectest_relb(erts_atomic_t *decp);
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
- long new,
- long exp);
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
- long new,
- long exp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE void erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE void erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read(erts_atomic32_t *var);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_inctest(erts_atomic32_t *incp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest(erts_atomic32_t *decp);
+ERTS_GLB_INLINE void erts_atomic32_inc(erts_atomic32_t *incp);
+ERTS_GLB_INLINE void erts_atomic32_dec(erts_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_addtest(erts_atomic32_t *addp,
+ erts_aint32_t i);
+ERTS_GLB_INLINE void erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_xchg(erts_atomic32_t *xchgp,
+ erts_aint32_t new);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_bor(erts_atomic32_t *var,
+ erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_band(erts_atomic32_t *var,
+ erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read_acqb(erts_atomic32_t *var);
+ERTS_GLB_INLINE void erts_atomic32_set_relb(erts_atomic32_t *var,
+ erts_aint32_t i);
+ERTS_GLB_INLINE void erts_atomic32_dec_relb(erts_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest_relb(erts_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
char *name,
Eterm extra,
@@ -292,7 +327,6 @@ ERTS_GLB_INLINE void erts_write_lock(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_thr_time_now(erts_thr_timeval_t *time);
ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
@@ -925,7 +959,7 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_atomic_init(erts_atomic_t *var, long i)
+erts_atomic_init(erts_atomic_t *var, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_init(var, i);
@@ -935,7 +969,7 @@ erts_atomic_init(erts_atomic_t *var, long i)
}
ERTS_GLB_INLINE void
-erts_atomic_set(erts_atomic_t *var, long i)
+erts_atomic_set(erts_atomic_t *var, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_set(var, i);
@@ -944,7 +978,7 @@ erts_atomic_set(erts_atomic_t *var, long i)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_read(erts_atomic_t *var)
{
#ifdef USE_THREADS
@@ -954,7 +988,7 @@ erts_atomic_read(erts_atomic_t *var)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_inctest(erts_atomic_t *incp)
{
#ifdef USE_THREADS
@@ -964,7 +998,7 @@ erts_atomic_inctest(erts_atomic_t *incp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_dectest(erts_atomic_t *decp)
{
#ifdef USE_THREADS
@@ -994,8 +1028,8 @@ erts_atomic_dec(erts_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_addtest(erts_atomic_t *addp, long i)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_addtest(erts_atomic_t *addp, erts_aint_t i)
{
#ifdef USE_THREADS
return ethr_atomic_add_read(addp, i);
@@ -1005,7 +1039,7 @@ erts_atomic_addtest(erts_atomic_t *addp, long i)
}
ERTS_GLB_INLINE void
-erts_atomic_add(erts_atomic_t *addp, long i)
+erts_atomic_add(erts_atomic_t *addp, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_add(addp, i);
@@ -1014,59 +1048,58 @@ erts_atomic_add(erts_atomic_t *addp, long i)
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_xchg(erts_atomic_t *xchgp, long new)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_xchg(erts_atomic_t *xchgp, erts_aint_t new)
{
- long old;
#ifdef USE_THREADS
return ethr_atomic_xchg(xchgp, new);
#else
- old = *xchgp;
+ erts_aint_t old = *xchgp;
*xchgp = new;
-#endif
return old;
+#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_cmpxchg(erts_atomic_t *xchgp, erts_aint_t new, erts_aint_t expected)
{
#ifdef USE_THREADS
return ethr_atomic_cmpxchg(xchgp, new, expected);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == expected)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_bor(erts_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_bor(erts_atomic_t *var, erts_aint_t mask)
{
#ifdef USE_THREADS
return ethr_atomic_read_bor(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var |= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_band(erts_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_band(erts_atomic_t *var, erts_aint_t mask)
{
#ifdef USE_THREADS
return ethr_atomic_read_band(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var &= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_read_acqb(erts_atomic_t *var)
{
#ifdef USE_THREADS
@@ -1077,7 +1110,7 @@ erts_atomic_read_acqb(erts_atomic_t *var)
}
ERTS_GLB_INLINE void
-erts_atomic_set_relb(erts_atomic_t *var, long i)
+erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_set_relb(var, i);
@@ -1096,7 +1129,7 @@ erts_atomic_dec_relb(erts_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_dectest_relb(erts_atomic_t *decp)
{
#ifdef USE_THREADS
@@ -1106,28 +1139,243 @@ erts_atomic_dectest_relb(erts_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef USE_THREADS
return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef USE_THREADS
return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+/* atomic32 */
+
+ERTS_GLB_INLINE void
+erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_init(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_set(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read(erts_atomic32_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_inctest(erts_atomic32_t *incp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_inc_read(incp);
+#else
+ return ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_dectest(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_dec_read(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_inc(erts_atomic32_t *incp)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_inc(incp);
+#else
+ ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_dec(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_dec(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_addtest(erts_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_add_read(addp, i);
+#else
+ return *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_add(addp, i);
+#else
+ *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_xchg(erts_atomic32_t *xchgp, erts_aint32_t new)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_xchg(xchgp, new);
+#else
+ erts_aint32_t old = *xchgp;
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_cmpxchg(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_cmpxchg(xchgp, new, expected);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_bor(erts_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read_bor(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var |= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_band(erts_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read_band(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_acqb(erts_atomic32_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_set_relb(erts_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_dec_relb(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_dectest_relb(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_dec_read_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_cmpxchg_acqb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_cmpxchg_relb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
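/*
 * Illustrative sketch (not part of the patch): how the new erts_atomic32
 * API might be used for a small flag word, much like the commit does for
 * scheduling and system-block flags. The names example_flags and
 * EXAMPLE_FLG_DIRTY are hypothetical; assumes erl_threads.h is included.
 */
#define EXAMPLE_FLG_DIRTY ((erts_aint32_t) 1)

static erts_atomic32_t example_flags;

static void
example_set_flag(void)
{
    erts_aint32_t old;
    /* bor returns the value seen *before* the update... */
    old = erts_atomic32_bor(&example_flags, EXAMPLE_FLG_DIRTY);
    if (!(old & EXAMPLE_FLG_DIRTY)) {
	/* ...so this branch runs in exactly one racing thread */
    }
}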
@@ -1428,16 +1676,6 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_thr_time_now(erts_thr_timeval_t *time)
-{
-#ifdef USE_THREADS
- int res = ethr_time_now(time);
- if (res)
- erts_thr_fatal_error(res, "get current time");
-#endif
-}
-
-ERTS_GLB_INLINE void
erts_tsd_key_create(erts_tsd_key_t *keyp)
{
#ifdef USE_THREADS
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 524db2a2eb..c948af14ae 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -544,7 +544,7 @@ ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
if (prt->snapshot != erts_smp_atomic_read(&erts_ports_snapshot)) {
/* Dead ports are added from the end of the snapshot buffer */
Eterm* tombstone = (Eterm*) erts_smp_atomic_addtest(&erts_dead_ports_ptr,
- -(long)sizeof(Eterm));
+ -(erts_aint_t)sizeof(Eterm));
ASSERT(tombstone+1 != NULL);
ASSERT(prt->snapshot == (Uint32) erts_smp_atomic_read(&erts_ports_snapshot) - 1);
*tombstone = prt->id;
@@ -563,7 +563,7 @@ extern Uint display_items; /* no of items to display in traces etc */
extern Uint display_loads; /* print info about loaded modules */
extern int erts_backtrace_depth;
-extern erts_smp_atomic_t erts_max_gen_gcs;
+extern erts_smp_atomic32_t erts_max_gen_gcs;
extern int erts_disable_tolerant_timeofday;
@@ -1206,7 +1206,7 @@ ERTS_GLB_INLINE void
erts_smp_port_unlock(Port *prt)
{
#ifdef ERTS_SMP
- long refc;
+ erts_aint_t refc;
erts_smp_mtx_unlock(prt->lock);
refc = erts_smp_atomic_dectest(&prt->refc);
ASSERT(refc >= 0);
@@ -1425,29 +1425,29 @@ void erl_drv_thr_init(void);
/* time.c */
-ERTS_GLB_INLINE long do_time_read_and_reset(void);
+ERTS_GLB_INLINE erts_aint_t do_time_read_and_reset(void);
#ifdef ERTS_TIMER_THREAD
-ERTS_GLB_INLINE int next_time(void);
+ERTS_GLB_INLINE erts_aint_t next_time(void);
-ERTS_GLB_INLINE void bump_timer(long);
+ERTS_GLB_INLINE void bump_timer(erts_aint_t);
#else
-int next_time(void);
-void bump_timer(long);
+erts_aint_t next_time(void);
+void bump_timer(erts_aint_t);
extern erts_smp_atomic_t do_time; /* set at clock interrupt */
-ERTS_GLB_INLINE void do_time_add(long);
+ERTS_GLB_INLINE void do_time_add(erts_aint_t);
#endif
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
#ifdef ERTS_TIMER_THREAD
-ERTS_GLB_INLINE long do_time_read_and_reset(void) { return 0; }
-ERTS_GLB_INLINE int next_time(void) { return -1; }
-ERTS_GLB_INLINE void bump_timer(long ignore) { }
+ERTS_GLB_INLINE erts_aint_t do_time_read_and_reset(void) { return 0; }
+ERTS_GLB_INLINE erts_aint_t next_time(void) { return -1; }
+ERTS_GLB_INLINE void bump_timer(erts_aint_t ignore) { }
#else
-ERTS_GLB_INLINE long do_time_read_and_reset(void)
+ERTS_GLB_INLINE erts_aint_t do_time_read_and_reset(void)
{
return erts_smp_atomic_xchg(&do_time, 0L);
}
-ERTS_GLB_INLINE void do_time_add(long elapsed)
+ERTS_GLB_INLINE void do_time_add(erts_aint_t elapsed)
{
erts_smp_atomic_add(&do_time, elapsed);
}
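/*
 * Illustrative sketch (not part of the patch): how the do_time pieces fit
 * together in the configuration without ERTS_TIMER_THREAD. The time keeper
 * accumulates elapsed ticks and a scheduler later drains and fires them.
 * Function names are hypothetical.
 */
static void
example_clock_tick(erts_aint_t elapsed)
{
    do_time_add(elapsed);		/* called from the time keeper */
}

static void
example_scheduler_check_timers(void)
{
    erts_aint_t dt = do_time_read_and_reset();
    if (dt)				/* any ticks pending?          */
	bump_timer(dt);			/* expire timers up to now     */
}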
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 9ed92bbe03..f6c6a01fb2 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -428,7 +428,7 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
old_name = prt->name;
prt->name = new_name;
#ifdef ERTS_SMP
- erts_smp_atomic_set(&prt->run_queue, (long) runq);
+ erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) runq);
#endif
ASSERT(!prt->drv_ptr);
prt->drv_ptr = driver;
@@ -1297,7 +1297,7 @@ void init_io(void)
erts_port[i].port_data_lock = NULL;
}
- erts_smp_atomic_init(&erts_ports_snapshot, (long) 0);
+ erts_smp_atomic_init(&erts_ports_snapshot, (erts_aint_t) 0);
last_port_num = 0;
erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
@@ -3252,7 +3252,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, int hlen,
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + len));
+ erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
return erts_net_message(prt,
prt->dist_entry,
@@ -3287,7 +3287,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, int hlen, char* buf, int len)
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + len));
+ erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
if (len == 0)
return erts_net_message(prt,
@@ -3364,7 +3364,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, int hlen, ErlIOVec* vec, int skip)
/* XXX handle distribution !!! */
prt->bytes_in += (hlen + size);
- erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + size));
+ erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + size));
deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov, n, size);
return 0;
}
@@ -3408,25 +3408,25 @@ int len;
* reference count on driver binaries...
*/
-long
+ErlDrvSInt
driver_binary_get_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return erts_refc_read(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_read(&bp->refc, 1);
}
-long
+ErlDrvSInt
driver_binary_inc_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return erts_refc_inctest(&bp->refc, 2);
+ return (ErlDrvSInt) erts_refc_inctest(&bp->refc, 2);
}
-long
+ErlDrvSInt
driver_binary_dec_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return erts_refc_dectest(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_dectest(&bp->refc, 1);
}
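/*
 * Illustrative sketch (not part of the patch): driver-side use of the
 * refc API, which now returns ErlDrvSInt so that counts are not truncated
 * on LLP64 platforms where long is only 32 bits wide.
 */
static void
example_keep_binary(ErlDrvBinary *bin)
{
    /* take an extra reference so the binary outlives this callback */
    ErlDrvSInt refc = driver_binary_inc_refc(bin);
    /* ... hand bin to another thread, use it, then ... */
    refc = driver_binary_dec_refc(bin);
    (void) refc;
}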
@@ -3541,12 +3541,12 @@ pdl_init_refc(ErlDrvPDL pdl)
erts_atomic_init(&pdl->refc, 1);
}
-static ERTS_INLINE long
+static ERTS_INLINE ErlDrvSInt
pdl_read_refc(ErlDrvPDL pdl)
{
- long refc = erts_atomic_read(&pdl->refc);
+ erts_aint_t refc = erts_atomic_read(&pdl->refc);
ERTS_LC_ASSERT(refc >= 0);
- return refc;
+ return (ErlDrvSInt) refc;
}
static ERTS_INLINE void
@@ -3556,12 +3556,12 @@ pdl_inc_refc(ErlDrvPDL pdl)
ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 1);
}
-static ERTS_INLINE long
+static ERTS_INLINE ErlDrvSInt
pdl_inctest_refc(ErlDrvPDL pdl)
{
- long refc = erts_atomic_inctest(&pdl->refc);
+ erts_aint_t refc = erts_atomic_inctest(&pdl->refc);
ERTS_LC_ASSERT(refc > 1);
- return refc;
+ return (ErlDrvSInt) refc;
}
#if 0 /* unused */
@@ -3573,12 +3573,12 @@ pdl_dec_refc(ErlDrvPDL pdl)
}
#endif
-static ERTS_INLINE long
+static ERTS_INLINE ErlDrvSInt
pdl_dectest_refc(ErlDrvPDL pdl)
{
- long refc = erts_atomic_dectest(&pdl->refc);
+ erts_aint_t refc = erts_atomic_dectest(&pdl->refc);
ERTS_LC_ASSERT(refc >= 0);
- return refc;
+ return (ErlDrvSInt) refc;
}
static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
@@ -3649,7 +3649,7 @@ driver_pdl_lock(ErlDrvPDL pdl)
void
driver_pdl_unlock(ErlDrvPDL pdl)
{
- long refc;
+ ErlDrvSInt refc;
#ifdef HARDDEBUG
erts_fprintf(stderr, "driver_pdl_unlock(0x%08X)\r\n",(unsigned) pdl);
#endif
@@ -3659,28 +3659,30 @@ driver_pdl_unlock(ErlDrvPDL pdl)
pdl_destroy(pdl);
}
-long
+ErlDrvSInt
driver_pdl_get_refc(ErlDrvPDL pdl)
{
return pdl_read_refc(pdl);
}
-long
+ErlDrvSInt
driver_pdl_inc_refc(ErlDrvPDL pdl)
{
- long refc = pdl_inctest_refc(pdl);
+ ErlDrvSInt refc = pdl_inctest_refc(pdl);
#ifdef HARDDEBUG
- erts_fprintf(stderr, "driver_pdl_inc_refc(0x%08X) -> %ld\r\n",(unsigned) pdl, refc);
+ erts_fprintf(stderr, "driver_pdl_inc_refc(%p) -> %bpd\r\n",
+ pdl, refc);
#endif
return refc;
}
-long
+ErlDrvSInt
driver_pdl_dec_refc(ErlDrvPDL pdl)
{
- long refc = pdl_dectest_refc(pdl);
+ ErlDrvSInt refc = pdl_dectest_refc(pdl);
#ifdef HARDDEBUG
- erts_fprintf(stderr, "driver_pdl_dec_refc(0x%08X) -> %ld\r\n",(unsigned) pdl, refc);
+ erts_fprintf(stderr, "driver_pdl_dec_refc(%p) -> %bpd\r\n",
+ pdl, refc);
#endif
if (!refc)
pdl_destroy(pdl);
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 27c5f99320..5a4dad0a28 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -728,11 +728,11 @@ typedef enum {
} erts_activity_error_t;
typedef struct {
- erts_smp_atomic_t do_block;
+ erts_smp_atomic32_t do_block;
struct {
- erts_smp_atomic_t wait;
- erts_smp_atomic_t gc;
- erts_smp_atomic_t io;
+ erts_smp_atomic32_t wait;
+ erts_smp_atomic32_t gc;
+ erts_smp_atomic32_t io;
} in_activity;
} erts_system_block_state_t;
@@ -883,7 +883,7 @@ ERTS_GLB_INLINE int
erts_smp_pending_system_block(void)
{
#ifdef ERTS_SMP
- return erts_smp_atomic_read(&erts_system_block_state.do_block);
+ return (int) erts_smp_atomic32_read(&erts_system_block_state.do_block);
#else
return 0;
#endif
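/*
 * Illustrative sketch (not part of the patch): do_block acts as a counter
 * of pending blockers, so a plain 32-bit atomic read is all a thread needs
 * in order to notice that it should stop and let the blocker run. The
 * function name is hypothetical.
 */
static void
example_maybe_yield_to_blocker(void)
{
    if (erts_smp_pending_system_block()) {
	/* a thread has called erts_block_system(); reach a safe point
	 * and wait until the system is released */
    }
}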
@@ -919,7 +919,7 @@ erts_smp_set_activity(erts_activity_t old_activity,
case ERTS_ACTIVITY_UNDEFINED:
break;
case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic_dec(&erts_system_block_state.in_activity.wait);
+ erts_smp_atomic32_dec(&erts_system_block_state.in_activity.wait);
if (locked) {
/* You are not allowed to leave activity waiting
* without supplying the possibility to block
@@ -930,10 +930,10 @@ erts_smp_set_activity(erts_activity_t old_activity,
}
break;
case ERTS_ACTIVITY_GC:
- erts_smp_atomic_dec(&erts_system_block_state.in_activity.gc);
+ erts_smp_atomic32_dec(&erts_system_block_state.in_activity.gc);
break;
case ERTS_ACTIVITY_IO:
- erts_smp_atomic_dec(&erts_system_block_state.in_activity.io);
+ erts_smp_atomic32_dec(&erts_system_block_state.in_activity.io);
break;
default:
erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
@@ -949,13 +949,13 @@ erts_smp_set_activity(erts_activity_t old_activity,
case ERTS_ACTIVITY_UNDEFINED:
break;
case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic_inc(&erts_system_block_state.in_activity.wait);
+ erts_smp_atomic32_inc(&erts_system_block_state.in_activity.wait);
break;
case ERTS_ACTIVITY_GC:
- erts_smp_atomic_inc(&erts_system_block_state.in_activity.gc);
+ erts_smp_atomic32_inc(&erts_system_block_state.in_activity.gc);
break;
case ERTS_ACTIVITY_IO:
- erts_smp_atomic_inc(&erts_system_block_state.in_activity.io);
+ erts_smp_atomic32_inc(&erts_system_block_state.in_activity.io);
break;
default:
erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
@@ -990,27 +990,31 @@ erts_smp_set_activity(erts_activity_t old_activity,
typedef erts_smp_atomic_t erts_refc_t;
-ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, long val);
-ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE long erts_refc_inctest(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE long erts_refc_dectest(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE void erts_refc_add(erts_refc_t *refcp, long diff, long min_val);
-ERTS_GLB_INLINE long erts_refc_read(erts_refc_t *refcp, long min_val);
+ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, erts_aint_t val);
+ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_inctest(erts_refc_t *refcp,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_dectest(erts_refc_t *refcp,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE void erts_refc_add(erts_refc_t *refcp, erts_aint_t diff,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_read(erts_refc_t *refcp,
+ erts_aint_t min_val);
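/*
 * Illustrative sketch (not part of the patch): min_val is a sanity floor;
 * with ERTS_REFC_DEBUG defined, a count dropping below it aborts the
 * emulator. For a structure kept alive by a table entry plus user
 * references, typical floors look like this:
 */
static void
example_refc_usage(erts_refc_t *refcp)
{
    erts_refc_init(refcp, 1);	     /* one reference: the table entry */
    erts_refc_inc(refcp, 2);	     /* user reference; now >= 2       */
    if (erts_refc_dectest(refcp, 0) == 0) {
	/* last reference dropped; safe to deallocate */
    }
}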
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_refc_init(erts_refc_t *refcp, long val)
+erts_refc_init(erts_refc_t *refcp, erts_aint_t val)
{
erts_smp_atomic_init((erts_smp_atomic_t *) refcp, val);
}
ERTS_GLB_INLINE void
-erts_refc_inc(erts_refc_t *refcp, long min_val)
+erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- long val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
@@ -1020,10 +1024,10 @@ erts_refc_inc(erts_refc_t *refcp, long min_val)
#endif
}
-ERTS_GLB_INLINE long
-erts_refc_inctest(erts_refc_t *refcp, long min_val)
+ERTS_GLB_INLINE erts_aint_t
+erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
- long val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
@@ -1034,10 +1038,10 @@ erts_refc_inctest(erts_refc_t *refcp, long min_val)
}
ERTS_GLB_INLINE void
-erts_refc_dec(erts_refc_t *refcp, long min_val)
+erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- long val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
@@ -1047,10 +1051,10 @@ erts_refc_dec(erts_refc_t *refcp, long min_val)
#endif
}
-ERTS_GLB_INLINE long
-erts_refc_dectest(erts_refc_t *refcp, long min_val)
+ERTS_GLB_INLINE erts_aint_t
+erts_refc_dectest(erts_refc_t *refcp, erts_aint_t min_val)
{
- long val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
@@ -1061,10 +1065,10 @@ erts_refc_dectest(erts_refc_t *refcp, long min_val)
}
ERTS_GLB_INLINE void
-erts_refc_add(erts_refc_t *refcp, long diff, long min_val)
+erts_refc_add(erts_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- long val = erts_smp_atomic_addtest((erts_smp_atomic_t *) refcp, diff);
+ erts_aint_t val = erts_smp_atomic_addtest((erts_smp_atomic_t *) refcp, diff);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
@@ -1074,10 +1078,10 @@ erts_refc_add(erts_refc_t *refcp, long diff, long min_val)
#endif
}
-ERTS_GLB_INLINE long
-erts_refc_read(erts_refc_t *refcp, long min_val)
+ERTS_GLB_INLINE erts_aint_t
+erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
{
- long val = erts_smp_atomic_read((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_read((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 53d39aef0e..9cb6ea34ef 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -107,12 +107,12 @@ static int itime; /* Constant after init */
#if defined(ERTS_TIMER_THREAD)
static SysTimeval time_start; /* start of current time interval */
-static long ticks_end; /* time_start+ticks_end == time_wakeup */
-static long ticks_latest; /* delta from time_start at latest time update*/
+static erts_aint_t ticks_end;    /* time_start+ticks_end == time_wakeup */
+static erts_aint_t ticks_latest; /* delta from time_start at latest time update */
-static ERTS_INLINE long time_gettimeofday(SysTimeval *now)
+static ERTS_INLINE erts_aint_t time_gettimeofday(SysTimeval *now)
{
- long elapsed;
+ erts_aint_t elapsed;
erts_get_timeval(now);
now->tv_usec = 1000 * (now->tv_usec / 1000); /* ms resolution */
@@ -122,25 +122,25 @@ static ERTS_INLINE long time_gettimeofday(SysTimeval *now)
return elapsed;
}
-static long do_time_update(void)
+static erts_aint_t do_time_update(void)
{
SysTimeval now;
- long elapsed;
+ erts_aint_t elapsed;
elapsed = time_gettimeofday(&now);
ticks_latest = elapsed;
return elapsed;
}
-static ERTS_INLINE long do_time_read(void)
+static ERTS_INLINE erts_aint_t do_time_read(void)
{
return ticks_latest;
}
-static long do_time_reset(void)
+static erts_aint_t do_time_reset(void)
{
SysTimeval now;
- long elapsed;
+ erts_aint_t elapsed;
elapsed = time_gettimeofday(&now);
time_start = now;
@@ -156,20 +156,29 @@ static ERTS_INLINE void do_time_init(void)
#else
erts_smp_atomic_t do_time; /* set at clock interrupt */
-static ERTS_INLINE long do_time_read(void) { return erts_smp_atomic_read(&do_time); }
-static ERTS_INLINE long do_time_update(void) { return do_time_read(); }
-static ERTS_INLINE void do_time_init(void) { erts_smp_atomic_init(&do_time, 0L); }
+static ERTS_INLINE erts_aint_t do_time_read(void)
+{
+ return erts_smp_atomic_read(&do_time);
+}
+static ERTS_INLINE erts_aint_t do_time_update(void)
+{
+ return do_time_read();
+}
+static ERTS_INLINE void do_time_init(void)
+{
+ erts_smp_atomic_init(&do_time, (erts_aint_t) 0);
+}
#endif
/* get the time (in units of itime) to the next timeout,
or -1 if there are no timeouts */
-static int next_time_internal(void) /* PRE: tiw_lock taken by caller */
+static erts_aint_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
{
int i, tm, nto;
unsigned int min;
ErlTimer* p;
- long dt;
+ erts_aint_t dt;
if (tiw_nto == 0)
return -1; /* no timeouts in wheel */
@@ -204,9 +213,9 @@ static int next_time_internal(void) /* PRE: tiw_lock taken by caller */
#if !defined(ERTS_TIMER_THREAD)
/* Private export to erl_time_sup.c */
-int next_time(void)
+erts_aint_t next_time(void)
{
- int ret;
+ erts_aint_t ret;
erts_smp_mtx_lock(&tiw_lock);
(void)do_time_update();
@@ -216,12 +225,12 @@ int next_time(void)
}
#endif
-static ERTS_INLINE void bump_timer_internal(long dt) /* PRE: tiw_lock is write-locked */
+static ERTS_INLINE void bump_timer_internal(erts_aint_t dt) /* PRE: tiw_lock is write-locked */
{
Uint keep_pos;
Uint count;
ErlTimer *p, **prev, *timeout_head, **timeout_tail;
- Uint dtime = (unsigned long)dt;
+ Uint dtime = (Uint) dt;
/* no need to bump the position if there aren't any timeouts */
if (tiw_nto == 0) {
@@ -287,7 +296,7 @@ static void timer_thread_bump_timer(void)
bump_timer_internal(do_time_reset());
}
#else
-void bump_timer(long dt) /* dt is value from do_time */
+void bump_timer(erts_aint_t dt) /* dt is value from do_time */
{
erts_smp_mtx_lock(&tiw_lock);
bump_timer_internal(dt);
@@ -305,8 +314,8 @@ static struct erts_iwait *timer_thread_iwait;
static int timer_thread_setup_delay(SysTimeval *rem_time)
{
- long elapsed;
- int ticks;
+ erts_aint_t elapsed;
+ erts_aint_t ticks;
erts_smp_mtx_lock(&tiw_lock);
elapsed = do_time_update();
@@ -496,7 +505,7 @@ Uint
time_left(ErlTimer *p)
{
Uint left;
- long dt;
+ erts_aint_t dt;
erts_smp_mtx_lock(&tiw_lock);
@@ -517,7 +526,7 @@ time_left(ErlTimer *p)
erts_smp_mtx_unlock(&tiw_lock);
- return left * itime;
+ return (Uint) left * itime;
}
#ifdef DEBUG
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index ab5e8b5d4a..2bf283d9ec 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3637,19 +3637,19 @@ erts_set_activity_error(erts_activity_error_t error, char *file, int line)
}
-static ERTS_INLINE int
+static ERTS_INLINE erts_aint32_t
threads_not_under_control(void)
{
- int res = system_block_state.threads_to_block;
+ erts_aint32_t res = system_block_state.threads_to_block;
/* Waiting is always an allowed activity... */
- res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.wait);
+ res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.wait);
if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_GC)
- res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.gc);
+ res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.gc);
if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_IO)
- res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.io);
+ res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.io);
if (res < 0) {
ASSERT(0);
@@ -3709,7 +3709,7 @@ erts_block_system(Uint32 allowed_activities)
}
else {
- erts_smp_atomic_inc(&erts_system_block_state.do_block);
+ erts_smp_atomic32_inc(&erts_system_block_state.do_block);
/* Someone else might be waiting for us to block... */
if (do_block) {
@@ -3761,11 +3761,11 @@ erts_emergency_block_system(long timeout, Uint32 allowed_activities)
another_blocker = erts_smp_pending_system_block();
system_block_state.emergency = 1;
- erts_smp_atomic_inc(&erts_system_block_state.do_block);
+ erts_smp_atomic32_inc(&erts_system_block_state.do_block);
if (another_blocker) {
if (is_blocker()) {
- erts_smp_atomic_dec(&erts_system_block_state.do_block);
+ erts_smp_atomic32_dec(&erts_system_block_state.do_block);
res = 0;
goto done;
}
@@ -3822,7 +3822,7 @@ erts_release_system(void)
if (system_block_state.recursive_block)
system_block_state.recursive_block--;
else {
- do_block = erts_smp_atomic_dectest(&erts_system_block_state.do_block);
+ do_block = erts_smp_atomic32_dectest(&erts_system_block_state.do_block);
system_block_state.have_blocker = 0;
if (is_blockable_thread())
system_block_state.threads_to_block++;
@@ -3957,10 +3957,10 @@ erts_system_block_init(void)
/* Global state... */
- erts_smp_atomic_init(&erts_system_block_state.do_block, 0L);
- erts_smp_atomic_init(&erts_system_block_state.in_activity.wait, 0L);
- erts_smp_atomic_init(&erts_system_block_state.in_activity.gc, 0L);
- erts_smp_atomic_init(&erts_system_block_state.in_activity.io, 0L);
+ erts_smp_atomic32_init(&erts_system_block_state.do_block, 0);
+ erts_smp_atomic32_init(&erts_system_block_state.in_activity.wait, 0);
+ erts_smp_atomic32_init(&erts_system_block_state.in_activity.gc, 0);
+ erts_smp_atomic32_init(&erts_system_block_state.in_activity.io, 0);
/* Make sure blockable threads unregister when exiting... */
erts_smp_install_exit_handler(erts_unregister_blockable_thread);
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index c17806d96c..4d0ca97889 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -124,9 +124,9 @@
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (long) 1))
+ ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (long) 0)
+ erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
((int) erts_smp_atomic_read(&(PS)->polled))
@@ -134,11 +134,11 @@
#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (long) 1); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
do { \
- erts_smp_atomic_set(&(PS)->woken, (long) 0); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
@@ -179,9 +179,9 @@ do { \
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (long) 1)
+ erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 1)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (long) 0)
+ erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 0)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
((int) erts_smp_atomic_read(&(PS)->have_update_requests))
#else
@@ -202,13 +202,13 @@ do { \
#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
do { \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 0); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 1); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
((int) erts_smp_atomic_read(&(PS)->interrupt))
@@ -356,7 +356,7 @@ unset_interrupted_chk(ErtsPollSet ps)
res = ps->interrupt;
ps->interrupt = 0;
#else
- res = (int) erts_smp_atomic_xchg(&ps->interrupt, (long) 0);
+ res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
ERTS_THR_MEMORY_BARRIER;
#endif
return res;
@@ -369,7 +369,7 @@ static ERTS_INLINE int
set_poller_woken_chk(ErtsPollSet ps)
{
ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (long) 1);
+ return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
}
#endif
@@ -1918,7 +1918,7 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
return 0;
}
else {
- long timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+ erts_aint_t timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
ASSERT(timeout >= 0);
erts_smp_atomic_set(&ps->timeout, timeout);
#if ERTS_POLL_USE_FALLBACK
@@ -2112,7 +2112,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
done:
- erts_smp_atomic_set(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Leaving %s = erts_poll_wait()\n",
res == 0 ? "0" : erl_errno_id(res));
@@ -2150,10 +2150,12 @@ ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
 * is not guaranteed that it will time out before 'msec' milliseconds.
*/
void
-ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, int set, long msec)
+ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
+ int set,
+ long msec)
{
if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > msec) {
+ if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
ERTS_POLLSET_SET_INTERRUPTED(ps);
#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
wake_poller(ps);
@@ -2315,7 +2317,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#else
erts_smp_atomic_init(&ps->interrupt, 0);
#endif
- erts_smp_atomic_init(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 01ba773688..d021baa6bf 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -237,9 +237,9 @@ static int max_files = -1;
#ifdef ERTS_SMP
erts_smp_atomic_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 1)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 0)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index a766fe9575..d84ae2ede2 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -297,11 +297,11 @@ struct ErtsPollSet_ {
#define ERTS_POLLSET_UNLOCK(PS) \
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (long) 1))
+ ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
#define ERTS_POLLSET_SET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (long) 1)
+ erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 1)
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (long) 0)
+ erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
((int) erts_smp_atomic_read(&(PS)->polled))
@@ -309,11 +309,11 @@ struct ErtsPollSet_ {
#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (long) 1); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
do { \
- erts_smp_atomic_set(&(PS)->woken, (long) 0); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
@@ -322,13 +322,13 @@ do { \
#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
do { \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 0); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 1); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
((int) erts_smp_atomic_read(&(PS)->interrupt))
@@ -336,7 +336,7 @@ do { \
static ERTS_INLINE int
unset_interrupted_chk(ErtsPollSet ps)
{
- int res = (int) erts_smp_atomic_xchg(&ps->interrupt, (long) 0);
+ int res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
ERTS_THR_MEMORY_BARRIER;
return res;
@@ -346,7 +346,7 @@ static ERTS_INLINE int
set_poller_woken_chk(ErtsPollSet ps)
{
ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (long) 1);
+ return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
}
#else
@@ -413,9 +413,9 @@ set_poller_woken_chk(ErtsPollSet ps)
#ifdef ERTS_SMP
extern erts_smp_atomic_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 1)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 0)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
#else
extern volatile int erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -986,7 +986,7 @@ void erts_poll_interrupt_timed(ErtsPollSet ps,
HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
#ifdef ERTS_SMP
if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > msec) {
+ if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
ERTS_POLLSET_SET_INTERRUPTED(ps);
wake_poller(ps);
}
@@ -1228,7 +1228,7 @@ int erts_poll_wait(ErtsPollSet ps,
erts_mtx_unlock(&w->mtx);
}
done:
- erts_smp_atomic_set(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
*len = num;
ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_wait"));
@@ -1314,7 +1314,7 @@ ErtsPollSet erts_poll_create_pollset(void)
erts_smp_mtx_init(&ps->mtx, "pollset");
erts_smp_atomic_init(&ps->interrupt, 0);
#endif
- erts_smp_atomic_init(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
HARDTRACEF(("Out erts_poll_create_pollset"));
return ps;
diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h
index 4949998abc..1347eead91 100644
--- a/erts/emulator/sys/win32/erl_win_dyn_driver.h
+++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h
@@ -87,15 +87,15 @@ WDD_TYPEDEF(unsigned long, erts_alc_test, (unsigned long,
unsigned long,
unsigned long,
unsigned long));
-WDD_TYPEDEF(long, driver_binary_get_refc, (ErlDrvBinary *dbp));
-WDD_TYPEDEF(long, driver_binary_inc_refc, (ErlDrvBinary *dbp));
-WDD_TYPEDEF(long, driver_binary_dec_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(ErlDrvSInt, driver_binary_get_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(ErlDrvSInt, driver_binary_inc_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(ErlDrvSInt, driver_binary_dec_refc, (ErlDrvBinary *dbp));
WDD_TYPEDEF(ErlDrvPDL, driver_pdl_create, (ErlDrvPort));
WDD_TYPEDEF(void, driver_pdl_lock, (ErlDrvPDL));
WDD_TYPEDEF(void, driver_pdl_unlock, (ErlDrvPDL));
-WDD_TYPEDEF(long, driver_pdl_get_refc, (ErlDrvPDL));
-WDD_TYPEDEF(long, driver_pdl_inc_refc, (ErlDrvPDL));
-WDD_TYPEDEF(long, driver_pdl_dec_refc, (ErlDrvPDL));
+WDD_TYPEDEF(ErlDrvSInt, driver_pdl_get_refc, (ErlDrvPDL));
+WDD_TYPEDEF(ErlDrvSInt, driver_pdl_inc_refc, (ErlDrvPDL));
+WDD_TYPEDEF(ErlDrvSInt, driver_pdl_dec_refc, (ErlDrvPDL));
WDD_TYPEDEF(void, driver_system_info, (ErlDrvSysInfo *, size_t));
WDD_TYPEDEF(int, driver_get_now, (ErlDrvNowData *));
WDD_TYPEDEF(int, driver_monitor_process, (ErlDrvPort port,
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index d2449a1bdb..262f84babc 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -33,9 +33,9 @@
#ifdef ERTS_SMP
erts_smp_atomic_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 1)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 0)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
diff --git a/erts/include/internal/ethr_atomics.h b/erts/include/internal/ethr_atomics.h
new file mode 100644
index 0000000000..1caf4d0567
--- /dev/null
+++ b/erts/include/internal/ethr_atomics.h
@@ -0,0 +1,726 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: The ethread atomic API
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_ATOMIC_H__
+#define ETHR_ATOMIC_H__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATOMIC_PROTOTYPES__
+#endif
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * No native atomic implementation available. :(
+ * Use fallback...
+ */
+typedef ethr_sint32_t ethr_atomic32_t;
+typedef ethr_sint_t ethr_atomic_t;
+#else
+/*
+ * Map ethread native atomics to ethread API atomics.
+ *
+ * We do at least have a native atomic implementation that
+ * can handle integers of a size larger than or equal to
+ * the size of pointers.
+ */
+
+/* -- Pointer size atomics -- */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+#if ETHR_SIZEOF_PTR == 8
+# if defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic64_addr
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#elif ETHR_SIZEOF_PTR == 4
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic32_addr
+# ifdef ETHR_HAVE_NATIVE_ATOMIC32
+typedef ethr_native_atomic32_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NATMC_T__ ethr_native_atomic64_t
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#endif
+
+/* -- 32-bit atomics -- */
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint32_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint64_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
+
+#endif
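/*
 * Illustrative sketch (not part of the patch): with 64-bit pointers and
 * ETHR_HAVE_NATIVE_ATOMIC64 defined, the token-pasting macro above turns
 *
 *     ETHR_NATMC_FUNC__(inc_return)(var)
 *
 * into
 *
 *     ethr_native_atomic64_inc_return(var)
 *
 * with ETHR_NAINT_T__ being ethr_sint64_t. On a 32-bit platform that only
 * provides 32-bit native atomics, the same call site instead expands to
 * ethr_native_atomic32_inc_return(var).
 */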
+
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+ethr_sint_t *ethr_atomic_addr(ethr_atomic_t *);
+void ethr_atomic_init(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_set(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_add(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+
+ethr_sint32_t *ethr_atomic32_addr(ethr_atomic32_t *);
+void ethr_atomic32_init(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_set(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read(ethr_atomic32_t *);
+void ethr_atomic32_inc(ethr_atomic32_t *);
+void ethr_atomic32_dec(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_add_read(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_add(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_band(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_bor(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_xchg(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_acqb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read_acqb(ethr_atomic32_t *);
+void ethr_atomic32_set_relb(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_dec_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+#endif
+
+int ethr_init_atomics(void);
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * Fallbacks for atomics used in the absence of a native implementation.
+ */
+
+#define ETHR_ATOMIC_ADDR_BITS 10
+#define ETHR_ATOMIC_ADDR_SHIFT 6
+
+typedef struct {
+ union {
+ ethr_spinlock_t lck;
+ char buf[ETHR_CACHE_LINE_SIZE];
+ } u;
+} ethr_atomic_protection_t;
+
+extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \
+(&ethr_atomic_protection__[((((ethr_uint_t) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
+
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
+do { \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
+ { EXPS; } \
+ ethr_spin_unlock(slp__); \
+} while (0)
+
+#endif
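/*
 * Illustrative sketch (not part of the patch): the fallback hashes the
 * address of the atomic variable into one of 2^10 = 1024 cache-line
 * padded spinlocks. The low ETHR_ATOMIC_ADDR_SHIFT (6) bits are dropped
 * so that variables sharing a 64-byte cache line share a lock. For a
 * variable at the (hypothetical) address 0x1040:
 *
 *     index = (0x1040 >> 6) & 0x3ff = 0x41
 *
 * so every operation on it spins on ethr_atomic_protection__[0x41].u.lck
 * around the plain C expression passed to the macro.
 */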
+
+/*
+ * --- Pointer size atomics ---------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_addr)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t *) ETHR_NATMC_ADDR_FUNC__(var);
+#else
+ return (ethr_sint_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, ethr_sint_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) i);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var, ethr_sint_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var,
+ (ETHR_NAINT_T__) new);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+				       if (res == exp)
+ *var = new;
+ });
+ return res;
+#endif
+}
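/*
 * Illustrative sketch (not part of the patch): cmpxchg returns the value
 * it actually saw, so the store happened exactly when the return value
 * equals exp. The classic retry loop, here an atomic maximum
 * (hypothetical helper):
 */
static ETHR_INLINE void
example_atomic_max(ethr_atomic_t *var, ethr_sint_t val)
{
    ethr_sint_t old, exp = ethr_atomic_read(var);
    while (exp < val) {
	old = ethr_atomic_cmpxchg(var, val, exp);
	if (old == exp)
	    break;		/* we installed val                    */
	exp = old;		/* lost the race; retry with fresh exp */
    }
}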
+
+/*
+ * Important memory barrier requirements.
+ *
+ * The following atomic operations *must* supply a memory barrier of
+ * at least the type specified by their suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var,
+ ethr_sint_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
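/*
 * Illustrative sketch (not part of the patch): a typical handoff relying
 * on these barriers. The producer publishes its payload with a release
 * store; the consumer reads the flag with an acquire read before touching
 * the payload. All names are hypothetical.
 */
static ethr_atomic_t example_ready;
static int example_payload;

static void
example_publish(int data)
{
    example_payload = data;			/* plain store           */
    ethr_atomic_set_relb(&example_ready, 1);	/* release: keeps the
						   store above before it */
}

static int
example_consume(void)
{
    if (ethr_atomic_read_acqb(&example_ready))	/* acquire: keeps the
						   load below after it   */
	return example_payload;
    return -1;
}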
+
+/*
+ * --- 32-bit atomics ---------------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_addr)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic32_addr(var);
+#else
+ return (ethr_sint32_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_init)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add)(ethr_atomic32_t *var,
+ ethr_sint32_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add_read)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) i);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_band)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_bor)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return
+ (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var,
+ (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_xchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var,
+ (ETHR_NAINT32_T__) new);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
+}
+
+/*
+ * Important memory barrier requirements.
+ *
+ * The following atomic operations *must* supply a memory barrier of
+ * at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_acqb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+
+#endif
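
For orientation, a minimal usage sketch (not part of the patch) showing how the
new 32-bit operations and their barrier-suffixed variants are meant to be
paired; my_obj_t and my_obj_destroy() are hypothetical names:

/* Hedged sketch: reference counting with the ethr_atomic32 API. */
typedef struct {
    ethr_atomic32_t refc;
    /* ... payload ... */
} my_obj_t;

static void my_obj_retain(my_obj_t *p)
{
    ethr_atomic32_inc(&p->refc);              /* no ordering needed */
}

static void my_obj_release(my_obj_t *p)
{
    /* dec_read_relb supplies at least a release barrier, ordering all
     * prior writes to the object before the decrement; on weakly
     * ordered machines an acquire may additionally be wanted before
     * actually tearing the object down. */
    if (ethr_atomic32_dec_read_relb(&p->refc) == 0)
        my_obj_destroy(p);                    /* hypothetical */
}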
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index 01855864e3..fadaf1e2a4 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -78,13 +78,13 @@
# error Need a qlock implementation
#endif
-#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
-#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
-#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+#define ETHR_RWMTX_W_FLG__ (((ethr_sint32_t) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((ethr_sint32_t) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((ethr_sint32_t) 1) << 29)
/* frequent read kind */
-#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
-#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((long) 1) << 27)
+#define ETHR_RWMTX_R_FLG__ (((ethr_sint32_t) 1) << 28)
+#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((ethr_sint32_t) 1) << 27)
#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)
/* normal kind */
@@ -106,28 +106,28 @@
#endif
#define ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(MTX) \
- ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic_read(&(MTX)->mtxb.flgs))
+ ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic32_read(&(MTX)->mtxb.flgs))
struct ethr_mutex_base_ {
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
long pre_fence;
#endif
- ethr_atomic_t flgs;
- ETHR_MTX_QLOCK_TYPE__ qlck;
- ethr_ts_event *q;
+ ethr_atomic32_t flgs;
short aux_scnt;
short main_scnt;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
#ifdef ETHR_MTX_HARD_DEBUG_WSQ
int ws;
#endif
#ifdef ETHR_MTX_CHK_EXCL
- ethr_atomic_t exclusive;
+ ethr_atomic32_t exclusive;
#endif
#ifdef ETHR_MTX_CHK_NON_EXCL
- ethr_atomic_t non_exclusive;
+ ethr_atomic32_t non_exclusive;
#endif
#ifdef ETHR_MTX_HARD_DEBUG_LFS
- ethr_atomic_t hdbg_lfs;
+ ethr_atomic32_t hdbg_lfs;
#endif
};
@@ -236,7 +236,7 @@ typedef struct {
typedef union {
struct {
- ethr_atomic_t readers;
+ ethr_atomic32_t readers;
int waiting_readers;
int byte_offset;
ethr_rwmutex_lived lived;
@@ -298,13 +298,13 @@ void ethr_rwmutex_rwunlock(ethr_rwmutex *);
#ifdef ETHR_MTX_HARD_DEBUG_LFS
# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
do { \
- ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+ ethr_atomic32_init(&(MTXB)->hdbg_lfs, 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_inc_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ > 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
@@ -317,15 +317,15 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ >= 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == -1); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
@@ -338,7 +338,7 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_inctest(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -386,12 +386,12 @@ do { \
#endif
# define ETHR_MTX_CHK_EXCL_INIT__(MTXB) \
- ethr_atomic_init(&(MTXB)->exclusive, 0)
+ ethr_atomic32_init(&(MTXB)->exclusive, 0)
# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (!ethr_atomic_read(&(MTXB)->exclusive)) \
+ if (!ethr_atomic32_read(&(MTXB)->exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -399,7 +399,7 @@ do { \
# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (ethr_atomic_read(&(MTXB)->exclusive)) \
+ if (ethr_atomic32_read(&(MTXB)->exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is not exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -407,13 +407,13 @@ do { \
# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB) \
do { \
ETHR_MTX_CHK_EXCL_IS_NOT_EXCL((MTXB)); \
- ethr_atomic_set(&(MTXB)->exclusive, 1); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 1); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB) \
do { \
ETHR_MTX_CHK_EXCL_IS_EXCL((MTXB)); \
- ethr_atomic_set(&(MTXB)->exclusive, 0); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -424,11 +424,11 @@ do { \
#endif
# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB) \
- ethr_atomic_init(&(MTXB)->non_exclusive, 0)
+ ethr_atomic32_init(&(MTXB)->non_exclusive, 0)
# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (!ethr_atomic_read(&(MTXB)->non_exclusive)) \
+ if (!ethr_atomic32_read(&(MTXB)->non_exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is non-exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -436,7 +436,7 @@ do { \
# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (ethr_atomic_read(&(MTXB)->non_exclusive)) \
+ if (ethr_atomic32_read(&(MTXB)->non_exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is not non-exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -444,19 +444,19 @@ do { \
# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_inc(&(MTXB)->non_exclusive); \
+ ethr_atomic32_inc(&(MTXB)->non_exclusive); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_add(&(MTXB)->non_exclusive, (NO)); \
+ ethr_atomic32_add(&(MTXB)->non_exclusive, (NO)); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_dec(&(MTXB)->non_exclusive); \
+ ethr_atomic32_dec(&(MTXB)->non_exclusive); \
ETHR_COMPILER_BARRIER; \
} while (0)
#else
@@ -501,18 +501,18 @@ do { \
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
-void ethr_mutex_lock_wait__(ethr_mutex *, long);
-void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint32_t);
+void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint32_t);
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
int res;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
res = (act == 0) ? 0 : EBUSY;
#ifdef ETHR_MTX_CHK_EXCL
@@ -531,11 +531,11 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
ethr_mutex_lock_wait__(mtx, act);
@@ -551,7 +551,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_COMPILER_BARRIER;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
@@ -559,7 +559,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
ETHR_MTX_CHK_EXCL_UNSET_EXCL(&mtx->mtxb);
- act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
ethr_mutex_unlock_wake__(mtx, act);
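
As a usage sketch (not part of the patch): with the inline fast paths above, an
uncontended lock/unlock pair costs one 32-bit cmpxchg each, and the slow-path
wait/wake functions are only entered when other flag bits are set. my_lock and
bump() are hypothetical, and ethr_mutex_init() error handling is elided:

static ethr_mutex my_lock;        /* initialized once via ethr_mutex_init() */
static int shared_counter;

static void bump(void)
{
    ethr_mutex_lock(&my_lock);    /* cmpxchg_acqb(flgs, W_FLG, 0) fast path */
    shared_counter++;
    ethr_mutex_unlock(&my_lock);  /* cmpxchg_relb(flgs, 0, W_FLG) fast path */
}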
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
index 2f9f987d0b..8e04692856 100644
--- a/erts/include/internal/ethr_optimized_fallbacks.h
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -71,36 +71,46 @@ ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
#define ETHR_HAVE_NATIVE_SPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
-typedef ethr_native_atomic_t ethr_native_spinlock_t;
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
- ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+ ETHR_NATMC_FUNC__(init)(lock, 0);
}
static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
- ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == 1);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
- while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- (long) 1, (long) 0) != 0) {
- ETHR_SPIN_BODY;
+ while (ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, 1, 0) != 0) {
+ while (ETHR_NATMC_FUNC__(read)(lock) != 0)
+ ETHR_SPIN_BODY;
}
ETHR_COMPILER_BARRIER;
}
#endif
+#undef ETHR_NATMC_FUNC__
+
#endif
@@ -111,16 +121,26 @@ ethr_native_spin_lock(ethr_native_spinlock_t *lock)
#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
-typedef ethr_native_atomic_t ethr_native_rwlock_t;
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint32_t) 1) << 30)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint64_t) 1) << 62)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
-
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
{
- ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+ ETHR_NATMC_FUNC__(init)(lock, 0);
}
static ETHR_INLINE void
@@ -128,22 +148,24 @@ ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
ETHR_COMPILER_BARRIER;
#ifdef DEBUG
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) >= 0);
#endif
- ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
+ ETHR_NATMC_FUNC__(dec_relb)(lock);
}
static ETHR_INLINE void
ethr_native_read_lock(ethr_native_rwlock_t *lock)
{
- long act, exp = 0;
+ ETHR_NAINT_T__ act, exp = 0;
while (1) {
- act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- exp+1, exp);
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp+1, exp);
if (act == exp)
break;
- ETHR_SPIN_BODY;
- exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
+ while (act & ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ETHR_NATMC_FUNC__(read)(lock);
+ }
+ exp = act;
}
ETHR_COMPILER_BARRIER;
}
@@ -152,18 +174,16 @@ static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
- == ETHR_WLOCK_FLAG__);
- ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == ETHR_WLOCK_FLAG__);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_write_lock(ethr_native_rwlock_t *lock)
{
- long act, exp = 0;
+ ETHR_NAINT_T__ act, exp = 0;
while (1) {
- act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- exp|ETHR_WLOCK_FLAG__, exp);
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp|ETHR_WLOCK_FLAG__, exp);
if (act == exp)
break;
ETHR_SPIN_BODY;
@@ -173,13 +193,17 @@ ethr_native_write_lock(ethr_native_rwlock_t *lock)
/* Wait for readers to leave */
while (act != ETHR_WLOCK_FLAG__) {
ETHR_SPIN_BODY;
- act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
+ act = ETHR_NATMC_FUNC__(read_acqb)(lock);
}
ETHR_COMPILER_BARRIER;
}
#endif
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_WLOCK_FLAG__
+
#endif
#endif
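
Note that the spinlock change above turns a plain test-and-set loop into a
test-and-test-and-set loop: after a failed cmpxchg the lock word is spun on
with plain reads, and the locked read-modify-write is only retried once the
lock looks free, which keeps the cache line in shared state while waiting. A
hedged standalone sketch of the same idea using the GCC __sync builtins:

static volatile int lock_word;    /* hypothetical; 0 = free, 1 = taken */

static void ttas_lock(void)
{
    while (__sync_val_compare_and_swap(&lock_word, 0, 1) != 0) {
        /* Spin on plain reads until the lock looks free. */
        while (lock_word != 0)
            ;                     /* ETHR_SPIN_BODY equivalent goes here */
    }
}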
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 53fa1acdc2..4cd95faf6a 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -37,11 +37,6 @@
#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} ethr_timeval;
-
#if defined(DEBUG)
# define ETHR_DEBUG
#endif
@@ -73,7 +68,7 @@ typedef struct {
#endif
/* Assume 64-byte cache line size */
-#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_SIZE ((ethr_uint_t) 64)
#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
@@ -171,6 +166,22 @@ typedef pthread_key_t ethr_tsd_key;
# undef WIN32_LEAN_AND_MEAN
#endif
+#if defined(_MSC_VER)
+
+#if ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
+#endif
+
+#if ETHR_SIZEOF___INT64 == 8
+#define ETHR_HAVE_INT64_T 1
+typedef __int64 ethr_sint64_t;
+typedef unsigned __int64 ethr_uint64_t;
+#endif
+
+#endif
+
struct ethr_join_data_;
/* Types */
@@ -198,12 +209,48 @@ typedef DWORD ethr_tsd_key;
#endif
-#ifdef SIZEOF_LONG
-#if SIZEOF_LONG < ETHR_SIZEOF_PTR
-#error size of long currently needs to be at least the same as size of void *
+#ifndef ETHR_HAVE_INT32_T
+#if ETHR_SIZEOF_INT == 4
+#define ETHR_HAVE_INT32_T 1
+typedef int ethr_sint32_t;
+typedef unsigned int ethr_uint32_t;
+#elif ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
#endif
#endif
+#ifndef ETHR_HAVE_INT64_T
+#if ETHR_SIZEOF_INT == 8
+#define ETHR_HAVE_INT64_T 1
+typedef int ethr_sint64_t;
+typedef unsigned int ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long ethr_sint64_t;
+typedef unsigned long ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long long ethr_sint64_t;
+typedef unsigned long long ethr_uint64_t;
+#endif
+#endif
+
+#if ETHR_SIZEOF_PTR == 4
+#ifndef ETHR_HAVE_INT32_T
+#error "No 32-bit integer type found"
+#endif
+typedef ethr_sint32_t ethr_sint_t;
+typedef ethr_uint32_t ethr_uint_t;
+#elif ETHR_SIZEOF_PTR == 8
+#ifndef ETHR_HAVE_INT64_T
+#error "No 64-bit integer type found"
+#endif
+typedef ethr_sint64_t ethr_sint_t;
+typedef ethr_uint64_t ethr_uint_t;
+#endif
+
/* __builtin_expect() is needed by both native atomics code
* and the fallback code */
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
@@ -386,7 +433,6 @@ typedef struct {
#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
# define ETHR_NEED_SPINLOCK_PROTOTYPES__
# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
-# define ETHR_NEED_ATOMIC_PROTOTYPES__
#endif
int ethr_init(ethr_init_data *);
@@ -399,7 +445,6 @@ void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
@@ -502,312 +547,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-/*
- * Map ethread native atomics to ethread API atomics.
- */
-typedef ethr_native_atomic_t ethr_atomic_t;
-#else
-typedef long ethr_atomic_t;
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-void ethr_atomic_init(ethr_atomic_t *, long);
-void ethr_atomic_set(ethr_atomic_t *, long);
-long ethr_atomic_read(ethr_atomic_t *);
-long ethr_atomic_inc_read(ethr_atomic_t *);
-long ethr_atomic_dec_read(ethr_atomic_t *);
-void ethr_atomic_inc(ethr_atomic_t *);
-void ethr_atomic_dec(ethr_atomic_t *);
-long ethr_atomic_add_read(ethr_atomic_t *, long);
-void ethr_atomic_add(ethr_atomic_t *, long);
-long ethr_atomic_read_band(ethr_atomic_t *, long);
-long ethr_atomic_read_bor(ethr_atomic_t *, long);
-long ethr_atomic_xchg(ethr_atomic_t *, long);
-long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
-long ethr_atomic_read_acqb(ethr_atomic_t *);
-long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
-void ethr_atomic_set_relb(ethr_atomic_t *, long);
-void ethr_atomic_dec_relb(ethr_atomic_t *);
-long ethr_atomic_dec_read_relb(ethr_atomic_t *);
-long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
-long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
-#endif
-
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
-/*
- * Fallbacks for atomics used in absence of a native implementation.
- */
-
-#define ETHR_ATOMIC_ADDR_BITS 10
-#define ETHR_ATOMIC_ADDR_SHIFT 6
-
-typedef struct {
- union {
- ethr_spinlock_t lck;
- char buf[ETHR_CACHE_LINE_SIZE];
- } u;
-} ethr_atomic_protection_t;
-
-extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
-
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- ethr_spin_lock(slp__); \
- { EXPS; } \
- ethr_spin_unlock(slp__); \
-} while (0)
-
-#endif
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_init(var, i);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_set(var, i);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_read(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
- return res;
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_add(var, incr);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_add_return(var, i);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
- return res;
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_inc(var);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_dec(var);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_inc_return(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_dec_return(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
- long mask)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_and_retold(var, mask);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
- long mask)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_or_retold(var, mask);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_xchg(var, new);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg(var, new, exp);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
- {
- res = *var;
- if (__builtin_expect(res == exp, 1))
- *var = new;
- });
- return res;
-#endif
-}
-
-/*
- * Important memory barrier requirements.
- *
- * The following atomic operations *must* supply a memory barrier of
- * at least the type specified by its suffix:
- * _acqb = acquire barrier
- * _relb = release barrier
- */
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_read_acqb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_inc_return_acqb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_set_relb(var, val);
-#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_dec_relb(var);
-#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_dec_return_relb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg_relb(var, new, exp);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
-#endif
-}
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
+#include "ethr_atomics.h"
typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
@@ -825,7 +565,7 @@ struct ethr_ts_event_ {
ethr_ts_event *prev;
ethr_event event;
void *udata;
- ethr_atomic_t uaflgs;
+ ethr_atomic32_t uaflgs;
unsigned uflgs;
unsigned iflgs; /* for ethr lib only */
short rgix; /* for ethr lib only */
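
The fixed-width type selection introduced above (ethr_sint32_t, ethr_sint64_t,
and the pointer-sized ethr_sint_t) can be sanity-checked at compile time; a
hedged sketch, not in the patch, using the classic negative-array-size trick:

/* Compilation fails if any detected size is wrong. */
typedef char ethr_ck_sint32_[sizeof(ethr_sint32_t) == 4 ? 1 : -1];
typedef char ethr_ck_sint64_[sizeof(ethr_sint64_t) == 8 ? 1 : -1];
typedef char ethr_ck_sint_[sizeof(ethr_sint_t) == sizeof(void *) ? 1 : -1];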
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index 5debb44756..f394d790d2 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -20,6 +20,21 @@
/* Define to the size of pointers */
#undef ETHR_SIZEOF_PTR
+/* Define to the size of int */
+#undef ETHR_SIZEOF_INT
+
+/* Define to the size of long */
+#undef ETHR_SIZEOF_LONG
+
+/* Define to the size of long long */
+#undef ETHR_SIZEOF_LONG_LONG
+
+/* Define to the size of __int64 */
+#undef ETHR_SIZEOF___INT64
+
+/* Define if bigendian */
+#undef ETHR_BIGENDIAN
+
/* Define if you want to disable native ethread implementations */
#undef ETHR_DISABLE_NATIVE_IMPLS
@@ -100,6 +115,27 @@
/* Define to the size of AO_t if libatomic_ops is used */
#undef ETHR_SIZEOF_AO_T
+/* Define if you have _InterlockedCompareExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+
+/* Define if you have _InterlockedDecrement64() */
+#undef ETHR_HAVE__INTERLOCKEDDECREMENT64
+
+/* Define if you have _InterlockedIncrement64() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT64
+
+/* Define if you have _InterlockedExchangeAdd64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+
+/* Define if you have _InterlockedExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+
+/* Define if you have _InterlockedAnd64() */
+#undef ETHR_HAVE__INTERLOCKEDAND64
+
+/* Define if you have _InterlockedOr64() */
+#undef ETHR_HAVE__INTERLOCKEDOR64
+
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
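
These detection macros feed the new 64-bit atomics on Windows (see the win32
entries in the diffstat). A hedged sketch, not in the patch, of the kind of
wrapper such a macro enables, built on the documented MSVC intrinsic:

#include <intrin.h>

/* Usable when ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64 is defined;
 * returns the previous value of *var, with full barrier semantics. */
static __inline __int64
win_native_cmpxchg64(volatile __int64 *var, __int64 new_val, __int64 exp)
{
    return _InterlockedCompareExchange64(var, new_val, exp);
}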
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index e8e529dd48..16935084b1 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -22,24 +22,35 @@
* Author: Rickard Green
*/
-#ifndef ETHR_GCC_ATOMIC_H__
-#define ETHR_GCC_ATOMIC_H__
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_GCC_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_GCC_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_GCC_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_GCC_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#ifndef ETHR_GCC_ATOMIC_COMMON__
+#define ETHR_GCC_ATOMIC_COMMON__
-#define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-/* Enable immediate read/write on platforms where we know it is safe */
+#define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 0
#if defined(__i386__) || defined(__x86_64__) || defined(__sparc__) \
|| defined(__powerpc__) || defined(__ppc__) || defined(__mips__)
-# undef ETHR_IMMED_ATOMIC_SET_GET_SAFE__
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# undef ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
+# define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 1
#endif
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
-
+#if defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
/*
* According to the documentation this is what we want:
@@ -47,34 +58,73 @@ typedef struct {
* However, __sync_synchronize() is known to erroneously be
* a noop on at least some platforms with some gcc versions.
 * This has supposedly been fixed in some gcc version, but we
- * don't know from which version. Therefore, we use the
- * workaround implemented below on all gcc versions except
- * for gcc 4.2 or above for MIPS, where it's been verified.
+ * don't know from which version. Therefore, we only use
+ * it when it has been verified to work. Otherwise
+ * we use a workaround.
*/
#if defined(__mips__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+/* __sync_synchronize() has been verified to work here */
#define ETHR_MEMORY_BARRIER __sync_synchronize()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __sync_synchronize()
+#elif defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+/* Use fence instructions directly instead of workaround */
+#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
#else
+/* Workaround */
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
- (void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
+ volatile ethr_sint32_t x___ = 0; \
+ (void) __sync_val_compare_and_swap(&x___, (ethr_sint32_t) 0, (ethr_sint32_t) 1); \
} while (0)
-#endif
#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+
+#define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_GCC_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#else
+#error "Unsupported integer size"
+#endif
+
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
var->counter = value;
#else
/*
 * Unfortunately no __sync_store() or similar exists in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- long act = 0, exp;
+ ETHR_AINT_T__ act = 0, exp;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, value);
@@ -82,80 +132,86 @@ ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
#endif
}
-#define ethr_native_atomic_init ethr_native_atomic_set
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ ETHR_NATMC_FUNC__(set)(var, value);
+}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
return var->counter;
#else
/*
 * Unfortunately no __sync_fetch() or similar exists in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- return __sync_add_and_fetch(&var->counter, (long) 0);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
(void) __sync_add_and_fetch(&var->counter, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
return __sync_add_and_fetch(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) __sync_add_and_fetch(&var->counter, (long) 1);
+ (void) __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) __sync_sub_and_fetch(&var->counter, (long) 1);
+ (void) __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, (long) 1);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return __sync_sub_and_fetch(&var->counter, (long) 1);
+ return __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
return __sync_fetch_and_and(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) __sync_fetch_and_or(&var->counter, mask);
+ return (ETHR_AINT_T__) __sync_fetch_and_or(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
return __sync_val_compare_and_swap(&var->counter, old, new);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
- long exp, act = 0;
+ ETHR_AINT_T__ exp, act = 0;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, new);
@@ -167,22 +223,68 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, (long) 0);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_AINT_T__ val = var->counter;
+ ETHR_COMPILER_BARRIER;
+ return val;
+#else
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
+#endif
}
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
-#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
-#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->counter = i;
+#else
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
+#endif
+}
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(inc_return)(var);
+}
-#endif
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_NATMC_FUNC__(dec)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(dec_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
#endif
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
+
#endif
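
The ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ case above relies on
x86/x86_64 being TSO: plain loads already carry acquire semantics and plain
stores release semantics, so only compiler reordering has to be fenced. A
hedged illustration, not in the patch:

#define MY_CC_BARRIER __asm__ __volatile__("" : : : "memory")

static inline int acquire_load_tso(volatile int *p)
{
    int v = *p;        /* x86 load: hardware acquire ordering */
    MY_CC_BARRIER;     /* stop the compiler from hoisting later accesses */
    return v;
}

static inline void release_store_tso(volatile int *p, int v)
{
    MY_CC_BARRIER;     /* stop the compiler from sinking earlier accesses */
    *p = v;            /* x86 store: hardware release ordering */
}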
diff --git a/erts/include/internal/gcc/ethread.h b/erts/include/internal/gcc/ethread.h
index bb378e31e0..392a1aa2b2 100644
--- a/erts/include/internal/gcc/ethread.h
+++ b/erts/include/internal/gcc/ethread.h
@@ -25,6 +25,16 @@
#ifndef ETHREAD_GCC_H__
#define ETHREAD_GCC_H__
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
+
+#endif
#endif
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 52d01aab32..4e402f261a 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -23,14 +23,24 @@
*
* This code requires a 486 or newer processor.
*/
-#ifndef ETHREAD_I386_ATOMIC_H
-#define ETHREAD_I386_ATOMIC_H
-/* An atomic is an aligned long accessed via locked operations.
- */
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_X86_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_X86_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_X86_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_X86_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#ifndef ETHR_X86_ATOMIC_COMMON__
+#define ETHR_X86_ATOMIC_COMMON__
+
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
@@ -40,123 +50,161 @@ typedef struct {
#else
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
+ volatile ethr_sint32_t x___ = 0; \
__asm__ __volatile__("lock; incl %0" : "=m"(x___) : "m"(x___) : "memory"); \
} while (0)
#endif
-#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
-
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_X86_ATOMIC_COMMON__ */
-#ifdef __x86_64__
-#define LONG_SUFFIX "q"
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
#else
-#define LONG_SUFFIX "l"
+#error "Unsupported integer size"
#endif
+/* An atomic is an aligned ETHR_AINT_T__ accessed via locked operations.
+ */
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
__asm__ __volatile__(
- "lock; add" LONG_SUFFIX " %1, %0"
+ "lock; add" ETHR_AINT_SUFFIX__ " %1, %0"
: "=m"(var->counter)
: "ir"(incr), "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; inc" LONG_SUFFIX " %0"
+ "lock; inc" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; dec" LONG_SUFFIX " %0"
+ "lock; dec" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long tmp;
+ ETHR_AINT_T__ tmp;
tmp = incr;
__asm__ __volatile__(
- "lock; xadd" LONG_SUFFIX " %0, %1" /* xadd didn't exist prior to the 486 */
+ "lock; xadd" ETHR_AINT_SUFFIX__ " %0, %1" /* xadd didn't exist prior to the 486 */
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
return tmp + incr;
}
-#define ethr_native_atomic_inc_return(var) ethr_native_atomic_add_return((var), 1)
-#define ethr_native_atomic_dec_return(var) ethr_native_atomic_add_return((var), -1)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) 1);
+}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) -1);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
__asm__ __volatile__(
- "lock; cmpxchg" LONG_SUFFIX " %2, %3"
+ "lock; cmpxchg" ETHR_AINT_SUFFIX__ " %2, %3"
: "=a"(old), "=m"(var->counter)
: "r"(new), "m"(var->counter), "0"(old)
: "cc", "memory"); /* full memory clobber to make this a compiler barrier */
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp & mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp & mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp | mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp | mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long tmp = val;
+ ETHR_AINT_T__ tmp = val;
__asm__ __volatile__(
- "xchg" LONG_SUFFIX " %0, %1"
+ "xchg" ETHR_AINT_SUFFIX__ " %0, %1"
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
@@ -167,57 +215,73 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- long val;
+ ETHR_AINT_T__ val;
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
val = var->counter;
#else
- val = ethr_native_atomic_add_return(var, 0);
+ val = ETHR_NATMC_FUNC__(add_return)(var, 0);
#endif
__asm__ __volatile__("" : : : "memory");
return val;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
__asm__ __volatile__("" : : : "memory");
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
var->counter = i;
#else
- (void) ethr_native_atomic_xchg(var, i);
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
__asm__ __volatile__("" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("" : : : "memory");
- ethr_native_atomic_dec(var);
+ ETHR_NATMC_FUNC__(dec)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("" : : : "memory");
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
}
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
-#undef LONG_SUFFIX
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHREAD_I386_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
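
The LONG_SUFFIX to ETHR_AINT_SUFFIX__ rename above parametrizes the x86
operand-size suffix: the 32-bit instantiation emits "l" forms (addl, xaddl,
cmpxchgl) and the 64-bit one "q" forms. A hedged standalone sketch, not in the
patch, of the same xadd technique at a fixed 32-bit width:

static inline int fetch_and_add32(volatile int *p, int incr)
{
    int prev = incr;
    __asm__ __volatile__(
        "lock; xaddl %0, %1"  /* atomically: tmp = *p; *p += prev; prev = tmp */
        : "=r"(prev), "+m"(*p)
        : "0"(prev));
    return prev;              /* value of *p before the addition */
}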
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index ed43e77279..b5a17caefb 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,12 @@
#ifndef ETHREAD_I386_ETHREAD_H
#define ETHREAD_I386_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index a6eb43a0bd..d56693dbf8 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -46,17 +46,39 @@
* - AO_store()
* - AO_compare_and_swap()
*
- * The `AO_t' type also have to be at least as large as
- * `void *' and `long' types.
+ * The `AO_t' type also has to be at least as large as the `void *' type.
*/
#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
#error The AO_t type is too small
#endif
+#if ETHR_SIZEOF_AO_T == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_SIZEOF_AO_T == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
+#else
+#error "Unsupported integer size"
+#endif
+
+#if ETHR_SIZEOF_AO_T == 8
+typedef union {
+ volatile AO_t counter;
+ ethr_sint32_t sint32[2];
+} ETHR_ATMC_T__;
+#else
typedef struct {
volatile AO_t counter;
-} ethr_native_atomic_t;
+} ETHR_ATMC_T__;
+#endif
#define ETHR_MEMORY_BARRIER AO_nop_full()
#ifdef AO_HAVE_nop_write
@@ -72,123 +94,151 @@ typedef struct {
#ifdef AO_NO_DD_ORDERING
# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
#else
-# define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("":::"memory")
+# define ETHR_READ_DEPEND_MEMORY_BARRIER AO_compiler_barrier()
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+#if ETHR_SIZEOF_AO_T == 8
+/*
+ * We also need to provide an ethr_native_atomic32_addr(), since
+ * this 64-bit implementation will be used to implement 32-bit
+ * native atomics.
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ETHR_ATMC_T__ *var)
+{
+ ETHR_ASSERT(((void *) &var->sint32[0]) == ((void *) &var->counter));
+#ifdef ETHR_BIGENDIAN
+ return &var->sint32[1];
+#else
+ return &var->sint32[0];
#endif
+}
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_SIZEOF_AO_T == 8 */
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
AO_store(&var->counter, (AO_t) value);
}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
- ethr_native_atomic_set(var, value);
+ ETHR_NATMC_FUNC__(set)(var, value);
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
- return (long) AO_load(&var->counter);
+ return (ETHR_AINT_T__) AO_load(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
#ifdef AO_HAVE_fetch_and_add
- return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+ return ((ETHR_AINT_T__) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
#else
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp + (AO_t) incr;
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) new;
+ return (ETHR_AINT_T__) new;
}
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void) ethr_native_atomic_add_return(var, incr);
+ (void) ETHR_NATMC_FUNC__(add_return)(var, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_add1
- return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_add1(&var->counter)) + 1;
#else
- return ethr_native_atomic_add_return(var, 1);
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_inc_return(var);
+ (void) ETHR_NATMC_FUNC__(inc_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_sub1
- return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1(&var->counter)) - 1;
#else
- return ethr_native_atomic_add_return(var, -1);
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_dec_return(var);
+ (void) ETHR_NATMC_FUNC__(dec_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp & ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp | ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
return act;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
while (1) {
AO_t exp = AO_load(&var->counter);
if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
@@ -196,97 +246,105 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_load_acquire
- return (long) AO_load_acquire(&var->counter);
+ return (ETHR_AINT_T__) AO_load_acquire(&var->counter);
#else
- long res = ethr_native_atomic_read(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_add1_acquire
- return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_acquire(&var->counter)) + 1;
#else
- long res = ethr_native_atomic_add_return(var, 1);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(add_return)(var, 1);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
#ifdef AO_HAVE_store_release
AO_store_release(&var->counter, (AO_t) value);
#else
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_set(var, value);
+ ETHR_NATMC_FUNC__(set)(var, value);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_sub1_release
- return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_release(&var->counter)) - 1;
#else
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_dec_return_relb(var);
+ (void) ETHR_NATMC_FUNC__(dec_return_relb)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
#ifdef AO_HAVE_compare_and_swap_acquire
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
AO_nop_full();
return act;
#else
- long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ETHR_AINT_T__ act = ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
ETHR_MEMORY_BARRIER;
return act;
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
#ifdef AO_HAVE_compare_and_swap_release
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
return act;
#else
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
#endif
}
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
-#endif
+#endif /* !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS) */
+
+#endif /* ETHR_LIBATOMIC_OPS_ATOMIC_H__ */
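
With the generic libatomic_ops backend, everything above reduces to one universal primitive: whenever a fetch-and-add variant is absent, the operation is rebuilt as a compare-and-swap retry loop, and the cmpxchg wrapper re-reads after each failed swap, looping for as long as the value read back still equals the expected one, so that a swap failure racing with a store of the expected value is never misreported. A minimal standalone sketch of the retry pattern (illustrative only, not the patch's code):

    #include <atomic_ops.h>

    /* Add incr to *counter and return the new value, built purely from
     * AO_load() and AO_compare_and_swap(), as the fallback above does. */
    static AO_t
    cas_add_return(volatile AO_t *counter, AO_t incr)
    {
        for (;;) {
            AO_t exp = AO_load(counter);   /* value we expect to find  */
            AO_t new = exp + incr;         /* value we want to install */
            if (AO_compare_and_swap(counter, exp, new))
                return new;                /* nobody raced us: done    */
            /* lost a race; reload and retry */
        }
    }
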
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index f21f7c9588..522f433649 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -28,31 +28,39 @@
#ifndef ETHREAD_PPC_ATOMIC_H
#define ETHREAD_PPC_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
typedef struct {
- volatile int counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+#define ethr_native_atomic32_set(v, i) ethr_native_atomic32_init((v), (i))
-static ETHR_INLINE int
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
-static ETHR_INLINE int
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -69,16 +77,16 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, int incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ethr_native_atomic32_add_return(var, incr);
}
-static ETHR_INLINE int
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -95,16 +103,16 @@ ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_inc_return(var);
+ (void)ethr_native_atomic32_inc_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -121,16 +129,16 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_dec_return(var);
+ (void)ethr_native_atomic32_dec_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -146,10 +154,10 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -165,10 +173,10 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -183,10 +191,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
return tmp;
}
-static ETHR_INLINE int
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
- int old;
+ ethr_sint32_t old;
__asm__ __volatile__(
"eieio\n\t"
@@ -210,20 +220,20 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
{
-    long res = ethr_native_atomic_read(var);
+    ethr_sint32_t res = ethr_native_atomic32_read(var);
    ETHR_MEMORY_BARRIER;
    return res;
}
-#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
-#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic32_set_relb ethr_native_atomic32_xchg
+#define ethr_native_atomic32_inc_return_acqb ethr_native_atomic32_inc_return
+#define ethr_native_atomic32_dec_relb ethr_native_atomic32_dec_return
+#define ethr_native_atomic32_dec_return_relb ethr_native_atomic32_dec_return
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic32_cmpxchg_acqb ethr_native_atomic32_cmpxchg
+#define ethr_native_atomic32_cmpxchg_relb ethr_native_atomic32_cmpxchg
#endif /* ETHR_TRY_INLINE_FUNCS */
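
The hunks above only rename the types and functions; the PowerPC inline-assembly bodies are unchanged and elided by the diff context. For orientation, add_return and its siblings wrap the classic load-reserve/store-conditional retry loop, roughly like this (a sketch of the existing pattern, not verbatim patch code):

    static ETHR_INLINE ethr_sint32_t
    ethr_native_atomic32_add_return(ethr_native_atomic32_t *var,
                                    ethr_sint32_t incr)
    {
        ethr_sint32_t tmp;
        __asm__ __volatile__(
            "eieio\n\t"            /* order prior stores first            */
            "1:\n\t"
            "lwarx   %0,0,%1\n\t"  /* load word and take a reservation    */
            "add     %0,%2,%0\n\t" /* compute the new value               */
            "stwcx.  %0,0,%1\n\t"  /* store only if reservation held      */
            "bne-    1b\n\t"       /* lost the reservation: retry         */
            "isync"                /* acquire-like barrier after success  */
            : "=&r"(tmp)
            : "r"(&var->counter), "r"(incr)
            : "cc", "memory");
        return tmp;
    }
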
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
index 104ec287e0..93da8a0429 100644
--- a/erts/include/internal/pthread/ethr_event.h
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -30,31 +30,9 @@
#include <linux/futex.h>
#include <sys/time.h>
-/*
- * Note: Linux futexes operate on 32-bit integers, but
- * ethr_native_atomic_t are 64-bits on 64-bit
- * platforms. This has to be taken into account.
- * Therefore, in each individual value used each
- * byte look the same.
- */
-
-#if ETHR_SIZEOF_PTR == 8
-
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
-#define ETHR_EVENT_OFF__ 0x7777777777777777L
-#define ETHR_EVENT_ON__ 0L
-
-#elif ETHR_SIZEOF_PTR == 4
-
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
-#define ETHR_EVENT_OFF__ 0x77777777L
-#define ETHR_EVENT_ON__ 0L
-
-#else
-
-#error ehrm...
-
-#endif
+#define ETHR_EVENT_OFF_WAITER__ ((ethr_sint32_t) -1)
+#define ETHR_EVENT_OFF__ ((ethr_sint32_t) 1)
+#define ETHR_EVENT_ON__ ((ethr_sint32_t) 0)
#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
@@ -65,11 +43,17 @@
#endif
typedef struct {
- ethr_atomic_t futex;
+ ethr_atomic32_t futex;
} ethr_event;
-#define ETHR_FUTEX__(FTX, OP, VAL) \
- (-1 == syscall(__NR_futex, (void *) (FTX), (OP), (int) (VAL), NULL, NULL, 0)\
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, \
+ (void *) ethr_atomic32_addr((FTX)), \
+ (OP), \
+ (int) (VAL), \
+ NULL, \
+ NULL, \
+ 0) \
? errno : 0)
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
@@ -77,9 +61,9 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint32_t val;
ETHR_WRITE_MEMORY_BARRIER;
- val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg(&e->futex, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
if (res != 0)
@@ -90,7 +74,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- ethr_atomic_set(&e->futex, ETHR_EVENT_OFF__);
+ ethr_atomic32_set(&e->futex, ETHR_EVENT_OFF__);
ETHR_MEMORY_BARRIER;
}
@@ -100,7 +84,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
/* --- Posix mutex/cond implementation of events ---------------------------- */
typedef struct {
- ethr_atomic_t state;
+ ethr_atomic32_t state;
pthread_mutex_t mtx;
pthread_cond_t cnd;
} ethr_event;
@@ -114,9 +98,9 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint32_t val;
ETHR_WRITE_MEMORY_BARRIER;
- val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg(&e->state, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = pthread_mutex_lock(&e->mtx);
if (res != 0)
@@ -133,7 +117,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- ethr_atomic_set(&e->state, ETHR_EVENT_OFF__);
+ ethr_atomic32_set(&e->state, ETHR_EVENT_OFF__);
ETHR_MEMORY_BARRIER;
}
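
The hunks above convert the futex word itself to a 32-bit atomic, which is what the kernel futex interface actually operates on, so the old pointer-sized encoding and its every-byte-looks-the-same workaround disappear. The wake side shown above only issues FUTEX_WAKE when the old state was ETHR_EVENT_OFF_WAITER__, i.e. when a waiter announced itself before blocking. A hedged sketch of the matching wait side, reusing the helpers defined above (the real wait code lives outside these hunks; this is illustrative only):

    #include <errno.h>

    static int
    event_wait_sketch(ethr_event *e)
    {
        /* Announce the waiter: move OFF -> OFF_WAITER (note that
         * ethr_atomic32_cmpxchg() takes the new value before the
         * expected one). If the event is already ON, don't block. */
        ethr_sint32_t val = ethr_atomic32_cmpxchg(&e->futex,
                                                  ETHR_EVENT_OFF_WAITER__,
                                                  ETHR_EVENT_OFF__);
        while (val != ETHR_EVENT_ON__) {
            /* Sleeps only while the futex word still reads OFF_WAITER;
             * a concurrent set() makes the syscall return immediately. */
            int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__,
                                   ETHR_EVENT_OFF_WAITER__);
            if (res != 0 && res != EINTR && res != EWOULDBLOCK)
                return res;
            val = ethr_atomic32_read(&e->futex);
        }
        return 0;
    }
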
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 2da6472393..00380dbf07 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -21,49 +21,86 @@
* Native ethread atomics on SPARC V9.
* Author: Mikael Pettersson.
*/
-#ifndef ETHR_SPARC32_ATOMIC_H
-#define ETHR_SPARC32_ATOMIC_H
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_SPARC_V9_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_SPARC_V9_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#ifndef ETHR_SPARC_V9_ATOMIC_COMMON__
+#define ETHR_SPARC_V9_ATOMIC_COMMON__
#define ETHR_MEMORY_BARRIER \
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
: : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-
-#if defined(__arch64__)
-#define CASX "casx"
+#endif /* ETHR_SPARC_V9_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_CAS__ "cas"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_CAS__ "casx"
#else
-#define CASX "cas"
+#error "Unsupported integer size"
#endif
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old+incr;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -73,46 +110,46 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, 1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, -1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old & mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -121,17 +158,17 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old | mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -140,17 +177,17 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long old, new;
+ ETHR_AINT_T__ old, new;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad");
do {
old = var->counter;
new = val;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -159,12 +196,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -178,58 +215,63 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
/* TODO: relax acquire barriers */
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_read(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- ethr_native_atomic_set(var, i);
+ ETHR_NATMC_FUNC__(set)(var, i);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
__asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- ethr_native_atomic_dec(var);
+ ETHR_NATMC_FUNC__(dec)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
- long res = ethr_native_atomic_cmpxchg(var, new, old);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
__asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
return res;
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- return ethr_native_atomic_cmpxchg(var, new, old);
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHR_SPARC32_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_CAS__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index dca113b4d6..aea9794390 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,12 @@
#ifndef ETHREAD_SPARC32_ETHREAD_H
#define ETHREAD_SPARC32_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 69569d82d1..48e4c0c6c8 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -24,92 +24,102 @@
#ifndef ETHREAD_TILE_ATOMIC_H
#define ETHREAD_TILE_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
#include <atomic.h>
/* An atomic is an aligned int accessed via locked operations.
*/
typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __insn_mf()
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_set(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
atomic_exchange_acq(&var->counter, i);
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
atomic_increment(&var->counter);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
atomic_decrement(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ethr_native_atomic32_add_return(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ethr_native_atomic32_add_return(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
return atomic_and_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
return atomic_or_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
return atomic_exchange_acq(&var->counter, val);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
@@ -118,54 +128,58 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
{
- long res = ethr_native_atomic_read(var);
+ ethr_sint32_t res = ethr_native_atomic32_read(var);
ETHR_MEMORY_BARRIER;
return res;
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ethr_sint32_t res = ethr_native_atomic32_inc_return(var);
ETHR_MEMORY_BARRIER;
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long val)
+ethr_native_atomic32_set_relb(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_set(var, val);
+ ethr_native_atomic32_set(var, val);
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec_relb(ethr_native_atomic32_t *var)
{
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_dec(var);
+ ethr_native_atomic32_dec(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return_relb(ethr_native_atomic32_t *var)
{
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_dec_return(var);
+ return ethr_native_atomic32_dec_return(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_acqb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
{
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_relb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
{
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
index 500459dd6c..60def01a7e 100644
--- a/erts/include/internal/win/ethr_atomic.h
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -22,223 +22,394 @@
* Author: Rickard Green
*/
-#ifndef ETHR_WIN_ATOMIC_H__
-#define ETHR_WIN_ATOMIC_H__
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_WIN_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_WIN_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_WIN_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_WIN_ATOMIC64_H__
+#ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+/* _InterlockedCompareExchange64() required... */
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#endif
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+
+#ifndef ETHR_WIN_ATOMIC_COMMON__
+#define ETHR_WIN_ATOMIC_COMMON__
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if defined(_M_IX86) || defined(_M_AMD64) || defined(_M_IA64)
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 1
#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 0
#endif
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
/*
- * No configure test checking for _Interlocked*_{acq,rel} and
- * Interlocked*{Acquire,Release} have been written yet...
+ * No configure test checking for interlocked acquire/release
+ * versions has been written yet. It should define
+ * ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS if, and
+ * only if, all used interlocked operations with barriers
+ * exist.
*
 * Note that these are pure optimizations for the Itanium
 * processor.
*/
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
-#pragma intrinsic(_InterlockedCompareExchange_acq)
+#include <intrin.h>
+#undef ETHR_COMPILER_BARRIER
+#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+#include <emmintrin.h>
+#include <mmintrin.h>
+#pragma intrinsic(_mm_mfence)
+#define ETHR_MEMORY_BARRIER _mm_mfence()
+#pragma intrinsic(_mm_sfence)
+#define ETHR_WRITE_MEMORY_BARRIER _mm_sfence()
+#pragma intrinsic(_mm_lfence)
+#define ETHR_READ_MEMORY_BARRIER _mm_lfence()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+
+#else
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile long x___ = 0; \
+ _InterlockedCompareExchange(&x___, (long) 1, (long) 0); \
+} while (0)
+
#endif
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+
+#endif /* ETHR_WIN_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
+/*
+ * All operations used are available as 32-bit intrinsics.
+ */
+
+#pragma intrinsic(_InterlockedDecrement)
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd_acq)
+#pragma intrinsic(_InterlockedIncrement_acq)
+#pragma intrinsic(_InterlockedDecrement_rel)
+#pragma intrinsic(_InterlockedCompareExchange_acq)
#pragma intrinsic(_InterlockedCompareExchange_rel)
#endif
+#define ETHR_ILCKD__(X) _Interlocked ## X
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## _acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## _rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+
+/*
+ * _InterlockedCompareExchange64() is required. The other intrinsics
+ * may not be available, but if they are missing we can generate them
+ * ourselves from it.
+ */
+#pragma intrinsic(_InterlockedCompareExchange64)
+
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) *(PTR)
+#else
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) (__int64) 0
+#endif
+
+#define ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, PTR, NEW, ACT, EXP, OPS, RET) \
+{ \
+ __int64 NEW, ACT, EXP; \
+ ACT = ETHR_OWN_ILCKD_INIT_VAL__(PTR); \
+ do { \
+ EXP = ACT; \
+ { OPS; } \
+ ACT = _InterlockedCompareExchange64(PTR, NEW, EXP); \
+ } while (ACT != EXP); \
+ return RET; \
+}
+
+#define ETHR_OWN_ILCKD_1_IMPL__(FUNC, NEW, ACT, EXP, OPS, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+#define ETHR_OWN_ILCKD_2_IMPL__(FUNC, NEW, ACT, EXP, OPS, ARG, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr, __int64 ARG) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+
+#ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64
+#pragma intrinsic(_InterlockedDecrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedDecrement64, new, act, exp,
+ new = act - 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64
+#pragma intrinsic(_InterlockedIncrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedIncrement64, new, act, exp,
+ new = act + 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, new, act, exp,
+ new = act + arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+#pragma intrinsic(_InterlockedExchange64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchange64, new, act, exp,
+ new = arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDAND64
+#pragma intrinsic(_InterlockedAnd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedAnd64, new, act, exp,
+ new = act & arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDOR64
+#pragma intrinsic(_InterlockedOr64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedOr64, new, act, exp,
+ new = act | arg, arg, act)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd64_acq)
+#pragma intrinsic(_InterlockedIncrement64_acq)
+#pragma intrinsic(_InterlockedDecrement64_rel)
+#pragma intrinsic(_InterlockedCompareExchange64_acq)
+#pragma intrinsic(_InterlockedCompareExchange64_rel)
+#endif
+
+#define ETHR_ILCKD__(X) _Interlocked ## X ## 64
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64_acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64_rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+
+#else
+#error "Unsupported integer size"
+#endif
typedef struct {
- volatile LONG value;
-} ethr_native_atomic_t;
+ volatile ETHR_AINT_T__ value;
+} ETHR_ATMC_T__;
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile LONG x___ = 0; \
- _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->value;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
+#else
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+#endif
}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
#else
- (void) InterlockedExchange(&var->value, (LONG) i);
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
return var->value;
#else
- return InterlockedExchangeAdd(&var->value, (LONG) 0);
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+ (void) ETHR_ILCKD__(ExchangeAdd)(&var->value, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
- return tmp + i;
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, i) + i;
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) InterlockedIncrement(&var->value);
+ (void) ETHR_ILCKD__(Increment)(&var->value);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) InterlockedDecrement(&var->value);
+ (void) ETHR_ILCKD__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return (long) InterlockedIncrement(&var->value);
+ return ETHR_ILCKD__(Increment)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return (long) InterlockedDecrement(&var->value);
+ return ETHR_ILCKD__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) _InterlockedAnd(&var->value, mask);
+ return ETHR_ILCKD__(And)(&var->value, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) _InterlockedOr(&var->value, mask);
+ return ETHR_ILCKD__(Or)(&var->value, mask);
}
-
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+ return ETHR_ILCKD__(CompareExchange)(&var->value, new, old);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
- return (long) InterlockedExchange(&var->value, (LONG) new);
+ return ETHR_ILCKD__(Exchange)(&var->value, new);
}
/*
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
- return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_AINT_T__ val = var->value;
+ ETHR_COMPILER_BARRIER;
+ return val;
#else
- return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+ return ETHR_ILCKD_ACQ__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
- return (long) InterlockedIncrementAcquire(&var->value);
-#else
- return (long) InterlockedIncrement(&var->value);
-#endif
+ return ETHR_ILCKD_ACQ__(Increment)(&var->value);
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- (void) InterlockedExchange(&var->value, (LONG) i);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->value = i;
+#else
+ (void) ETHR_ILCKD_REL__(Exchange)(&var->value, i);
+#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
- (void) InterlockedDecrementRelease(&var->value);
-#else
- (void) InterlockedDecrement(&var->value);
-#endif
+ (void) ETHR_ILCKD_REL__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
- return (long) InterlockedDecrementRelease(&var->value);
-#else
- return (long) InterlockedDecrement(&var->value);
-#endif
+ return ETHR_ILCKD_REL__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
- return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
-#else
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+ return ETHR_ILCKD_ACQ__(CompareExchange)(&var->value, new, old);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
-
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
- return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
-#else
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+ return ETHR_ILCKD_REL__(CompareExchange)(&var->value, new, old);
}
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif
+#undef ETHR_ILCKD__
+#undef ETHR_ILCKD_ACQ__
+#undef ETHR_ILCKD_REL__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#undef ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+
+#endif /* _MSC_VER */
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
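
The ETHR_OWN_ILCKD_*_IMPL__ machinery above synthesizes every missing 64-bit intrinsic from _InterlockedCompareExchange64 alone. Expanding ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, ...) by hand shows the shape of the generated code (whitespace and comments added; ptr and arg are the macro parameters):

    static __forceinline __int64
    _InterlockedExchangeAdd64(__int64 volatile *ptr, __int64 arg)
    {
        __int64 new, act, exp;
        act = ETHR_OWN_ILCKD_INIT_VAL__(ptr); /* *ptr, or 0 when plain
                                                 reads cannot be trusted */
        do {
            exp = act;                  /* value we believe is there     */
            { new = act + arg; }        /* the OPS argument              */
            act = _InterlockedCompareExchange64(ptr, new, exp);
        } while (act != exp);           /* lost a race: retry            */
        return act;                     /* old value, like the intrinsic */
    }
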
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
index af57c20f91..598816b2c6 100644
--- a/erts/include/internal/win/ethr_event.h
+++ b/erts/include/internal/win/ethr_event.h
@@ -21,22 +21,24 @@
* Author: Rickard Green
*/
-#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
-#define ETHR_EVENT_OFF__ ((LONG) 1)
-#define ETHR_EVENT_ON__ ((LONG) 0)
+#define ETHR_EVENT_OFF_WAITER__ ((long) -1)
+#define ETHR_EVENT_OFF__ ((long) 1)
+#define ETHR_EVENT_ON__ ((long) 0)
typedef struct {
- volatile LONG state;
+ volatile long state;
HANDLE handle;
} ethr_event;
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+#pragma intrinsic(_InterlockedExchange)
+
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- /* InterlockedExchange() imply a full memory barrier which is important */
- LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+    /* _InterlockedExchange() implies a full memory barrier, which is important */
+ long state = _InterlockedExchange(&e->state, ETHR_EVENT_ON__);
if (state == ETHR_EVENT_OFF_WAITER__) {
if (!SetEvent(e->handle))
ETHR_FATAL_ERROR__(ethr_win_get_errno__());
@@ -46,7 +48,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- /* InterlockedExchange() imply a full memory barrier which is important */
+    /* _InterlockedExchange() implies a full memory barrier, which is important */
InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
}
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
index b52710f6a3..c01b17cf14 100644
--- a/erts/include/internal/win/ethread.h
+++ b/erts/include/internal/win/ethread.h
@@ -25,7 +25,11 @@
#ifndef ETHREAD_WIN_H__
#define ETHREAD_WIN_H__
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
#endif
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index 0d3181cace..757b3b24e2 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -283,6 +283,7 @@ endif
ETHR_THR_LIB_BASE_DIR=@ETHR_THR_LIB_BASE_DIR@
ifneq ($(strip $(ETHR_LIB_NAME)),)
ETHREAD_LIB_SRC=common/ethr_aux.c \
+ common/ethr_atomics.c \
common/ethr_mutex.c \
common/ethr_cbf.c \
$(ETHR_THR_LIB_BASE_DIR)/ethread.c \
@@ -381,6 +382,11 @@ $(ERTS_LIB): $(ERTS_LIB_OBJS)
# Object files
#
+ifeq ($(TYPE)-@GCC@,debug-yes)
+$(r_OBJ_DIR)/ethr_aux.o: common/ethr_aux.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -Wno-unused-function $(INCLUDES) -c $< -o $@
+endif
+
$(r_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
@@ -445,6 +451,7 @@ INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/ethread.h \
$(ERTS_INCL_INT)/ethr_mutex.h \
$(ERTS_INCL_INT)/ethr_optimized_fallbacks.h \
+ $(ERTS_INCL_INT)/ethr_atomics.h \
$(ERTS_INCL_INT)/$(TARGET)/ethread.mk \
$(ERTS_INCL_INT)/$(TARGET)/erts_internal.mk \
$(ERTS_INCL_INT)/$(TARGET)/ethread_header_config.h \
diff --git a/erts/lib_src/common/ethr_atomics.c b/erts/lib_src/common/ethr_atomics.c
new file mode 100644
index 0000000000..77694e5952
--- /dev/null
+++ b/erts/lib_src/common/ethr_atomics.c
@@ -0,0 +1,402 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: The ethread atomic API
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_ATOMIC_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+#endif
+
+int
+ethr_init_atomics(void)
+{
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+ {
+	int i, res;
+ for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
+ res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
+ if (res != 0)
+ return res;
+ }
+ }
+#endif
+ return 0;
+}
+
+/*
+ * --- Pointer size atomics ---------------------------------------------------
+ */
+
+ethr_sint_t *
+ethr_atomic_addr(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(var);
+ return ethr_atomic_addr__(var);
+}
+
+void
+ethr_atomic_init(ethr_atomic_t *var, ethr_sint_t i)
+{
+ ETHR_ASSERT(var);
+ ethr_atomic_init__(var, i);
+}
+
+void
+ethr_atomic_set(ethr_atomic_t *var, ethr_sint_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set__(var, i);
+}
+
+ethr_sint_t
+ethr_atomic_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read__(var);
+}
+
+ethr_sint_t
+ethr_atomic_add_read(ethr_atomic_t *var, ethr_sint_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_add_read__(var, incr);
+}
+
+ethr_sint_t
+ethr_atomic_inc_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read__(var);
+}
+
+ethr_sint_t
+ethr_atomic_dec_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read__(var);
+}
+
+void
+ethr_atomic_add(ethr_atomic_t *var, ethr_sint_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_add__(var, incr);
+}
+
+void
+ethr_atomic_inc(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_inc__(var);
+}
+
+void
+ethr_atomic_dec(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec__(var);
+}
+
+ethr_sint_t
+ethr_atomic_read_band(ethr_atomic_t *var, ethr_sint_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_band__(var, mask);
+}
+
+ethr_sint_t
+ethr_atomic_read_bor(ethr_atomic_t *var, ethr_sint_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_bor__(var, mask);
+}
+
+ethr_sint_t
+ethr_atomic_xchg(ethr_atomic_t *var, ethr_sint_t new)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_xchg__(var, new);
+}
+
+ethr_sint_t
+ethr_atomic_cmpxchg(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t expected)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg__(var, new, expected);
+}
+
+ethr_sint_t
+ethr_atomic_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_acqb__(var);
+}
+
+ethr_sint_t
+ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read_acqb__(var);
+}
+
+void
+ethr_atomic_set_relb(ethr_atomic_t *var, ethr_sint_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set_relb__(var, i);
+}
+
+void
+ethr_atomic_dec_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec_relb__(var);
+}
+
+ethr_sint_t
+ethr_atomic_dec_read_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read_relb__(var);
+}
+
+ethr_sint_t
+ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_acqb__(var, new, exp);
+}
+
+ethr_sint_t
+ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_relb__(var, new, exp);
+}
+
+
+/*
+ * --- 32-bit atomics ---------------------------------------------------------
+ */
+
+ethr_sint32_t *
+ethr_atomic32_addr(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(var);
+ return ethr_atomic32_addr__(var);
+}
+
+void
+ethr_atomic32_init(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+ ETHR_ASSERT(var);
+ ethr_atomic32_init__(var, i);
+}
+
+void
+ethr_atomic32_set(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_set__(var, i);
+}
+
+ethr_sint32_t
+ethr_atomic32_read(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read__(var);
+}
+
+
+ethr_sint32_t
+ethr_atomic32_add_read(ethr_atomic32_t *var, ethr_sint32_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_add_read__(var, incr);
+}
+
+ethr_sint32_t
+ethr_atomic32_inc_read(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_inc_read__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_dec_read(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_dec_read__(var);
+}
+
+void
+ethr_atomic32_add(ethr_atomic32_t *var, ethr_sint32_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_add__(var, incr);
+}
+
+void
+ethr_atomic32_inc(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_inc__(var);
+}
+
+void
+ethr_atomic32_dec(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_dec__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_read_band(ethr_atomic32_t *var, ethr_sint32_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read_band__(var, mask);
+}
+
+ethr_sint32_t
+ethr_atomic32_read_bor(ethr_atomic32_t *var, ethr_sint32_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read_bor__(var, mask);
+}
+
+ethr_sint32_t
+ethr_atomic32_xchg(ethr_atomic32_t *var, ethr_sint32_t new)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_xchg__(var, new);
+}
+
+ethr_sint32_t
+ethr_atomic32_cmpxchg(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_cmpxchg__(var, new, expected);
+}
+
+ethr_sint32_t
+ethr_atomic32_read_acqb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read_acqb__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_inc_read_acqb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_inc_read_acqb__(var);
+}
+
+void
+ethr_atomic32_set_relb(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_set_relb__(var, i);
+}
+
+void
+ethr_atomic32_dec_relb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_dec_relb__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_dec_read_relb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_dec_read_relb__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_cmpxchg_acqb__(var, new, exp);
+}
+
+ethr_sint32_t
+ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_cmpxchg_relb__(var, new, exp);
+}
+
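The block above completes the non-inline face of the new 32-bit atomic API: each function asserts that the ethread library is initialized and that the variable pointer is valid, then forwards to the `__`-suffixed inline implementation. A minimal usage sketch, restricted to calls visible in this file (the reference counter and the chosen values are illustrative, not from the source):

    /* Sketch only: ethr_atomic32_t and the ethr_atomic32_* calls are
     * the API added above; the variable and values are made up. */
    #include "ethread.h"

    static ethr_atomic32_t refc;

    static void refc_example(void)
    {
        ethr_atomic32_init(&refc, 1);            /* one initial reference  */
        ethr_atomic32_inc(&refc);                /* take another reference */
        if (ethr_atomic32_dec_read(&refc) == 0)  /* drop one; 0 => last    */
            ;
        /* store 0 only if the count still reads 1; returns old value */
        (void) ethr_atomic32_cmpxchg(&refc, 0, 1);
    }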
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
index 4db4cffd3a..2c3e25a805 100644
--- a/erts/lib_src/common/ethr_aux.c
+++ b/erts/lib_src/common/ethr_aux.c
@@ -31,7 +31,10 @@
#define ETHR_INLINE_FUNC_NAME_(X) X ## __
#define ETHR_AUX_IMPL__
-
+#define ETHR_ATOMIC_IMPL__ /* Needed in order to pull in
+ native atomic implementations
+ for optimized fallbacks of
+ spinlocks and rwspinlocks */
#include "ethread.h"
#include "ethr_internal.h"
#include <string.h>
@@ -51,10 +54,6 @@ int ethr_not_inited__ = 1;
ethr_memory_allocators ethr_mem__ = ETHR_MEM_ALLOCS_DEF_INITER__;
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-#endif
-
void *(*ethr_thr_prepare_func__)(void) = NULL;
void (*ethr_thr_parent_func__)(void *) = NULL;
void (*ethr_thr_child_func__)(void *) = NULL;
@@ -138,16 +137,9 @@ ethr_init_common__(ethr_init_data *id)
#endif
ethr_max_stack_size__ = ETHR_B2KW(ethr_max_stack_size__);
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
- {
- int i;
- for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
- res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
- if (res != 0)
- return res;
- }
- }
-#endif
+ res = ethr_init_atomics();
+ if (res != 0)
+ return res;
res = ethr_mutex_lib_init(erts_get_cpu_configured(ethr_cpu_info__));
if (res != 0)
@@ -279,14 +271,6 @@ typedef union {
static ethr_spinlock_t ts_ev_alloc_lock;
static ethr_ts_event *free_ts_ev;
-#if SIZEOF_VOID_P == SIZEOF_INT
-typedef unsigned int EthrPtrSzUInt;
-#elif SIZEOF_VOID_P == SIZEOF_LONG
-typedef unsigned long EthrPtrSzUInt;
-#else
-#error No pointer sized integer type
-#endif
-
static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
{
int i;
@@ -295,16 +279,16 @@ static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
+ ETHR_CACHE_LINE_SIZE);
if (!atsev)
return NULL;
- if ((((EthrPtrSzUInt) atsev) & ETHR_CACHE_LINE_MASK) == 0)
+ if ((((ethr_uint_t) atsev) & ETHR_CACHE_LINE_MASK) == 0)
atsev = ((ethr_aligned_ts_event *)
- ((((EthrPtrSzUInt) atsev) & ~ETHR_CACHE_LINE_MASK)
+ ((((ethr_uint_t) atsev) & ~ETHR_CACHE_LINE_MASK)
+ ETHR_CACHE_LINE_SIZE));
for (i = 1; i < size; i++) {
atsev[i-1].ts_ev.next = &atsev[i].ts_ev;
- ethr_atomic_init(&atsev[i-1].ts_ev.uaflgs, 0);
+ ethr_atomic32_init(&atsev[i-1].ts_ev.uaflgs, 0);
atsev[i-1].ts_ev.iflgs = 0;
}
- ethr_atomic_init(&atsev[size-1].ts_ev.uaflgs, 0);
+ ethr_atomic32_init(&atsev[size-1].ts_ev.uaflgs, 0);
atsev[size-1].ts_ev.iflgs = 0;
atsev[size-1].ts_ev.next = NULL;
if (endpp)
@@ -466,170 +450,6 @@ int ethr_get_main_thr_status(int *on)
return 0;
}
-
-/* Atomics */
-
-void
-ethr_atomic_init(ethr_atomic_t *var, long i)
-{
- ETHR_ASSERT(var);
- ethr_atomic_init__(var, i);
-}
-
-void
-ethr_atomic_set(ethr_atomic_t *var, long i)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_set__(var, i);
-}
-
-long
-ethr_atomic_read(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read__(var);
-}
-
-
-long
-ethr_atomic_add_read(ethr_atomic_t *var, long incr)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_add_read__(var, incr);
-}
-
-long
-ethr_atomic_inc_read(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_inc_read__(var);
-}
-
-long
-ethr_atomic_dec_read(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_dec_read__(var);
-}
-
-void
-ethr_atomic_add(ethr_atomic_t *var, long incr)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_add__(var, incr);
-}
-
-void
-ethr_atomic_inc(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_inc__(var);
-}
-
-void
-ethr_atomic_dec(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_dec__(var);
-}
-
-long
-ethr_atomic_read_band(ethr_atomic_t *var, long mask)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read_band__(var, mask);
-}
-
-long
-ethr_atomic_read_bor(ethr_atomic_t *var, long mask)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read_bor__(var, mask);
-}
-
-long
-ethr_atomic_xchg(ethr_atomic_t *var, long new)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_xchg__(var, new);
-}
-
-long
-ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg__(var, new, expected);
-}
-
-long
-ethr_atomic_read_acqb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read_acqb__(var);
-}
-
-long
-ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_inc_read_acqb__(var);
-}
-
-void
-ethr_atomic_set_relb(ethr_atomic_t *var, long i)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_set_relb__(var, i);
-}
-
-void
-ethr_atomic_dec_relb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_dec_relb__(var);
-}
-
-long
-ethr_atomic_dec_read_relb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_dec_read_relb__(var);
-}
-
-long
-ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, long new, long exp)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg_acqb__(var, new, exp);
-}
-
-long
-ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, long new, long exp)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg_relb__(var, new, exp);
-}
-
-
/* Spinlocks and rwspinlocks */
int
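The ethr_aux.c deletions above relocate rather than remove functionality: the spinlock array that emulated atomics on platforms without native support is now set up by ethr_init_atomics() inside the new atomics files, and the local EthrPtrSzUInt typedef gives way to the branch-wide ethr_uint_t. For reference, the removed fallback hashed each atomic variable's address to one of 1 << ETHR_ATOMIC_ADDR_BITS spinlocks; schematically (bit count and shift below are illustrative, not the real constants):

    /* Lock-based atomics fallback: every op on a variable takes the
     * spinlock selected by hashing the variable's address. */
    #define ADDR_BITS 10
    static ethr_spinlock_t protection[1 << ADDR_BITS];

    static ethr_spinlock_t *lock_for(void *addr)
    {
        ethr_uint_t ix = (((ethr_uint_t) addr) >> 6) & ((1 << ADDR_BITS) - 1);
        return &protection[ix];
    }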
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
index a2fbf3a454..2ddef32dfc 100644
--- a/erts/lib_src/common/ethr_mutex.c
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -206,16 +206,16 @@ static void hard_debug_chk_q__(struct ethr_mutex_base_ *, int);
#ifdef ETHR_USE_OWN_RWMTX_IMPL__
static void
rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint32_t initial,
int q_locked);
static void
rwmutex_unlock_wake(ethr_rwmutex *rwmtx,
int have_w,
- long initial,
+ ethr_sint32_t initial,
int transfer_read_lock);
static int
rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint32_t initial,
ethr_ts_event *tse,
int start_next_ix,
int check_before_try,
@@ -242,12 +242,12 @@ rwmutex_freqread_rdrs_add(ethr_rwmutex *rwmtx,
int inc)
{
if (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ || ix == 0)
- ethr_atomic_add(&rwmtx->tdata.ra[ix].data.readers, inc);
+ ethr_atomic32_add(&rwmtx->tdata.ra[ix].data.readers, inc);
else {
ETHR_ASSERT(type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
ETHR_ASSERT(inc == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 1);
}
}
@@ -258,15 +258,15 @@ rwmutex_freqread_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_inc:
- ethr_atomic_inc(&rwmtx->tdata.ra[ix].data.readers);
+ ethr_atomic32_inc(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_inc;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 1);
}
}
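The helpers above keep one reader counter per slot: FREQUENT_READ threads share a slot per reader group (tse->rgix), while EXTREMELY_FREQUENT_READ threads own a private slot (tse->mtix) whose counter only ever holds 0 or 1, which is why a plain atomic32 set suffices on that path. A writer decides whether readers remain by scanning every slot, as check_readers_array() does further down; schematically:

    /* Writer-side scan: readers remain iff any per-slot counter is
     * non-zero (condensed from check_readers_array() below). */
    static int readers_present(ethr_rwmutex *rwmtx, int length)
    {
        int ix;
        for (ix = 0; ix < length; ix++)
            if (ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) != 0)
                return 1;
        return 0;
    }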
@@ -279,64 +279,65 @@ rwmutex_freqread_rdrs_dec(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_dec:
- ethr_atomic_dec(&rwmtx->tdata.ra[ix].data.readers);
+ ethr_atomic32_dec(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_dec;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 0);
}
}
#endif
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint32_t
rwmutex_freqread_rdrs_dec_read(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
{
int ix;
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_dec_read:
- return ethr_atomic_dec_read(&rwmtx->tdata.ra[ix].data.readers);
+ return ethr_atomic32_dec_read(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_dec_read;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
- return (long) 0;
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 0);
+ return (ethr_sint32_t) 0;
}
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint32_t
rwmutex_freqread_rdrs_dec_read_relb(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
{
int ix;
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_dec_read:
- return ethr_atomic_dec_read_relb(&rwmtx->tdata.ra[ix].data.readers);
+ return ethr_atomic32_dec_read_relb(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_dec_read;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set_relb(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
- return (long) 0;
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic32_set_relb(&rwmtx->tdata.ra[ix].data.readers,
+ (ethr_sint32_t) 0);
+ return (ethr_sint32_t) 0;
}
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint32_t
rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
{
- long res = ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers);
+ ethr_sint32_t res = ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers);
#ifdef ETHR_DEBUG
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
@@ -402,19 +403,19 @@ static void
event_wait(struct ethr_mutex_base_ *mtxb,
ethr_ts_event *tse,
int spincount,
- long type,
+ ethr_sint32_t type,
int is_rwmtx,
int is_freq_read)
{
int locked = 0;
- long act;
+ ethr_sint32_t act;
int need_try_complete_runlock = 0;
int transfer_read_lock = 0;
/* Need to enqueue and wait... */
tse->uflgs = type;
- ethr_atomic_set(&tse->uaflgs, type);
+ ethr_atomic32_set(&tse->uaflgs, type);
ETHR_MTX_Q_LOCK(&mtxb->qlck);
locked = 1;
@@ -423,7 +424,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
hard_debug_chk_q__(mtxb, is_rwmtx);
#endif
- act = ethr_atomic_read(&mtxb->flgs);
+ act = ethr_atomic32_read(&mtxb->flgs);
if (act & type) {
@@ -453,7 +454,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
/* Set wait bit */
while (1) {
- long new, exp = act;
+ ethr_sint32_t new, exp = act;
need_try_complete_runlock = 0;
transfer_read_lock = 0;
@@ -484,7 +485,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
}
}
- act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs, new, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&mtxb->flgs, new, exp);
if (exp == act) {
if (new & type) {
act = new;
@@ -559,7 +560,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
while (1) {
ethr_event_reset(&tse->event);
- act = ethr_atomic_read_acqb(&tse->uaflgs);
+ act = ethr_atomic32_read_acqb(&tse->uaflgs);
if (!act)
goto done; /* Got it */
@@ -567,7 +568,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
ethr_event_swait(&tse->event, spincount);
/* swait result: 0 || EINTR */
- act = ethr_atomic_read_acqb(&tse->uaflgs);
+ act = ethr_atomic32_read_acqb(&tse->uaflgs);
if (!act)
goto done; /* Got it */
}
@@ -587,7 +588,7 @@ wake_writer(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
dequeue(&mtxb->q, tse, tse);
ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_RWMTX_W_WAIT_FLG__);
#ifdef ETHR_MTX_HARD_DEBUG_WSQ
mtxb->ws--;
#endif
@@ -597,7 +598,7 @@ wake_writer(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
- ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_atomic32_set(&tse->uaflgs, 0);
ethr_event_set(&tse->event);
}
@@ -649,11 +650,11 @@ int check_readers_array(ethr_rwmutex *rwmtx,
static ETHR_INLINE void
write_lock_wait(struct ethr_mutex_base_ *mtxb,
- long initial,
+ ethr_sint32_t initial,
int is_rwmtx,
int is_freq_read)
{
- long act = initial;
+ ethr_sint32_t act = initial;
int scnt, start_scnt;
ethr_ts_event *tse = NULL;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -706,13 +707,13 @@ write_lock_wait(struct ethr_mutex_base_ *mtxb,
until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
ETHR_YIELD();
}
- act = ethr_atomic_read(&mtxb->flgs);
+ act = ethr_atomic32_read(&mtxb->flgs);
scnt--;
}
- act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs,
- ETHR_RWMTX_W_FLG__,
- 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtxb->flgs,
+ ETHR_RWMTX_W_FLG__,
+ 0);
if (act == 0)
goto done; /* Got it */
}
@@ -756,16 +757,16 @@ mtxb_init(struct ethr_mutex_base_ *mtxb,
}
mtxb->q = NULL;
- ethr_atomic_init(&mtxb->flgs, 0);
+ ethr_atomic32_init(&mtxb->flgs, 0);
return ETHR_MTX_QLOCK_INIT(&mtxb->qlck);
}
static int
mtxb_destroy(struct ethr_mutex_base_ *mtxb)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_Q_LOCK(&mtxb->qlck);
- act = ethr_atomic_read(&mtxb->flgs);
+ act = ethr_atomic32_read(&mtxb->flgs);
ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
if (act != 0)
return EINVAL;
@@ -831,13 +832,13 @@ ethr_mutex_destroy(ethr_mutex *mtx)
}
void
-ethr_mutex_lock_wait__(ethr_mutex *mtx, long initial)
+ethr_mutex_lock_wait__(ethr_mutex *mtx, ethr_sint32_t initial)
{
write_lock_wait(&mtx->mtxb, initial, 0, 0);
}
void
-ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
+ethr_mutex_unlock_wake__(ethr_mutex *mtx, ethr_sint32_t initial)
{
ethr_ts_event *tse;
@@ -845,7 +846,7 @@ ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
tse = mtx->mtxb.q;
ETHR_ASSERT(tse);
- ETHR_ASSERT(ethr_atomic_read(&mtx->mtxb.flgs)
+ ETHR_ASSERT(ethr_atomic32_read(&mtx->mtxb.flgs)
== (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
ETHR_ASSERT(initial & ETHR_RWMTX_W_WAIT_FLG__);
ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
@@ -855,7 +856,7 @@ ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
* mtxb->flgs; otherwise, we need to clear the write wait bit...
*/
if (tse->next == mtx->mtxb.q)
- ethr_atomic_set(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__);
+ ethr_atomic32_set(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__);
wake_writer(&mtx->mtxb, 0);
}
@@ -865,7 +866,7 @@ ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
static void
enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
{
- long act;
+ ethr_sint32_t act;
/*
* `ethr_cond_signal()' and `ethr_cond_broadcast()' end up here. If `mtx'
@@ -894,7 +895,7 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
}
#endif
- act = ethr_atomic_read(&mtx->mtxb.flgs);
+ act = ethr_atomic32_read(&mtx->mtxb.flgs);
ETHR_ASSERT(act == 0
|| act == ETHR_RWMTX_W_FLG__
|| act == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
@@ -902,10 +903,10 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
/* The normal sane case */
if (!(act & ETHR_RWMTX_W_WAIT_FLG__)) {
ETHR_ASSERT(!mtx->mtxb.q);
- act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs,
- (ETHR_RWMTX_W_FLG__
- | ETHR_RWMTX_W_WAIT_FLG__),
- ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg(&mtx->mtxb.flgs,
+ (ETHR_RWMTX_W_FLG__
+ | ETHR_RWMTX_W_WAIT_FLG__),
+ ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__) {
/*
* Sigh... this wasn't so sane after all, since the mutex was
@@ -937,14 +938,14 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
multi = tse_start != tse_end;
while (1) {
- long new, exp = act;
+ ethr_sint32_t new, exp = act;
if (multi || (act & ETHR_RWMTX_W_FLG__))
new = ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__;
else
new = ETHR_RWMTX_W_FLG__;
- act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&mtx->mtxb.flgs, new, exp);
if (exp == act) {
ETHR_ASSERT(!mtx->mtxb.q);
if (act & ETHR_RWMTX_W_FLG__) {
@@ -972,7 +973,7 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
- ethr_atomic_set(&tse_start->uaflgs, 0);
+ ethr_atomic32_set(&tse_start->uaflgs, 0);
ethr_event_set(&tse_start->event);
}
break;
@@ -1063,9 +1064,9 @@ ethr_cond_signal(ethr_cond *cnd)
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_CND_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_CND_WAIT_FLG__);
- ethr_atomic_set(&tse->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+ ethr_atomic32_set(&tse->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
dequeue(&cnd->q, tse, tse);
@@ -1116,10 +1117,11 @@ ethr_cond_broadcast(ethr_cond *cnd)
/* The normal case */
ETHR_ASSERT(tse_tmp->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse_tmp->uaflgs)
+ ETHR_ASSERT(ethr_atomic32_read(&tse_tmp->uaflgs)
== ETHR_CND_WAIT_FLG__);
- ethr_atomic_set(&tse_tmp->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+ ethr_atomic32_set(&tse_tmp->uaflgs,
+ ETHR_RWMTX_W_WAIT_FLG__);
}
else {
/* Should be very unusual */
@@ -1172,7 +1174,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
tse->udata = (void *) mtx;
tse->uflgs = ETHR_RWMTX_W_WAIT_FLG__; /* Prep for mutex lock op */
- ethr_atomic_set(&tse->uaflgs, ETHR_CND_WAIT_FLG__);
+ ethr_atomic32_set(&tse->uaflgs, ETHR_CND_WAIT_FLG__);
ETHR_MTX_Q_LOCK(&cnd->qlck);
@@ -1185,11 +1187,11 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
/* Wait */
woken = 0;
while (1) {
- long act;
+ ethr_sint32_t act;
ethr_event_reset(&tse->event);
- act = ethr_atomic_read_acqb(&tse->uaflgs);
+ act = ethr_atomic32_read_acqb(&tse->uaflgs);
if (!act)
break; /* Mtx locked */
@@ -1205,7 +1207,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
*/
if (act == ETHR_CND_WAIT_FLG__) {
ETHR_MTX_Q_LOCK(&cnd->qlck);
- act = ethr_atomic_read(&tse->uaflgs);
+ act = ethr_atomic32_read(&tse->uaflgs);
ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
|| act == ETHR_RWMTX_W_WAIT_FLG__);
/*
@@ -1407,7 +1409,7 @@ wake_readers(ethr_rwmutex *rwmtx, int rs)
rwmtx->rq_end = NULL;
ETHR_ASSERT(!rwmtx->mtxb.q
- || (ethr_atomic_read(&rwmtx->mtxb.q->uaflgs)
+ || (ethr_atomic32_read(&rwmtx->mtxb.q->uaflgs)
== ETHR_RWMTX_W_WAIT_FLG__));
ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
@@ -1418,7 +1420,7 @@ wake_readers(ethr_rwmutex *rwmtx, int rs)
#ifdef ETHR_DEBUG
ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs)
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs)
== ETHR_RWMTX_R_WAIT_FLG__);
drs++;
#endif
@@ -1426,7 +1428,7 @@ wake_readers(ethr_rwmutex *rwmtx, int rs)
tse_next = tse->next; /* we aren't allowed to read tse->next
after we have reset uaflgs */
- ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_atomic32_set(&tse->uaflgs, 0);
ethr_event_set(&tse->event);
tse = tse_next;
}
@@ -1469,7 +1471,7 @@ int check_readers_array(ethr_rwmutex *rwmtx,
ETHR_MEMORY_BARRIER;
do {
- long act = rwmutex_freqread_rdrs_read(rwmtx, ix);
+ ethr_sint32_t act = rwmutex_freqread_rdrs_read(rwmtx, ix);
if (act != 0)
return EBUSY;
ix++;
@@ -1483,9 +1485,9 @@ int check_readers_array(ethr_rwmutex *rwmtx,
static void
rwmutex_freqread_rdrs_dec_chk_wakeup(ethr_rwmutex *rwmtx,
ethr_ts_event *tse,
- long initial)
+ ethr_sint32_t initial)
{
- long act = initial;
+ ethr_sint32_t act = initial;
if ((act & (ETHR_RWMTX_W_FLG__|
ETHR_RWMTX_R_ABRT_UNLCK_FLG__)) == 0) {
@@ -1515,7 +1517,7 @@ rwmutex_freqread_rdrs_dec_chk_wakeup(ethr_rwmutex *rwmtx,
if (!rwmtx->mtxb.q)
ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
else if (is_w_waiter(rwmtx->mtxb.q)) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
if ((act & ETHR_RWMTX_W_FLG__) == 0)
rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 0, 0);
@@ -1525,7 +1527,7 @@ rwmutex_freqread_rdrs_dec_chk_wakeup(ethr_rwmutex *rwmtx,
* rwmutex_transfer_read_lock() will
* unlock Q lock.
*/
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act & ETHR_RWMTX_W_FLG__)
ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
else
@@ -1539,7 +1541,7 @@ static void
rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
ethr_ts_event *tse)
{
- long act;
+ ethr_sint32_t act;
/*
* Restore failed increment
*/
@@ -1548,21 +1550,21 @@ rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
ETHR_MEMORY_BARRIER;
if (act == 0) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
rwmutex_freqread_rdrs_dec_chk_wakeup(rwmtx, tse, act);
}
}
static int
rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint32_t initial,
ethr_ts_event *tse,
int start_next_ix,
int check_before_try,
int try_write_lock)
{
ethr_ts_event *tse_tmp;
- long act = initial;
+ ethr_sint32_t act = initial;
int six, res, length;
ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
@@ -1606,15 +1608,15 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
restart:
while (1) {
- long exp = act;
- long new = act+1;
+ ethr_sint32_t exp = act;
+ ethr_sint32_t new = act+1;
ETHR_ASSERT((act & ETHR_RWMTX_R_ABRT_UNLCK_FLG__) == 0);
ETHR_ASSERT((act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
< ETHR_RWMTX_R_PEND_UNLCK_MASK__);
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (exp == act) {
act = new;
break;
@@ -1651,8 +1653,8 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
while (1) {
int finished_abort = 0;
- long exp = act;
- long new = act;
+ ethr_sint32_t exp = act;
+ ethr_sint32_t new = act;
new--;
if (act & ETHR_RWMTX_R_ABRT_UNLCK_FLG__) {
@@ -1668,7 +1670,7 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
ETHR_ASSERT(act & ETHR_RWMTX_R_PEND_UNLCK_MASK__);
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (exp == act) {
act = new;
if (act & ETHR_RWMTX_W_FLG__)
@@ -1702,9 +1704,9 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
tryrwlock:
/* Try to write lock it */
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__,
- 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__,
+ 0);
return act == 0 ? 0 : EBUSY;
}
@@ -1713,11 +1715,11 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
static ETHR_INLINE void
rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
/*
* Restore failed increment
*/
- act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_dec_read(&rwmtx->mtxb.flgs);
if ((act & ETHR_RWMTX_WAIT_FLGS__)
&& (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
rwmutex_unlock_wake(rwmtx, 0, act, 0);
@@ -1727,10 +1729,9 @@ rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
#endif
static void
-rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
- long initial)
+rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx, ethr_sint32_t initial)
{
- long act = initial, exp;
+ ethr_sint32_t act = initial, exp;
int scnt, start_scnt;
ethr_ts_event *tse = NULL;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -1746,7 +1747,7 @@ rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
#ifdef ETHR_RLOCK_WITH_INC_DEC
rwmutex_incdec_restore_failed_tryrlock(rwmtx);
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
#endif
while (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
@@ -1763,17 +1764,17 @@ rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
ETHR_YIELD();
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
scnt--;
}
exp = act;
#ifdef ETHR_RLOCK_WITH_INC_DEC
- act = ethr_atomic_inc_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_inc_read(&rwmtx->mtxb.flgs);
if ((act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) == 0)
goto done; /* Got it */
#else
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
if (act == exp)
goto done; /* Got it */
#endif
@@ -1792,19 +1793,19 @@ static int
rwmutex_freqread_rlock(ethr_rwmutex *rwmtx, ethr_ts_event *tse, int trylock)
{
int res = 0;
- long act;
+ ethr_sint32_t act;
rwmutex_freqread_rdrs_inc(rwmtx, tse);
ETHR_MEMORY_BARRIER;
- act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read_acqb(&rwmtx->mtxb.flgs);
if (act != ETHR_RWMTX_R_FLG__) {
int wake_other_readers;
while (1) {
- long exp, new;
+ ethr_sint32_t exp, new;
wake_other_readers = 0;
@@ -1846,7 +1847,7 @@ rwmutex_freqread_rlock(ethr_rwmutex *rwmtx, ethr_ts_event *tse, int trylock)
}
exp = act;
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
if (act == exp)
break;
}
@@ -1862,7 +1863,7 @@ static void
rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
ethr_ts_event *tse)
{
- long act;
+ ethr_sint32_t act;
int scnt, start_scnt;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -1875,7 +1876,7 @@ rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
while (1) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
while (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
if (scnt <= 0) {
@@ -1890,7 +1891,7 @@ rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
ETHR_YIELD();
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
scnt--;
}
@@ -1900,21 +1901,23 @@ rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
}
static void
-rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, ethr_sint32_t initial)
{
write_lock_wait(&rwmtx->mtxb, initial, 1, 0);
}
static void
-rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, ethr_sint32_t initial)
{
write_lock_wait(&rwmtx->mtxb, initial, 1, 1);
}
static ETHR_INLINE void
-rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, long act_initial)
+rwlock_wake_set_flags(ethr_rwmutex *rwmtx,
+ ethr_sint32_t new_initial,
+ ethr_sint32_t act_initial)
{
- long act, act_mask;
+ ethr_sint32_t act, act_mask;
int chk_abrt_flg;
ETHR_MEMORY_BARRIER;
@@ -1935,18 +1938,18 @@ rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, long act_initial)
#else
/* rs mask always zero */
ETHR_ASSERT((act_initial & ETHR_RWMTX_RS_MASK__) == 0);
- ethr_atomic_set(&rwmtx->mtxb.flgs, new_initial);
+ ethr_atomic32_set(&rwmtx->mtxb.flgs, new_initial);
return;
#endif
}
act = act_initial;
while (1) {
- long exp = act;
- long new = new_initial + (act & act_mask);
+ ethr_sint32_t exp = act;
+ ethr_sint32_t new = new_initial + (act & act_mask);
if (chk_abrt_flg && (act & act_mask))
new |= ETHR_RWMTX_R_ABRT_UNLCK_FLG__;
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (act == exp)
break;
exp = act;
@@ -1960,7 +1963,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
int have_w,
ethr_ts_event *tse)
{
- long exp, act, imask;
+ ethr_sint32_t exp, act, imask;
exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
@@ -1982,7 +1985,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
if (rwmtx->rq_end) {
exp |= ETHR_RWMTX_R_WAIT_FLG__;
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_ASSERT((exp & ~imask) == (act & ~imask));
ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
@@ -2001,7 +2004,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
imask |= ETHR_RWMTX_RS_MASK__;
}
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_ASSERT((exp & ~imask) == (act & ~imask));
ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
@@ -2012,9 +2015,11 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
#endif
static void
-rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
+rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx,
+ ethr_sint32_t initial,
+ int q_locked)
{
- long act = initial;
+ ethr_sint32_t act = initial;
if (!q_locked) {
ethr_ts_event *tse;
@@ -2022,7 +2027,7 @@ rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
ETHR_ASSERT((initial & ETHR_RWMTX_W_FLG__) == 0);
ETHR_MTX_Q_LOCK(&rwmtx->mtxb.qlck);
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
tse = rwmtx->mtxb.q;
if ((act & ETHR_RWMTX_W_FLG__) || !tse || is_w_waiter(tse)) {
/* Someone else woke the readers up... */
@@ -2035,10 +2040,10 @@ rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
}
static void
-rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, ethr_sint32_t initial,
int transfer_read_lock)
{
- long new, act = initial;
+ ethr_sint32_t new, act = initial;
ethr_ts_event *tse;
if (transfer_read_lock) {
@@ -2060,9 +2065,9 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
return;
else {
while ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
- long exp = act;
+ ethr_sint32_t exp = act;
new = exp & ~ETHR_RWMTX_W_FLG__;
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (act == exp)
return;
}
@@ -2075,12 +2080,12 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
if (!have_w) {
if (!tse) {
#ifdef ETHR_DEBUG
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
#endif
goto already_served;
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act == (ETHR_RWMTX_R_WAIT_FLG__|ETHR_RWMTX_R_FLG__)) {
ETHR_ASSERT(tse && !is_w_waiter(tse));
}
@@ -2099,7 +2104,7 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
if (is_w_waiter(tse)) {
if (!have_w) {
- act = ethr_atomic_read_bor(&rwmtx->mtxb.flgs,
+ act = ethr_atomic32_read_bor(&rwmtx->mtxb.flgs,
ETHR_RWMTX_W_FLG__);
ETHR_ASSERT((act & ~(ETHR_RWMTX_WAIT_FLGS__
| (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL
@@ -2131,7 +2136,7 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
if (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL) {
rs = rwmtx->tdata.rs;
- new = (long) rs;
+ new = (ethr_sint32_t) rs;
rwmtx->tdata.rs = 0;
}
else {
@@ -2187,16 +2192,16 @@ alloc_readers_array(int length, ethr_rwmutex_lived lived)
if (!mem)
return NULL;
- if ((((unsigned long) mem) & ETHR_CACHE_LINE_MASK) == 0) {
+ if ((((ethr_uint_t) mem) & ETHR_CACHE_LINE_MASK) == 0) {
ra = (ethr_rwmtx_readers_array__ *) mem;
ra->data.byte_offset = 0;
}
else {
ra = ((ethr_rwmtx_readers_array__ *)
- ((((unsigned long) mem) & ~ETHR_CACHE_LINE_MASK)
+ ((((ethr_uint_t) mem) & ~ETHR_CACHE_LINE_MASK)
+ ETHR_CACHE_LINE_SIZE));
- ra->data.byte_offset = (int) ((unsigned long) ra
- - (unsigned long) mem);
+ ra->data.byte_offset = (int) ((ethr_uint_t) ra
+ - (ethr_uint_t) mem);
}
ra->data.lived = lived;
return ra;
@@ -2270,7 +2275,7 @@ ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
rwmtx->tdata.ra = ra;
for (ix = 0; ix < length; ix++) {
- ethr_atomic_init(&rwmtx->tdata.ra[ix].data.readers, 0);
+ ethr_atomic32_init(&rwmtx->tdata.ra[ix].data.readers, 0);
rwmtx->tdata.ra[ix].data.waiting_readers = 0;
}
break;
@@ -2324,7 +2329,7 @@ ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
#endif
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(rwmtx);
if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
- long act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ethr_sint32_t act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act == ETHR_RWMTX_R_FLG__)
rwmutex_try_complete_runlock(rwmtx, act, NULL, 0, 0, 0);
}
@@ -2345,7 +2350,7 @@ int
ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
{
int res = 0;
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2358,22 +2363,22 @@ ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL: {
#ifdef ETHR_RLOCK_WITH_INC_DEC
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
res = EBUSY;
else {
- act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_inc_read_acqb(&rwmtx->mtxb.flgs);
if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
rwmutex_incdec_restore_failed_tryrlock(rwmtx);
res = EBUSY;
}
}
#else
- long exp = 0;
+ ethr_sint32_t exp = 0;
int tries = 0;
while (1) {
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
if (act == exp) {
res = 0;
break;
@@ -2416,7 +2421,7 @@ ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2429,14 +2434,14 @@ ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL: {
#ifdef ETHR_RLOCK_WITH_INC_DEC
- act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_inc_read_acqb(&rwmtx->mtxb.flgs);
if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
rwmutex_normal_rlock_wait(rwmtx, act);
#else
- long exp = 0;
+ ethr_sint32_t exp = 0;
while (1) {
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
if (act == exp)
break;
@@ -2469,7 +2474,7 @@ ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(&rwmtx->mtxb);
ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(&rwmtx->mtxb);
@@ -2484,7 +2489,7 @@ ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_dec_read_relb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_dec_read_relb(&rwmtx->mtxb.flgs);
if ((act & ETHR_RWMTX_WAIT_FLGS__)
&& (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
@@ -2503,7 +2508,7 @@ ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
ETHR_MEMORY_BARRIER;
if (act == 0) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act != ETHR_RWMTX_R_FLG__)
rwmutex_freqread_rdrs_dec_chk_wakeup(rwmtx, tse, act);
}
@@ -2521,7 +2526,7 @@ int
ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
{
int res = 0;
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2533,8 +2538,8 @@ ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
res = EBUSY;
break;
@@ -2543,13 +2548,13 @@ ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
res = 0;
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
do {
if (act == 0)
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
else if (act == ETHR_RWMTX_R_FLG__) {
res = rwmutex_try_complete_runlock(rwmtx, act, NULL,
0, 1, 1);
@@ -2582,7 +2587,7 @@ ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
@@ -2593,8 +2598,8 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
rwmutex_normal_rwlock_wait(rwmtx, act);
break;
@@ -2602,7 +2607,7 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
do {
@@ -2611,8 +2616,8 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
break;
}
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
} while (act != 0);
@@ -2630,7 +2635,7 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
@@ -2645,16 +2650,16 @@ ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs,
- 0, ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&rwmtx->mtxb.flgs,
+ 0, ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
rwmutex_unlock_wake(rwmtx, 1, act, 0);
break;
case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
- act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs, 0,
- ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&rwmtx->mtxb.flgs, 0,
+ ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
rwmutex_unlock_wake(rwmtx, 1, act, 0);
break;
@@ -2779,7 +2784,7 @@ static void
hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
{
int res;
- long flgs = ethr_atomic_read(&mtxb->flgs);
+ ethr_sint32_t flgs = ethr_atomic32_read(&mtxb->flgs);
ETHR_MTX_HARD_ASSERT(res == 0);
@@ -2802,12 +2807,12 @@ hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
tse = mtxb->q;
do {
- long type;
+ ethr_sint32_t type;
ETHR_MTX_HARD_ASSERT(tse->next->prev == tse);
ETHR_MTX_HARD_ASSERT(tse->prev->next == tse);
- type = ethr_atomic_read(&tse->uaflgs);
+ type = ethr_atomic32_read(&tse->uaflgs);
ETHR_MTX_HARD_ASSERT(type == tse->uflgs);
switch (type) {
case ETHR_RWMTX_W_WAIT_FLG__:
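Nearly every hunk in ethr_mutex.c applies the same optimistic-update idiom, now carried in ethr_sint32_t instead of long: read the flag word, compute the desired value, and retry the cmpxchg until no concurrent update intervened. In isolation (flag word and bit are illustrative):

    /* Optimistic flag update as used throughout this file: cmpxchg
     * returns the value actually seen, so act == exp means our write
     * won; otherwise act already holds the fresh value to retry with. */
    static void set_flag(ethr_atomic32_t *flgs, ethr_sint32_t bit)
    {
        ethr_sint32_t act = ethr_atomic32_read(flgs);
        while (1) {
            ethr_sint32_t exp = act;
            ethr_sint32_t new = act | bit;
            act = ethr_atomic32_cmpxchg(flgs, new, exp);
            if (act == exp)
                break;
        }
    }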
diff --git a/erts/lib_src/pthread/ethr_event.c b/erts/lib_src/pthread/ethr_event.c
index 6731c0eb46..9434d60d0a 100644
--- a/erts/lib_src/pthread/ethr_event.c
+++ b/erts/lib_src/pthread/ethr_event.c
@@ -24,6 +24,10 @@
#define ETHR_INLINE_FUNC_NAME_(X) X ## __
#define ETHR_EVENT_IMPL__
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include "ethread.h"
#if defined(ETHR_LINUX_FUTEX_IMPL__)
@@ -37,7 +41,7 @@
int
ethr_event_init(ethr_event *e)
{
- ethr_atomic_init(&e->futex, ETHR_EVENT_OFF__);
+ ethr_atomic32_init(&e->futex, ETHR_EVENT_OFF__);
return 0;
}
@@ -52,7 +56,7 @@ wait__(ethr_event *e, int spincount)
{
unsigned sc = spincount;
int res;
- long val;
+ ethr_sint32_t val;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
if (spincount < 0)
@@ -60,7 +64,7 @@ wait__(ethr_event *e, int spincount)
while (1) {
while (1) {
- val = ethr_atomic_read(&e->futex);
+ val = ethr_atomic32_read(&e->futex);
if (val == ETHR_EVENT_ON__)
return 0;
if (sc == 0)
@@ -76,16 +80,18 @@ wait__(ethr_event *e, int spincount)
}
if (val != ETHR_EVENT_OFF_WAITER__) {
- val = ethr_atomic_cmpxchg(&e->futex,
- ETHR_EVENT_OFF_WAITER__,
- ETHR_EVENT_OFF__);
+ val = ethr_atomic32_cmpxchg(&e->futex,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
if (val == ETHR_EVENT_ON__)
return 0;
ETHR_ASSERT(val == ETHR_EVENT_OFF__);
}
- res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__, ETHR_EVENT_OFF_WAITER__);
+ res = ETHR_FUTEX__(&e->futex,
+ ETHR_FUTEX_WAIT__,
+ ETHR_EVENT_OFF_WAITER__);
if (res == EINTR)
break;
if (res != 0 && res != EWOULDBLOCK)
@@ -102,7 +108,7 @@ int
ethr_event_init(ethr_event *e)
{
int res;
- ethr_atomic_init(&e->state, ETHR_EVENT_OFF__);
+ ethr_atomic32_init(&e->state, ETHR_EVENT_OFF__);
res = pthread_mutex_init(&e->mtx, NULL);
if (res != 0)
return res;
@@ -131,7 +137,7 @@ static ETHR_INLINE int
wait__(ethr_event *e, int spincount)
{
int sc = spincount;
- long val;
+ ethr_sint32_t val;
int res, ulres;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -139,7 +145,7 @@ wait__(ethr_event *e, int spincount)
ETHR_FATAL_ERROR__(EINVAL);
while (1) {
- val = ethr_atomic_read(&e->state);
+ val = ethr_atomic32_read(&e->state);
if (val == ETHR_EVENT_ON__)
return 0;
if (sc == 0)
@@ -155,9 +161,9 @@ wait__(ethr_event *e, int spincount)
}
if (val != ETHR_EVENT_OFF_WAITER__) {
- val = ethr_atomic_cmpxchg(&e->state,
- ETHR_EVENT_OFF_WAITER__,
- ETHR_EVENT_OFF__);
+ val = ethr_atomic32_cmpxchg(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
if (val == ETHR_EVENT_ON__)
return 0;
ETHR_ASSERT(val == ETHR_EVENT_OFF__);
@@ -172,7 +178,7 @@ wait__(ethr_event *e, int spincount)
while (1) {
- val = ethr_atomic_read(&e->state);
+ val = ethr_atomic32_read(&e->state);
if (val == ETHR_EVENT_ON__)
break;
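The 32-bit conversion matters most in this file: Linux futexes operate on 32-bit words, which is presumably why the event state moves to the explicitly sized ethr_atomic32_t rather than the old long-based type. Condensed, the wait protocol above is (a fragment relying on this file's state macros, not a complete function):

    /* Condensed from wait__ above: flag ourselves as a waiter with a
     * cmpxchg, then sleep on the futex while the word still reads
     * "off with waiter". */
    ethr_sint32_t val = ethr_atomic32_read(&e->futex);
    if (val == ETHR_EVENT_ON__)
        return 0;                                        /* already set */
    val = ethr_atomic32_cmpxchg(&e->futex,
                                ETHR_EVENT_OFF_WAITER__, /* new      */
                                ETHR_EVENT_OFF__);       /* expected */
    if (val != ETHR_EVENT_ON__)
        ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__, ETHR_EVENT_OFF_WAITER__);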
diff --git a/erts/lib_src/pthread/ethread.c b/erts/lib_src/pthread/ethread.c
index ea1d9d43f0..f047104103 100644
--- a/erts/lib_src/pthread/ethread.c
+++ b/erts/lib_src/pthread/ethread.c
@@ -72,7 +72,7 @@ static void thr_exit_cleanup(void)
/* Argument passed to thr_wrapper() */
typedef struct {
- ethr_atomic_t result;
+ ethr_atomic32_t result;
ethr_ts_event *tse;
void *(*thr_func)(void *);
void *arg;
@@ -81,14 +81,14 @@ typedef struct {
static void *thr_wrapper(void *vtwd)
{
- long result;
+ ethr_sint32_t result;
void *res;
ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
void *(*thr_func)(void *) = twd->thr_func;
void *arg = twd->arg;
ethr_ts_event *tsep = NULL;
- result = (long) ethr_make_ts_event__(&tsep);
+ result = (ethr_sint32_t) ethr_make_ts_event__(&tsep);
if (result == 0) {
tsep->iflgs |= ETHR_TS_EV_ETHREAD;
@@ -99,7 +99,7 @@ static void *thr_wrapper(void *vtwd)
tsep = twd->tse; /* We aren't allowed to follow twd after
result has been set! */
- ethr_atomic_set(&twd->result, result);
+ ethr_atomic32_set(&twd->result, result);
ethr_event_set(&tsep->event);
@@ -191,7 +191,7 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
}
#endif
- ethr_atomic_init(&twd.result, -1);
+ ethr_atomic32_init(&twd.result, (ethr_sint32_t) -1);
twd.tse = ethr_get_ts_event();
twd.thr_func = func;
twd.arg = arg;
@@ -252,10 +252,10 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
/* Wait for child to initialize... */
while (1) {
- long result;
+ ethr_sint32_t result;
ethr_event_reset(&twd.tse->event);
- result = ethr_atomic_read(&twd.result);
+ result = ethr_atomic32_read(&twd.result);
if (result == 0)
break;
@@ -349,32 +349,6 @@ ethr_leave_ts_event(ethr_ts_event *tsep)
}
/*
- * Current time
- */
-
-int
-ethr_time_now(ethr_timeval *time)
-{
- int res;
- struct timeval tv;
-#if ETHR_XCHK
- if (ethr_not_inited__) {
- ETHR_ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ETHR_ASSERT(0);
- return EINVAL;
- }
-#endif
-
- res = gettimeofday(&tv, NULL);
- time->tv_sec = (long) tv.tv_sec;
- time->tv_nsec = ((long) tv.tv_usec)*1000;
- return res;
-}
-
-/*
* Thread specific data
*/
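Both here and in the Windows ethread.c below, thread creation hands the child's startup status back through a 32-bit atomic: the parent initializes twd.result to -1, the child stores 0 (or an error code) and fires the parent's event, and the parent loops on reset/read/wait until the value changes. Stripped of error handling, the parent side is (ethr_event_swait with a zero spincount stands in for whatever event wait the surrounding code actually uses):

    /* Parent side of the create handshake; the child must not be
     * followed through twd once it has set twd.result. */
    ethr_atomic32_init(&twd.result, (ethr_sint32_t) -1);
    /* ... start the thread running thr_wrapper(&twd) ... */
    while (1) {
        ethr_sint32_t result;
        ethr_event_reset(&twd.tse->event);
        result = ethr_atomic32_read(&twd.result);
        if (result == 0)
            break;                      /* child finished initializing */
        ethr_event_swait(&twd.tse->event, 0);
    }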
diff --git a/erts/lib_src/win/ethr_event.c b/erts/lib_src/win/ethr_event.c
index ddb4780ff1..68f093f49c 100644
--- a/erts/lib_src/win/ethr_event.c
+++ b/erts/lib_src/win/ethr_event.c
@@ -28,6 +28,9 @@
/* --- Windows implementation of thread events ------------------------------ */
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedCompareExchange)
+
int
ethr_event_init(ethr_event *e)
{
@@ -72,10 +75,10 @@ wait(ethr_event *e, int spincount)
while (1) {
long on;
while (1) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
state = e->state;
#else
- state = InterlockedExchangeAdd(&e->state, (LONG) 0);
+ state = _InterlockedExchangeAdd(&e->state, (LONG) 0);
#endif
if (state == ETHR_EVENT_ON__)
return 0;
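Two easy-to-miss details above: the #pragma intrinsic lines ask MSVC to inline the interlocked operations instead of calling library stubs, and _InterlockedExchangeAdd(&x, 0) is the usual way to get a fully fenced atomic read of a long where a plain load is not known to be safe (the renamed ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ guard selects the plain load where it is). As a standalone sketch:

    /* Atomic read on Windows: adding zero returns the current value
     * and acts as a full memory barrier. */
    #include <intrin.h>
    #pragma intrinsic(_InterlockedExchangeAdd)

    static long atomic_read(volatile long *p)
    {
        return _InterlockedExchangeAdd(p, 0);
    }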
diff --git a/erts/lib_src/win/ethread.c b/erts/lib_src/win/ethread.c
index 69523edf94..789a360b11 100644
--- a/erts/lib_src/win/ethread.c
+++ b/erts/lib_src/win/ethread.c
@@ -49,7 +49,7 @@
/* Argument passed to thr_wrapper() */
typedef struct {
ethr_tid *tid;
- ethr_atomic_t result;
+ ethr_atomic32_t result;
ethr_ts_event *tse;
void *(*thr_func)(void *);
void *arg;
@@ -93,20 +93,20 @@ static void thr_exit_cleanup(ethr_tid *tid, void *res)
static unsigned __stdcall thr_wrapper(LPVOID vtwd)
{
ethr_tid my_tid;
- long result;
+ ethr_sint32_t result;
void *res;
ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
void *(*thr_func)(void *) = twd->thr_func;
void *arg = twd->arg;
ethr_ts_event *tsep = NULL;
- result = (long) ethr_make_ts_event__(&tsep);
+ result = (ethr_sint32_t) ethr_make_ts_event__(&tsep);
if (result == 0) {
tsep->iflgs |= ETHR_TS_EV_ETHREAD;
my_tid = *twd->tid;
if (!TlsSetValue(own_tid_key, (LPVOID) &my_tid)) {
- result = (long) ethr_win_get_errno__();
+ result = (ethr_sint32_t) ethr_win_get_errno__();
ethr_free_ts_event__(tsep);
}
else {
@@ -118,7 +118,7 @@ static unsigned __stdcall thr_wrapper(LPVOID vtwd)
tsep = twd->tse; /* We aren't allowed to follow twd after
result has been set! */
- ethr_atomic_set(&twd->result, result);
+ ethr_atomic32_set(&twd->result, result);
ethr_event_set(&tsep->event);
@@ -128,28 +128,6 @@ static unsigned __stdcall thr_wrapper(LPVOID vtwd)
return 0;
}
-#ifdef __GNUC__
-#define LL_LITERAL(X) X##LL
-#else
-#define LL_LITERAL(X) X##i64
-#endif
-
-#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
-
-static ETHR_INLINE void
-get_curr_time(long *sec, long *nsec)
-{
- SYSTEMTIME t;
- FILETIME ft;
- LONGLONG lft;
-
- GetSystemTime(&t);
- SystemTimeToFileTime(&t, &ft);
- memcpy(&lft, &ft, sizeof(lft));
- *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
- *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
-}
-
/* internal exports */
int
@@ -320,7 +298,7 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
}
- ethr_atomic_init(&twd.result, -1);
+ ethr_atomic32_init(&twd.result, -1);
twd.tid = tid;
twd.thr_func = func;
@@ -352,11 +330,11 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
/* Wait for child to initialize... */
while (1) {
- long result;
+ ethr_sint32_t result;
int err;
ethr_event_reset(&twd.tse->event);
- result = ethr_atomic_read(&twd.result);
+ result = ethr_atomic32_read(&twd.result);
if (result == 0)
break;
@@ -517,23 +495,6 @@ ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
return tid1.id == tid2.id && tid1.id != ETHR_INVALID_TID_ID;
}
-int
-ethr_time_now(ethr_timeval *time)
-{
-#if ETHR_XCHK
- if (ethr_not_inited__) {
- ETHR_ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ETHR_ASSERT(0);
- return EINVAL;
- }
-#endif
- get_curr_time(&time->tv_sec, &time->tv_nsec);
- return 0;
-}
-
/*
* Thread specific data
*/
diff --git a/erts/test/ethread_SUITE.erl b/erts/test/ethread_SUITE.erl
index 93e27fa8d3..69e5af802f 100644
--- a/erts/test/ethread_SUITE.erl
+++ b/erts/test/ethread_SUITE.erl
@@ -37,7 +37,6 @@
equal_tids/1,
mutex/1,
try_lock_mutex/1,
- time_now/1,
cond_wait/1,
broadcast/1,
detached_thread/1,
@@ -55,7 +54,6 @@ tests() ->
equal_tids,
mutex,
try_lock_mutex,
- time_now,
cond_wait,
broadcast,
detached_thread,
@@ -104,17 +102,6 @@ try_lock_mutex(suite) ->
try_lock_mutex(Config) ->
run_case(Config, "try_lock_mutex", "").
-time_now(doc) ->
- ["Tests ethr_time_now by comparing time values with Erlang."];
-time_now(suite) ->
- [];
-time_now(Config) ->
- run_case(Config, "time_now", "", fun (P) ->
- spawn_link(fun () ->
- watchdog(P)
- end)
- end).
-
wd_dispatch(P) ->
receive
bye ->
diff --git a/erts/test/ethread_SUITE_data/ethread_tests.c b/erts/test/ethread_SUITE_data/ethread_tests.c
index 7fc71d8047..0b59ff5aa6 100644
--- a/erts/test/ethread_SUITE_data/ethread_tests.c
+++ b/erts/test/ethread_SUITE_data/ethread_tests.c
@@ -514,69 +514,6 @@ try_lock_mutex_test(void)
}
/*
- * The time now test.
- *
- * Tests ethr_time_now by comparing time values with Erlang.
- */
-#define TNT_MAX_TIME_DIFF 200000
-#define TNT_MAX_TIME_VALUES 52
-
-static void
-time_now_test(void)
-{
- int scanf_res, time_now_res, i, no_values, max_abs_diff;
- static ethr_timeval tv[TNT_MAX_TIME_VALUES];
- static int ms[TNT_MAX_TIME_VALUES];
-
- i = 0;
- do {
- ASSERT(i < TNT_MAX_TIME_VALUES);
- scanf_res = scanf("%d", &ms[i]);
- time_now_res = ethr_time_now(&tv[i]);
- ASSERT(scanf_res == 1);
- ASSERT(time_now_res == 0);
-#if 0
- print_line("Got %d; %ld:%ld", ms[i], tv[i].tv_sec, tv[i].tv_nsec);
-#endif
- i++;
- } while (ms[i-1] >= 0);
-
- no_values = i-1;
-
- ASSERT(ms[0] == 0);
-
- print_line("TNT_MAX_TIME_DIFF = %d (us)", TNT_MAX_TIME_DIFF);
-
- max_abs_diff = 0;
-
- for (i = 1; i < no_values; i++) {
- long diff;
- long tn_us;
- long e_us;
-
- tn_us = (tv[i].tv_sec - tv[0].tv_sec) * 1000000;
- tn_us += (tv[i].tv_nsec - tv[0].tv_nsec)/1000;
-
- e_us = ms[i]*1000;
-
- diff = e_us - tn_us;
-
- print_line("Erlang time = %ld us; ethr_time_now = %ld us; diff %ld us",
- e_us, tn_us, diff);
-
- if (max_abs_diff < abs((int) diff)) {
- max_abs_diff = abs((int) diff);
- }
-
- ASSERT(e_us - TNT_MAX_TIME_DIFF <= tn_us);
- ASSERT(tn_us <= e_us + TNT_MAX_TIME_DIFF);
- }
-
- print_line("Max absolute diff = %d us", max_abs_diff);
- succeed("Max absolute diff = %d us", max_abs_diff);
-}
-
-/*
* The cond wait test case.
*
* Tests ethr_cond_wait with ethr_cond_signal and ethr_cond_broadcast.
@@ -1538,8 +1475,6 @@ main(int argc, char *argv[])
mutex_test();
else if (strcmp(testcase, "try_lock_mutex") == 0)
try_lock_mutex_test();
- else if (strcmp(testcase, "time_now") == 0)
- time_now_test();
else if (strcmp(testcase, "cond_wait") == 0)
cond_wait_test();
else if (strcmp(testcase, "broadcast") == 0)