Diffstat (limited to 'erts')
-rw-r--r--  erts/aclocal.m4 | 141
-rw-r--r--  erts/configure.in | 5
-rw-r--r--  erts/emulator/Makefile.in | 6
-rw-r--r--  erts/emulator/beam/beam_bp.c | 4
-rw-r--r--  erts/emulator/beam/beam_bp.h | 2
-rw-r--r--  erts/emulator/beam/beam_debug.c | 119
-rw-r--r--  erts/emulator/beam/beam_emu.c | 1235
-rw-r--r--  erts/emulator/beam/beam_load.c | 521
-rw-r--r--  erts/emulator/beam/bif.c | 21
-rw-r--r--  erts/emulator/beam/bif.tab | 1
-rw-r--r--  erts/emulator/beam/break.c | 10
-rw-r--r--  erts/emulator/beam/dist.c | 159
-rw-r--r--  erts/emulator/beam/dist.h | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 2
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 12
-rw-r--r--  erts/emulator/beam/erl_bits.c | 14
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 4
-rw-r--r--  erts/emulator/beam/erl_db.c | 151
-rw-r--r--  erts/emulator/beam/erl_db.h | 3
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 38
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 2
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 5
-rw-r--r--  erts/emulator/beam/erl_driver.h | 53
-rw-r--r--  erts/emulator/beam/erl_fun.c | 8
-rw-r--r--  erts/emulator/beam/erl_gc.c | 2
-rw-r--r--  erts/emulator/beam/erl_init.c | 12
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 8
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 14
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 10
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 25
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 4
-rw-r--r--  erts/emulator/beam/erl_process.c | 608
-rw-r--r--  erts/emulator/beam/erl_process.h | 44
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 12
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 30
-rw-r--r--  erts/emulator/beam/erl_smp.h | 401
-rw-r--r--  erts/emulator/beam/erl_term.h | 12
-rw-r--r--  erts/emulator/beam/erl_threads.h | 386
-rw-r--r--  erts/emulator/beam/erl_time.h | 64
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 19
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 2
-rw-r--r--  erts/emulator/beam/global.h | 84
-rw-r--r--  erts/emulator/beam/io.c | 70
-rw-r--r--  erts/emulator/beam/ops.tab | 269
-rw-r--r--  erts/emulator/beam/sys.h | 83
-rw-r--r--  erts/emulator/beam/time.c | 289
-rw-r--r--  erts/emulator/beam/utils.c | 37
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 5
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 52
-rw-r--r--  erts/emulator/drivers/unix/unix_efile.c | 2
-rw-r--r--  erts/emulator/drivers/win32/win_con.c | 12
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 3
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 32
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h | 7
-rw-r--r--  erts/emulator/sys/unix/sys.c | 227
-rw-r--r--  erts/emulator/sys/vxworks/sys.c | 2
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c | 28
-rw-r--r--  erts/emulator/sys/win32/erl_win_dyn_driver.h | 12
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c | 6
-rw-r--r--  erts/emulator/test/beam_SUITE.erl | 21
-rw-r--r--  erts/emulator/test/beam_literals_SUITE.erl | 45
-rw-r--r--  erts/emulator/test/beam_literals_SUITE_data/literal_case_expression.S | 80
-rw-r--r--  erts/emulator/test/bs_construct_SUITE.erl | 21
-rw-r--r--  erts/emulator/test/distribution_SUITE.erl | 290
-rw-r--r--  erts/emulator/test/erl_link_SUITE.erl | 3
-rw-r--r--  erts/emulator/test/erts_debug_SUITE.erl | 9
-rwxr-xr-x  erts/emulator/utils/beam_makeops | 118
-rwxr-xr-x  erts/emulator/utils/count | 127
-rw-r--r--  erts/epmd/src/epmd_srv.c | 8
-rw-r--r--  erts/include/internal/ethr_atomics.h | 726
-rw-r--r--  erts/include/internal/ethr_mutex.h | 78
-rw-r--r--  erts/include/internal/ethr_optimized_fallbacks.h | 74
-rw-r--r--  erts/include/internal/ethread.h | 376
-rw-r--r--  erts/include/internal/ethread_header_config.h.in | 36
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h | 222
-rw-r--r--  erts/include/internal/gcc/ethread.h | 10
-rw-r--r--  erts/include/internal/i386/atomic.h | 190
-rw-r--r--  erts/include/internal/i386/ethread.h | 7
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h | 200
-rw-r--r--  erts/include/internal/ppc32/atomic.h | 94
-rw-r--r--  erts/include/internal/pthread/ethr_event.h | 54
-rw-r--r--  erts/include/internal/sparc32/atomic.h | 172
-rw-r--r--  erts/include/internal/sparc32/ethread.h | 7
-rw-r--r--  erts/include/internal/tile/atomic.h | 104
-rw-r--r--  erts/include/internal/win/ethr_atomic.h | 415
-rw-r--r--  erts/include/internal/win/ethr_event.h | 16
-rw-r--r--  erts/include/internal/win/ethread.h | 6
-rw-r--r--  erts/lib_src/Makefile.in | 7
-rw-r--r--  erts/lib_src/common/ethr_atomics.c | 402
-rw-r--r--  erts/lib_src/common/ethr_aux.c | 202
-rw-r--r--  erts/lib_src/common/ethr_mutex.c | 327
-rw-r--r--  erts/lib_src/pthread/ethr_event.c | 34
-rw-r--r--  erts/lib_src/pthread/ethread.c | 40
-rw-r--r--  erts/lib_src/win/ethr_event.c | 7
-rw-r--r--  erts/lib_src/win/ethread.c | 55
-rw-r--r--  erts/test/ethread_SUITE.erl | 13
-rw-r--r--  erts/test/ethread_SUITE_data/ethread_tests.c | 65
-rw-r--r--  erts/vsn.mk | 4
101 files changed, 6384 insertions(+), 3697 deletions(-)
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 443d8622bf..a1211bbf0c 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -747,9 +747,124 @@ case "$THR_LIB_NAME" in
if test $found_win32_winnt = no; then
AC_MSG_ERROR([-D_WIN32_WINNT missing in CPPFLAGS])
fi
- ethr_have_native_atomics=yes
- ethr_have_native_spinlock=yes
+
AC_DEFINE(ETHR_WIN32_THREADS, 1, [Define if you have win32 threads])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedCompareExchange64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedCompareExchange64(var, (__int64) 1, (__int64) 0);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64, 1, [Define if you have _InterlockedCompareExchange64()])
+
+ AC_CHECK_SIZEOF(void *)
+ case "$ac_cv_sizeof_void_p-$have_ilckd" in
+ 8-no)
+ ethr_have_native_atomics=no
+ ethr_have_native_spinlock=no;;
+ *)
+ ethr_have_native_atomics=yes
+ ethr_have_native_spinlock=yes;;
+ esac
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedDecrement64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedDecrement64(var);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDDECREMENT64, 1, [Define if you have _InterlockedDecrement64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedIncrement64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedIncrement64(var);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDINCREMENT64, 1, [Define if you have _InterlockedIncrement64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedExchangeAdd64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedExchangeAdd64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDEXCHANGEADD64, 1, [Define if you have _InterlockedExchangeAdd64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedExchange64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedExchange64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDEXCHANGE64, 1, [Define if you have _InterlockedExchange64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedAnd64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedAnd64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDAND64, 1, [Define if you have _InterlockedAnd64()])
+
+ have_ilckd=no
+ AC_MSG_CHECKING([for _InterlockedOr64()])
+ AC_TRY_LINK([
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+ ],
+ [
+ volatile __int64 *var;
+ _InterlockedOr64(var, (__int64) 1);
+ return 0;
+ ],
+ have_ilckd=yes)
+ AC_MSG_RESULT([$have_ilckd])
+ test $have_ilckd = yes && AC_DEFINE(ETHR_HAVE__INTERLOCKEDOR64, 1, [Define if you have _InterlockedOr64()])
+
;;
pthread)
@@ -1087,6 +1202,28 @@ fi
AC_CHECK_SIZEOF(void *)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF_PTR, $ac_cv_sizeof_void_p, [Define to the size of pointers])
+AC_CHECK_SIZEOF(int)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_INT, $ac_cv_sizeof_int, [Define to the size of int])
+AC_CHECK_SIZEOF(long)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_LONG, $ac_cv_sizeof_long, [Define to the size of long])
+AC_CHECK_SIZEOF(long long)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_LONG_LONG, $ac_cv_sizeof_long_long, [Define to the size of long long])
+AC_CHECK_SIZEOF(__int64)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF___INT64, $ac_cv_sizeof___int64, [Define to the size of __int64])
+
+
+case X$erl_xcomp_bigendian in
+ X) ;;
+ Xyes|Xno) ac_cv_c_bigendian=$erl_xcomp_bigendian;;
+ *) AC_MSG_ERROR([Bad erl_xcomp_bigendian value: $erl_xcomp_bigendian]);;
+esac
+
+AC_C_BIGENDIAN
+
+if test "$ac_cv_c_bigendian" = "yes"; then
+ AC_DEFINE(ETHR_BIGENDIAN, 1, [Define if bigendian])
+fi
+
AC_ARG_ENABLE(native-ethr-impls,
AS_HELP_STRING([--disable-native-ethr-impls],
[disable native ethread implementations]),
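The probes above define ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64 and the related macros only when the 64-bit Interlocked intrinsics actually link, and they disable native atomics and spinlocks on 64-bit Windows builds that lack _InterlockedCompareExchange64. A minimal sketch of how code can key off one such configure result; the wrapper name below is made up, not the actual ethread API:

/* Sketch only: shows how ethread-style code can key off the probed
 * configure macros. Windows-specific by nature. */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
/* Native 64-bit compare-and-swap; returns the previous value. */
static __int64
native_cmpxchg64(volatile __int64 *var, __int64 new_val, __int64 expected)
{
    return _InterlockedCompareExchange64(var, new_val, expected);
}
#else
/* No native 64-bit CAS: a real implementation falls back to a
 * lock-based emulation (cf. ethr_optimized_fallbacks.h). */
#endif
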
diff --git a/erts/configure.in b/erts/configure.in
index 6e983a07b0..627f734409 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -580,6 +580,11 @@ AC_SUBST(WFLAGS)
AC_SUBST(CFLAG_RUNTIME_LIBRARY_PATH)
AC_CHECK_SIZEOF(void *) # Needed for ARCH and smp checks below
+if test "x$ac_cv_sizeof_void_p" = x8; then
+ AC_SUBST(EXTERNAL_WORD_SIZE, 64)
+else
+ AC_SUBST(EXTERNAL_WORD_SIZE, 32)
+fi
dnl
dnl Figure out operating system and cpu architecture
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 6c33e2ca16..f04df354a8 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -505,8 +505,10 @@ ifdef HIPE_ENABLED
OPCODE_TABLES += hipe/hipe_ops.tab
endif
-$(TTF_DIR)/beam_opcodes.h $(TTF_DIR)/beam_opcodes.c: $(OPCODE_TABLES)
- LANG=C $(PERL) utils/beam_makeops -outdir $(TTF_DIR) \
+$(TTF_DIR)/beam_opcodes.h $(TTF_DIR)/beam_opcodes.c: $(OPCODE_TABLES) utils/beam_makeops
+ LANG=C $(PERL) utils/beam_makeops \
+ -wordsize @EXTERNAL_WORD_SIZE@ \
+ -outdir $(TTF_DIR) \
-emulator $(OPCODE_TABLES)
# bif and atom table
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 682f31b83f..31910888d1 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -950,8 +950,8 @@ static int set_function_break(Module *modp, BeamInstr *pc, int bif,
MatchSetUnref(old_match_spec);
} else {
BpDataCount *bdc = (BpDataCount *) bd;
- long count = 0;
- long res = 0;
+ erts_aint_t count = 0;
+ erts_aint_t res = 0;
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index ebc171078d..bd8a7249a7 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -157,7 +157,7 @@ do { \
BpData **bds = (BpData **) (pc)[-4]; \
BpDataCount *bdc = NULL; \
Uint ix = bp_sched2ix_proc( (p) ); \
- long count = 0; \
+ erts_aint_t count = 0; \
\
ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
ASSERT(bds); \
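The counters move from long to erts_aint_t because the atomic operations act on pointer-sized integers, and on LLP64 platforms (64-bit Windows) long stays 32 bits even though pointers are 64. A sketch of the idea behind the type, assuming a definition along these lines; the real one is selected by configure in the ethread headers:

/* Sketch: a pointer-sized signed integer for atomic counters. On LP64
 * Unix a plain long suffices; on LLP64 Windows it must be __int64,
 * since long remains 32 bits there. Illustrative definition only. */
#if defined(_WIN64)
typedef __int64 erts_aint_t;
#else
typedef long erts_aint_t;
#endif
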
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index b0bf14b94f..2855241b91 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -48,7 +48,6 @@
void dbg_bt(Process* p, Eterm* sp);
void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg);
-static void print_big(int to, void *to_arg, Eterm* addr);
static int print_op(int to, void *to_arg, int op, int size, BeamInstr* addr);
Eterm
erts_debug_same_2(Process* p, Eterm term1, Eterm term2)
@@ -157,6 +156,25 @@ void debug_dump_code(BeamInstr *I, int num)
}
#endif
+BIF_RETTYPE
+erts_debug_instructions_0(BIF_ALIST_0)
+{
+ int i = 0;
+ Uint needed = num_instructions * 2;
+ Eterm* hp;
+ Eterm res = NIL;
+
+ for (i = 0; i < num_instructions; i++) {
+ needed += 2*strlen(opc[i].name);
+ }
+ hp = HAlloc(BIF_P, needed);
+ for (i = num_instructions-1; i >= 0; i--) {
+ Eterm s = erts_bld_string_n(&hp, 0, opc[i].name, strlen(opc[i].name));
+ res = erts_bld_cons(&hp, 0, s, res);
+ }
+ return res;
+}
+
Eterm
erts_debug_disassemble_1(Process* p, Eterm addr)
{
@@ -312,6 +330,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
BeamInstr packed = 0; /* Accumulator for packed operations. */
BeamInstr args[8]; /* Arguments for this instruction. */
BeamInstr* ap; /* Pointer to arguments. */
+ BeamInstr* unpacked; /* Unpacked arguments */
start_prog = opc[op].pack;
@@ -360,6 +379,12 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
*ap++ = packed & BEAM_LOOSE_MASK;
packed >>= BEAM_LOOSE_SHIFT;
break;
+#ifdef ARCH_64
+ case 'w': /* Shift 32 steps */
+ *ap++ = packed & BEAM_WIDE_MASK;
+ packed >>= BEAM_WIDE_SHIFT;
+ break;
+#endif
case 'p':
*sp++ = *--ap;
break;
@@ -386,7 +411,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
break;
case 'x': /* x(N) */
if (reg_index(ap[0]) == 0) {
- erts_print(to, to_arg, "X[0]");
+ erts_print(to, to_arg, "x[0]");
} else {
erts_print(to, to_arg, "x(%d)", reg_index(ap[0]));
}
@@ -506,6 +531,7 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
ap++;
break;
case 'P': /* Byte offset into tuple (see beam_load.c) */
+ case 'Q': /* Like 'P', but packable */
erts_print(to, to_arg, "%d", (*ap / sizeof(Eterm)) - 1);
ap++;
break;
@@ -526,9 +552,12 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
* Print more information about certain instructions.
*/
+ unpacked = ap;
ap = addr + size;
switch (op) {
- case op_i_select_val_sfI:
+ case op_i_select_val_rfI:
+ case op_i_select_val_xfI:
+ case op_i_select_val_yfI:
{
int n = ap[-1];
@@ -540,7 +569,24 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
}
}
break;
- case op_i_jump_on_val_sfII:
+ case op_i_select_tuple_arity_rfI:
+ case op_i_select_tuple_arity_xfI:
+ case op_i_select_tuple_arity_yfI:
+ {
+ int n = ap[-1];
+
+ while (n > 0) {
+ Uint arity = arityval(ap[0]);
+ erts_print(to, to_arg, " {%d} f(" HEXF ")", arity, ap[1]);
+ ap += 2;
+ size += 2;
+ n--;
+ }
+ }
+ break;
+ case op_i_jump_on_val_rfII:
+ case op_i_jump_on_val_xfII:
+ case op_i_jump_on_val_yfII:
{
int n;
for (n = ap[-2]; n > 0; n--) {
@@ -550,39 +596,46 @@ print_op(int to, void *to_arg, int op, int size, BeamInstr* addr)
}
}
break;
- case op_i_select_big_sf:
- while (ap[0]) {
- Eterm *bigp = (Eterm *) ap;
- int arity = thing_arityval(*bigp);
- print_big(to, to_arg, bigp);
- size += TermWords(arity+1);
- ap += TermWords(arity+1);
- erts_print(to, to_arg, " f(" HEXF ") ", ap[0]);
- ap++;
- size++;
+ case op_i_jump_on_val_zero_rfI:
+ case op_i_jump_on_val_zero_xfI:
+ case op_i_jump_on_val_zero_yfI:
+ {
+ int n;
+ for (n = ap[-1]; n > 0; n--) {
+ erts_print(to, to_arg, "f(" HEXF ") ", ap[0]);
+ ap++;
+ size++;
+ }
+ }
+ break;
+ case op_i_put_tuple_rI:
+ case op_i_put_tuple_xI:
+ case op_i_put_tuple_yI:
+ {
+ int n = unpacked[-1];
+
+ while (n > 0) {
+ if (!is_header(ap[0])) {
+ erts_print(to, to_arg, " %T", (Eterm) ap[0]);
+ } else {
+ switch ((ap[0] >> 2) & 0x03) {
+ case R_REG_DEF:
+ erts_print(to, to_arg, " x(0)");
+ break;
+ case X_REG_DEF:
+ erts_print(to, to_arg, " x(%d)", ap[0] >> 4);
+ break;
+ case Y_REG_DEF:
+ erts_print(to, to_arg, " y(%d)", ap[0] >> 4);
+ break;
+ }
+ }
+ ap++, size++, n--;
+ }
}
- ap++;
- size++;
break;
}
erts_print(to, to_arg, "\n");
return size;
}
-
-static void
-print_big(int to, void *to_arg, Eterm* addr)
-{
- int i;
- int k;
-
- i = BIG_SIZE(addr);
- if (BIG_SIGN(addr))
- erts_print(to, to_arg, "-#integer(%d) = {", i);
- else
- erts_print(to, to_arg, "#integer(%d) = {", i);
- erts_print(to, to_arg, "0x%x", BIG_DIGIT(addr, 0));
- for (k = 1; k < i; k++)
- erts_print(to, to_arg, ",0x%x", BIG_DIGIT(addr, k));
- erts_print(to, to_arg, "}");
-}
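On 64-bit emulators the new 'w' pack directive (paired with the packable 'Q' operand) lets beam_makeops pack two 32-bit operands into a single 64-bit code word; the printer above peels them off again with BEAM_WIDE_MASK and BEAM_WIDE_SHIFT. A self-contained sketch of the scheme, with assumed values for the two constants (the real ones come from the beam_makeops-generated headers):

#include <stdint.h>
#include <assert.h>

/* Assumed values for illustration. */
#define BEAM_WIDE_SHIFT 32
#define BEAM_WIDE_MASK  ((((uint64_t)1) << BEAM_WIDE_SHIFT) - 1)

int main(void)
{
    uint64_t a = 0x12345678, b = 0x9abcdef0;
    uint64_t packed = (a << BEAM_WIDE_SHIFT) | b;  /* loader side */

    /* printer/interpreter side: operands come off the low end */
    assert((packed & BEAM_WIDE_MASK) == b);
    packed >>= BEAM_WIDE_SHIFT;
    assert((packed & BEAM_WIDE_MASK) == a);
    return 0;
}
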
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 8a0e12dd4f..16741aa2d7 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -344,6 +344,8 @@ extern int count_instructions;
#define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N)))
#define yb(N) (*(Eterm *) (((unsigned char *)E) + (N)))
#define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N)))
+#define Qb(N) (N)
+#define Ib(N) (N)
#define x(N) reg[N]
#define y(N) E[N]
#define r(N) x##N
@@ -472,6 +474,13 @@ extern int count_instructions;
HEAP_SPACE_VERIFIED(need); \
} while (0)
+#define TestHeapPutList(Need, Reg) \
+ do { \
+ TestHeap((Need), 1); \
+ PutList(Reg, r(0), r(0), StoreSimpleDest); \
+ CHECK_TERM(r(0)); \
+ } while (0)
+
#ifdef HYBRID
#ifdef INCREMENTAL
#define TestGlobalHeap(Nh, Live, hp) \
@@ -516,6 +525,11 @@ extern int count_instructions;
SWAPIN; \
} while (0)
+#define PutTuple(Dst, Arity) \
+ do { \
+ Dst = make_tuple(HTOP); \
+ pt_arity = (Arity); \
+ } while (0)
/*
* Check that we haven't used the reductions and jump to function pointed to by
@@ -674,6 +688,11 @@ extern int count_instructions;
SET_I((BeamInstr *) CallDest); \
Dispatch();
+#define MoveJump(Src) \
+ r(0) = (Src); \
+ SET_I((BeamInstr *) Arg(0)); \
+ Goto(*I);
+
#define GetList(Src, H, T) do { \
Eterm* tmp_ptr = list_val(Src); \
H = CAR(tmp_ptr); \
@@ -723,16 +742,8 @@ extern int count_instructions;
(Dest) = (* (Eterm *) EXPAND_POINTER(tmp_arg1)); \
} while (0)
-#define PutTuple(Arity, Src, Dest) \
- ASSERT(is_arity_value(Arity)); \
- Dest = make_tuple(HTOP); \
- HTOP[0] = (Arity); \
- HTOP[1] = (Src); \
- HTOP += 2
-
-#define Put(Word) *HTOP++ = (Word)
-
#define EqualImmed(X, Y, Action) if (X != Y) { Action; }
+#define NotEqualImmed(X, Y, Action) if (X == Y) { Action; }
#define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; }
@@ -984,8 +995,41 @@ extern int count_instructions;
#define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; }
#define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; }
-static BifFunction translate_gc_bif(void* gcf);
-static BeamInstr* handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf);
+/*
+ * process_main() is already huge, so we want to avoid inlining
+ * into it. Especially functions that are seldom used.
+ */
+#ifdef __GNUC__
+# define NOINLINE __attribute__((__noinline__))
+#else
+# define NOINLINE
+#endif
+
+/*
+ * The following functions are called directly by process_main().
+ * Don't inline them.
+ */
+static BifFunction translate_gc_bif(void* gcf) NOINLINE;
+static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
+ Eterm* reg, BifFunction bf) NOINLINE;
+static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
+ Eterm* reg, Eterm func) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
+static BeamInstr* apply(Process* p, Eterm module, Eterm function,
+ Eterm args, Eterm* reg) NOINLINE;
+static int hibernate(Process* c_p, Eterm module, Eterm function,
+ Eterm args, Eterm* reg) NOINLINE;
+static BeamInstr* call_fun(Process* p, int arity,
+ Eterm* reg, Eterm args) NOINLINE;
+static BeamInstr* apply_fun(Process* p, Eterm fun,
+ Eterm args, Eterm* reg) NOINLINE;
+static Eterm new_fun(Process* p, Eterm* reg,
+ ErlFunEntry* fe, int num_free) NOINLINE;
+
+
+/*
+ * Functions not directly called by process_main(). OK to inline.
+ */
static BeamInstr* next_catch(Process* c_p, Eterm *reg);
static void terminate_proc(Process* c_p, Eterm Value);
static Eterm add_stacktrace(Process* c_p, Eterm Value, Eterm exc);
@@ -993,16 +1037,6 @@ static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
BifFunction bf, Eterm args);
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
-static Eterm call_error_handler(Process* p, BeamInstr* ip, Eterm* reg);
-static Eterm call_breakpoint_handler(Process* p, BeamInstr* fi, Eterm* reg);
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity);
-static BeamInstr* apply(Process* p, Eterm module, Eterm function,
- Eterm args, Eterm* reg);
-static int hibernate(Process* c_p, Eterm module, Eterm function,
- Eterm args, Eterm* reg);
-static BeamInstr* call_fun(Process* p, int arity, Eterm* reg, Eterm args);
-static BeamInstr* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg);
-static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free);
#if defined(VXWORKS)
static int init_done;
@@ -1146,6 +1180,8 @@ void process_main(void)
Uint temp_bits; /* Temporary used by BsSkipBits2 & BsGetInteger2 */
+ Eterm pt_arity; /* Used by do_put_tuple */
+
ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
@@ -1246,6 +1282,52 @@ void process_main(void)
#define STORE_ARITH_RESULT(res) StoreBifResult(2, (res));
#define ARITH_FUNC(name) erts_gc_##name
+ {
+ Eterm increment_reg_val;
+ Eterm increment_val;
+ Uint live;
+ Eterm result;
+
+ OpCase(i_increment_yIId):
+ increment_reg_val = yb(Arg(0));
+ goto do_increment;
+
+ OpCase(i_increment_xIId):
+ increment_reg_val = xb(Arg(0));
+ goto do_increment;
+
+ OpCase(i_increment_rIId):
+ increment_reg_val = r(0);
+ I--;
+
+ do_increment:
+ increment_val = Arg(1);
+ if (is_small(increment_reg_val)) {
+ Sint i = signed_val(increment_reg_val) + increment_val;
+ ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i));
+ if (MY_IS_SSMALL(i)) {
+ result = make_small(i);
+ store_result:
+ StoreBifResult(3, result);
+ }
+ }
+
+ live = Arg(2);
+ SWAPOUT;
+ reg[0] = r(0);
+ reg[live] = increment_reg_val;
+ reg[live+1] = make_small(increment_val);
+ result = erts_gc_mixed_plus(c_p, reg, live);
+ r(0) = reg[0];
+ SWAPIN;
+ ERTS_HOLE_CHECK(c_p);
+ if (is_value(result)) {
+ goto store_result;
+ }
+ ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue));
+ goto find_func_info;
+ }
+
OpCase(i_plus_jId):
{
Eterm result;
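The new i_increment instruction fuses operand fetch, small-integer addition, and the fits-in-a-small test; only when the operand is not a small or the sum overflows does it spill the operands into the x registers and call erts_gc_mixed_plus(). The fast path in isolation, under an assumed one-word tagging scheme (the tag constants here are illustrative, not the real ERTS layout):

#include <stdint.h>
#include <stdio.h>

typedef intptr_t Sint;
typedef uintptr_t Eterm;

/* Illustrative small-integer tagging; the real constants differ. */
#define TAG_BITS      4
#define SMALL_TAG     0xF
#define make_small(x) ((((Eterm)(x)) << TAG_BITS) | SMALL_TAG)
#define signed_val(t) (((Sint)(t)) >> TAG_BITS)
#define IS_SSMALL(x)  ((x) == (((Sint)(((Eterm)(x)) << TAG_BITS)) >> TAG_BITS))

/* Fast path of i_increment: add to an already-tagged small integer and
 * retag; return 0 when the sum no longer fits, which is where the real
 * instruction falls back to erts_gc_mixed_plus(). */
static int increment_fast(Eterm reg, Sint incr, Eterm *result)
{
    if ((reg & SMALL_TAG) == SMALL_TAG) {
        Sint i = signed_val(reg) + incr;
        if (IS_SSMALL(i)) {
            *result = make_small(i);
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    Eterm out;
    if (increment_fast(make_small(41), 1, &out))
        printf("%ld\n", (long)signed_val(out)); /* prints 42 */
    return 0;
}
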
@@ -1309,6 +1391,52 @@ void process_main(void)
}
Next(1);
+ {
+ Eterm is_eq_exact_lit_val;
+
+ OpCase(i_is_eq_exact_literal_xfc):
+ is_eq_exact_lit_val = xb(Arg(0));
+ I++;
+ goto do_is_eq_exact_literal;
+
+ OpCase(i_is_eq_exact_literal_yfc):
+ is_eq_exact_lit_val = yb(Arg(0));
+ I++;
+ goto do_is_eq_exact_literal;
+
+ OpCase(i_is_eq_exact_literal_rfc):
+ is_eq_exact_lit_val = r(0);
+
+ do_is_eq_exact_literal:
+ if (!eq(Arg(1), is_eq_exact_lit_val)) {
+ ClauseFail();
+ }
+ Next(2);
+ }
+
+ {
+ Eterm is_ne_exact_lit_val;
+
+ OpCase(i_is_ne_exact_literal_xfc):
+ is_ne_exact_lit_val = xb(Arg(0));
+ I++;
+ goto do_is_ne_exact_literal;
+
+ OpCase(i_is_ne_exact_literal_yfc):
+ is_ne_exact_lit_val = yb(Arg(0));
+ I++;
+ goto do_is_ne_exact_literal;
+
+ OpCase(i_is_ne_exact_literal_rfc):
+ is_ne_exact_lit_val = r(0);
+
+ do_is_ne_exact_literal:
+ if (eq(Arg(1), is_ne_exact_lit_val)) {
+ ClauseFail();
+ }
+ Next(2);
+ }
+
OpCase(i_move_call_only_fcr): {
r(0) = Arg(1);
}
@@ -1392,6 +1520,17 @@ void process_main(void)
NextPF(1, next);
}
+ OpCase(move_x1_c): {
+ x(1) = Arg(0);
+ Next(1);
+ }
+
+ OpCase(move_x2_c): {
+ x(2) = Arg(0);
+ Next(1);
+ }
+
+
OpCase(return): {
SET_I(c_p->cp);
/*
@@ -1405,16 +1544,6 @@ void process_main(void)
Goto(*I);
}
- OpCase(test_heap_1_put_list_Iy): {
- BeamInstr *next;
-
- PreFetch(2, next);
- TestHeap(Arg(0), 1);
- PutList(yb(Arg(1)), r(0), r(0), StoreSimpleDest);
- CHECK_TERM(r(0));
- NextPF(2, next);
- }
-
/*
* Send is almost a standard call-BIF with two arguments, except for:
* 1) It cannot be traced.
@@ -1447,24 +1576,36 @@ void process_main(void)
goto find_func_info;
}
- OpCase(i_element_jssd): {
- Eterm index;
- Eterm tuple;
-
- /*
- * Inlined version of element/2 for speed.
- */
- GetArg2(1, index, tuple);
- if (is_small(index) && is_tuple(tuple)) {
- Eterm* tp = tuple_val(tuple);
-
- if ((signed_val(index) >= 1) &&
- (signed_val(index) <= arityval(*tp))) {
- Eterm result = tp[signed_val(index)];
- StoreBifResult(3, result);
- }
- }
- }
+ {
+ Eterm element_index;
+ Eterm element_tuple;
+
+ OpCase(i_element_xjsd):
+ element_tuple = xb(Arg(0));
+ I++;
+ goto do_element;
+
+ OpCase(i_element_yjsd):
+ element_tuple = yb(Arg(0));
+ I++;
+ goto do_element;
+
+ OpCase(i_element_rjsd):
+ element_tuple = r(0);
+ /* Fall through */
+
+ do_element:
+ GetArg1(1, element_index);
+ if (is_small(element_index) && is_tuple(element_tuple)) {
+ Eterm* tp = tuple_val(element_tuple);
+
+ if ((signed_val(element_index) >= 1) &&
+ (signed_val(element_index) <= arityval(*tp))) {
+ Eterm result = tp[signed_val(element_index)];
+ StoreBifResult(2, result);
+ }
+ }
+ }
/* Fall through */
OpCase(badarg_j):
@@ -1472,24 +1613,32 @@ void process_main(void)
c_p->freason = BADARG;
goto lb_Cl_error;
- OpCase(i_fast_element_jIsd): {
- Eterm tuple;
-
- /*
- * Inlined version of element/2 for even more speed.
- * The first argument is an untagged integer >= 1.
- * The second argument is guaranteed to be a register operand.
- */
- GetArg1(2, tuple);
- if (is_tuple(tuple)) {
- Eterm* tp = tuple_val(tuple);
- tmp_arg2 = Arg(1);
- if (tmp_arg2 <= arityval(*tp)) {
- Eterm result = tp[tmp_arg2];
- StoreBifResult(3, result);
- }
- }
+ {
+ Eterm fast_element_tuple;
+
+ OpCase(i_fast_element_rjId):
+ fast_element_tuple = r(0);
+
+ do_fast_element:
+ if (is_tuple(fast_element_tuple)) {
+ Eterm* tp = tuple_val(fast_element_tuple);
+ Eterm pos = Arg(1); /* Untagged integer >= 1 */
+ if (pos <= arityval(*tp)) {
+ Eterm result = tp[pos];
+ StoreBifResult(2, result);
+ }
+ }
goto badarg;
+
+ OpCase(i_fast_element_xjId):
+ fast_element_tuple = xb(Arg(0));
+ I++;
+ goto do_fast_element;
+
+ OpCase(i_fast_element_yjId):
+ fast_element_tuple = yb(Arg(0));
+ I++;
+ goto do_fast_element;
}
OpCase(catch_yf):
@@ -1842,8 +1991,87 @@ void process_main(void)
NextPF(0, next);
}
- OpCase(i_select_val_sfI):
- GetArg1(0, tmp_arg1);
+
+ {
+ Eterm select_val2;
+
+ OpCase(i_select_tuple_arity2_yfAfAf):
+ select_val2 = yb(Arg(0));
+ goto do_select_tuple_arity2;
+
+ OpCase(i_select_tuple_arity2_xfAfAf):
+ select_val2 = xb(Arg(0));
+ goto do_select_tuple_arity2;
+
+ OpCase(i_select_tuple_arity2_rfAfAf):
+ select_val2 = r(0);
+ I--;
+
+ do_select_tuple_arity2:
+ if (is_not_tuple(select_val2)) {
+ goto select_val2_fail;
+ }
+ select_val2 = *tuple_val(select_val2);
+ goto do_select_val2;
+
+ OpCase(i_select_val2_yfcfcf):
+ select_val2 = yb(Arg(0));
+ goto do_select_val2;
+
+ OpCase(i_select_val2_xfcfcf):
+ select_val2 = xb(Arg(0));
+ goto do_select_val2;
+
+ OpCase(i_select_val2_rfcfcf):
+ select_val2 = r(0);
+ I--;
+
+ do_select_val2:
+ if (select_val2 == Arg(2)) {
+ I += 2;
+ } else if (select_val2 == Arg(4)) {
+ I += 4;
+ }
+
+ select_val2_fail:
+ SET_I((BeamInstr *) Arg(1));
+ Goto(*I);
+ }
+
+ {
+ Eterm select_val;
+
+ OpCase(i_select_tuple_arity_xfI):
+ select_val = xb(Arg(0));
+ goto do_select_tuple_arity;
+
+ OpCase(i_select_tuple_arity_yfI):
+ select_val = yb(Arg(0));
+ goto do_select_tuple_arity;
+
+ OpCase(i_select_tuple_arity_rfI):
+ select_val = r(0);
+ I--;
+
+ do_select_tuple_arity:
+ if (is_tuple(select_val)) {
+ select_val = *tuple_val(select_val);
+ goto do_binary_search;
+ }
+ SET_I((BeamInstr *) Arg(1));
+ Goto(*I);
+
+ OpCase(i_select_val_xfI):
+ select_val = xb(Arg(0));
+ goto do_binary_search;
+
+ OpCase(i_select_val_yfI):
+ select_val = yb(Arg(0));
+ goto do_binary_search;
+
+ OpCase(i_select_val_rfI):
+ select_val = r(0);
+ I--;
do_binary_search:
{
@@ -1880,9 +2108,9 @@ void process_main(void)
unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1);
mid = (struct Pairs*)((char*)low + boffset);
- if (tmp_arg1 < mid->val) {
+ if (select_val < mid->val) {
high = mid;
- } else if (tmp_arg1 > mid->val) {
+ } else if (select_val > mid->val) {
low = mid + 1;
} else {
SET_I(mid->addr);
@@ -1892,16 +2120,28 @@ void process_main(void)
SET_I((BeamInstr *) Arg(1));
Goto(*I);
}
+ }
- OpCase(i_jump_on_val_zero_sfI):
{
- Eterm index;
-
- GetArg1(0, index);
- if (is_small(index)) {
- index = signed_val(index);
- if (index < Arg(2)) {
- SET_I((BeamInstr *) (&Arg(3))[index]);
+ Eterm jump_on_val_zero_index;
+
+ OpCase(i_jump_on_val_zero_yfI):
+ jump_on_val_zero_index = yb(Arg(0));
+ goto do_jump_on_val_zero_index;
+
+ OpCase(i_jump_on_val_zero_xfI):
+ jump_on_val_zero_index = xb(Arg(0));
+ goto do_jump_on_val_zero_index;
+
+ OpCase(i_jump_on_val_zero_rfI):
+ jump_on_val_zero_index = r(0);
+ I--;
+
+ do_jump_on_val_zero_index:
+ if (is_small(jump_on_val_zero_index)) {
+ jump_on_val_zero_index = signed_val(jump_on_val_zero_index);
+ if (jump_on_val_zero_index < Arg(2)) {
+ SET_I((BeamInstr *) (&Arg(3))[jump_on_val_zero_index]);
Goto(*I);
}
}
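The r/x/y-specialized select instructions all funnel into the same two dispatch strategies: i_select_val binary-searches the loader-sorted (value, label) pairs, while i_jump_on_val and its _zero variant range-check the small and index a dense jump table. A self-contained sketch of the binary-search half:

#include <stddef.h>
#include <stdint.h>

/* The loader has sorted the (value, label) pairs, so the emulator can
 * binary-search them at match time. */
struct pair { uint64_t val; const void *label; };

static const void *
select_val(uint64_t v, const struct pair *p, size_t n, const void *fail)
{
    const struct pair *low = p, *high = p + n;
    while (low < high) {
        const struct pair *mid = low + (high - low) / 2;
        if (v < mid->val)
            high = mid;
        else if (v > mid->val)
            low = mid + 1;
        else
            return mid->label;  /* SET_I(mid->addr) in the emulator */
    }
    return fail;                /* SET_I((BeamInstr *) Arg(1)) */
}

int main(void)
{
    static const char *l1 = "L1", *l2 = "L2", *l3 = "L3";
    struct pair tab[] = { {3, l1}, {7, l2}, {9, l3} };
    return select_val(7, tab, 3, "fail") == l2 ? 0 : 1;
}
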
@@ -1909,15 +2149,27 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_jump_on_val_sfII):
{
- Eterm index;
+ Eterm jump_on_val_index;
- GetArg1(0, index);
- if (is_small(index)) {
- index = (Uint) (signed_val(index) - Arg(3));
- if (index < Arg(2)) {
- SET_I((BeamInstr *) (&Arg(4))[index]);
+
+ OpCase(i_jump_on_val_yfII):
+ jump_on_val_index = yb(Arg(0));
+ goto do_jump_on_val_index;
+
+ OpCase(i_jump_on_val_xfII):
+ jump_on_val_index = xb(Arg(0));
+ goto do_jump_on_val_index;
+
+ OpCase(i_jump_on_val_rfII):
+ jump_on_val_index = r(0);
+ I--;
+
+ do_jump_on_val_index:
+ if (is_small(jump_on_val_index)) {
+ jump_on_val_index = (Uint) (signed_val(jump_on_val_index) - Arg(3));
+ if (jump_on_val_index < Arg(2)) {
+ SET_I((BeamInstr *) (&Arg(4))[jump_on_val_index]);
Goto(*I);
}
}
@@ -1925,6 +2177,32 @@ void process_main(void)
Goto(*I);
}
+ do_put_tuple: {
+ Eterm* hp = HTOP;
+
+ *hp++ = make_arityval(pt_arity);
+
+ do {
+ Eterm term = *I++;
+ switch (term & _TAG_IMMED1_MASK) {
+ case (R_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
+ *hp++ = r(0);
+ break;
+ case (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
+ *hp++ = x(term >> _TAG_IMMED1_SIZE);
+ break;
+ case (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER:
+ *hp++ = y(term >> _TAG_IMMED1_SIZE);
+ break;
+ default:
+ *hp++ = term;
+ break;
+ }
+ } while (--pt_arity != 0);
+ HTOP = hp;
+ Goto(*I);
+ }
+
/*
* All guards with zero arguments have special instructions:
* self/0
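do_put_tuple above consumes one descriptor word per element: a header-tagged word names the r/x/y register to read, and anything else is stored into the tuple as-is. A simplified, self-contained sketch of that decoding; the two-bit tag layout here is made up, while the real code uses _TAG_IMMED1_MASK and the R/X/Y_REG_DEF constants:

#include <stdint.h>

typedef uintptr_t Eterm;

/* Simplified descriptor encoding: low two bits select the source. */
enum src { SRC_IMMED = 0, SRC_XREG = 1, SRC_YREG = 2 };

static Eterm *
put_tuple(Eterm *htop, Eterm arity, const Eterm *descr,
          const Eterm *xregs, const Eterm *yregs)
{
    Eterm n = arity;
    *htop++ = arity;                 /* arity word (make_arityval() in ERTS) */
    while (n-- > 0) {
        Eterm d = *descr++;
        switch (d & 3) {
        case SRC_XREG: *htop++ = xregs[d >> 2]; break;
        case SRC_YREG: *htop++ = yregs[d >> 2]; break;
        default:       *htop++ = d;             break;  /* immediate term */
        }
    }
    return htop;                     /* new heap top, as HTOP = hp above */
}

int main(void)
{
    Eterm heap[4], x[1] = {11}, y[1] = {22};
    Eterm descr[3] = { (0 << 2) | SRC_XREG,  /* x(0) */
                       (0 << 2) | SRC_YREG,  /* y(0) */
                       400 };                /* immediate (low bits 00) */
    put_tuple(heap, 3, descr, x, y);         /* heap: 3, 11, 22, 400 */
    return 0;
}
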
@@ -2562,23 +2840,25 @@ void process_main(void)
OpCase(i_int_bnot_jsId):
{
- GetArg1(1, tmp_arg1);
- if (is_small(tmp_arg1)) {
- tmp_arg1 = make_small(~signed_val(tmp_arg1));
+ Eterm bnot_val;
+
+ GetArg1(1, bnot_val);
+ if (is_small(bnot_val)) {
+ bnot_val = make_small(~signed_val(bnot_val));
} else {
Uint live = Arg(2);
SWAPOUT;
reg[0] = r(0);
- reg[live] = tmp_arg1;
- tmp_arg1 = erts_gc_bnot(c_p, reg, live);
+ reg[live] = bnot_val;
+ bnot_val = erts_gc_bnot(c_p, reg, live);
r(0) = reg[0];
SWAPIN;
ERTS_HOLE_CHECK(c_p);
- if (is_nil(tmp_arg1)) {
+ if (is_nil(bnot_val)) {
goto lb_Cl_error;
}
}
- StoreBifResult(3, tmp_arg1);
+ StoreBifResult(3, bnot_val);
}
badarith:
@@ -2833,121 +3113,6 @@ void process_main(void)
goto do_schedule1;
}
- OpCase(i_select_tuple_arity_sfI):
- {
- GetArg1(0, tmp_arg1);
-
- if (is_tuple(tmp_arg1)) {
- tmp_arg1 = *tuple_val(tmp_arg1);
- goto do_binary_search;
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
- OpCase(i_select_big_sf):
- {
- Eterm* bigp;
- Uint arity;
- Eterm* given;
- Uint given_arity;
- Uint given_size;
-
- GetArg1(0, tmp_arg1);
- if (is_big(tmp_arg1)) {
-
- /*
- * The loader has sorted the bignumbers in descending order
- * on the arity word. Therefore, we know that the search
- * has failed as soon as we encounter an arity word less than
- * the arity word of the given number. There is a zero word
- * (less than any valid arity word) stored after the last bignumber.
- */
-
- given = big_val(tmp_arg1);
- given_arity = given[0];
- given_size = thing_arityval(given_arity);
- bigp = (Eterm *) &Arg(2);
- while ((arity = bigp[0]) > given_arity) {
- bigp += (TermWords(thing_arityval(arity) + 1) + 1) * (sizeof(BeamInstr)/sizeof(Eterm));
- }
- while (bigp[0] == given_arity) {
- if (memcmp(bigp+1, given+1, sizeof(Eterm)*given_size) == 0) {
- BeamInstr *tmp =
- ((BeamInstr *) (UWord) bigp) + TermWords(given_size + 1);
- SET_I((BeamInstr *) *tmp);
- Goto(*I);
- }
- bigp += (TermWords(thing_arityval(arity) + 1) + 1) * (sizeof(BeamInstr)/sizeof(Eterm));
- }
- }
-
- /*
- * Failed.
- */
-
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-
-#if defined(ARCH_64) && !HALFWORD_HEAP
- OpCase(i_select_float_sfI):
- {
- Uint f;
- int n;
- struct ValLabel {
- Uint f;
- BeamInstr* addr;
- };
- struct ValLabel* ptr;
-
- GetArg1(0, tmp_arg1);
- ASSERT(is_float(tmp_arg1));
- f = float_val(tmp_arg1)[1];
- n = Arg(2);
- ptr = (struct ValLabel *) &Arg(3);
- while (n-- > 0) {
- if (ptr->f == f) {
- SET_I(ptr->addr);
- Goto(*I);
- }
- ptr++;
- }
- SET_I((Eterm *) Arg(1));
- Goto(*I);
- }
-#else
- OpCase(i_select_float_sfI):
- {
- Uint fpart1;
- Uint fpart2;
- int n;
- struct ValLabel {
- Uint fpart1;
- Uint fpart2;
- BeamInstr* addr;
- };
- struct ValLabel* ptr;
-
- GetArg1(0, tmp_arg1);
- ASSERT(is_float(tmp_arg1));
- fpart1 = float_val(tmp_arg1)[1];
- fpart2 = float_val(tmp_arg1)[2];
-
- n = Arg(2);
- ptr = (struct ValLabel *) &Arg(3);
- while (n-- > 0) {
- if (ptr->fpart1 == fpart1 && ptr->fpart2 == fpart2) {
- SET_I(ptr->addr);
- Goto(*I);
- }
- ptr++;
- }
- SET_I((BeamInstr *) Arg(1));
- Goto(*I);
- }
-#endif
-
OpCase(set_tuple_element_sdP): {
Eterm element;
Eterm tuple;
@@ -2993,15 +3158,17 @@ void process_main(void)
the first argument. We also handle atom tags in the first
argument for backwards compatibility.
*/
- GetArg2(0, tmp_arg1, tmp_arg2);
- c_p->fvalue = tmp_arg2;
+ Eterm raise_val1;
+ Eterm raise_val2;
+ GetArg2(0, raise_val1, raise_val2);
+ c_p->fvalue = raise_val2;
if (c_p->freason == EXC_NULL) {
/* a safety check for the R10-0 case; should not happen */
c_p->ftrace = NIL;
c_p->freason = EXC_ERROR;
}
/* for R10-0 code, keep existing c_p->ftrace and hope it's correct */
- switch (tmp_arg1) {
+ switch (raise_val1) {
case am_throw:
c_p->freason = EXC_THROWN & ~EXF_SAVETRACE;
break;
@@ -3017,8 +3184,8 @@ void process_main(void)
passed from a user! Currently only expecting generated calls.
*/
struct StackTrace *s;
- c_p->ftrace = tmp_arg1;
- s = get_trace_from_exc(tmp_arg1);
+ c_p->ftrace = raise_val1;
+ s = get_trace_from_exc(raise_val1);
if (s == NULL) {
c_p->freason = EXC_ERROR;
} else {
@@ -3029,11 +3196,24 @@ void process_main(void)
goto find_func_info;
}
- OpCase(badmatch_s): {
- GetArg1(0, tmp_arg1);
- c_p->fvalue = tmp_arg1;
- c_p->freason = BADMATCH;
- }
+ {
+ Eterm badmatch_val;
+
+ OpCase(badmatch_y):
+ badmatch_val = yb(Arg(0));
+ goto do_badmatch;
+
+ OpCase(badmatch_x):
+ badmatch_val = xb(Arg(0));
+ goto do_badmatch;
+
+ OpCase(badmatch_r):
+ badmatch_val = r(0);
+
+ do_badmatch:
+ c_p->fvalue = badmatch_val;
+ c_p->freason = BADMATCH;
+ }
/* Fall through here */
find_func_info: {
@@ -3056,12 +3236,11 @@ void process_main(void)
*/
SWAPOUT;
reg[0] = r(0);
- tmp_arg1 = call_error_handler(c_p, I-3, reg);
+ I = call_error_handler(c_p, I-3, reg, am_undefined_function);
r(0) = reg[0];
SWAPIN;
- if (tmp_arg1) {
- SET_I(c_p->i);
- Dispatch();
+ if (I) {
+ Goto(*I);
}
/* Fall through */
@@ -3084,128 +3263,142 @@ void process_main(void)
}
}
- OpCase(call_nif):
- {
- /*
- * call_nif is always first instruction in function:
- *
- * I[-3]: Module
- * I[-2]: Function
- * I[-1]: Arity
- * I[0]: &&call_nif
- * I[1]: Function pointer to NIF function
- * I[2]: Pointer to erl_module_nif
- */
- BifFunction vbf;
-
- c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
- PROCESS_MAIN_CHK_LOCKS(c_p);
- tmp_arg2 = I[-1];
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ {
+ Eterm nif_bif_result;
+ Eterm bif_nif_arity;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- {
- typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
- NifF* fp = vbf = (NifF*) I[1];
- struct enif_environment_t env;
- erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2]);
- reg[0] = r(0);
- tmp_arg1 = (*fp)(&env, tmp_arg2, reg);
- erts_post_nif(&env);
- }
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- goto apply_bif_or_nif_epilogue;
-
- OpCase(apply_bif):
- /*
- * At this point, I points to the code[3] in the export entry for
- * the BIF:
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&apply_bif
- * code[4]: Function pointer to BIF function
- */
+ OpCase(call_nif):
+ {
+ /*
+ * call_nif is always first instruction in function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ */
+ BifFunction vbf;
- c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
- c_p->i = I; /* In case we apply check_process_code/2. */
- c_p->arity = 0; /* To allow garbage collection on ourselves
- * (check_process_code/2).
- */
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
- vbf = (BifFunction) Arg(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- tmp_arg2 = I[-1];
- ASSERT(tmp_arg2 <= 3);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- switch (tmp_arg2) {
- case 3:
+ c_p->current = I-3; /* current and vbf set to please handle_error */
+ SWAPOUT;
+ c_p->fcalls = FCALLS - 1;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ bif_nif_arity = I[-1];
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
{
- Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- tmp_arg1 = (*bf)(c_p, r(0), x(1), x(2), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
- PROCESS_MAIN_CHK_LOCKS(c_p);
+ typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
+ NifF* fp = vbf = (NifF*) I[1];
+ struct enif_environment_t env;
+ erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2]);
+ reg[0] = r(0);
+ nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
+ erts_post_nif(&env);
}
- break;
- case 2:
- {
- Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- tmp_arg1 = (*bf)(c_p, r(0), x(1), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
- PROCESS_MAIN_CHK_LOCKS(c_p);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ goto apply_bif_or_nif_epilogue;
+
+ OpCase(apply_bif):
+ /*
+ * At this point, I points to the code[3] in the export entry for
+ * the BIF:
+ *
+ * code[0]: Module
+ * code[1]: Function
+ * code[2]: Arity
+ * code[3]: &&apply_bif
+ * code[4]: Function pointer to BIF function
+ */
+
+ c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
+ c_p->i = I; /* In case we apply check_process_code/2. */
+ c_p->arity = 0; /* To allow garbage collection on ourselves
+ * (check_process_code/2).
+ */
+ SWAPOUT;
+ c_p->fcalls = FCALLS - 1;
+ vbf = (BifFunction) Arg(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ bif_nif_arity = I[-1];
+ ASSERT(bif_nif_arity <= 3);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ switch (bif_nif_arity) {
+ case 3:
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm, Eterm, BeamInstr*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ nif_bif_result = (*bf)(c_p, r(0), x(1), x(2), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+ break;
+ case 2:
+ {
+ Eterm (*bf)(Process*, Eterm, Eterm, BeamInstr*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ nif_bif_result = (*bf)(c_p, r(0), x(1), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+ break;
+ case 1:
+ {
+ Eterm (*bf)(Process*, Eterm, BeamInstr*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ nif_bif_result = (*bf)(c_p, r(0), I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ }
+ break;
+ case 0:
+ {
+ Eterm (*bf)(Process*, BeamInstr*) = vbf;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ nif_bif_result = (*bf)(c_p, I);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p) ||
+ is_non_value(nif_bif_result));
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ break;
+ }
+ default:
+ erl_exit(1, "apply_bif: invalid arity: %u\n",
+ bif_nif_arity);
}
- break;
- case 1:
- {
- Eterm (*bf)(Process*, Eterm, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- tmp_arg1 = (*bf)(c_p, r(0), I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
- PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ apply_bif_or_nif_epilogue:
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ if (c_p->mbuf) {
+ reg[0] = r(0);
+ nif_bif_result = erts_gc_after_bif_call(c_p, nif_bif_result,
+ reg, bif_nif_arity);
+ r(0) = reg[0];
}
- break;
- case 0:
- {
- Eterm (*bf)(Process*, BeamInstr*) = vbf;
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- tmp_arg1 = (*bf)(c_p, I);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(tmp_arg1));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- break;
+ SWAPIN; /* There might have been a garbage collection. */
+ FCALLS = c_p->fcalls;
+ if (is_value(nif_bif_result)) {
+ r(0) = nif_bif_result;
+ CHECK_TERM(r(0));
+ SET_I(c_p->cp);
+ Goto(*I);
+ } else if (c_p->freason == TRAP) {
+ SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
+ r(0) = c_p->def_arg_reg[0];
+ x(1) = c_p->def_arg_reg[1];
+ x(2) = c_p->def_arg_reg[2];
+ Dispatch();
}
- }
-apply_bif_or_nif_epilogue:
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- ERTS_HOLE_CHECK(c_p);
- if (c_p->mbuf) {
reg[0] = r(0);
- tmp_arg1 = erts_gc_after_bif_call(c_p, tmp_arg1, reg, tmp_arg2);
- r(0) = reg[0];
- }
- SWAPIN; /* There might have been a garbage collection. */
- FCALLS = c_p->fcalls;
- if (is_value(tmp_arg1)) {
- r(0) = tmp_arg1;
- CHECK_TERM(r(0));
- SET_I(c_p->cp);
- Goto(*I);
- } else if (c_p->freason == TRAP) {
- SET_I(*((BeamInstr **) (UWord) ((c_p)->def_arg_reg + 3)));
- r(0) = c_p->def_arg_reg[0];
- x(1) = c_p->def_arg_reg[1];
- x(2) = c_p->def_arg_reg[2];
- Dispatch();
+ I = handle_error(c_p, c_p->cp, reg, vbf);
+ goto post_error_handling;
}
- reg[0] = r(0);
- I = handle_error(c_p, c_p->cp, reg, vbf);
- goto post_error_handling;
}
OpCase(i_get_sd):
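call_nif and apply_bif now share one epilogue. Both leave their outcome in nif_bif_result, and the code above distinguishes three cases: a proper term (store in x(0), return to the caller), a non-value with c_p->freason == TRAP (continue at the trap address with restored argument registers), and any other non-value (raise an exception via handle_error()). The in-band failure signal works because a reserved bit pattern that can never be a valid term marks "no result"; a sketch of that convention, with a made-up pattern:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t Eterm;

/* Illustrative definition; ERTS reserves a specific non-term pattern. */
#define THE_NON_VALUE   ((Eterm)0)
#define is_value(x)     ((x) != THE_NON_VALUE)
#define is_non_value(x) ((x) == THE_NON_VALUE)

int main(void)
{
    Eterm ok = 42, failed = THE_NON_VALUE;
    printf("%d %d\n", is_value(ok), is_non_value(failed)); /* prints 1 1 */
    return 0;
}
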
@@ -3218,11 +3411,26 @@ apply_bif_or_nif_epilogue:
StoreBifResult(1, result);
}
- OpCase(case_end_s):
- GetArg1(0, tmp_arg1);
- c_p->fvalue = tmp_arg1;
- c_p->freason = EXC_CASE_CLAUSE;
- goto find_func_info;
+ {
+ Eterm case_end_val;
+
+ OpCase(case_end_x):
+ case_end_val = xb(Arg(0));
+ goto do_case_end;
+
+ OpCase(case_end_y):
+ case_end_val = yb(Arg(0));
+ goto do_case_end;
+
+ OpCase(case_end_r):
+ case_end_val = r(0);
+ I--;
+
+ do_case_end:
+ c_p->fvalue = case_end_val;
+ c_p->freason = EXC_CASE_CLAUSE;
+ goto find_func_info;
+ }
OpCase(if_end):
c_p->freason = EXC_IF_CLAUSE;
@@ -3235,10 +3443,13 @@ apply_bif_or_nif_epilogue:
}
OpCase(try_case_end_s):
- GetArg1(0, tmp_arg1);
- c_p->fvalue = tmp_arg1;
- c_p->freason = EXC_TRY_CLAUSE;
- goto find_func_info;
+ {
+ Eterm try_case_end_val;
+ GetArg1(0, try_case_end_val);
+ c_p->fvalue = try_case_end_val;
+ c_p->freason = EXC_TRY_CLAUSE;
+ goto find_func_info;
+ }
/*
* Construction of binaries using new instructions.
@@ -3786,19 +3997,20 @@ apply_bif_or_nif_epilogue:
Eterm header;
BeamInstr *next;
Uint slots;
+ Eterm context;
OpCase(i_bs_start_match2_rfIId): {
- tmp_arg1 = r(0);
+ context = r(0);
do_start_match:
slots = Arg(2);
- if (!is_boxed(tmp_arg1)) {
+ if (!is_boxed(context)) {
ClauseFail();
}
PreFetch(4, next);
- header = *boxed_val(tmp_arg1);
+ header = *boxed_val(context);
if (header_is_bin_matchstate(header)) {
- ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
+ ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context);
Uint actual_slots = HEADER_NUM_SLOTS(header);
ms->save_offset[0] = ms->mb.offset;
if (actual_slots < slots) {
@@ -3806,8 +4018,8 @@ apply_bif_or_nif_epilogue:
Uint live = Arg(1);
Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
- TestHeapPreserve(wordsneeded, live, tmp_arg1);
- ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
+ TestHeapPreserve(wordsneeded, live, context);
+ ms = (ErlBinMatchState *) boxed_val(context);
dst = (ErlBinMatchState *) HTOP;
*dst = *ms;
*HTOP = HEADER_BIN_MATCHSTATE(slots);
@@ -3819,12 +4031,12 @@ apply_bif_or_nif_epilogue:
Eterm result;
Uint live = Arg(1);
Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots);
- TestHeapPreserve(wordsneeded, live, tmp_arg1);
+ TestHeapPreserve(wordsneeded, live, context);
HEAP_TOP(c_p) = HTOP;
#ifdef DEBUG
c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */
#endif
- result = erts_bs_start_match_2(c_p, tmp_arg1, slots);
+ result = erts_bs_start_match_2(c_p, context, slots);
HTOP = HEAP_TOP(c_p);
HEAP_SPACE_VERIFIED(0);
if (is_non_value(result)) {
@@ -3838,12 +4050,12 @@ apply_bif_or_nif_epilogue:
NextPF(4, next);
}
OpCase(i_bs_start_match2_xfIId): {
- tmp_arg1 = xb(Arg(0));
+ context = xb(Arg(0));
I++;
goto do_start_match;
}
OpCase(i_bs_start_match2_yfIId): {
- tmp_arg1 = yb(Arg(0));
+ context = yb(Arg(0));
I++;
goto do_start_match;
}
@@ -3936,93 +4148,105 @@ apply_bif_or_nif_epilogue:
NextPF(2, next);
}
+ {
+ Eterm bs_get_integer8_context;
+
OpCase(i_bs_get_integer_8_rfd): {
- tmp_arg1 = r(0);
- goto do_bs_get_integer_8;
- }
+ bs_get_integer8_context = r(0);
+ goto do_bs_get_integer_8;
+ }
OpCase(i_bs_get_integer_8_xfd): {
- tmp_arg1 = xb(Arg(0));
- I++;
- }
+ bs_get_integer8_context = xb(Arg(0));
+ I++;
+ }
do_bs_get_integer_8: {
- ErlBinMatchBuffer *_mb;
- Eterm _result;
- _mb = ms_matchbuffer(tmp_arg1);
- if (_mb->size - _mb->offset < 8) {
- ClauseFail();
- }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
- } else {
- _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
- _mb->offset += 8;
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ _mb = ms_matchbuffer(bs_get_integer8_context);
+ if (_mb->size - _mb->offset < 8) {
+ ClauseFail();
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 8, 0, _mb);
+ } else {
+ _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]);
+ _mb->offset += 8;
+ }
+ StoreBifResult(1, _result);
}
- StoreBifResult(1, _result);
}
- OpCase(i_bs_get_integer_16_rfd): {
- tmp_arg1 = r(0);
+ {
+ Eterm bs_get_integer_16_context;
+
+ OpCase(i_bs_get_integer_16_rfd):
+ bs_get_integer_16_context = r(0);
goto do_bs_get_integer_16;
- }
- OpCase(i_bs_get_integer_16_xfd): {
- tmp_arg1 = xb(Arg(0));
+ OpCase(i_bs_get_integer_16_xfd):
+ bs_get_integer_16_context = xb(Arg(0));
I++;
- }
- do_bs_get_integer_16: {
- ErlBinMatchBuffer *_mb;
- Eterm _result;
- _mb = ms_matchbuffer(tmp_arg1);
- if (_mb->size - _mb->offset < 16) {
- ClauseFail();
- }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
- } else {
- _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
- _mb->offset += 16;
+ do_bs_get_integer_16:
+ {
+ ErlBinMatchBuffer *_mb;
+ Eterm _result;
+ _mb = ms_matchbuffer(bs_get_integer_16_context);
+ if (_mb->size - _mb->offset < 16) {
+ ClauseFail();
+ }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _result = erts_bs_get_integer_2(c_p, 16, 0, _mb);
+ } else {
+ _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset)));
+ _mb->offset += 16;
+ }
+ StoreBifResult(1, _result);
}
- StoreBifResult(1, _result);
}
- OpCase(i_bs_get_integer_32_rfId): {
- tmp_arg1 = r(0);
+ {
+ Eterm bs_get_integer_32_context;
+
+ OpCase(i_bs_get_integer_32_rfId):
+ bs_get_integer_32_context = r(0);
goto do_bs_get_integer_32;
- }
+
- OpCase(i_bs_get_integer_32_xfId): {
- tmp_arg1 = xb(Arg(0));
+ OpCase(i_bs_get_integer_32_xfId):
+ bs_get_integer_32_context = xb(Arg(0));
I++;
- }
- do_bs_get_integer_32: {
- ErlBinMatchBuffer *_mb;
- Uint32 _integer;
- Eterm _result;
- _mb = ms_matchbuffer(tmp_arg1);
- if (_mb->size - _mb->offset < 32) { ClauseFail(); }
- if (BIT_OFFSET(_mb->offset) != 0) {
- _integer = erts_bs_get_unaligned_uint32(_mb);
- } else {
- _integer = get_int32(_mb->base + _mb->offset/8);
- }
- _mb->offset += 32;
+
+ do_bs_get_integer_32:
+ {
+ ErlBinMatchBuffer *_mb;
+ Uint32 _integer;
+ Eterm _result;
+ _mb = ms_matchbuffer(bs_get_integer_32_context);
+ if (_mb->size - _mb->offset < 32) { ClauseFail(); }
+ if (BIT_OFFSET(_mb->offset) != 0) {
+ _integer = erts_bs_get_unaligned_uint32(_mb);
+ } else {
+ _integer = get_int32(_mb->base + _mb->offset/8);
+ }
+ _mb->offset += 32;
#if !defined(ARCH_64) || HALFWORD_HEAP
- if (IS_USMALL(0, _integer)) {
+ if (IS_USMALL(0, _integer)) {
#endif
- _result = make_small(_integer);
+ _result = make_small(_integer);
#if !defined(ARCH_64) || HALFWORD_HEAP
- } else {
- TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
- _result = uint_to_big((Uint) _integer, HTOP);
- HTOP += BIG_UINT_HEAP_SIZE;
- HEAP_SPACE_VERIFIED(0);
- }
+ } else {
+ TestHeap(BIG_UINT_HEAP_SIZE, Arg(1));
+ _result = uint_to_big((Uint) _integer, HTOP);
+ HTOP += BIG_UINT_HEAP_SIZE;
+ HEAP_SPACE_VERIFIED(0);
+ }
#endif
- StoreBifResult(2, _result);
+ StoreBifResult(2, _result);
+ }
}
/* Operands: Size Live Fail Flags Dst */
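The 8-, 16-, and 32-bit integer extraction opcodes above all follow the same pattern: when the match offset is byte-aligned and enough bits remain, read straight from the buffer; otherwise fall back to the generic erts_bs_get_integer_2(). A self-contained sketch of the aligned 16-bit case:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch of the aligned fast path. Returns 1 and advances the offset
 * on success; 0 means the emulator would fail the clause or take the
 * generic (bit-aligned) path instead. */
static int get_int16_aligned(const uint8_t *base, size_t size_bits,
                             size_t *offset_bits, uint16_t *out)
{
    if (size_bits - *offset_bits < 16)
        return 0;                            /* ClauseFail() in the emulator */
    if (*offset_bits % 8 != 0)
        return 0;                            /* bit-aligned: slow path */
    const uint8_t *p = base + *offset_bits / 8;
    *out = (uint16_t)((p[0] << 8) | p[1]);   /* big-endian, as get_int16() */
    *offset_bits += 16;
    return 1;
}

int main(void)
{
    uint8_t buf[] = { 0x12, 0x34 };
    size_t off = 0;
    uint16_t v;
    if (get_int16_aligned(buf, sizeof(buf) * 8, &off, &v))
        printf("0x%04x\n", v);               /* prints 0x1234 */
    return 0;
}
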
@@ -4120,54 +4344,64 @@ apply_bif_or_nif_epilogue:
StoreBifResult(3, result);
}
- /* Operands: MatchContext Fail Dst */
+ {
+ Eterm get_utf8_context;
+
+ /* Operands: MatchContext Fail Dst */
OpCase(i_bs_get_utf8_rfd): {
- tmp_arg1 = r(0);
- goto do_bs_get_utf8;
- }
+ get_utf8_context = r(0);
+ goto do_bs_get_utf8;
+ }
OpCase(i_bs_get_utf8_xfd): {
- tmp_arg1 = xb(Arg(0));
- I++;
- }
+ get_utf8_context = xb(Arg(0));
+ I++;
+ }
- /*
- * tmp_arg1 = match_context
- * Operands: Fail Dst
- */
+ /*
+ * get_utf8_context = match_context
+ * Operands: Fail Dst
+ */
- do_bs_get_utf8: {
- Eterm result = erts_bs_get_utf8(ms_matchbuffer(tmp_arg1));
- if (is_non_value(result)) {
- ClauseFail();
+ do_bs_get_utf8: {
+ Eterm result = erts_bs_get_utf8(ms_matchbuffer(get_utf8_context));
+ if (is_non_value(result)) {
+ ClauseFail();
+ }
+ StoreBifResult(1, result);
}
- StoreBifResult(1, result);
}
- /* Operands: MatchContext Fail Flags Dst */
+ {
+ Eterm get_utf16_context;
+
+ /* Operands: MatchContext Fail Flags Dst */
OpCase(i_bs_get_utf16_rfId): {
- tmp_arg1 = r(0);
- goto do_bs_get_utf16;
- }
+ get_utf16_context = r(0);
+ goto do_bs_get_utf16;
+ }
OpCase(i_bs_get_utf16_xfId): {
- tmp_arg1 = xb(Arg(0));
- I++;
- }
+ get_utf16_context = xb(Arg(0));
+ I++;
+ }
- /*
- * tmp_arg1 = match_context
- * Operands: Fail Flags Dst
- */
- do_bs_get_utf16: {
- Eterm result = erts_bs_get_utf16(ms_matchbuffer(tmp_arg1), Arg(1));
- if (is_non_value(result)) {
- ClauseFail();
+ /*
+ * get_utf16_context = match_context
+ * Operands: Fail Flags Dst
+ */
+ do_bs_get_utf16: {
+ Eterm result = erts_bs_get_utf16(ms_matchbuffer(get_utf16_context),
+ Arg(1));
+ if (is_non_value(result)) {
+ ClauseFail();
+ }
+ StoreBifResult(2, result);
}
- StoreBifResult(2, result);
}
{
+ Eterm context_to_binary_context;
ErlBinMatchBuffer* mb;
ErlSubBin* sb;
Uint size;
@@ -4176,27 +4410,29 @@ apply_bif_or_nif_epilogue:
Uint hole_size;
OpCase(bs_context_to_binary_r): {
- tmp_arg1 = x0;
+ context_to_binary_context = x0;
I -= 2;
goto do_context_to_binary;
}
/* Unfortunately, inlining can generate this instruction. */
OpCase(bs_context_to_binary_y): {
- tmp_arg1 = yb(Arg(0));
+ context_to_binary_context = yb(Arg(0));
goto do_context_to_binary0;
}
OpCase(bs_context_to_binary_x): {
- tmp_arg1 = xb(Arg(0));
+ context_to_binary_context = xb(Arg(0));
do_context_to_binary0:
I--;
}
do_context_to_binary:
- if (is_boxed(tmp_arg1) && header_is_bin_matchstate(*boxed_val(tmp_arg1))) {
- ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(tmp_arg1);
+ if (is_boxed(context_to_binary_context) &&
+ header_is_bin_matchstate(*boxed_val(context_to_binary_context))) {
+ ErlBinMatchState* ms;
+ ms = (ErlBinMatchState *) boxed_val(context_to_binary_context);
mb = &ms->mb;
offs = ms->save_offset[0];
size = mb->size - offs;
@@ -4205,17 +4441,17 @@ apply_bif_or_nif_epilogue:
Next(2);
OpCase(i_bs_get_binary_all_reuse_rfI): {
- tmp_arg1 = x0;
+ context_to_binary_context = x0;
goto do_bs_get_binary_all_reuse;
}
OpCase(i_bs_get_binary_all_reuse_xfI): {
- tmp_arg1 = xb(Arg(0));
+ context_to_binary_context = xb(Arg(0));
I++;
}
do_bs_get_binary_all_reuse:
- mb = ms_matchbuffer(tmp_arg1);
+ mb = ms_matchbuffer(context_to_binary_context);
size = mb->size - mb->offset;
if (size % Arg(1) != 0) {
ClauseFail();
@@ -4224,7 +4460,7 @@ apply_bif_or_nif_epilogue:
do_bs_get_binary_all_reuse_common:
orig = mb->orig;
- sb = (ErlSubBin *) boxed_val(tmp_arg1);
+ sb = (ErlSubBin *) boxed_val(context_to_binary_context);
hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
sb->thing_word = HEADER_SUB_BIN;
sb->size = BYTE_OFFSET(size);
@@ -4240,12 +4476,14 @@ apply_bif_or_nif_epilogue:
}
{
+ Eterm match_string_context;
+
OpCase(i_bs_match_string_rfII): {
- tmp_arg1 = r(0);
+ match_string_context = r(0);
goto do_bs_match_string;
}
OpCase(i_bs_match_string_xfII): {
- tmp_arg1 = xb(Arg(0));
+ match_string_context = xb(Arg(0));
I++;
}
@@ -4260,7 +4498,7 @@ apply_bif_or_nif_epilogue:
PreFetch(3, next);
bits = Arg(1);
bytes = (byte *) Arg(2);
- mb = ms_matchbuffer(tmp_arg1);
+ mb = ms_matchbuffer(match_string_context);
if (mb->size - mb->offset < bits) {
ClauseFail();
}
@@ -4723,7 +4961,7 @@ apply_bif_or_nif_epilogue:
NextPF(2, next);
}
- OpCase(fmove_new_ld): {
+ OpCase(fmove_ld): {
Eterm fr = Arg(0);
Eterm dest = make_float(HTOP);
@@ -4753,11 +4991,6 @@ apply_bif_or_nif_epilogue:
NextPF(2, next);
}
- /*
- * Old allocating fmove.
- */
-
-
#ifdef NO_FPE_SIGNALS
OpCase(fclearerror):
OpCase(i_fcheckerror):
@@ -4969,12 +5202,11 @@ apply_bif_or_nif_epilogue:
OpCase(i_debug_breakpoint): {
SWAPOUT;
reg[0] = r(0);
- tmp_arg1 = call_breakpoint_handler(c_p, I-3, reg);
+ I = call_error_handler(c_p, I-3, reg, am_breakpoint);
r(0) = reg[0];
SWAPIN;
- if (tmp_arg1) {
- SET_I(c_p->i);
- Dispatch();
+ if (I) {
+ Goto(*I);
}
goto no_error_handler;
}
@@ -5724,8 +5956,8 @@ build_stacktrace(Process* c_p, Eterm exc) {
}
-static Eterm
-call_error_handler(Process* p, BeamInstr* fi, Eterm* reg)
+static BeamInstr*
+call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
{
Eterm* hp;
Export* ep;
@@ -5737,14 +5969,12 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg)
/*
* Search for the error_handler module.
*/
- ep = erts_find_function(erts_proc_get_error_handler(p),
- am_undefined_function, 3);
+ ep = erts_find_function(erts_proc_get_error_handler(p), func, 3);
if (ep == NULL) { /* No error handler */
p->current = fi;
p->freason = EXC_UNDEF;
return 0;
}
- p->i = ep->address;
/*
* Create a list with all arguments in the x registers.
@@ -5764,63 +5994,14 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg)
}
/*
- * Set up registers for call to error_handler:undefined_function/3.
+ * Set up registers for call to error_handler:<func>/3.
*/
reg[0] = fi[0];
reg[1] = fi[1];
reg[2] = args;
- return 1;
-}
-
-static Eterm
-call_breakpoint_handler(Process* p, BeamInstr* fi, Eterm* reg)
-{
- Eterm* hp;
- Export* ep;
- int arity;
- Eterm args;
- Uint sz;
- int i;
-
- /*
- * Search for error handler module.
- */
- ep = erts_find_function(erts_proc_get_error_handler(p),
- am_breakpoint, 3);
- if (ep == NULL) { /* No error handler */
- p->current = fi;
- p->freason = EXC_UNDEF;
- return 0;
- }
- p->i = ep->address;
-
- /*
- * Create a list with all arguments in the x registers.
- */
-
- arity = fi[2];
- sz = 2 * arity;
- if (HeapWordsLeft(p) < sz) {
- erts_garbage_collect(p, sz, reg, arity);
- }
- hp = HEAP_TOP(p);
- HEAP_TOP(p) += sz;
- args = NIL;
- for (i = arity-1; i >= 0; i--) {
- args = CONS(hp, reg[i], args);
- hp += 2;
- }
-
- /*
- * Set up registers for call to error_handler:breakpoint/3.
- */
- reg[0] = fi[0];
- reg[1] = fi[1];
- reg[2] = args;
- return 1;
+ return ep->address;
}
-
static Export*
apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, Eterm* reg)
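The two nearly identical handlers are merged above: call_error_handler() now takes the callee name (am_undefined_function or am_breakpoint) and returns the handler's code address directly instead of going through p->i. The argument list it builds is ordinary cons-cell construction from the x registers; a self-contained sketch of that pattern, with untagged cell pointers purely for illustration (ERTS tags them):

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t Eterm;
#define NIL ((Eterm)~(uintptr_t)0)

/* Each cons cell is two adjacent heap words: [head | tail]. Building
 * from the last register down yields the list in argument order. */
static Eterm cons_args(Eterm *hp, const Eterm *reg, int arity)
{
    Eterm args = NIL;
    for (int i = arity - 1; i >= 0; i--) {
        hp[0] = reg[i];       /* head */
        hp[1] = args;         /* tail */
        args = (Eterm)hp;
        hp += 2;
    }
    return args;
}

int main(void)
{
    Eterm heap[6], regs[3] = {1, 2, 3};
    for (Eterm p = cons_args(heap, regs, 3); p != NIL; p = ((Eterm *)p)[1])
        printf("%lu ", (unsigned long)((Eterm *)p)[0]);  /* prints 1 2 3 */
    printf("\n");
    return 0;
}
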
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index df5602b040..e6448931eb 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -89,13 +89,12 @@ typedef struct {
} Label;
/*
- * Type for a operand for a generic instruction.
+ * Type for an operand for a generic instruction.
*/
typedef struct {
unsigned type; /* Type of operand. */
- BeamInstr val; /* Value of operand. */
- Uint bigarity; /* Arity for bignumbers (only). */
+ BeamInstr val; /* Value of operand. */
} GenOpArg;
/*
@@ -326,11 +325,6 @@ typedef struct {
Literal* literals; /* Array of literals. */
LiteralPatch* literal_patches; /* Operands that need to be patched. */
Uint total_literal_size; /* Total heap size for all literals. */
-
- /*
- * Floating point.
- */
- int new_float_instructions; /* New allocation scheme for floating point. */
} LoaderState;
typedef struct {
@@ -476,12 +470,14 @@ static int read_code_header(LoaderState* stp);
static int load_code(LoaderState* stp);
static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
GenOpArg Tuple, GenOpArg Dst);
-static GenOp* gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+static GenOp* gen_split_values(LoaderState* stp, GenOpArg S,
+ GenOpArg TypeFail, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest);
static GenOp* gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest);
-static GenOp* gen_select_big(LoaderState* stp, GenOpArg S, GenOpArg Fail,
- GenOpArg Size, GenOpArg* Rest);
+static GenOp* gen_select_literals(LoaderState* stp, GenOpArg S,
+ GenOpArg Fail, GenOpArg Size,
+ GenOpArg* Rest);
static GenOp* const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest);
static GenOp* gen_func_info(LoaderState* stp, GenOpArg mod, GenOpArg Func,
@@ -818,7 +814,6 @@ init_state(LoaderState* stp)
stp->total_literal_size = 0;
stp->literal_patches = 0;
stp->string_patches = 0;
- stp->new_float_instructions = 0;
stp->may_load_nif = 0;
stp->on_load = 0;
}
@@ -1618,7 +1613,6 @@ load_code(LoaderState* stp)
BeamInstr val;
BeamInstr words = 0;
- stp->new_float_instructions = 1;
GetTagAndValue(stp, tag, n);
VerifyTag(stp, tag, TAG_u);
while (n-- > 0) {
@@ -1772,7 +1766,7 @@ load_code(LoaderState* stp)
}
stp->specific_op = specific;
- CodeNeed(opc[stp->specific_op].sz+2); /* Extra margin for packing */
+ CodeNeed(opc[stp->specific_op].sz+16); /* Extra margin for packing */
code[ci++] = BeamOpCode(stp->specific_op);
}
@@ -1936,7 +1930,8 @@ load_code(LoaderState* stp)
}
code[ci++] = (BeamInstr) stp->import[i].bf;
break;
- case 'P': /* Byte offset into tuple */
+ case 'P': /* Byte offset into tuple or stack */
+ case 'Q': /* Like 'P', but packable */
VerifyTag(stp, tag, TAG_u);
tmp = tmp_op->a[arg].val;
code[ci++] = (BeamInstr) ((tmp_op->a[arg].val+1) * sizeof(Eterm));
@@ -1957,84 +1952,6 @@ load_code(LoaderState* stp)
}
/*
- * Load any list arguments using the primitive tags.
- */
-
- for ( ; arg < tmp_op->arity; arg++) {
- switch (tmp_op->a[arg].type) {
- case TAG_i:
- CodeNeed(1);
- code[ci++] = make_small(tmp_op->a[arg].val);
- break;
- case TAG_u:
- case TAG_a:
- case TAG_v:
- CodeNeed(1);
- code[ci++] = tmp_op->a[arg].val;
- break;
- case TAG_f:
- CodeNeed(1);
- code[ci] = stp->labels[tmp_op->a[arg].val].patches;
- stp->labels[tmp_op->a[arg].val].patches = ci;
- ci++;
- break;
- case TAG_q:
- {
- Eterm lit;
-
- lit = stp->literals[tmp_op->a[arg].val].term;
- if (is_big(lit)) {
- Eterm* bigp;
- Eterm *tmp;
- Uint size;
- Uint term_size;
-
- bigp = big_val(lit);
- term_size = bignum_header_arity(*bigp);
- size = TermWords(term_size + 1);
- CodeNeed(size);
- tmp = (Eterm *) (code + ci);
- *tmp++ = *bigp++;
- while (term_size-- > 0) {
- *tmp++ = *bigp++;
- }
- ci +=size;
- } else if (is_float(lit)) {
-#if defined(ARCH_64) && !HALFWORD_HEAP
- CodeNeed(1);
- code[ci++] = float_val(stp->literals[tmp_op->a[arg].val].term)[1];
-#elif HALFWORD_HEAP
- Eterm* fptr;
- Uint size;
- Eterm *tmp;
-
- fptr = float_val(stp->literals[tmp_op->a[arg].val].term)+1;
- size = TermWords(2);
- CodeNeed(size);
- tmp = (Eterm *) (code + ci);
- *tmp++ = *fptr++;
- *tmp = *fptr;
- ci += size;
-#else
- Eterm* fptr;
-
- fptr = float_val(stp->literals[tmp_op->a[arg].val].term)+1;
- CodeNeed(2);
- code[ci++] = *fptr++;
- code[ci++] = *fptr;
-#endif
- } else {
- LoadError0(stp, "literal is neither float nor big");
- }
- }
- break;
- default:
- LoadError1(stp, "unsupported primitive type '%c'",
- tag_to_letter[tmp_op->a[arg].type]);
- }
- }
-
- /*
* The packing engine.
*/
if (opc[stp->specific_op].pack[0]) {
@@ -2057,6 +1974,11 @@ load_code(LoaderState* stp)
case '6': /* Shift 16 steps */
packed = (packed << BEAM_LOOSE_SHIFT) | code[--ci];
break;
+#ifdef ARCH_64
+ case 'w': /* Shift 32 steps */
+ packed = (packed << BEAM_WIDE_SHIFT) | code[--ci];
+ break;
+#endif
case 'p': /* Put instruction (from stack). */
code[ci++] = *--sp;
break;
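The new 'w' case extends the packing engine to wide fields on 64-bit
machines, where two 32-bit operands fit in one BeamInstr word. A standalone
sketch of the idea; WIDE_SHIFT is an illustrative constant playing the role
of BEAM_WIDE_SHIFT, and the real engine is driven by per-instruction pack
strings rather than hard-coded shifts:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WIDE_SHIFT 32                  /* cf. BEAM_WIDE_SHIFT */

    int main(void)
    {
        uint64_t a = 123, b = 456;

        /* Load time: shift the word 32 steps, or in the next operand. */
        uint64_t packed = (a << WIDE_SHIFT) | b;

        /* Run time: the emulator shifts and masks the operands out. */
        assert((packed >> WIDE_SHIFT) == a);
        assert((packed & 0xffffffffu) == b);
        printf("0x%016llx\n", (unsigned long long) packed);
        return 0;
    }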
@@ -2072,6 +1994,58 @@ load_code(LoaderState* stp)
}
/*
+ * Load any list arguments using the primitive tags.
+ */
+
+ for ( ; arg < tmp_op->arity; arg++) {
+ switch (tmp_op->a[arg].type) {
+ case TAG_i:
+ CodeNeed(1);
+ code[ci++] = make_small(tmp_op->a[arg].val);
+ break;
+ case TAG_u:
+ case TAG_a:
+ case TAG_v:
+ CodeNeed(1);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ case TAG_f:
+ CodeNeed(1);
+ code[ci] = stp->labels[tmp_op->a[arg].val].patches;
+ stp->labels[tmp_op->a[arg].val].patches = ci;
+ ci++;
+ break;
+ case TAG_r:
+ CodeNeed(1);
+ code[ci++] = (R_REG_DEF << _TAG_PRIMARY_SIZE) |
+ TAG_PRIMARY_HEADER;
+ break;
+ case TAG_x:
+ CodeNeed(1);
+ code[ci++] = (tmp_op->a[arg].val << _TAG_IMMED1_SIZE) |
+ (X_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER;
+ break;
+ case TAG_y:
+ CodeNeed(1);
+ code[ci++] = (tmp_op->a[arg].val << _TAG_IMMED1_SIZE) |
+ (Y_REG_DEF << _TAG_PRIMARY_SIZE) | TAG_PRIMARY_HEADER;
+ break;
+ case TAG_n:
+ CodeNeed(1);
+ code[ci++] = NIL;
+ break;
+ case TAG_q:
+ CodeNeed(1);
+ new_literal_patch(stp, ci);
+ code[ci++] = tmp_op->a[arg].val;
+ break;
+ default:
+ LoadError1(stp, "unsupported primitive type '%c'",
+ tag_to_letter[tmp_op->a[arg].type]);
+ }
+ }
+
+ /*
* Handle a few special cases.
*/
switch (stp->specific_op) {
@@ -2239,11 +2213,12 @@ use_jump_tab(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
}
/*
- * Predicate to test whether all values in a table are big numbers.
+ * Predicate to test whether all values in a table are either
+ * floats or bignums.
*/
static int
-all_values_are_big(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
+floats_or_bignums(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
{
int i;
@@ -2255,9 +2230,6 @@ all_values_are_big(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
if (Rest[i].type != TAG_q) {
return 0;
}
- if (is_not_big(stp->literals[Rest[i].val].term)) {
- return 0;
- }
if (Rest[i+1].type != TAG_f) {
return 0;
}
@@ -2317,6 +2289,14 @@ mixed_types(LoaderState* stp, GenOpArg Size, GenOpArg* Rest)
return 0;
}
+static int
+same_label(LoaderState* stp, GenOpArg Target, GenOpArg Label)
+{
+ return Target.type == TAG_f && Label.type == TAG_u &&
+ Target.val == Label.val;
+}
+
+
/*
* Generate an instruction for element/2.
*/
@@ -2328,23 +2308,23 @@ gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index,
GenOp* op;
NEW_GENOP(stp, op);
- op->op = genop_i_element_4;
op->arity = 4;
- op->a[0] = Fail;
- op->a[1] = Index;
- op->a[2] = Tuple;
- op->a[3] = Dst;
op->next = NULL;
- /*
- * If safe, generate a faster instruction.
- */
-
if (Index.type == TAG_i && Index.val > 0 &&
(Tuple.type == TAG_r || Tuple.type == TAG_x || Tuple.type == TAG_y)) {
op->op = genop_i_fast_element_4;
- op->a[1].type = TAG_u;
- op->a[1].val = Index.val;
+ op->a[0] = Tuple;
+ op->a[1] = Fail;
+ op->a[2].type = TAG_u;
+ op->a[2].val = Index.val;
+ op->a[3] = Dst;
+ } else {
+ op->op = genop_i_element_4;
+ op->a[0] = Tuple;
+ op->a[1] = Fail;
+ op->a[2] = Index;
+ op->a[3] = Dst;
}
return op;
@@ -2595,8 +2575,6 @@ binary_too_big_bits(LoaderState* stp, GenOpArg Size)
return Size.type == TAG_u && (((Size.val+7)/8) >> (8*sizeof(Uint)-3) != 0);
}
-#define new_float_allocation(Stp) ((Stp)->new_float_instructions)
-
static GenOp*
gen_put_binary(LoaderState* stp, GenOpArg Fail,GenOpArg Size,
GenOpArg Unit, GenOpArg Flags, GenOpArg Src)
@@ -2809,6 +2787,52 @@ gen_skip_bits2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms,
return op;
}
+static GenOp*
+gen_increment(LoaderState* stp, GenOpArg Reg, GenOpArg Integer,
+ GenOpArg Live, GenOpArg Dst)
+{
+ GenOp* op;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_increment_4;
+ op->arity = 4;
+ op->next = NULL;
+ op->a[0] = Reg;
+ op->a[1].type = TAG_u;
+ op->a[1].val = Integer.val;
+ op->a[2] = Live;
+ op->a[3] = Dst;
+ return op;
+}
+
+static GenOp*
+gen_increment_from_minus(LoaderState* stp, GenOpArg Reg, GenOpArg Integer,
+ GenOpArg Live, GenOpArg Dst)
+{
+ GenOp* op;
+
+ NEW_GENOP(stp, op);
+ op->op = genop_i_increment_4;
+ op->arity = 4;
+ op->next = NULL;
+ op->a[0] = Reg;
+ op->a[1].type = TAG_u;
+ op->a[1].val = -Integer.val;
+ op->a[2] = Live;
+ op->a[3] = Dst;
+ return op;
+}
+
+/*
+ * Test whether the negation of the given number is small.
+ */
+static int
+negation_is_small(LoaderState* stp, GenOpArg Int)
+{
+ return Int.type == TAG_i && IS_SSMALL(-Int.val);
+}
+
+
static int
smp(LoaderState* stp)
{
@@ -3000,6 +3024,21 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
ASSERT(op->a[i].val < op->a[i+2].val);
}
#endif
+
+ /*
+ * Use a special-cased instruction if there are only two values.
+ */
+ if (size == 2) {
+ op->op = genop_i_select_tuple_arity2_6;
+ op->arity--;
+ op->a[2].type = TAG_u;
+ op->a[2].val = arityval(op->a[3].val);
+ op->a[3] = op->a[4];
+ op->a[4].type = TAG_u;
+ op->a[4].val = arityval(op->a[5].val);
+ op->a[5] = op->a[6];
+ }
+
return op;
}
@@ -3009,18 +3048,24 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail,
*/
static GenOp*
-gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg Fail,
- GenOpArg Size, GenOpArg* Rest)
+gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg TypeFail,
+ GenOpArg Fail, GenOpArg Size, GenOpArg* Rest)
{
GenOp* op1;
GenOp* op2;
GenOp* label;
- Uint type;
+ GenOp* is_integer;
int i;
ASSERT(Size.val >= 2 && Size.val % 2 == 0);
+ NEW_GENOP(stp, is_integer);
+ is_integer->op = genop_is_integer_2;
+ is_integer->arity = 2;
+ is_integer->a[0] = TypeFail;
+ is_integer->a[1] = S;
+
NEW_GENOP(stp, label);
label->op = genop_label_1;
label->arity = 1;
@@ -3046,15 +3091,13 @@ gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg Fail,
op2->a[2].type = TAG_u;
op2->a[2].val = 0;
- op1->next = label;
- label->next = op2;
- op2->next = NULL;
-
- type = Rest[0].type;
+ /*
+ * Split the list.
+ */
ASSERT(Size.type == TAG_u);
for (i = 0; i < Size.val; i += 2) {
- GenOp* op = (Rest[i].type == type) ? op1 : op2;
+ GenOp* op = (Rest[i].type == TAG_q) ? op2 : op1;
int dst = 3 + op->a[2].val;
ASSERT(Rest[i+1].type == TAG_f);
@@ -3063,13 +3106,36 @@ gen_split_values(LoaderState* stp, GenOpArg S, GenOpArg Fail,
op->arity += 2;
op->a[2].val += 2;
}
+ ASSERT(op1->a[2].val > 0);
+ ASSERT(op2->a[2].val > 0);
/*
- * None of the instructions should have zero elements in the list.
+ * Order the instruction sequence appropriately.
*/
- ASSERT(op1->a[2].val > 0);
- ASSERT(op2->a[2].val > 0);
+ if (TypeFail.val == Fail.val) {
+ /*
+ * select_val L1 S ... (small numbers)
+ * label L1
+ * is_integer Fail S
+ * select_val Fail S ... (bignums)
+ */
+ op1->next = label;
+ label->next = is_integer;
+ is_integer->next = op2;
+ } else {
+ /*
+ * is_integer TypeFail S
+ * select_val L1 S ... (small numbers)
+ * label L1
+ * select_val Fail S ... (bignums)
+ */
+ is_integer->next = op1;
+ op1->next = label;
+ label->next = op2;
+ op1 = is_integer;
+ }
+ op2->next = NULL;
return op1;
}
@@ -3091,6 +3157,29 @@ gen_jump_tab(LoaderState* stp, GenOpArg S, GenOpArg Fail, GenOpArg Size, GenOpAr
ASSERT(Size.val >= 2 && Size.val % 2 == 0);
/*
+ * If there is only one choice, don't generate a jump table.
+ */
+ if (Size.val == 2) {
+ GenOp* jump;
+
+ NEW_GENOP(stp, op);
+ op->arity = 3;
+ op->op = genop_is_ne_exact_3;
+ op->a[0] = Rest[1];
+ op->a[1] = S;
+ op->a[2] = Rest[0];
+
+ NEW_GENOP(stp, jump);
+ jump->next = NULL;
+ jump->arity = 1;
+ jump->op = genop_jump_1;
+ jump->a[0] = Fail;
+
+ op->next = jump;
+ return op;
+ }
+
+ /*
* Calculate the minimum and maximum values and size of jump table.
*/
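gen_jump_tab() above now special-cases a jump table that would hold a single
value: an exact-inequality test followed by an unconditional jump is smaller
and no slower. A toy sketch of the two shapes; dispatch_single and
dispatch_table are hypothetical names and the 40..42 range is purely
illustrative:

    #include <stdio.h>

    /* One value: no table, just compare-and-branch (cf. is_ne_exact). */
    static const char *dispatch_single(int x)
    {
        if (x != 42)
            return "fail";              /* cf. the generated jump Fail */
        return "label_42";
    }

    /* Dense values: index a table after a [min, max] range check. */
    static const char *dispatch_table(int x)
    {
        static const char *table[] = { "label_40", "label_41", "label_42" };
        if (x < 40 || x > 42)
            return "fail";
        return table[x - 40];
    }

    int main(void)
    {
        printf("%s %s %s\n", dispatch_single(42), dispatch_single(7),
               dispatch_table(41));
        return 0;
    }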
@@ -3162,8 +3251,9 @@ genopargcompare(GenOpArg* a, GenOpArg* b)
}
/*
- * Generate a select_val instruction. We know that a jump table is not suitable,
- * and that all values are of the same type (integer, atoms, floats; never bignums).
+ * Generate a select_val instruction. We know that a jump table
+ * is not suitable, and that all values are of the same type
+ * (integer or atoms).
*/
static GenOp*
@@ -3177,12 +3267,7 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
NEW_GENOP(stp, op);
op->next = NULL;
- if (Rest[0].type != TAG_q) {
- op->op = genop_i_select_val_3;
- } else {
- ASSERT(is_float(stp->literals[Rest[0].val].term));
- op->op = genop_i_select_float_3;
- }
+ op->op = genop_i_select_val_3;
GENOP_ARITY(op, arity);
op->a[0] = S;
op->a[1] = Fail;
@@ -3204,19 +3289,19 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
}
#endif
- return op;
-}
-
-/*
- * Compare function for qsort().
- */
+ /*
+ * Use a special-cased instruction if there are only two values.
+ */
+ if (size == 2) {
+ op->op = genop_i_select_val2_6;
+ op->arity--;
+ op->a[2] = op->a[3];
+ op->a[3] = op->a[4];
+ op->a[4] = op->a[5];
+ op->a[5] = op->a[6];
+ }
-static int
-genbigcompare(GenOpArg* a, GenOpArg* b)
-{
- int val = (int)(b->bigarity - a->bigarity);
-
- return val != 0 ? val : ((int) (a->val - b->val));
+ return op;
}
/*
@@ -3224,37 +3309,35 @@ genbigcompare(GenOpArg* a, GenOpArg* b)
*/
static GenOp*
-gen_select_big(LoaderState* stp, GenOpArg S, GenOpArg Fail,
+gen_select_literals(LoaderState* stp, GenOpArg S, GenOpArg Fail,
GenOpArg Size, GenOpArg* Rest)
{
GenOp* op;
- int arity = Size.val + 2 + 1;
- int size = Size.val / 2;
+ GenOp* jump;
+ GenOp** prev_next = &op;
+
int i;
- NEW_GENOP(stp, op);
- op->next = NULL;
- op->op = genop_i_select_big_2;
- GENOP_ARITY(op, arity);
- op->a[0] = S;
- op->a[1] = Fail;
for (i = 0; i < Size.val; i += 2) {
+ GenOp* op;
ASSERT(Rest[i].type == TAG_q);
- op->a[i+2] = Rest[i];
- op->a[i+2].bigarity = *big_val(stp->literals[op->a[i+2].val].term);
- op->a[i+3] = Rest[i+1];
- }
- ASSERT(i+2 == arity-1);
- op->a[arity-1].type = TAG_u;
- op->a[arity-1].val = 0;
-
- /*
- * Sort the values in descending arity order.
- */
-
- qsort(op->a+2, size, 2*sizeof(GenOpArg),
- (int (*)(const void *, const void *)) genbigcompare);
+ NEW_GENOP(stp, op);
+ op->op = genop_is_ne_exact_3;
+ op->arity = 3;
+ op->a[0] = Rest[i+1];
+ op->a[1] = S;
+ op->a[2] = Rest[i];
+ *prev_next = op;
+ prev_next = &op->next;
+ }
+
+ NEW_GENOP(stp, jump);
+ jump->next = NULL;
+ jump->op = genop_jump_1;
+ jump->arity = 1;
+ jump->a[0] = Fail;
+ *prev_next = jump;
return op;
}
@@ -3272,7 +3355,6 @@ const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
int i;
ASSERT(Size.type == TAG_u);
- ASSERT(S.type == TAG_q);
NEW_GENOP(stp, op);
op->next = NULL;
@@ -3283,18 +3365,32 @@ const_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail,
* Search for a literal matching the controlling expression.
*/
- if (S.type == TAG_q) {
- Eterm expr = stp->literals[S.val].term;
- for (i = 0; i < Size.val; i += 2) {
- if (Rest[i].type == TAG_q) {
- Eterm term = stp->literals[Rest[i].val].term;
- if (eq(term, expr)) {
- ASSERT(Rest[i+1].type == TAG_f);
- op->a[0] = Rest[i+1];
- return op;
+ switch (S.type) {
+ case TAG_q:
+ {
+ Eterm expr = stp->literals[S.val].term;
+ for (i = 0; i < Size.val; i += 2) {
+ if (Rest[i].type == TAG_q) {
+ Eterm term = stp->literals[Rest[i].val].term;
+ if (eq(term, expr)) {
+ ASSERT(Rest[i+1].type == TAG_f);
+ op->a[0] = Rest[i+1];
+ return op;
+ }
}
}
}
+ break;
+ case TAG_i:
+ case TAG_a:
+ for (i = 0; i < Size.val; i += 2) {
+ if (Rest[i].val == S.val && Rest[i].type == S.type) {
+ ASSERT(Rest[i+1].type == TAG_f);
+ op->a[0] = Rest[i+1];
+ return op;
+ }
+ }
+ break;
}
/*
@@ -3477,6 +3573,56 @@ gen_guard_bif3(LoaderState* stp, GenOpArg Fail, GenOpArg Live, GenOpArg Bif,
return op;
}
+static GenOp*
+tuple_append_put5(LoaderState* stp, GenOpArg Arity, GenOpArg Dst,
+ GenOpArg* Puts, GenOpArg S1, GenOpArg S2, GenOpArg S3,
+ GenOpArg S4, GenOpArg S5)
+{
+ GenOp* op;
+ int arity = Arity.val; /* Arity of tuple, not the instruction */
+ int i;
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ GENOP_ARITY(op, arity+2+5);
+ op->op = genop_i_put_tuple_2;
+ op->a[0] = Dst;
+ op->a[1].type = TAG_u;
+ op->a[1].val = arity + 5;
+ for (i = 0; i < arity; i++) {
+ op->a[i+2] = Puts[i];
+ }
+ op->a[arity+2] = S1;
+ op->a[arity+3] = S2;
+ op->a[arity+4] = S3;
+ op->a[arity+5] = S4;
+ op->a[arity+6] = S5;
+ return op;
+}
+
+static GenOp*
+tuple_append_put(LoaderState* stp, GenOpArg Arity, GenOpArg Dst,
+ GenOpArg* Puts, GenOpArg S)
+{
+ GenOp* op;
+ int arity = Arity.val; /* Arity of tuple, not the instruction */
+ int i;
+
+ NEW_GENOP(stp, op);
+ op->next = NULL;
+ GENOP_ARITY(op, arity+2+1);
+ op->op = genop_i_put_tuple_2;
+ op->a[0] = Dst;
+ op->a[1].type = TAG_u;
+ op->a[1].val = arity + 1;
+ for (i = 0; i < arity; i++) {
+ op->a[i+2] = Puts[i];
+ }
+ op->a[arity+2] = S;
+ return op;
+}
+
+
/*
* Freeze the code in memory, move the string table into place,
@@ -3876,11 +4022,23 @@ transform_engine(LoaderState* st)
if (i == 0)
goto restart;
break;
+#if defined(TOP_is_eq)
case TOP_is_eq:
ASSERT(ap < instr->arity);
if (*pc++ != instr->a[ap].val)
goto restart;
break;
+#endif
+ case TOP_is_type_eq:
+ mask = *pc++;
+
+ ASSERT(ap < instr->arity);
+ ASSERT(instr->a[ap].type < BEAM_NUM_TAGS);
+ if (((1 << instr->a[ap].type) & mask) == 0)
+ goto restart;
+ if (*pc++ != instr->a[ap].val)
+ goto restart;
+ break;
case TOP_is_same_var:
ASSERT(ap < instr->arity);
i = *pc++;
@@ -4001,14 +4159,17 @@ transform_engine(LoaderState* st)
case TOP_rest_args:
{
int n = *pc++;
+ int formal_arity = gen_opc[instr->op].arity;
+ int num_vars = n + (instr->arity - formal_arity);
+ int j = formal_arity;
+
var = erts_alloc(ERTS_ALC_T_LOADER_TMP,
- instr->arity * sizeof(GenOpArg));
+ num_vars * sizeof(GenOpArg));
for (i = 0; i < n; i++) {
var[i] = def_vars[i];
}
- while (i < instr->arity) {
- var[i] = instr->a[i];
- i++;
+ while (i < num_vars) {
+ var[i++] = instr->a[j++];
}
}
break;
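Several hunks above lean on the loader's back-patching scheme for TAG_f
operands: a forward reference stores the index of the previous patch site in
the code word itself, so each label heads a chain threaded through the code
area, rewritten in one pass once the label's address is known. A standalone
sketch of that scheme; NO_PATCH is an illustrative end-of-chain marker:

    #include <stdio.h>

    #define NO_PATCH (-1L)

    int main(void)
    {
        long code[8];
        long patches = NO_PATCH;       /* cf. stp->labels[n].patches */
        int ci = 0;

        /* Emit three forward references to one still-unknown label;
         * the code words themselves form the list of patch sites. */
        for (int k = 0; k < 3; k++) {
            code[ci] = patches;
            patches = ci++;
        }

        /* The label is defined: walk the chain and write the address. */
        long address = 100;
        while (patches != NO_PATCH) {
            long next = code[patches];
            code[patches] = address;
            patches = next;
        }

        for (int k = 0; k < ci; k++)
            printf("code[%d] = %ld\n", k, code[k]);   /* all 100 */
        return 0;
    }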
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 6e9755ad48..bb237e378a 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -813,7 +813,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
so.min_heap_size = H_MIN_SIZE;
so.min_vheap_size = BIN_VH_MIN_SIZE;
so.priority = PRIORITY_NORMAL;
- so.max_gen_gcs = (Uint16) erts_smp_atomic_read(&erts_max_gen_gcs);
+ so.max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
so.scheduler = 0;
/*
@@ -1351,9 +1351,10 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
#ifdef ERTS_SMP
if (rp == BIF_P)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- else
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ if (rp != BIF_P)
erts_smp_proc_dec_refc(rp);
- erts_smp_proc_unlock(rp, rp_locks);
#endif
/*
* We may have exited ourselves and may have to take action.
@@ -3269,12 +3270,13 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */
- erts_smp_atomic_set(&erts_dead_ports_ptr, (long) (port_buf + erts_max_ports));
+ erts_smp_atomic_set(&erts_dead_ports_ptr,
+ (erts_aint_t) (port_buf + erts_max_ports));
next_ss = erts_smp_atomic_inctest(&erts_ports_snapshot);
if (erts_smp_atomic_read(&erts_ports_alive) > 0) {
- long i;
+ erts_aint_t i;
for (i = erts_max_ports-1; i >= 0; i--) {
Port* prt = &erts_port[i];
erts_smp_port_state_lock(prt);
@@ -3289,7 +3291,7 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
}
dead_ports = (Eterm*)erts_smp_atomic_xchg(&erts_dead_ports_ptr,
- (long)NULL);
+ (erts_aint_t) NULL);
erts_smp_mtx_unlock(&ports_snapshot_mtx);
ASSERT(pp <= dead_ports);
@@ -3300,7 +3302,7 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
ASSERT((alive+dead) <= erts_max_ports);
if (alive+dead > 0) {
- long i;
+ erts_aint_t i;
Eterm *hp = HAlloc(BIF_P, (alive+dead)*2);
for (i = 0; i < alive; i++) {
@@ -3796,7 +3798,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
goto error;
}
nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n);
- oval = (Uint) erts_smp_atomic_xchg(&erts_max_gen_gcs, (long) nval);
+ oval = (Uint) erts_smp_atomic32_xchg(&erts_max_gen_gcs,
+ (erts_aint32_t) nval);
BIF_RET(make_small(oval));
} else if (BIF_ARG_1 == am_min_heap_size) {
int oval = H_MIN_SIZE;
@@ -4139,7 +4142,7 @@ void erts_init_bif(void)
erts_smp_spinlock_init(&make_ref_lock, "make_ref");
erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init(&erts_dead_ports_ptr, (long)NULL);
+ erts_smp_atomic_init(&erts_dead_ports_ptr, (erts_aint_t) NULL);
/*
* bif_return_trap/1 is a hidden BIF that bifs that need to
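The recurring change in this file is swapping casts through long for
erts_aint_t in atomic operations. The point is portability: on LLP64
platforms such as 64-bit Windows, long stays 32 bits while pointers are 64
bits, so a pointer round-tripped through long is silently truncated. A small
sketch with intptr_t standing in for erts_aint_t:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int x = 0;
        intptr_t word = (intptr_t) &x;   /* always pointer-sized */
        int *back = (int *) word;        /* round-trips losslessly */

        /* With long instead of intptr_t, "back == &x" can fail on
         * LLP64 once an address no longer fits in 32 bits. */
        printf("sizeof(long)=%zu sizeof(void*)=%zu ok=%d\n",
               sizeof(long), sizeof(void *), back == &x);
        return 0;
    }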
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 60b4b1946b..d9dd80fa8b 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -660,6 +660,7 @@ bif erts_debug:display/1
bif 'erl.system.debug':display/1 ebif_erts_debug_display_1
bif erts_debug:dist_ext_to_term/2
bif 'erl.system.debug':dist_ext_to_term/2 ebif_erts_debug_dist_ext_to_term_2
+bif erts_debug:instructions/0
#
# Monitor testing bif's...
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index f339e19761..d255cf3558 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -98,7 +98,7 @@ process_killer(void)
switch(j) {
case 'k':
if (rp->status == P_WAITING) {
- Uint32 rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
erts_smp_proc_inc_refc(rp);
erts_smp_proc_lock(rp, rp_locks);
(void) erts_send_exit_signal(NULL,
@@ -558,7 +558,7 @@ do_break(void)
#endif
#ifdef DEBUG
case 't':
- p_slpq();
+ erts_p_slpq();
return;
case 'b':
bin_check();
@@ -624,9 +624,9 @@ bin_check(void)
erts_printf("Process %T holding binary data \n", rp->id);
printed = 1;
}
- erts_printf("0x%08lx orig_size: %ld, norefs = %ld\n",
- (unsigned long)bp->val,
- (long)bp->val->orig_size,
+ erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
+ bp->val,
+ bp->val->orig_size,
erts_smp_atomic_read(&bp->val->refc));
}
}
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 694460d702..02910fad90 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -917,6 +917,7 @@ int erts_net_message(Port *prt,
Eterm token_size;
ErtsMonitor *mon;
ErtsLink *lnk;
+ Uint tuple_arity;
int res;
#ifdef ERTS_DIST_MSG_DBG
int orig_len = len;
@@ -1003,29 +1004,23 @@ int erts_net_message(Port *prt,
#endif
if (is_not_tuple(arg) ||
- (tuple = tuple_val(arg), arityval(*tuple) < 1) ||
+ (tuple = tuple_val(arg), (tuple_arity = arityval(*tuple)) < 1) ||
is_not_small(tuple[1])) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Invalid distribution message: %.200T", arg);
- erts_send_error_to_logger_nogl(dsbufp);
- goto data_error;
+ goto invalid_message;
}
token_size = 0;
switch (type = unsigned_val(tuple[1])) {
case DOP_LINK:
+ if (tuple_arity != 3) {
+ goto invalid_message;
+ }
from = tuple[2];
to = tuple[3]; /* local proc to link to */
if (is_not_pid(from) || is_not_pid(to)) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- PURIFY_MSG("data error");
- erts_dsprintf(dsbufp,
- "Invalid DOP_LINK distribution message: %.200T",
- arg);
- erts_send_error_to_logger_nogl(dsbufp);
- goto data_error;
+ goto invalid_message;
}
rp = erts_pid2proc_opt(NULL, 0,
@@ -1064,8 +1059,14 @@ int erts_net_message(Port *prt,
case DOP_UNLINK: {
ErtsDistLinkData dld;
+ if (tuple_arity != 3) {
+ goto invalid_message;
+ }
from = tuple[2];
to = tuple[3];
+ if (is_not_pid(from) || is_not_pid(to)) {
+ goto invalid_message;
+ }
rp = erts_pid2proc_opt(NULL, 0,
to, ERTS_PROC_LOCK_LINK,
@@ -1092,11 +1093,19 @@ int erts_net_message(Port *prt,
/* A remote process wants to monitor us, we get:
{DOP_MONITOR_P, Remote pid, local pid or name, ref} */
Eterm name;
+
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
watcher = tuple[2];
watched = tuple[3]; /* local proc to monitor */
ref = tuple[4];
+ if (is_not_ref(ref)) {
+ goto invalid_message;
+ }
+
if (is_atom(watched)) {
name = watched;
rp = erts_whereis_process(NULL, 0,
@@ -1138,10 +1147,17 @@ int erts_net_message(Port *prt,
We get {DOP_DEMONITOR_P, Remote pid, Local pid or name, ref},
We need only the ref of course */
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
/* watcher = tuple[2]; */
/* watched = tuple[3]; May be an atom in case of monitor name */
ref = tuple[4];
+ if (is_not_ref(ref)) {
+ goto invalid_message;
+ }
+
erts_smp_de_links_lock(dep);
mon = erts_remove_monitor(&(dep->monitors),ref);
erts_smp_de_links_unlock(dep);
@@ -1166,10 +1182,11 @@ int erts_net_message(Port *prt,
erts_destroy_monitor(mon);
break;
- case DOP_NODE_LINK: /* XXX never sent ?? */
- break;
-
case DOP_REG_SEND_TT:
+ if (tuple_arity != 5) {
+ goto invalid_message;
+ }
+
token_size = size_object(tuple[5]);
/* Fall through ... */
case DOP_REG_SEND:
@@ -1180,12 +1197,19 @@ int erts_net_message(Port *prt,
* There is intentionally no testing of the cookie (it is always '')
* from R9B and onwards.
*/
+ if (type != DOP_REG_SEND_TT && tuple_arity != 4) {
+ goto invalid_message;
+ }
+
#ifdef ERTS_DIST_MSG_DBG
dist_msg_dbg(&ede, "MSG", buf, orig_len);
#endif
from = tuple[2];
to = tuple[4];
+ if (is_not_pid(from) || is_not_atom(to)){
+ goto invalid_message;
+ }
rp = erts_whereis_process(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
if (rp) {
Uint xsize = (type == DOP_REG_SEND
@@ -1217,6 +1241,10 @@ int erts_net_message(Port *prt,
break;
case DOP_SEND_TT:
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
+
token_size = size_object(tuple[4]);
/* Fall through ... */
case DOP_SEND:
@@ -1227,8 +1255,13 @@ int erts_net_message(Port *prt,
#ifdef ERTS_DIST_MSG_DBG
dist_msg_dbg(&ede, "MSG", buf, orig_len);
#endif
-
+ if (type != DOP_SEND_TT && tuple_arity != 3) {
+ goto invalid_message;
+ }
to = tuple[3];
+ if (is_not_pid(to)) {
+ goto invalid_message;
+ }
rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
if (rp) {
Uint xsize = type == DOP_SEND ? 0 : ERTS_HEAP_FRAG_SIZE(token_size);
@@ -1266,11 +1299,19 @@ int erts_net_message(Port *prt,
Eterm sysname;
ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_MSG_SEND|ERTS_PROC_LOCK_LINK;
+ if (tuple_arity != 5) {
+ goto invalid_message;
+ }
+
/* watched = tuple[2]; */ /* remote proc which died */
/* watcher = tuple[3]; */
ref = tuple[4];
reason = tuple[5];
+ if (is_not_ref(ref)) {
+ goto invalid_message;
+ }
+
erts_smp_de_links_lock(dep);
sysname = dep->sysname;
mon = erts_remove_monitor(&(dep->monitors), ref);
@@ -1317,24 +1358,25 @@ int erts_net_message(Port *prt,
ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
/* 'from', which 'to' is linked to, died */
if (type == DOP_EXIT) {
- from = tuple[2];
- to = tuple[3];
- reason = tuple[4];
- token = NIL;
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
+
+ from = tuple[2];
+ to = tuple[3];
+ reason = tuple[4];
+ token = NIL;
} else {
- from = tuple[2];
- to = tuple[3];
- token = tuple[4];
- reason = tuple[5];
+ if (tuple_arity != 5) {
+ goto invalid_message;
+ }
+ from = tuple[2];
+ to = tuple[3];
+ token = tuple[4];
+ reason = tuple[5];
}
- if (is_not_internal_pid(to)) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- PURIFY_MSG("data error");
- erts_dsprintf(dsbufp,
- "Invalid DOP_EXIT distribution message: %.200T",
- arg);
- erts_send_error_to_logger_nogl(dsbufp);
- goto data_error;
+ if (is_not_pid(from) || is_not_internal_pid(to)) {
+ goto invalid_message;
}
rp = erts_pid2proc(NULL, 0, to, rp_locks);
@@ -1381,15 +1423,24 @@ int erts_net_message(Port *prt,
ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
/* 'from' sends an exit signal to 'to' */
if (type == DOP_EXIT2) {
- from = tuple[2];
- to = tuple[3];
- reason = tuple[4];
- token = NIL;
+ if (tuple_arity != 4) {
+ goto invalid_message;
+ }
+ from = tuple[2];
+ to = tuple[3];
+ reason = tuple[4];
+ token = NIL;
} else {
- from = tuple[2];
- to = tuple[3];
- token = tuple[4];
- reason = tuple[5];
+ if (tuple_arity != 5) {
+ goto invalid_message;
+ }
+ from = tuple[2];
+ to = tuple[3];
+ token = tuple[4];
+ reason = tuple[5];
+ }
+ if (is_not_pid(from) || is_not_internal_pid(to)) {
+ goto invalid_message;
}
rp = erts_pid2proc_opt(NULL, 0, to, rp_locks,
ERTS_P2P_FLG_SMP_INC_REFC);
@@ -1408,10 +1459,14 @@ int erts_net_message(Port *prt,
break;
}
case DOP_GROUP_LEADER:
+ if (tuple_arity != 3) {
+ goto invalid_message;
+ }
from = tuple[2]; /* Group leader */
to = tuple[3]; /* new member */
- if (is_not_pid(from))
- break;
+ if (is_not_pid(from) || is_not_pid(to)) {
+ goto invalid_message;
+ }
rp = erts_pid2proc(NULL, 0, to, ERTS_PROC_LOCK_MAIN);
if (!rp)
@@ -1420,16 +1475,8 @@ int erts_net_message(Port *prt,
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
break;
- default: {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Illegal value in distribution dispatch switch: "
- "%.200T",
- arg);
- erts_send_error_to_logger_nogl(dsbufp);
- PURIFY_MSG("data error");
- goto data_error;
- }
+ default:
+ goto invalid_message;
}
erts_cleanup_offheap(&off_heap);
@@ -1441,8 +1488,14 @@ int erts_net_message(Port *prt,
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
ERTS_SMP_CHK_NO_PROC_LOCKS;
return 0;
-
+ invalid_message:
+ {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Invalid distribution message: %.200T", arg);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
data_error:
+ PURIFY_MSG("data error");
erts_cleanup_offheap(&off_heap);
#ifndef HYBRID /* FIND ME! */
if (ctl != ctl_default) {
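The erts_net_message() rework above checks each control tuple's arity and
element types before any use and funnels every failure into the single
invalid_message path, rather than formatting a log message at each site. A
toy sketch of that shape; struct ctl and handle_ctl are illustrative, not
dist.c types:

    #include <stdio.h>

    enum { DOP_LINK = 1, DOP_UNLINK = 4 };

    struct ctl { int op; int arity; };       /* toy control tuple */

    static int handle_ctl(const struct ctl *c)
    {
        switch (c->op) {
        case DOP_LINK:
        case DOP_UNLINK:
            if (c->arity != 3)
                goto invalid_message;
            /* ... element type checks, then the real work ... */
            return 0;
        default:                             /* unknown opcode */
            goto invalid_message;
        }
    invalid_message:
        fprintf(stderr, "Invalid distribution message: op=%d\n", c->op);
        return -1;
    }

    int main(void)
    {
        struct ctl good = { DOP_LINK, 3 }, bad = { DOP_LINK, 2 };
        printf("%d %d\n", handle_ctl(&good), handle_ctl(&bad));
        return 0;
    }

One error label keeps the failure handling uniform and makes it harder for a
newly added opcode to skip validation.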
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index 9ccc3e5ba9..695a4fc3fe 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -52,7 +52,7 @@
#define DOP_SEND 2
#define DOP_EXIT 3
#define DOP_UNLINK 4
-#define DOP_NODE_LINK 5
+/* Ancient DOP_NODE_LINK (5) was here, can be reused */
#define DOP_REG_SEND 6
#define DOP_GROUP_LEADER 7
#define DOP_EXIT2 8
@@ -69,7 +69,6 @@
/* distribution trap functions */
extern Export* dsend2_trap;
extern Export* dsend3_trap;
-/*extern Export* dsend_nosuspend_trap;*/
extern Export* dlink_trap;
extern Export* dunlink_trap;
extern Export* dmonitor_node_trap;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 7793f60f4f..e85e2d7e3f 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1568,7 +1568,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
Eterm atoms[sizeof(size)/sizeof(Uint)];
Uint *uintps[sizeof(size)/sizeof(Uint)];
Eterm euints[sizeof(size)/sizeof(Uint)];
- int need_atom;
int want_tot_or_sys;
int length;
Eterm res = THE_NON_VALUE;
@@ -1756,7 +1755,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
/* Calculate values needed... */
want_tot_or_sys = want.total || want.system;
- need_atom = ERTS_MEM_NEED_ALL_ALCU || want.atom;
if (ERTS_MEM_NEED_ALL_ALCU) {
size.total = 0;
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 408ffd12f7..b7b9c6a133 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -263,6 +263,8 @@ type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
type PROC_LCK_QS LONG_LIVED SYSTEM proc_lock_queues
type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
+type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
+type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
+endif
#
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 2c2e283f65..c9cdcb87a6 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1193,7 +1193,7 @@ int erts_ddll_driver_ok(DE_Handle *dh)
static void ddll_no_more_references(void *vdh)
{
DE_Handle *dh = (DE_Handle *) vdh;
- int x;
+ erts_aint_t x;
lock_drv_list();
@@ -1604,7 +1604,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
erts_sys_ddll_close(dh->handle);
return ERL_DE_LOAD_ERROR_BAD_NAME;
}
- erts_smp_atomic_init(&(dh->refc), (long) 0);
+ erts_smp_atomic_init(&(dh->refc), (erts_aint_t) 0);
dh->port_count = 0;
dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
sys_strcpy(dh->full_path, path);
@@ -1672,7 +1672,7 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
dh->handle = NULL;
dh->procs = NULL;
dh->port_count = 0;
- erts_refc_init(&(dh->refc), (long) 0);
+ erts_refc_init(&(dh->refc), (erts_aint_t) 0);
dh->status = -1;
dh->reload_full_path = NULL;
dh->reload_driver_name = NULL;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 75d8db880c..4a717d7271 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2020,7 +2020,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
} else if (BIF_ARG_1 == am_garbage_collection){
- Uint val = (Uint) erts_smp_atomic_read(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
Eterm tup;
hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2);
@@ -2035,7 +2035,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
} else if (BIF_ARG_1 == am_fullsweep_after){
- Uint val = (Uint) erts_smp_atomic_read(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_smp_atomic32_read(&erts_max_gen_gcs);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_fullsweep_after, make_small(val));
BIF_RET(res);
@@ -3430,8 +3430,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
*/
if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
&& (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
- long on = (long) (BIF_ARG_2 == am_true);
- long prev_on = erts_smp_atomic_xchg(&available_internal_state, on);
+ erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
+ erts_aint_t prev_on = erts_smp_atomic_xchg(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
@@ -3628,7 +3628,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
/* Used by hipe test suites */
- long flag = erts_smp_atomic_read(&hipe_test_reschedule_flag);
+ erts_aint_t flag = erts_smp_atomic_read(&hipe_test_reschedule_flag);
if (!flag && BIF_ARG_2 != am_false) {
erts_smp_atomic_set(&hipe_test_reschedule_flag, 1);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
@@ -3703,7 +3703,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
#ifdef ERTS_ENABLE_LOCK_COUNT
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- unsigned long tries = 0, colls = 0;
+ Uint tries = 0, colls = 0;
unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
unsigned int line = 0;
@@ -3716,8 +3716,8 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
* [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
*/
- tries = (unsigned long) ethr_atomic_read(&stats->tries);
- colls = (unsigned long) ethr_atomic_read(&stats->colls);
+ tries = (Uint) ethr_atomic_read(&stats->tries);
+ colls = (Uint) ethr_atomic_read(&stats->colls);
line = stats->line;
timer_s = stats->timer.s;
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index 4ae2f6ebf4..3508e8e0dc 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -478,7 +478,7 @@ setup_bif_timer(Uint32 xflags,
tab_insert(btm);
ASSERT(btm == tab_find(ref));
btm->tm.active = 0; /* MUST be initialized */
- erl_set_timer(&btm->tm,
+ erts_set_timer(&btm->tm,
(ErlTimeoutProc) bif_timer_timeout,
(ErlCancelProc) bif_timer_cleanup,
(void *) btm,
@@ -550,7 +550,7 @@ BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
res = am_false;
}
else {
- Uint left = time_left(&btm->tm);
+ Uint left = erts_time_left(&btm->tm);
if (!(btm->flags & BTM_FLG_BYNAME)) {
erts_smp_proc_lock(btm->receiver.proc.ess, ERTS_PROC_LOCK_MSGQ);
unlink_proc(btm);
@@ -558,7 +558,7 @@ BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
}
tab_remove(btm);
ASSERT(!tab_find(BIF_ARG_1));
- erl_cancel_timer(&btm->tm);
+ erts_cancel_timer(&btm->tm);
erts_smp_btm_rwunlock();
res = erts_make_integer(left, BIF_P);
}
@@ -587,7 +587,7 @@ BIF_RETTYPE read_timer_1(BIF_ALIST_1)
res = am_false;
}
else {
- Uint left = time_left(&btm->tm);
+ Uint left = erts_time_left(&btm->tm);
res = erts_make_integer(left, BIF_P);
}
@@ -613,7 +613,7 @@ erts_print_bif_timer_info(int to, void *to_arg)
: btm->receiver.proc.ess->id);
erts_print(to, to_arg, "=timer:%T\n", receiver);
erts_print(to, to_arg, "Message: %T\n", btm->message);
- erts_print(to, to_arg, "Time left: %d ms\n", time_left(&btm->tm));
+ erts_print(to, to_arg, "Time left: %d ms\n", erts_time_left(&btm->tm));
}
}
@@ -640,7 +640,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
tab_remove(btm);
tmp_btm = btm;
btm = btm->receiver.proc.next;
- erl_cancel_timer(&tmp_btm->tm);
+ erts_cancel_timer(&tmp_btm->tm);
}
p->bif_timers = NULL;
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 88d2c06246..6f8a7436d5 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -555,10 +555,11 @@ fmt_int(byte *buf, Uint sz, Eterm val, Uint size, Uint flags)
{
unsigned long offs;
- ASSERT(size != 0);
offs = BIT_OFFSET(size);
if (is_small(val)) {
Sint v = signed_val(val);
+
+ ASSERT(size != 0); /* Tested by caller */
if (flags & BSF_LITTLE) { /* Little endian */
sz--;
COPY_VAL(buf,1,v,sz);
@@ -578,6 +579,9 @@ fmt_int(byte *buf, Uint sz, Eterm val, Uint size, Uint flags)
ErtsDigit* dp = big_v(val);
int n = MIN(sz,ds);
+ if (size == 0) {
+ return 0;
+ }
if (flags & BSF_LITTLE) {
sz -= n; /* pad with this amount */
if (sign) {
@@ -729,15 +733,13 @@ erts_new_bs_put_integer(ERL_BITS_PROTO_3(Eterm arg, Uint num_bits, unsigned flag
Uint b;
byte *iptr;
- if (num_bits == 0) {
- return 1;
- }
-
bit_offset = BIT_OFFSET(bin_offset);
if (is_small(arg)) {
Uint rbits = 8 - bit_offset;
- if (bit_offset + num_bits <= 8) {
+ if (num_bits == 0) {
+ return 1;
+ } else if (bit_offset + num_bits <= 8) {
/*
* All bits are in the same byte.
*/
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index db95c4a5d4..8a6b4d8d6c 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -487,7 +487,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
/* Make sure we check if we should bind to a cpu or not... */
if (esdp->run_queue->flags & ERTS_RUNQ_FLG_SHARED_RUNQ)
- erts_smp_atomic_set(&esdp->chk_cpu_bind, 1);
+ erts_smp_atomic32_set(&esdp->chk_cpu_bind, 1);
else
esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
}
@@ -503,7 +503,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_callback_call_t *cgcc;
#ifdef ERTS_SMP
if (erts_common_run_queue)
- erts_smp_atomic_set(&esdp->chk_cpu_bind, 0);
+ erts_smp_atomic32_set(&esdp->chk_cpu_bind, 0);
else {
esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
}
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 8577354d27..3173d3510e 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -219,47 +219,68 @@ Export ets_select_continue_exp;
* Static traps
*/
static Export ets_delete_continue_exp;
-
-static ERTS_INLINE DbTable* db_ref(DbTable* tb, db_lock_kind_t kind)
-{
- if (tb != NULL && kind != LCK_READ) {
- erts_refc_inc(&tb->common.ref, 2);
- }
- return tb;
-}
-
-static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
+
+static void
+free_dbtable(DbTable* tb)
{
- if (kind != LCK_READ && !erts_refc_dectest(&tb->common.ref, 0)) {
#ifdef HARDDEBUG
if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
- erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
- erts_smp_atomic_read(&tb->common.memory_size)-sizeof(DbTable),
+ erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
+ erts_smp_atomic_read(&tb->common.memory_size)-sizeof(DbTable),
tb->common.fixations);
}
- erts_fprintf(stderr, "ets: db_unref(%T) deleted!!!\r\n",
+ erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n",
tb->common.id);
- erts_fprintf(stderr, "ets: db_unref: meta_pid_to_tab common.memory_size = %ld\n",
+ erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n",
erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size));
print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
- erts_fprintf(stderr, "ets: db_unref: meta_pid_to_fixed_tab common.memory_size = %ld\n",
+ erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n",
erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size));
print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
-
#endif
#ifdef ERTS_SMP
erts_smp_rwmtx_destroy(&tb->common.rwlock);
erts_smp_mtx_destroy(&tb->common.fixlock);
#endif
ASSERT(is_immed(tb->common.heir_data));
- erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
+ erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
- return NULL;
- }
- return tb;
+}
+
+#ifdef ERTS_SMP
+static void
+chk_free_dbtable(void *vtb)
+{
+ DbTable * tb = (DbTable *) vtb;
+ ERTS_THR_MEMORY_BARRIER;
+ if (erts_refc_dectest(&tb->common.ref, 0) == 0)
+ free_dbtable(tb);
+}
+#endif
+
+static void schedule_free_dbtable(DbTable* tb)
+{
+ /*
+ * NON-SMP case: Caller is *not* allowed to access the *tb
+ * structure after this function has returned!
+ * SMP case: Caller is allowed to access the *tb structure
+ * until the bif has returned (we typically
+ * need to unlock the table lock after this
+ * function has returned).
+ */
+#ifdef ERTS_SMP
+ int scheds = erts_get_max_no_executing_schedulers();
+ ASSERT(scheds >= 1);
+ ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
+ erts_refc_init(&tb->common.ref, scheds);
+ ERTS_THR_MEMORY_BARRIER;
+ erts_smp_schedule_misc_aux_work(0, scheds, chk_free_dbtable, tb);
+#else
+ free_dbtable(tb);
+#endif
}
static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
@@ -270,8 +291,6 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
if (use_frequent_read_lock)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
#endif
- erts_refc_init(&tb->common.ref, 1);
- erts_refc_init(&tb->common.fixref, 0);
#ifdef ERTS_SMP
erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
rwname, tb->common.the_name);
@@ -280,7 +299,7 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
#endif
}
-static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
+static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
#ifdef ERTS_SMP
ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
@@ -308,16 +327,13 @@ static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
#endif
}
-static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
-{
- (void) db_ref(tb, kind);
-#ifdef ERTS_SMP
- db_lock_take_over_ref(tb, kind);
-#endif
-}
-
static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
{
+ /*
+ * In NON-SMP case tb may refer to an already deallocated
+ * DbTable structure. That is, ONLY the SMP case is allowed
+ * to follow the tb pointer!
+ */
#ifdef ERTS_SMP
ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
@@ -344,7 +360,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
}
}
#endif
- (void) db_unref(tb, kind); /* May delete table... */
}
@@ -371,6 +386,13 @@ DbTable* db_get_table_aux(Process *p,
DbTable *tb = NULL;
erts_smp_rwmtx_t *mtl = NULL;
+ /*
+ * IMPORTANT: Only scheduler threads are allowed
+ * to access tables. Memory management
+ * depends on it.
+ */
+ ASSERT(erts_get_scheduler_data());
+
if (is_small(id)) {
Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
if (!meta_already_locked) {
@@ -384,12 +406,8 @@ DbTable* db_get_table_aux(Process *p,
|| erts_lc_rwmtx_is_rwlocked(test_mtl));
}
#endif
- if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
- /* SMP: inc to prevent race, between unlock of meta_main_tab_lock
- * and the table locking outside the meta_main_tab_lock
- */
- tb = db_ref(meta_main_tab[slot].u.tb, kind);
- }
+ if (slot < db_max_tabs && IS_SLOT_ALIVE(slot))
+ tb = meta_main_tab[slot].u.tb;
}
else if (is_atom(id)) {
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
@@ -403,16 +421,15 @@ DbTable* db_get_table_aux(Process *p,
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
- if (bucket->u.name_atom == id) {
- tb = db_ref(bucket->pu.tb, kind);
- }
+ if (bucket->u.name_atom == id)
+ tb = bucket->pu.tb;
}
else { /* multi */
Uint cnt = unsigned_val(bucket->u.mcnt);
Uint i;
for (i=0; i<cnt; i++) {
if (bucket->pu.mvec[i].u.name_atom == id) {
- tb = db_ref(bucket->pu.mvec[i].pu.tb, kind);
+ tb = bucket->pu.mvec[i].pu.tb;
break;
}
}
@@ -420,7 +437,7 @@ DbTable* db_get_table_aux(Process *p,
}
}
if (tb) {
- db_lock_take_over_ref(tb, kind);
+ db_lock(tb, kind);
if (tb->common.id != id
|| ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
db_unlock(tb, kind);
@@ -594,11 +611,11 @@ done:
*/
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
- erts_refc_inc(&tb->common.fixref, 1);
+ erts_refc_inc(&tb->common.ref, 1);
}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
{
- if (erts_refc_dectest(&tb->common.fixref, 0) == 0) {
+ if (erts_refc_dectest(&tb->common.ref, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
}
@@ -1414,6 +1431,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
+ erts_refc_init(&tb->common.ref, 0);
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
"db_tab", "db_tab_fix");
tb->common.keypos = keypos;
@@ -1436,8 +1454,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
"** Too many db tables **\n");
free_heir_data(tb);
tb->common.meth->db_free_table(tb);
- erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
+ free_dbtable(tb);
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
@@ -1471,9 +1488,10 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
free_slot(slot);
erts_smp_rwmtx_rwunlock(mmtl);
- db_lock_take_over_ref(tb,LCK_WRITE);
+ db_lock(tb,LCK_WRITE);
free_heir_data(tb);
tb->common.meth->db_free_table(tb);
+ schedule_free_dbtable(tb);
db_unlock(tb,LCK_WRITE);
BIF_ERROR(BIF_P, BADARG);
}
@@ -2845,8 +2863,7 @@ void init_db(void)
meta_pid_to_tab->common.meth = &db_hash;
meta_pid_to_tab->common.compress = 0;
- erts_refc_init(&meta_pid_to_tab->common.ref, 1);
- erts_refc_init(&meta_pid_to_tab->common.fixref, 0);
+ erts_refc_init(&meta_pid_to_tab->common.ref, 0);
/* Neither rwlock nor fixlock used
db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/
@@ -2878,8 +2895,7 @@ void init_db(void)
meta_pid_to_fixed_tab->common.meth = &db_hash;
meta_pid_to_fixed_tab->common.compress = 0;
- erts_refc_init(&meta_pid_to_fixed_tab->common.ref, 1);
- erts_refc_init(&meta_pid_to_fixed_tab->common.fixref, 0);
+ erts_refc_init(&meta_pid_to_fixed_tab->common.ref, 0);
/* Neither rwlock nor fixlock used
db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/
@@ -3037,12 +3053,10 @@ retry:
to_pid, to_locks,
ERTS_P2P_FLG_TRY_LOCK);
if (to_proc == ERTS_PROC_LOCK_BUSY) {
- db_ref(tb, LCK_NONE); /* while unlocked */
db_unlock(tb,LCK_WRITE);
to_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
to_pid, to_locks);
db_lock(tb,LCK_WRITE);
- tb = db_unref(tb, LCK_NONE);
ASSERT(tb != NULL);
if (tb->common.owner != p->id) {
@@ -3153,13 +3167,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
erts_smp_rwmtx_rlock(mmtl);
if (!IS_SLOT_FREE(ix)) {
- tb = db_ref(GET_ANY_SLOT_TAB(ix), LCK_WRITE);
+ tb = GET_ANY_SLOT_TAB(ix);
ASSERT(tb);
}
erts_smp_rwmtx_runlock(mmtl);
if (tb) {
int do_yield;
- db_lock_take_over_ref(tb, LCK_WRITE);
+ db_lock(tb, LCK_WRITE);
/* Ownership may have changed since
we looked up the table. */
if (tb->common.owner != pid) {
@@ -3241,7 +3255,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
erts_smp_rwmtx_rlock(mmtl);
if (IS_SLOT_ALIVE(ix)) {
- tb = db_ref(meta_main_tab[ix].u.tb, LCK_WRITE_REC);
+ tb = meta_main_tab[ix].u.tb;
ASSERT(tb);
}
erts_smp_rwmtx_runlock(mmtl);
@@ -3249,7 +3263,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
int reds;
DbFixation** pp;
- db_lock_take_over_ref(tb, LCK_WRITE_REC);
+ db_lock(tb, LCK_WRITE_REC);
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
@@ -3259,8 +3273,8 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
pp = &(*pp)->next) {
if ((*pp)->pid == pid) {
DbFixation* fix = *pp;
- long diff = -(long)fix->counter;
- erts_refc_add(&tb->common.fixref,diff,0);
+ erts_aint_t diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&tb->common.ref,diff,0);
*pp = fix->next;
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, fix, sizeof(DbFixation));
@@ -3335,7 +3349,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
- erts_refc_inc(&tb->common.fixref,1);
+ erts_refc_inc(&tb->common.ref,1);
fix = tb->common.fixations;
if (fix == NULL) {
get_now(&(tb->common.megasec),
@@ -3389,7 +3403,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
if ((*pp)->pid == p->id) {
DbFixation* fix = *pp;
- erts_refc_dec(&tb->common.fixref,0);
+ erts_refc_dec(&tb->common.ref,0);
--(fix->counter);
ASSERT(fix->counter >= 0);
if (fix->counter > 0) {
@@ -3415,11 +3429,10 @@ static void unfix_table_locked(Process* p, DbTable* tb,
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
- && erts_smp_atomic_read(&tb->hash.fixdel) != (long)NULL) {
+ && erts_smp_atomic_read(&tb->hash.fixdel) != (erts_aint_t)NULL) {
#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
- db_ref(tb, LCK_WRITE); /* LCK_WRITE need it, but not LCK_READ */
erts_smp_rwmtx_runlock(&tb->common.rwlock);
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
@@ -3438,6 +3451,8 @@ static void free_fixations_locked(DbTable *tb)
fix = tb->common.fixations;
while (fix != NULL) {
+ erts_aint_t diff = -((erts_aint_t) fix->counter);
+ erts_refc_add(&tb->common.ref,diff,0);
next_fix = fix->next;
db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
db_erase_bag_exact2(meta_pid_to_fixed_tab,
@@ -3561,10 +3576,6 @@ static int free_table_cont(Process *p,
mmtl = get_meta_main_tab_lock(tb->common.slot);
#ifdef ERTS_SMP
if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- /*
- * We keep our increased refc over this op in order to
- * prevent the table from disapearing.
- */
erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
erts_smp_rwmtx_rwlock(mmtl);
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
@@ -3579,7 +3590,7 @@ static int free_table_cont(Process *p,
make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
}
- db_unref(tb, LCK_NONE);
+ schedule_free_dbtable(tb);
BUMP_REDS(p, 100);
return 0;
}
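schedule_free_dbtable() above defers the final free: the table's refc is set
to the number of schedulers, chk_free_dbtable() is scheduled on every one of
them, and whichever scheduler takes the count to zero frees the memory, so no
scheduler can still be dereferencing the table. A standalone sketch with C11
atomics and POSIX threads (compile with -pthread; all names are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct table { atomic_int ref; int payload; };

    /* Runs once per "scheduler"; the last decrementer frees. */
    static void *chk_free_table(void *arg)
    {
        struct table *tb = arg;
        if (atomic_fetch_sub(&tb->ref, 1) == 1) {
            printf("freeing table (payload=%d)\n", tb->payload);
            free(tb);
        }
        return NULL;
    }

    int main(void)
    {
        enum { SCHEDULERS = 4 };
        pthread_t tid[SCHEDULERS];
        struct table *tb = malloc(sizeof *tb);

        tb->payload = 7;
        atomic_init(&tb->ref, SCHEDULERS); /* cf. erts_refc_init(scheds) */

        for (int i = 0; i < SCHEDULERS; i++)
            pthread_create(&tid[i], NULL, chk_free_table, tb);
        for (int i = 0; i < SCHEDULERS; i++)
            pthread_join(tid[i], NULL);
        return 0;
    }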
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index cb2da603f0..e0bdebcb01 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -83,7 +83,8 @@ Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
#define ERTS_DB_ALC_MEM_UPDATE_(TAB, FREE_SZ, ALLOC_SZ) \
do { \
- long sz__ = ((long) (ALLOC_SZ)) - ((long) (FREE_SZ)); \
+ erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \
+ - ((erts_aint_t) (FREE_SZ))); \
ASSERT((TAB)); \
erts_smp_atomic_add(&(TAB)->common.memory_size, sz__); \
} while (0)
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 14ee63100a..1e50fee554 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -135,8 +135,8 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
*/
static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
{
- long was_next;
- long exp_next;
+ erts_aint_t was_next;
+ erts_aint_t exp_next;
FixedDeletion* fixd = (FixedDeletion*) erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
(DbTable *) tb,
sizeof(FixedDeletion));
@@ -146,7 +146,9 @@ static ERTS_INLINE void add_fixed_deletion(DbTableHash* tb, int ix)
do { /* Lockless atomic insertion in linked list: */
exp_next = was_next;
fixd->next = (FixedDeletion*) exp_next;
- was_next = erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixd, exp_next);
+ was_next = erts_smp_atomic_cmpxchg(&tb->fixdel,
+ (erts_aint_t) fixd,
+ exp_next);
}while (was_next != exp_next);
}
@@ -541,12 +543,12 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
{
/*int tries = 0;*/
DEBUG_WAIT();
- if (erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixdel,
- (long)NULL) != (long)NULL) {
+ if (erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel,
+ (erts_aint_t)NULL) != (erts_aint_t)NULL) {
/* Oboy, must join lists */
FixedDeletion* last = fixdel;
- long was_tail;
- long exp_tail;
+ erts_aint_t was_tail;
+ erts_aint_t exp_tail;
while (last->next != NULL) last = last->next;
was_tail = erts_smp_atomic_read(&tb->fixdel);
@@ -555,7 +557,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
last->next = (FixedDeletion*) exp_tail;
/*++tries;*/
DEBUG_WAIT();
- was_tail = erts_smp_atomic_cmpxchg(&tb->fixdel, (long)fixdel,
+ was_tail = erts_smp_atomic_cmpxchg(&tb->fixdel, (erts_aint_t)fixdel,
exp_tail);
}while (was_tail != exp_tail);
}
@@ -573,7 +575,7 @@ void db_unfix_table_hash(DbTableHash *tb)
|| (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
&& !tb->common.is_thread_safe));
restart:
- fixdel = (FixedDeletion*) erts_smp_atomic_xchg(&tb->fixdel, (long)NULL);
+ fixdel = (FixedDeletion*) erts_smp_atomic_xchg(&tb->fixdel, (erts_aint_t)NULL);
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
int ix = fx->slot;
@@ -642,8 +644,8 @@ int db_create_hash(Process *p, DbTable *tbl)
erts_smp_atomic_init(&tb->szm, SEGSZ_MASK);
erts_smp_atomic_init(&tb->nactive, SEGSZ);
- erts_smp_atomic_init(&tb->fixdel, (long)NULL);
- erts_smp_atomic_init(&tb->segtab, (long) alloc_ext_seg(tb,0,NULL)->segtab);
+ erts_smp_atomic_init(&tb->fixdel, (erts_aint_t)NULL);
+ erts_smp_atomic_init(&tb->segtab, (erts_aint_t) alloc_ext_seg(tb,0,NULL)->segtab);
tb->nsegs = NSEG_1;
tb->nslots = SEGSZ;
@@ -1715,9 +1717,9 @@ static int db_select_delete_hash(Process *p,
Eterm mpb;
Eterm egot;
#ifdef ERTS_SMP
- int fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
+ erts_aint_t fixated_by_me = tb->common.is_thread_safe ? 0 : 1; /* ToDo: something nicer */
#else
- int fixated_by_me = 0;
+ erts_aint_t fixated_by_me = 0;
#endif
erts_smp_rwmtx_t* lck;
@@ -2124,11 +2126,11 @@ static int db_free_table_continue_hash(DbTable *tbl)
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
if (++done >= 2*DELETE_RECORD_LIMIT) {
- erts_smp_atomic_set(&tb->fixdel, (long)fixdel);
+ erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)fixdel);
return 0; /* Not done */
}
}
- erts_smp_atomic_set(&tb->fixdel, (long)NULL);
+ erts_smp_atomic_set(&tb->fixdel, (erts_aint_t)NULL);
done /= 2;
while(tb->nslots != 0) {
@@ -2345,7 +2347,7 @@ static int alloc_seg(DbTableHash *tb)
struct ext_segment* eseg;
eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
- erts_smp_atomic_set(&tb->segtab, (long) eseg->segtab);
+ erts_smp_atomic_set(&tb->segtab, (erts_aint_t) eseg->segtab);
tb->nsegs = eseg->nsegs;
}
ASSERT(seg_ix < tb->nsegs);
@@ -2417,7 +2419,7 @@ static int free_seg(DbTableHash *tb, int free_records)
MY_ASSERT(newtop->s.is_ext_segment);
if (newtop->prev_segtab != NULL) {
/* Time to use a smaller segtab */
- erts_smp_atomic_set(&tb->segtab, (long)newtop->prev_segtab);
+ erts_smp_atomic_set(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
tb->nsegs = seg_ix;
ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
}
@@ -2434,7 +2436,7 @@ static int free_seg(DbTableHash *tb, int free_records)
if (seg_ix > 0) {
if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
} else {
- erts_smp_atomic_set(&tb->segtab, (long)NULL);
+ erts_smp_atomic_set(&tb->segtab, (erts_aint_t)NULL);
}
#endif
tb->nslots -= SEGSZ;
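add_fixed_deletion() above publishes nodes onto tb->fixdel with a lock-free
compare-and-swap loop: point the new node's next at the head you expect, then
swap yourself in, retrying whenever another thread won the race. The same
loop with C11 atomics; struct node and push_fixed_deletion are illustrative
names:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { int slot; struct node *next; };

    static _Atomic(struct node *) fixdel = NULL;    /* cf. tb->fixdel */

    static void push_fixed_deletion(int slot)
    {
        struct node *n = malloc(sizeof *n);
        struct node *expected = atomic_load(&fixdel);

        n->slot = slot;
        do {
            n->next = expected;   /* expected plays exp_next's role */
        } while (!atomic_compare_exchange_weak(&fixdel, &expected, n));
    }

    int main(void)
    {
        push_fixed_deletion(1);
        push_fixed_deletion(2);
        for (struct node *p = atomic_load(&fixdel); p; p = p->next)
            printf("slot %d\n", p->slot);           /* 2 then 1: LIFO */
        return 0;
    }

atomic_compare_exchange_weak reloads the observed head into expected on
failure, which is why the loop body only needs to refresh n->next.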
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index e773361619..2852fb93fe 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -915,7 +915,7 @@ BIF_RETTYPE db_set_trace_control_word_1(Process *p, Eterm new)
if (val != ((Uint32)val))
BIF_ERROR(p, BADARG);
- old_tcw = (Uint32) erts_smp_atomic_xchg(&trace_control_word, (long) val);
+ old_tcw = (Uint32) erts_smp_atomic_xchg(&trace_control_word, (erts_aint_t) val);
BIF_RET(erts_make_integer((Uint) old_tcw, p));
}
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 10ba755e80..58ad39d772 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -206,8 +206,7 @@ typedef struct db_fixation {
*/
typedef struct db_table_common {
- erts_refc_t ref;
- erts_refc_t fixref; /* fixation counter */
+ erts_refc_t ref; /* fixation counter and delete counter */
#ifdef ERTS_SMP
erts_smp_rwmtx_t rwlock; /* rw lock on table */
erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
@@ -253,7 +252,7 @@ typedef struct db_table_common {
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
-#define NFIXED(T) (erts_refc_read(&(T)->common.fixref,0))
+#define NFIXED(T) (erts_refc_read(&(T)->common.ref,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
Eterm erts_ets_copy_object(Eterm, Process*);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 9733c0e5b5..13a73e01bb 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -150,6 +150,27 @@ typedef struct {
#define ERL_DRV_FLAG_SOFT_BUSY (1 << 1)
/*
+ * Integer types
+ */
+
+typedef unsigned long ErlDrvTermData;
+typedef unsigned long ErlDrvUInt;
+typedef signed long ErlDrvSInt;
+
+#if defined(__WIN32__)
+typedef unsigned __int64 ErlDrvUInt64;
+typedef __int64 ErlDrvSInt64;
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlDrvUInt64;
+typedef long ErlDrvSInt64;
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlDrvUInt64;
+typedef long long ErlDrvSInt64;
+#else
+#error No 64-bit integer type
+#endif
+
+/*
* A binary as seen in a driver. Note that a binary should never be
* altered by the driver when it has been sent to Erlang.
*/
@@ -179,26 +200,6 @@ struct erl_drv_event_data {
#endif
typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */
-/*
- * Used in monitors...
- */
-typedef unsigned long ErlDrvTermData;
-typedef unsigned long ErlDrvUInt;
-typedef signed long ErlDrvSInt;
-
-#if defined(__WIN32__)
-typedef unsigned __int64 ErlDrvUInt64;
-typedef __int64 ErlDrvSInt64;
-#elif SIZEOF_LONG == 8
-typedef unsigned long ErlDrvUInt64;
-typedef long ErlDrvSInt64;
-#elif SIZEOF_LONG_LONG == 8
-typedef unsigned long long ErlDrvUInt64;
-typedef long long ErlDrvSInt64;
-#else
-#error No 64-bit integer type
-#endif
-
/*
* A driver monitor
*/
@@ -394,9 +395,9 @@ EXTERN int driver_exit (ErlDrvPort port, int err);
EXTERN ErlDrvPDL driver_pdl_create(ErlDrvPort);
EXTERN void driver_pdl_lock(ErlDrvPDL);
EXTERN void driver_pdl_unlock(ErlDrvPDL);
-EXTERN long driver_pdl_get_refc(ErlDrvPDL);
-EXTERN long driver_pdl_inc_refc(ErlDrvPDL);
-EXTERN long driver_pdl_dec_refc(ErlDrvPDL);
+EXTERN ErlDrvSInt driver_pdl_get_refc(ErlDrvPDL);
+EXTERN ErlDrvSInt driver_pdl_inc_refc(ErlDrvPDL);
+EXTERN ErlDrvSInt driver_pdl_dec_refc(ErlDrvPDL);
/*
* Process monitors
@@ -432,9 +433,9 @@ EXTERN ErlDrvBinary* driver_realloc_binary(ErlDrvBinary *bin, int size);
EXTERN void driver_free_binary(ErlDrvBinary *bin);
/* Reference count on driver binaries */
-EXTERN long driver_binary_get_refc(ErlDrvBinary *dbp);
-EXTERN long driver_binary_inc_refc(ErlDrvBinary *dbp);
-EXTERN long driver_binary_dec_refc(ErlDrvBinary *dbp);
+EXTERN ErlDrvSInt driver_binary_get_refc(ErlDrvBinary *dbp);
+EXTERN ErlDrvSInt driver_binary_inc_refc(ErlDrvBinary *dbp);
+EXTERN ErlDrvSInt driver_binary_dec_refc(ErlDrvBinary *dbp);
/* Allocation interface */
EXTERN void *driver_alloc(size_t size);
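Moving the integer typedefs above the ErlDrvBinary declaration lets the refc prototypes further down return ErlDrvSInt instead of long. The 64-bit selection chain can be sanity-checked in isolation; a minimal sketch assuming an LP64 build, where the SIZEOF_LONG == 8 branch is taken (_Static_assert is C11):

    /* What the #elif chain above resolves to on an LP64 platform: */
    typedef unsigned long ErlDrvUInt64;
    typedef long ErlDrvSInt64;
    typedef signed long ErlDrvSInt;

    /* The invariant the #error branch enforces at configure time: */
    _Static_assert(sizeof(ErlDrvUInt64) == 8, "no 64-bit integer type");
    _Static_assert(sizeof(ErlDrvSInt64) == 8, "no 64-bit integer type");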
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 84869f12d6..88947b5536 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -97,7 +97,7 @@ erts_put_fun_entry(Eterm mod, int uniq, int index)
{
ErlFunEntry template;
ErlFunEntry* fe;
- long refc;
+ erts_aint_t refc;
ASSERT(is_atom(mod));
template.old_uniq = uniq;
template.old_index = index;
@@ -119,7 +119,7 @@ erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
{
ErlFunEntry template;
ErlFunEntry* fe;
- long refc;
+ erts_aint_t refc;
ASSERT(is_atom(mod));
template.old_uniq = old_uniq;
@@ -157,7 +157,7 @@ erts_get_fun_entry(Eterm mod, int uniq, int index)
erts_fun_read_lock();
ret = (ErlFunEntry *) hash_get(&erts_fun_table, (void*) &template);
if (ret) {
- long refc = erts_refc_inctest(&ret->refc, 1);
+ erts_aint_t refc = erts_refc_inctest(&ret->refc, 1);
if (refc < 2) /* Pending delete */
erts_refc_inc(&ret->refc, 1);
}
@@ -257,7 +257,7 @@ erts_dump_fun_entries(int to, void *to_arg)
#ifdef HIPE
erts_print(to, to_arg, "Native_address: %p\n", fe->native_address);
#endif
- erts_print(to, to_arg, "Refc: %d\n", erts_refc_read(&fe->refc, 1));
+ erts_print(to, to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1));
b = b->next;
}
}
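The lookup path above uses the same increment-and-test idiom that recurs throughout this patch: bump the refc and, if the post-increment value is below 2, the entry had a delete pending, so take one extra reference to revive it. A standalone rendition with C11 atomics (erts_refc_inctest() returns the new count; fetch_add returns the old one, hence the +1):

    #include <stdatomic.h>

    typedef struct { atomic_long refc; } entry;

    /* Mirrors the erts_refc_inctest()-then-revive idiom in erts_get_fun_entry(). */
    static void grab_entry(entry *e)
    {
        long refc = atomic_fetch_add(&e->refc, 1) + 1;  /* new value, as inctest */
        if (refc < 2)                        /* was 0: a delete was pending */
            atomic_fetch_add(&e->refc, 1);   /* extra reference revives the entry */
    }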
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 0f4d2a2ef9..2aa932e7d1 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2471,7 +2471,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
old = 0;
for (u.hdr = MSO(p).first; u.hdr; u.hdr = u.hdr->next) {
- long refc;
+ erts_aint_t refc;
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
refc = erts_refc_read(&u.pb->val->refc, 1);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 464ee750f7..0a57eb6d88 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -100,7 +100,7 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace
int erts_async_max_threads; /* number of threads for async support */
int erts_async_thread_suggested_stack_size;
-erts_smp_atomic_t erts_max_gen_gcs;
+erts_smp_atomic32_t erts_max_gen_gcs;
Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error,
am_info or am_warning, am_error is
@@ -250,7 +250,7 @@ erl_init(int ncpu)
erts_init_monitors();
erts_init_gc();
- init_time();
+ erts_init_time();
erts_init_sys_common_misc();
erts_init_process(ncpu);
erts_init_scheduling(use_multi_run_queue,
@@ -289,7 +289,7 @@ erl_init(int ncpu)
erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2);
erts_late_init_process();
#if HAVE_ERTS_MSEG
- erts_mseg_late_init(); /* Must be after timer (init_time()) and thread
+ erts_mseg_late_init(); /* Must be after timer (erts_init_time()) and thread
initializations */
#endif
#ifdef HIPE
@@ -323,7 +323,7 @@ init_shared_memory(int argc, char **argv)
#endif
global_gen_gcs = 0;
- global_max_gen_gcs = erts_smp_atomic_read(&erts_max_gen_gcs);
+ global_max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
global_gc_flags = erts_default_process_flags;
erts_global_offheap.mso = NULL;
@@ -651,7 +651,7 @@ early_init(int *argc, char **argv) /*
erts_writing_erl_crash_dump = 0;
#endif
- erts_smp_atomic_init(&erts_max_gen_gcs, (long)((Uint16) -1));
+ erts_smp_atomic32_init(&erts_max_gen_gcs, (erts_aint32_t) ((Uint16) -1));
erts_pre_init_process();
#if defined(USE_THREADS) && !defined(ERTS_SMP)
@@ -856,7 +856,7 @@ erl_start(int argc, char **argv)
envbufsz = sizeof(envbuf);
if (erts_sys_getenv("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
Uint16 max_gen_gcs = atoi(envbuf);
- erts_smp_atomic_set(&erts_max_gen_gcs, (long) max_gen_gcs);
+ erts_smp_atomic32_set(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs);
}
envbufsz = sizeof(envbuf);
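erts_max_gen_gcs only ever holds a Uint16, so it fits comfortably in the new 32-bit atomic; the default set in early_init() is (Uint16)-1, i.e. 65535. A hedged standalone sketch of the init/set pair with C11 atomics (getenv stands in for erts_sys_getenv()):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    static atomic_int_least32_t max_gen_gcs;

    static void init_max_gen_gcs(void)
    {
        /* Same default as early_init(): (Uint16)-1 == 65535. */
        atomic_store(&max_gen_gcs, (int_least32_t)(uint16_t)-1);
    }

    static void read_fullsweep_env(void)
    {
        const char *s = getenv("ERL_FULLSWEEP_AFTER");
        if (s)
            atomic_store(&max_gen_gcs, (int_least32_t)(uint16_t)atoi(s));
    }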
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 04c7dbd2ec..0185baee6b 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -177,6 +177,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "async_id", NULL },
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
+ { "misc_aux_work_queue", "index" },
+ { "misc_aux_work_pre_alloc_lock", "address" },
{ "sched_stat", NULL },
{ "run_queue_sleep_list", "address" },
#endif
@@ -978,10 +980,10 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
/* We only force busy when a lock order violation would occur,
and only on an even millisecond. */
{
- erts_thr_timeval_t time;
- erts_thr_time_now(&time);
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
- if ((time.tv_nsec / 1000000) & 1)
+ if ((tv.tv_usec / 1000) & 1)
return 0;
}
#endif
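The lock checker no longer needs the ethread timestamp API here; plain gettimeofday() and the parity of the current millisecond suffice. The check in isolation, as self-contained POSIX C:

    #include <sys/time.h>

    /* Force "busy" only on even milliseconds, as in
     * erts_lc_trylock_force_busy_flg() above. */
    static int force_busy_this_millisecond(void)
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);      /* stand-in for sys_gettimeofday() */
        return ((tv.tv_usec / 1000) & 1) == 0;
    }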
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 239773f366..a36c53560e 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -159,7 +159,7 @@ static char* lock_opt(Uint16 flag) {
}
static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
- long int colls, tries, w_state, r_state;
+ erts_aint_t colls, tries, w_state, r_state;
erts_lcnt_lock_stats_t *stats = NULL;
char *type;
@@ -385,7 +385,7 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
/* lock */
void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- long r_state = 0, w_state = 0;
+ erts_aint_t r_state = 0, w_state = 0;
erts_lcnt_thread_data_t *eltd;
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
@@ -418,7 +418,7 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
}
void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- long w_state;
+ erts_aint_t w_state;
erts_lcnt_thread_data_t *eltd;
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
@@ -471,7 +471,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
erts_lcnt_time_t time_wait;
erts_lcnt_lock_stats_t *stats;
#ifdef DEBUG
- long flowstate;
+ erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
@@ -516,8 +516,8 @@ void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
#ifdef DEBUG
- long w_state;
- long flowstate;
+ erts_aint_t w_state;
+ erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
#ifdef DEBUG
@@ -552,7 +552,7 @@ void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
/* Determine lock_state via res instead of state */
#ifdef DEBUG
- long flowstate;
+ erts_aint_t flowstate;
#endif
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
if (res != EBUSY) {
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 8cdda395df..6daa127d23 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -235,7 +235,7 @@ erts_sysname_to_connected_dist_entry(Eterm sysname)
erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
if (res_dep) {
- long refc = erts_refc_inctest(&res_dep->refc, 1);
+ erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1);
if (refc < 2) /* Pending delete */
erts_refc_inc(&res_dep->refc, 1);
}
@@ -257,7 +257,7 @@ DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
{
DistEntry *res;
DistEntry de;
- long refc;
+ erts_aint_t refc;
res = erts_find_dist_entry(sysname);
if (res)
return res;
@@ -279,7 +279,7 @@ DistEntry *erts_find_dist_entry(Eterm sysname)
erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
res = hash_get(&erts_dist_table, (void *) &de);
if (res) {
- long refc = erts_refc_inctest(&res->refc, 1);
+ erts_aint_t refc = erts_refc_inctest(&res->refc, 1);
if (refc < 2) /* Pending delete */
erts_refc_inc(&res->refc, 1);
}
@@ -586,7 +586,7 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
res = hash_get(&erts_node_table, (void *) &ne);
if (res && res != erts_this_node) {
- long refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
@@ -598,7 +598,7 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
res = hash_put(&erts_node_table, (void *) &ne);
ASSERT(res);
if (res != erts_this_node) {
- long refc = erts_refc_inctest(&res->refc, 0);
+ erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
if (refc < 2) /* New or pending delete */
erts_refc_inc(&res->refc, 1);
}
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index c10724b951..1b07024ca1 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -129,7 +129,7 @@ reset_handle(ErtsPortTask *ptp)
{
if (ptp->handle) {
ASSERT(ptp == handle2task(ptp->handle));
- erts_smp_atomic_set(ptp->handle, (long) NULL);
+ erts_smp_atomic_set(ptp->handle, (erts_aint_t) NULL);
}
}
@@ -138,7 +138,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
ptp->handle = pthp;
if (pthp) {
- erts_smp_atomic_set(pthp, (long) ptp);
+ erts_smp_atomic_set(pthp, (erts_aint_t) ptp);
ASSERT(ptp == handle2task(ptp->handle));
}
}
@@ -568,7 +568,7 @@ erts_port_task_schedule(Eterm id,
ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
if (xrunq) {
/* Port emigrated ... */
- erts_smp_atomic_set(&pp->run_queue, (long) xrunq);
+ erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = xrunq;
}
@@ -727,7 +727,8 @@ resume_after_block(void *vd)
ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
erts_smp_runq_lock(d->runq);
if (d->resp)
- *d->resp = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+ *d->resp = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ != (erts_aint_t) 0);
}
/*
@@ -748,7 +749,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
ErtsPortTask *ptp;
int res = 0;
int reds = ERTS_PORT_REDS_EXECUTE;
- long io_tasks_executed = 0;
+ erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
ErtsPortTaskExeBlockData blk_data = {runq, NULL};
@@ -942,7 +943,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
else {
/* Port emigrated ... */
- erts_smp_atomic_set(&pp->run_queue, (long) xrunq);
+ erts_smp_atomic_set(&pp->run_queue, (erts_aint_t) xrunq);
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
@@ -953,7 +954,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
port_was_enqueued = 1;
}
- res = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+ res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ != (erts_aint_t) 0);
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
@@ -971,7 +973,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_port_release(pp);
#else
{
- long refc;
+ erts_aint_t refc;
erts_smp_mtx_unlock(pp->lock);
refc = erts_smp_atomic_dectest(&pp->refc);
ASSERT(refc >= 0);
@@ -979,7 +981,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_unlock(runq);
erts_port_cleanup(pp); /* Might acquire runq lock */
erts_smp_runq_lock(runq);
- res = erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks) != (long) 0;
+ res = (erts_smp_atomic_read(&erts_port_task_outstanding_io_tasks)
+ != (erts_aint_t) 0);
}
}
#endif
@@ -1112,7 +1115,7 @@ erts_port_migrate(Port *prt, int *prt_locked,
if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt))
return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
dequeue_port(from_rq, prt);
- erts_smp_atomic_set(&prt->run_queue, (long) to_rq);
+ erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) to_rq);
enqueue_port(to_rq, prt);
return ERTS_MIGRATE_SUCCESS;
}
@@ -1125,7 +1128,7 @@ erts_port_migrate(Port *prt, int *prt_locked,
void
erts_port_task_init(void)
{
- erts_smp_atomic_init(&erts_port_task_outstanding_io_tasks, (long) 0);
+ erts_smp_atomic_init(&erts_port_task_outstanding_io_tasks, (erts_aint_t) 0);
init_port_task_alloc();
init_port_taskq_alloc();
}
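Port task handles keep a task pointer in an atomic word, which is why these sites cast through the pointer-sized erts_aint_t rather than the new 32-bit type. The same pattern in standard C11, with atomic_intptr_t standing in for erts_smp_atomic_t:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef atomic_intptr_t task_handle;            /* ErtsPortTaskHandle analogue */
    typedef struct task { task_handle *handle; } task;

    static void set_handle(task *t, task_handle *h)
    {
        t->handle = h;
        if (h)
            atomic_store(h, (intptr_t) t);          /* cf. (erts_aint_t) ptp */
    }

    static void reset_handle(task *t)
    {
        if (t->handle)
            atomic_store(t->handle, (intptr_t) NULL);
    }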
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index f12d02da0c..714b4ea7dd 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -79,7 +79,7 @@ ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
ERTS_GLB_INLINE void
erts_port_task_handle_init(ErtsPortTaskHandle *pthp)
{
- erts_smp_atomic_init(pthp, (long) NULL);
+ erts_smp_atomic_init(pthp, (erts_aint_t) NULL);
}
ERTS_GLB_INLINE int
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index fc950af8ce..ddfc27a93f 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -127,21 +127,22 @@ ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
int erts_disable_proc_not_running_opt;
-#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((long) 1) << 0)
-#define ERTS_SCHDLR_SSPND_CHNG_MSB (((long) 1) << 1)
-#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((long) 1) << 2)
+#define ERTS_SCHDLR_SSPND_CHNG_WAITER (((erts_aint32_t) 1) << 0)
+#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
+#define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2)
#ifndef DEBUG
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
- erts_smp_atomic_set(&schdlr_sspnd.changing, (VAL))
+ erts_smp_atomic32_set(&schdlr_sspnd.changing, (VAL))
#else
#define ERTS_SCHDLR_SSPND_CHNG_SET(VAL, OLD_VAL) \
do { \
- long old_val__ = erts_smp_atomic_xchg(&schdlr_sspnd.changing, \
- (VAL)); \
+ erts_aint32_t old_val__; \
+ old_val__ = erts_smp_atomic32_xchg(&schdlr_sspnd.changing, \
+ (VAL)); \
ASSERT(old_val__ == (OLD_VAL)); \
} while (0)
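The debug variant swaps the new state in with an atomic exchange purely so the previous state can be asserted; release builds pay only for a plain store. The same shape as standalone C11:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <assert.h>

    static atomic_int_least32_t changing;

    #ifndef DEBUG
    #define CHNG_SET(VAL, OLD_VAL) atomic_store(&changing, (VAL))
    #else
    #define CHNG_SET(VAL, OLD_VAL)                                      \
    do {                                                                \
        int_least32_t old_val__ = atomic_exchange(&changing, (VAL));    \
        assert(old_val__ == (OLD_VAL));                                 \
    } while (0)
    #endif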
@@ -154,10 +155,10 @@ static struct {
int online;
int curr_online;
int wait_curr_online;
- erts_smp_atomic_t changing;
- erts_smp_atomic_t active;
+ erts_smp_atomic32_t changing;
+ erts_smp_atomic32_t active;
struct {
- erts_smp_atomic_t ongoing;
+ erts_smp_atomic32_t ongoing;
long wait_active;
ErtsProcList *procs;
} msb; /* Multi Scheduling Block */
@@ -165,11 +166,11 @@ static struct {
static struct {
erts_smp_mtx_t update_mtx;
- erts_smp_atomic_t active_runqs;
+ erts_smp_atomic32_t active_runqs;
int last_active_runqs;
- erts_smp_atomic_t used_runqs;
+ erts_smp_atomic32_t used_runqs;
int forced_check_balance;
- erts_smp_atomic_t checking_balance;
+ erts_smp_atomic32_t checking_balance;
int halftime;
int full_reds_history_index;
struct {
@@ -199,11 +200,11 @@ static erts_tsd_key_t sched_data_key;
static erts_smp_mtx_t proc_tab_mtx;
-static erts_smp_atomic_t function_calls;
+static erts_smp_atomic32_t function_calls;
#ifdef ERTS_SMP
-static erts_smp_atomic_t doing_sys_schedule;
-static erts_smp_atomic_t no_empty_run_queues;
+static erts_smp_atomic32_t doing_sys_schedule;
+static erts_smp_atomic32_t no_empty_run_queues;
#else /* !ERTS_SMP */
ErtsSchedulerData *erts_scheduler_data;
#endif
@@ -247,7 +248,10 @@ Uint erts_num_active_procs;
Process** erts_active_procs;
#endif
-static erts_smp_atomic_t process_count;
+#if ERTS_MAX_PROCESSES > 0x7fffffff
+#error "Need to store process_count in another type"
+#endif
+static erts_smp_atomic32_t process_count;
typedef struct ErtsTermProcElement_ ErtsTermProcElement;
struct ErtsTermProcElement_ {
@@ -407,7 +411,7 @@ erts_init_process(int ncpu)
init_proclist_alloc();
- erts_smp_atomic_init(&process_count, 0);
+ erts_smp_atomic32_init(&process_count, 0);
if (erts_use_r9_pids_ports) {
proc_bits = ERTS_R9_PROC_BITS;
@@ -568,7 +572,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
#ifdef ERTS_SMP
void
-erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
+erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
{
switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
case ERTS_SSI_FLG_POLL_SLEEPING:
@@ -586,6 +590,122 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
}
}
+typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
+struct erts_misc_aux_work_t_ {
+ erts_misc_aux_work_t *next;
+ void (*func)(void *);
+ void *arg;
+};
+
+typedef struct {
+ erts_smp_mtx_t mtx;
+ erts_misc_aux_work_t *first;
+ erts_misc_aux_work_t *last;
+} erts_misc_aux_work_q_t;
+
+typedef union {
+ erts_misc_aux_work_q_t data;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_misc_aux_work_q_t))];
+} erts_algnd_misc_aux_work_q_t;
+
+static erts_algnd_misc_aux_work_q_t *misc_aux_work_queues;
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_aux_work,
+ erts_misc_aux_work_t,
+ 200,
+ ERTS_ALC_T_MISC_AUX_WORK)
+
+static void
+init_misc_aux_work(void)
+{
+ int ix;
+
+ init_misc_aux_work_alloc();
+
+ misc_aux_work_queues = erts_alloc(ERTS_ALC_T_MISC_AUX_WORK_Q,
+ (sizeof(erts_algnd_misc_aux_work_q_t)
+ *(erts_no_schedulers+1)));
+ if ((((UWord) misc_aux_work_queues) & ERTS_CACHE_LINE_MASK) != 0)
+ misc_aux_work_queues = ((erts_algnd_misc_aux_work_q_t *)
+ ((((UWord) misc_aux_work_queues)
+ & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE));
+
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ erts_smp_mtx_init_x(&misc_aux_work_queues[ix].data.mtx,
+ "misc_aux_work_queue",
+ make_small(ix + 1));
+ misc_aux_work_queues[ix].data.first = NULL;
+ misc_aux_work_queues[ix].data.last = NULL;
+ }
+}
+
+static void
+handle_misc_aux_work(ErtsSchedulerData *esdp)
+{
+ int ix = (int) esdp->no - 1;
+ erts_misc_aux_work_t *mawp;
+
+ erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
+ mawp = misc_aux_work_queues[ix].data.first;
+ misc_aux_work_queues[ix].data.first = NULL;
+ misc_aux_work_queues[ix].data.last = NULL;
+ erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
+
+ while (mawp) {
+ erts_misc_aux_work_t *free_mawp;
+ mawp->func(mawp->arg);
+ free_mawp = mawp;
+ mawp = mawp->next;
+ misc_aux_work_free(free_mawp);
+ }
+}
+
+void
+erts_smp_schedule_misc_aux_work(int ignore_self,
+ int max_sched,
+ void (*func)(void *),
+ void *arg)
+{
+ int ix, ignore_ix = -1;
+
+ if (ignore_self) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp)
+ ignore_ix = (int) esdp->no - 1;
+ }
+
+ ASSERT(0 <= max_sched && max_sched <= erts_no_schedulers);
+
+ for (ix = 0; ix < max_sched; ix++) {
+ erts_aint32_t aux_work;
+ erts_misc_aux_work_t *mawp;
+ ErtsSchedulerSleepInfo *ssi;
+ if (ix == ignore_ix)
+ continue;
+
+ mawp = misc_aux_work_alloc();
+
+ mawp->func = func;
+ mawp->arg = arg;
+ mawp->next = NULL;
+
+ erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
+ if (!misc_aux_work_queues[ix].data.last)
+ misc_aux_work_queues[ix].data.first = mawp;
+ else
+ misc_aux_work_queues[ix].data.last->next = mawp;
+ misc_aux_work_queues[ix].data.last = mawp;
+ erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
+
+ ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+ aux_work = erts_smp_atomic32_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_MISC);
+ if ((aux_work & ERTS_SSI_AUX_WORK_MISC) == 0)
+ erts_sched_poke(ssi);
+ }
+}
+
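A caller enqueues a (func, arg) pair on each target scheduler's queue; the atomic bor returns the previous aux_work mask, so a sleeping scheduler is poked only when the MISC bit was not already set. A hedged usage sketch of the new entry point; gather_stats and its argument are hypothetical, not part of this patch:

    /* Run gather_stats(arg) once on every scheduler except the caller's. */
    static void gather_stats(void *arg);    /* assumed callback */

    static void request_stats_everywhere(void *arg)
    {
        erts_smp_schedule_misc_aux_work(1,                  /* ignore_self    */
                                        erts_no_schedulers, /* all schedulers */
                                        gather_stats,
                                        arg);
    }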
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
void
erts_smp_notify_check_children_needed(void)
@@ -593,11 +713,11 @@ erts_smp_notify_check_children_needed(void)
int i;
for (i = 0; i < erts_no_schedulers; i++) {
- long aux_work;
+ erts_aint32_t aux_work;
ErtsSchedulerSleepInfo *ssi;
ssi = ERTS_SCHED_SLEEP_INFO_IX(i);
- aux_work = erts_smp_atomic_bor(&ssi->aux_work,
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work = erts_smp_atomic32_bor(&ssi->aux_work,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
if (!(aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN))
erts_sched_poke(ssi);
}
@@ -605,16 +725,22 @@ erts_smp_notify_check_children_needed(void)
#endif
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
-static ERTS_INLINE long
+static ERTS_INLINE erts_aint32_t
blockable_aux_work(ErtsSchedulerData *esdp,
ErtsSchedulerSleepInfo *ssi,
- long aux_work)
+ erts_aint32_t aux_work)
{
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+ if (aux_work & ERTS_SSI_AUX_WORK_MISC) {
+ aux_work = erts_smp_atomic32_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_MISC);
+ aux_work &= ~ERTS_SSI_AUX_WORK_MISC;
+ handle_misc_aux_work(esdp);
+ }
#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
- aux_work = erts_smp_atomic_band(&ssi->aux_work,
- ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
+ aux_work = erts_smp_atomic32_band(&ssi->aux_work,
+ ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
aux_work &= ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
erts_check_children();
}
@@ -626,10 +752,10 @@ blockable_aux_work(ErtsSchedulerData *esdp,
#endif
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
-static ERTS_INLINE long
+static ERTS_INLINE erts_aint32_t
nonblockable_aux_work(ErtsSchedulerData *esdp,
ErtsSchedulerSleepInfo *ssi,
- long aux_work)
+ erts_aint32_t aux_work)
{
if (aux_work & ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK) {
@@ -694,10 +820,10 @@ prepare_for_sys_schedule(void)
{
#ifdef ERTS_SMP
while (!erts_port_task_have_outstanding_io_tasks()
- && !erts_smp_atomic_xchg(&doing_sys_schedule, 1)) {
+ && !erts_smp_atomic32_xchg(&doing_sys_schedule, 1)) {
if (!erts_port_task_have_outstanding_io_tasks())
return 1;
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
}
return 0;
#else
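prepare_for_sys_schedule() claims the single poller slot with an atomic exchange and then re-checks the condition that justified claiming it, backing off if it no longer holds. The shape in self-contained C11 (the predicate is an assumed stand-in for erts_port_task_have_outstanding_io_tasks()):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int_least32_t doing_sys_schedule;
    extern bool have_outstanding_io(void);   /* assumed predicate */

    /* Returns 1 if this thread now owns the sys-schedule slot. */
    static int try_claim_sys_schedule(void)
    {
        while (!have_outstanding_io()
               && !atomic_exchange(&doing_sys_schedule, 1)) {
            if (!have_outstanding_io())
                return 1;                          /* claimed and re-verified */
            atomic_store(&doing_sys_schedule, 0);  /* condition changed: back off */
        }
        return 0;
    }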
@@ -745,53 +871,55 @@ sched_active(Uint no, ErtsRunQueue *rq)
static int ERTS_INLINE
ongoing_multi_scheduling_block(void)
{
- return erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing) != 0;
+ return erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing) != 0;
}
static ERTS_INLINE void
empty_runq(ErtsRunQueue *rq)
{
- long oifls = erts_smp_atomic_band(&rq->info_flags, ~ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_band(&rq->info_flags,
+ ~ERTS_RUNQ_IFLG_NONEMPTY);
if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
#ifdef DEBUG
- long empty = erts_smp_atomic_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 <= empty && empty < 2*erts_no_run_queues);
#endif
- erts_smp_atomic_inc(&no_empty_run_queues);
+ erts_smp_atomic32_inc(&no_empty_run_queues);
}
}
static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
- long oifls = erts_smp_atomic_bor(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_aint32_t oifls = erts_smp_atomic32_bor(&rq->info_flags,
+ ERTS_RUNQ_IFLG_NONEMPTY);
if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
#ifdef DEBUG
- long empty = erts_smp_atomic_read(&no_empty_run_queues);
+ erts_aint32_t empty = erts_smp_atomic32_read(&no_empty_run_queues);
/*
* For a short period of time no_empty_run_queues may have
* been increased twice for a specific run queue.
*/
ASSERT(0 < empty && empty <= 2*erts_no_run_queues);
#endif
- erts_smp_atomic_dec(&no_empty_run_queues);
+ erts_smp_atomic32_dec(&no_empty_run_queues);
}
}
-static long
+static erts_aint32_t
sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
- long xflgs = 0;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t xflgs = 0;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -799,16 +927,16 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi)
return oflgs;
}
-static long
+static erts_aint32_t
sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING);
- long xflgs = ERTS_SSI_FLG_WAITING;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING);
+ erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
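All of these sched_prep_*/sched_set_* helpers share one compare-and-exchange loop: expect a particular flag word, try to install the new one, and on failure adopt the observed value as the next expectation. A skeleton of that loop in C11 (the bail-out condition varies per helper and is only illustrative here):

    #include <stdatomic.h>
    #include <stdint.h>

    #define FLG_SLEEPING ((int_least32_t) 1 << 0)
    #define FLG_WAITING  ((int_least32_t) 1 << 1)

    static int_least32_t prep_spin_wait(atomic_int_least32_t *flags)
    {
        int_least32_t nflgs = FLG_SLEEPING | FLG_WAITING;
        int_least32_t xflgs = 0;              /* expected previous state */

        for (;;) {
            int_least32_t oflgs = xflgs;
            /* cf. erts_smp_atomic32_cmpxchg(): install nflgs if *flags == xflgs */
            if (atomic_compare_exchange_strong(flags, &oflgs, nflgs))
                return nflgs;
            if (oflgs & FLG_SLEEPING)         /* illustrative exit condition */
                return oflgs;
            xflgs = oflgs;                    /* retry against what we saw */
        }
    }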
@@ -817,15 +945,15 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi)
return oflgs;
}
-static long
+static erts_aint32_t
sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
{
- long until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
+ int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
int sc = spincount;
- long flgs;
+ erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
!= (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
break;
@@ -839,18 +967,18 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
return flgs;
}
-static long
-sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
+static erts_aint32_t
+sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
{
- long oflgs;
- long nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
- long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING|sleep_type;
+ erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
if (sleep_type == ERTS_SSI_FLG_TSE_SLEEPING)
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
@@ -867,14 +995,14 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, long sleep_type)
!= ERTS_SSI_FLG_WAITING)
static void
-scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
+scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
{
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
int spincount;
- long flgs;
+ erts_aint32_t flgs;
#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
|| defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
- long aux_work;
+ erts_aint32_t aux_work;
#endif
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -910,7 +1038,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
tse_wait:
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
tse_blockable_aux_work:
aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -920,7 +1048,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
@@ -953,7 +1081,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
goto tse_blockable_aux_work;
@@ -965,16 +1093,16 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
erts_smp_runq_lock(rq);
sched_active(esdp->no, rq);
}
else {
- long dt;
+ erts_aint_t dt;
- erts_smp_atomic_set(&function_calls, 0);
+ erts_smp_atomic32_set(&function_calls, 0);
*fcalls = 0;
sched_waiting_sys(esdp->no, rq);
@@ -991,23 +1119,23 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erl_sys_schedule(1); /* Might give us something to do */
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ dt = erts_do_time_read_and_reset();
+ if (dt) erts_bump_timer(dt);
sys_aux_work:
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
aux_work = blockable_aux_work(esdp, ssi, aux_work);
#endif
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
goto sys_woken;
@@ -1025,7 +1153,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* call erl_sys_schedule() until it is handled.
*/
if (erts_port_task_have_outstanding_io_tasks()) {
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
/*
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
@@ -1044,7 +1172,7 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* sleep in erl_sys_schedule().
*/
if (erts_port_task_have_outstanding_io_tasks()) {
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
/*
* Got to check that we still got I/O tasks; otherwise
@@ -1088,8 +1216,8 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erl_sys_schedule(0);
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ dt = erts_do_time_read_and_reset();
+ if (dt) erts_bump_timer(dt);
flgs = sched_prep_cont_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_WAITING)
@@ -1098,9 +1226,9 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sys_woken:
erts_smp_runq_lock(rq);
sys_locked_woken:
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
if (flgs & ~ERTS_SSI_FLG_SUSPENDED)
- erts_smp_atomic_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_band(&ssi->flags, ERTS_SSI_FLG_SUSPENDED);
sched_active_sys(esdp->no, rq);
}
}
@@ -1108,15 +1236,15 @@ scheduler_wait(long *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
}
-static ERTS_INLINE long
+static ERTS_INLINE erts_aint32_t
ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
{
/* reset all flags but suspended */
- long oflgs;
- long nflgs = 0;
- long xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = 0;
+ erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING;
while (1) {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return oflgs;
nflgs = oflgs & ERTS_SSI_FLG_SUSPENDED;
@@ -1148,7 +1276,7 @@ wake_scheduler(ErtsRunQueue *rq, int incq, int one)
if (!ssi)
erts_smp_spin_unlock(&sl->lock);
else if (one) {
- long flgs;
+ erts_aint32_t flgs;
if (ssi->prev)
ssi->prev->next = ssi->next;
else {
@@ -1195,15 +1323,17 @@ wake_all_schedulers(void)
static ERTS_INLINE int
chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
{
- long iflgs;
+ erts_aint32_t iflgs;
ErtsRunQueue *wrq;
if (crq->ix == ix)
return 0;
wrq = ERTS_RUNQ_IX(ix);
- iflgs = erts_smp_atomic_read(&wrq->info_flags);
+ iflgs = erts_smp_atomic32_read(&wrq->info_flags);
if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
if (activate) {
- if (ix == erts_smp_atomic_cmpxchg(&balance_info.active_runqs, ix+1, ix)) {
+ if (ix == erts_smp_atomic32_cmpxchg(&balance_info.active_runqs,
+ ix+1,
+ ix)) {
erts_smp_xrunq_lock(crq, wrq);
wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
erts_smp_xrunq_unlock(crq, wrq);
@@ -1220,8 +1350,8 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
{
int ix = crq->ix;
int stop_ix = ix;
- int active_ix = erts_smp_atomic_read(&balance_info.active_runqs);
- int balance_ix = erts_smp_atomic_read(&balance_info.used_runqs);
+ int active_ix = erts_smp_atomic32_read(&balance_info.active_runqs);
+ int balance_ix = erts_smp_atomic32_read(&balance_info.used_runqs);
if (active_ix > balance_ix)
active_ix = balance_ix;
@@ -1273,7 +1403,7 @@ erts_sched_notify_check_cpu_bind(void)
int ix;
if (erts_common_run_queue) {
for (ix = 0; ix < erts_no_schedulers; ix++)
- erts_smp_atomic_set(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
+ erts_smp_atomic32_set(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
wake_all_schedulers();
}
else {
@@ -1441,14 +1571,15 @@ evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
erts_smp_runq_lock(evac_rq);
- erts_smp_atomic_bor(&evac_rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&evac_rq->scheduler->ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
| ERTS_RUNQ_FLGS_EVACUATE_QMASK
| ERTS_RUNQ_FLG_SUSPENDED);
- erts_smp_atomic_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
+ erts_smp_atomic32_bor(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
/*
* Need to set up evacuation paths first since we
* may release the run queue lock on evac_rq
@@ -1697,7 +1828,7 @@ static ERTS_INLINE int
check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
- long iflgs = erts_smp_atomic_read(&vrq->info_flags);
+ erts_aint32_t iflgs = erts_smp_atomic32_read(&vrq->info_flags);
if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY)
return try_steal_task_from_victim(rq, rq_lockedp, vrq);
else
@@ -1727,8 +1858,8 @@ try_steal_task(ErtsRunQueue *rq)
ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked);
- active_rqs = erts_smp_atomic_read(&balance_info.active_runqs);
- blnc_rqs = erts_smp_atomic_read(&balance_info.used_runqs);
+ active_rqs = erts_smp_atomic32_read(&balance_info.active_runqs);
+ blnc_rqs = erts_smp_atomic32_read(&balance_info.used_runqs);
if (active_rqs > blnc_rqs)
active_rqs = blnc_rqs;
@@ -1739,7 +1870,7 @@ try_steal_task(ErtsRunQueue *rq)
if (active_rqs < blnc_rqs) {
int no = blnc_rqs - active_rqs;
int stop_ix = vix = active_rqs + rq->ix % no;
- while (erts_smp_atomic_read(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
res = check_possible_steal_victim(rq, &rq_locked, vix);
if (res)
goto done;
@@ -1754,7 +1885,7 @@ try_steal_task(ErtsRunQueue *rq)
vix = rq->ix;
/* ... then try to steal a job from another active queue... */
- while (erts_smp_atomic_read(&no_empty_run_queues) < blnc_rqs) {
+ while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
vix++;
if (vix >= active_rqs)
vix = 0;
@@ -1850,15 +1981,15 @@ check_balance(ErtsRunQueue *c_rq)
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
mmax_len, blnc_no_rqs, qix, pix, freds_hist_ix;
- if (erts_smp_atomic_xchg(&balance_info.checking_balance, 1)) {
+ if (erts_smp_atomic32_xchg(&balance_info.checking_balance, 1)) {
c_rq->check_balance_reds = INT_MAX;
return;
}
- blnc_no_rqs = (int) erts_smp_atomic_read(&balance_info.used_runqs);
+ blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
return;
}
@@ -1866,7 +1997,7 @@ check_balance(ErtsRunQueue *c_rq)
if (balance_info.halftime) {
balance_info.halftime = 0;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
@@ -1894,12 +2025,12 @@ check_balance(ErtsRunQueue *c_rq)
forced = balance_info.forced_check_balance;
balance_info.forced_check_balance = 0;
- blnc_no_rqs = (int) erts_smp_atomic_read(&balance_info.used_runqs);
+ blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
if (blnc_no_rqs == 1) {
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_runq_lock(c_rq);
c_rq->check_balance_reds = INT_MAX;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
return;
}
@@ -1908,7 +2039,7 @@ check_balance(ErtsRunQueue *c_rq)
if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE)
balance_info.full_reds_history_index = 0;
- current_active = erts_smp_atomic_read(&balance_info.active_runqs);
+ current_active = erts_smp_atomic32_read(&balance_info.active_runqs);
/* Read balance information for all run queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -2243,10 +2374,10 @@ erts_fprintf(stderr, "--------------------------------\n");
}
balance_info.last_active_runqs = active;
- erts_smp_atomic_set(&balance_info.active_runqs, active);
+ erts_smp_atomic32_set(&balance_info.active_runqs, active);
balance_info.halftime = 1;
- erts_smp_atomic_set(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_set(&balance_info.checking_balance, 0);
/* Write migration paths and reset balance statistics in all queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
@@ -2395,7 +2526,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ASSERT((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) == 0);
#ifdef ERTS_SMP
- erts_smp_atomic_init(&no_empty_run_queues, 0);
+ erts_smp_atomic32_init(&no_empty_run_queues, 0);
#endif
erts_no_run_queues = n;
@@ -2405,7 +2536,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
rq->ix = ix;
- erts_smp_atomic_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
+ erts_smp_atomic32_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
/* make sure that the "extra" id corresponds to the scheduler's
* id if the esdp->no <-> ix+1 mapping changes.
@@ -2502,9 +2633,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ssi->next = NULL;
ssi->prev = NULL;
#endif
- erts_smp_atomic_init(&ssi->flags, 0);
+ erts_smp_atomic32_init(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
- erts_smp_atomic_init(&ssi->aux_work, 0);
+ erts_smp_atomic32_init(&ssi->aux_work, 0);
}
#endif
@@ -2555,7 +2686,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
}
#ifdef ERTS_SMP
- erts_smp_atomic_init(&esdp->chk_cpu_bind, 0);
+ erts_smp_atomic32_init(&esdp->chk_cpu_bind, 0);
#endif
}
@@ -2563,21 +2694,21 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
erts_smp_cnd_init(&schdlr_sspnd.cnd);
- erts_smp_atomic_init(&schdlr_sspnd.changing, 0);
+ erts_smp_atomic32_init(&schdlr_sspnd.changing, 0);
schdlr_sspnd.online = no_schedulers_online;
schdlr_sspnd.curr_online = no_schedulers;
- erts_smp_atomic_init(&schdlr_sspnd.msb.ongoing, 0);
- erts_smp_atomic_init(&schdlr_sspnd.active, no_schedulers);
+ erts_smp_atomic32_init(&schdlr_sspnd.msb.ongoing, 0);
+ erts_smp_atomic32_init(&schdlr_sspnd.active, no_schedulers);
schdlr_sspnd.msb.procs = NULL;
- erts_smp_atomic_set(&balance_info.used_runqs,
- erts_common_run_queue ? 1 : no_schedulers_online);
- erts_smp_atomic_init(&balance_info.active_runqs, no_schedulers);
+ erts_smp_atomic32_set(&balance_info.used_runqs,
+ erts_common_run_queue ? 1 : no_schedulers_online);
+ erts_smp_atomic32_init(&balance_info.active_runqs, no_schedulers);
balance_info.last_active_runqs = no_schedulers;
erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
- erts_smp_atomic_init(&balance_info.checking_balance, 0);
+ erts_smp_atomic32_init(&balance_info.checking_balance, 0);
balance_info.prev_rise.active_runqs = 0;
balance_info.prev_rise.max_len = 0;
balance_info.prev_rise.reds = 0;
@@ -2586,8 +2717,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
if (no_schedulers_online < no_schedulers) {
if (erts_common_run_queue) {
for (ix = no_schedulers_online; ix < no_schedulers; ix++)
- erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
else {
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
@@ -2601,7 +2732,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
ERTS_SCHDLR_SSPND_CHNG_SET((ERTS_SCHDLR_SSPND_CHNG_ONLN
| ERTS_SCHDLR_SSPND_CHNG_WAITER), 0);
- erts_smp_atomic_init(&doing_sys_schedule, 0);
+ erts_smp_atomic32_init(&doing_sys_schedule, 0);
+
+ init_misc_aux_work();
#else /* !ERTS_SMP */
{
@@ -2615,7 +2748,7 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
erts_no_schedulers = 1;
#endif
- erts_smp_atomic_init(&function_calls, 0);
+ erts_smp_atomic32_init(&function_calls, 0);
/* init port tasks */
erts_port_task_init();
@@ -2730,6 +2863,19 @@ resume_process(Process *p)
p->rstatus = P_FREE;
}
+int
+erts_get_max_no_executing_schedulers(void)
+{
+#ifdef ERTS_SMP
+ if (erts_smp_atomic32_read(&schdlr_sspnd.changing))
+ return (int) erts_no_schedulers;
+ ERTS_THR_MEMORY_BARRIER;
+ return (int) erts_smp_atomic32_read(&schdlr_sspnd.active);
+#else
+ return 1;
+#endif
+}
+
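The new query reads `changing` before trusting `active`: while a scheduler-count change is in flight it answers with the pessimistic upper bound, and a full barrier orders the two reads. A hedged C11 analogue:

    #include <stdatomic.h>

    static atomic_int_least32_t changing;  /* nonzero while count is changing */
    static atomic_int_least32_t active;    /* currently executing schedulers  */
    extern int no_schedulers;

    int max_no_executing_schedulers(void)
    {
        if (atomic_load(&changing))
            return no_schedulers;                    /* pessimistic upper bound */
        atomic_thread_fence(memory_order_seq_cst);   /* cf. ERTS_THR_MEMORY_BARRIER */
        return (int) atomic_load(&active);
    }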
#ifdef ERTS_SMP
static void
@@ -2748,13 +2894,13 @@ static void
scheduler_ix_resume_wake(Uint ix)
{
ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- long xflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
- long oflgs;
+ erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t oflgs;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, 0, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, 0, xflgs);
if (oflgs == xflgs) {
erts_sched_finish_poke(ssi, oflgs);
break;
@@ -2763,17 +2909,17 @@ scheduler_ix_resume_wake(Uint ix)
} while (oflgs & ERTS_SSI_FLG_SUSPENDED);
}
-static long
-sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
+static erts_aint32_t
+sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
- long xflgs = xpct;
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t xflgs = xpct;
do {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
xflgs = oflgs;
@@ -2782,15 +2928,15 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, long xpct)
return oflgs;
}
-static long
+static erts_aint32_t
sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
{
int until_yield = ERTS_SCHED_SPIN_UNTIL_YIELD;
int sc = spincount;
- long flgs;
+ erts_aint32_t flgs;
do {
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if ((flgs & (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED))
@@ -2808,22 +2954,22 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
return flgs;
}
-static long
+static erts_aint32_t
sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
{
- long oflgs;
- long nflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_TSE_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
- long xflgs = (ERTS_SSI_FLG_SLEEPING
- | ERTS_SSI_FLG_WAITING
- | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t oflgs;
+ erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_TSE_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
+ erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING
+ | ERTS_SSI_FLG_WAITING
+ | ERTS_SSI_FLG_SUSPENDED);
erts_tse_reset(ssi->event);
while (1) {
- oflgs = erts_smp_atomic_cmpxchg(&ssi->flags, nflgs, xflgs);
+ oflgs = erts_smp_atomic32_cmpxchg(&ssi->flags, nflgs, xflgs);
if (oflgs == xflgs)
return nflgs;
if ((oflgs & (ERTS_SSI_FLG_SLEEPING
@@ -2841,8 +2987,8 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
static void
suspend_scheduler(ErtsSchedulerData *esdp)
{
- long flgs;
- int changing;
+ erts_aint32_t flgs;
+ erts_aint32_t changing;
long no = (long) esdp->no;
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
long active_schedulers;
@@ -2850,7 +2996,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
int wake = 0;
#if defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK) \
|| defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK)
- long aux_work;
+ erts_aint32_t aux_work;
#endif
/*
@@ -2878,15 +3024,15 @@ suspend_scheduler(ErtsSchedulerData *esdp)
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
- active_schedulers = erts_smp_atomic_dectest(&schdlr_sspnd.active);
+ active_schedulers = erts_smp_atomic32_dectest(&schdlr_sspnd.active);
ASSERT(active_schedulers >= 1);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing & ERTS_SCHDLR_SSPND_CHNG_MSB) {
if (active_schedulers == schdlr_sspnd.msb.wait_active)
wake = 1;
if (active_schedulers == 1) {
- changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_MSB;
}
}
@@ -2908,8 +3054,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
&& schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online)
wake = 1;
if (schdlr_sspnd.online == schdlr_sspnd.curr_online) {
- changing = erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ changing = erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
changing &= ~ERTS_SCHDLR_SSPND_CHNG_ONLN;
}
}
@@ -2919,29 +3065,30 @@ suspend_scheduler(ErtsSchedulerData *esdp)
wake = 0;
}
- flgs = erts_smp_atomic_read(&ssi->flags);
+ flgs = erts_smp_atomic32_read(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
blockable_aux_work:
blockable_aux_work(esdp, ssi, aux_work);
#endif
erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
while (1) {
- long flgs;
+ erts_aint32_t flgs;
#ifdef ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK
#ifndef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
#endif
nonblockable_aux_work(esdp, ssi, aux_work);
#endif
- flgs = sched_spin_suspended(ssi, ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
+ flgs = sched_spin_suspended(ssi,
+ ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT);
if (flgs == (ERTS_SSI_FLG_SLEEPING
| ERTS_SSI_FLG_WAITING
| ERTS_SSI_FLG_SUSPENDED)) {
@@ -2961,13 +3108,13 @@ suspend_scheduler(ErtsSchedulerData *esdp)
| ERTS_SSI_FLG_SUSPENDED));
if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
break;
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER)
break;
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
- aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ aux_work = erts_smp_atomic32_read(&ssi->aux_work);
if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
goto blockable_aux_work;
@@ -2979,19 +3126,19 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
}
- active_schedulers = erts_smp_atomic_inctest(&schdlr_sspnd.active);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ active_schedulers = erts_smp_atomic32_inctest(&schdlr_sspnd.active);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
&& schdlr_sspnd.online == active_schedulers) {
- erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
}
ASSERT(no <= schdlr_sspnd.online);
- ASSERT(!erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
+ ASSERT(!erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
}
@@ -3020,7 +3167,7 @@ do { \
(RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \
(RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \
- erts_smp_atomic_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED); \
+ erts_smp_atomic32_band(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\
for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \
(RQ)->procs.prio_info[pix__].max_len = 0; \
(RQ)->procs.prio_info[pix__].reds = 0; \
@@ -3062,9 +3209,9 @@ erts_schedulers_state(Uint *total,
int yield_allowed)
{
int res;
- long changing;
+ erts_aint32_t changing;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (yield_allowed && (changing & ~ERTS_SCHDLR_SSPND_CHNG_WAITER))
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
else {
@@ -3085,7 +3232,7 @@ erts_set_schedulers_online(Process *p,
Sint *old_no)
{
int ix, res, no, have_unlocked_plocks;
- long changing;
+ erts_aint32_t changing;
if (new_no < 1 || erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -3095,7 +3242,7 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART;
}
@@ -3142,7 +3289,7 @@ erts_set_schedulers_online(Process *p,
ErtsRunQueue *to_rq = ERTS_RUNQ_IX(ix % no);
evacuate_run_queue(from_rq, to_rq);
}
- erts_smp_atomic_set(&balance_info.used_runqs, no);
+ erts_smp_atomic32_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
@@ -3170,8 +3317,8 @@ erts_set_schedulers_online(Process *p,
for (ix = no; ix < online; ix++) {
ErtsSchedulerSleepInfo *ssi;
ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
- erts_smp_atomic_bor(&ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
}
wake_all_schedulers();
}
@@ -3196,7 +3343,7 @@ erts_set_schedulers_online(Process *p,
for (ix = erts_no_run_queues-1; ix >= no; ix--)
evacuate_run_queue(ERTS_RUNQ_IX(ix),
ERTS_RUNQ_IX(ix % no));
- erts_smp_atomic_set(&balance_info.used_runqs, no);
+ erts_smp_atomic32_set(&balance_info.used_runqs, no);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
for (ix = no; ix < online; ix++) {
@@ -3218,10 +3365,11 @@ erts_set_schedulers_online(Process *p,
NULL);
ASSERT(res != ERTS_SCHDLR_SSPND_DONE
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic_read(&schdlr_sspnd.changing)));
- erts_smp_atomic_band(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
}
@@ -3236,11 +3384,11 @@ ErtsSchedSuspendResult
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
{
int ix, res, have_unlocked_plocks = 0;
- long changing;
+ erts_aint32_t changing;
ErtsProcList *plp;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- changing = erts_smp_atomic_read(&schdlr_sspnd.changing);
+ changing = erts_smp_atomic32_read(&schdlr_sspnd.changing);
if (changing) {
res = ERTS_SCHDLR_SSPND_YIELD_RESTART; /* Yield */
}
@@ -3250,7 +3398,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
plp->next = schdlr_sspnd.msb.procs;
schdlr_sspnd.msb.procs = plp;
p->flags |= F_HAVE_BLCKD_MSCHED;
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
}
@@ -3261,11 +3409,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
- ASSERT(0 == erts_smp_atomic_read(&schdlr_sspnd.msb.ongoing));
- erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 1);
+ ASSERT(0 == erts_smp_atomic32_read(&schdlr_sspnd.msb.ongoing));
+ erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 1);
if (online == 1) {
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
}
else {
@@ -3285,14 +3433,14 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
}
if (erts_common_run_queue) {
for (ix = 1; ix < online; ix++)
- erts_smp_atomic_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_bor(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
erts_smp_mtx_lock(&balance_info.update_mtx);
- erts_smp_atomic_set(&balance_info.used_runqs, 1);
+ erts_smp_atomic32_set(&balance_info.used_runqs, 1);
for (ix = 0; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
erts_smp_runq_lock(rq);
@@ -3314,7 +3462,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
susp_sched_prep_block,
susp_sched_resume_block,
NULL);
- while (erts_smp_atomic_read(&schdlr_sspnd.active)
+ while (erts_smp_atomic32_read(&schdlr_sspnd.active)
!= schdlr_sspnd.msb.wait_active)
erts_smp_cnd_wait(&schdlr_sspnd.cnd, &schdlr_sspnd.mtx);
erts_smp_activity_end(ERTS_ACTIVITY_WAIT,
@@ -3323,11 +3471,11 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
NULL);
ASSERT(res != ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED
? (ERTS_SCHDLR_SSPND_CHNG_WAITER
- & erts_smp_atomic_read(&schdlr_sspnd.changing))
+ & erts_smp_atomic32_read(&schdlr_sspnd.changing))
: (ERTS_SCHDLR_SSPND_CHNG_WAITER
- == erts_smp_atomic_read(&schdlr_sspnd.changing)));
- erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
+ == erts_smp_atomic32_read(&schdlr_sspnd.changing)));
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
plp->next = schdlr_sspnd.msb.procs;
@@ -3394,16 +3542,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
});
#endif
p->flags &= ~F_HAVE_BLCKD_MSCHED;
- erts_smp_atomic_set(&schdlr_sspnd.msb.ongoing, 0);
+ erts_smp_atomic32_set(&schdlr_sspnd.msb.ongoing, 0);
if (schdlr_sspnd.online == 1) {
/* No schedulers to resume */
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.active) == 1);
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.active) == 1);
ERTS_SCHDLR_SSPND_CHNG_SET(0, ERTS_SCHDLR_SSPND_CHNG_MSB);
}
else if (erts_common_run_queue) {
for (ix = 1; ix < schdlr_sspnd.online; ix++)
- erts_smp_atomic_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
- ~ERTS_SSI_FLG_SUSPENDED);
+ erts_smp_atomic32_band(&ERTS_SCHED_SLEEP_INFO_IX(ix)->flags,
+ ~ERTS_SSI_FLG_SUSPENDED);
wake_all_schedulers();
}
else {
@@ -3429,7 +3577,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
evacuate_run_queue(ERTS_RUNQ_IX(ix),
ERTS_RUNQ_IX(ix % online));
- erts_smp_atomic_set(&balance_info.used_runqs, online);
+ erts_smp_atomic32_set(&balance_info.used_runqs, online);
/* Make sure that we balance soon... */
balance_info.forced_check_balance = 1;
erts_smp_runq_lock(ERTS_RUNQ_IX(0));
@@ -3453,7 +3601,7 @@ void
erts_dbg_multi_scheduling_return_trap(Process *p, Eterm return_value)
{
if (return_value == am_blocked) {
- long active = erts_smp_atomic_read(&schdlr_sspnd.active);
+ erts_aint32_t active = erts_smp_atomic32_read(&schdlr_sspnd.active);
ASSERT(1 <= active && active <= 2);
ASSERT(ERTS_PROC_GET_SCHDATA(p)->no == 1);
}
@@ -3536,12 +3684,12 @@ sched_thread_func(void *vesdp)
erts_thread_init_float();
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- ASSERT(erts_smp_atomic_read(&schdlr_sspnd.changing)
+ ASSERT(erts_smp_atomic32_read(&schdlr_sspnd.changing)
& ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (--schdlr_sspnd.curr_online == schdlr_sspnd.wait_curr_online) {
- erts_smp_atomic_band(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
+ erts_smp_atomic32_band(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_ONLN);
if (((ErtsSchedulerData *) vesdp)->no != 1)
erts_smp_cnd_signal(&schdlr_sspnd.cnd);
}
@@ -4914,10 +5062,10 @@ Process *schedule(Process *p, int calls)
{
ErtsRunQueue *rq;
ErtsRunPrioQueue *rpq;
- long dt;
+ erts_aint_t dt;
ErtsSchedulerData *esdp;
int context_reds;
- long fcalls;
+ int fcalls;
int input_reductions;
int actual_reds;
int reds;
@@ -4940,7 +5088,7 @@ Process *schedule(Process *p, int calls)
esdp = erts_get_scheduler_data();
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
- fcalls = erts_smp_atomic_read(&function_calls);
+ fcalls = (int) erts_smp_atomic32_read(&function_calls);
actual_reds = reds = 0;
erts_smp_runq_lock(rq);
} else {
@@ -4958,7 +5106,7 @@ Process *schedule(Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
- fcalls = erts_smp_atomic_addtest(&function_calls, reds);
+ fcalls = (int) erts_smp_atomic32_addtest(&function_calls, reds);
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
@@ -5059,10 +5207,10 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- dt = do_time_read_and_reset();
+ dt = erts_do_time_read_and_reset();
if (dt) {
erts_smp_runq_unlock(rq);
- bump_timer(dt);
+ erts_bump_timer(dt);
erts_smp_runq_lock(rq);
}
BM_STOP_TIMER(system);
@@ -5091,14 +5239,14 @@ Process *schedule(Process *p, int calls)
| ERTS_RUNQ_FLG_CHK_CPU_BIND
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic_read(&esdp->ssi->flags)
+ || (erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
suspend_scheduler(esdp);
}
if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
- || erts_smp_atomic_read(&esdp->chk_cpu_bind)) {
+ || erts_smp_atomic32_read(&esdp->chk_cpu_bind)) {
erts_sched_check_cpu_bind(esdp);
}
}
@@ -5107,7 +5255,7 @@ Process *schedule(Process *p, int calls)
|| defined(ERTS_SCHED_NEED_NONBLOCKABLE_AUX_WORK)
{
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
- long aux_work = erts_smp_atomic_read(&ssi->aux_work);
+ erts_aint32_t aux_work = erts_smp_atomic32_read(&ssi->aux_work);
if (aux_work) {
erts_smp_runq_unlock(rq);
#ifdef ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
@@ -5149,9 +5297,9 @@ Process *schedule(Process *p, int calls)
if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
| ERTS_RUNQ_FLG_SUSPENDED)) {
if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- || (erts_smp_atomic_read(&esdp->ssi->flags)
+ || (erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED)) {
- ASSERT(erts_smp_atomic_read(&esdp->ssi->flags)
+ ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
& ERTS_SSI_FLG_SUSPENDED);
non_empty_runq(rq);
goto continue_check_activities_to_run;
@@ -5193,7 +5341,7 @@ Process *schedule(Process *p, int calls)
* Schedule system-level activities.
*/
- erts_smp_atomic_set(&function_calls, 0);
+ erts_smp_atomic32_set(&function_calls, 0);
fcalls = 0;
ASSERT(!erts_port_task_have_outstanding_io_tasks());
@@ -5203,11 +5351,11 @@ Process *schedule(Process *p, int calls)
#endif
erts_smp_runq_unlock(rq);
erl_sys_schedule(runnable);
- dt = do_time_read_and_reset();
- if (dt) bump_timer(dt);
+ dt = erts_do_time_read_and_reset();
+ if (dt) erts_bump_timer(dt);
#ifdef ERTS_SMP
erts_smp_runq_lock(rq);
- erts_smp_atomic_set(&doing_sys_schedule, 0);
+ erts_smp_atomic32_set(&doing_sys_schedule, 0);
goto continue_check_activities_to_run;
#else
if (!runnable)
@@ -5235,7 +5383,7 @@ Process *schedule(Process *p, int calls)
if (erts_common_run_queue->waiting)
wake_scheduler(erts_common_run_queue, 0, 1);
}
- else if (erts_smp_atomic_read(&no_empty_run_queues) != 0) {
+ else if (erts_smp_atomic32_read(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
}
@@ -5692,7 +5840,7 @@ erts_test_next_pid(int set, Uint next)
Uint erts_process_count(void)
{
- long res = erts_smp_atomic_read(&process_count);
+ erts_aint32_t res = erts_smp_atomic32_read(&process_count);
ASSERT(res >= 0);
return (Uint) res;
}
@@ -5741,7 +5889,7 @@ alloc_process(void)
ASSERT(!process_tab[p_next]);
process_tab[p_next] = p;
- erts_smp_atomic_inc(&process_count);
+ erts_smp_atomic32_inc(&process_count);
p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
if (p->id == ERTS_INVALID_PID) {
/* Do not use the invalid pid; change serial */
@@ -5867,7 +6015,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
p->prio = PRIORITY_NORMAL;
- p->max_gen_gcs = (Uint16) erts_smp_atomic_read(&erts_max_gen_gcs);
+ p->max_gen_gcs = (Uint16) erts_smp_atomic32_read(&erts_max_gen_gcs);
}
p->skipped = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
@@ -7324,8 +7472,8 @@ continue_exit_process(Process *p
p->status_flags = 0;
#endif
process_tab[pix] = NULL; /* Time of death! */
- ASSERT(erts_smp_atomic_read(&process_count) > 0);
- erts_smp_atomic_dec(&process_count);
+ ASSERT(erts_smp_atomic32_read(&process_count) > 0);
+ erts_smp_atomic32_dec(&process_count);
#ifdef ERTS_SMP
erts_pix_unlock(pix_lock);
@@ -7465,7 +7613,7 @@ cancel_timer(Process* p)
#ifdef ERTS_SMP
erts_cancel_smp_ptimer(p->u.ptimer);
#else
- erl_cancel_timer(&p->u.tm);
+ erts_cancel_timer(&p->u.tm);
#endif
}
@@ -7491,7 +7639,7 @@ set_timer(Process* p, Uint timeout)
(ErlTimeoutProc) timeout_proc,
timeout);
#else
- erl_set_timer(&p->u.tm,
+ erts_set_timer(&p->u.tm,
(ErlTimeoutProc) timeout_proc,
NULL,
(void*) p,
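The erl_process.c hunks above follow one pattern throughout: scheduler bookkeeping that fits in 32 bits (schdlr_sspnd.changing, schdlr_sspnd.active, function_calls, process_count, the sleep-info flag words) moves from word-sized erts_smp_atomic_t to the new erts_smp_atomic32_t, with results held in erts_aint32_t rather than long, and the timer calls switch to the erts_-prefixed wheel API. A minimal before/after sketch of a typical call site (illustrative, not part of the patch):

    /* before: word-sized atomic, result in a plain long    */
    /* long v = erts_smp_atomic_read(&schdlr_sspnd.active); */

    /* after: 32-bit atomic, result in erts_aint32_t        */
    erts_aint32_t v = erts_smp_atomic32_read(&schdlr_sspnd.active);
    ASSERT(v >= 1);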
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index c038e57b65..d927415f37 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -174,8 +174,8 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_UNSET_RUNQ_FLG_EVACUATE(FLGS, PRIO) \
((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO)))
-#define ERTS_RUNQ_IFLG_SUSPENDED (((long) 1) << 0)
-#define ERTS_RUNQ_IFLG_NONEMPTY (((long) 1) << 1)
+#define ERTS_RUNQ_IFLG_SUSPENDED (((erts_aint32_t) 1) << 0)
+#define ERTS_RUNQ_IFLG_NONEMPTY (((erts_aint32_t) 1) << 1)
#ifdef DEBUG
@@ -219,11 +219,11 @@ typedef enum {
ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED
} ErtsMigrateResult;
-#define ERTS_SSI_FLG_SLEEPING (((long) 1) << 0)
-#define ERTS_SSI_FLG_POLL_SLEEPING (((long) 1) << 1)
-#define ERTS_SSI_FLG_TSE_SLEEPING (((long) 1) << 2)
-#define ERTS_SSI_FLG_WAITING (((long) 1) << 3)
-#define ERTS_SSI_FLG_SUSPENDED (((long) 1) << 4)
+#define ERTS_SSI_FLG_SLEEPING (((erts_aint32_t) 1) << 0)
+#define ERTS_SSI_FLG_POLL_SLEEPING (((erts_aint32_t) 1) << 1)
+#define ERTS_SSI_FLG_TSE_SLEEPING (((erts_aint32_t) 1) << 2)
+#define ERTS_SSI_FLG_WAITING (((erts_aint32_t) 1) << 3)
+#define ERTS_SSI_FLG_SUSPENDED (((erts_aint32_t) 1) << 4)
#define ERTS_SSI_FLGS_SLEEP_TYPE \
(ERTS_SSI_FLG_TSE_SLEEPING|ERTS_SSI_FLG_POLL_SLEEPING)
@@ -236,16 +236,14 @@ typedef enum {
| ERTS_SSI_FLG_WAITING \
| ERTS_SSI_FLG_SUSPENDED)
-
-#if !defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
- && defined(ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN)
#define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
-#endif
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((long) 1) << 0)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 0)
+#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 1)
#define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
- (ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
+ (ERTS_SSI_AUX_WORK_CHECK_CHILDREN \
+ | ERTS_SSI_AUX_WORK_MISC)
#define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
(0)
@@ -259,9 +257,9 @@ typedef struct {
struct ErtsSchedulerSleepInfo_ {
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
- erts_smp_atomic_t flags;
+ erts_smp_atomic32_t flags;
erts_tse_t *event;
- erts_smp_atomic_t aux_work;
+ erts_smp_atomic32_t aux_work;
};
/* times to reschedule low prio process before running */
@@ -311,7 +309,7 @@ typedef struct {
struct ErtsRunQueue_ {
int ix;
- erts_smp_atomic_t info_flags;
+ erts_smp_atomic32_t info_flags;
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
@@ -421,7 +419,7 @@ struct ErtsSchedulerData_ {
#ifdef ERTS_SMP
/* NOTE: These fields are modified under held mutexes by other threads */
- erts_smp_atomic_t chk_cpu_bind; /* Only used when common run queue */
+ erts_smp_atomic32_t chk_cpu_bind; /* Only used when common run queue */
#endif
};
@@ -1034,6 +1032,7 @@ int erts_sched_set_wakeup_limit(char *str);
#ifdef DEBUG
void erts_dbg_multi_scheduling_return_trap(Process *, Eterm);
#endif
+int erts_get_max_no_executing_schedulers(void);
#ifdef ERTS_SMP
ErtsSchedSuspendResult
erts_schedulers_state(Uint *, Uint *, Uint *, int);
@@ -1048,6 +1047,11 @@ int erts_is_multi_scheduling_blocked(void);
Eterm erts_multi_scheduling_blockers(Process *);
void erts_start_schedulers(void);
void erts_smp_notify_check_children_needed(void);
+void
+erts_smp_schedule_misc_aux_work(int ignore_self,
+ int max_sched,
+ void (*func)(void *),
+ void *arg);
#endif
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
@@ -1555,7 +1559,7 @@ extern int erts_disable_proc_not_running_opt;
void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
#ifdef ERTS_SMP
-void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, long);
+void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1563,11 +1567,11 @@ ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
ERTS_GLB_INLINE void
erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
{
- long flags = erts_smp_atomic_read(&ssi->flags);
+ erts_aint32_t flags = erts_smp_atomic32_read(&ssi->flags);
ASSERT(!(flags & ERTS_SSI_FLG_SLEEPING)
|| (flags & ERTS_SSI_FLG_WAITING));
if (flags & ERTS_SSI_FLG_SLEEPING) {
- flags = erts_smp_atomic_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
+ flags = erts_smp_atomic32_band(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP);
erts_sched_finish_poke(ssi, flags);
}
}
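Besides the flag-type changes, erl_process.h gains a second blockable aux-work bit, ERTS_SSI_AUX_WORK_MISC, and the erts_smp_schedule_misc_aux_work() entry point that drives it. A hedged usage sketch -- the callback and its purpose are hypothetical, and erts_no_schedulers is assumed to hold the scheduler count, as elsewhere in this header:

    static void flush_caches(void *arg)
    {
        /* runs on each scheduler thread selected below */
    }

    void request_flush(void)
    {
        erts_smp_schedule_misc_aux_work(1,                  /* skip the calling scheduler */
                                        erts_no_schedulers, /* highest scheduler to poke  */
                                        flush_caches,
                                        NULL);
    }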
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 1bebcdb911..72560aa124 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -124,7 +124,7 @@ erts_init_proc_lock(int cpus)
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_smp_spinlock_init_x(&erts_pix_locks[i].u.spnlck,
- "pix_lock", make_small(i));
+ "pix_lock", make_small(i));
#else
erts_smp_spinlock_init(&erts_pix_locks[i].u.spnlck, "pix_lock");
#endif
@@ -413,7 +413,7 @@ transfer_locks(Process *p,
do {
erts_tse_t *tmp = wake;
wake = wake->next;
- erts_atomic_set(&tmp->uaflgs, 0);
+ erts_atomic32_set(&tmp->uaflgs, 0);
erts_tse_set(tmp);
} while (wake);
@@ -509,14 +509,14 @@ wait_for_locks(Process *p,
ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
- erts_atomic_set(&wtr->uaflgs, 1);
+ erts_atomic32_set(&wtr->uaflgs, 1);
erts_pix_unlock(pix_lock);
while (1) {
int res;
erts_tse_reset(wtr);
- if (erts_atomic_read(&wtr->uaflgs) == 0)
+ if (erts_atomic32_read(&wtr->uaflgs) == 0)
break;
/*
@@ -955,7 +955,7 @@ erts_proc_lock_init(Process *p)
{
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic_init(&p->lock.flags, (long) ERTS_PROC_LOCKS_ALL);
+ erts_smp_atomic32_init(&p->lock.flags, (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
@@ -974,7 +974,7 @@ erts_proc_lock_init(Process *p)
{
int i;
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
- erts_smp_atomic_init(&p->lock.locked[i], (long) 1);
+ erts_smp_atomic32_init(&p->lock.locked[i], (erts_aint32_t) 1);
}
#endif
}
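The queueing protocol in transfer_locks()/wait_for_locks() is unchanged, only retyped: a waiter publishes 1 in the uaflgs word of its thread-event structure, and the lock holder clears it before signalling. A sketch of the waiting side, assuming erts_tse_wait() blocks until the event is set:

    static void wait_sketch(erts_tse_t *wtr)
    {
        erts_atomic32_set(&wtr->uaflgs, 1);
        for (;;) {
            erts_tse_reset(wtr);
            if (erts_atomic32_read(&wtr->uaflgs) == 0)
                break;          /* holder cleared the flag: locks transferred */
            erts_tse_wait(wtr); /* assumed blocking wait on the event */
        }
    }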
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 4fe30c7209..355179f084 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -54,20 +54,20 @@
#define ERTS_PROC_LOCK_MAX_BIT 3
-typedef Uint32 ErtsProcLocks;
+typedef erts_aint32_t ErtsProcLocks;
typedef struct erts_proc_lock_queues_t_ erts_proc_lock_queues_t;
typedef struct erts_proc_lock_t_ {
#if ERTS_PROC_LOCK_ATOMIC_IMPL
- erts_smp_atomic_t flags;
+ erts_smp_atomic32_t flags;
#else
ErtsProcLocks flags;
#endif
erts_proc_lock_queues_t *queues;
- long refc;
+ Sint32 refc;
#ifdef ERTS_PROC_LOCK_DEBUG
- erts_smp_atomic_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
+ erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_t lcnt_main;
@@ -270,17 +270,19 @@ typedef struct {
#if ERTS_PROC_LOCK_ATOMIC_IMPL
#define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic_band(&(L)->flags, (long) (MSK)))
+ ((ErtsProcLocks) erts_smp_atomic32_band(&(L)->flags, (erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_BOR_(L, MSK) \
- ((ErtsProcLocks) erts_smp_atomic_bor(&(L)->flags, (long) (MSK)))
+ ((ErtsProcLocks) erts_smp_atomic32_bor(&(L)->flags, (erts_aint32_t) (MSK)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg_acqb(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+ ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \
+ (erts_aint32_t) (NEW), \
+ (erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \
- ((ErtsProcLocks) erts_smp_atomic_cmpxchg_relb(&(L)->flags, \
- (long) (NEW), (long) (EXPECTED)))
+ ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_relb(&(L)->flags, \
+ (erts_aint32_t) (NEW), \
+ (erts_aint32_t) (EXPECTED)))
#define ERTS_PROC_LOCK_FLGS_READ_(L) \
- ((ErtsProcLocks) erts_smp_atomic_read(&(L)->flags))
+ ((ErtsProcLocks) erts_smp_atomic32_read(&(L)->flags))
#else /* no opt atomic ops */
@@ -619,13 +621,13 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) {
ErtsProcLocks lock = ((ErtsProcLocks) 1) << i;
if (locks & lock) {
- long lock_count;
+ erts_aint32_t lock_count;
if (locked) {
- lock_count = erts_smp_atomic_inctest(&p->lock.locked[i]);
+ lock_count = erts_smp_atomic32_inctest(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 1);
}
else {
- lock_count = erts_smp_atomic_dectest(&p->lock.locked[i]);
+ lock_count = erts_smp_atomic32_dectest(&p->lock.locked[i]);
ERTS_LC_ASSERT(lock_count == 0);
}
}
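The cmpxchg wrappers above are what the lock fast paths build on: claim or release lock bits in the flag word with acquire/release barriers. An illustrative single-attempt claim of the main lock (the real try-lock logic lives elsewhere in this header):

    static int try_main_lock(Process *p)
    {
        ErtsProcLocks old = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
        if (old & ERTS_PROC_LOCK_MAIN)
            return 0;                       /* already held by someone */
        return ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock,
                                                 old | ERTS_PROC_LOCK_MAIN,
                                                 old) == old;
    }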
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index b41fa70476..287327bfe1 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -54,10 +54,10 @@ typedef erts_cnd_t erts_smp_cnd_t;
typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t;
typedef erts_rwmtx_t erts_smp_rwmtx_t;
typedef erts_tsd_key_t erts_smp_tsd_key_t;
-typedef ethr_atomic_t erts_smp_atomic_t;
+typedef erts_atomic_t erts_smp_atomic_t;
+typedef erts_atomic32_t erts_smp_atomic32_t;
typedef erts_spinlock_t erts_smp_spinlock_t;
typedef erts_rwlock_t erts_smp_rwlock_t;
-typedef erts_thr_timeval_t erts_smp_thr_timeval_t;
void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#else /* #ifdef ERTS_SMP */
@@ -83,7 +83,8 @@ typedef struct {
} erts_smp_rwmtx_opt_t;
typedef int erts_smp_rwmtx_t;
typedef int erts_smp_tsd_key_t;
-typedef long erts_smp_atomic_t;
+typedef SWord erts_smp_atomic_t;
+typedef Uint32 erts_smp_atomic32_t;
#if __GNUC__ > 2
typedef struct { } erts_smp_spinlock_t;
typedef struct { } erts_smp_rwlock_t;
@@ -92,11 +93,6 @@ typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t;
typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t;
#endif
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} erts_smp_thr_timeval_t;
-
#endif /* #ifdef ERTS_SMP */
ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id);
@@ -164,33 +160,82 @@ ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_atomic_init(erts_smp_atomic_t *var, long i);
-ERTS_GLB_INLINE void erts_smp_atomic_set(erts_smp_atomic_t *var, long i);
-ERTS_GLB_INLINE long erts_smp_atomic_read(erts_smp_atomic_t *var);
-ERTS_GLB_INLINE long erts_smp_atomic_inctest(erts_smp_atomic_t *incp);
-ERTS_GLB_INLINE long erts_smp_atomic_dectest(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE void erts_smp_atomic_init(erts_smp_atomic_t *var,
+ erts_aint_t i);
+ERTS_GLB_INLINE void erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_inctest(erts_smp_atomic_t *incp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest(erts_smp_atomic_t *decp);
ERTS_GLB_INLINE void erts_smp_atomic_inc(erts_smp_atomic_t *incp);
ERTS_GLB_INLINE void erts_smp_atomic_dec(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE long erts_smp_atomic_addtest(erts_smp_atomic_t *addp,
- long i);
-ERTS_GLB_INLINE void erts_smp_atomic_add(erts_smp_atomic_t *addp, long i);
-ERTS_GLB_INLINE long erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp,
- long new);
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
- long new,
- long expected);
-ERTS_GLB_INLINE long erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_smp_atomic_band(erts_smp_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
-ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_addtest(erts_smp_atomic_t *addp,
+ erts_aint_t i);
+ERTS_GLB_INLINE void erts_smp_atomic_add(erts_smp_atomic_t *addp,
+ erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp,
+ erts_aint_t new);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_bor(erts_smp_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_band(erts_smp_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_read_acqb(erts_smp_atomic_t *var);
+ERTS_GLB_INLINE void erts_smp_atomic_set_relb(erts_smp_atomic_t *var,
+ erts_aint_t i);
ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE long erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
- long new,
- long exp);
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
- long new,
- long exp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE erts_aint_t erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read(erts_smp_atomic32_t *var);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_inc(erts_smp_atomic32_t *incp);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
char *name,
Eterm extra);
@@ -221,7 +266,6 @@ ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_thr_time_now(erts_smp_thr_timeval_t *time);
ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key);
ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value);
@@ -611,7 +655,7 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_init(erts_smp_atomic_t *var, long i)
+erts_smp_atomic_init(erts_smp_atomic_t *var, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_init(var, i);
@@ -621,7 +665,7 @@ erts_smp_atomic_init(erts_smp_atomic_t *var, long i)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_set(erts_smp_atomic_t *var, long i)
+erts_smp_atomic_set(erts_smp_atomic_t *var, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_set(var, i);
@@ -630,7 +674,7 @@ erts_smp_atomic_set(erts_smp_atomic_t *var, long i)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_read(erts_smp_atomic_t *var)
{
#ifdef ERTS_SMP
@@ -640,7 +684,7 @@ erts_smp_atomic_read(erts_smp_atomic_t *var)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_inctest(erts_smp_atomic_t *incp)
{
#ifdef ERTS_SMP
@@ -650,7 +694,7 @@ erts_smp_atomic_inctest(erts_smp_atomic_t *incp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_dectest(erts_smp_atomic_t *decp)
{
#ifdef ERTS_SMP
@@ -680,8 +724,8 @@ erts_smp_atomic_dec(erts_smp_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_addtest(erts_smp_atomic_t *addp, long i)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_addtest(erts_smp_atomic_t *addp, erts_aint_t i)
{
#ifdef ERTS_SMP
return erts_atomic_addtest(addp, i);
@@ -691,7 +735,7 @@ erts_smp_atomic_addtest(erts_smp_atomic_t *addp, long i)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_add(erts_smp_atomic_t *addp, long i)
+erts_smp_atomic_add(erts_smp_atomic_t *addp, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_add(addp, i);
@@ -700,59 +744,61 @@ erts_smp_atomic_add(erts_smp_atomic_t *addp, long i)
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, long new)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_xchg(erts_smp_atomic_t *xchgp, erts_aint_t new)
{
#ifdef ERTS_SMP
return erts_atomic_xchg(xchgp, new);
#else
- long old;
+ erts_aint_t old;
old = *xchgp;
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp, long new, long expected)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_cmpxchg(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected)
{
#ifdef ERTS_SMP
return erts_atomic_cmpxchg(xchgp, new, expected);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == expected)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_bor(erts_smp_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_bor(erts_smp_atomic_t *var, erts_aint_t mask)
{
#ifdef ERTS_SMP
return erts_atomic_bor(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var |= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_smp_atomic_band(erts_smp_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_band(erts_smp_atomic_t *var, erts_aint_t mask)
{
#ifdef ERTS_SMP
return erts_atomic_band(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var &= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
{
#ifdef ERTS_SMP
@@ -763,7 +809,7 @@ erts_smp_atomic_read_acqb(erts_smp_atomic_t *var)
}
ERTS_GLB_INLINE void
-erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
+erts_smp_atomic_set_relb(erts_smp_atomic_t *var, erts_aint_t i)
{
#ifdef ERTS_SMP
erts_atomic_set_relb(var, i);
@@ -772,7 +818,8 @@ erts_smp_atomic_set_relb(erts_smp_atomic_t *var, long i)
#endif
}
-ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
+ERTS_GLB_INLINE void
+erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
{
#ifdef ERTS_SMP
erts_atomic_dec_relb(decp);
@@ -781,7 +828,7 @@ ERTS_GLB_INLINE void erts_smp_atomic_dec_relb(erts_smp_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
{
#ifdef ERTS_SMP
@@ -791,28 +838,244 @@ erts_smp_atomic_dectest_relb(erts_smp_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_cmpxchg_acqb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef ERTS_SMP
return erts_atomic_cmpxchg_acqb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t
+erts_smp_atomic_cmpxchg_relb(erts_smp_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef ERTS_SMP
return erts_atomic_cmpxchg_relb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_init(erts_smp_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_init(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set(erts_smp_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_set(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read(erts_smp_atomic32_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_read(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_inctest(erts_smp_atomic32_t *incp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_inctest(incp);
+#else
+ return ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_dectest(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_inc(erts_smp_atomic32_t *incp)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_inc(incp);
+#else
+ ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_dec(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_addtest(erts_smp_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_addtest(addp, i);
+#else
+ return *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_add(erts_smp_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_add(addp, i);
+#else
+ *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_xchg(erts_smp_atomic32_t *xchgp, erts_aint32_t new)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_xchg(xchgp, new);
+#else
+ erts_aint32_t old;
+ old = *xchgp;
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_cmpxchg(xchgp, new, expected);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_bor(erts_smp_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_bor(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var |= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_band(erts_smp_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_band(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_read_acqb(erts_smp_atomic32_t *var)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_set_relb(erts_smp_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_atomic32_dec_relb(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ erts_atomic32_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_dectest_relb(erts_smp_atomic32_t *decp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_dectest_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_acqb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_cmpxchg_acqb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_smp_atomic32_cmpxchg_relb(erts_smp_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef ERTS_SMP
+ return erts_atomic32_cmpxchg_relb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
@@ -988,14 +1251,6 @@ erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_thr_time_now(erts_smp_thr_timeval_t *time)
-{
-#ifdef ERTS_SMP
- erts_thr_time_now(time);
-#endif
-}
-
-ERTS_GLB_INLINE void
erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp)
{
#ifdef ERTS_SMP
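Every erts_smp_atomic32_* wrapper added above has the same shape: forward to the corresponding erts_atomic32_* operation when ERTS_SMP is defined, otherwise degrade to a plain memory operation, since a non-SMP emulator needs no synchronisation. Call sites are identical in both builds; e.g. a one-shot claim flag (sketch):

    erts_smp_atomic32_t claimed; /* initialised to 0 elsewhere */

    int try_claim(void)
    {
        /* cmpxchg returns the previous value, so seeing 0 means we won */
        return erts_smp_atomic32_cmpxchg(&claimed, 1, 0) == 0;
    }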
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index b8e4473141..815cc1beae 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -193,7 +193,7 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#endif
#define _is_aligned(x) (((Uint)(x) & 0x3) == 0)
#define _unchecked_make_boxed(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_BOXED)
-_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*);
+_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*)
#define make_boxed(x) _ET_APPLY(make_boxed,(x))
#if 1
#define _is_not_boxed(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_BOXED))
@@ -204,12 +204,12 @@ _ET_DECLARE_CHECKED(int,is_boxed,Eterm)
#define is_boxed(x) (((x) & _TAG_PRIMARY_MASK) == TAG_PRIMARY_BOXED)
#endif
#define _unchecked_boxed_val(x) ((Eterm*) EXPAND_POINTER(((x) - TAG_PRIMARY_BOXED)))
-_ET_DECLARE_CHECKED(Eterm*,boxed_val,Eterm);
+_ET_DECLARE_CHECKED(Eterm*,boxed_val,Eterm)
#define boxed_val(x) _ET_APPLY(boxed_val,(x))
/* cons cell ("list") access methods */
#define _unchecked_make_list(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_LIST)
-_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*);
+_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*)
#define make_list(x) _ET_APPLY(make_list,(x))
#if 1
#define _unchecked_is_not_list(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_LIST))
@@ -226,7 +226,7 @@ _ET_DECLARE_CHECKED(int,is_not_list,Eterm)
#define _list_precond(x) (is_list(x))
#endif
#define _unchecked_list_val(x) ((Eterm*) EXPAND_POINTER((x) - TAG_PRIMARY_LIST))
-_ET_DECLARE_CHECKED(Eterm*,list_val,Eterm);
+_ET_DECLARE_CHECKED(Eterm*,list_val,Eterm)
#define list_val(x) _ET_APPLY(list_val,(x))
#define CONS(hp, car, cdr) \
@@ -995,14 +995,14 @@ _ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#endif
#define _unchecked_make_cp(x) ((Eterm) COMPRESS_POINTER(x))
-_ET_DECLARE_CHECKED(Eterm,make_cp,BeamInstr*);
+_ET_DECLARE_CHECKED(Eterm,make_cp,BeamInstr*)
#define make_cp(x) _ET_APPLY(make_cp,(x))
#define is_not_CP(x) ((x) & _CPMASK)
#define is_CP(x) (!is_not_CP(x))
#define _unchecked_cp_val(x) ((BeamInstr*) EXPAND_POINTER(x))
-_ET_DECLARE_CHECKED(BeamInstr*,cp_val,Eterm);
+_ET_DECLARE_CHECKED(BeamInstr*,cp_val,Eterm)
#define cp_val(x) _ET_APPLY(cp_val,(x))
#define make_catch(x) (((x) << _TAG_IMMED2_SIZE) | _TAG_IMMED2_CATCH)
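The erl_term.h hunks only drop the semicolons after _ET_DECLARE_CHECKED(...) invocations. The likely reason, assuming the usual non-debug definition in which the macro expands to nothing: a trailing ';' then becomes a stray top-level semicolon, which pedantic compilers warn about, while the debug expansion already supplies its own terminator. Schematically:

    #define _ET_DECLARE_CHECKED(TF,F,TA)           /* assumed empty in non-debug builds */
    _ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*);  /* stray ';' -> pedantic warning */
    _ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*)   /* clean */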
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index a74cf79b8c..84a20b51f2 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -89,7 +89,10 @@ typedef ethr_rwmutex_opt erts_rwmtx_opt_t;
typedef ethr_tsd_key erts_tsd_key_t;
typedef ethr_ts_event erts_tse_t;
+typedef ethr_sint_t erts_aint_t;
typedef ethr_atomic_t erts_atomic_t;
+typedef ethr_sint32_t erts_aint32_t;
+typedef ethr_atomic32_t erts_atomic32_t;
/* spinlock */
typedef struct {
@@ -113,7 +116,6 @@ typedef struct {
#endif
} erts_rwlock_t;
-typedef ethr_timeval erts_thr_timeval_t;
__decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
/* implemented in erl_init.c */
@@ -152,7 +154,10 @@ typedef struct {
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
typedef int erts_tse_t;
-typedef long erts_atomic_t;
+typedef SWord erts_aint_t;
+typedef SWord erts_atomic_t;
+typedef SWord erts_aint32_t;
+typedef SWord erts_atomic32_t;
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
typedef struct { } erts_rwlock_t;
@@ -160,10 +165,6 @@ typedef struct { } erts_rwlock_t;
typedef struct { int gcc_is_buggy; } erts_spinlock_t;
typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#endif
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} erts_thr_timeval_t;
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
@@ -173,6 +174,8 @@ typedef struct {
#endif /* #ifdef USE_THREADS */
+#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
+
ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id);
ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *),
@@ -231,33 +234,65 @@ ERTS_GLB_INLINE int erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_atomic_init(erts_atomic_t *var, long i);
-ERTS_GLB_INLINE void erts_atomic_set(erts_atomic_t *var, long i);
-ERTS_GLB_INLINE long erts_atomic_read(erts_atomic_t *var);
-ERTS_GLB_INLINE long erts_atomic_inctest(erts_atomic_t *incp);
-ERTS_GLB_INLINE long erts_atomic_dectest(erts_atomic_t *decp);
+ERTS_GLB_INLINE void erts_atomic_init(erts_atomic_t *var, erts_aint_t i);
+ERTS_GLB_INLINE void erts_atomic_set(erts_atomic_t *var, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_read(erts_atomic_t *var);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_inctest(erts_atomic_t *incp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest(erts_atomic_t *decp);
ERTS_GLB_INLINE void erts_atomic_inc(erts_atomic_t *incp);
ERTS_GLB_INLINE void erts_atomic_dec(erts_atomic_t *decp);
-ERTS_GLB_INLINE long erts_atomic_addtest(erts_atomic_t *addp,
- long i);
-ERTS_GLB_INLINE void erts_atomic_add(erts_atomic_t *addp, long i);
-ERTS_GLB_INLINE long erts_atomic_xchg(erts_atomic_t *xchgp,
- long new);
-ERTS_GLB_INLINE long erts_atomic_cmpxchg(erts_atomic_t *xchgp,
- long new,
- long expected);
-ERTS_GLB_INLINE long erts_atomic_bor(erts_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_atomic_band(erts_atomic_t *var, long mask);
-ERTS_GLB_INLINE long erts_atomic_read_acqb(erts_atomic_t *var);
-ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, long i);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_addtest(erts_atomic_t *addp,
+ erts_aint_t i);
+ERTS_GLB_INLINE void erts_atomic_add(erts_atomic_t *addp, erts_aint_t i);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_xchg(erts_atomic_t *xchgp,
+ erts_aint_t new);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t expected);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_bor(erts_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_band(erts_atomic_t *var,
+ erts_aint_t mask);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_read_acqb(erts_atomic_t *var);
+ERTS_GLB_INLINE void erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i);
ERTS_GLB_INLINE void erts_atomic_dec_relb(erts_atomic_t *decp);
-ERTS_GLB_INLINE long erts_atomic_dectest_relb(erts_atomic_t *decp);
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
- long new,
- long exp);
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
- long new,
- long exp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_dectest_relb(erts_atomic_t *decp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp);
+ERTS_GLB_INLINE void erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE void erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read(erts_atomic32_t *var);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_inctest(erts_atomic32_t *incp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest(erts_atomic32_t *decp);
+ERTS_GLB_INLINE void erts_atomic32_inc(erts_atomic32_t *incp);
+ERTS_GLB_INLINE void erts_atomic32_dec(erts_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_addtest(erts_atomic32_t *addp,
+ erts_aint32_t i);
+ERTS_GLB_INLINE void erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_xchg(erts_atomic32_t *xchgp,
+ erts_aint32_t new);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_bor(erts_atomic32_t *var,
+ erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_band(erts_atomic32_t *var,
+ erts_aint32_t mask);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_read_acqb(erts_atomic32_t *var);
+ERTS_GLB_INLINE void erts_atomic32_set_relb(erts_atomic32_t *var,
+ erts_aint32_t i);
+ERTS_GLB_INLINE void erts_atomic32_dec_relb(erts_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_dectest_relb(erts_atomic32_t *decp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
+ERTS_GLB_INLINE erts_aint32_t erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp);
ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
char *name,
Eterm extra,
@@ -292,7 +327,6 @@ ERTS_GLB_INLINE void erts_write_lock(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock);
ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock);
-ERTS_GLB_INLINE void erts_thr_time_now(erts_thr_timeval_t *time);
ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp);
ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key);
ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value);
@@ -925,7 +959,7 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_atomic_init(erts_atomic_t *var, long i)
+erts_atomic_init(erts_atomic_t *var, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_init(var, i);
@@ -935,7 +969,7 @@ erts_atomic_init(erts_atomic_t *var, long i)
}
ERTS_GLB_INLINE void
-erts_atomic_set(erts_atomic_t *var, long i)
+erts_atomic_set(erts_atomic_t *var, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_set(var, i);
@@ -944,7 +978,7 @@ erts_atomic_set(erts_atomic_t *var, long i)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_read(erts_atomic_t *var)
{
#ifdef USE_THREADS
@@ -954,7 +988,7 @@ erts_atomic_read(erts_atomic_t *var)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_inctest(erts_atomic_t *incp)
{
#ifdef USE_THREADS
@@ -964,7 +998,7 @@ erts_atomic_inctest(erts_atomic_t *incp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_dectest(erts_atomic_t *decp)
{
#ifdef USE_THREADS
@@ -994,8 +1028,8 @@ erts_atomic_dec(erts_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_addtest(erts_atomic_t *addp, long i)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_addtest(erts_atomic_t *addp, erts_aint_t i)
{
#ifdef USE_THREADS
return ethr_atomic_add_read(addp, i);
@@ -1005,7 +1039,7 @@ erts_atomic_addtest(erts_atomic_t *addp, long i)
}
ERTS_GLB_INLINE void
-erts_atomic_add(erts_atomic_t *addp, long i)
+erts_atomic_add(erts_atomic_t *addp, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_add(addp, i);
@@ -1014,59 +1048,58 @@ erts_atomic_add(erts_atomic_t *addp, long i)
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_xchg(erts_atomic_t *xchgp, long new)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_xchg(erts_atomic_t *xchgp, erts_aint_t new)
{
- long old;
#ifdef USE_THREADS
return ethr_atomic_xchg(xchgp, new);
#else
- old = *xchgp;
+ erts_aint_t old = *xchgp;
*xchgp = new;
-#endif
return old;
+#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_cmpxchg(erts_atomic_t *xchgp, long new, long expected)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_cmpxchg(erts_atomic_t *xchgp, erts_aint_t new, erts_aint_t expected)
{
#ifdef USE_THREADS
return ethr_atomic_cmpxchg(xchgp, new, expected);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == expected)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_bor(erts_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_bor(erts_atomic_t *var, erts_aint_t mask)
{
#ifdef USE_THREADS
return ethr_atomic_read_bor(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var |= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
-erts_atomic_band(erts_atomic_t *var, long mask)
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_band(erts_atomic_t *var, erts_aint_t mask)
{
#ifdef USE_THREADS
return ethr_atomic_read_band(var, mask);
#else
- long old;
+ erts_aint_t old;
old = *var;
*var &= mask;
return old;
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_read_acqb(erts_atomic_t *var)
{
#ifdef USE_THREADS
@@ -1077,7 +1110,7 @@ erts_atomic_read_acqb(erts_atomic_t *var)
}
ERTS_GLB_INLINE void
-erts_atomic_set_relb(erts_atomic_t *var, long i)
+erts_atomic_set_relb(erts_atomic_t *var, erts_aint_t i)
{
#ifdef USE_THREADS
ethr_atomic_set_relb(var, i);
@@ -1096,7 +1129,7 @@ erts_atomic_dec_relb(erts_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long
+ERTS_GLB_INLINE erts_aint_t
erts_atomic_dectest_relb(erts_atomic_t *decp)
{
#ifdef USE_THREADS
@@ -1106,28 +1139,243 @@ erts_atomic_dectest_relb(erts_atomic_t *decp)
#endif
}
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_acqb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef USE_THREADS
return ethr_atomic_cmpxchg_acqb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
#endif
}
-ERTS_GLB_INLINE long erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
- long new,
- long exp)
+ERTS_GLB_INLINE erts_aint_t erts_atomic_cmpxchg_relb(erts_atomic_t *xchgp,
+ erts_aint_t new,
+ erts_aint_t exp)
{
#ifdef USE_THREADS
return ethr_atomic_cmpxchg_relb(xchgp, new, exp);
#else
- long old = *xchgp;
+ erts_aint_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+/* atomic32 */
+
+ERTS_GLB_INLINE void
+erts_atomic32_init(erts_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_init(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_set(erts_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_set(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read(erts_atomic32_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_inctest(erts_atomic32_t *incp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_inc_read(incp);
+#else
+ return ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_dectest(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_dec_read(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_inc(erts_atomic32_t *incp)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_inc(incp);
+#else
+ ++(*incp);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_dec(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_dec(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_addtest(erts_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_add_read(addp, i);
+#else
+ return *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_add(erts_atomic32_t *addp, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_add(addp, i);
+#else
+ *addp += i;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_xchg(erts_atomic32_t *xchgp, erts_aint32_t new)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_xchg(xchgp, new);
+#else
+ erts_aint32_t old = *xchgp;
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_cmpxchg(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t expected)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_cmpxchg(xchgp, new, expected);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == expected)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_bor(erts_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read_bor(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var |= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_band(erts_atomic32_t *var, erts_aint32_t mask)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read_band(var, mask);
+#else
+ erts_aint32_t old;
+ old = *var;
+ *var &= mask;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_acqb(erts_atomic32_t *var)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_read_acqb(var);
+#else
+ return *var;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_set_relb(erts_atomic32_t *var, erts_aint32_t i)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_set_relb(var, i);
+#else
+ *var = i;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_atomic32_dec_relb(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ ethr_atomic32_dec_relb(decp);
+#else
+ --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_dectest_relb(erts_atomic32_t *decp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_dec_read_relb(decp);
+#else
+ return --(*decp);
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_cmpxchg_acqb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_cmpxchg_acqb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
+ if (old == exp)
+ *xchgp = new;
+ return old;
+#endif
+}
+
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_cmpxchg_relb(erts_atomic32_t *xchgp,
+ erts_aint32_t new,
+ erts_aint32_t exp)
+{
+#ifdef USE_THREADS
+ return ethr_atomic32_cmpxchg_relb(xchgp, new, exp);
+#else
+ erts_aint32_t old = *xchgp;
if (old == exp)
*xchgp = new;
return old;
@@ -1428,16 +1676,6 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_thr_time_now(erts_thr_timeval_t *time)
-{
-#ifdef USE_THREADS
- int res = ethr_time_now(time);
- if (res)
- erts_thr_fatal_error(res, "get current time");
-#endif
-}
-
-ERTS_GLB_INLINE void
erts_tsd_key_create(erts_tsd_key_t *keyp)
{
#ifdef USE_THREADS
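ERTS_AINT_T_MAX above constructs the largest positive erts_aint_t by shifting a 1 into the sign bit and complementing the result. A standalone check of that arithmetic for a 64-bit word, mirroring the macro (the signed shift is exactly what the macro itself performs):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t max = ~(((int64_t) 1) << (sizeof(int64_t)*8 - 1));
        assert(max == INT64_MAX); /* 0x7FFFFFFFFFFFFFFF */
        return 0;
    }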
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index 6f6b971d34..93d8ea4cb4 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -20,11 +20,15 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
+extern erts_smp_atomic_t do_time; /* set at clock interrupt */
+extern SysTimeval erts_first_emu_time;
+
/*
** Timer entry:
*/
typedef struct erl_timer {
struct erl_timer* next; /* next entry tiw slot or chain */
+ struct erl_timer* prev; /* prev entry tiw slot or chain */
Uint slot; /* slot in timer wheel */
Uint count; /* number of loops remaining */
int active; /* 1=activated, 0=deactivated */
@@ -39,7 +43,6 @@ typedef void (*ErlTimeoutProc)(void*);
typedef void (*ErlCancelProc)(void*);
#ifdef ERTS_SMP
-
/*
* Process and port timer
*/
@@ -61,7 +64,66 @@ void erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
ErlTimeoutProc timeout_func,
Uint timeout);
void erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer);
+#endif
+
+/* timer-wheel api */
+void erts_init_time(void);
+void erts_set_timer(ErlTimer*, ErlTimeoutProc, ErlCancelProc, void*, Uint);
+void erts_cancel_timer(ErlTimer*);
+void erts_bump_timer(erts_aint_t);
+Uint erts_timer_wheel_memory_size(void);
+Uint erts_time_left(ErlTimer *);
+erts_aint_t erts_next_time(void);
+
+#ifdef DEBUG
+void erts_p_slpq(void);
#endif
+ERTS_GLB_INLINE erts_aint_t erts_do_time_read_and_reset(void);
+ERTS_GLB_INLINE void erts_do_time_add(long);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE erts_aint_t erts_do_time_read_and_reset(void) { return erts_smp_atomic_xchg(&do_time, 0L); }
+ERTS_GLB_INLINE void erts_do_time_add(long elapsed) { erts_smp_atomic_add(&do_time, elapsed); }
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+/* time_sup */
+
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+# ifndef HAVE_ERTS_NOW_CPU
+# define HAVE_ERTS_NOW_CPU
+# ifdef HAVE_GETHRVTIME
+# define erts_start_now_cpu() sys_start_hrvtime()
+# define erts_stop_now_cpu() sys_stop_hrvtime()
+# endif
+# endif
+void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
#endif
+
+void erts_get_timeval(SysTimeval *tv);
+long erts_get_time(void);
+void erts_get_emu_time(SysTimeval *);
+
+ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p)
+{
+ if (t1p->tv_sec == t2p->tv_sec) {
+ if (t1p->tv_usec < t2p->tv_usec)
+ return -1;
+ else if (t1p->tv_usec > t2p->tv_usec)
+ return 1;
+ return 0;
+ }
+ return t1p->tv_sec < t2p->tv_sec ? -1 : 1;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+#endif /* ERL_TIME_H__ */
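With the wheel API now public, a consumer's timer handling reduces to a fetch-and-clear of the tick accumulator followed by firing whatever expired -- the exact pattern the erl_process.c hunks earlier install in schedule():

    erts_aint_t dt = erts_do_time_read_and_reset(); /* atomically swap 0 into do_time */
    if (dt)
        erts_bump_timer(dt);                        /* fire timers dt ticks overdue */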
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 7b8706ea13..ca4b54188e 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -358,10 +358,6 @@ static int clock_resolution;
** instead of something like select.
*/
-#if defined(ERTS_TIMER_THREAD)
-static ERTS_INLINE void init_erts_deliver_time(const SysTimeval *inittv) { }
-static ERTS_INLINE void do_erts_deliver_time(const SysTimeval *current) { }
-#else
static SysTimeval last_delivered;
static void init_erts_deliver_time(const SysTimeval *inittv)
@@ -389,11 +385,10 @@ static void do_erts_deliver_time(const SysTimeval *current)
this by simply pretend as if the time stood still. :) */
if (elapsed > 0) {
- do_time_add(elapsed);
+ erts_do_time_add(elapsed);
last_delivered = cur_time;
}
}
-#endif
int
erts_init_time_sup(void)
@@ -786,7 +781,6 @@ get_sys_now(Uint* megasec, Uint* sec, Uint* microsec)
to a struct timeval representing current time (to save
a gettimeofday() where possible) or NULL */
-#if !defined(ERTS_TIMER_THREAD)
void erts_deliver_time(void) {
SysTimeval now;
@@ -797,7 +791,6 @@ void erts_deliver_time(void) {
erts_smp_mtx_unlock(&erts_timeofday_mtx);
}
-#endif
/* get *real* time (not ticks) remaining until next timeout - if there
isn't one, give a "long" time, that is guaranteed
@@ -806,14 +799,12 @@ void erts_deliver_time(void) {
void erts_time_remaining(SysTimeval *rem_time)
{
int ticks;
-#if !defined(ERTS_TIMER_THREAD)
SysTimeval cur_time;
-#endif
long elapsed;
- /* next_time() returns no of ticks to next timeout or -1 if none */
+ /* erts_next_time() returns no of ticks to next timeout or -1 if none */
- if ((ticks = next_time()) == -1) {
+ if ((ticks = erts_next_time()) == -1) {
/* timer queue empty */
/* this will cause at most 100000000 ticks */
rem_time->tv_sec = 100000;
@@ -822,9 +813,6 @@ void erts_time_remaining(SysTimeval *rem_time)
/* next timeout after ticks ticks */
ticks *= CLOCK_RESOLUTION;
-#if defined(ERTS_TIMER_THREAD)
- elapsed = 0;
-#else
erts_smp_mtx_lock(&erts_timeofday_mtx);
get_tolerant_timeofday(&cur_time);
@@ -839,7 +827,6 @@ void erts_time_remaining(SysTimeval *rem_time)
rem_time->tv_sec = rem_time->tv_usec = 0;
return;
}
-#endif
rem_time->tv_sec = (ticks - elapsed) / 1000;
rem_time->tv_usec = 1000 * ((ticks - elapsed) % 1000);
}
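A worked example of the final split in erts_time_remaining(), assuming CLOCK_RESOLUTION converts ticks to milliseconds as the surrounding code implies -- with 2500 ms left after subtracting elapsed time:

    SysTimeval rem;
    long left   = 2500;                  /* (ticks - elapsed), in ms */
    rem.tv_sec  = left / 1000;           /* 2 s       */
    rem.tv_usec = 1000 * (left % 1000);  /* 500000 us */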
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 72207df621..545b345a71 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -2420,7 +2420,7 @@ void erts_copy_utf8_to_utf16_little(byte *target, byte *bytes, int num_chars)
/*
* This internal bif converts a filename to whatever format is suitable for the file driver
- * It also adds zero termination so that prim_file neednt bother with the character encoding
+ * It also adds zero termination so that prim_file needn't bother with the character encoding
* of the file driver
*/
BIF_RETTYPE prim_file_internal_name2native_1(BIF_ALIST_1)
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 524db2a2eb..e8a9d5f32f 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -544,7 +544,7 @@ ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
if (prt->snapshot != erts_smp_atomic_read(&erts_ports_snapshot)) {
/* Dead ports are added from the end of the snapshot buffer */
Eterm* tombstone = (Eterm*) erts_smp_atomic_addtest(&erts_dead_ports_ptr,
- -(long)sizeof(Eterm));
+ -(erts_aint_t)sizeof(Eterm));
ASSERT(tombstone+1 != NULL);
ASSERT(prt->snapshot == (Uint32) erts_smp_atomic_read(&erts_ports_snapshot) - 1);
*tombstone = prt->id;
@@ -563,7 +563,7 @@ extern Uint display_items; /* no of items to display in traces etc */
extern Uint display_loads; /* print info about loaded modules */
extern int erts_backtrace_depth;
-extern erts_smp_atomic_t erts_max_gen_gcs;
+extern erts_smp_atomic32_t erts_max_gen_gcs;
extern int erts_disable_tolerant_timeofday;
@@ -1206,7 +1206,7 @@ ERTS_GLB_INLINE void
erts_smp_port_unlock(Port *prt)
{
#ifdef ERTS_SMP
- long refc;
+ erts_aint_t refc;
erts_smp_mtx_unlock(prt->lock);
refc = erts_smp_atomic_dectest(&prt->refc);
ASSERT(refc >= 0);
@@ -1425,84 +1425,6 @@ void erl_drv_thr_init(void);
/* time.c */
-ERTS_GLB_INLINE long do_time_read_and_reset(void);
-#ifdef ERTS_TIMER_THREAD
-ERTS_GLB_INLINE int next_time(void);
-ERTS_GLB_INLINE void bump_timer(long);
-#else
-int next_time(void);
-void bump_timer(long);
-extern erts_smp_atomic_t do_time; /* set at clock interrupt */
-ERTS_GLB_INLINE void do_time_add(long);
-#endif
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-#ifdef ERTS_TIMER_THREAD
-ERTS_GLB_INLINE long do_time_read_and_reset(void) { return 0; }
-ERTS_GLB_INLINE int next_time(void) { return -1; }
-ERTS_GLB_INLINE void bump_timer(long ignore) { }
-#else
-ERTS_GLB_INLINE long do_time_read_and_reset(void)
-{
- return erts_smp_atomic_xchg(&do_time, 0L);
-}
-ERTS_GLB_INLINE void do_time_add(long elapsed)
-{
- erts_smp_atomic_add(&do_time, elapsed);
-}
-#endif
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-void init_time(void);
-void erl_set_timer(ErlTimer*, ErlTimeoutProc, ErlCancelProc, void*, Uint);
-void erl_cancel_timer(ErlTimer*);
-Uint time_left(ErlTimer *);
-
-Uint erts_timer_wheel_memory_size(void);
-
-#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
-# ifndef HAVE_ERTS_NOW_CPU
-# define HAVE_ERTS_NOW_CPU
-# ifdef HAVE_GETHRVTIME
-# define erts_start_now_cpu() sys_start_hrvtime()
-# define erts_stop_now_cpu() sys_stop_hrvtime()
-# endif
-# endif
-void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
-#endif
-
-void erts_get_timeval(SysTimeval *tv);
-long erts_get_time(void);
-
-extern SysTimeval erts_first_emu_time;
-
-void erts_get_emu_time(SysTimeval *);
-
-ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE int
-erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p)
-{
- if (t1p->tv_sec == t2p->tv_sec) {
- if (t1p->tv_usec < t2p->tv_usec)
- return -1;
- else if (t1p->tv_usec > t2p->tv_usec)
- return 1;
- return 0;
- }
- return t1p->tv_sec < t2p->tv_sec ? -1 : 1;
-}
-
-#endif
-
-#ifdef DEBUG
-void p_slpq(void);
-#endif
-
/* utils.c */
/*
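
The global.h hunk above retypes the erts_refc_* helpers from long to erts_aint_t. The inctest/dectest contract (mutate, then return the new value) is easy to model with C11 atomics; a hedged sketch, not the actual ERTS implementation, which wraps erts_smp_atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    typedef atomic_long refc_t;

    static void refc_init(refc_t *r, long v) { atomic_init(r, v); }

    /* Increment and return the new count, like erts_refc_inctest(). */
    static long refc_inctest(refc_t *r) { return atomic_fetch_add(r, 1) + 1; }

    /* Decrement and return the new count, like erts_refc_dectest(). */
    static long refc_dectest(refc_t *r) { return atomic_fetch_sub(r, 1) - 1; }

    int main(void)
    {
        refc_t r;
        refc_init(&r, 1);
        printf("%ld\n", refc_inctest(&r));  /* 2 */
        if (refc_dectest(&r) == 0)          /* 1 here, so no free */
            puts("last reference gone");
        return 0;
    }
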
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 9ed92bbe03..f21a96c754 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -428,7 +428,7 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
old_name = prt->name;
prt->name = new_name;
#ifdef ERTS_SMP
- erts_smp_atomic_set(&prt->run_queue, (long) runq);
+ erts_smp_atomic_set(&prt->run_queue, (erts_aint_t) runq);
#endif
ASSERT(!prt->drv_ptr);
prt->drv_ptr = driver;
@@ -670,7 +670,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
#ifdef ERTS_SMP
erts_cancel_smp_ptimer(port->ptimer);
#else
- erl_cancel_timer(&(port->tm));
+ erts_cancel_timer(&(port->tm));
#endif
stopq(port);
kill_port(port);
@@ -1297,7 +1297,7 @@ void init_io(void)
erts_port[i].port_data_lock = NULL;
}
- erts_smp_atomic_init(&erts_ports_snapshot, (long) 0);
+ erts_smp_atomic_init(&erts_ports_snapshot, (erts_aint_t) 0);
last_port_num = 0;
erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
@@ -1839,7 +1839,7 @@ terminate_port(Port *prt)
#ifdef ERTS_SMP
erts_cancel_smp_ptimer(prt->ptimer);
#else
- erl_cancel_timer(&prt->tm);
+ erts_cancel_timer(&prt->tm);
#endif
drv = prt->drv_ptr;
@@ -3252,7 +3252,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, int hlen,
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + len));
+ erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
return erts_net_message(prt,
prt->dist_entry,
@@ -3287,7 +3287,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, int hlen, char* buf, int len)
return 0;
prt->bytes_in += (hlen + len);
- erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + len));
+ erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + len));
if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
if (len == 0)
return erts_net_message(prt,
@@ -3364,7 +3364,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, int hlen, ErlIOVec* vec, int skip)
/* XXX handle distribution !!! */
prt->bytes_in += (hlen + size);
- erts_smp_atomic_add(&erts_bytes_in, (long) (hlen + size));
+ erts_smp_atomic_add(&erts_bytes_in, (erts_aint_t) (hlen + size));
deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov, n, size);
return 0;
}
@@ -3408,25 +3408,25 @@ int len;
* reference count on driver binaries...
*/
-long
+ErlDrvSInt
driver_binary_get_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return erts_refc_read(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_read(&bp->refc, 1);
}
-long
+ErlDrvSInt
driver_binary_inc_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return erts_refc_inctest(&bp->refc, 2);
+ return (ErlDrvSInt) erts_refc_inctest(&bp->refc, 2);
}
-long
+ErlDrvSInt
driver_binary_dec_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return erts_refc_dectest(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_dectest(&bp->refc, 1);
}
@@ -3541,12 +3541,12 @@ pdl_init_refc(ErlDrvPDL pdl)
erts_atomic_init(&pdl->refc, 1);
}
-static ERTS_INLINE long
+static ERTS_INLINE ErlDrvSInt
pdl_read_refc(ErlDrvPDL pdl)
{
- long refc = erts_atomic_read(&pdl->refc);
+ erts_aint_t refc = erts_atomic_read(&pdl->refc);
ERTS_LC_ASSERT(refc >= 0);
- return refc;
+ return (ErlDrvSInt) refc;
}
static ERTS_INLINE void
@@ -3556,12 +3556,12 @@ pdl_inc_refc(ErlDrvPDL pdl)
ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) > 1);
}
-static ERTS_INLINE long
+static ERTS_INLINE ErlDrvSInt
pdl_inctest_refc(ErlDrvPDL pdl)
{
- long refc = erts_atomic_inctest(&pdl->refc);
+ erts_aint_t refc = erts_atomic_inctest(&pdl->refc);
ERTS_LC_ASSERT(refc > 1);
- return refc;
+ return (ErlDrvSInt) refc;
}
#if 0 /* unused */
@@ -3573,12 +3573,12 @@ pdl_dec_refc(ErlDrvPDL pdl)
}
#endif
-static ERTS_INLINE long
+static ERTS_INLINE ErlDrvSInt
pdl_dectest_refc(ErlDrvPDL pdl)
{
- long refc = erts_atomic_dectest(&pdl->refc);
+ erts_aint_t refc = erts_atomic_dectest(&pdl->refc);
ERTS_LC_ASSERT(refc >= 0);
- return refc;
+ return (ErlDrvSInt) refc;
}
static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
@@ -3649,7 +3649,7 @@ driver_pdl_lock(ErlDrvPDL pdl)
void
driver_pdl_unlock(ErlDrvPDL pdl)
{
- long refc;
+ ErlDrvSInt refc;
#ifdef HARDDEBUG
erts_fprintf(stderr, "driver_pdl_unlock(0x%08X)\r\n",(unsigned) pdl);
#endif
@@ -3659,28 +3659,30 @@ driver_pdl_unlock(ErlDrvPDL pdl)
pdl_destroy(pdl);
}
-long
+ErlDrvSInt
driver_pdl_get_refc(ErlDrvPDL pdl)
{
return pdl_read_refc(pdl);
}
-long
+ErlDrvSInt
driver_pdl_inc_refc(ErlDrvPDL pdl)
{
- long refc = pdl_inctest_refc(pdl);
+ ErlDrvSInt refc = pdl_inctest_refc(pdl);
#ifdef HARDDEBUG
- erts_fprintf(stderr, "driver_pdl_inc_refc(0x%08X) -> %ld\r\n",(unsigned) pdl, refc);
+ erts_fprintf(stderr, "driver_pdl_inc_refc(%p) -> %bpd\r\n",
+ pdl, refc);
#endif
return refc;
}
-long
+ErlDrvSInt
driver_pdl_dec_refc(ErlDrvPDL pdl)
{
- long refc = pdl_dectest_refc(pdl);
+ ErlDrvSInt refc = pdl_dectest_refc(pdl);
#ifdef HARDDEBUG
- erts_fprintf(stderr, "driver_pdl_dec_refc(0x%08X) -> %ld\r\n",(unsigned) pdl, refc);
+ erts_fprintf(stderr, "driver_pdl_dec_refc(%p) -> %bpd\r\n",
+ pdl, refc);
#endif
if (!refc)
pdl_destroy(pdl);
@@ -4066,7 +4068,7 @@ drv_cancel_timer(Port *prt)
#ifdef ERTS_SMP
erts_cancel_smp_ptimer(prt->ptimer);
#else
- erl_cancel_timer(&prt->tm);
+ erts_cancel_timer(&prt->tm);
#endif
if (erts_port_task_is_scheduled(&prt->timeout_task))
erts_port_task_abort(prt->id, &prt->timeout_task);
@@ -4090,7 +4092,7 @@ int driver_set_timer(ErlDrvPort ix, UWord t)
(ErlTimeoutProc) schedule_port_timeout,
t);
#else
- erl_set_timer(&prt->tm,
+ erts_set_timer(&prt->tm,
(ErlTimeoutProc) schedule_port_timeout,
NULL,
prt,
@@ -4121,9 +4123,9 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
#ifdef ERTS_SMP
- *t = prt->ptimer ? time_left(&prt->ptimer->timer.tm) : 0;
+ *t = prt->ptimer ? erts_time_left(&prt->ptimer->timer.tm) : 0;
#else
- *t = time_left(&prt->tm);
+ *t = erts_time_left(&prt->tm);
#endif
return 0;
}
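
The io.c hunks above reroute port timers through the renamed erts_set_timer()/erts_time_left(), but the driver-facing API is unchanged. A hedged fragment of typical driver usage (it assumes the surrounding erl_driver.h driver skeleton, which is not shown, so it is not standalone):

    #include "erl_driver.h"

    /* Arm a 500 ms port timer; the timeout callback in the driver's
     * ErlDrvEntry fires when it expires, and driver_read_timer()
     * reports how much time is left in the meantime. */
    static void arm_timer(ErlDrvPort port)
    {
        unsigned long left;
        driver_set_timer(port, 500);     /* milliseconds */
        driver_read_timer(port, &left);  /* left <= 500 here */
    }
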
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index a2439d5582..e861f97e7a 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -101,16 +101,16 @@ return
%macro: test_heap TestHeap -pack
allocate t t
-allocate_heap I I I
+allocate_heap t I t
deallocate I
init y
allocate_zero t t
-allocate_heap_zero I I I
+allocate_heap_zero t I t
trim N Remaining => i_trim N
i_trim I
-test_heap I I
+test_heap I t
allocate_heap S u==0 R => allocate S R
allocate_heap_zero S u==0 R => allocate_zero S R
@@ -124,7 +124,7 @@ init Y1 | init Y2 => init2 Y1 Y2
# Selecting values
-select_val S=q Fail=f Size=u Rest=* => const_select_val(S, Fail, Size, Rest)
+select_val S=aiq Fail=f Size=u Rest=* => const_select_val(S, Fail, Size, Rest)
select_val S=s Fail=f Size=u Rest=* | use_jump_tab(Size, Rest) => \
gen_jump_tab(S, Fail, Size, Rest)
@@ -132,34 +132,59 @@ select_val S=s Fail=f Size=u Rest=* | use_jump_tab(Size, Rest) => \
is_integer Fail=f S | select_val S=s Fail=f Size=u Rest=* | use_jump_tab(Size, Rest) => \
gen_jump_tab(S, Fail, Size, Rest)
+is_integer TypeFail=f S | select_val S=s Fail=f Size=u Rest=* | \
+ mixed_types(Size, Rest) => \
+ gen_split_values(S, TypeFail, Fail, Size, Rest)
+
select_val S=s Fail=f Size=u Rest=* | mixed_types(Size, Rest) => \
- gen_split_values(S, Fail, Size, Rest)
+ gen_split_values(S, Fail, Fail, Size, Rest)
-is_integer Fail=f S | select_val S=s Fail=f Size=u Rest=* | \
+is_integer Fail=f S | select_val S=d Fail=f Size=u Rest=* | \
fixed_size_values(Size, Rest) => gen_select_val(S, Fail, Size, Rest)
-is_atom Fail=f S | select_val S=s Fail=f Size=u Rest=* | \
+is_atom Fail=f S | select_val S=d Fail=f Size=u Rest=* | \
fixed_size_values(Size, Rest) => gen_select_val(S, Fail, Size, Rest)
-select_val S=s Fail=f Size=u Rest=* | fixed_size_values(Size, Rest) => \
- gen_select_val(S, Fail, Size, Rest)
+select_val S=s Fail=f Size=u Rest=* | floats_or_bignums(Size, Rest) => \
+ gen_select_literals(S, Fail, Size, Rest)
-select_val S=s Fail=f Size=u Rest=* | all_values_are_big(Size, Rest) => \
- gen_select_big(S, Fail, Size, Rest)
+select_val S=d Fail=f Size=u Rest=* | fixed_size_values(Size, Rest) => \
+ gen_select_val(S, Fail, Size, Rest)
-is_tuple Fail=f S | select_tuple_arity S=s Fail=f Size=u Rest=* => \
+is_tuple Fail=f S | select_tuple_arity S=d Fail=f Size=u Rest=* => \
gen_select_tuple_arity(S, Fail, Size, Rest)
-select_tuple_arity S=s Fail=f Size=u Rest=* => \
+select_tuple_arity S=d Fail=f Size=u Rest=* => \
gen_select_tuple_arity(S, Fail, Size, Rest)
-i_select_val s f I
-i_select_tuple_arity s f I
-i_select_big s f
-i_select_float s f I
+i_select_val r f I
+i_select_val x f I
+i_select_val y f I
+
+i_select_val2 r f c f c f
+i_select_val2 x f c f c f
+i_select_val2 y f c f c f
+
+i_select_tuple_arity2 r f A f A f
+i_select_tuple_arity2 x f A f A f
+i_select_tuple_arity2 y f A f A f
+
+i_select_tuple_arity r f I
+i_select_tuple_arity x f I
+i_select_tuple_arity y f I
+
+i_jump_on_val_zero r f I
+i_jump_on_val_zero x f I
+i_jump_on_val_zero y f I
+
+i_jump_on_val r f I I
+i_jump_on_val x f I I
+i_jump_on_val y f I I
-i_jump_on_val_zero s f I
-i_jump_on_val s f I I
+jump Target | label Lbl | same_label(Target, Lbl) => label Lbl
+
+is_ne_exact L1 S1 S2 | jump Fail | label L2 | same_label(L1, L2) => \
+ is_eq_exact Fail S1 S2 | label L2
%macro: get_list GetList -pack
get_list x x x
@@ -234,11 +259,17 @@ is_number Fail Literal=q => move Literal x | is_number Fail x
jump f
-case_end Literal=q => move Literal x | case_end x
-badmatch Literal=q => move Literal x | badmatch x
+case_end Literal=cq => move Literal x | case_end x
+badmatch Literal=cq => move Literal x | badmatch x
+
+case_end r
+case_end x
+case_end y
+
+badmatch r
+badmatch x
+badmatch y
-case_end s
-badmatch s
if_end
raise s s
@@ -248,12 +279,33 @@ system_limit j
move R R =>
+move C=cxy r | jump Lbl => move_jump Lbl C
+
+%macro: move_jump MoveJump -nonext
+move_jump f n
+move_jump f c
+move_jump f x
+move_jump f y
+
move X1=x Y1=y | move X2=x Y2=y => move2 X1 Y1 X2 Y2
move Y1=y X1=x | move Y2=y X2=x => move2 Y1 X1 Y2 X2
+move X1=x X2=x | move X3=x X4=x => move2 X1 X2 X3 X4
+
+move C=aiq X=x==1 => move_x1 C
+move C=aiq X=x==2 => move_x2 C
+
+move_x1 c
+move_x2 c
%macro: move2 Move2 -pack
move2 x y x y
move2 y x y x
+move2 x x x x
+
+# The compiler almost never generates a "move Literal y(Y)" instruction,
+# so let's cheat if we encounter one.
+move S=n D=y => init D
+move S=c D=y => move S x | move x D
%macro:move Move -pack -gen_dest
move x x
@@ -265,15 +317,10 @@ move r x
move r y
move c r
move c x
-move c y
move n x
move n r
move y y
-%cold
-move s d
-%hot
-
# Receive operations.
loop_rec Fail Src | smp_mark_target_label(Fail) => i_loop_rec Fail Src
@@ -306,55 +353,78 @@ i_wait_error_locked
send
#
-# Comparisions.
+# Optimized comparisons with one immediate/literal operand.
+#
+
+is_eq_exact Lbl R=rxy C=ian => i_is_eq_exact_immed Lbl R C
+is_eq_exact Lbl R=rxy C=q => i_is_eq_exact_literal R Lbl C
+
+is_ne_exact Lbl R=rxy C=ian => i_is_ne_exact_immed Lbl R C
+is_ne_exact Lbl R=rxy C=q => i_is_ne_exact_literal R Lbl C
+
+%macro: i_is_eq_exact_immed EqualImmed -fail_action
+i_is_eq_exact_immed f r c
+i_is_eq_exact_immed f x c
+i_is_eq_exact_immed f y c
+
+i_is_eq_exact_literal r f c
+i_is_eq_exact_literal x f c
+i_is_eq_exact_literal y f c
+
+%macro: i_is_ne_exact_immed NotEqualImmed -fail_action
+i_is_ne_exact_immed f r c
+i_is_ne_exact_immed f x c
+i_is_ne_exact_immed f y c
+
+i_is_ne_exact_literal r f c
+i_is_ne_exact_literal x f c
+i_is_ne_exact_literal y f c
+
+#
+# All other comparisons.
#
-is_eq_exact Lbl=f R=rxy C=ian => i_is_eq_immed Lbl R C
-is_eq Lbl=f R=rxy C=an => i_is_eq_immed Lbl R C
+is_eq_exact Lbl S1 S2 => i_fetch S1 S2 | i_is_eq_exact Lbl
+is_ne_exact Lbl S1 S2 => i_fetch S1 S2 | i_is_ne_exact Lbl
is_ge Lbl S1 S2 => i_fetch S1 S2 | i_is_ge Lbl
is_lt Lbl S1 S2 => i_fetch S1 S2 | i_is_lt Lbl
is_eq Lbl S1 S2 => i_fetch S1 S2 | i_is_eq Lbl
is_ne Lbl S1 S2 => i_fetch S1 S2 | i_is_ne Lbl
-is_eq_exact Lbl=f S1 S2 => i_fetch S1 S2 | i_is_eq_exact Lbl
-is_ne_exact Lbl S1 S2 => i_fetch S1 S2 | i_is_ne_exact Lbl
-
+i_is_eq_exact f
+i_is_ne_exact f
i_is_lt f
i_is_ge f
i_is_eq f
i_is_ne f
-i_is_eq_exact f
-i_is_ne_exact f
-
-%macro: i_is_eq_immed EqualImmed -fail_action
-i_is_eq_immed f r c
-i_is_eq_immed f x c
-i_is_eq_immed f y c
#
# Putting things.
#
-put_tuple Arity Dst | put V => i_put_tuple Arity V Dst
+put_tuple Arity Dst => i_put_tuple Dst u
-%macro: i_put_tuple PutTuple -pack
-i_put_tuple A x x
-i_put_tuple A y x
-i_put_tuple A r x
-i_put_tuple A n x
-i_put_tuple A c x
-i_put_tuple A x y
-i_put_tuple A x r
-i_put_tuple A y r
-i_put_tuple A n r
-i_put_tuple A c r
+i_put_tuple Dst Arity Puts=* | put S1 | put S2 | \
+ put S3 | put S4 | put S5 => \
+ tuple_append_put5(Arity, Dst, Puts, S1, S2, S3, S4, S5)
-%cold
-i_put_tuple A r y
-i_put_tuple A y y
-i_put_tuple A c y
-%hot
+i_put_tuple Dst Arity Puts=* | put S => \
+ tuple_append_put(Arity, Dst, Puts, S)
+
+i_put_tuple/2
+
+%macro:i_put_tuple PutTuple -pack -goto:do_put_tuple
+i_put_tuple r I
+i_put_tuple x I
+i_put_tuple y I
+
+#
+# The instruction "put_list Const [] Dst" will not be generated by
+# the current BEAM compiler. But until R15A, play it safe by handling
+# that instruction with the following transformation.
+#
+put_list Const=c n Dst => move Const x | put_list x n Dst
%macro:put_list PutList -pack -gen_dest
@@ -362,10 +432,8 @@ put_list x n x
put_list y n x
put_list x x x
put_list y x x
-put_list c n x
put_list x x r
put_list y r r
-put_list c n r
put_list y y x
put_list x y x
@@ -376,6 +444,13 @@ put_list y y r
put_list y r x
put_list r n x
+put_list x r x
+put_list x y r
+put_list y x r
+put_list y x x
+
+put_list x r r
+
# put_list SrcReg Constant Dst
put_list r c r
put_list r c x
@@ -403,17 +478,9 @@ put_list c y x
put_list c y y
%cold
-put_list x r r
put_list s s d
%hot
-%macro: put Put
-put x
-put r
-put y
-put c
-put n
-
%macro: i_fetch FetchArgs -pack
i_fetch c c
i_fetch c r
@@ -464,19 +531,20 @@ move_return n r
move S r | deallocate D | return => move_deallocate_return S r D
-%macro: move_deallocate_return MoveDeallocateReturn -nonext
-move_deallocate_return x r P
-move_deallocate_return y r P
-move_deallocate_return c r P
-move_deallocate_return n r P
+%macro: move_deallocate_return MoveDeallocateReturn -pack -nonext
+move_deallocate_return x r Q
+move_deallocate_return y r Q
+move_deallocate_return c r Q
+move_deallocate_return n r Q
deallocate D | return => deallocate_return D
%macro: deallocate_return DeallocateReturn -nonext
-deallocate_return P
+deallocate_return Q
test_heap Need u==1 | put_list Y=y r r => test_heap_1_put_list Need Y
+%macro: test_heap_1_put_list TestHeapPutList -pack
test_heap_1_put_list I y
# Test tuple & arity (head)
@@ -576,14 +644,14 @@ is_list f y
is_nonempty_list Fail=f S=rx | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs
-%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action
-is_nonempty_list_allocate f x I I
-is_nonempty_list_allocate f r I I
+%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action -pack
+is_nonempty_list_allocate f x I t
+is_nonempty_list_allocate f r I t
is_nonempty_list F=f r | test_heap I1 I2 => is_non_empty_list_test_heap F r I1 I2
-%macro: is_non_empty_list_test_heap IsNonemptyListTestHeap -fail_action
-is_non_empty_list_test_heap f r I I
+%macro: is_non_empty_list_test_heap IsNonemptyListTestHeap -fail_action -pack
+is_non_empty_list_test_heap f r I t
%macro: is_nonempty_list IsNonemptyList -fail_action
is_nonempty_list f x
@@ -912,8 +980,13 @@ node x
node y
%hot
-i_fast_element j I s d
-i_element j s s d
+i_fast_element r j I d
+i_fast_element x j I d
+i_fast_element y j I d
+
+i_element r j s d
+i_element x j s d
+i_element y j s d
bif1 f b s d
bif1_body b s d
@@ -940,11 +1013,11 @@ move S r | call_last Ar P=f D => move_call_last S r P D
i_move_call_last f P c r
-%macro:move_call_last MoveCallLast -arg_f -nonext
+%macro:move_call_last MoveCallLast -arg_f -nonext -pack
move_call_last/4
-move_call_last x r f P
-move_call_last y r f P
+move_call_last x r f Q
+move_call_last y r f Q
move S=c r | call_only Ar P=f => i_move_call_only P S r
move S=x r | call_only Ar P=f => move_call_only S r P
@@ -1307,6 +1380,8 @@ fconv Arg=iqan Dst=l => move Arg x | fconv x Dst
fmove q l
fmove d l
+fmove l d
+
fconv d l
i_fadd l l l
@@ -1322,12 +1397,6 @@ fcheckerror p => i_fcheckerror
i_fcheckerror
fclearerror
-fmove FR=l Dst=d | new_float_allocation() => fmove_new FR Dst
-
-# The new instruction for moving a float out of a floating point register.
-# (No allocation.)
-fmove_new l d
-
#
# New apply instructions in R10B.
#
@@ -1336,7 +1405,21 @@ apply I
apply_last I P
#
-# New GCing arithmetic instructions.
+# Optimize addition and subtraction of small literals using
+# the i_increment/4 instruction (in bodies, not in guards).
+#
+
+gc_bif2 p Live u$bif:erlang:splus/2 Int=i Reg=d Dst => \
+ gen_increment(Reg, Int, Live, Dst)
+gc_bif2 p Live u$bif:erlang:splus/2 Reg=d Int=i Dst => \
+ gen_increment(Reg, Int, Live, Dst)
+
+gc_bif2 p Live u$bif:erlang:sminus/2 Reg=d Int=i Dst | \
+ negation_is_small(Int) => \
+ gen_increment_from_minus(Reg, Int, Live, Dst)
+
+#
+# GCing arithmetic instructions.
#
gc_bif2 Fail I u$bif:erlang:splus/2 S1 S2 Dst=d => i_fetch S1 S2 | i_plus Fail I Dst
@@ -1359,6 +1442,10 @@ gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst
gc_bif1 Fail I u$bif:erlang:sminus/1 Src Dst=d => i_fetch i Src | i_minus Fail I Dst
gc_bif1 Fail I u$bif:erlang:splus/1 Src Dst=d => i_fetch i Src | i_plus Fail I Dst
+i_increment r I I d
+i_increment x I I d
+i_increment y I I d
+
i_plus j I d
i_minus j I d
i_times j I d
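
The new i_increment transformation folds body-context additions and subtractions of a small literal into a single instruction. Its fast path only applies while the result stays a small integer; a rough standalone model of that guard (tagging details elided, purely illustrative, helper name invented):

    #include <stdio.h>
    #include <limits.h>

    /* Add a small literal to a machine integer, reporting failure when
     * the sum would overflow, roughly the guard a small-integer fast
     * path needs before falling back to the generic arithmetic BIF. */
    static int small_increment(long x, long incr, long *out)
    {
        if ((incr > 0 && x > LONG_MAX - incr) ||
            (incr < 0 && x < LONG_MIN - incr))
            return 0;          /* would overflow: take the slow path */
        *out = x + incr;
        return 1;
    }

    int main(void)
    {
        long r;
        if (small_increment(41, 1, &r))
            printf("%ld\n", r);   /* 42 */
        return 0;
    }
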
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 27c5f99320..dff2dc37a2 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -39,13 +39,6 @@
#define ENABLE_CHILD_WAITER_THREAD 1
#endif
-/* The ERTS_TIMER_TREAD #define must be visible to the
- erl_${OS}_sys.h #include files: it controls whether
- certain optional facilities should be defined or not. */
-#if defined(ERTS_SMP) && 0
-#define ERTS_TIMER_THREAD
-#endif
-
#if defined (__WIN32__)
# include "erl_win_sys.h"
#elif defined (VXWORKS)
@@ -562,11 +555,7 @@ extern char *erts_default_arg0;
extern char os_type[];
extern int sys_init_time(void);
-#if defined(ERTS_TIMER_THREAD)
-#define erts_deliver_time()
-#else
extern void erts_deliver_time(void);
-#endif
extern void erts_time_remaining(SysTimeval *);
extern int erts_init_time_sup(void);
extern void erts_sys_init_float(void);
@@ -728,11 +717,11 @@ typedef enum {
} erts_activity_error_t;
typedef struct {
- erts_smp_atomic_t do_block;
+ erts_smp_atomic32_t do_block;
struct {
- erts_smp_atomic_t wait;
- erts_smp_atomic_t gc;
- erts_smp_atomic_t io;
+ erts_smp_atomic32_t wait;
+ erts_smp_atomic32_t gc;
+ erts_smp_atomic32_t io;
} in_activity;
} erts_system_block_state_t;
@@ -883,7 +872,7 @@ ERTS_GLB_INLINE int
erts_smp_pending_system_block(void)
{
#ifdef ERTS_SMP
- return erts_smp_atomic_read(&erts_system_block_state.do_block);
+ return (int) erts_smp_atomic32_read(&erts_system_block_state.do_block);
#else
return 0;
#endif
@@ -919,7 +908,7 @@ erts_smp_set_activity(erts_activity_t old_activity,
case ERTS_ACTIVITY_UNDEFINED:
break;
case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic_dec(&erts_system_block_state.in_activity.wait);
+ erts_smp_atomic32_dec(&erts_system_block_state.in_activity.wait);
if (locked) {
/* You are not allowed to leave activity waiting
* without supplying the possibility to block
@@ -930,10 +919,10 @@ erts_smp_set_activity(erts_activity_t old_activity,
}
break;
case ERTS_ACTIVITY_GC:
- erts_smp_atomic_dec(&erts_system_block_state.in_activity.gc);
+ erts_smp_atomic32_dec(&erts_system_block_state.in_activity.gc);
break;
case ERTS_ACTIVITY_IO:
- erts_smp_atomic_dec(&erts_system_block_state.in_activity.io);
+ erts_smp_atomic32_dec(&erts_system_block_state.in_activity.io);
break;
default:
erts_set_activity_error(ERTS_ACT_ERR_LEAVE_UNKNOWN_ACTIVITY,
@@ -949,13 +938,13 @@ erts_smp_set_activity(erts_activity_t old_activity,
case ERTS_ACTIVITY_UNDEFINED:
break;
case ERTS_ACTIVITY_WAIT:
- erts_smp_atomic_inc(&erts_system_block_state.in_activity.wait);
+ erts_smp_atomic32_inc(&erts_system_block_state.in_activity.wait);
break;
case ERTS_ACTIVITY_GC:
- erts_smp_atomic_inc(&erts_system_block_state.in_activity.gc);
+ erts_smp_atomic32_inc(&erts_system_block_state.in_activity.gc);
break;
case ERTS_ACTIVITY_IO:
- erts_smp_atomic_inc(&erts_system_block_state.in_activity.io);
+ erts_smp_atomic32_inc(&erts_system_block_state.in_activity.io);
break;
default:
erts_set_activity_error(ERTS_ACT_ERR_ENTER_UNKNOWN_ACTIVITY,
@@ -990,27 +979,31 @@ erts_smp_set_activity(erts_activity_t old_activity,
typedef erts_smp_atomic_t erts_refc_t;
-ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, long val);
-ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE long erts_refc_inctest(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE long erts_refc_dectest(erts_refc_t *refcp, long min_val);
-ERTS_GLB_INLINE void erts_refc_add(erts_refc_t *refcp, long diff, long min_val);
-ERTS_GLB_INLINE long erts_refc_read(erts_refc_t *refcp, long min_val);
+ERTS_GLB_INLINE void erts_refc_init(erts_refc_t *refcp, erts_aint_t val);
+ERTS_GLB_INLINE void erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_inctest(erts_refc_t *refcp,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE void erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_dectest(erts_refc_t *refcp,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE void erts_refc_add(erts_refc_t *refcp, erts_aint_t diff,
+ erts_aint_t min_val);
+ERTS_GLB_INLINE erts_aint_t erts_refc_read(erts_refc_t *refcp,
+ erts_aint_t min_val);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_refc_init(erts_refc_t *refcp, long val)
+erts_refc_init(erts_refc_t *refcp, erts_aint_t val)
{
erts_smp_atomic_init((erts_smp_atomic_t *) refcp, val);
}
ERTS_GLB_INLINE void
-erts_refc_inc(erts_refc_t *refcp, long min_val)
+erts_refc_inc(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- long val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_inc(): Bad refc found (refc=%ld < %ld)!\n",
@@ -1020,10 +1013,10 @@ erts_refc_inc(erts_refc_t *refcp, long min_val)
#endif
}
-ERTS_GLB_INLINE long
-erts_refc_inctest(erts_refc_t *refcp, long min_val)
+ERTS_GLB_INLINE erts_aint_t
+erts_refc_inctest(erts_refc_t *refcp, erts_aint_t min_val)
{
- long val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_inctest((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
@@ -1034,10 +1027,10 @@ erts_refc_inctest(erts_refc_t *refcp, long min_val)
}
ERTS_GLB_INLINE void
-erts_refc_dec(erts_refc_t *refcp, long min_val)
+erts_refc_dec(erts_refc_t *refcp, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- long val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_dec(): Bad refc found (refc=%ld < %ld)!\n",
@@ -1047,10 +1040,10 @@ erts_refc_dec(erts_refc_t *refcp, long min_val)
#endif
}
-ERTS_GLB_INLINE long
-erts_refc_dectest(erts_refc_t *refcp, long min_val)
+ERTS_GLB_INLINE erts_aint_t
+erts_refc_dectest(erts_refc_t *refcp, erts_aint_t min_val)
{
- long val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_dectest((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
@@ -1061,10 +1054,10 @@ erts_refc_dectest(erts_refc_t *refcp, long min_val)
}
ERTS_GLB_INLINE void
-erts_refc_add(erts_refc_t *refcp, long diff, long min_val)
+erts_refc_add(erts_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val)
{
#ifdef ERTS_REFC_DEBUG
- long val = erts_smp_atomic_addtest((erts_smp_atomic_t *) refcp, diff);
+ erts_aint_t val = erts_smp_atomic_addtest((erts_smp_atomic_t *) refcp, diff);
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
"erts_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n",
@@ -1074,10 +1067,10 @@ erts_refc_add(erts_refc_t *refcp, long diff, long min_val)
#endif
}
-ERTS_GLB_INLINE long
-erts_refc_read(erts_refc_t *refcp, long min_val)
+ERTS_GLB_INLINE erts_aint_t
+erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
{
- long val = erts_smp_atomic_read((erts_smp_atomic_t *) refcp);
+ erts_aint_t val = erts_smp_atomic_read((erts_smp_atomic_t *) refcp);
#ifdef ERTS_REFC_DEBUG
if (val < min_val)
erl_exit(ERTS_ABORT_EXIT,
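
Under ERTS_REFC_DEBUG, every erts_refc_* operation above verifies that the counter never drops below a caller-supplied floor and aborts otherwise. The same sanity-check pattern in plain C, with assert() standing in for erl_exit() (a simplified sketch, not the ERTS code):

    #include <assert.h>
    #include <stdatomic.h>

    static atomic_long refc;

    /* Decrement and verify the new count is still >= min_val, mirroring
     * the ERTS_REFC_DEBUG checks in the hunk above. */
    static long refc_dectest_checked(long min_val)
    {
        long val = atomic_fetch_sub(&refc, 1) - 1;
        assert(val >= min_val);  /* erl_exit(ERTS_ABORT_EXIT, ...) in ERTS */
        return val;
    }

    int main(void)
    {
        atomic_init(&refc, 2);
        refc_dectest_checked(1);  /* ok: 2 -> 1 */
        return 0;
    }
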
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 53d39aef0e..c65cc37fc6 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -99,80 +99,37 @@ static erts_smp_mtx_t tiw_lock;
static ErlTimer** tiw; /* the timing wheel, allocated in init_time() */
static Uint tiw_pos; /* current position in wheel */
static Uint tiw_nto; /* number of timeouts in wheel */
+static Uint tiw_min;
+static ErlTimer *tiw_min_ptr;
/* END tiw_lock protected variables */
/* Actual interval time chosen by sys_init_time() */
static int itime; /* Constant after init */
-#if defined(ERTS_TIMER_THREAD)
-static SysTimeval time_start; /* start of current time interval */
-static long ticks_end; /* time_start+ticks_end == time_wakeup */
-static long ticks_latest; /* delta from time_start at latest time update*/
-
-static ERTS_INLINE long time_gettimeofday(SysTimeval *now)
-{
- long elapsed;
-
- erts_get_timeval(now);
- now->tv_usec = 1000 * (now->tv_usec / 1000); /* ms resolution */
- elapsed = (1000 * (now->tv_sec - time_start.tv_sec) +
- (now->tv_usec - time_start.tv_usec) / 1000);
- // elapsed /= CLOCK_RESOLUTION;
- return elapsed;
-}
-
-static long do_time_update(void)
-{
- SysTimeval now;
- long elapsed;
-
- elapsed = time_gettimeofday(&now);
- ticks_latest = elapsed;
- return elapsed;
-}
-
-static ERTS_INLINE long do_time_read(void)
-{
- return ticks_latest;
-}
-
-static long do_time_reset(void)
-{
- SysTimeval now;
- long elapsed;
-
- elapsed = time_gettimeofday(&now);
- time_start = now;
- ticks_end = LONG_MAX;
- ticks_latest = 0;
- return elapsed;
-}
-
-static ERTS_INLINE void do_time_init(void)
-{
- (void)do_time_reset();
-}
-
-#else
erts_smp_atomic_t do_time; /* set at clock interrupt */
-static ERTS_INLINE long do_time_read(void) { return erts_smp_atomic_read(&do_time); }
-static ERTS_INLINE long do_time_update(void) { return do_time_read(); }
+static ERTS_INLINE erts_aint_t do_time_read(void) { return erts_smp_atomic_read(&do_time); }
+static ERTS_INLINE erts_aint_t do_time_update(void) { return do_time_read(); }
static ERTS_INLINE void do_time_init(void) { erts_smp_atomic_init(&do_time, 0L); }
-#endif
/* get the time (in units of itime) to the next timeout,
or -1 if there are no timeouts */
-static int next_time_internal(void) /* PRE: tiw_lock taken by caller */
+static erts_aint_t next_time_internal(void) /* PRE: tiw_lock taken by caller */
{
int i, tm, nto;
unsigned int min;
ErlTimer* p;
- long dt;
+ erts_aint_t dt;
if (tiw_nto == 0)
return -1; /* no timeouts in wheel */
+
+ if (tiw_min_ptr) {
+ min = tiw_min;
+ dt = do_time_read();
+ return ((min >= dt) ? (min - dt) : 0);
+ }
/* start going through wheel to find next timeout */
tm = nto = 0;
@@ -185,11 +142,17 @@ static int next_time_internal(void) /* PRE: tiw_lock taken by caller */
if (p->count == 0) {
/* found next timeout */
dt = do_time_read();
+ /* p->count is zero */
+ tiw_min_ptr = p;
+ tiw_min = tm;
return ((tm >= dt) ? (tm - dt) : 0);
} else {
/* keep shortest time in 'min' */
- if (tm + p->count*TIW_SIZE < min)
+ if (tm + p->count*TIW_SIZE < min) {
min = tm + p->count*TIW_SIZE;
+ tiw_min_ptr = p;
+ tiw_min = min;
+ }
}
p = p->next;
}
@@ -202,11 +165,35 @@ static int next_time_internal(void) /* PRE: tiw_lock taken by caller */
return ((min >= dt) ? (min - dt) : 0);
}
-#if !defined(ERTS_TIMER_THREAD)
+static void remove_timer(ErlTimer *p) {
+ /* first */
+ if (!p->prev) {
+ tiw[p->slot] = p->next;
+ if(p->next)
+ p->next->prev = NULL;
+ } else {
+ p->prev->next = p->next;
+ }
+
+ /* last */
+ if (!p->next) {
+ if (p->prev)
+ p->prev->next = NULL;
+ } else {
+ p->next->prev = p->prev;
+ }
+
+ p->next = NULL;
+ p->prev = NULL;
+ /* Make sure cancel callback isn't called */
+ p->active = 0;
+ tiw_nto--;
+}
+
/* Private export to erl_time_sup.c */
-int next_time(void)
+erts_aint_t erts_next_time(void)
{
- int ret;
+ erts_aint_t ret;
erts_smp_mtx_lock(&tiw_lock);
(void)do_time_update();
@@ -214,14 +201,13 @@ int next_time(void)
erts_smp_mtx_unlock(&tiw_lock);
return ret;
}
-#endif
-static ERTS_INLINE void bump_timer_internal(long dt) /* PRE: tiw_lock is write-locked */
+static ERTS_INLINE void bump_timer_internal(erts_aint_t dt) /* PRE: tiw_lock is write-locked */
{
Uint keep_pos;
Uint count;
ErlTimer *p, **prev, *timeout_head, **timeout_tail;
- Uint dtime = (unsigned long)dt;
+ Uint dtime = (Uint) dt;
/* no need to bump the position if there aren't any timeouts */
if (tiw_nto == 0) {
@@ -242,12 +228,16 @@ static ERTS_INLINE void bump_timer_internal(long dt) /* PRE: tiw_lock is write-l
if (tiw_pos == keep_pos) count--;
prev = &tiw[tiw_pos];
while ((p = *prev) != NULL) {
+ ASSERT( p != p->next);
if (p->count < count) { /* we have a timeout */
- *prev = p->next; /* Remove from list */
- tiw_nto--;
- p->next = NULL;
- p->active = 0; /* Make sure cancel callback
- isn't called */
+ /* remove min time */
+ if (tiw_min_ptr == p) {
+ tiw_min_ptr = NULL;
+ tiw_min = 0;
+ }
+
+ /* Remove from list */
+ remove_timer(p);
*timeout_tail = p; /* Insert in timeout queue */
timeout_tail = &p->next;
}
@@ -261,6 +251,8 @@ static ERTS_INLINE void bump_timer_internal(long dt) /* PRE: tiw_lock is write-l
dtime--;
}
tiw_pos = keep_pos;
+ if (tiw_min_ptr)
+ tiw_min -= dt;
erts_smp_mtx_unlock(&tiw_lock);
@@ -275,24 +267,17 @@ static ERTS_INLINE void bump_timer_internal(long dt) /* PRE: tiw_lock is write-l
* callback is called.
*/
p->next = NULL;
+ p->prev = NULL;
p->slot = 0;
(*p->timeout)(p->arg);
}
}
-#if defined(ERTS_TIMER_THREAD)
-static void timer_thread_bump_timer(void)
-{
- erts_smp_mtx_lock(&tiw_lock);
- bump_timer_internal(do_time_reset());
-}
-#else
-void bump_timer(long dt) /* dt is value from do_time */
+void erts_bump_timer(erts_aint_t dt) /* dt is value from do_time */
{
erts_smp_mtx_lock(&tiw_lock);
bump_timer_internal(dt);
}
-#endif
Uint
erts_timer_wheel_memory_size(void)
@@ -300,82 +285,10 @@ erts_timer_wheel_memory_size(void)
return (Uint) TIW_SIZE * sizeof(ErlTimer*);
}
-#if defined(ERTS_TIMER_THREAD)
-static struct erts_iwait *timer_thread_iwait;
-
-static int timer_thread_setup_delay(SysTimeval *rem_time)
-{
- long elapsed;
- int ticks;
-
- erts_smp_mtx_lock(&tiw_lock);
- elapsed = do_time_update();
- ticks = next_time_internal();
- if (ticks == -1) /* timer queue empty */
- ticks = 100*1000*1000;
- if (elapsed > ticks)
- elapsed = ticks;
- ticks -= elapsed;
- //ticks *= CLOCK_RESOLUTION;
- rem_time->tv_sec = ticks / 1000;
- rem_time->tv_usec = 1000 * (ticks % 1000);
- ticks_end = ticks;
- erts_smp_mtx_unlock(&tiw_lock);
- return ticks;
-}
-
-static void *timer_thread_start(void *ignore)
-{
- SysTimeval delay;
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_set_thread_name("timer");
-#endif
- erts_register_blockable_thread();
-
- for(;;) {
- if (timer_thread_setup_delay(&delay)) {
- erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- ASSERT_NO_LOCKED_LOCKS;
- erts_iwait_wait(timer_thread_iwait, &delay);
- ASSERT_NO_LOCKED_LOCKS;
- erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
- }
- else
- erts_smp_chk_system_block(NULL, NULL, NULL);
- timer_thread_bump_timer();
- ASSERT_NO_LOCKED_LOCKS;
- }
- /*NOTREACHED*/
- return NULL;
-}
-
-static ERTS_INLINE void timer_thread_post_insert(Uint ticks)
-{
- if ((Sint)ticks < ticks_end)
- erts_iwait_interrupt(timer_thread_iwait);
-}
-
-static void timer_thread_init(void)
-{
- erts_thr_opts_t opts = ERTS_THR_OPTS_DEFAULT_INITER;
- erts_tid_t tid;
-
- opts->detached = 1;
-
- timer_thread_iwait = erts_iwait_init();
- erts_thr_create(&tid, timer_thread_start, NULL, &opts);
-}
-
-#else
-static ERTS_INLINE void timer_thread_post_insert(Uint ticks) { }
-static ERTS_INLINE void timer_thread_init(void) { }
-#endif
-
/* this routine links the time cells into a free list at the start
and sets the time queue as empty */
void
-init_time(void)
+erts_init_time(void)
{
int i;
@@ -391,10 +304,13 @@ init_time(void)
tiw[i] = NULL;
do_time_init();
tiw_pos = tiw_nto = 0;
-
- timer_thread_init();
+ tiw_min_ptr = NULL;
+ tiw_min = 0;
}
+
+
+
/*
** Insert a process into the time queue, with a timeout 't'
*/
@@ -424,16 +340,31 @@ insert_timer(ErlTimer* p, Uint t)
/* insert at head of list at slot */
p->next = tiw[tm];
+ p->prev = NULL;
+ if (p->next != NULL)
+ p->next->prev = p;
tiw[tm] = p;
- tiw_nto++;
- timer_thread_post_insert(ticks);
+
+    /* maintain the cached minimum time on insert */
+ if ((tiw_nto == 0) || ((tiw_min_ptr != NULL) && (ticks < tiw_min))) {
+ tiw_min = ticks;
+ tiw_min_ptr = p;
+ }
+ if ((tiw_min_ptr == p) && (ticks > tiw_min)) {
+ /* some other timer might be 'min' now */
+ tiw_min = 0;
+ tiw_min_ptr = NULL;
+ }
+
+ tiw_nto++;
}
void
-erl_set_timer(ErlTimer* p, ErlTimeoutProc timeout, ErlCancelProc cancel,
+erts_set_timer(ErlTimer* p, ErlTimeoutProc timeout, ErlCancelProc cancel,
void* arg, Uint t)
{
+
erts_deliver_time();
erts_smp_mtx_lock(&tiw_lock);
if (p->active) { /* XXX assert ? */
@@ -446,42 +377,34 @@ erl_set_timer(ErlTimer* p, ErlTimeoutProc timeout, ErlCancelProc cancel,
p->active = 1;
insert_timer(p, t);
erts_smp_mtx_unlock(&tiw_lock);
-#if defined(ERTS_SMP) && !defined(ERTS_TIMER_THREAD)
+#if defined(ERTS_SMP)
if (t <= (Uint) LONG_MAX)
erts_sys_schedule_interrupt_timed(1, (long) t);
#endif
}
void
-erl_cancel_timer(ErlTimer* p)
+erts_cancel_timer(ErlTimer* p)
{
- ErlTimer *tp;
- ErlTimer **prev;
-
erts_smp_mtx_lock(&tiw_lock);
if (!p->active) { /* allow repeated cancel (drivers) */
erts_smp_mtx_unlock(&tiw_lock);
return;
}
- /* find p in linked list at slot p->slot and remove it */
- prev = &tiw[p->slot];
- while ((tp = *prev) != NULL) {
- if (tp == p) {
- *prev = p->next; /* Remove from list */
- tiw_nto--;
- p->next = NULL;
- p->slot = p->count = 0;
- p->active = 0;
- if (p->cancel != NULL) {
- erts_smp_mtx_unlock(&tiw_lock);
- (*p->cancel)(p->arg);
- } else {
- erts_smp_mtx_unlock(&tiw_lock);
- }
- return;
- } else {
- prev = &tp->next;
- }
+
+    /* if this is the cached 'min' timer, invalidate the cache */
+ if (p == tiw_min_ptr) {
+ tiw_min_ptr = NULL;
+ tiw_min = 0;
+ }
+
+ remove_timer(p);
+ p->slot = p->count = 0;
+
+ if (p->cancel != NULL) {
+ erts_smp_mtx_unlock(&tiw_lock);
+ (*p->cancel)(p->arg);
+ return;
}
erts_smp_mtx_unlock(&tiw_lock);
}
@@ -493,10 +416,10 @@ erl_cancel_timer(ErlTimer* p)
immediately if it hadn't been cancelled).
*/
Uint
-time_left(ErlTimer *p)
+erts_time_left(ErlTimer *p)
{
Uint left;
- long dt;
+ erts_aint_t dt;
erts_smp_mtx_lock(&tiw_lock);
@@ -517,12 +440,11 @@ time_left(ErlTimer *p)
erts_smp_mtx_unlock(&tiw_lock);
- return left * itime;
+ return (Uint) left * itime;
}
#ifdef DEBUG
-
-void p_slpq()
+void erts_p_slpq()
{
int i;
ErlTimer* p;
@@ -551,5 +473,4 @@ void p_slpq()
erts_smp_mtx_unlock(&tiw_lock);
}
-
#endif /* DEBUG */
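
Taken together, the time.c hunks make the wheel slots doubly linked (so remove_timer() is O(1)) and cache the nearest timeout in tiw_min/tiw_min_ptr so that erts_next_time() can usually skip the full wheel scan; the cache is refreshed on insert and invalidated when its timer is removed or fires. A compact single-threaded model of that caching idea, with one list standing in for the whole wheel (illustrative only, not the ERTS data structures):

    #include <stdio.h>
    #include <stddef.h>

    struct timer {
        struct timer *next, *prev;
        unsigned long ticks;          /* absolute expiry, simplified */
    };

    static struct timer *head;        /* one "slot" stands in for the wheel */
    static struct timer *min_ptr;     /* cached earliest timer, may be NULL */

    static void insert(struct timer *t)
    {
        t->prev = NULL;
        t->next = head;
        if (head) head->prev = t;
        head = t;
        if (!min_ptr || t->ticks < min_ptr->ticks)
            min_ptr = t;              /* keep the cache exact on insert */
    }

    static void removet(struct timer *t)
    {
        if (t->prev) t->prev->next = t->next; else head = t->next;
        if (t->next) t->next->prev = t->prev;
        if (min_ptr == t) min_ptr = NULL;  /* invalidate; rescan on demand */
    }

    static unsigned long next_timeout(void)
    {
        struct timer *t;
        if (min_ptr) return min_ptr->ticks;   /* fast path: cache hit */
        for (t = head; t; t = t->next)        /* slow path: full rescan */
            if (!min_ptr || t->ticks < min_ptr->ticks)
                min_ptr = t;
        return min_ptr ? min_ptr->ticks : (unsigned long)-1;
    }

    int main(void)
    {
        struct timer a = { 0, 0, 30 }, b = { 0, 0, 10 };
        insert(&a); insert(&b);
        printf("%lu\n", next_timeout());  /* 10, from the cache */
        removet(&b);
        printf("%lu\n", next_timeout());  /* 30, after a rescan */
        return 0;
    }
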
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index ab5e8b5d4a..1d60b54d21 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -59,13 +59,6 @@
/* profile_scheduler mini message queue */
-#ifdef ERTS_TIMER_THREAD
-/* A timer thread is not welcomed with this lock violation work around.
- * - Björn-Egil
- */
-#error Timer thread may not be enabled due to lock violation.
-#endif
-
typedef struct {
Uint scheduler_id;
Uint no_schedulers;
@@ -3183,7 +3176,7 @@ erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
*timer_ref = res;
- erl_set_timer(&res->timer.tm,
+ erts_set_timer(&res->timer.tm,
(ErlTimeoutProc) ptimer_timeout,
(ErlCancelProc) ptimer_cancelled,
(void*) res,
@@ -3197,7 +3190,7 @@ erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer)
ASSERT(*ptimer->timer.timer_ref == ptimer);
*ptimer->timer.timer_ref = NULL;
ptimer->timer.flags |= ERTS_PTMR_FLG_CANCELLED;
- erl_cancel_timer(&ptimer->timer.tm);
+ erts_cancel_timer(&ptimer->timer.tm);
}
}
@@ -3637,19 +3630,19 @@ erts_set_activity_error(erts_activity_error_t error, char *file, int line)
}
-static ERTS_INLINE int
+static ERTS_INLINE erts_aint32_t
threads_not_under_control(void)
{
- int res = system_block_state.threads_to_block;
+ erts_aint32_t res = system_block_state.threads_to_block;
/* Waiting is always an allowed activity... */
- res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.wait);
+ res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.wait);
if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_GC)
- res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.gc);
+ res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.gc);
if (system_block_state.allowed_activities & ERTS_BS_FLG_ALLOW_IO)
- res -= erts_smp_atomic_read(&erts_system_block_state.in_activity.io);
+ res -= erts_smp_atomic32_read(&erts_system_block_state.in_activity.io);
if (res < 0) {
ASSERT(0);
@@ -3709,7 +3702,7 @@ erts_block_system(Uint32 allowed_activities)
}
else {
- erts_smp_atomic_inc(&erts_system_block_state.do_block);
+ erts_smp_atomic32_inc(&erts_system_block_state.do_block);
/* Someone else might be waiting for us to block... */
if (do_block) {
@@ -3761,11 +3754,11 @@ erts_emergency_block_system(long timeout, Uint32 allowed_activities)
another_blocker = erts_smp_pending_system_block();
system_block_state.emergency = 1;
- erts_smp_atomic_inc(&erts_system_block_state.do_block);
+ erts_smp_atomic32_inc(&erts_system_block_state.do_block);
if (another_blocker) {
if (is_blocker()) {
- erts_smp_atomic_dec(&erts_system_block_state.do_block);
+ erts_smp_atomic32_dec(&erts_system_block_state.do_block);
res = 0;
goto done;
}
@@ -3822,7 +3815,7 @@ erts_release_system(void)
if (system_block_state.recursive_block)
system_block_state.recursive_block--;
else {
- do_block = erts_smp_atomic_dectest(&erts_system_block_state.do_block);
+ do_block = erts_smp_atomic32_dectest(&erts_system_block_state.do_block);
system_block_state.have_blocker = 0;
if (is_blockable_thread())
system_block_state.threads_to_block++;
@@ -3957,10 +3950,10 @@ erts_system_block_init(void)
/* Global state... */
- erts_smp_atomic_init(&erts_system_block_state.do_block, 0L);
- erts_smp_atomic_init(&erts_system_block_state.in_activity.wait, 0L);
- erts_smp_atomic_init(&erts_system_block_state.in_activity.gc, 0L);
- erts_smp_atomic_init(&erts_system_block_state.in_activity.io, 0L);
+ erts_smp_atomic32_init(&erts_system_block_state.do_block, 0);
+ erts_smp_atomic32_init(&erts_system_block_state.in_activity.wait, 0);
+ erts_smp_atomic32_init(&erts_system_block_state.in_activity.gc, 0);
+ erts_smp_atomic32_init(&erts_system_block_state.in_activity.io, 0);
/* Make sure blockable threads unregister when exiting... */
erts_smp_install_exit_handler(erts_unregister_blockable_thread);
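
The sys.h and utils.c hunks move the system-block bookkeeping to 32-bit atomics. The underlying pattern is a per-activity counter that each blockable thread brackets its work with, while a blocker raises do_block and waits for the counters to settle. A minimal sketch with C11 atomics standing in for erts_smp_atomic32_t (illustrative, single counter only):

    #include <stdatomic.h>

    static atomic_int in_io;     /* threads currently doing I/O */
    static atomic_int do_block;  /* nonzero while a blocker waits */

    static void io_begin(void) { atomic_fetch_add(&in_io, 1); }
    static void io_end(void)   { atomic_fetch_sub(&in_io, 1); }

    static int pending_block(void) { return atomic_load(&do_block) != 0; }

    int main(void)
    {
        io_begin();
        if (pending_block()) {
            /* a real thread would yield to the blocker here */
        }
        io_end();
        return 0;
    }
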
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 786fa7da77..6449c6f506 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -385,7 +385,6 @@ struct t_data
ErlDrvBinary *binp;
int size;
int offset;
- char name[1];
} read_file;
struct {
struct t_readdir_buf *first_buf;
@@ -1117,7 +1116,7 @@ static void invoke_read_file(void *data)
Sint64 size;
if (! (d->result_ok =
- efile_openfile(&d->errInfo, d->c.read_file.name,
+ efile_openfile(&d->errInfo, d->b,
EFILE_MODE_READ, &fd, &size))) {
goto done;
}
@@ -3071,7 +3070,7 @@ file_outputv(ErlDrvData e, ErlIOVec *ev) {
d->command = command;
d->reply = !0;
/* Copy name */
- FILENAME_COPY(d->c.read_file.name, filename);
+ FILENAME_COPY(d->b, filename);
d->c.read_file.binp = NULL;
d->invoke = invoke_read_file;
d->free = free_read_file;
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 1382d1dfe4..818bc6334e 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -4945,39 +4945,45 @@ static int inet_ctl_getifaddrs(inet_descriptor* desc_p,
*buf_p++ = '\0';
*buf_p++ = INET_IFOPT_FLAGS;
put_int32(IFGET_FLAGS(ifa_p->ifa_flags), buf_p); buf_p += 4;
- if (ifa_p->ifa_addr->sa_family == AF_INET
+ if (ifa_p->ifa_addr) {
+ if (ifa_p->ifa_addr->sa_family == AF_INET
#if defined(AF_INET6)
- || ifa_p->ifa_addr->sa_family == AF_INET6
+ || ifa_p->ifa_addr->sa_family == AF_INET6
#endif
- ) {
- SOCKADDR_TO_BUF(INET_IFOPT_ADDR, ifa_p->ifa_addr);
- BUF_ENSURE(1);
- SOCKADDR_TO_BUF(INET_IFOPT_NETMASK, ifa_p->ifa_netmask);
- if (ifa_p->ifa_flags & IFF_POINTOPOINT) {
- BUF_ENSURE(1);
- SOCKADDR_TO_BUF(INET_IFOPT_DSTADDR, ifa_p->ifa_dstaddr);
- } else if (ifa_p->ifa_flags & IFF_BROADCAST) {
- BUF_ENSURE(1);
- SOCKADDR_TO_BUF(INET_IFOPT_BROADADDR, ifa_p->ifa_broadaddr);
+ ) {
+ SOCKADDR_TO_BUF(INET_IFOPT_ADDR, ifa_p->ifa_addr);
+ if (ifa_p->ifa_netmask) {
+ BUF_ENSURE(1);
+ SOCKADDR_TO_BUF(INET_IFOPT_NETMASK, ifa_p->ifa_netmask);
+ }
+ if (ifa_p->ifa_dstaddr &&
+ (ifa_p->ifa_flags & IFF_POINTOPOINT)) {
+ BUF_ENSURE(1);
+ SOCKADDR_TO_BUF(INET_IFOPT_DSTADDR, ifa_p->ifa_dstaddr);
+ } else if (ifa_p->ifa_broadaddr &&
+ (ifa_p->ifa_flags & IFF_BROADCAST)) {
+ BUF_ENSURE(1);
+ SOCKADDR_TO_BUF(INET_IFOPT_BROADADDR, ifa_p->ifa_broadaddr);
+ }
}
- }
#if defined(AF_LINK) || defined(AF_PACKET)
- else if (
+ else if (
#if defined(AF_LINK)
- ifa_p->ifa_addr->sa_family == AF_LINK
+ ifa_p->ifa_addr->sa_family == AF_LINK
#else
- 0
+ 0
#endif
#if defined(AF_PACKET)
- || ifa_p->ifa_addr->sa_family == AF_PACKET
+ || ifa_p->ifa_addr->sa_family == AF_PACKET
#endif
- ) {
- char *bp = buf_p;
- BUF_ENSURE(1);
- SOCKADDR_TO_BUF(INET_IFOPT_HWADDR, ifa_p->ifa_addr);
- if (buf_p - bp < 4) buf_p = bp; /* Empty hwaddr */
- }
+ ) {
+ char *bp = buf_p;
+ BUF_ENSURE(1);
+ SOCKADDR_TO_BUF(INET_IFOPT_HWADDR, ifa_p->ifa_addr);
+ if (buf_p - bp < 4) buf_p = bp; /* Empty hwaddr */
+ }
#endif
+ }
BUF_ENSURE(1);
*buf_p++ = '\0';
}
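
The inet_drv.c hunk exists because getifaddrs() may return entries whose ifa_addr (and likewise ifa_netmask, ifa_dstaddr, ifa_broadaddr) is NULL, for example on some point-to-point or packet interfaces, and the old code dereferenced it unconditionally. The defensive loop shape, as a standalone Unix program:

    #include <ifaddrs.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        struct ifaddrs *ifa, *p;
        if (getifaddrs(&ifa) < 0) {
            perror("getifaddrs");
            return 1;
        }
        for (p = ifa; p; p = p->ifa_next) {
            if (!p->ifa_addr)   /* the guard the fix adds */
                continue;
            printf("%s family=%d\n", p->ifa_name, p->ifa_addr->sa_family);
        }
        freeifaddrs(ifa);
        return 0;
    }
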
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index 6297ccb8bc..4b3934657c 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -620,7 +620,7 @@ efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
if (IS_DOT_OR_DOTDOT(dirp->d_name))
continue;
buffer[0] = '\0';
- strncat(buffer, dirp->d_name, size-1);
+ strncat(buffer, dirp->d_name, (*size)-1);
*size = strlen(dirp->d_name);
return 1;
}
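
The unix_efile.c change fixes a real bug: size is an in/out pointer (capacity on entry, name length on return), so the strncat() bound must be (*size)-1; the old size-1 silently used the pointer value as a length. A reduced illustration of the corrected contract (helper name invented):

    #include <stdio.h>
    #include <string.h>

    /* 'size' is in/out: buffer capacity on entry, entry-name length on
     * return, the same contract as efile_readdir() above. */
    static void copy_name(char *buffer, size_t *size, const char *name)
    {
        buffer[0] = '\0';
        strncat(buffer, name, (*size) - 1);  /* capacity bound, not 'size-1' */
        *size = strlen(name);
    }

    int main(void)
    {
        char buf[8];
        size_t size = sizeof buf;
        copy_name(buf, &size, "beam.smp");
        printf("%s %zu\n", buf, size);  /* "beam.sm" 8: copy truncated */
        return 0;
    }
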
diff --git a/erts/emulator/drivers/win32/win_con.c b/erts/emulator/drivers/win32/win_con.c
index 2202ca655f..14f7941643 100644
--- a/erts/emulator/drivers/win32/win_con.c
+++ b/erts/emulator/drivers/win32/win_con.c
@@ -704,6 +704,18 @@ FrameWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam)
}
write_inbuf(&c, 1);
return 0;
+ case WM_MOUSEWHEEL:
+ {
+ int delta = GET_WHEEL_DELTA_WPARAM(wParam);
+ if (delta < 0) {
+ PostMessage(hClientWnd, WM_VSCROLL, MAKELONG(SB_THUMBTRACK,
+ (iVscrollPos + 5)),0);
+ } else {
+ WORD pos = ((iVscrollPos - 5) < 0) ? 0 : (iVscrollPos - 5);
+ PostMessage(hClientWnd, WM_VSCROLL, MAKELONG(SB_THUMBTRACK,pos),0);
+ }
+ return 0;
+ }
case WM_CHAR:
c = (TCHAR)wParam;
write_inbuf(&c,1);
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index b1ee165489..010d60eb63 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -35,6 +35,7 @@
#include "global.h"
#include "erl_threads.h"
#include "erl_mtrace.h"
+#include "erl_time.h"
#include "big.h"
#if HAVE_ERTS_MSEG
@@ -271,7 +272,7 @@ schedule_cache_check(void)
#endif
{
cache_check_timer.active = 0;
- erl_set_timer(&cache_check_timer,
+ erts_set_timer(&cache_check_timer,
check_cache,
NULL,
NULL,
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index c17806d96c..4d0ca97889 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -124,9 +124,9 @@
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (long) 1))
+ ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (long) 0)
+ erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
((int) erts_smp_atomic_read(&(PS)->polled))
@@ -134,11 +134,11 @@
#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (long) 1); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
do { \
- erts_smp_atomic_set(&(PS)->woken, (long) 0); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
@@ -179,9 +179,9 @@ do { \
#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (long) 1)
+ erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 1)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
- erts_smp_atomic_set(&(PS)->have_update_requests, (long) 0)
+ erts_smp_atomic_set(&(PS)->have_update_requests, (erts_aint_t) 0)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
((int) erts_smp_atomic_read(&(PS)->have_update_requests))
#else
@@ -202,13 +202,13 @@ do { \
#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
do { \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 0); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 1); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
((int) erts_smp_atomic_read(&(PS)->interrupt))
@@ -356,7 +356,7 @@ unset_interrupted_chk(ErtsPollSet ps)
res = ps->interrupt;
ps->interrupt = 0;
#else
- res = (int) erts_smp_atomic_xchg(&ps->interrupt, (long) 0);
+ res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
ERTS_THR_MEMORY_BARRIER;
#endif
return res;
@@ -369,7 +369,7 @@ static ERTS_INLINE int
set_poller_woken_chk(ErtsPollSet ps)
{
ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (long) 1);
+ return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
}
#endif
@@ -1918,7 +1918,7 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
return 0;
}
else {
- long timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+ erts_aint_t timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
ASSERT(timeout >= 0);
erts_smp_atomic_set(&ps->timeout, timeout);
#if ERTS_POLL_USE_FALLBACK
@@ -2112,7 +2112,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#endif
done:
- erts_smp_atomic_set(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Leaving %s = erts_poll_wait()\n",
res == 0 ? "0" : erl_errno_id(res));
@@ -2150,10 +2150,12 @@ ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
 * is not guaranteed that it will time out before 'msec' milliseconds.
*/
void
-ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, int set, long msec)
+ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
+ int set,
+ long msec)
{
if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > msec) {
+ if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
ERTS_POLLSET_SET_INTERRUPTED(ps);
#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
wake_poller(ps);
@@ -2315,7 +2317,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#else
erts_smp_atomic_init(&ps->interrupt, 0);
#endif
- erts_smp_atomic_init(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
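
The erl_poll.c hunks keep the pending poll timeout in an erts_aint_t that is parked at ERTS_AINT_T_MAX whenever no wait is in progress, so erts_poll_interrupt_timed() can decide with one atomic read whether waking the poller is worthwhile. The shape of that check, simplified to C11 atomics (illustrative; LONG_MAX stands in for ERTS_AINT_T_MAX):

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long timeout;  /* LONG_MAX means "no wait in progress" */

    /* Only wake the poller if it would otherwise sleep longer than
     * msec, mirroring erts_poll_interrupt_timed() above. */
    static void interrupt_timed(long msec)
    {
        if (atomic_load(&timeout) > msec)
            puts("wake poller");
        else
            puts("poller wakes soon enough anyway");
    }

    int main(void)
    {
        atomic_init(&timeout, LONG_MAX);
        interrupt_timed(100);        /* wake: idle value is LONG_MAX */
        atomic_store(&timeout, 50);  /* poller sleeping for 50 ms */
        interrupt_timed(100);        /* skip: it wakes before 100 ms */
        return 0;
    }
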
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index 2d5ef882f6..824678a0bb 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -329,11 +329,4 @@ extern int exit_async(void);
#define ERTS_EXIT_AFTER_DUMP _exit
-#ifdef ERTS_TIMER_THREAD
-struct erts_iwait; /* opaque for clients */
-extern struct erts_iwait *erts_iwait_init(void);
-extern void erts_iwait_wait(struct erts_iwait *iwait, struct timeval *delay);
-extern void erts_iwait_interrupt(struct erts_iwait *iwait);
-#endif /* ERTS_TIMER_THREAD */
-
#endif /* #ifndef _ERL_UNIX_SYS_H */
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 01ba773688..bfc04faa45 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -237,9 +237,9 @@ static int max_files = -1;
#ifdef ERTS_SMP
erts_smp_atomic_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 1)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 0)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -3109,226 +3109,3 @@ erl_sys_args(int* argc, char** argv)
}
*argc = j;
}
-
-#ifdef ERTS_TIMER_THREAD
-
-/*
- * Interruptible-wait facility: low-level synchronisation state
- * and methods that are implementation dependent.
- *
- * Constraint: Every implementation must define 'struct erts_iwait'
- * with a field 'erts_smp_atomic_t state;'.
- */
-
-/* values for struct erts_iwait's state field */
-#define IWAIT_WAITING 0
-#define IWAIT_AWAKE 1
-#define IWAIT_INTERRUPT 2
-
-#if 0 /* XXX: needs feature test in erts/configure.in */
-
-/*
- * This is an implementation of the interruptible wait facility on
- * top of Linux-specific futexes.
- */
-#include <asm/unistd.h>
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-static int sys_futex(void *futex, int op, int val, const struct timespec *timeout)
-{
- return syscall(__NR_futex, futex, op, val, timeout);
-}
-
-struct erts_iwait {
- erts_smp_atomic_t state; /* &state.counter is our futex */
-};
-
-static void iwait_lowlevel_init(struct erts_iwait *iwait) { /* empty */ }
-
-static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- struct timespec timeout;
- int res;
-
- timeout.tv_sec = delay->tv_sec;
- timeout.tv_nsec = delay->tv_usec * 1000;
- res = sys_futex((void*)&iwait->state.counter, FUTEX_WAIT, IWAIT_WAITING, &timeout);
- if (res < 0 && errno != ETIMEDOUT && errno != EWOULDBLOCK && errno != EINTR)
- perror("FUTEX_WAIT");
-}
-
-static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
-{
- int res = sys_futex((void*)&iwait->state.counter, FUTEX_WAKE, 1, NULL);
- if (res < 0)
- perror("FUTEX_WAKE");
-}
-
-#else /* using poll() or select() */
-
-/*
- * This is an implementation of the interruptible wait facility on
- * top of pipe(), poll() or select(), read(), and write().
- */
-struct erts_iwait {
- erts_smp_atomic_t state;
- int read_fd; /* wait polls and reads this fd */
- int write_fd; /* interrupt writes this fd */
-};
-
-static void iwait_lowlevel_init(struct erts_iwait *iwait)
-{
- int fds[2];
-
- if (pipe(fds) < 0) {
- perror("pipe()");
- exit(1);
- }
- iwait->read_fd = fds[0];
- iwait->write_fd = fds[1];
-}
-
-#if defined(ERTS_USE_POLL)
-
-#include <sys/poll.h>
-#define PERROR_POLL "poll()"
-
-static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
-{
- struct pollfd pollfd;
- int timeout;
-
- pollfd.fd = read_fd;
- pollfd.events = POLLIN;
- pollfd.revents = 0;
- timeout = delay->tv_sec * 1000 + delay->tv_usec / 1000;
- return poll(&pollfd, 1, timeout);
-}
-
-#else /* !ERTS_USE_POLL */
-
-#include <sys/select.h>
-#define PERROR_POLL "select()"
-
-static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
-{
- fd_set readfds;
-
- FD_ZERO(&readfds);
- FD_SET(read_fd, &readfds);
- return select(read_fd + 1, &readfds, NULL, NULL, delay);
-}
-
-#endif /* !ERTS_USE_POLL */
-
-static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- int res;
- char buf[64];
-
- res = iwait_lowlevel_poll(iwait->read_fd, delay);
- if (res > 0)
- (void)read(iwait->read_fd, buf, sizeof buf);
- else if (res < 0 && errno != EINTR)
- perror(PERROR_POLL);
-}
-
-static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
-{
- int res = write(iwait->write_fd, "!", 1);
- if (res < 0)
- perror("write()");
-}
-
-#endif /* using poll() or select() */
-
-#if 0 /* not using poll() or select() */
-/*
- * This is an implementation of the interruptible wait facility on
- * top of pthread_cond_timedwait(). This has two problems:
- * 1. pthread_cond_timedwait() requires an absolute time point,
- * so the relative delay must be converted to absolute time.
- * Worse, this breaks if the machine's time is adjusted while
- * we're preparing to wait.
- * 2. Each cond operation requires additional mutex lock/unlock operations.
- *
- * Problem 2 is probably not too bad on Linux (they'll just become
- * relatively cheap futex operations), but problem 1 is the real killer.
- * Only use this implementation if no better alternatives are available!
- */
-struct erts_iwait {
- erts_smp_atomic_t state;
- pthread_cond_t cond;
- pthread_mutex_t mutex;
-};
-
-static void iwait_lowlevel_init(struct erts_iwait *iwait)
-{
- iwait->cond = (pthread_cond_t) PTHREAD_COND_INITIALIZER;
- iwait->mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
-}
-
-static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- struct timeval tmp;
- struct timespec timeout;
-
- /* Due to pthread_cond_timedwait()'s use of absolute
- time, this must be the real gettimeofday(), _not_
- the "smoothed" one beam/erl_time_sup.c implements. */
- gettimeofday(&tmp, NULL);
-
- tmp.tv_sec += delay->tv_sec;
- tmp.tv_usec += delay->tv_usec;
- if (tmp.tv_usec >= 1000*1000) {
- tmp.tv_usec -= 1000*1000;
- tmp.tv_sec += 1;
- }
- timeout.tv_sec = tmp.tv_sec;
- timeout.tv_nsec = tmp.tv_usec * 1000;
- pthread_mutex_lock(&iwait->mutex);
- pthread_cond_timedwait(&iwait->cond, &iwait->mutex, &timeout);
- pthread_mutex_unlock(&iwait->mutex);
-}
-
-static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
-{
- pthread_mutex_lock(&iwait->mutex);
- pthread_cond_signal(&iwait->cond);
- pthread_mutex_unlock(&iwait->mutex);
-}
-
-#endif /* not using POLL */
-
-/*
- * Interruptible-wait facility. This is just a wrapper around the
- * low-level synchronisation code, where we maintain our logical
- * state in order to suppress some state transitions.
- */
-
-struct erts_iwait *erts_iwait_init(void)
-{
- struct erts_iwait *iwait = malloc(sizeof *iwait);
- if (!iwait) {
- perror("malloc");
- exit(1);
- }
- iwait_lowlevel_init(iwait);
- erts_smp_atomic_init(&iwait->state, IWAIT_AWAKE);
- return iwait;
-}
-
-void erts_iwait_wait(struct erts_iwait *iwait, struct timeval *delay)
-{
- if (erts_smp_atomic_xchg(&iwait->state, IWAIT_WAITING) != IWAIT_INTERRUPT)
- iwait_lowlevel_wait(iwait, delay);
- erts_smp_atomic_set(&iwait->state, IWAIT_AWAKE);
-}
-
-void erts_iwait_interrupt(struct erts_iwait *iwait)
-{
- if (erts_smp_atomic_xchg(&iwait->state, IWAIT_INTERRUPT) == IWAIT_WAITING)
- iwait_lowlevel_interrupt(iwait);
-}
-
-#endif /* ERTS_TIMER_THREAD */
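The removed erts_iwait wrapper above is a small state machine worth spelling out: erts_iwait_wait() swaps the state to IWAIT_WAITING and skips the low-level wait entirely if an interrupt already arrived, while erts_iwait_interrupt() wakes the waiter only if it is actually waiting. A minimal C sketch of how a timer loop would have driven this API (the loop and the delay computation are illustrative assumptions; only the three erts_iwait_* functions come from the deleted code):

    #include <sys/time.h>

    struct erts_iwait;                       /* opaque, as above */
    extern struct erts_iwait *erts_iwait_init(void);
    extern void erts_iwait_wait(struct erts_iwait *, struct timeval *);
    extern void erts_iwait_interrupt(struct erts_iwait *);

    static void timer_thread_loop(struct erts_iwait *iw)
    {
        struct timeval delay;
        for (;;) {
            delay.tv_sec = 1;                /* assumed: next timeout */
            delay.tv_usec = 0;
            /* Returns immediately if erts_iwait_interrupt() ran first:
               the xchg in erts_iwait_wait() observes IWAIT_INTERRUPT. */
            erts_iwait_wait(iw, &delay);
            /* ... fire expired timers (not shown) ... */
        }
    }

The point of keeping the logical state outside the low-level primitive is that an interrupt arriving between the state swap and the actual sleep is never lost.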
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
index 411b4b37cf..c6e7b65f32 100644
--- a/erts/emulator/sys/vxworks/sys.c
+++ b/erts/emulator/sys/vxworks/sys.c
@@ -85,7 +85,7 @@ EXTERN_FUNCTION(void, erl_exit, (int n, char*, _DOTS_));
EXTERN_FUNCTION(void, erl_error, (char*, va_list));
EXTERN_FUNCTION(int, driver_interrupt, (int, int));
EXTERN_FUNCTION(void, increment_time, (int));
-EXTERN_FUNCTION(int, next_time, (_VOID_));
+EXTERN_FUNCTION(int, erts_next_time, (_VOID_));
EXTERN_FUNCTION(void, set_reclaim_free_function, (FreeFunction));
EXTERN_FUNCTION(int, erl_mem_info_get, (MEM_PART_STATS *));
EXTERN_FUNCTION(void, erl_crash_dump, (char* file, int line, char* fmt, ...));
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index a766fe9575..d84ae2ede2 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -297,11 +297,11 @@ struct ErtsPollSet_ {
#define ERTS_POLLSET_UNLOCK(PS) \
erts_smp_mtx_unlock(&(PS)->mtx)
#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
- ((int) erts_smp_atomic_xchg(&(PS)->polled, (long) 1))
+ ((int) erts_smp_atomic_xchg(&(PS)->polled, (erts_aint_t) 1))
#define ERTS_POLLSET_SET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (long) 1)
+ erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 1)
#define ERTS_POLLSET_UNSET_POLLED(PS) \
- erts_smp_atomic_set(&(PS)->polled, (long) 0)
+ erts_smp_atomic_set(&(PS)->polled, (erts_aint_t) 0)
#define ERTS_POLLSET_IS_POLLED(PS) \
((int) erts_smp_atomic_read(&(PS)->polled))
@@ -309,11 +309,11 @@ struct ErtsPollSet_ {
#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->woken, (long) 1); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
do { \
- erts_smp_atomic_set(&(PS)->woken, (long) 0); \
+ erts_smp_atomic_set(&(PS)->woken, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
@@ -322,13 +322,13 @@ do { \
#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
do { \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 0); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 0); \
ERTS_THR_MEMORY_BARRIER; \
} while (0)
#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
do { \
ERTS_THR_MEMORY_BARRIER; \
- erts_smp_atomic_set(&(PS)->interrupt, (long) 1); \
+ erts_smp_atomic_set(&(PS)->interrupt, (erts_aint_t) 1); \
} while (0)
#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
((int) erts_smp_atomic_read(&(PS)->interrupt))
@@ -336,7 +336,7 @@ do { \
static ERTS_INLINE int
unset_interrupted_chk(ErtsPollSet ps)
{
- int res = (int) erts_smp_atomic_xchg(&ps->interrupt, (long) 0);
+ int res = (int) erts_smp_atomic_xchg(&ps->interrupt, (erts_aint_t) 0);
ERTS_THR_MEMORY_BARRIER;
return res;
@@ -346,7 +346,7 @@ static ERTS_INLINE int
set_poller_woken_chk(ErtsPollSet ps)
{
ERTS_THR_MEMORY_BARRIER;
- return (int) erts_smp_atomic_xchg(&ps->woken, (long) 1);
+ return (int) erts_smp_atomic_xchg(&ps->woken, (erts_aint_t) 1);
}
#else
@@ -413,9 +413,9 @@ set_poller_woken_chk(ErtsPollSet ps)
#ifdef ERTS_SMP
extern erts_smp_atomic_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 1)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 0)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
#else
extern volatile int erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
@@ -986,7 +986,7 @@ void erts_poll_interrupt_timed(ErtsPollSet ps,
HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
#ifdef ERTS_SMP
if (set) {
- if (erts_smp_atomic_read(&ps->timeout) > msec) {
+ if (erts_smp_atomic_read(&ps->timeout) > (erts_aint_t) msec) {
ERTS_POLLSET_SET_INTERRUPTED(ps);
wake_poller(ps);
}
@@ -1228,7 +1228,7 @@ int erts_poll_wait(ErtsPollSet ps,
erts_mtx_unlock(&w->mtx);
}
done:
- erts_smp_atomic_set(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_set(&ps->timeout, ERTS_AINT_T_MAX);
*len = num;
ERTS_POLLSET_UNLOCK(ps);
HARDTRACEF(("Out erts_poll_wait"));
@@ -1314,7 +1314,7 @@ ErtsPollSet erts_poll_create_pollset(void)
erts_smp_mtx_init(&ps->mtx, "pollset");
erts_smp_atomic_init(&ps->interrupt, 0);
#endif
- erts_smp_atomic_init(&ps->timeout, LONG_MAX);
+ erts_smp_atomic_init(&ps->timeout, ERTS_AINT_T_MAX);
HARDTRACEF(("Out erts_poll_create_pollset"));
return ps;
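The long-to-erts_aint_t rewrites in this hunk are not cosmetic: on LLP64 platforms such as 64-bit Windows, long stays 32 bits while pointers (and thus the word the pollset's atomics actually store) are 64 bits, so casting through long would truncate. A hedged sketch of the invariant (my_aint_t stands in for erts_aint_t, whose real definition lives in the atomics headers):

    #include <assert.h>
    #include <stdint.h>

    typedef intptr_t my_aint_t;  /* stand-in for erts_aint_t */

    int main(void)
    {
        /* Holds on every platform: the atomic cell tracks pointer width. */
        assert(sizeof(my_aint_t) == sizeof(void *));
        /* The old casts relied on this instead, which fails on Win64: */
        /* assert(sizeof(long) == sizeof(void *)); */
        return 0;
    }

ERTS_AINT_T_MAX replaces LONG_MAX for the same reason: the sentinel must be the maximum of the actual cell type.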
diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h
index 4949998abc..1347eead91 100644
--- a/erts/emulator/sys/win32/erl_win_dyn_driver.h
+++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h
@@ -87,15 +87,15 @@ WDD_TYPEDEF(unsigned long, erts_alc_test, (unsigned long,
unsigned long,
unsigned long,
unsigned long));
-WDD_TYPEDEF(long, driver_binary_get_refc, (ErlDrvBinary *dbp));
-WDD_TYPEDEF(long, driver_binary_inc_refc, (ErlDrvBinary *dbp));
-WDD_TYPEDEF(long, driver_binary_dec_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(ErlDrvSInt, driver_binary_get_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(ErlDrvSInt, driver_binary_inc_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(ErlDrvSInt, driver_binary_dec_refc, (ErlDrvBinary *dbp));
WDD_TYPEDEF(ErlDrvPDL, driver_pdl_create, (ErlDrvPort));
WDD_TYPEDEF(void, driver_pdl_lock, (ErlDrvPDL));
WDD_TYPEDEF(void, driver_pdl_unlock, (ErlDrvPDL));
-WDD_TYPEDEF(long, driver_pdl_get_refc, (ErlDrvPDL));
-WDD_TYPEDEF(long, driver_pdl_inc_refc, (ErlDrvPDL));
-WDD_TYPEDEF(long, driver_pdl_dec_refc, (ErlDrvPDL));
+WDD_TYPEDEF(ErlDrvSInt, driver_pdl_get_refc, (ErlDrvPDL));
+WDD_TYPEDEF(ErlDrvSInt, driver_pdl_inc_refc, (ErlDrvPDL));
+WDD_TYPEDEF(ErlDrvSInt, driver_pdl_dec_refc, (ErlDrvPDL));
WDD_TYPEDEF(void, driver_system_info, (ErlDrvSysInfo *, size_t));
WDD_TYPEDEF(int, driver_get_now, (ErlDrvNowData *));
WDD_TYPEDEF(int, driver_monitor_process, (ErlDrvPort port,
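These typedef updates track the public driver API, whose reference-count functions now return ErlDrvSInt instead of long so the value is pointer-sized everywhere. A minimal hedged driver fragment using the new signature (keep_binary is an assumed name, not part of the diff):

    #include "erl_driver.h"

    /* Bump a binary's refc so it outlives the current callback. */
    static void keep_binary(ErlDrvBinary *bin)
    {
        ErlDrvSInt refc = driver_binary_inc_refc(bin); /* was long */
        (void) refc;    /* a driver would typically just ignore it */
    }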
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index d2449a1bdb..262f84babc 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1997-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -33,9 +33,9 @@
#ifdef ERTS_SMP
erts_smp_atomic_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 1)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
- erts_smp_atomic_set(&erts_break_requested, (long) 0)
+ erts_smp_atomic_set(&erts_break_requested, (erts_aint_t) 0)
#else
volatile int erts_break_requested = 0;
#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
diff --git a/erts/emulator/test/beam_SUITE.erl b/erts/emulator/test/beam_SUITE.erl
index 228ff15341..32ac07cb2d 100644
--- a/erts/emulator/test/beam_SUITE.erl
+++ b/erts/emulator/test/beam_SUITE.erl
@@ -20,7 +20,8 @@
-module(beam_SUITE).
-export([all/1, packed_registers/1, apply_last/1, apply_last_bif/1,
- buildo_mucho/1, heap_sizes/1, big_lists/1, fconv/1]).
+ buildo_mucho/1, heap_sizes/1, big_lists/1, fconv/1,
+ select_val/1]).
-export([applied/2]).
@@ -28,7 +29,7 @@
all(suite) ->
[packed_registers, apply_last, apply_last_bif, buildo_mucho,
- heap_sizes, big_lists].
+ heap_sizes, big_lists, select_val].
%% Verify that apply(M, F, A) is really tail recursive.
@@ -302,3 +303,19 @@ do_fconv(nil, Float) when is_float(Float) ->
Float + [];
do_fconv(tuple_literal, Float) when is_float(Float) ->
Float + {a,b}.
+
+select_val(Config) when is_list(Config) ->
+ ?line zero = do_select_val(0),
+ ?line big = do_select_val(1 bsl 64),
+ ?line integer = do_select_val(42),
+ ok.
+
+do_select_val(X) ->
+ case X of
+ 0 ->
+ zero;
+ 1 bsl 64 ->
+ big;
+ Int when is_integer(Int) ->
+ integer
+ end.
diff --git a/erts/emulator/test/beam_literals_SUITE.erl b/erts/emulator/test/beam_literals_SUITE.erl
index 75841adbfc..1eda939cf8 100644
--- a/erts/emulator/test/beam_literals_SUITE.erl
+++ b/erts/emulator/test/beam_literals_SUITE.erl
@@ -23,7 +23,8 @@
matching_bigs/1, matching_more_bigs/1,
matching_bigs_and_smalls/1, badmatch/1, case_clause/1,
receiving/1, literal_type_tests/1,
- put_list/1, fconv/1, literal_case_expression/1]).
+ put_list/1, fconv/1, literal_case_expression/1,
+ increment/1]).
-include("test_server.hrl").
@@ -32,7 +33,7 @@ all(suite) ->
matching_bigs, matching_more_bigs,
matching_bigs_and_smalls, badmatch, case_clause,
receiving, literal_type_tests,
- put_list, fconv, literal_case_expression].
+ put_list, fconv, literal_case_expression, increment].
putting(doc) -> "Test creating lists and tuples containing big number literals.";
putting(Config) when is_list(Config) ->
@@ -48,6 +49,7 @@ matching_bigs(doc) -> "Test matching of a few big number literals (in Beam,"
matching_bigs(Config) when is_list(Config) ->
a = matching1(3972907842873739),
b = matching1(-389789298378939783333333333333333333784),
+ other = matching1(3141699999999999999999999999999999999),
other = matching1(42).
matching_smalls(doc) -> "Test matching small numbers (both positive and negative).";
@@ -405,14 +407,51 @@ fconv_2(F) when is_float(F) ->
literal_case_expression(Config) when is_list(Config) ->
?line DataDir = ?config(data_dir, Config),
?line Src = filename:join(DataDir, "literal_case_expression"),
- ?line {ok,literal_case_expression=Mod,Code} = compile:file(Src, [from_asm,binary]),
+ ?line {ok,literal_case_expression=Mod,Code} =
+ compile:file(Src, [from_asm,binary]),
?line {module,Mod} = code:load_binary(Mod, Src, Code),
?line ok = Mod:x(),
?line ok = Mod:y(),
+ ?line ok = Mod:zi1(),
+ ?line ok = Mod:zi2(),
+ ?line ok = Mod:za1(),
+ ?line ok = Mod:za2(),
?line true = code:delete(Mod),
?line code:purge(Mod),
ok.
+%% Test the i_increment instruction.
+increment(Config) when is_list(Config) ->
+ %% In the 32-bit emulator, Neg32 can be represented as a small,
+ %% but -Neg32 cannot. Therefore the i_increment instruction must
+ %% not be used in the subtraction that follows (since i_increment
+ %% cannot handle a bignum literal).
+ Neg32 = -(1 bsl 27),
+ Big32 = id(1 bsl 32),
+ Result32 = (1 bsl 32) + (1 bsl 27),
+ ?line Result32 = Big32 + (1 bsl 27),
+ ?line Result32 = Big32 - Neg32,
+
+ %% Same thing, but for the 64-bit emulator.
+ Neg64 = -(1 bsl 59),
+ Big64 = id(1 bsl 64),
+ Result64 = (1 bsl 64) + (1 bsl 59),
+ ?line Result64 = Big64 + (1 bsl 59),
+ ?line Result64 = Big64 - Neg64,
+
+ %% Test error handling for the i_increment instruction.
+ Bad = id(bad),
+ ?line {'EXIT',{badarith,_}} = (catch Bad + 42),
+
+ %% Small operands, but a big result.
+ Res32 = 1 bsl 27,
+ Small32 = id(Res32-1),
+ ?line Res32 = Small32 + 1,
+ Res64 = 1 bsl 59,
+ Small64 = id(Res64-1),
+ ?line Res64 = Small64 + 1,
+ ok.
+
%% Help functions.
chksum(Term) ->
diff --git a/erts/emulator/test/beam_literals_SUITE_data/literal_case_expression.S b/erts/emulator/test/beam_literals_SUITE_data/literal_case_expression.S
index c0ffe9ab53..bfdfc079dc 100644
--- a/erts/emulator/test/beam_literals_SUITE_data/literal_case_expression.S
+++ b/erts/emulator/test/beam_literals_SUITE_data/literal_case_expression.S
@@ -1,10 +1,11 @@
{module, literal_case_expression}. %% version = 0
-{exports, [{module_info,0},{module_info,1},{x,0},{y,0}]}.
+{exports, [{module_info,0},{module_info,1},{x,0},{y,0},
+ {zi1,0},{zi2,0},{za1,0},{za2,0}]}.
{attributes, []}.
-{labels, 15}.
+{labels, 32}.
{function, x, 0, 2}.
@@ -52,6 +53,81 @@
{label,10}.
{case_end,{float,34.0000}}.
+{function, zi1, 0, 16}.
+ {label,15}.
+ {func_info,{atom,literal_case_expression},{atom,zi1},0}.
+ {label,16}.
+ {test,is_integer,{f,19},[{integer,42}]}.
+ {select_val,{integer,42},
+ {f,18},
+ {list,[{integer,42},
+ {f,17},
+ {integer,1000},
+ {f,18}]}}.
+ {label,17}.
+ {move,{atom,ok},{x,0}}.
+ return.
+ {label,18}.
+ {move,{atom,error},{x,0}}.
+ return.
+ {label,19}.
+ {case_end,{integer,42}}.
+
+{function, zi2, 0, 16}.
+ {label,20}.
+ {func_info,{atom,literal_case_expression},{atom,zi2},0}.
+ {label,21}.
+ {test,is_integer,{f,23},[{integer,42}]}.
+ {select_val,{integer,42},
+ {f,23},
+ {list,[{integer,42},
+ {f,22},
+ {integer,1000},
+ {f,23}]}}.
+ {label,22}.
+ {move,{atom,ok},{x,0}}.
+ return.
+ {label,23}.
+ {move,{atom,error},{x,0}}.
+ return.
+
+{function, za1, 0, 25}.
+ {label,24}.
+ {func_info,{atom,literal_case_expression},{atom,za1},0}.
+ {label,25}.
+ {test,is_atom,{f,28},[{atom,x}]}.
+ {select_val,{atom,x},
+ {f,27},
+ {list,[{atom,a},
+ {f,27},
+ {atom,x},
+ {f,26}]}}.
+ {label,26}.
+ {move,{atom,ok},{x,0}}.
+ return.
+ {label,27}.
+ {move,{atom,error},{x,0}}.
+ return.
+ {label,28}.
+ {case_end,{atom,x}}.
+
+{function, za2, 0, 30}.
+ {label,29}.
+ {func_info,{atom,literal_case_expression},{atom,za2},0}.
+ {label,30}.
+ {test,is_atom,{f,32},[{atom,x}]}.
+ {select_val,{atom,x},
+ {f,32},
+ {list,[{atom,a},
+ {f,32},
+ {atom,x},
+ {f,31}]}}.
+ {label,31}.
+ {move,{atom,ok},{x,0}}.
+ return.
+ {label,32}.
+ {move,{atom,error},{x,0}}.
+ return.
{function, module_info, 0, 12}.
{label,11}.
diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl
index 3d9b51d278..138a19a626 100644
--- a/erts/emulator/test/bs_construct_SUITE.erl
+++ b/erts/emulator/test/bs_construct_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2010. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -27,7 +27,7 @@
mem_leak/1, coerce_to_float/1, bjorn/1,
huge_float_field/1, huge_binary/1, system_limit/1, badarg/1,
copy_writable_binary/1, kostis/1, dynamic/1, bs_add/1,
- otp_7422/1]).
+ otp_7422/1, zero_width/1]).
-include("test_server.hrl").
@@ -36,7 +36,7 @@ all(suite) ->
not_used, in_guard, mem_leak, coerce_to_float, bjorn,
huge_float_field, huge_binary, system_limit, badarg,
copy_writable_binary, kostis, dynamic, bs_add,
- otp_7422].
+ otp_7422, zero_width].
big(1) ->
57285702734876389752897683.
@@ -786,5 +786,20 @@ otp_7422_bin(N) when N < 512 ->
end),
otp_7422_bin(N+1);
otp_7422_bin(_) -> ok.
+
+zero_width(Config) when is_list(Config) ->
+ ?line Z = id(0),
+ Small = id(42),
+ Big = id(1 bsl 128),
+ ?line <<>> = <<Small:Z>>,
+ ?line <<>> = <<Small:0>>,
+ ?line <<>> = <<Big:Z>>,
+ ?line <<>> = <<Big:0>>,
+
+ ?line {'EXIT',{badarg,_}} = (catch <<not_a_number:0>>),
+ ?line {'EXIT',{badarg,_}} = (catch <<(id(not_a_number)):Z>>),
+ ?line {'EXIT',{badarg,_}} = (catch <<(id(not_a_number)):0>>),
+
+ ok.
id(I) -> I.
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index 79252d0593..f26455e6da 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -39,6 +39,7 @@
atom_roundtrip/1,
atom_roundtrip_r12b/1,
contended_atom_cache_entry/1,
+ bad_dist_structure/1,
bad_dist_ext/1,
bad_dist_ext_receive/1,
bad_dist_ext_process_info/1,
@@ -61,6 +62,7 @@ all(suite) -> [
stop_dist, trap_bif, dist_auto_connect, dist_parallel_send,
atom_roundtrip, atom_roundtrip_r12b,
contended_atom_cache_entry,
+ bad_dist_structure,
bad_dist_ext
].
@@ -174,7 +176,7 @@ bulk_sendsend2(Terms, BinSize, BusyBufSize) ->
?line {ok, NodeRecv} = start_node(bulk_receiver),
?line Recv = spawn(NodeRecv, erlang, apply, [fun receiver/2, [0, 0]]),
?line Bin = list_to_binary(lists:duplicate(BinSize*1024, 253)),
- ?line Size = Terms*size(Bin),
+ %%?line Size = Terms*size(Bin),
%% SLF LEFT OFF HERE.
%% When the caller uses small hunks, like 4k via
@@ -187,7 +189,7 @@ bulk_sendsend2(Terms, BinSize, BusyBufSize) ->
?line {ok, NodeSend} = start_node(bulk_sender, "+zdbbl " ++ integer_to_list(BusyBufSize)),
?line _Send = spawn(NodeSend, erlang, apply, [fun sendersender/4, [self(), Recv, Bin, Terms]]),
- ?line {Elapsed, {TermsN, SizeN}, MonitorCount} =
+ ?line {Elapsed, {_TermsN, SizeN}, MonitorCount} =
receive {sendersender, BigRes} ->
BigRes
end,
@@ -227,7 +229,7 @@ sendersender3(To, _Bin, 0, SendDone, MonitorCount) ->
ok
end,
receive
- {monitor, _Pid, _Type, _Info} = M ->
+ {monitor, _Pid, _Type, _Info} ->
sendersender3(To, _Bin, 0, SendDone, MonitorCount + 1)
after 0 ->
if SendDone ->
@@ -522,7 +524,7 @@ sink1() ->
lost_exit(doc) ->
"Test that EXIT and DOWN messages send to another node are not lost if "
- "if the distribution port is busy.";
+ "the distribution port is busy.";
lost_exit(Config) when is_list(Config) ->
?line {ok, Node} = start_node(lost_exit),
@@ -1143,8 +1145,7 @@ contended_atom_cache_entry(Config) when is_list(Config) ->
?line {ok, SNode} = start_node(Config),
?line {ok, RNode} = start_node(Config),
?line Success = make_ref(),
- ?line Mstr
- = spawn_link(
+ ?line spawn_link(
SNode,
fun () ->
erts_debug:set_internal_state(available_internal_state,
@@ -1201,13 +1202,13 @@ contended_atom_cache_entry(Config) when is_list(Config) ->
?line stop_node(RNode),
?line ok.
-send_ref_atom(To, Ref, Atom, 0) ->
+send_ref_atom(_To, _Ref, _Atom, 0) ->
ok;
send_ref_atom(To, Ref, Atom, N) ->
To ! {Ref, Atom},
send_ref_atom(To, Ref, Atom, N-1).
-receive_ref_atom(Ref, Atom, 0) ->
+receive_ref_atom(_Ref, _Atom, 0) ->
ok;
receive_ref_atom(Ref, Atom, N) ->
receive
@@ -1242,7 +1243,7 @@ unwanted_cixs() ->
nodes()).
-get_conflicting_atoms(CIX, 0) ->
+get_conflicting_atoms(_CIX, 0) ->
[];
get_conflicting_atoms(CIX, N) ->
{A, B, C} = now(),
@@ -1256,6 +1257,186 @@ get_conflicting_atoms(CIX, N) ->
get_conflicting_atoms(CIX, N)
end.
+-define(COOKIE, '').
+-define(DOP_LINK, 1).
+-define(DOP_SEND, 2).
+-define(DOP_EXIT, 3).
+-define(DOP_UNLINK, 4).
+-define(DOP_REG_SEND, 6).
+-define(DOP_GROUP_LEADER, 7).
+-define(DOP_EXIT2, 8).
+
+-define(DOP_SEND_TT, 12).
+-define(DOP_EXIT_TT, 13).
+-define(DOP_REG_SEND_TT, 16).
+-define(DOP_EXIT2_TT, 18).
+
+-define(DOP_MONITOR_P, 19).
+-define(DOP_DEMONITOR_P, 20).
+-define(DOP_MONITOR_P_EXIT, 21).
+
+start_monitor(Offender,P) ->
+ ?line Parent = self(),
+ ?line Q = spawn(Offender,
+ fun () ->
+ Ref = erlang:monitor(process,P),
+ Parent ! {self(),ref,Ref},
+ receive
+ just_stay_alive -> ok
+ end
+ end),
+ ?line Ref = receive
+ {Q,ref,R} ->
+ R
+ after 5000 ->
+ error
+ end,
+ io:format("Ref is ~p~n",[Ref]),
+ ok.
+start_link(Offender,P) ->
+ ?line Parent = self(),
+ ?line Q = spawn(Offender,
+ fun () ->
+ process_flag(trap_exit,true),
+ link(P),
+ Parent ! {self(),ref,P},
+ receive
+ just_stay_alive -> ok
+ end
+ end),
+ ?line Ref = receive
+ {Q,ref,R} ->
+ R
+ after 5000 ->
+ error
+ end,
+ io:format("Ref is ~p~n",[Ref]),
+ ok.
+
+bad_dist_structure(suite) ->
+ [];
+bad_dist_structure(doc) ->
+ ["Test dist messages with valid structure (binary to term ok) but malformed"
+ "control content"];
+bad_dist_structure(Config) when is_list(Config) ->
+ %process_flag(trap_exit,true),
+ ODog = ?config(watchdog, Config),
+ ?t:timetrap_cancel(ODog),
+ Dog = ?t:timetrap(?t:seconds(15)),
+
+ ?line {ok, Offender} = start_node(bad_dist_structure_offender),
+ ?line {ok, Victim} = start_node(bad_dist_structure_victim),
+ ?line start_node_monitors([Offender,Victim]),
+ ?line Parent = self(),
+ ?line P = spawn(Victim,
+ fun () ->
+ process_flag(trap_exit,true),
+ Parent ! {self(), started},
+ receive check_msgs -> ok end,
+ bad_dist_struct_check_msgs([one,
+ two]),
+ Parent ! {self(), messages_checked},
+ receive done -> ok end
+ end),
+ ?line receive {P, started} -> ok end,
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line verify_up(Offender, Victim),
+ ?line true = lists:member(Offender, rpc:call(Victim, erlang, nodes, [])),
+ ?line start_monitor(Offender,P),
+ ?line P ! one,
+ ?line send_bad_structure(Offender, P,{?DOP_MONITOR_P_EXIT,'replace',P,normal},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_monitor(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_MONITOR_P_EXIT,'replace',P,normal,normal},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_link(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_LINK},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_link(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_UNLINK,'replace'},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_link(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_UNLINK,'replace',make_ref()},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_link(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_UNLINK,make_ref(),P},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_link(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_UNLINK,normal,normal},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_monitor(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_MONITOR_P,'replace',P},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_monitor(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_MONITOR_P,'replace',P,normal},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_monitor(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_DEMONITOR_P,'replace',P},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line start_monitor(Offender,P),
+ ?line send_bad_structure(Offender, P,{?DOP_DEMONITOR_P,'replace',P,normal},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT,'replace',P},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT,make_ref(),normal,normal},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT_TT,'replace',token,P},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT_TT,make_ref(),token,normal,normal},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT2,'replace',P},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT2,make_ref(),normal,normal},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT2_TT,'replace',token,P},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_EXIT2_TT,make_ref(),token,normal,normal},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_GROUP_LEADER,'replace'},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_GROUP_LEADER,'replace','atomic'},2),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_GROUP_LEADER,'replace',P},0),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_REG_SEND_TT,'replace','',name},2,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_REG_SEND_TT,'replace','',name,token},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_REG_SEND,'replace',''},2,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_REG_SEND,'replace','',P},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_REG_SEND,'replace','',name},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_REG_SEND,'replace','',name,{token}},2,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_SEND_TT,'',P},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_SEND_TT,'',name,token},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_SEND,''},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_SEND,'',name},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line send_bad_structure(Offender, P,{?DOP_SEND,'',P,{token}},0,{message}),
+ ?line pong = rpc:call(Victim, net_adm, ping, [Offender]),
+ ?line P ! two,
+ ?line P ! check_msgs,
+ ?line receive
+ {P, messages_checked} -> ok
+ after 5000 ->
+ exit(victim_is_dead)
+ end,
+
+ ?line {message_queue_len, 0}
+ = rpc:call(Victim, erlang, process_info, [P, message_queue_len]),
+
+ ?line unlink(P),
+ ?line P ! done,
+ ?line stop_node(Offender),
+ ?line stop_node(Victim),
+ ?t:timetrap_cancel(Dog),
+ ok.
bad_dist_ext(doc) -> [];
bad_dist_ext(suite) ->
@@ -1483,6 +1664,22 @@ bad_dist_ext_connection_id(Config) when is_list(Config) ->
?line stop_node(Victim).
+bad_dist_struct_check_msgs([]) ->
+ receive
+ Msg ->
+ exit({unexpected_message, Msg})
+ after 0 ->
+ ok
+ end;
+bad_dist_struct_check_msgs([M|Ms]) ->
+ receive
+ {'EXIT',_,_} = EM ->
+ io:format("Ignoring exit message: ~p~n",[EM]),
+ bad_dist_struct_check_msgs([M|Ms]);
+ Msg ->
+ M = Msg,
+ bad_dist_struct_check_msgs(Ms)
+ end.
bad_dist_ext_check_msgs([]) ->
receive
Msg ->
@@ -1497,24 +1694,6 @@ bad_dist_ext_check_msgs([M|Ms]) ->
bad_dist_ext_check_msgs(Ms)
end.
--define(COOKIE, '').
--define(DOP_LINK, 1).
--define(DOP_SEND, 2).
--define(DOP_EXIT, 3).
--define(DOP_UNLINK, 4).
--define(DOP_NODE_LINK, 5).
--define(DOP_REG_SEND, 6).
--define(DOP_GROUP_LEADER, 7).
--define(DOP_EXIT2, 8).
-
--define(DOP_SEND_TT, 12).
--define(DOP_EXIT_TT, 13).
--define(DOP_REG_SEND_TT, 16).
--define(DOP_EXIT2_TT, 18).
-
--define(DOP_MONITOR_P, 19).
--define(DOP_DEMONITOR_P, 20).
--define(DOP_MONITOR_P_EXIT, 21).
dport_reg_send(Node, Name, Msg) ->
DPrt = case dport(Node) of
@@ -1546,6 +1725,39 @@ dport_send(To, Msg) ->
?COOKIE,
To}),
dmsg_ext(Msg)]).
+send_bad_structure(Offender,Victim,Bad,WhereToPutSelf) ->
+ send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,[]).
+send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) ->
+ Parent = self(),
+ Done = make_ref(),
+ spawn(Offender,
+ fun () ->
+ Node = node(Victim),
+ pong = net_adm:ping(Node),
+ DPrt = dport(Node),
+ Bad1 = case WhereToPutSelf of
+ 0 ->
+ Bad;
+ N when N > 0 ->
+ setelement(N,Bad,self())
+ end,
+ DData = [dmsg_hdr(),
+ dmsg_ext(Bad1)] ++
+ case PayLoad of
+ [] -> [];
+ _Other -> [dmsg_ext(PayLoad)]
+ end,
+ port_command(DPrt, DData),
+ Parent ! {DData,Done}
+ end),
+ receive
+ {WhatSent,Done} ->
+ io:format("Offender sent ~p~n",[WhatSent]),
+ ok
+ after 5000 ->
+ exit(unable_to_send)
+ end.
+
%% send_bad_msgs():
%% Send a valid distribution header and control message
@@ -1629,10 +1841,10 @@ dmsg_bad_hdr() ->
255]. % 255 atom references
-dmsg_fake_hdr1() ->
- A = <<"fake header atom 1">>,
- [131, % Version Magic
- $D, 1, 16#8, 0, size(A), A]. % Fake header
+%% dmsg_fake_hdr1() ->
+%% A = <<"fake header atom 1">>,
+%% [131, % Version Magic
+%% $D, 1, 16#8, 0, size(A), A]. % Fake header
dmsg_fake_hdr2() ->
A1 = <<"fake header atom 1">>,
@@ -1817,7 +2029,7 @@ flush_node_changes() ->
node_monitor_loop(Master) ->
receive
- {nodeup, Node, InfoList} = Msg ->
+ {nodeup, Node, _InfoList} = Msg ->
Master ! {nodeup, node(), Node},
?t:format("~p ~p: ~p~n", [node(), erlang:now(), Msg]),
node_monitor_loop(Master);
@@ -1854,9 +2066,9 @@ verify_no_down(A, B) ->
ok
end.
-verify_down(A, B) ->
- receive {nodedown, A, B, _} -> ok end,
- receive {nodedown, B, A, _} -> ok end.
+%% verify_down(A, B) ->
+%% receive {nodedown, A, B, _} -> ok end,
+%% receive {nodedown, B, A, _} -> ok end.
verify_down(A, ReasonA, B, ReasonB) ->
receive
@@ -1876,11 +2088,11 @@ from(H, [H | T]) -> T;
from(H, [_ | T]) -> from(H, T);
from(_, []) -> [].
-fun_spawn(Fun) ->
- fun_spawn(Fun, []).
+%% fun_spawn(Fun) ->
+%% fun_spawn(Fun, []).
-fun_spawn(Fun, Args) ->
- spawn_link(erlang, apply, [Fun, Args]).
+%% fun_spawn(Fun, Args) ->
+%% spawn_link(erlang, apply, [Fun, Args]).
long_or_short() ->
diff --git a/erts/emulator/test/erl_link_SUITE.erl b/erts/emulator/test/erl_link_SUITE.erl
index 542c8dffbe..11a7a61586 100644
--- a/erts/emulator/test/erl_link_SUITE.erl
+++ b/erts/emulator/test/erl_link_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2010. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -1050,7 +1050,6 @@ stop_node(Node) ->
-define(DOP_SEND, 2).
-define(DOP_EXIT, 3).
-define(DOP_UNLINK, 4).
--define(DOP_NODE_LINK, 5).
-define(DOP_REG_SEND, 6).
-define(DOP_GROUP_LEADER, 7).
-define(DOP_EXIT2, 8).
diff --git a/erts/emulator/test/erts_debug_SUITE.erl b/erts/emulator/test/erts_debug_SUITE.erl
index e60a999df1..934a1b10a4 100644
--- a/erts/emulator/test/erts_debug_SUITE.erl
+++ b/erts/emulator/test/erts_debug_SUITE.erl
@@ -21,10 +21,10 @@
-include("test_server.hrl").
-export([all/1,init_per_testcase/2,fin_per_testcase/2,
- flat_size/1,flat_size_big/1,df/1]).
+ flat_size/1,flat_size_big/1,df/1,instructions/1]).
all(suite) ->
- [flat_size,flat_size_big,df].
+ [flat_size,flat_size_big,df,instructions].
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Dog=?t:timetrap(?t:minutes(2)),
@@ -70,3 +70,8 @@ df(Config) when is_list(Config) ->
pps() ->
{erlang:ports()}.
+
+instructions(Config) when is_list(Config) ->
+ ?line Is = erts_debug:instructions(),
+ ?line _ = [list_to_atom(I) || I <- Is],
+ ok.
diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops
index de19a2e35b..e7c57142c0 100755
--- a/erts/emulator/utils/beam_makeops
+++ b/erts/emulator/utils/beam_makeops
@@ -27,6 +27,7 @@ my $outdir = "."; # Directory for output files.
my $verbose = 0;
my $hot = 1;
my $num_file_opcodes = 0;
+my $wordsize = 32;
# This is shift counts and mask for the packer.
my $WHOLE_WORD = '';
@@ -36,12 +37,20 @@ my @pack_mask;
$pack_instr[2] = ['6', 'i'];
$pack_instr[3] = ['0', '0', 'i'];
+$pack_instr[4] = ['6', '6', '6', 'i']; # Only for 64 bit wordsize
$pack_shift[2] = ['0', 'BEAM_LOOSE_SHIFT'];
$pack_shift[3] = ['0', 'BEAM_TIGHT_SHIFT', '(2*BEAM_TIGHT_SHIFT)'];
+$pack_shift[4] = ['0', 'BEAM_LOOSE_SHIFT', # Only for 64 bit wordsize
+ '(2*BEAM_LOOSE_SHIFT)',
+ '(3*BEAM_LOOSE_SHIFT)'];
$pack_mask[2] = ['BEAM_LOOSE_MASK', $WHOLE_WORD];
$pack_mask[3] = ['BEAM_TIGHT_MASK', 'BEAM_TIGHT_MASK', 'BEAM_TIGHT_MASK'];
+$pack_mask[4] = ['BEAM_LOOSE_MASK', # Only for 64 bit wordsize
+ 'BEAM_LOOSE_MASK',
+ 'BEAM_LOOSE_MASK',
+ $WHOLE_WORD];
# There are two types of instructions: generic and specific.
# The generic instructions are those generated by the Beam compiler.
@@ -80,6 +89,8 @@ my %cold_code;
my @unnumbered_generic;
my %unnumbered;
+my %is_transformed;
+
#
# Code transformations.
#
@@ -118,7 +129,8 @@ my %arg_size = ('r' => 0, # x(0) - x register zero
't' => 1, # untagged integer -- can be packed
'b' => 1, # pointer to bif
'A' => 1, # arity value
- 'P' => 1, # byte offset into tuple
+ 'P' => 1, # byte offset into tuple or stack
+ 'Q' => 1, # like 'P', but packable
'h' => 1, # character
'l' => 1, # float reg
'q' => 1, # literal term
@@ -157,6 +169,7 @@ my @tag_type;
$type_bit{'U'} = $type_bit{'u'};
$type_bit{'e'} = $type_bit{'u'};
$type_bit{'P'} = $type_bit{'u'};
+ $type_bit{'Q'} = $type_bit{'u'};
}
#
@@ -169,6 +182,7 @@ while (@ARGV && $ARGV[0] =~ /^-(.*)/) {
($target = \&emulator_output), next if /^emulator/;
($target = \&compiler_output), next if /^compiler/;
($outdir = shift), next if /^outdir/;
+ ($wordsize = shift), next if /^wordsize/;
($verbose = 1), next if /^v/;
die "$0: Bad option: -$_\n";
}
@@ -474,8 +488,9 @@ sub emulator_output {
$gen_transform_offset{$key} : -1;
my($spec_op) = $gen_to_spec{$key};
my($num_specific) = $num_specific{$key};
- defined $spec_op or $tr != -1 or
+ defined $spec_op or
$obsolete[$gen_opnum{$name,$arity}] or
+ $is_transformed{$name,$arity} or
error("instruction $key has no specific instruction");
$spec_op = -1 unless defined $spec_op;
&init_item($name, $arity, $spec_op, $num_specific, $tr, $min_window{$key});
@@ -498,12 +513,14 @@ sub emulator_output {
print "#define NUM_SPECIFIC_OPS ", scalar(@op_to_name), "\n";
print "\n";
print "#ifdef ARCH_64\n";
+ print "# define BEAM_WIDE_MASK 0xFFFFUL\n";
print "# define BEAM_LOOSE_MASK 0x1FFFUL\n";
print "#if HALFWORD_HEAP\n";
print "# define BEAM_TIGHT_MASK 0x1FFCUL\n";
print "#else\n";
print "# define BEAM_TIGHT_MASK 0x1FF8UL\n";
print "#endif\n";
+ print "# define BEAM_WIDE_SHIFT 32\n";
print "# define BEAM_LOOSE_SHIFT 16\n";
print "# define BEAM_TIGHT_SHIFT 16\n";
print "#else\n";
@@ -796,6 +813,7 @@ sub basic_generator {
'I' => 1,
't' => 1,
'P' => 1,
+ 'Q' => 1,
);
# Pick up the macro to use and its flags (if any).
@@ -916,7 +934,18 @@ sub basic_generator {
$var_decls .= "BeamInstr tmp_packed2;"
if $macro_code =~ /tmp_packed2/;
if ($flags =~ /-nonext/) {
- $code = "$macro_code\n";
+ $code = join("\n",
+ "{ $var_decls",
+ $macro_code,
+ "}");
+ } elsif ($flags =~ /-goto:(\S*)/) {
+ my $goto = $1;
+ $code = join("\n",
+ "{ $var_decls",
+ $macro_code,
+ "I += $size + 1;",
+ "goto $goto;",
+ "}");
} else {
$code = join("\n",
"{ $var_decls",
@@ -935,18 +964,31 @@ sub basic_generator {
sub do_pack {
my(@args) = @_;
- my($i);
my($packable_args) = 0;
+ my @is_packable; # Packability (boolean) for each argument.
+ my $wide_packing = 0;
#
# Count the number of packable arguments. If we encounter any 's' or 'd'
# arguments, packing is not possible.
#
- for ($i = 0; $i < @args; $i++) {
- if ($args[$i] =~ /[xyt]/) {
+ my $packable_types = "xytQ";
+ foreach my $arg (@args) {
+ if ($arg =~ /^[$packable_types]/) {
$packable_args++;
- } elsif ($args[$i] =~ /[sd]/) {
+ push @is_packable, 1;
+ } elsif ($arg =~ /^I/ and $wordsize == 64 and $packable_args < 2) {
+ $wide_packing = 1;
+ push @is_packable, 1;
+ if (++$packable_args == 2) {
+ # We can only pack two arguments. Turn off packing
+ # for the rest of the arguments.
+ $packable_types = "\xFF";
+ }
+ } elsif ($arg =~ /^[sd]/) {
return ('', '', @args);
+ } else {
+ push @is_packable, 0;
}
}
@@ -962,10 +1004,27 @@ sub do_pack {
# beginning).
my($up) = ''; # Pack commands (storing back while
# moving forward).
- my($args_per_word) = $packable_args < 4 ? $packable_args : 2;
- my(@shift) = @{$pack_shift[$args_per_word]};
- my(@mask) = @{$pack_mask[$args_per_word]};
- my(@pack_instr) = @{$pack_instr[$args_per_word]};
+ my $args_per_word;
+ if ($packable_args < 4 or $wordsize == 64) {
+ $args_per_word = $packable_args;
+ } else {
+ # 4 packable arguments, 32-bit wordsize. Need 2 words.
+ $args_per_word = 2;
+ }
+
+ my @shift;
+ my @mask;
+ my @instr;
+
+ if ($wide_packing) {
+ @shift = ('0', 'BEAM_WIDE_SHIFT');
+ @mask = ('BEAM_WIDE_MASK', $WHOLE_WORD);
+ @instr = ('w', 'i');
+ } else {
+ @shift = @{$pack_shift[$args_per_word]};
+ @mask = @{$pack_mask[$args_per_word]};
+ @instr = @{$pack_instr[$args_per_word]};
+ }
#
# Now generate the packing instructions. One complication is that
@@ -979,10 +1038,10 @@ sub do_pack {
my($ap) = 0; # Argument number within word.
my($tmpnum) = 1; # Number of temporary variable.
my($expr) = '';
- for ($i = 0; $i < @args; $i++) {
+ for (my $i = 0; $i < @args; $i++) {
my($reg) = $args[$i];
my($this_size) = $arg_size{$reg};
- if ($reg =~ /[xyt]/) {
+ if ($is_packable[$i]) {
$this_size = 0;
$did_some_packing = 1;
@@ -993,7 +1052,7 @@ sub do_pack {
$this_size = 1;
}
- $down = "$pack_instr[$ap]$down";
+ $down = "$instr[$ap]$down";
my($unpack) = &make_unpack($tmpnum, $shift[$ap], $mask[$ap]);
$args[$i] = "pack:$this_size:$reg" . "b($unpack)";
@@ -1103,6 +1162,10 @@ sub compile_transform {
if ($obsolete[$gen_opnum{$name,$arity}]) {
error("obsolete function must not be used in transformations");
}
+
+ if ($src) {
+ $is_transformed{$name,$arity} = 1;
+ }
[$name,$arity,@ops];
}
@@ -1291,13 +1354,28 @@ sub tr_gen_from {
my($var, $type, $type_val, $cond, $val) = @$op;
if ($type ne '' && $type ne '*') {
- my($types) = '';
- my($type_mask) = 0;
- foreach (split('', $type)) {
- $types .= "$_ ";
- $type_mask |= $type_bit{$_};
+ #
+ # The is_bif, is_not_bif, and is_func instructions have
+ # their own built-in type test and don't need to
+ # be guarded with a type test instruction.
+ #
+ unless ($cond eq 'is_bif' or
+ $cond eq 'is_not_bif' or
+ $cond eq 'is_func') {
+ my($types) = '';
+ my($type_mask) = 0;
+ foreach (split('', $type)) {
+ $types .= "$_ ";
+ $type_mask |= $type_bit{$_};
+ }
+ if ($cond ne 'is_eq') {
+ push(@code, &make_op($types, 'is_type', $type_mask));
+ } else {
+ $cond = '';
+ push(@code, &make_op($types, 'is_type_eq',
+ $type_mask, $val));
+ }
}
- push(@code, &make_op($types, 'is_type', $type_mask));
}
if ($cond eq 'is_func') {
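The new wide-packing path above is easiest to see with concrete numbers: on a 64-bit word, two packable operands share one BeamInstr, the first in the low 32 bits (recovered with BEAM_WIDE_MASK) and the second in the high 32 bits (recovered with a shift by BEAM_WIDE_SHIFT, no mask needed). A hedged C sketch of the pack/unpack round trip (pack2 is illustrative; the real packing is emitted by the generated 'w'/'i' pack instructions):

    #include <assert.h>
    #include <stdint.h>

    #define BEAM_WIDE_SHIFT 32
    #define BEAM_WIDE_MASK  0xFFFFFFFFUL

    typedef uint64_t BeamInstr;      /* simplified stand-in */

    static BeamInstr pack2(uint32_t first, uint32_t second)
    {
        return ((BeamInstr) second << BEAM_WIDE_SHIFT) | first;
    }

    int main(void)
    {
        BeamInstr w = pack2(1000, 7);
        assert((w & BEAM_WIDE_MASK) == 1000);  /* low operand */
        assert((w >> BEAM_WIDE_SHIFT) == 7);   /* high operand */
        return 0;
    }

This is also why wide packing stops at two arguments (the code turns packing off once $packable_args reaches 2): a third 32-bit operand would not fit.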
diff --git a/erts/emulator/utils/count b/erts/emulator/utils/count
new file mode 100755
index 0000000000..617f5c25e8
--- /dev/null
+++ b/erts/emulator/utils/count
@@ -0,0 +1,127 @@
+%% -*- erlang -*-
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-mode(compile).
+
+main(_) ->
+ DisDir = "./dis",
+ ok = filelib:ensure_dir(filename:join(DisDir, "dummy")),
+ io:format("Dissambling to ~s\n", [DisDir]),
+ ok = file:set_cwd(DisDir),
+ Path = code:get_path() -- ["."],
+ Beams0 = [filelib:wildcard(filename:join(Dir, "*.beam")) ||
+ Dir <- Path],
+ Beams = lists:append(Beams0),
+ Mods0 = [list_to_atom(filename:rootname(filename:basename(F))) ||
+ F <- Beams],
+ Mods = lists:usort(Mods0),
+ start_sem(),
+ Ps = [begin
+ {_,Ref} = spawn_monitor(fun() -> count(M) end),
+ Ref
+ end || M <- Mods],
+ [put(list_to_atom(I), 0) || I <- erts_debug:instructions()],
+ Res = wait_for_all(Ps, 1),
+ OutFile = "count",
+ {ok,Out} = file:open(OutFile, [write]),
+ [io:format(Out, "~s ~p\n", [I,C]) || {I,C} <- Res],
+ ok = file:close(Out),
+ io:format("\nResult written to ~s\n",
+ [filename:join(DisDir, OutFile)]),
+ ok.
+
+wait_for_all([], _) ->
+ lists:reverse(lists:keysort(2, get()));
+wait_for_all([_|_]=Ps, I) ->
+ receive
+ {'DOWN',Ref,process,_,Result} ->
+ io:format("\r~p", [I]),
+ [increment(Key, Count) || {Key,Count} <- Result],
+ wait_for_all(Ps -- [Ref], I+1)
+ end.
+
+count(M) ->
+ down(),
+ erts_debug:df(M),
+ {ok,Fd} = file:open(atom_to_list(M) ++ ".dis", [read,raw]),
+ count_is(Fd),
+ ok = file:close(Fd),
+ exit(get()).
+
+count_is(Fd) ->
+ case file:read_line(Fd) of
+ {ok,Line} ->
+ count_instr(Line),
+ count_is(Fd);
+ eof ->
+ ok
+ end.
+
+count_instr([$\s|T]) ->
+ count_instr_1(T, []);
+count_instr([_|T]) ->
+ count_instr(T);
+count_instr([]) ->
+ %% Empty line.
+ ok.
+
+count_instr_1([$\s|_], Acc) ->
+ Instr = list_to_atom(lists:reverse(Acc)),
+ increment(Instr, 1);
+count_instr_1([H|T], Acc) ->
+ count_instr_1(T, [H|Acc]).
+
+increment(Key, Inc) ->
+ case get(Key) of
+ undefined ->
+ put(Key, Inc);
+ Count ->
+ put(Key, Count+Inc)
+ end.
+
+%%%
+%%% Counting semaphore to limit the number of processes that
+%%% can run concurrently.
+%%%
+
+down() ->
+ sem ! {down,self()},
+ receive
+ sem_taken -> ok
+ end.
+
+start_sem() ->
+ spawn(fun() ->
+ register(sem, self()),
+ process_flag(trap_exit, true),
+ do_sem(erlang:system_info(schedulers)+1) end).
+
+do_sem(0) ->
+ receive
+ {'EXIT',_,_} ->
+ do_sem(1)
+ end;
+do_sem(C) ->
+ receive
+ {down,Pid} ->
+ link(Pid),
+ Pid ! sem_taken,
+ do_sem(C-1)
+ end.
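The sem process at the end of the script is a counting semaphore built from messages and links: down() blocks until a slot is free, and a slot is returned when the worker exits (do_sem traps the 'EXIT'). As a cross-check of the pattern (not the script's mechanism), the same throttle in C with POSIX semaphores, where the slot count of 4 is an arbitrary stand-in for schedulers + 1:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stddef.h>

    static sem_t slots;

    static void *worker(void *arg)
    {
        (void) arg;
        sem_wait(&slots);    /* like down(): take a slot or block */
        /* ... disassemble one module (not shown) ... */
        sem_post(&slots);    /* the script instead frees on 'EXIT' */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        sem_init(&slots, 0, 4);
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
    }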
diff --git a/erts/epmd/src/epmd_srv.c b/erts/epmd/src/epmd_srv.c
index ef471a473a..3499ab2934 100644
--- a/erts/epmd/src/epmd_srv.c
+++ b/erts/epmd/src/epmd_srv.c
@@ -2,7 +2,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2010. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -157,8 +157,10 @@ void run(EpmdVars *g)
dbg_printf(g,2,"starting");
- listen(listensock, SOMAXCONN);
-
+ if (listen(listensock, SOMAXCONN) < 0) {
+ dbg_perror(g,"failed to listen on socket");
+ epmd_cleanup_exit(g,1);
+ }
FD_ZERO(&g->orig_read_mask);
FD_SET(listensock,&g->orig_read_mask);
diff --git a/erts/include/internal/ethr_atomics.h b/erts/include/internal/ethr_atomics.h
new file mode 100644
index 0000000000..1caf4d0567
--- /dev/null
+++ b/erts/include/internal/ethr_atomics.h
@@ -0,0 +1,726 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: The ethread atomic API
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_ATOMIC_H__
+#define ETHR_ATOMIC_H__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATOMIC_PROTOTYPES__
+#endif
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * No native atomic implementation available. :(
+ * Use fallback...
+ */
+typedef ethr_sint32_t ethr_atomic32_t;
+typedef ethr_sint_t ethr_atomic_t;
+#else
+/*
+ * Map ethread native atomics to ethread API atomics.
+ *
+ * We do at least have a native atomic implementation that
+ * can handle integers of a size larger than or equal to
+ * the size of pointers.
+ */
+
+/* -- Pointer size atomics -- */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+#if ETHR_SIZEOF_PTR == 8
+# if defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic64_addr
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#elif ETHR_SIZEOF_PTR == 4
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic32_addr
+# ifdef ETHR_HAVE_NATIVE_ATOMIC32
+typedef ethr_native_atomic32_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NATMC_T__ ethr_native_atomic64_t
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#endif
+
+/* -- 32-bit atomics -- */
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint32_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint64_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
+
+#endif
+
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+ethr_sint_t *ethr_atomic_addr(ethr_atomic_t *);
+void ethr_atomic_init(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_set(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_add(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+
+ethr_sint32_t *ethr_atomic32_addr(ethr_atomic32_t *);
+void ethr_atomic32_init(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_set(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read(ethr_atomic32_t *);
+void ethr_atomic32_inc(ethr_atomic32_t *);
+void ethr_atomic32_dec(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_add_read(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_add(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_band(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_bor(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_xchg(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_acqb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read_acqb(ethr_atomic32_t *);
+void ethr_atomic32_set_relb(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_dec_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+#endif
+
+int ethr_init_atomics(void);
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * Fallbacks for atomics used in absence of a native implementation.
+ */
+
+#define ETHR_ATOMIC_ADDR_BITS 10
+#define ETHR_ATOMIC_ADDR_SHIFT 6
+
+typedef struct {
+ union {
+ ethr_spinlock_t lck;
+ char buf[ETHR_CACHE_LINE_SIZE];
+ } u;
+} ethr_atomic_protection_t;
+
+extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \
+(&ethr_atomic_protection__[((((ethr_uint_t) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
+
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
+do { \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
+ { EXPS; } \
+ ethr_spin_unlock(slp__); \
+} while (0)
+
+#endif
+
+/*
+ * --- Pointer size atomics ---------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_addr)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t *) ETHR_NATMC_ADDR_FUNC__(var);
+#else
+ return (ethr_sint_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, ethr_sint_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) i);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var, ethr_sint_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var,
+ (ETHR_NAINT_T__) new);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
+}
+
+/*
+ * Important memory barrier requirements.
+ *
+ * The following atomic operations *must* supply a memory barrier of
+ * at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var,
+ ethr_sint_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
+
+/*
+ * --- 32-bit atomics ---------------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_addr)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic32_addr(var);
+#else
+ return (ethr_sint32_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_init)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add)(ethr_atomic32_t *var,
+ ethr_sint32_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add_read)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) i);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_band)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_bor)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return
+ (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var,
+ (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_xchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var,
+ (ETHR_NAINT32_T__) new);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
+}
+
+/*
+ * Important memory barrier requirements.
+ *
+ * The following atomic operations *must* each supply a memory barrier
+ * of at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_acqb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+
+#endif
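When no native implementation is available, the ETHR_ATOMIC_OP_FALLBACK_IMPL__ macro used throughout this header serializes each operation through a spinlock chosen by hashing the atomic's address, so independent atomics rarely contend on the same lock. A minimal sketch of that address-striping idea, assuming C11 <stdatomic.h>; the fb_* names are illustrative, and the real ethread table pads each lock slot out to a full cache line:

    #include <stdatomic.h>
    #include <stdint.h>

    #define FB_ADDR_BITS  10                 /* 1024 lock slots, as in ethread */
    #define FB_ADDR_SHIFT 6                  /* 64-byte cache-line granularity */

    /* Zero-initialized static atomic_flag is clear on mainstream ABIs;
     * strictly portable code would use ATOMIC_FLAG_INIT per element. */
    static atomic_flag fb_locks[1 << FB_ADDR_BITS];

    static atomic_flag *fb_lock_for(void *addr)
    {
        uintptr_t slot = (((uintptr_t) addr) >> FB_ADDR_SHIFT)
                         & ((1u << FB_ADDR_BITS) - 1);
        return &fb_locks[slot];
    }

    static long fb_add_read(long *var, long incr)
    {
        atomic_flag *lck = fb_lock_for(var);
        long res;
        while (atomic_flag_test_and_set_explicit(lck, memory_order_acquire))
            ;                                /* spin until the slot is free */
        res = (*var += incr);                /* plain operation under the lock */
        atomic_flag_clear_explicit(lck, memory_order_release);
        return res;
    }

Striping trades memory for scalability: two atomics only contend when their addresses hash to the same slot, and the shift discards the low bits so neighbours in one cache line share a lock.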
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index 01855864e3..fadaf1e2a4 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -78,13 +78,13 @@
# error Need a qlock implementation
#endif
-#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
-#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
-#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+#define ETHR_RWMTX_W_FLG__ (((ethr_sint32_t) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((ethr_sint32_t) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((ethr_sint32_t) 1) << 29)
/* frequent read kind */
-#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
-#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((long) 1) << 27)
+#define ETHR_RWMTX_R_FLG__ (((ethr_sint32_t) 1) << 28)
+#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((ethr_sint32_t) 1) << 27)
#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)
/* normal kind */
@@ -106,28 +106,28 @@
#endif
#define ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(MTX) \
- ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic_read(&(MTX)->mtxb.flgs))
+ ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic32_read(&(MTX)->mtxb.flgs))
struct ethr_mutex_base_ {
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
long pre_fence;
#endif
- ethr_atomic_t flgs;
- ETHR_MTX_QLOCK_TYPE__ qlck;
- ethr_ts_event *q;
+ ethr_atomic32_t flgs;
short aux_scnt;
short main_scnt;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
#ifdef ETHR_MTX_HARD_DEBUG_WSQ
int ws;
#endif
#ifdef ETHR_MTX_CHK_EXCL
- ethr_atomic_t exclusive;
+ ethr_atomic32_t exclusive;
#endif
#ifdef ETHR_MTX_CHK_NON_EXCL
- ethr_atomic_t non_exclusive;
+ ethr_atomic32_t non_exclusive;
#endif
#ifdef ETHR_MTX_HARD_DEBUG_LFS
- ethr_atomic_t hdbg_lfs;
+ ethr_atomic32_t hdbg_lfs;
#endif
};
@@ -236,7 +236,7 @@ typedef struct {
typedef union {
struct {
- ethr_atomic_t readers;
+ ethr_atomic32_t readers;
int waiting_readers;
int byte_offset;
ethr_rwmutex_lived lived;
@@ -298,13 +298,13 @@ void ethr_rwmutex_rwunlock(ethr_rwmutex *);
#ifdef ETHR_MTX_HARD_DEBUG_LFS
# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
do { \
- ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+ ethr_atomic32_init(&(MTXB)->hdbg_lfs, 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_inc_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ > 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
@@ -317,15 +317,15 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ >= 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == -1); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
@@ -338,7 +338,7 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_inctest(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -386,12 +386,12 @@ do { \
#endif
# define ETHR_MTX_CHK_EXCL_INIT__(MTXB) \
- ethr_atomic_init(&(MTXB)->exclusive, 0)
+ ethr_atomic32_init(&(MTXB)->exclusive, 0)
# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (!ethr_atomic_read(&(MTXB)->exclusive)) \
+ if (!ethr_atomic32_read(&(MTXB)->exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -399,7 +399,7 @@ do { \
# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (ethr_atomic_read(&(MTXB)->exclusive)) \
+ if (ethr_atomic32_read(&(MTXB)->exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is not exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -407,13 +407,13 @@ do { \
# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB) \
do { \
ETHR_MTX_CHK_EXCL_IS_NOT_EXCL((MTXB)); \
- ethr_atomic_set(&(MTXB)->exclusive, 1); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 1); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB) \
do { \
ETHR_MTX_CHK_EXCL_IS_EXCL((MTXB)); \
- ethr_atomic_set(&(MTXB)->exclusive, 0); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -424,11 +424,11 @@ do { \
#endif
# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB) \
- ethr_atomic_init(&(MTXB)->non_exclusive, 0)
+ ethr_atomic32_init(&(MTXB)->non_exclusive, 0)
# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (!ethr_atomic_read(&(MTXB)->non_exclusive)) \
+ if (!ethr_atomic32_read(&(MTXB)->non_exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is non-exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -436,7 +436,7 @@ do { \
# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (ethr_atomic_read(&(MTXB)->non_exclusive)) \
+ if (ethr_atomic32_read(&(MTXB)->non_exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is not non-exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -444,19 +444,19 @@ do { \
# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_inc(&(MTXB)->non_exclusive); \
+ ethr_atomic32_inc(&(MTXB)->non_exclusive); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_add(&(MTXB)->non_exclusive, (NO)); \
+ ethr_atomic32_add(&(MTXB)->non_exclusive, (NO)); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_dec(&(MTXB)->non_exclusive); \
+ ethr_atomic32_dec(&(MTXB)->non_exclusive); \
ETHR_COMPILER_BARRIER; \
} while (0)
#else
@@ -501,18 +501,18 @@ do { \
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
-void ethr_mutex_lock_wait__(ethr_mutex *, long);
-void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint32_t);
+void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint32_t);
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
int res;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
res = (act == 0) ? 0 : EBUSY;
#ifdef ETHR_MTX_CHK_EXCL
@@ -531,11 +531,11 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
ethr_mutex_lock_wait__(mtx, act);
@@ -551,7 +551,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_COMPILER_BARRIER;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
@@ -559,7 +559,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
ETHR_MTX_CHK_EXCL_UNSET_EXCL(&mtx->mtxb);
- act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
ethr_mutex_unlock_wake__(mtx, act);
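The inlined trylock/lock/unlock bodies above share one shape: the whole mutex state lives in a single 32-bit flag word, the uncontended path is one compare-and-swap on it, and only a CAS mismatch (wait bits set) falls through to the out-of-line wait/wake routines. A sketch of that fast path, assuming C11 atomics; mtx_* names are illustrative and the queueing slow paths are stubbed:

    #include <stdatomic.h>
    #include <stdint.h>

    #define MTX_W_FLG UINT32_C(0x80000000)   /* ETHR_RWMTX_W_FLG__ analogue */

    typedef struct { _Atomic uint32_t flgs; } mtx_t;

    /* Real ethread queues the thread on mtxb.q and parks it; this
     * sketch just spins back into the fast path. */
    static void mtx_lock_wait(mtx_t *m)
    {
        uint32_t exp;
        do {
            exp = 0;
        } while (!atomic_compare_exchange_weak_explicit(
                     &m->flgs, &exp, MTX_W_FLG,
                     memory_order_acquire, memory_order_relaxed));
    }

    static void mtx_lock(mtx_t *m)
    {
        uint32_t exp = 0;
        /* Uncontended: one acquire-CAS flips 0 -> W_FLG, mirroring the
         * ethr_atomic32_cmpxchg_acqb() call in ethr_mutex_lock(). */
        if (!atomic_compare_exchange_strong_explicit(
                &m->flgs, &exp, MTX_W_FLG,
                memory_order_acquire, memory_order_relaxed))
            mtx_lock_wait(m);
    }

    static void mtx_unlock(mtx_t *m)
    {
        uint32_t exp = MTX_W_FLG;
        /* Uncontended: one release-CAS flips W_FLG -> 0.  A mismatch
         * means wait bits were set; real code wakes the waiters. */
        if (!atomic_compare_exchange_strong_explicit(
                &m->flgs, &exp, 0,
                memory_order_release, memory_order_relaxed)) {
            /* wake path elided in this sketch */
        }
    }

The acquire barrier on lock and release barrier on unlock are exactly what the _acqb/_relb suffixes in the atomics API above exist to guarantee.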
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
index 2f9f987d0b..8e04692856 100644
--- a/erts/include/internal/ethr_optimized_fallbacks.h
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -71,36 +71,46 @@ ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
#define ETHR_HAVE_NATIVE_SPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
-typedef ethr_native_atomic_t ethr_native_spinlock_t;
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
- ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+ ETHR_NATMC_FUNC__(init)(lock, 0);
}
static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
- ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == 1);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
- while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- (long) 1, (long) 0) != 0) {
- ETHR_SPIN_BODY;
+ while (ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, 1, 0) != 0) {
+ while (ETHR_NATMC_FUNC__(read)(lock) != 0)
+ ETHR_SPIN_BODY;
}
ETHR_COMPILER_BARRIER;
}
#endif
+#undef ETHR_NATMC_FUNC__
+
#endif
@@ -111,16 +121,26 @@ ethr_native_spin_lock(ethr_native_spinlock_t *lock)
#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
-typedef ethr_native_atomic_t ethr_native_rwlock_t;
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint32_t) 1) << 30)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint64_t) 1) << 62)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
-
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
{
- ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+ ETHR_NATMC_FUNC__(init)(lock, 0);
}
static ETHR_INLINE void
@@ -128,22 +148,24 @@ ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
ETHR_COMPILER_BARRIER;
#ifdef DEBUG
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) >= 0);
#endif
- ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
+ ETHR_NATMC_FUNC__(dec_relb)(lock);
}
static ETHR_INLINE void
ethr_native_read_lock(ethr_native_rwlock_t *lock)
{
- long act, exp = 0;
+ ETHR_NAINT_T__ act, exp = 0;
while (1) {
- act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- exp+1, exp);
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp+1, exp);
if (act == exp)
break;
- ETHR_SPIN_BODY;
- exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
+ while (act & ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ETHR_NATMC_FUNC__(read)(lock);
+ }
+ exp = act;
}
ETHR_COMPILER_BARRIER;
}
@@ -152,18 +174,16 @@ static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
- == ETHR_WLOCK_FLAG__);
- ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == ETHR_WLOCK_FLAG__);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_write_lock(ethr_native_rwlock_t *lock)
{
- long act, exp = 0;
+ ETHR_NAINT_T__ act, exp = 0;
while (1) {
- act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- exp|ETHR_WLOCK_FLAG__, exp);
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp|ETHR_WLOCK_FLAG__, exp);
if (act == exp)
break;
ETHR_SPIN_BODY;
@@ -173,13 +193,17 @@ ethr_native_write_lock(ethr_native_rwlock_t *lock)
/* Wait for readers to leave */
while (act != ETHR_WLOCK_FLAG__) {
ETHR_SPIN_BODY;
- act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
+ act = ETHR_NATMC_FUNC__(read_acqb)(lock);
}
ETHR_COMPILER_BARRIER;
}
#endif
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_WLOCK_FLAG__
+
#endif
#endif
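Both spin loops rewritten above gain an inner read-only wait: after a failed cmpxchg the spinner polls with plain loads and only retries the locked operation once the lock looks free, the classic test-and-test-and-set pattern. A standalone sketch of the same pattern, using C11 atomics in place of the native-atomic macros:

    #include <stdatomic.h>

    typedef struct { _Atomic int v; } ttas_lock_t;

    static void ttas_lock(ttas_lock_t *l)
    {
        int exp = 0;
        while (!atomic_compare_exchange_weak_explicit(
                   &l->v, &exp, 1,
                   memory_order_acquire, memory_order_relaxed)) {
            /* CAS failed: wait with plain reads so the cache line stays
             * in the shared state instead of bouncing between writers. */
            while (atomic_load_explicit(&l->v, memory_order_relaxed) != 0)
                ;                        /* ETHR_SPIN_BODY would pause here */
            exp = 0;
        }
    }

    static void ttas_unlock(ttas_lock_t *l)
    {
        atomic_store_explicit(&l->v, 0, memory_order_release);
    }

A bare cmpxchg loop makes every waiter issue write-intent bus transactions; spinning on reads keeps contention off the interconnect until the lock is actually released.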
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 53fa1acdc2..4cd95faf6a 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -37,11 +37,6 @@
#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} ethr_timeval;
-
#if defined(DEBUG)
# define ETHR_DEBUG
#endif
@@ -73,7 +68,7 @@ typedef struct {
#endif
/* Assume 64-byte cache line size */
-#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_SIZE ((ethr_uint_t) 64)
#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
@@ -171,6 +166,22 @@ typedef pthread_key_t ethr_tsd_key;
# undef WIN32_LEAN_AND_MEAN
#endif
+#if defined(_MSC_VER)
+
+#if ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
+#endif
+
+#if ETHR_SIZEOF___INT64 == 8
+#define ETHR_HAVE_INT64_T 1
+typedef __int64 ethr_sint64_t;
+typedef unsigned __int64 ethr_uint64_t;
+#endif
+
+#endif
+
struct ethr_join_data_;
/* Types */
@@ -198,12 +209,48 @@ typedef DWORD ethr_tsd_key;
#endif
-#ifdef SIZEOF_LONG
-#if SIZEOF_LONG < ETHR_SIZEOF_PTR
-#error size of long currently needs to be at least the same as size of void *
+#ifndef ETHR_HAVE_INT32_T
+#if ETHR_SIZEOF_INT == 4
+#define ETHR_HAVE_INT32_T 1
+typedef int ethr_sint32_t;
+typedef unsigned int ethr_uint32_t;
+#elif ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
#endif
#endif
+#ifndef ETHR_HAVE_INT64_T
+#if ETHR_SIZEOF_INT == 8
+#define ETHR_HAVE_INT64_T 1
+typedef int ethr_sint64_t;
+typedef unsigned int ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long ethr_sint64_t;
+typedef unsigned long ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long long ethr_sint64_t;
+typedef unsigned long long ethr_uint64_t;
+#endif
+#endif
+
+#if ETHR_SIZEOF_PTR == 4
+#ifndef ETHR_HAVE_INT32_T
+#error "No 32-bit integer type found"
+#endif
+typedef ethr_sint32_t ethr_sint_t;
+typedef ethr_uint32_t ethr_uint_t;
+#elif ETHR_SIZEOF_PTR == 8
+#ifndef ETHR_HAVE_INT64_T
+#error "No 64-bit integer type found"
+#endif
+typedef ethr_sint64_t ethr_sint_t;
+typedef ethr_uint64_t ethr_uint_t;
+#endif
+
/* __builtin_expect() is needed by both native atomics code
* and the fallback code */
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
@@ -386,7 +433,6 @@ typedef struct {
#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
# define ETHR_NEED_SPINLOCK_PROTOTYPES__
# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
-# define ETHR_NEED_ATOMIC_PROTOTYPES__
#endif
int ethr_init(ethr_init_data *);
@@ -399,7 +445,6 @@ void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
@@ -502,312 +547,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-/*
- * Map ethread native atomics to ethread API atomics.
- */
-typedef ethr_native_atomic_t ethr_atomic_t;
-#else
-typedef long ethr_atomic_t;
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-void ethr_atomic_init(ethr_atomic_t *, long);
-void ethr_atomic_set(ethr_atomic_t *, long);
-long ethr_atomic_read(ethr_atomic_t *);
-long ethr_atomic_inc_read(ethr_atomic_t *);
-long ethr_atomic_dec_read(ethr_atomic_t *);
-void ethr_atomic_inc(ethr_atomic_t *);
-void ethr_atomic_dec(ethr_atomic_t *);
-long ethr_atomic_add_read(ethr_atomic_t *, long);
-void ethr_atomic_add(ethr_atomic_t *, long);
-long ethr_atomic_read_band(ethr_atomic_t *, long);
-long ethr_atomic_read_bor(ethr_atomic_t *, long);
-long ethr_atomic_xchg(ethr_atomic_t *, long);
-long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
-long ethr_atomic_read_acqb(ethr_atomic_t *);
-long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
-void ethr_atomic_set_relb(ethr_atomic_t *, long);
-void ethr_atomic_dec_relb(ethr_atomic_t *);
-long ethr_atomic_dec_read_relb(ethr_atomic_t *);
-long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
-long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
-#endif
-
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
-/*
- * Fallbacks for atomics used in absence of a native implementation.
- */
-
-#define ETHR_ATOMIC_ADDR_BITS 10
-#define ETHR_ATOMIC_ADDR_SHIFT 6
-
-typedef struct {
- union {
- ethr_spinlock_t lck;
- char buf[ETHR_CACHE_LINE_SIZE];
- } u;
-} ethr_atomic_protection_t;
-
-extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
-
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- ethr_spin_lock(slp__); \
- { EXPS; } \
- ethr_spin_unlock(slp__); \
-} while (0)
-
-#endif
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_init(var, i);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_set(var, i);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_read(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
- return res;
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_add(var, incr);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_add_return(var, i);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
- return res;
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_inc(var);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_dec(var);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_inc_return(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_dec_return(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
- long mask)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_and_retold(var, mask);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
- long mask)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_or_retold(var, mask);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_xchg(var, new);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg(var, new, exp);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
- {
- res = *var;
- if (__builtin_expect(res == exp, 1))
- *var = new;
- });
- return res;
-#endif
-}
-
-/*
- * Important memory barrier requirements.
- *
- * The following atomic operations *must* supply a memory barrier of
- * at least the type specified by its suffix:
- * _acqb = acquire barrier
- * _relb = release barrier
- */
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_read_acqb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_inc_return_acqb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_set_relb(var, val);
-#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_dec_relb(var);
-#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_dec_return_relb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg_relb(var, new, exp);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
-#endif
-}
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
+#include "ethr_atomics.h"
typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
@@ -825,7 +565,7 @@ struct ethr_ts_event_ {
ethr_ts_event *prev;
ethr_event event;
void *udata;
- ethr_atomic_t uaflgs;
+ ethr_atomic32_t uaflgs;
unsigned uflgs;
unsigned iflgs; /* for ethr lib only */
short rgix; /* for ethr lib only */
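The cache-line macros near the top of this header (ETHR_CACHE_LINE_SIZE, ETHR_CACHE_LINE_MASK, ETHR_CACHE_LINE_ALIGN_SIZE) feed the padding used by the atomic fallback table and the ts-event structures. The ALIGN_SIZE body is elided by the line continuation in the hunk, so the sketch below assumes the common round-up-to-the-next-multiple formulation rather than quoting the macro:

    #include <assert.h>
    #include <stddef.h>

    #define CACHE_LINE_SIZE ((size_t) 64)
    #define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1)
    /* Round SZ up to a whole number of cache lines (assumed formulation). */
    #define CACHE_LINE_ALIGN_SIZE(SZ) \
        (((SZ) + CACHE_LINE_MASK) & ~CACHE_LINE_MASK)

    int main(void)
    {
        assert(CACHE_LINE_ALIGN_SIZE((size_t) 1)  == 64);
        assert(CACHE_LINE_ALIGN_SIZE((size_t) 64) == 64);
        assert(CACHE_LINE_ALIGN_SIZE((size_t) 65) == 128);
        return 0;
    }

The mask trick requires the line size to be a power of two, which the "assume 64-byte cache line size" comment above guarantees by construction.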
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index 5debb44756..f394d790d2 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -20,6 +20,21 @@
/* Define to the size of pointers */
#undef ETHR_SIZEOF_PTR
+/* Define to the size of int */
+#undef ETHR_SIZEOF_INT
+
+/* Define to the size of long */
+#undef ETHR_SIZEOF_LONG
+
+/* Define to the size of long long */
+#undef ETHR_SIZEOF_LONG_LONG
+
+/* Define to the size of __int64 */
+#undef ETHR_SIZEOF___INT64
+
+/* Define if bigendian */
+#undef ETHR_BIGENDIAN
+
/* Define if you want to disable native ethread implementations */
#undef ETHR_DISABLE_NATIVE_IMPLS
@@ -100,6 +115,27 @@
/* Define to the size of AO_t if libatomic_ops is used */
#undef ETHR_SIZEOF_AO_T
+/* Define if you have _InterlockedCompareExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+
+/* Define if you have _InterlockedDecrement64() */
+#undef ETHR_HAVE__INTERLOCKEDDECREMENT64
+
+/* Define if you have _InterlockedIncrement64() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT64
+
+/* Define if you have _InterlockedExchangeAdd64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+
+/* Define if you have _InterlockedExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+
+/* Define if you have _InterlockedAnd64() */
+#undef ETHR_HAVE__INTERLOCKEDAND64
+
+/* Define if you have _InterlockedOr64() */
+#undef ETHR_HAVE__INTERLOCKEDOR64
+
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index e8e529dd48..16935084b1 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -22,24 +22,35 @@
* Author: Rickard Green
*/
-#ifndef ETHR_GCC_ATOMIC_H__
-#define ETHR_GCC_ATOMIC_H__
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_GCC_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_GCC_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_GCC_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_GCC_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#ifndef ETHR_GCC_ATOMIC_COMMON__
+#define ETHR_GCC_ATOMIC_COMMON__
-#define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-/* Enable immediate read/write on platforms where we know it is safe */
+#define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 0
#if defined(__i386__) || defined(__x86_64__) || defined(__sparc__) \
|| defined(__powerpc__) || defined(__ppc__) || defined(__mips__)
-# undef ETHR_IMMED_ATOMIC_SET_GET_SAFE__
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# undef ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
+# define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 1
#endif
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
-
+#if defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
/*
* According to the documentation this is what we want:
@@ -47,34 +58,73 @@ typedef struct {
* However, __sync_synchronize() is known to erroneously be
* a noop on at least some platforms with some gcc versions.
 * This has supposedly been fixed in some gcc version, but we
- * don't know from which version. Therefore, we use the
- * workaround implemented below on all gcc versions except
- * for gcc 4.2 or above for MIPS, where it's been verified.
+ * don't know from which version. Therefore, we only use
+ * it when it has been verified to work. Otherwise
+ * we use a workaround.
*/
#if defined(__mips__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+/* __sync_synchronize() has been verified to work here */
#define ETHR_MEMORY_BARRIER __sync_synchronize()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __sync_synchronize()
+#elif defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+/* Use fence instructions directly instead of workaround */
+#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
#else
+/* Workaround */
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
- (void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
+ volatile ethr_sint32_t x___ = 0; \
+ (void) __sync_val_compare_and_swap(&x___, (ethr_sint32_t) 0, (ethr_sint32_t) 1); \
} while (0)
-#endif
#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+
+#define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_GCC_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#else
+#error "Unsupported integer size"
+#endif
+
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
var->counter = value;
#else
/*
 * Unfortunately no __sync_store() or similar exists in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- long act = 0, exp;
+ ETHR_AINT_T__ act = 0, exp;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, value);
@@ -82,80 +132,86 @@ ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
#endif
}
-#define ethr_native_atomic_init ethr_native_atomic_set
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ ETHR_NATMC_FUNC__(set)(var, value);
+}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
return var->counter;
#else
/*
 * Unfortunately no __sync_fetch() or similar exists in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- return __sync_add_and_fetch(&var->counter, (long) 0);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
(void) __sync_add_and_fetch(&var->counter, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
return __sync_add_and_fetch(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) __sync_add_and_fetch(&var->counter, (long) 1);
+ (void) __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) __sync_sub_and_fetch(&var->counter, (long) 1);
+ (void) __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, (long) 1);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return __sync_sub_and_fetch(&var->counter, (long) 1);
+ return __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
return __sync_fetch_and_and(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) __sync_fetch_and_or(&var->counter, mask);
+ return (ETHR_AINT_T__) __sync_fetch_and_or(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
return __sync_val_compare_and_swap(&var->counter, old, new);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
- long exp, act = 0;
+ ETHR_AINT_T__ exp, act = 0;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, new);
@@ -167,22 +223,68 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, (long) 0);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_AINT_T__ val = var->counter;
+ ETHR_COMPILER_BARRIER;
+ return val;
+#else
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
+#endif
}
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
-#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
-#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->counter = i;
+#else
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
+#endif
+}
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(inc_return)(var);
+}
-#endif
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_NATMC_FUNC__(dec)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(dec_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
#endif
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
+
#endif
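The ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ split above captures why the acquire/release variants exist: on x86/x86_64 a plain load/store plus a compiler barrier already has the required ordering, while weaker machines need a real fence or a locked instruction. What the pairing buys the caller is publication safety, sketched here with C11 atomics standing in for read_acqb/set_relb:

    #include <stdatomic.h>

    static int payload;                    /* plain, non-atomic data */
    static _Atomic int ready;              /* publication flag */

    static void publish(void)
    {
        payload = 42;                      /* 1: write the data */
        atomic_store_explicit(&ready, 1,   /* 2: release store (set_relb) */
                              memory_order_release);
    }

    static int try_consume(void)
    {
        if (atomic_load_explicit(&ready,   /* acquire load (read_acqb) */
                                 memory_order_acquire))
            return payload;                /* guaranteed to observe 42 */
        return -1;                         /* not published yet */
    }

The release store cannot be reordered before the payload write, and the acquire load cannot be reordered after the payload read, so a consumer that sees the flag also sees the data.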
diff --git a/erts/include/internal/gcc/ethread.h b/erts/include/internal/gcc/ethread.h
index bb378e31e0..392a1aa2b2 100644
--- a/erts/include/internal/gcc/ethread.h
+++ b/erts/include/internal/gcc/ethread.h
@@ -25,6 +25,16 @@
#ifndef ETHREAD_GCC_H__
#define ETHREAD_GCC_H__
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
+
+#endif
#endif
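The WANT_32BIT/WANT_64BIT dance above turns ethr_atomic.h into a width-parameterized template: the driver defines a request macro and includes the header; the header consumes the request, instantiates one width via token pasting, then undefines its parameters so it can be included a second time. A stripped-down sketch of the protocol, with illustrative natmc* names and atomic_impl.h standing in for ethr_atomic.h:

    /* ---- atomic_impl.h: expands once per requested width ---- */
    #include <stdint.h>

    #if defined(WANT_32BIT_IMPL)
    #  undef WANT_32BIT_IMPL             /* consume the request */
    #  define AINT int32_t
    #  define FN(X) natmc32_ ## X
    #elif defined(WANT_64BIT_IMPL)
    #  undef WANT_64BIT_IMPL
    #  define AINT int64_t
    #  define FN(X) natmc64_ ## X
    #endif

    typedef struct { volatile AINT counter; } FN(t);

    static AINT FN(read)(FN(t) *var) { return var->counter; }

    #undef AINT                          /* reset for the next inclusion */
    #undef FN

    /* ---- driver, as gcc/ethread.h does above ---- */
    #define WANT_32BIT_IMPL
    #include "atomic_impl.h"             /* emits natmc32_t, natmc32_read */
    #define WANT_64BIT_IMPL
    #include "atomic_impl.h"             /* emits natmc64_t, natmc64_read */

The per-width include guards in the real header (ETHR_GCC_ATOMIC32_H__ / ETHR_GCC_ATOMIC64_H__) ensure each width is only ever instantiated once.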
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 52d01aab32..4e402f261a 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -23,14 +23,24 @@
*
* This code requires a 486 or newer processor.
*/
-#ifndef ETHREAD_I386_ATOMIC_H
-#define ETHREAD_I386_ATOMIC_H
-/* An atomic is an aligned long accessed via locked operations.
- */
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_X86_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_X86_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_X86_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_X86_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#ifndef ETHR_X86_ATOMIC_COMMON__
+#define ETHR_X86_ATOMIC_COMMON__
+
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
@@ -40,123 +50,161 @@ typedef struct {
#else
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
+ volatile ethr_sint32_t x___ = 0; \
__asm__ __volatile__("lock; incl %0" : "=m"(x___) : "m"(x___) : "memory"); \
} while (0)
#endif
-#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
-
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_X86_ATOMIC_COMMON__ */
-#ifdef __x86_64__
-#define LONG_SUFFIX "q"
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
#else
-#define LONG_SUFFIX "l"
+#error "Unsupported integer size"
#endif
+/* An atomic is an aligned ETHR_AINT_T__ accessed via locked operations.
+ */
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
__asm__ __volatile__(
- "lock; add" LONG_SUFFIX " %1, %0"
+ "lock; add" ETHR_AINT_SUFFIX__ " %1, %0"
: "=m"(var->counter)
: "ir"(incr), "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; inc" LONG_SUFFIX " %0"
+ "lock; inc" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; dec" LONG_SUFFIX " %0"
+ "lock; dec" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long tmp;
+ ETHR_AINT_T__ tmp;
tmp = incr;
__asm__ __volatile__(
- "lock; xadd" LONG_SUFFIX " %0, %1" /* xadd didn't exist prior to the 486 */
+ "lock; xadd" ETHR_AINT_SUFFIX__ " %0, %1" /* xadd didn't exist prior to the 486 */
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
return tmp + incr;
}
-#define ethr_native_atomic_inc_return(var) ethr_native_atomic_add_return((var), 1)
-#define ethr_native_atomic_dec_return(var) ethr_native_atomic_add_return((var), -1)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) 1);
+}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) -1);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
__asm__ __volatile__(
- "lock; cmpxchg" LONG_SUFFIX " %2, %3"
+ "lock; cmpxchg" ETHR_AINT_SUFFIX__ " %2, %3"
: "=a"(old), "=m"(var->counter)
: "r"(new), "m"(var->counter), "0"(old)
: "cc", "memory"); /* full memory clobber to make this a compiler barrier */
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp & mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp & mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp | mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp | mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long tmp = val;
+ ETHR_AINT_T__ tmp = val;
__asm__ __volatile__(
- "xchg" LONG_SUFFIX " %0, %1"
+ "xchg" ETHR_AINT_SUFFIX__ " %0, %1"
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
@@ -167,57 +215,73 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- long val;
+ ETHR_AINT_T__ val;
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
val = var->counter;
#else
- val = ethr_native_atomic_add_return(var, 0);
+ val = ETHR_NATMC_FUNC__(add_return)(var, 0);
#endif
__asm__ __volatile__("" : : : "memory");
return val;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
__asm__ __volatile__("" : : : "memory");
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
var->counter = i;
#else
- (void) ethr_native_atomic_xchg(var, i);
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
__asm__ __volatile__("" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("" : : : "memory");
- ethr_native_atomic_dec(var);
+ ETHR_NATMC_FUNC__(dec)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("" : : : "memory");
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
}
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
-#undef LONG_SUFFIX
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHREAD_I386_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
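Where x86 has no direct read-modify-write instruction for an operation, and_retold/or_retold above fall back to a cmpxchg retry loop: read the current value, attempt to install the modified value, and retry from whatever cmpxchg actually found. The same shape written against GCC's __sync builtins instead of inline assembly:

    #include <stdint.h>

    static intptr_t fetch_and_and(volatile intptr_t *p, intptr_t mask)
    {
        intptr_t old, seen = *p;
        do {
            old = seen;
            /* cmpxchg returns the value actually found; a mismatch means
             * another thread raced us, so retry from that value. */
            seen = __sync_val_compare_and_swap(p, old, old & mask);
        } while (seen != old);
        return old;                      /* previous value, per _retold */
    }

Reseeding the loop from the returned value rather than re-reading *p saves one extra load per failed attempt, the same micro-optimization the inline-asm versions rely on.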
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index ed43e77279..b5a17caefb 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,12 @@
#ifndef ETHREAD_I386_ETHREAD_H
#define ETHREAD_I386_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index a6eb43a0bd..d56693dbf8 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -46,17 +46,39 @@
* - AO_store()
* - AO_compare_and_swap()
*
- * The `AO_t' type also have to be at least as large as
- * `void *' and `long' types.
+ * The `AO_t' type also has to be at least as large as the `void *' type.
*/
#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
#error The AO_t type is too small
#endif
+#if ETHR_SIZEOF_AO_T == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_SIZEOF_AO_T == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
+#else
+#error "Unsupported integer size"
+#endif
+
+#if ETHR_SIZEOF_AO_T == 8
+typedef union {
+ volatile AO_t counter;
+ ethr_sint32_t sint32[2];
+} ETHR_ATMC_T__;
+#else
typedef struct {
volatile AO_t counter;
-} ethr_native_atomic_t;
+} ETHR_ATMC_T__;
+#endif
#define ETHR_MEMORY_BARRIER AO_nop_full()
#ifdef AO_HAVE_nop_write
@@ -72,123 +94,151 @@ typedef struct {
#ifdef AO_NO_DD_ORDERING
# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
#else
-# define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("":::"memory")
+# define ETHR_READ_DEPEND_MEMORY_BARRIER AO_compiler_barrier()
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+#if ETHR_SIZEOF_AO_T == 8
+/*
+ * We also need to provide an ethr_native_atomic32_addr(), since
+ * this 64-bit implementation will be used to implement
+ * 32-bit native atomics.
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ETHR_ATMC_T__ *var)
+{
+ ETHR_ASSERT(((void *) &var->sint32[0]) == ((void *) &var->counter));
+#ifdef ETHR_BIGENDIAN
+ return &var->sint32[1];
+#else
+ return &var->sint32[0];
#endif
+}
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_SIZEOF_AO_T == 8 */
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
AO_store(&var->counter, (AO_t) value);
}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
- ethr_native_atomic_set(var, value);
+ ETHR_NATMC_FUNC__(set)(var, value);
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
- return (long) AO_load(&var->counter);
+ return (ETHR_AINT_T__) AO_load(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
#ifdef AO_HAVE_fetch_and_add
- return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+ return ((ETHR_AINT_T__) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
#else
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp + (AO_t) incr;
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) new;
+ return (ETHR_AINT_T__) new;
}
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void) ethr_native_atomic_add_return(var, incr);
+ (void) ETHR_NATMC_FUNC__(add_return)(var, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_add1
- return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_add1(&var->counter)) + 1;
#else
- return ethr_native_atomic_add_return(var, 1);
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_inc_return(var);
+ (void) ETHR_NATMC_FUNC__(inc_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_sub1
- return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1(&var->counter)) - 1;
#else
- return ethr_native_atomic_add_return(var, -1);
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_dec_return(var);
+ (void) ETHR_NATMC_FUNC__(dec_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp & ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp | ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
return act;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
while (1) {
AO_t exp = AO_load(&var->counter);
if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
@@ -196,97 +246,105 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_load_acquire
- return (long) AO_load_acquire(&var->counter);
+ return (ETHR_AINT_T__) AO_load_acquire(&var->counter);
#else
- long res = ethr_native_atomic_read(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_add1_acquire
- return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_acquire(&var->counter)) + 1;
#else
- long res = ethr_native_atomic_add_return(var, 1);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(add_return)(var, 1);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
#ifdef AO_HAVE_store_release
AO_store_release(&var->counter, (AO_t) value);
#else
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_set(var, value);
+ ETHR_NATMC_FUNC__(set)(var, value);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_sub1_release
- return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_release(&var->counter)) - 1;
#else
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_dec_return_relb(var);
+ (void) ETHR_NATMC_FUNC__(dec_return_relb)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
#ifdef AO_HAVE_compare_and_swap_acquire
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
AO_nop_full();
return act;
#else
- long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ETHR_AINT_T__ act = ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
ETHR_MEMORY_BARRIER;
return act;
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
#ifdef AO_HAVE_compare_and_swap_release
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
return act;
#else
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
#endif
}
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
-#endif
+#endif /* !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS) */
+
+#endif /* ETHR_LIBATOMIC_OPS_ATOMIC_H__ */
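/*
 * [Editorial sketch, not part of the patch] The AO_compare_and_swap
 * fallbacks above are all instances of the classic CAS retry loop:
 * read, compute, attempt the swap, retry on a race. A minimal
 * stand-alone version, assuming the GCC __sync builtins are available:
 */
#include <stdint.h>

static int64_t
sketch_add_return(volatile int64_t *p, int64_t incr)
{
    while (1) {
        int64_t exp = *p;          /* current value (may be stale)   */
        int64_t new = exp + incr;  /* desired value                  */
        /* succeeds only if nobody changed *p since we read exp */
        if (__sync_bool_compare_and_swap(p, exp, new))
            return new;            /* *_return flavor: the new value */
    }
}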
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index f21f7c9588..522f433649 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -28,31 +28,39 @@
#ifndef ETHREAD_PPC_ATOMIC_H
#define ETHREAD_PPC_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
typedef struct {
- volatile int counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+#define ethr_native_atomic32_set(v, i) ethr_native_atomic32_init((v), (i))
-static ETHR_INLINE int
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
-static ETHR_INLINE int
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -69,16 +77,16 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, int incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ethr_native_atomic32_add_return(var, incr);
}
-static ETHR_INLINE int
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -95,16 +103,16 @@ ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_inc_return(var);
+ (void)ethr_native_atomic32_inc_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -121,16 +129,16 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_dec_return(var);
+ (void)ethr_native_atomic32_dec_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -146,10 +154,10 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -165,10 +173,10 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -183,10 +191,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
return tmp;
}
-static ETHR_INLINE int
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
- int old;
+ ethr_sint32_t old;
__asm__ __volatile__(
"eieio\n\t"
@@ -210,20 +220,20 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
{
- long res = ethr_native_atomic_read(var);
+ ethr_sint32_t res = ethr_native_atomic32_read(var);
ETHR_MEMORY_BARRIER;
return res;
}
-#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
-#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic32_set_relb ethr_native_atomic32_xchg
+#define ethr_native_atomic32_inc_return_acqb ethr_native_atomic32_inc_return
+#define ethr_native_atomic32_dec_relb ethr_native_atomic32_dec_return
+#define ethr_native_atomic32_dec_return_relb ethr_native_atomic32_dec_return
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic32_cmpxchg_acqb ethr_native_atomic32_cmpxchg
+#define ethr_native_atomic32_cmpxchg_relb ethr_native_atomic32_cmpxchg
#endif /* ETHR_TRY_INLINE_FUNCS */
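/*
 * [Editorial note, not part of the patch] ppc32 offers no cheaper
 * acquire/release-only forms of these operations, so the barrier-
 * flavored entry points above are plain aliases: the aliased ll/sc
 * sequences are bracketed by eieio/isync and are therefore already at
 * least as strong as required. The pattern, with hypothetical names
 * (sketch_xchg/sketch_cmpxchg stand in for the real operations):
 */
#define sketch_set_relb(v, i)        sketch_xchg((v), (i))         /* store via fenced xchg  */
#define sketch_cmpxchg_acqb(v, n, o) sketch_cmpxchg((v), (n), (o)) /* already fully ordered  */
#define sketch_cmpxchg_relb(v, n, o) sketch_cmpxchg((v), (n), (o))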
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
index 104ec287e0..93da8a0429 100644
--- a/erts/include/internal/pthread/ethr_event.h
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -30,31 +30,9 @@
#include <linux/futex.h>
#include <sys/time.h>
-/*
- * Note: Linux futexes operate on 32-bit integers, but
- * ethr_native_atomic_t are 64-bits on 64-bit
- * platforms. This has to be taken into account.
- * Therefore, in each individual value used each
- * byte look the same.
- */
-
-#if ETHR_SIZEOF_PTR == 8
-
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
-#define ETHR_EVENT_OFF__ 0x7777777777777777L
-#define ETHR_EVENT_ON__ 0L
-
-#elif ETHR_SIZEOF_PTR == 4
-
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
-#define ETHR_EVENT_OFF__ 0x77777777L
-#define ETHR_EVENT_ON__ 0L
-
-#else
-
-#error ehrm...
-
-#endif
+#define ETHR_EVENT_OFF_WAITER__ ((ethr_sint32_t) -1)
+#define ETHR_EVENT_OFF__ ((ethr_sint32_t) 1)
+#define ETHR_EVENT_ON__ ((ethr_sint32_t) 0)
#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
@@ -65,11 +43,17 @@
#endif
typedef struct {
- ethr_atomic_t futex;
+ ethr_atomic32_t futex;
} ethr_event;
-#define ETHR_FUTEX__(FTX, OP, VAL) \
- (-1 == syscall(__NR_futex, (void *) (FTX), (OP), (int) (VAL), NULL, NULL, 0)\
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, \
+ (void *) ethr_atomic32_addr((FTX)), \
+ (OP), \
+ (int) (VAL), \
+ NULL, \
+ NULL, \
+ 0) \
? errno : 0)
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
@@ -77,9 +61,9 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint32_t val;
ETHR_WRITE_MEMORY_BARRIER;
- val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg(&e->futex, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
if (res != 0)
@@ -90,7 +74,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- ethr_atomic_set(&e->futex, ETHR_EVENT_OFF__);
+ ethr_atomic32_set(&e->futex, ETHR_EVENT_OFF__);
ETHR_MEMORY_BARRIER;
}
@@ -100,7 +84,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
/* --- Posix mutex/cond implementation of events ---------------------------- */
typedef struct {
- ethr_atomic_t state;
+ ethr_atomic32_t state;
pthread_mutex_t mtx;
pthread_cond_t cnd;
} ethr_event;
@@ -114,9 +98,9 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint32_t val;
ETHR_WRITE_MEMORY_BARRIER;
- val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg(&e->state, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = pthread_mutex_lock(&e->mtx);
if (res != 0)
@@ -133,7 +117,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- ethr_atomic_set(&e->state, ETHR_EVENT_OFF__);
+ ethr_atomic32_set(&e->state, ETHR_EVENT_OFF__);
ETHR_MEMORY_BARRIER;
}
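/*
 * [Editorial sketch, not part of the patch] The futex-based event above
 * reduces to the following single-waiter protocol: a waiter publishes
 * EV_OFF_WAITER and sleeps in FUTEX_WAIT while the word still holds
 * that value; the setter exchanges the word to EV_ON and issues
 * FUTEX_WAKE only when the old value proves a waiter is asleep.
 * Linux-only; error handling elided; all names are hypothetical.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#define EV_ON           0   /* event set                    */
#define EV_OFF          1   /* event reset, no waiter       */
#define EV_OFF_WAITER  -1   /* event reset, waiter sleeping */

static void
sketch_event_set(int32_t *futex)
{
    int32_t old = __atomic_exchange_n(futex, EV_ON, __ATOMIC_SEQ_CST);
    if (old == EV_OFF_WAITER)   /* wake at most one sleeping waiter */
        syscall(SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
}

static void
sketch_event_wait(int32_t *futex)
{
    int32_t exp = EV_OFF;
    /* announce that we are about to sleep (no-op if already set) */
    __atomic_compare_exchange_n(futex, &exp, EV_OFF_WAITER, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    /* re-check after every return; FUTEX_WAIT may wake spuriously */
    while (__atomic_load_n(futex, __ATOMIC_SEQ_CST) == EV_OFF_WAITER)
        syscall(SYS_futex, futex, FUTEX_WAIT, EV_OFF_WAITER, NULL, NULL, 0);
}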
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 2da6472393..00380dbf07 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -21,49 +21,86 @@
* Native ethread atomics on SPARC V9.
* Author: Mikael Pettersson.
*/
-#ifndef ETHR_SPARC32_ATOMIC_H
-#define ETHR_SPARC32_ATOMIC_H
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_SPARC_V9_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_SPARC_V9_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#ifndef ETHR_SPARC_V9_ATOMIC_COMMON__
+#define ETHR_SPARC_V9_ATOMIC_COMMON__
#define ETHR_MEMORY_BARRIER \
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
: : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-
-#if defined(__arch64__)
-#define CASX "casx"
+#endif /* ETHR_SPARC_V9_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_CAS__ "cas"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_CAS__ "casx"
#else
-#define CASX "cas"
+#error "Unsupported integer size"
#endif
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old+incr;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -73,46 +110,46 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, 1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, -1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old & mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -121,17 +158,17 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old | mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -140,17 +177,17 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long old, new;
+ ETHR_AINT_T__ old, new;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad");
do {
old = var->counter;
new = val;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -159,12 +196,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -178,58 +215,63 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
/* TODO: relax acquire barriers */
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_read(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- ethr_native_atomic_set(var, i);
+ ETHR_NATMC_FUNC__(set)(var, i);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
__asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- ethr_native_atomic_dec(var);
+ ETHR_NATMC_FUNC__(dec)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
- long res = ethr_native_atomic_cmpxchg(var, new, old);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
__asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
return res;
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- return ethr_native_atomic_cmpxchg(var, new, old);
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHR_SPARC32_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_CAS__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index dca113b4d6..aea9794390 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,12 @@
#ifndef ETHREAD_SPARC32_ETHREAD_H
#define ETHREAD_SPARC32_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 69569d82d1..48e4c0c6c8 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -24,92 +24,102 @@
#ifndef ETHREAD_TILE_ATOMIC_H
#define ETHREAD_TILE_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
#include <atomic.h>
/* An atomic is an aligned int accessed via locked operations.
*/
typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __insn_mf()
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_set(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
atomic_exchange_acq(&var->counter, i);
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
atomic_increment(&var->counter);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
atomic_decrement(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ethr_native_atomic32_add_return(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ethr_native_atomic32_add_return(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
return atomic_and_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
return atomic_or_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
return atomic_exchange_acq(&var->counter, val);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
@@ -118,54 +128,58 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
{
- long res = ethr_native_atomic_read(var);
+ ethr_sint32_t res = ethr_native_atomic32_read(var);
ETHR_MEMORY_BARRIER;
return res;
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ethr_sint32_t res = ethr_native_atomic32_inc_return(var);
ETHR_MEMORY_BARRIER;
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long val)
+ethr_native_atomic32_set_relb(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_set(var, val);
+ ethr_native_atomic32_set(var, val);
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec_relb(ethr_native_atomic32_t *var)
{
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_dec(var);
+ ethr_native_atomic32_dec(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return_relb(ethr_native_atomic32_t *var)
{
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_dec_return(var);
+ return ethr_native_atomic32_dec_return(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_acqb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
{
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_relb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
{
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
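/*
 * [Editorial sketch, not part of the patch] As in the tile code above,
 * the *_return operations are derived from fetch-and-add, which hands
 * back the value *before* the addition; adding the increment once more
 * yields the post-operation value. The same shape in portable C11
 * atomics:
 */
#include <stdatomic.h>
#include <stdint.h>

static int32_t
sketch_add_return(_Atomic int32_t *p, int32_t incr)
{
    /* atomic_fetch_add() returns the pre-add value */
    return atomic_fetch_add(p, incr) + incr;
}

static int32_t sketch_inc_return(_Atomic int32_t *p) { return sketch_add_return(p, 1);  }
static int32_t sketch_dec_return(_Atomic int32_t *p) { return sketch_add_return(p, -1); }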
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
index 500459dd6c..60def01a7e 100644
--- a/erts/include/internal/win/ethr_atomic.h
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -22,223 +22,394 @@
* Author: Rickard Green
*/
-#ifndef ETHR_WIN_ATOMIC_H__
-#define ETHR_WIN_ATOMIC_H__
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_WIN_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_WIN_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_WIN_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_WIN_ATOMIC64_H__
+#ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+/* _InterlockedCompareExchange64() required... */
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#endif
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+
+#ifndef ETHR_WIN_ATOMIC_COMMON__
+#define ETHR_WIN_ATOMIC_COMMON__
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if defined(_M_IX86) || defined(_M_AMD64) || defined(_M_IA64)
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 1
#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 0
#endif
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
/*
- * No configure test checking for _Interlocked*_{acq,rel} and
- * Interlocked*{Acquire,Release} have been written yet...
+ * No configure test checking for the interlocked acquire/release
+ * versions has been written yet. Such a test should define
+ * ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS if, and
+ * only if, all used interlocked operations with barriers
+ * exist.
*
* Note, that these are pure optimizations for the itanium
* processor.
*/
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
-#pragma intrinsic(_InterlockedCompareExchange_acq)
+#include <intrin.h>
+#undef ETHR_COMPILER_BARRIER
+#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+#include <emmintrin.h>
+#include <mmintrin.h>
+#pragma intrinsic(_mm_mfence)
+#define ETHR_MEMORY_BARRIER _mm_mfence()
+#pragma intrinsic(_mm_sfence)
+#define ETHR_WRITE_MEMORY_BARRIER _mm_sfence()
+#pragma intrinsic(_mm_lfence)
+#define ETHR_READ_MEMORY_BARRIER _mm_lfence()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+
+#else
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile long x___ = 0; \
+ _InterlockedCompareExchange(&x___, (long) 1, (long) 0); \
+} while (0)
+
#endif
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+
+#endif /* ETHR_WIN_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
+/*
+ * All used operations are available as 32-bit intrinsics.
+ */
+
+#pragma intrinsic(_InterlockedDecrement)
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd_acq)
+#pragma intrinsic(_InterlockedIncrement_acq)
+#pragma intrinsic(_InterlockedDecrement_rel)
+#pragma intrinsic(_InterlockedCompareExchange_acq)
#pragma intrinsic(_InterlockedCompareExchange_rel)
#endif
+#define ETHR_ILCKD__(X) _Interlocked ## X
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## _acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## _rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+
+/*
+ * _InterlockedCompareExchange64() is required. The others may not
+ * be available; if they are not, we generate them from it.
+ */
+#pragma intrinsic(_InterlockedCompareExchange64)
+
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) *(PTR)
+#else
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) (__int64) 0
+#endif
+
+#define ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, PTR, NEW, ACT, EXP, OPS, RET) \
+{ \
+ __int64 NEW, ACT, EXP; \
+ ACT = ETHR_OWN_ILCKD_INIT_VAL__(PTR); \
+ do { \
+ EXP = ACT; \
+ { OPS; } \
+ ACT = _InterlockedCompareExchange64(PTR, NEW, EXP); \
+ } while (ACT != EXP); \
+ return RET; \
+}
+
+#define ETHR_OWN_ILCKD_1_IMPL__(FUNC, NEW, ACT, EXP, OPS, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+#define ETHR_OWN_ILCKD_2_IMPL__(FUNC, NEW, ACT, EXP, OPS, ARG, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr, __int64 ARG) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+
+#ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64
+#pragma intrinsic(_InterlockedDecrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedDecrement64, new, act, exp,
+ new = act - 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64
+#pragma intrinsic(_InterlockedIncrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedIncrement64, new, act, exp,
+ new = act + 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, new, act, exp,
+ new = act + arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+#pragma intrinsic(_InterlockedExchange64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchange64, new, act, exp,
+ new = arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDAND64
+#pragma intrinsic(_InterlockedAnd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedAnd64, new, act, exp,
+ new = act & arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDOR64
+#pragma intrinsic(_InterlockedOr64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedOr64, new, act, exp,
+ new = act | arg, arg, act)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd64_acq)
+#pragma intrinsic(_InterlockedIncrement64_acq)
+#pragma intrinsic(_InterlockedDecrement64_rel)
+#pragma intrinsic(_InterlockedCompareExchange64_acq)
+#pragma intrinsic(_InterlockedCompareExchange64_rel)
+#endif
+
+#define ETHR_ILCKD__(X) _Interlocked ## X ## 64
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64_acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64_rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+
+#else
+#error "Unsupported integer size"
+#endif
typedef struct {
- volatile LONG value;
-} ethr_native_atomic_t;
+ volatile ETHR_AINT_T__ value;
+} ETHR_ATMC_T__;
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile LONG x___ = 0; \
- _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->value;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
+#else
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+#endif
}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
#else
- (void) InterlockedExchange(&var->value, (LONG) i);
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
return var->value;
#else
- return InterlockedExchangeAdd(&var->value, (LONG) 0);
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+ (void) ETHR_ILCKD__(ExchangeAdd)(&var->value, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
- return tmp + i;
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, i) + i;
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) InterlockedIncrement(&var->value);
+ (void) ETHR_ILCKD__(Increment)(&var->value);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) InterlockedDecrement(&var->value);
+ (void) ETHR_ILCKD__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return (long) InterlockedIncrement(&var->value);
+ return ETHR_ILCKD__(Increment)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return (long) InterlockedDecrement(&var->value);
+ return ETHR_ILCKD__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) _InterlockedAnd(&var->value, mask);
+ return ETHR_ILCKD__(And)(&var->value, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) _InterlockedOr(&var->value, mask);
+ return ETHR_ILCKD__(Or)(&var->value, mask);
}
-
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+ return ETHR_ILCKD__(CompareExchange)(&var->value, new, old);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
- return (long) InterlockedExchange(&var->value, (LONG) new);
+ return ETHR_ILCKD__(Exchange)(&var->value, new);
}
/*
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
- return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_AINT_T__ val = var->value;
+ ETHR_COMPILER_BARRIER;
+ return val;
#else
- return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+ return ETHR_ILCKD_ACQ__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
- return (long) InterlockedIncrementAcquire(&var->value);
-#else
- return (long) InterlockedIncrement(&var->value);
-#endif
+ return ETHR_ILCKD_ACQ__(Increment)(&var->value);
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- (void) InterlockedExchange(&var->value, (LONG) i);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->value = i;
+#else
+ (void) ETHR_ILCKD_REL__(Exchange)(&var->value, i);
+#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
- (void) InterlockedDecrementRelease(&var->value);
-#else
- (void) InterlockedDecrement(&var->value);
-#endif
+ (void) ETHR_ILCKD_REL__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
- return (long) InterlockedDecrementRelease(&var->value);
-#else
- return (long) InterlockedDecrement(&var->value);
-#endif
+ return ETHR_ILCKD_REL__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
- return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
-#else
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+ return ETHR_ILCKD_ACQ__(CompareExchange)(&var->value, new, old);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
-
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
- return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
-#else
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+ return ETHR_ILCKD_REL__(CompareExchange)(&var->value, new, old);
}
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif
+#undef ETHR_ILCKD__
+#undef ETHR_ILCKD_ACQ__
+#undef ETHR_ILCKD_REL__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#undef ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+
+#endif /* _MSC_VER */
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
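/*
 * [Editorial sketch, not part of the patch] The ETHR_OWN_ILCKD_*
 * machinery above synthesizes any missing 64-bit interlocked intrinsic
 * from the one that is mandatory, _InterlockedCompareExchange64().
 * Expanded by hand for ExchangeAdd it is equivalent to the following
 * (the plain initial read is a simplification; on targets where 64-bit
 * loads may tear, the patch primes the loop through a CAS instead):
 */
static __forceinline __int64
sketch_ExchangeAdd64(__int64 volatile *ptr, __int64 arg)
{
    __int64 exp, act = *ptr;                 /* first guess at current value */
    do {
        exp = act;                           /* value we expect to find      */
        act = _InterlockedCompareExchange64(ptr, exp + arg, exp);
    } while (act != exp);                    /* lost a race: retry           */
    return act;                              /* old value, like the real op  */
}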
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
index af57c20f91..598816b2c6 100644
--- a/erts/include/internal/win/ethr_event.h
+++ b/erts/include/internal/win/ethr_event.h
@@ -21,22 +21,24 @@
* Author: Rickard Green
*/
-#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
-#define ETHR_EVENT_OFF__ ((LONG) 1)
-#define ETHR_EVENT_ON__ ((LONG) 0)
+#define ETHR_EVENT_OFF_WAITER__ ((long) -1)
+#define ETHR_EVENT_OFF__ ((long) 1)
+#define ETHR_EVENT_ON__ ((long) 0)
typedef struct {
- volatile LONG state;
+ volatile long state;
HANDLE handle;
} ethr_event;
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+#pragma intrinsic(_InterlockedExchange)
+
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- /* InterlockedExchange() imply a full memory barrier which is important */
- LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ /* _InterlockedExchange() implies a full memory barrier, which is important */
+ long state = _InterlockedExchange(&e->state, ETHR_EVENT_ON__);
if (state == ETHR_EVENT_OFF_WAITER__) {
if (!SetEvent(e->handle))
ETHR_FATAL_ERROR__(ethr_win_get_errno__());
@@ -46,7 +48,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- /* InterlockedExchange() imply a full memory barrier which is important */
+ /* _InterlockedExchange() implies a full memory barrier, which is important */
- InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+ _InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
}
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
index b52710f6a3..c01b17cf14 100644
--- a/erts/include/internal/win/ethread.h
+++ b/erts/include/internal/win/ethread.h
@@ -25,7 +25,11 @@
#ifndef ETHREAD_WIN_H__
#define ETHREAD_WIN_H__
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
#endif
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index 0d3181cace..757b3b24e2 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -283,6 +283,7 @@ endif
ETHR_THR_LIB_BASE_DIR=@ETHR_THR_LIB_BASE_DIR@
ifneq ($(strip $(ETHR_LIB_NAME)),)
ETHREAD_LIB_SRC=common/ethr_aux.c \
+ common/ethr_atomics.c \
common/ethr_mutex.c \
common/ethr_cbf.c \
$(ETHR_THR_LIB_BASE_DIR)/ethread.c \
@@ -381,6 +382,11 @@ $(ERTS_LIB): $(ERTS_LIB_OBJS)
# Object files
#
+ifeq ($(TYPE)-@GCC@,debug-yes)
+$(r_OBJ_DIR)/ethr_aux.o: common/ethr_aux.c
+ $(CC) $(THR_DEFS) $(CFLAGS) -Wno-unused-function $(INCLUDES) -c $< -o $@
+endif
+
$(r_OBJ_DIR)/%.o: common/%.c
$(CC) $(THR_DEFS) $(CFLAGS) $(INCLUDES) -c $< -o $@
@@ -445,6 +451,7 @@ INTERNAL_RELEASE_INCLUDES= \
$(ERTS_INCL_INT)/ethread.h \
$(ERTS_INCL_INT)/ethr_mutex.h \
$(ERTS_INCL_INT)/ethr_optimized_fallbacks.h \
+ $(ERTS_INCL_INT)/ethr_atomics.h \
$(ERTS_INCL_INT)/$(TARGET)/ethread.mk \
$(ERTS_INCL_INT)/$(TARGET)/erts_internal.mk \
$(ERTS_INCL_INT)/$(TARGET)/ethread_header_config.h \
diff --git a/erts/lib_src/common/ethr_atomics.c b/erts/lib_src/common/ethr_atomics.c
new file mode 100644
index 0000000000..94557d904a
--- /dev/null
+++ b/erts/lib_src/common/ethr_atomics.c
@@ -0,0 +1,402 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: The ethread atomic API
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define ETHR_INLINE_FUNC_NAME_(X) X ## __
+#define ETHR_ATOMIC_IMPL__
+
+#include "ethread.h"
+#include "ethr_internal.h"
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+#endif
+
+int
+ethr_init_atomics(void)
+{
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+ {
+ int i;
+ for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
+ int res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
+ if (res != 0)
+ return res;
+ }
+ }
+#endif
+ return 0;
+}
+
+/*
+ * --- Pointer size atomics ---------------------------------------------------
+ */
+
+ethr_sint_t *
+ethr_atomic_addr(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(var);
+ return ethr_atomic_addr__(var);
+}
+
+void
+ethr_atomic_init(ethr_atomic_t *var, ethr_sint_t i)
+{
+ ETHR_ASSERT(var);
+ ethr_atomic_init__(var, i);
+}
+
+void
+ethr_atomic_set(ethr_atomic_t *var, ethr_sint_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set__(var, i);
+}
+
+ethr_sint_t
+ethr_atomic_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read__(var);
+}
+
+ethr_sint_t
+ethr_atomic_add_read(ethr_atomic_t *var, ethr_sint_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_add_read__(var, incr);
+}
+
+ethr_sint_t
+ethr_atomic_inc_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read__(var);
+}
+
+ethr_sint_t
+ethr_atomic_dec_read(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read__(var);
+}
+
+void
+ethr_atomic_add(ethr_atomic_t *var, ethr_sint_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_add__(var, incr);
+}
+
+void
+ethr_atomic_inc(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_inc__(var);
+}
+
+void
+ethr_atomic_dec(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec__(var);
+}
+
+ethr_sint_t
+ethr_atomic_read_band(ethr_atomic_t *var, ethr_sint_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_band__(var, mask);
+}
+
+ethr_sint_t
+ethr_atomic_read_bor(ethr_atomic_t *var, ethr_sint_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_bor__(var, mask);
+}
+
+ethr_sint_t
+ethr_atomic_xchg(ethr_atomic_t *var, ethr_sint_t new)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_xchg__(var, new);
+}
+
+ethr_sint_t
+ethr_atomic_cmpxchg(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t expected)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg__(var, new, expected);
+}
+
+ethr_sint_t
+ethr_atomic_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_read_acqb__(var);
+}
+
+ethr_sint_t
+ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_inc_read_acqb__(var);
+}
+
+void
+ethr_atomic_set_relb(ethr_atomic_t *var, ethr_sint_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_set_relb__(var, i);
+}
+
+void
+ethr_atomic_dec_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic_dec_relb__(var);
+}
+
+ethr_sint_t
+ethr_atomic_dec_read_relb(ethr_atomic_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_dec_read_relb__(var);
+}
+
+ethr_sint_t
+ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_acqb__(var, new, exp);
+}
+
+ethr_sint_t
+ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic_cmpxchg_relb__(var, new, exp);
+}
+
+
+/*
+ * --- 32-bit atomics ---------------------------------------------------------
+ */
+
+ethr_sint32_t *
+ethr_atomic32_addr(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(var);
+ return ethr_atomic32_addr__(var);
+}
+
+void
+ethr_atomic32_init(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+ ETHR_ASSERT(var);
+ ethr_atomic32_init__(var, i);
+}
+
+void
+ethr_atomic32_set(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_set__(var, i);
+}
+
+ethr_sint32_t
+ethr_atomic32_read(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_add_read(ethr_atomic32_t *var, ethr_sint32_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_add_read__(var, incr);
+}
+
+ethr_sint32_t
+ethr_atomic32_inc_read(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_inc_read__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_dec_read(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_dec_read__(var);
+}
+
+void
+ethr_atomic32_add(ethr_atomic32_t *var, ethr_sint32_t incr)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_add__(var, incr);
+}
+
+void
+ethr_atomic32_inc(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_inc__(var);
+}
+
+void
+ethr_atomic32_dec(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_dec__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_read_band(ethr_atomic32_t *var, ethr_sint32_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read_band__(var, mask);
+}
+
+ethr_sint32_t
+ethr_atomic32_read_bor(ethr_atomic32_t *var, ethr_sint32_t mask)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read_bor__(var, mask);
+}
+
+ethr_sint32_t
+ethr_atomic32_xchg(ethr_atomic32_t *var, ethr_sint32_t new)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_xchg__(var, new);
+}
+
+ethr_sint32_t
+ethr_atomic32_cmpxchg(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_cmpxchg__(var, new, expected);
+}
+
+ethr_sint32_t
+ethr_atomic32_read_acqb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_read_acqb__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_inc_read_acqb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_inc_read_acqb__(var);
+}
+
+void
+ethr_atomic32_set_relb(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_set_relb__(var, i);
+}
+
+void
+ethr_atomic32_dec_relb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ ethr_atomic32_dec_relb__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_dec_read_relb(ethr_atomic32_t *var)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_dec_read_relb__(var);
+}
+
+ethr_sint32_t
+ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_cmpxchg_acqb__(var, new, exp);
+}
+
+ethr_sint32_t
+ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+ ETHR_ASSERT(!ethr_not_inited__);
+ ETHR_ASSERT(var);
+ return ethr_atomic32_cmpxchg_relb__(var, new, exp);
+}
+
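Every wrapper added above follows the same pattern: a linkable, assertion-checked public function that delegates to an inline implementation with a double-underscore suffix. A minimal sketch of that pattern, with hypothetical my_-prefixed names and a GCC/Clang builtin standing in for the native atomic op:

#include <assert.h>

typedef struct { volatile long counter; } my_atomic_t;

static inline long my_atomic_inc_read__(my_atomic_t *var)
{
    return __sync_add_and_fetch(&var->counter, 1);  /* GCC/Clang builtin */
}

long my_atomic_inc_read(my_atomic_t *var)
{
    assert(var);                        /* plays the role of ETHR_ASSERT(var) */
    return my_atomic_inc_read__(var);
}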
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
index 4db4cffd3a..2c3e25a805 100644
--- a/erts/lib_src/common/ethr_aux.c
+++ b/erts/lib_src/common/ethr_aux.c
@@ -31,7 +31,10 @@
#define ETHR_INLINE_FUNC_NAME_(X) X ## __
#define ETHR_AUX_IMPL__
-
+#define ETHR_ATOMIC_IMPL__ /* Needed in order to pull in
+ native atomic implementations
+ for optimized fallbacks of
+ spinlocks and rwspinlocks */
#include "ethread.h"
#include "ethr_internal.h"
#include <string.h>
@@ -51,10 +54,6 @@ int ethr_not_inited__ = 1;
ethr_memory_allocators ethr_mem__ = ETHR_MEM_ALLOCS_DEF_INITER__;
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-#endif
-
void *(*ethr_thr_prepare_func__)(void) = NULL;
void (*ethr_thr_parent_func__)(void *) = NULL;
void (*ethr_thr_child_func__)(void *) = NULL;
@@ -138,16 +137,9 @@ ethr_init_common__(ethr_init_data *id)
#endif
ethr_max_stack_size__ = ETHR_B2KW(ethr_max_stack_size__);
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
- {
- int i;
- for (i = 0; i < (1 << ETHR_ATOMIC_ADDR_BITS); i++) {
- res = ethr_spinlock_init(&ethr_atomic_protection__[i].u.lck);
- if (res != 0)
- return res;
- }
- }
-#endif
+ res = ethr_init_atomics();
+ if (res != 0)
+ return res;
res = ethr_mutex_lib_init(erts_get_cpu_configured(ethr_cpu_info__));
if (res != 0)
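The removed block above shows the old fallback for targets without native atomics: each atomic variable was protected by one of 1 << ETHR_ATOMIC_ADDR_BITS spinlocks, selected by hashing the variable's address; the new code moves that setup behind ethr_init_atomics(). A sketch of that address-hashed lock striping, with hypothetical names and pthread spinlocks standing in for ethr_spinlock_t:

#include <pthread.h>
#include <stdint.h>

#define ADDR_BITS 10                    /* plays the role of ETHR_ATOMIC_ADDR_BITS */
#define N_LOCKS   (1 << ADDR_BITS)

/* each entry must be set up with pthread_spin_init() at start-up,
   as the removed loop did for ethr_atomic_protection__ */
static pthread_spinlock_t protection[N_LOCKS];

static pthread_spinlock_t *lock_for(void *addr)
{
    uintptr_t h = ((uintptr_t) addr) >> 4;  /* low bits are zero by alignment */
    return &protection[h & (N_LOCKS - 1)];
}

long fallback_add_read(long *var, long incr)
{
    pthread_spinlock_t *lck = lock_for(var);
    long res;
    pthread_spin_lock(lck);
    res = *var += incr;
    pthread_spin_unlock(lck);
    return res;
}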
@@ -279,14 +271,6 @@ typedef union {
static ethr_spinlock_t ts_ev_alloc_lock;
static ethr_ts_event *free_ts_ev;
-#if SIZEOF_VOID_P == SIZEOF_INT
-typedef unsigned int EthrPtrSzUInt;
-#elif SIZEOF_VOID_P == SIZEOF_LONG
-typedef unsigned long EthrPtrSzUInt;
-#else
-#error No pointer sized integer type
-#endif
-
static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
{
int i;
@@ -295,16 +279,16 @@ static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
+ ETHR_CACHE_LINE_SIZE);
if (!atsev)
return NULL;
- if ((((EthrPtrSzUInt) atsev) & ETHR_CACHE_LINE_MASK) == 0)
+ if ((((ethr_uint_t) atsev) & ETHR_CACHE_LINE_MASK) == 0)
atsev = ((ethr_aligned_ts_event *)
- ((((EthrPtrSzUInt) atsev) & ~ETHR_CACHE_LINE_MASK)
+ ((((ethr_uint_t) atsev) & ~ETHR_CACHE_LINE_MASK)
+ ETHR_CACHE_LINE_SIZE));
for (i = 1; i < size; i++) {
atsev[i-1].ts_ev.next = &atsev[i].ts_ev;
- ethr_atomic_init(&atsev[i-1].ts_ev.uaflgs, 0);
+ ethr_atomic32_init(&atsev[i-1].ts_ev.uaflgs, 0);
atsev[i-1].ts_ev.iflgs = 0;
}
- ethr_atomic_init(&atsev[size-1].ts_ev.uaflgs, 0);
+ ethr_atomic32_init(&atsev[size-1].ts_ev.uaflgs, 0);
atsev[size-1].ts_ev.iflgs = 0;
atsev[size-1].ts_ev.next = NULL;
if (endpp)
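ts_event_pool() above over-allocates by one cache line so the event array can be placed on a cache-line boundary, keeping each aligned event from sharing a line with its neighbour. A minimal sketch of that alignment arithmetic, with hypothetical constants standing in for ETHR_CACHE_LINE_SIZE/MASK:

#include <stdint.h>
#include <stdlib.h>

#define CACHE_LINE_SIZE 64u
#define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1)

void *alloc_cache_aligned(size_t size, void **to_free)
{
    char *p = malloc(size + CACHE_LINE_SIZE);
    if (!p)
        return NULL;
    *to_free = p;                       /* keep the raw pointer for free() */
    if (((uintptr_t) p) & CACHE_LINE_MASK)
        p = (char *) ((((uintptr_t) p) & ~(uintptr_t) CACHE_LINE_MASK)
                      + CACHE_LINE_SIZE);
    return p;
}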
@@ -466,170 +450,6 @@ int ethr_get_main_thr_status(int *on)
return 0;
}
-
-/* Atomics */
-
-void
-ethr_atomic_init(ethr_atomic_t *var, long i)
-{
- ETHR_ASSERT(var);
- ethr_atomic_init__(var, i);
-}
-
-void
-ethr_atomic_set(ethr_atomic_t *var, long i)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_set__(var, i);
-}
-
-long
-ethr_atomic_read(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read__(var);
-}
-
-
-long
-ethr_atomic_add_read(ethr_atomic_t *var, long incr)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_add_read__(var, incr);
-}
-
-long
-ethr_atomic_inc_read(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_inc_read__(var);
-}
-
-long
-ethr_atomic_dec_read(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_dec_read__(var);
-}
-
-void
-ethr_atomic_add(ethr_atomic_t *var, long incr)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_add__(var, incr);
-}
-
-void
-ethr_atomic_inc(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_inc__(var);
-}
-
-void
-ethr_atomic_dec(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_dec__(var);
-}
-
-long
-ethr_atomic_read_band(ethr_atomic_t *var, long mask)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read_band__(var, mask);
-}
-
-long
-ethr_atomic_read_bor(ethr_atomic_t *var, long mask)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read_bor__(var, mask);
-}
-
-long
-ethr_atomic_xchg(ethr_atomic_t *var, long new)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_xchg__(var, new);
-}
-
-long
-ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg__(var, new, expected);
-}
-
-long
-ethr_atomic_read_acqb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_read_acqb__(var);
-}
-
-long
-ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_inc_read_acqb__(var);
-}
-
-void
-ethr_atomic_set_relb(ethr_atomic_t *var, long i)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_set_relb__(var, i);
-}
-
-void
-ethr_atomic_dec_relb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- ethr_atomic_dec_relb__(var);
-}
-
-long
-ethr_atomic_dec_read_relb(ethr_atomic_t *var)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_dec_read_relb__(var);
-}
-
-long
-ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, long new, long exp)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg_acqb__(var, new, exp);
-}
-
-long
-ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, long new, long exp)
-{
- ETHR_ASSERT(!ethr_not_inited__);
- ETHR_ASSERT(var);
- return ethr_atomic_cmpxchg_relb__(var, new, exp);
-}
-
-
/* Spinlocks and rwspinlocks */
int
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
index a2fbf3a454..2ddef32dfc 100644
--- a/erts/lib_src/common/ethr_mutex.c
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -206,16 +206,16 @@ static void hard_debug_chk_q__(struct ethr_mutex_base_ *, int);
#ifdef ETHR_USE_OWN_RWMTX_IMPL__
static void
rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint32_t initial,
int q_locked);
static void
rwmutex_unlock_wake(ethr_rwmutex *rwmtx,
int have_w,
- long initial,
+ ethr_sint32_t initial,
int transfer_read_lock);
static int
rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint32_t initial,
ethr_ts_event *tse,
int start_next_ix,
int check_before_try,
@@ -242,12 +242,12 @@ rwmutex_freqread_rdrs_add(ethr_rwmutex *rwmtx,
int inc)
{
if (type == ETHR_RWMUTEX_TYPE_FREQUENT_READ || ix == 0)
- ethr_atomic_add(&rwmtx->tdata.ra[ix].data.readers, inc);
+ ethr_atomic32_add(&rwmtx->tdata.ra[ix].data.readers, inc);
else {
ETHR_ASSERT(type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
ETHR_ASSERT(inc == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 1);
}
}
@@ -258,15 +258,15 @@ rwmutex_freqread_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_inc:
- ethr_atomic_inc(&rwmtx->tdata.ra[ix].data.readers);
+ ethr_atomic32_inc(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_inc;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 1);
}
}
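The rwmutex_freqread_rdrs_* helpers above and below maintain one reader counter per slot (indexed by the thread's reader-group or mutex index), so concurrent readers of a frequent-read rwmutex increment different cache lines; only a writer has to sum all slots. A minimal sketch of that distributed counting in C11 atomics, with hypothetical sizes:

#include <stdatomic.h>

#define N_SLOTS    16
#define CACHE_LINE 64

struct rdr_slot {
    _Alignas(CACHE_LINE) atomic_int readers;  /* one counter per cache line */
};

static struct rdr_slot slots[N_SLOTS];

void rlock_enter(int ix) { atomic_fetch_add(&slots[ix].readers, 1); }
void rlock_leave(int ix) { atomic_fetch_sub(&slots[ix].readers, 1); }

int any_readers(void)                   /* writer-side check, sums all slots */
{
    for (int i = 0; i < N_SLOTS; i++)
        if (atomic_load(&slots[i].readers) != 0)
            return 1;
    return 0;
}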
@@ -279,64 +279,65 @@ rwmutex_freqread_rdrs_dec(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_dec:
- ethr_atomic_dec(&rwmtx->tdata.ra[ix].data.readers);
+ ethr_atomic32_dec(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_dec;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 0);
}
}
#endif
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint32_t
rwmutex_freqread_rdrs_dec_read(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
{
int ix;
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_dec_read:
- return ethr_atomic_dec_read(&rwmtx->tdata.ra[ix].data.readers);
+ return ethr_atomic32_dec_read(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_dec_read;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
- return (long) 0;
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic32_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint32_t) 0);
+ return (ethr_sint32_t) 0;
}
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint32_t
rwmutex_freqread_rdrs_dec_read_relb(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
{
int ix;
if (rwmtx->type == ETHR_RWMUTEX_TYPE_FREQUENT_READ) {
ix = tse->rgix;
atomic_dec_read:
- return ethr_atomic_dec_read_relb(&rwmtx->tdata.ra[ix].data.readers);
+ return ethr_atomic32_dec_read_relb(&rwmtx->tdata.ra[ix].data.readers);
}
else {
ix = tse->mtix;
if (ix == 0)
goto atomic_dec_read;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
- ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set_relb(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
- return (long) 0;
+ ETHR_ASSERT(ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
+ ethr_atomic32_set_relb(&rwmtx->tdata.ra[ix].data.readers,
+ (ethr_sint32_t) 0);
+ return (ethr_sint32_t) 0;
}
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint32_t
rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
{
- long res = ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers);
+ ethr_sint32_t res = ethr_atomic32_read(&rwmtx->tdata.ra[ix].data.readers);
#ifdef ETHR_DEBUG
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
@@ -402,19 +403,19 @@ static void
event_wait(struct ethr_mutex_base_ *mtxb,
ethr_ts_event *tse,
int spincount,
- long type,
+ ethr_sint32_t type,
int is_rwmtx,
int is_freq_read)
{
int locked = 0;
- long act;
+ ethr_sint32_t act;
int need_try_complete_runlock = 0;
int transfer_read_lock = 0;
/* Need to enqueue and wait... */
tse->uflgs = type;
- ethr_atomic_set(&tse->uaflgs, type);
+ ethr_atomic32_set(&tse->uaflgs, type);
ETHR_MTX_Q_LOCK(&mtxb->qlck);
locked = 1;
@@ -423,7 +424,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
hard_debug_chk_q__(mtxb, is_rwmtx);
#endif
- act = ethr_atomic_read(&mtxb->flgs);
+ act = ethr_atomic32_read(&mtxb->flgs);
if (act & type) {
@@ -453,7 +454,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
/* Set wait bit */
while (1) {
- long new, exp = act;
+ ethr_sint32_t new, exp = act;
need_try_complete_runlock = 0;
transfer_read_lock = 0;
@@ -484,7 +485,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
}
}
- act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs, new, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&mtxb->flgs, new, exp);
if (exp == act) {
if (new & type) {
act = new;
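The hunk above is the standard cmpxchg retry loop used throughout this file: snapshot the flag word, compute the desired value, attempt the exchange, and on failure use the returned actual value as the next snapshot. A minimal sketch of the pattern in C11 atomics; set_wait_bit() and WAIT_FLG are hypothetical:

#include <stdatomic.h>
#include <stdint.h>

#define WAIT_FLG ((int32_t) 1)

void set_wait_bit(_Atomic int32_t *flgs)
{
    int32_t exp = atomic_load(flgs);
    for (;;) {
        int32_t new = exp | WAIT_FLG;
        /* on failure, exp is updated to the currently stored value */
        if (atomic_compare_exchange_weak(flgs, &exp, new))
            break;
    }
}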
@@ -559,7 +560,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
while (1) {
ethr_event_reset(&tse->event);
- act = ethr_atomic_read_acqb(&tse->uaflgs);
+ act = ethr_atomic32_read_acqb(&tse->uaflgs);
if (!act)
goto done; /* Got it */
@@ -567,7 +568,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
ethr_event_swait(&tse->event, spincount);
/* swait result: 0 || EINTR */
- act = ethr_atomic_read_acqb(&tse->uaflgs);
+ act = ethr_atomic32_read_acqb(&tse->uaflgs);
if (!act)
goto done; /* Got it */
}
@@ -587,7 +588,7 @@ wake_writer(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
dequeue(&mtxb->q, tse, tse);
ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_RWMTX_W_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_RWMTX_W_WAIT_FLG__);
#ifdef ETHR_MTX_HARD_DEBUG_WSQ
mtxb->ws--;
#endif
@@ -597,7 +598,7 @@ wake_writer(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
- ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_atomic32_set(&tse->uaflgs, 0);
ethr_event_set(&tse->event);
}
@@ -649,11 +650,11 @@ int check_readers_array(ethr_rwmutex *rwmtx,
static ETHR_INLINE void
write_lock_wait(struct ethr_mutex_base_ *mtxb,
- long initial,
+ ethr_sint32_t initial,
int is_rwmtx,
int is_freq_read)
{
- long act = initial;
+ ethr_sint32_t act = initial;
int scnt, start_scnt;
ethr_ts_event *tse = NULL;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -706,13 +707,13 @@ write_lock_wait(struct ethr_mutex_base_ *mtxb,
until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
ETHR_YIELD();
}
- act = ethr_atomic_read(&mtxb->flgs);
+ act = ethr_atomic32_read(&mtxb->flgs);
scnt--;
}
- act = ethr_atomic_cmpxchg_acqb(&mtxb->flgs,
- ETHR_RWMTX_W_FLG__,
- 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtxb->flgs,
+ ETHR_RWMTX_W_FLG__,
+ 0);
if (act == 0)
goto done; /* Got it */
}
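write_lock_wait() above spins on the flag word a bounded number of times, yielding the processor every ETHR_YIELD_AFTER_BUSY_LOOPS iterations, before falling back to enqueueing on the mutex. A rough sketch of that spin-then-yield structure, with hypothetical limits:

#include <sched.h>
#include <stdatomic.h>

#define SPIN_LIMIT        1000
#define YIELD_AFTER_LOOPS 50    /* plays the role of ETHR_YIELD_AFTER_BUSY_LOOPS */

int try_spin(_Atomic int *locked)
{
    int until_yield = YIELD_AFTER_LOOPS;
    for (int scnt = SPIN_LIMIT; scnt > 0; scnt--) {
        int exp = 0;
        if (atomic_compare_exchange_strong(locked, &exp, 1))
            return 1;                   /* got the lock */
        if (--until_yield == 0) {
            until_yield = YIELD_AFTER_LOOPS;
            sched_yield();              /* plays the role of ETHR_YIELD() */
        }
    }
    return 0;                           /* caller must block instead */
}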
@@ -756,16 +757,16 @@ mtxb_init(struct ethr_mutex_base_ *mtxb,
}
mtxb->q = NULL;
- ethr_atomic_init(&mtxb->flgs, 0);
+ ethr_atomic32_init(&mtxb->flgs, 0);
return ETHR_MTX_QLOCK_INIT(&mtxb->qlck);
}
static int
mtxb_destroy(struct ethr_mutex_base_ *mtxb)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_Q_LOCK(&mtxb->qlck);
- act = ethr_atomic_read(&mtxb->flgs);
+ act = ethr_atomic32_read(&mtxb->flgs);
ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
if (act != 0)
return EINVAL;
@@ -831,13 +832,13 @@ ethr_mutex_destroy(ethr_mutex *mtx)
}
void
-ethr_mutex_lock_wait__(ethr_mutex *mtx, long initial)
+ethr_mutex_lock_wait__(ethr_mutex *mtx, ethr_sint32_t initial)
{
write_lock_wait(&mtx->mtxb, initial, 0, 0);
}
void
-ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
+ethr_mutex_unlock_wake__(ethr_mutex *mtx, ethr_sint32_t initial)
{
ethr_ts_event *tse;
@@ -845,7 +846,7 @@ ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
tse = mtx->mtxb.q;
ETHR_ASSERT(tse);
- ETHR_ASSERT(ethr_atomic_read(&mtx->mtxb.flgs)
+ ETHR_ASSERT(ethr_atomic32_read(&mtx->mtxb.flgs)
== (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
ETHR_ASSERT(initial & ETHR_RWMTX_W_WAIT_FLG__);
ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
@@ -855,7 +856,7 @@ ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
* mtxb->flgs; otherwise, we need to clear the write wait bit...
*/
if (tse->next == mtx->mtxb.q)
- ethr_atomic_set(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__);
+ ethr_atomic32_set(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__);
wake_writer(&mtx->mtxb, 0);
}
@@ -865,7 +866,7 @@ ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
static void
enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
{
- long act;
+ ethr_sint32_t act;
/*
* `ethr_cond_signal()' and `ethr_cond_broadcast()' end up here. If `mtx'
@@ -894,7 +895,7 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
}
#endif
- act = ethr_atomic_read(&mtx->mtxb.flgs);
+ act = ethr_atomic32_read(&mtx->mtxb.flgs);
ETHR_ASSERT(act == 0
|| act == ETHR_RWMTX_W_FLG__
|| act == (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__));
@@ -902,10 +903,10 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
/* The normal sane case */
if (!(act & ETHR_RWMTX_W_WAIT_FLG__)) {
ETHR_ASSERT(!mtx->mtxb.q);
- act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs,
- (ETHR_RWMTX_W_FLG__
- | ETHR_RWMTX_W_WAIT_FLG__),
- ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg(&mtx->mtxb.flgs,
+ (ETHR_RWMTX_W_FLG__
+ | ETHR_RWMTX_W_WAIT_FLG__),
+ ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__) {
/*
* Sigh... this wasn't so sane after all, since the mutex was
@@ -937,14 +938,14 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
multi = tse_start != tse_end;
while (1) {
- long new, exp = act;
+ ethr_sint32_t new, exp = act;
if (multi || (act & ETHR_RWMTX_W_FLG__))
new = ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__;
else
new = ETHR_RWMTX_W_FLG__;
- act = ethr_atomic_cmpxchg(&mtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&mtx->mtxb.flgs, new, exp);
if (exp == act) {
ETHR_ASSERT(!mtx->mtxb.q);
if (act & ETHR_RWMTX_W_FLG__) {
@@ -972,7 +973,7 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
ETHR_MTX_HARD_DEBUG_CHK_Q(mtx);
ETHR_MTX_Q_UNLOCK(&mtx->mtxb.qlck);
- ethr_atomic_set(&tse_start->uaflgs, 0);
+ ethr_atomic32_set(&tse_start->uaflgs, 0);
ethr_event_set(&tse_start->event);
}
break;
@@ -1063,9 +1064,9 @@ ethr_cond_signal(ethr_cond *cnd)
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs) == ETHR_CND_WAIT_FLG__);
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_CND_WAIT_FLG__);
- ethr_atomic_set(&tse->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+ ethr_atomic32_set(&tse->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
dequeue(&cnd->q, tse, tse);
@@ -1116,10 +1117,11 @@ ethr_cond_broadcast(ethr_cond *cnd)
/* The normal case */
ETHR_ASSERT(tse_tmp->uflgs == ETHR_RWMTX_W_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse_tmp->uaflgs)
+ ETHR_ASSERT(ethr_atomic32_read(&tse_tmp->uaflgs)
== ETHR_CND_WAIT_FLG__);
- ethr_atomic_set(&tse_tmp->uaflgs, ETHR_RWMTX_W_WAIT_FLG__);
+ ethr_atomic32_set(&tse_tmp->uaflgs,
+ ETHR_RWMTX_W_WAIT_FLG__);
}
else {
/* Should be very unusual */
@@ -1172,7 +1174,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
tse->udata = (void *) mtx;
tse->uflgs = ETHR_RWMTX_W_WAIT_FLG__; /* Prep for mutex lock op */
- ethr_atomic_set(&tse->uaflgs, ETHR_CND_WAIT_FLG__);
+ ethr_atomic32_set(&tse->uaflgs, ETHR_CND_WAIT_FLG__);
ETHR_MTX_Q_LOCK(&cnd->qlck);
@@ -1185,11 +1187,11 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
/* Wait */
woken = 0;
while (1) {
- long act;
+ ethr_sint32_t act;
ethr_event_reset(&tse->event);
- act = ethr_atomic_read_acqb(&tse->uaflgs);
+ act = ethr_atomic32_read_acqb(&tse->uaflgs);
if (!act)
break; /* Mtx locked */
@@ -1205,7 +1207,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
*/
if (act == ETHR_CND_WAIT_FLG__) {
ETHR_MTX_Q_LOCK(&cnd->qlck);
- act = ethr_atomic_read(&tse->uaflgs);
+ act = ethr_atomic32_read(&tse->uaflgs);
ETHR_ASSERT(act == ETHR_CND_WAIT_FLG__
|| act == ETHR_RWMTX_W_WAIT_FLG__);
/*
@@ -1407,7 +1409,7 @@ wake_readers(ethr_rwmutex *rwmtx, int rs)
rwmtx->rq_end = NULL;
ETHR_ASSERT(!rwmtx->mtxb.q
- || (ethr_atomic_read(&rwmtx->mtxb.q->uaflgs)
+ || (ethr_atomic32_read(&rwmtx->mtxb.q->uaflgs)
== ETHR_RWMTX_W_WAIT_FLG__));
ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
@@ -1418,7 +1420,7 @@ wake_readers(ethr_rwmutex *rwmtx, int rs)
#ifdef ETHR_DEBUG
ETHR_ASSERT(tse->uflgs == ETHR_RWMTX_R_WAIT_FLG__);
- ETHR_ASSERT(ethr_atomic_read(&tse->uaflgs)
+ ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs)
== ETHR_RWMTX_R_WAIT_FLG__);
drs++;
#endif
@@ -1426,7 +1428,7 @@ wake_readers(ethr_rwmutex *rwmtx, int rs)
tse_next = tse->next; /* we aren't allowed to read tse->next
after we have reset uaflgs */
- ethr_atomic_set(&tse->uaflgs, 0);
+ ethr_atomic32_set(&tse->uaflgs, 0);
ethr_event_set(&tse->event);
tse = tse_next;
}
@@ -1469,7 +1471,7 @@ int check_readers_array(ethr_rwmutex *rwmtx,
ETHR_MEMORY_BARRIER;
do {
- long act = rwmutex_freqread_rdrs_read(rwmtx, ix);
+ ethr_sint32_t act = rwmutex_freqread_rdrs_read(rwmtx, ix);
if (act != 0)
return EBUSY;
ix++;
@@ -1483,9 +1485,9 @@ int check_readers_array(ethr_rwmutex *rwmtx,
static void
rwmutex_freqread_rdrs_dec_chk_wakeup(ethr_rwmutex *rwmtx,
ethr_ts_event *tse,
- long initial)
+ ethr_sint32_t initial)
{
- long act = initial;
+ ethr_sint32_t act = initial;
if ((act & (ETHR_RWMTX_W_FLG__|
ETHR_RWMTX_R_ABRT_UNLCK_FLG__)) == 0) {
@@ -1515,7 +1517,7 @@ rwmutex_freqread_rdrs_dec_chk_wakeup(ethr_rwmutex *rwmtx,
if (!rwmtx->mtxb.q)
ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
else if (is_w_waiter(rwmtx->mtxb.q)) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
if ((act & ETHR_RWMTX_W_FLG__) == 0)
rwmutex_try_complete_runlock(rwmtx, act, tse, 1, 0, 0);
@@ -1525,7 +1527,7 @@ rwmutex_freqread_rdrs_dec_chk_wakeup(ethr_rwmutex *rwmtx,
* rwmutex_transfer_read_lock() will
* unlock Q lock.
*/
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act & ETHR_RWMTX_W_FLG__)
ETHR_MTX_Q_UNLOCK(&rwmtx->mtxb.qlck);
else
@@ -1539,7 +1541,7 @@ static void
rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
ethr_ts_event *tse)
{
- long act;
+ ethr_sint32_t act;
/*
* Restore failed increment
*/
@@ -1548,21 +1550,21 @@ rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
ETHR_MEMORY_BARRIER;
if (act == 0) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
rwmutex_freqread_rdrs_dec_chk_wakeup(rwmtx, tse, act);
}
}
static int
rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint32_t initial,
ethr_ts_event *tse,
int start_next_ix,
int check_before_try,
int try_write_lock)
{
ethr_ts_event *tse_tmp;
- long act = initial;
+ ethr_sint32_t act = initial;
int six, res, length;
ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
@@ -1606,15 +1608,15 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
restart:
while (1) {
- long exp = act;
- long new = act+1;
+ ethr_sint32_t exp = act;
+ ethr_sint32_t new = act+1;
ETHR_ASSERT((act & ETHR_RWMTX_R_ABRT_UNLCK_FLG__) == 0);
ETHR_ASSERT((act & ETHR_RWMTX_R_PEND_UNLCK_MASK__)
< ETHR_RWMTX_R_PEND_UNLCK_MASK__);
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (exp == act) {
act = new;
break;
@@ -1651,8 +1653,8 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
while (1) {
int finished_abort = 0;
- long exp = act;
- long new = act;
+ ethr_sint32_t exp = act;
+ ethr_sint32_t new = act;
new--;
if (act & ETHR_RWMTX_R_ABRT_UNLCK_FLG__) {
@@ -1668,7 +1670,7 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
ETHR_ASSERT(act & ETHR_RWMTX_R_PEND_UNLCK_MASK__);
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (exp == act) {
act = new;
if (act & ETHR_RWMTX_W_FLG__)
@@ -1702,9 +1704,9 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
tryrwlock:
/* Try to write lock it */
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__,
- 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__,
+ 0);
return act == 0 ? 0 : EBUSY;
}
@@ -1713,11 +1715,11 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
static ETHR_INLINE void
rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
/*
* Restore failed increment
*/
- act = ethr_atomic_dec_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_dec_read(&rwmtx->mtxb.flgs);
if ((act & ETHR_RWMTX_WAIT_FLGS__)
&& (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
rwmutex_unlock_wake(rwmtx, 0, act, 0);
@@ -1727,10 +1729,9 @@ rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
#endif
static void
-rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
- long initial)
+rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx, ethr_sint32_t initial)
{
- long act = initial, exp;
+ ethr_sint32_t act = initial, exp;
int scnt, start_scnt;
ethr_ts_event *tse = NULL;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -1746,7 +1747,7 @@ rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
#ifdef ETHR_RLOCK_WITH_INC_DEC
rwmutex_incdec_restore_failed_tryrlock(rwmtx);
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
#endif
while (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
@@ -1763,17 +1764,17 @@ rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
ETHR_YIELD();
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
scnt--;
}
exp = act;
#ifdef ETHR_RLOCK_WITH_INC_DEC
- act = ethr_atomic_inc_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_inc_read(&rwmtx->mtxb.flgs);
if ((act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) == 0)
goto done; /* Got it */
#else
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
if (act == exp)
goto done; /* Got it */
#endif
@@ -1792,19 +1793,19 @@ static int
rwmutex_freqread_rlock(ethr_rwmutex *rwmtx, ethr_ts_event *tse, int trylock)
{
int res = 0;
- long act;
+ ethr_sint32_t act;
rwmutex_freqread_rdrs_inc(rwmtx, tse);
ETHR_MEMORY_BARRIER;
- act = ethr_atomic_read_acqb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read_acqb(&rwmtx->mtxb.flgs);
if (act != ETHR_RWMTX_R_FLG__) {
int wake_other_readers;
while (1) {
- long exp, new;
+ ethr_sint32_t exp, new;
wake_other_readers = 0;
@@ -1846,7 +1847,7 @@ rwmutex_freqread_rlock(ethr_rwmutex *rwmtx, ethr_ts_event *tse, int trylock)
}
exp = act;
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, new, exp);
if (act == exp)
break;
}
@@ -1862,7 +1863,7 @@ static void
rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
ethr_ts_event *tse)
{
- long act;
+ ethr_sint32_t act;
int scnt, start_scnt;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -1875,7 +1876,7 @@ rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
while (1) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
while (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
if (scnt <= 0) {
@@ -1890,7 +1891,7 @@ rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
ETHR_YIELD();
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
scnt--;
}
@@ -1900,21 +1901,23 @@ rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
}
static void
-rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, ethr_sint32_t initial)
{
write_lock_wait(&rwmtx->mtxb, initial, 1, 0);
}
static void
-rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, ethr_sint32_t initial)
{
write_lock_wait(&rwmtx->mtxb, initial, 1, 1);
}
static ETHR_INLINE void
-rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, long act_initial)
+rwlock_wake_set_flags(ethr_rwmutex *rwmtx,
+ ethr_sint32_t new_initial,
+ ethr_sint32_t act_initial)
{
- long act, act_mask;
+ ethr_sint32_t act, act_mask;
int chk_abrt_flg;
ETHR_MEMORY_BARRIER;
@@ -1935,18 +1938,18 @@ rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, long act_initial)
#else
/* rs mask always zero */
ETHR_ASSERT((act_initial & ETHR_RWMTX_RS_MASK__) == 0);
- ethr_atomic_set(&rwmtx->mtxb.flgs, new_initial);
+ ethr_atomic32_set(&rwmtx->mtxb.flgs, new_initial);
return;
#endif
}
act = act_initial;
while (1) {
- long exp = act;
- long new = new_initial + (act & act_mask);
+ ethr_sint32_t exp = act;
+ ethr_sint32_t new = new_initial + (act & act_mask);
if (chk_abrt_flg && (act & act_mask))
new |= ETHR_RWMTX_R_ABRT_UNLCK_FLG__;
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (act == exp)
break;
exp = act;
@@ -1960,7 +1963,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
int have_w,
ethr_ts_event *tse)
{
- long exp, act, imask;
+ ethr_sint32_t exp, act, imask;
exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
@@ -1982,7 +1985,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
if (rwmtx->rq_end) {
exp |= ETHR_RWMTX_R_WAIT_FLG__;
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_ASSERT((exp & ~imask) == (act & ~imask));
ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
@@ -2001,7 +2004,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
imask |= ETHR_RWMTX_RS_MASK__;
}
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_ASSERT((exp & ~imask) == (act & ~imask));
ETHR_RWMTX_HARD_DEBUG_CHK_Q(rwmtx);
@@ -2012,9 +2015,11 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
#endif
static void
-rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
+rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx,
+ ethr_sint32_t initial,
+ int q_locked)
{
- long act = initial;
+ ethr_sint32_t act = initial;
if (!q_locked) {
ethr_ts_event *tse;
@@ -2022,7 +2027,7 @@ rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
ETHR_ASSERT((initial & ETHR_RWMTX_W_FLG__) == 0);
ETHR_MTX_Q_LOCK(&rwmtx->mtxb.qlck);
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
tse = rwmtx->mtxb.q;
if ((act & ETHR_RWMTX_W_FLG__) || !tse || is_w_waiter(tse)) {
/* Someone else woke the readers up... */
@@ -2035,10 +2040,10 @@ rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
}
static void
-rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, ethr_sint32_t initial,
int transfer_read_lock)
{
- long new, act = initial;
+ ethr_sint32_t new, act = initial;
ethr_ts_event *tse;
if (transfer_read_lock) {
@@ -2060,9 +2065,9 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
return;
else {
while ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
- long exp = act;
+ ethr_sint32_t exp = act;
new = exp & ~ETHR_RWMTX_W_FLG__;
- act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
+ act = ethr_atomic32_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (act == exp)
return;
}
@@ -2075,12 +2080,12 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
if (!have_w) {
if (!tse) {
#ifdef ETHR_DEBUG
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
ETHR_ASSERT((act & ETHR_RWMTX_WAIT_FLGS__) == 0);
#endif
goto already_served;
}
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act == (ETHR_RWMTX_R_WAIT_FLG__|ETHR_RWMTX_R_FLG__)) {
ETHR_ASSERT(tse && !is_w_waiter(tse));
}
@@ -2099,7 +2104,7 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
if (is_w_waiter(tse)) {
if (!have_w) {
- act = ethr_atomic_read_bor(&rwmtx->mtxb.flgs,
+ act = ethr_atomic32_read_bor(&rwmtx->mtxb.flgs,
ETHR_RWMTX_W_FLG__);
ETHR_ASSERT((act & ~(ETHR_RWMTX_WAIT_FLGS__
| (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL
@@ -2131,7 +2136,7 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
if (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL) {
rs = rwmtx->tdata.rs;
- new = (long) rs;
+ new = (ethr_sint32_t) rs;
rwmtx->tdata.rs = 0;
}
else {
@@ -2187,16 +2192,16 @@ alloc_readers_array(int length, ethr_rwmutex_lived lived)
if (!mem)
return NULL;
- if ((((unsigned long) mem) & ETHR_CACHE_LINE_MASK) == 0) {
+ if ((((ethr_uint_t) mem) & ETHR_CACHE_LINE_MASK) == 0) {
ra = (ethr_rwmtx_readers_array__ *) mem;
ra->data.byte_offset = 0;
}
else {
ra = ((ethr_rwmtx_readers_array__ *)
- ((((unsigned long) mem) & ~ETHR_CACHE_LINE_MASK)
+ ((((ethr_uint_t) mem) & ~ETHR_CACHE_LINE_MASK)
+ ETHR_CACHE_LINE_SIZE));
- ra->data.byte_offset = (int) ((unsigned long) ra
- - (unsigned long) mem);
+ ra->data.byte_offset = (int) ((ethr_uint_t) ra
+ - (ethr_uint_t) mem);
}
ra->data.lived = lived;
return ra;
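alloc_readers_array() above hands out a cache-aligned pointer but must still free the raw allocation, so it records byte_offset, the distance from the raw block to the aligned struct. A minimal sketch of recovering the original pointer on free, with a hypothetical layout:

#include <stdlib.h>

struct aligned_hdr { int byte_offset;   /* ... payload follows ... */ };

void free_aligned(struct aligned_hdr *ra)
{
    free((char *) ra - ra->byte_offset);  /* step back to the malloc()ed block */
}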
@@ -2270,7 +2275,7 @@ ethr_rwmutex_init_opt(ethr_rwmutex *rwmtx, ethr_rwmutex_opt *opt)
rwmtx->tdata.ra = ra;
for (ix = 0; ix < length; ix++) {
- ethr_atomic_init(&rwmtx->tdata.ra[ix].data.readers, 0);
+ ethr_atomic32_init(&rwmtx->tdata.ra[ix].data.readers, 0);
rwmtx->tdata.ra[ix].data.waiting_readers = 0;
}
break;
@@ -2324,7 +2329,7 @@ ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
#endif
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(rwmtx);
if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
- long act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ethr_sint32_t act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act == ETHR_RWMTX_R_FLG__)
rwmutex_try_complete_runlock(rwmtx, act, NULL, 0, 0, 0);
}
@@ -2345,7 +2350,7 @@ int
ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
{
int res = 0;
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2358,22 +2363,22 @@ ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL: {
#ifdef ETHR_RLOCK_WITH_INC_DEC
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
res = EBUSY;
else {
- act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_inc_read_acqb(&rwmtx->mtxb.flgs);
if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__)) {
rwmutex_incdec_restore_failed_tryrlock(rwmtx);
res = EBUSY;
}
}
#else
- long exp = 0;
+ ethr_sint32_t exp = 0;
int tries = 0;
while (1) {
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
if (act == exp) {
res = 0;
break;
@@ -2416,7 +2421,7 @@ ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2429,14 +2434,14 @@ ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL: {
#ifdef ETHR_RLOCK_WITH_INC_DEC
- act = ethr_atomic_inc_read_acqb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_inc_read_acqb(&rwmtx->mtxb.flgs);
if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
rwmutex_normal_rlock_wait(rwmtx, act);
#else
- long exp = 0;
+ ethr_sint32_t exp = 0;
while (1) {
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
if (act == exp)
break;
@@ -2469,7 +2474,7 @@ ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(&rwmtx->mtxb);
ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(&rwmtx->mtxb);
@@ -2484,7 +2489,7 @@ ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_dec_read_relb(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_dec_read_relb(&rwmtx->mtxb.flgs);
if ((act & ETHR_RWMTX_WAIT_FLGS__)
&& (act & ~ETHR_RWMTX_WAIT_FLGS__) == 0) {
ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
@@ -2503,7 +2508,7 @@ ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
ETHR_MEMORY_BARRIER;
if (act == 0) {
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
if (act != ETHR_RWMTX_R_FLG__)
rwmutex_freqread_rdrs_dec_chk_wakeup(rwmtx, tse, act);
}
@@ -2521,7 +2526,7 @@ int
ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
{
int res = 0;
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2533,8 +2538,8 @@ ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
res = EBUSY;
break;
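For the normal rwmutex type, trywlock above is a single acquire-barrier cmpxchg from 0 to the writer flag: any nonzero value means readers, a writer, or waiters are present, so the call reports EBUSY without touching the wait queue. A minimal sketch with a hypothetical flag value:

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define W_FLG ((int32_t) 1)     /* hypothetical stand-in for ETHR_RWMTX_W_FLG__ */

int try_write_lock(_Atomic int32_t *flgs)
{
    int32_t exp = 0;
    if (atomic_compare_exchange_strong_explicit(flgs, &exp, W_FLG,
                                                memory_order_acquire,
                                                memory_order_relaxed))
        return 0;               /* got the write lock */
    return EBUSY;               /* someone holds or waits for the lock */
}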
@@ -2543,13 +2548,13 @@ ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
res = 0;
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
do {
if (act == 0)
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
else if (act == ETHR_RWMTX_R_FLG__) {
res = rwmutex_try_complete_runlock(rwmtx, act, NULL,
0, 1, 1);
@@ -2582,7 +2587,7 @@ ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
@@ -2593,8 +2598,8 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
rwmutex_normal_rwlock_wait(rwmtx, act);
break;
@@ -2602,7 +2607,7 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
- act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ act = ethr_atomic32_read(&rwmtx->mtxb.flgs);
do {
@@ -2611,8 +2616,8 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
break;
}
- act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs,
- ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&rwmtx->mtxb.flgs,
+ ETHR_RWMTX_W_FLG__, 0);
} while (act != 0);
@@ -2630,7 +2635,7 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
@@ -2645,16 +2650,16 @@ ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_NORMAL:
- act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs,
- 0, ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&rwmtx->mtxb.flgs,
+ 0, ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
rwmutex_unlock_wake(rwmtx, 1, act, 0);
break;
case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
case ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ:
- act = ethr_atomic_cmpxchg_relb(&rwmtx->mtxb.flgs, 0,
- ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&rwmtx->mtxb.flgs, 0,
+ ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
rwmutex_unlock_wake(rwmtx, 1, act, 0);
break;
@@ -2779,7 +2784,7 @@ static void
hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
{
int res;
- long flgs = ethr_atomic_read(&mtxb->flgs);
+ ethr_sint32_t flgs = ethr_atomic32_read(&mtxb->flgs);
ETHR_MTX_HARD_ASSERT(res == 0);
@@ -2802,12 +2807,12 @@ hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
tse = mtxb->q;
do {
- long type;
+ ethr_sint32_t type;
ETHR_MTX_HARD_ASSERT(tse->next->prev == tse);
ETHR_MTX_HARD_ASSERT(tse->prev->next == tse);
- type = ethr_atomic_read(&tse->uaflgs);
+ type = ethr_atomic32_read(&tse->uaflgs);
ETHR_MTX_HARD_ASSERT(type == tse->uflgs);
switch (type) {
case ETHR_RWMTX_W_WAIT_FLG__:
diff --git a/erts/lib_src/pthread/ethr_event.c b/erts/lib_src/pthread/ethr_event.c
index 6731c0eb46..9434d60d0a 100644
--- a/erts/lib_src/pthread/ethr_event.c
+++ b/erts/lib_src/pthread/ethr_event.c
@@ -24,6 +24,10 @@
#define ETHR_INLINE_FUNC_NAME_(X) X ## __
#define ETHR_EVENT_IMPL__
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include "ethread.h"
#if defined(ETHR_LINUX_FUTEX_IMPL__)
@@ -37,7 +41,7 @@
int
ethr_event_init(ethr_event *e)
{
- ethr_atomic_init(&e->futex, ETHR_EVENT_OFF__);
+ ethr_atomic32_init(&e->futex, ETHR_EVENT_OFF__);
return 0;
}
@@ -52,7 +56,7 @@ wait__(ethr_event *e, int spincount)
{
unsigned sc = spincount;
int res;
- long val;
+ ethr_sint32_t val;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
if (spincount < 0)
@@ -60,7 +64,7 @@ wait__(ethr_event *e, int spincount)
while (1) {
while (1) {
- val = ethr_atomic_read(&e->futex);
+ val = ethr_atomic32_read(&e->futex);
if (val == ETHR_EVENT_ON__)
return 0;
if (sc == 0)
@@ -76,16 +80,18 @@ wait__(ethr_event *e, int spincount)
}
if (val != ETHR_EVENT_OFF_WAITER__) {
- val = ethr_atomic_cmpxchg(&e->futex,
- ETHR_EVENT_OFF_WAITER__,
- ETHR_EVENT_OFF__);
+ val = ethr_atomic32_cmpxchg(&e->futex,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
if (val == ETHR_EVENT_ON__)
return 0;
ETHR_ASSERT(val == ETHR_EVENT_OFF__);
}
- res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__, ETHR_EVENT_OFF_WAITER__);
+ res = ETHR_FUTEX__(&e->futex,
+ ETHR_FUTEX_WAIT__,
+ ETHR_EVENT_OFF_WAITER__);
if (res == EINTR)
break;
if (res != 0 && res != EWOULDBLOCK)
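The futex variant above only parks in the kernel after flagging itself as a waiter in the state word; FUTEX_WAIT returns immediately unless the word still holds the expected value, which closes the lost-wakeup race between the read and the sleep. A rough sketch of that protocol on Linux, with hypothetical state values and GCC atomic builtins:

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#define EV_ON          1
#define EV_OFF         0
#define EV_OFF_WAITER -1

static long futex(int32_t *uaddr, int op, int32_t val)
{
    return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

void event_wait(int32_t *state)
{
    for (;;) {
        int32_t val = __atomic_load_n(state, __ATOMIC_ACQUIRE);
        if (val == EV_ON)
            return;
        if (val != EV_OFF_WAITER) {
            int32_t exp = EV_OFF;
            /* announce ourselves as a waiter; bail out if the event fired */
            if (!__atomic_compare_exchange_n(state, &exp, EV_OFF_WAITER, 0,
                                             __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
                && exp == EV_ON)
                return;
        }
        /* sleeps only if *state is still EV_OFF_WAITER; EINTR and
           EWOULDBLOCK simply cause another trip around the loop */
        futex(state, FUTEX_WAIT, EV_OFF_WAITER);
    }
}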
@@ -102,7 +108,7 @@ int
ethr_event_init(ethr_event *e)
{
int res;
- ethr_atomic_init(&e->state, ETHR_EVENT_OFF__);
+ ethr_atomic32_init(&e->state, ETHR_EVENT_OFF__);
res = pthread_mutex_init(&e->mtx, NULL);
if (res != 0)
return res;
@@ -131,7 +137,7 @@ static ETHR_INLINE int
wait__(ethr_event *e, int spincount)
{
int sc = spincount;
- long val;
+ ethr_sint32_t val;
int res, ulres;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -139,7 +145,7 @@ wait__(ethr_event *e, int spincount)
ETHR_FATAL_ERROR__(EINVAL);
while (1) {
- val = ethr_atomic_read(&e->state);
+ val = ethr_atomic32_read(&e->state);
if (val == ETHR_EVENT_ON__)
return 0;
if (sc == 0)
@@ -155,9 +161,9 @@ wait__(ethr_event *e, int spincount)
}
if (val != ETHR_EVENT_OFF_WAITER__) {
- val = ethr_atomic_cmpxchg(&e->state,
- ETHR_EVENT_OFF_WAITER__,
- ETHR_EVENT_OFF__);
+ val = ethr_atomic32_cmpxchg(&e->state,
+ ETHR_EVENT_OFF_WAITER__,
+ ETHR_EVENT_OFF__);
if (val == ETHR_EVENT_ON__)
return 0;
ETHR_ASSERT(val == ETHR_EVENT_OFF__);
@@ -172,7 +178,7 @@ wait__(ethr_event *e, int spincount)
while (1) {
- val = ethr_atomic_read(&e->state);
+ val = ethr_atomic32_read(&e->state);
if (val == ETHR_EVENT_ON__)
break;
diff --git a/erts/lib_src/pthread/ethread.c b/erts/lib_src/pthread/ethread.c
index ea1d9d43f0..f047104103 100644
--- a/erts/lib_src/pthread/ethread.c
+++ b/erts/lib_src/pthread/ethread.c
@@ -72,7 +72,7 @@ static void thr_exit_cleanup(void)
/* Argument passed to thr_wrapper() */
typedef struct {
- ethr_atomic_t result;
+ ethr_atomic32_t result;
ethr_ts_event *tse;
void *(*thr_func)(void *);
void *arg;
@@ -81,14 +81,14 @@ typedef struct {
static void *thr_wrapper(void *vtwd)
{
- long result;
+ ethr_sint32_t result;
void *res;
ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
void *(*thr_func)(void *) = twd->thr_func;
void *arg = twd->arg;
ethr_ts_event *tsep = NULL;
- result = (long) ethr_make_ts_event__(&tsep);
+ result = (ethr_sint32_t) ethr_make_ts_event__(&tsep);
if (result == 0) {
tsep->iflgs |= ETHR_TS_EV_ETHREAD;
@@ -99,7 +99,7 @@ static void *thr_wrapper(void *vtwd)
tsep = twd->tse; /* We aren't allowed to follow twd after
result has been set! */
- ethr_atomic_set(&twd->result, result);
+ ethr_atomic32_set(&twd->result, result);
ethr_event_set(&tsep->event);
@@ -191,7 +191,7 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
}
#endif
- ethr_atomic_init(&twd.result, -1);
+ ethr_atomic32_init(&twd.result, (ethr_sint32_t) -1);
twd.tse = ethr_get_ts_event();
twd.thr_func = func;
twd.arg = arg;
@@ -252,10 +252,10 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
/* Wait for child to initialize... */
while (1) {
- long result;
+ ethr_sint32_t result;
ethr_event_reset(&twd.tse->event);
- result = ethr_atomic_read(&twd.result);
+ result = ethr_atomic32_read(&twd.result);
if (result == 0)
break;
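The loop above is the thread-creation handshake: the parent initializes twd.result to -1, starts the child, and alternates event reset, atomic read, and wait until the child publishes 0 (or an error code) through the atomic before it stops touching twd. A reduced sketch of the same handshake, with a POSIX semaphore standing in for the ethr event and hypothetical result codes:

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>

struct wrap_data {
    atomic_int result;          /* -1 = not started, 0 = ok, >0 = error */
    sem_t      done;            /* stands in for the ts event */
};

static void *child(void *arg)
{
    struct wrap_data *twd = arg;
    atomic_store(&twd->result, 0);  /* publish init result ... */
    sem_post(&twd->done);           /* ... then wake parent; twd is off-limits now */
    /* ... run the real thread function here ... */
    return NULL;
}

int spawn_and_wait(pthread_t *tid)
{
    struct wrap_data twd;
    atomic_init(&twd.result, -1);
    sem_init(&twd.done, 0, 0);
    int err = pthread_create(tid, NULL, child, &twd);
    if (err != 0)
        return err;
    while (atomic_load(&twd.result) == -1)  /* mirrors the reset/read/wait loop */
        sem_wait(&twd.done);
    return atomic_load(&twd.result);        /* cleanup elided in this sketch */
}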
@@ -349,32 +349,6 @@ ethr_leave_ts_event(ethr_ts_event *tsep)
}
/*
- * Current time
- */
-
-int
-ethr_time_now(ethr_timeval *time)
-{
- int res;
- struct timeval tv;
-#if ETHR_XCHK
- if (ethr_not_inited__) {
- ETHR_ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ETHR_ASSERT(0);
- return EINVAL;
- }
-#endif
-
- res = gettimeofday(&tv, NULL);
- time->tv_sec = (long) tv.tv_sec;
- time->tv_nsec = ((long) tv.tv_usec)*1000;
- return res;
-}
-
-/*
* Thread specific data
*/
diff --git a/erts/lib_src/win/ethr_event.c b/erts/lib_src/win/ethr_event.c
index ddb4780ff1..68f093f49c 100644
--- a/erts/lib_src/win/ethr_event.c
+++ b/erts/lib_src/win/ethr_event.c
@@ -28,6 +28,9 @@
/* --- Windows implementation of thread events ------------------------------ */
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedCompareExchange)
+
int
ethr_event_init(ethr_event *e)
{
@@ -72,10 +75,10 @@ wait(ethr_event *e, int spincount)
while (1) {
long on;
while (1) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
state = e->state;
#else
- state = InterlockedExchangeAdd(&e->state, (LONG) 0);
+ state = _InterlockedExchangeAdd(&e->state, (LONG) 0);
#endif
if (state == ETHR_EVENT_ON__)
return 0;
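When plain loads of the state word are not trusted, the hunk above reads it with an interlocked add of zero: the operation leaves the value unchanged but returns it atomically, with a full barrier. A minimal sketch of the idiom:

#include <windows.h>

LONG atomic_read(volatile LONG *p)
{
    return InterlockedExchangeAdd(p, 0);  /* value unchanged; read is atomic */
}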
diff --git a/erts/lib_src/win/ethread.c b/erts/lib_src/win/ethread.c
index 69523edf94..789a360b11 100644
--- a/erts/lib_src/win/ethread.c
+++ b/erts/lib_src/win/ethread.c
@@ -49,7 +49,7 @@
/* Argument passed to thr_wrapper() */
typedef struct {
ethr_tid *tid;
- ethr_atomic_t result;
+ ethr_atomic32_t result;
ethr_ts_event *tse;
void *(*thr_func)(void *);
void *arg;
@@ -93,20 +93,20 @@ static void thr_exit_cleanup(ethr_tid *tid, void *res)
static unsigned __stdcall thr_wrapper(LPVOID vtwd)
{
ethr_tid my_tid;
- long result;
+ ethr_sint32_t result;
void *res;
ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
void *(*thr_func)(void *) = twd->thr_func;
void *arg = twd->arg;
ethr_ts_event *tsep = NULL;
- result = (long) ethr_make_ts_event__(&tsep);
+ result = (ethr_sint32_t) ethr_make_ts_event__(&tsep);
if (result == 0) {
tsep->iflgs |= ETHR_TS_EV_ETHREAD;
my_tid = *twd->tid;
if (!TlsSetValue(own_tid_key, (LPVOID) &my_tid)) {
- result = (long) ethr_win_get_errno__();
+ result = (ethr_sint32_t) ethr_win_get_errno__();
ethr_free_ts_event__(tsep);
}
else {
@@ -118,7 +118,7 @@ static unsigned __stdcall thr_wrapper(LPVOID vtwd)
tsep = twd->tse; /* We aren't allowed to follow twd after
result has been set! */
- ethr_atomic_set(&twd->result, result);
+ ethr_atomic32_set(&twd->result, result);
ethr_event_set(&tsep->event);
@@ -128,28 +128,6 @@ static unsigned __stdcall thr_wrapper(LPVOID vtwd)
return 0;
}
-#ifdef __GNUC__
-#define LL_LITERAL(X) X##LL
-#else
-#define LL_LITERAL(X) X##i64
-#endif
-
-#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
-
-static ETHR_INLINE void
-get_curr_time(long *sec, long *nsec)
-{
- SYSTEMTIME t;
- FILETIME ft;
- LONGLONG lft;
-
- GetSystemTime(&t);
- SystemTimeToFileTime(&t, &ft);
- memcpy(&lft, &ft, sizeof(lft));
- *nsec = ((long) (lft % LL_LITERAL(10000000)))*100;
- *sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
-}
-
/* internal exports */
int
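The removed get_curr_time() above converts a Windows FILETIME, which counts 100 ns ticks since 1601-01-01, to Unix seconds and nanoseconds by dividing by 10^7 and subtracting EPOCH_JULIAN_DIFF, the 11644473600 seconds between the two epochs. The same conversion in plain form:

#include <stdint.h>

#define EPOCH_DIFF_SEC 11644473600LL    /* seconds from 1601-01-01 to 1970-01-01 */

void filetime_to_unix(int64_t ticks_100ns, long *sec, long *nsec)
{
    *nsec = (long) (ticks_100ns % 10000000LL) * 100;
    *sec  = (long) (ticks_100ns / 10000000LL - EPOCH_DIFF_SEC);
}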
@@ -320,7 +298,7 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
ETHR_PAGE_ALIGN(ETHR_KW2B(suggested_stack_size));
}
- ethr_atomic_init(&twd.result, -1);
+ ethr_atomic32_init(&twd.result, -1);
twd.tid = tid;
twd.thr_func = func;
@@ -352,11 +330,11 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
/* Wait for child to initialize... */
while (1) {
- long result;
+ ethr_sint32_t result;
int err;
ethr_event_reset(&twd.tse->event);
- result = ethr_atomic_read(&twd.result);
+ result = ethr_atomic32_read(&twd.result);
if (result == 0)
break;
@@ -517,23 +495,6 @@ ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
return tid1.id == tid2.id && tid1.id != ETHR_INVALID_TID_ID;
}
-int
-ethr_time_now(ethr_timeval *time)
-{
-#if ETHR_XCHK
- if (ethr_not_inited__) {
- ETHR_ASSERT(0);
- return EACCES;
- }
- if (!time) {
- ETHR_ASSERT(0);
- return EINVAL;
- }
-#endif
- get_curr_time(&time->tv_sec, &time->tv_nsec);
- return 0;
-}
-
/*
* Thread specific data
*/
diff --git a/erts/test/ethread_SUITE.erl b/erts/test/ethread_SUITE.erl
index 93e27fa8d3..69e5af802f 100644
--- a/erts/test/ethread_SUITE.erl
+++ b/erts/test/ethread_SUITE.erl
@@ -37,7 +37,6 @@
equal_tids/1,
mutex/1,
try_lock_mutex/1,
- time_now/1,
cond_wait/1,
broadcast/1,
detached_thread/1,
@@ -55,7 +54,6 @@ tests() ->
equal_tids,
mutex,
try_lock_mutex,
- time_now,
cond_wait,
broadcast,
detached_thread,
@@ -104,17 +102,6 @@ try_lock_mutex(suite) ->
try_lock_mutex(Config) ->
run_case(Config, "try_lock_mutex", "").
-time_now(doc) ->
- ["Tests ethr_time_now by comparing time values with Erlang."];
-time_now(suite) ->
- [];
-time_now(Config) ->
- run_case(Config, "time_now", "", fun (P) ->
- spawn_link(fun () ->
- watchdog(P)
- end)
- end).
-
wd_dispatch(P) ->
receive
bye ->
diff --git a/erts/test/ethread_SUITE_data/ethread_tests.c b/erts/test/ethread_SUITE_data/ethread_tests.c
index 7fc71d8047..0b59ff5aa6 100644
--- a/erts/test/ethread_SUITE_data/ethread_tests.c
+++ b/erts/test/ethread_SUITE_data/ethread_tests.c
@@ -514,69 +514,6 @@ try_lock_mutex_test(void)
}
/*
- * The time now test.
- *
- * Tests ethr_time_now by comparing time values with Erlang.
- */
-#define TNT_MAX_TIME_DIFF 200000
-#define TNT_MAX_TIME_VALUES 52
-
-static void
-time_now_test(void)
-{
- int scanf_res, time_now_res, i, no_values, max_abs_diff;
- static ethr_timeval tv[TNT_MAX_TIME_VALUES];
- static int ms[TNT_MAX_TIME_VALUES];
-
- i = 0;
- do {
- ASSERT(i < TNT_MAX_TIME_VALUES);
- scanf_res = scanf("%d", &ms[i]);
- time_now_res = ethr_time_now(&tv[i]);
- ASSERT(scanf_res == 1);
- ASSERT(time_now_res == 0);
-#if 0
- print_line("Got %d; %ld:%ld", ms[i], tv[i].tv_sec, tv[i].tv_nsec);
-#endif
- i++;
- } while (ms[i-1] >= 0);
-
- no_values = i-1;
-
- ASSERT(ms[0] == 0);
-
- print_line("TNT_MAX_TIME_DIFF = %d (us)", TNT_MAX_TIME_DIFF);
-
- max_abs_diff = 0;
-
- for (i = 1; i < no_values; i++) {
- long diff;
- long tn_us;
- long e_us;
-
- tn_us = (tv[i].tv_sec - tv[0].tv_sec) * 1000000;
- tn_us += (tv[i].tv_nsec - tv[0].tv_nsec)/1000;
-
- e_us = ms[i]*1000;
-
- diff = e_us - tn_us;
-
- print_line("Erlang time = %ld us; ethr_time_now = %ld us; diff %ld us",
- e_us, tn_us, diff);
-
- if (max_abs_diff < abs((int) diff)) {
- max_abs_diff = abs((int) diff);
- }
-
- ASSERT(e_us - TNT_MAX_TIME_DIFF <= tn_us);
- ASSERT(tn_us <= e_us + TNT_MAX_TIME_DIFF);
- }
-
- print_line("Max absolute diff = %d us", max_abs_diff);
- succeed("Max absolute diff = %d us", max_abs_diff);
-}
-
-/*
* The cond wait test case.
*
* Tests ethr_cond_wait with ethr_cond_signal and ethr_cond_broadcast.
@@ -1538,8 +1475,6 @@ main(int argc, char *argv[])
mutex_test();
else if (strcmp(testcase, "try_lock_mutex") == 0)
try_lock_mutex_test();
- else if (strcmp(testcase, "time_now") == 0)
- time_now_test();
else if (strcmp(testcase, "cond_wait") == 0)
cond_wait_test();
else if (strcmp(testcase, "broadcast") == 0)
diff --git a/erts/vsn.mk b/erts/vsn.mk
index a5dd62feb2..8a1590e74c 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -17,8 +17,8 @@
# %CopyrightEnd%
#
-VSN = 5.8.2
-SYSTEM_VSN = R14B01
+VSN = 5.8.3
+SYSTEM_VSN = R14B02
# Port number 4365 in 4.2
# Port number 4366 in 4.3