Diffstat (limited to 'erts')
-rw-r--r--  erts/aclocal.m4                                       7
-rw-r--r--  erts/include/internal/ethr_mutex.h                   28
-rw-r--r--  erts/include/internal/ethread.h                     141
-rw-r--r--  erts/include/internal/ethread_header_config.h.in      9
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h               5
-rw-r--r--  erts/include/internal/i386/atomic.h                   4
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h    97
-rw-r--r--  erts/include/internal/ppc32/atomic.h                  4
-rw-r--r--  erts/include/internal/pthread/ethr_event.h           16
-rw-r--r--  erts/include/internal/sparc32/atomic.h                4
-rw-r--r--  erts/include/internal/tile/atomic.h                   4
-rw-r--r--  erts/include/internal/win/ethr_atomic.h               1
-rw-r--r--  erts/lib_src/common/ethr_aux.c                       60
-rw-r--r--  erts/lib_src/common/ethr_mutex.c                    141
-rw-r--r--  erts/lib_src/pthread/ethr_event.c                     8
-rw-r--r--  erts/lib_src/pthread/ethread.c                        8
-rw-r--r--  erts/lib_src/win/ethread.c                            8
17 files changed, 300 insertions, 245 deletions
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 443d8622bf..d45b36bfed 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -1087,6 +1087,13 @@ fi
AC_CHECK_SIZEOF(void *)
AC_DEFINE_UNQUOTED(ETHR_SIZEOF_PTR, $ac_cv_sizeof_void_p, [Define to the size of pointers])
+AC_CHECK_SIZEOF(int)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_INT, $ac_cv_sizeof_int, [Define to the size of int])
+AC_CHECK_SIZEOF(long)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_LONG, $ac_cv_sizeof_long, [Define to the size of long])
+AC_CHECK_SIZEOF(long long)
+AC_DEFINE_UNQUOTED(ETHR_SIZEOF_LONG_LONG, $ac_cv_sizeof_long_long, [Define to the size of long long])
+
AC_ARG_ENABLE(native-ethr-impls,
AS_HELP_STRING([--disable-native-ethr-impls],
[disable native ethread implementations]),
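
The three new AC_CHECK_SIZEOF probes surface as ETHR_SIZEOF_INT, ETHR_SIZEOF_LONG and ETHR_SIZEOF_LONG_LONG in the generated ethread_header_config.h, next to the existing ETHR_SIZEOF_PTR; ethread.h (further down) uses them to pick a pointer-sized integer type. A minimal sketch, not part of the patch, of how a consumer could verify at compile time that the selected ethr_sint_t really is pointer-sized:

    /* Sketch only: C89-style compile-time check that ethr_sint_t matches the
     * pointer width reported by configure.  The array size becomes negative,
     * i.e. a compile error, if the sizes disagree. */
    #include "ethread.h"   /* ethr_sint_t, ETHR_SIZEOF_PTR */

    typedef char ethr_sint_is_ptr_sized__[
        (sizeof(ethr_sint_t) == sizeof(void *)
         && sizeof(ethr_sint_t) == ETHR_SIZEOF_PTR) ? 1 : -1];
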
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index 01855864e3..ebff845a54 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -78,13 +78,13 @@
# error Need a qlock implementation
#endif
-#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
-#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
-#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+#define ETHR_RWMTX_W_FLG__ (((ethr_sint_t) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((ethr_sint_t) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((ethr_sint_t) 1) << 29)
/* frequent read kind */
-#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
-#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((long) 1) << 27)
+#define ETHR_RWMTX_R_FLG__ (((ethr_sint_t) 1) << 28)
+#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((ethr_sint_t) 1) << 27)
#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)
/* normal kind */
@@ -302,7 +302,7 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint_t val__; \
ETHR_COMPILER_BARRIER; \
val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ > 0); \
@@ -317,13 +317,13 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ethr_sint_t val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ >= 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint_t val__; \
ETHR_COMPILER_BARRIER; \
val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == -1); \
@@ -338,7 +338,7 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ethr_sint_t val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -501,13 +501,13 @@ do { \
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
-void ethr_mutex_lock_wait__(ethr_mutex *, long);
-void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint_t);
+void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint_t);
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint_t act;
int res;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
@@ -531,7 +531,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint_t act;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
@@ -551,7 +551,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint_t act;
ETHR_COMPILER_BARRIER;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
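
All of these flag constants live in the same pointer-sized atomic word (mtxb.flgs) that the inline lock paths operate on, which is why they switch from long to ethr_sint_t together with the function signatures. The body of ethr_mutex_trylock is elided in this hunk; the following is only a hedged sketch of the cmpxchg-based fast path such flags suggest, not the actual implementation:

    /* Illustrative trylock fast path, assuming the ETHR_RWMTX_*_FLG__ bits are
     * kept in the ethr_atomic_t mtx->mtxb.flgs as in the rest of this diff.
     * sketch_mutex_trylock() is not an ethread function. */
    #include <errno.h>              /* EBUSY */
    #include "ethr_mutex.h"         /* include path is illustrative */

    static int sketch_mutex_trylock(ethr_mutex *mtx)
    {
        ethr_sint_t act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs,
                                                   ETHR_RWMTX_W_FLG__, /* new */
                                                   (ethr_sint_t) 0);   /* expected */
        return act == 0 ? 0 : EBUSY;
    }
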
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 53fa1acdc2..30b7f03889 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -73,7 +73,7 @@ typedef struct {
#endif
/* Assume 64-byte cache line size */
-#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_SIZE ((ethr_uint_t) 64)
#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
@@ -171,6 +171,15 @@ typedef pthread_key_t ethr_tsd_key;
# undef WIN32_LEAN_AND_MEAN
#endif
+#if defined(_MSC_VER)
+#if ETHR_SIZEOF_PTR == 4
+#define ETHR_HAVE_INT_T 1
+typedef LONG ethr_sint_t;
+typedef ULONG ethr_uint_t;
+#else
+#error "Only 32-bit windows is supported"
+#endif
+#endif
struct ethr_join_data_;
/* Types */
@@ -198,9 +207,19 @@ typedef DWORD ethr_tsd_key;
#endif
-#ifdef SIZEOF_LONG
-#if SIZEOF_LONG < ETHR_SIZEOF_PTR
-#error size of long currently needs to be at least the same as size of void *
+#ifndef ETHR_HAVE_INT_T
+#define ETHR_HAVE_INT_T 1
+#if ETHR_SIZEOF_INT == ETHR_SIZEOF_PTR
+typedef int ethr_sint_t;
+typedef unsigned int ethr_uint_t;
+#elif ETHR_SIZEOF_LONG == ETHR_SIZEOF_PTR
+typedef long ethr_sint_t;
+typedef unsigned long ethr_uint_t;
+#elif ETHR_SIZEOF_LONG_LONG == ETHR_SIZEOF_PTR
+typedef long long ethr_sint_t;
+typedef unsigned long long ethr_uint_t;
+#else
+#error "No integer type of the same size as pointers found"
#endif
#endif
@@ -508,30 +527,30 @@ ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
*/
typedef ethr_native_atomic_t ethr_atomic_t;
#else
-typedef long ethr_atomic_t;
+typedef ethr_sint_t ethr_atomic_t;
#endif
#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-void ethr_atomic_init(ethr_atomic_t *, long);
-void ethr_atomic_set(ethr_atomic_t *, long);
-long ethr_atomic_read(ethr_atomic_t *);
-long ethr_atomic_inc_read(ethr_atomic_t *);
-long ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_init(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_set(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *);
void ethr_atomic_inc(ethr_atomic_t *);
void ethr_atomic_dec(ethr_atomic_t *);
-long ethr_atomic_add_read(ethr_atomic_t *, long);
-void ethr_atomic_add(ethr_atomic_t *, long);
-long ethr_atomic_read_band(ethr_atomic_t *, long);
-long ethr_atomic_read_bor(ethr_atomic_t *, long);
-long ethr_atomic_xchg(ethr_atomic_t *, long);
-long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
-long ethr_atomic_read_acqb(ethr_atomic_t *);
-long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
-void ethr_atomic_set_relb(ethr_atomic_t *, long);
+ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_add(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, ethr_sint_t);
void ethr_atomic_dec_relb(ethr_atomic_t *);
-long ethr_atomic_dec_read_relb(ethr_atomic_t *);
-long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
-long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
+ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
@@ -569,7 +588,7 @@ do { \
#endif
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, ethr_sint_t i)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
ethr_native_atomic_init(var, i);
@@ -579,7 +598,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, ethr_sint_t i)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
ethr_native_atomic_set(var, i);
@@ -588,20 +607,20 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_read(var);
#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
return res;
#endif
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, ethr_sint_t incr)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
ethr_native_atomic_add(var, incr);
@@ -610,13 +629,13 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
#endif
}
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, ethr_sint_t i)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_add_return(var, i);
#else
- long res;
+ ethr_sint_t res;
ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
return res;
#endif
@@ -642,78 +661,77 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_inc_return(var);
#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
return res;
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_dec_return(var);
#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
return res;
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
- long mask)
+ ethr_sint_t mask)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_and_retold(var, mask);
#else
- long res;
+ ethr_sint_t res;
ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
return res;
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
- long mask)
+ ethr_sint_t mask)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_or_retold(var, mask);
#else
- long res;
+ ethr_sint_t res;
ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
return res;
#endif
}
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new)
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var, ethr_sint_t new)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_xchg(var, new);
#else
- long res;
+ ethr_sint_t res;
ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
return res;
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long exp)
+ ethr_sint_t new,
+ ethr_sint_t exp)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_cmpxchg(var, new, exp);
#else
- long res;
+ ethr_sint_t res;
ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
{
res = *var;
@@ -733,7 +751,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
* _relb = release barrier
*/
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
@@ -743,7 +761,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
@@ -754,7 +772,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
}
static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var,
+ ethr_sint_t val)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
ethr_native_atomic_set_relb(var, val);
@@ -773,7 +792,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
@@ -783,10 +802,10 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
- long new,
- long exp)
+ ethr_sint_t new,
+ ethr_sint_t exp)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
@@ -795,10 +814,10 @@ ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
- long new,
- long exp)
+ ethr_sint_t new,
+ ethr_sint_t exp)
{
#ifdef ETHR_HAVE_NATIVE_ATOMICS
return ethr_native_atomic_cmpxchg_relb(var, new, exp);
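
With the prototypes above, the whole atomic API now traffics in ethr_sint_t, a signed integer guaranteed to be exactly pointer-sized, instead of long. A small usage sketch under that assumption (the reference-counter names are hypothetical, not part of ethread):

    /* Hypothetical pointer-sized reference counter built on the ethr atomics. */
    #include "ethread.h"

    static ethr_atomic_t refc;

    static void refc_init(void)
    {
        ethr_atomic_init(&refc, (ethr_sint_t) 1);
    }

    static void refc_grab(void)
    {
        ethr_atomic_inc(&refc);
    }

    static int refc_release(void)   /* nonzero when the last reference is dropped */
    {
        return ethr_atomic_dec_read_relb(&refc) == 0;
    }
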
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index 5debb44756..8c6fbc1fdd 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -20,6 +20,15 @@
/* Define to the size of pointers */
#undef ETHR_SIZEOF_PTR
+/* Define to the size of int */
+#undef ETHR_SIZEOF_INT
+
+/* Define to the size of long */
+#undef ETHR_SIZEOF_LONG
+
+/* Define to the size of long long */
+#undef ETHR_SIZEOF_LONG_LONG
+
/* Define if you want to disable native ethread implementations */
#undef ETHR_DISABLE_NATIVE_IMPLS
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index e8e529dd48..49a8ceeab1 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -36,11 +36,14 @@
# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
#endif
+#if ETHR_SIZEOF_LONG != ETHR_SIZEOF_PTR
+# error "Incompatible size of 'long'"
+#endif
+
typedef struct {
volatile long counter;
} ethr_native_atomic_t;
-
/*
* According to the documentation this is what we want:
* #define ETHR_MEMORY_BARRIER __sync_synchronize()
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 52d01aab32..709a6a17fc 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -26,6 +26,10 @@
#ifndef ETHREAD_I386_ATOMIC_H
#define ETHREAD_I386_ATOMIC_H
+#if ETHR_SIZEOF_LONG != ETHR_SIZEOF_PTR
+# error "Incompatible size of 'long'"
+#endif
+
/* An atomic is an aligned long accessed via locked operations.
*/
typedef struct {
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index a6eb43a0bd..7def51ebd1 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -46,8 +46,7 @@
* - AO_store()
* - AO_compare_and_swap()
*
- * The `AO_t' type also have to be at least as large as
- * `void *' and `long' types.
+ * The `AO_t' type also has to be at least as large as the `void *' type.
*/
#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
@@ -78,49 +77,49 @@ typedef struct {
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ethr_native_atomic_set(ethr_native_atomic_t *var, ethr_sint_t value)
{
AO_store(&var->counter, (AO_t) value);
}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+ethr_native_atomic_init(ethr_native_atomic_t *var, ethr_sint_t value)
{
ethr_native_atomic_set(var, value);
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ethr_native_atomic_read(ethr_native_atomic_t *var)
{
- return (long) AO_load(&var->counter);
+ return (ethr_sint_t) AO_load(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ethr_sint_t
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, ethr_sint_t incr)
{
#ifdef AO_HAVE_fetch_and_add
- return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+ return ((ethr_sint_t) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
#else
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp + (AO_t) incr;
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) new;
+ return (ethr_sint_t) new;
}
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ethr_native_atomic_add(ethr_native_atomic_t *var, ethr_sint_t incr)
{
(void) ethr_native_atomic_add_return(var, incr);
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
{
#ifdef AO_HAVE_fetch_and_add1
- return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+ return ((ethr_sint_t) AO_fetch_and_add1(&var->counter)) + 1;
#else
return ethr_native_atomic_add_return(var, 1);
#endif
@@ -132,11 +131,11 @@ ethr_native_atomic_inc(ethr_native_atomic_t *var)
(void) ethr_native_atomic_inc_return(var);
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
{
#ifdef AO_HAVE_fetch_and_sub1
- return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+ return ((ethr_sint_t) AO_fetch_and_sub1(&var->counter)) - 1;
#else
return ethr_native_atomic_add_return(var, -1);
#endif
@@ -148,47 +147,49 @@ ethr_native_atomic_dec(ethr_native_atomic_t *var)
(void) ethr_native_atomic_dec_return(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint_t
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, ethr_sint_t mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp & ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ethr_sint_t) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint_t
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, ethr_sint_t mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp | ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ethr_sint_t) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint_t
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
{
- long act;
+ ethr_sint_t act;
do {
if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ethr_sint_t) AO_load(&var->counter);
} while (act == exp);
return act;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ethr_sint_t
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, ethr_sint_t new)
{
while (1) {
AO_t exp = AO_load(&var->counter);
if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
- return (long) exp;
+ return (ethr_sint_t) exp;
}
}
@@ -196,32 +197,32 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
{
#ifdef AO_HAVE_load_acquire
- return (long) AO_load_acquire(&var->counter);
+ return (ethr_sint_t) AO_load_acquire(&var->counter);
#else
- long res = ethr_native_atomic_read(var);
+ ethr_sint_t res = ethr_native_atomic_read(var);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
{
#ifdef AO_HAVE_fetch_and_add1_acquire
- return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+ return ((ethr_sint_t) AO_fetch_and_add1_acquire(&var->counter)) + 1;
#else
- long res = ethr_native_atomic_add_return(var, 1);
+ ethr_sint_t res = ethr_native_atomic_add_return(var, 1);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, ethr_sint_t value)
{
#ifdef AO_HAVE_store_release
AO_store_release(&var->counter, (AO_t) value);
@@ -231,11 +232,11 @@ ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
#endif
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
{
#ifdef AO_HAVE_fetch_and_sub1_release
- return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+ return ((ethr_sint_t) AO_fetch_and_sub1_release(&var->counter)) - 1;
#else
ETHR_MEMORY_BARRIER;
return ethr_native_atomic_dec_return(var);
@@ -248,34 +249,38 @@ ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
(void) ethr_native_atomic_dec_return_relb(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint_t
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
{
#ifdef AO_HAVE_compare_and_swap_acquire
- long act;
+ ethr_sint_t act;
do {
if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ethr_sint_t) AO_load(&var->counter);
} while (act == exp);
AO_nop_full();
return act;
#else
- long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ethr_sint_t act = ethr_native_atomic_cmpxchg(var, new, exp);
ETHR_MEMORY_BARRIER;
return act;
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint_t
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
{
#ifdef AO_HAVE_compare_and_swap_release
- long act;
+ ethr_sint_t act;
do {
if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ethr_sint_t) AO_load(&var->counter);
} while (act == exp);
return act;
#else
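
Every fallback above follows the same shape: load the AO_t word, compute the new value, and retry AO_compare_and_swap() until it succeeds, casting the result back to ethr_sint_t. A standalone sketch of that pattern (the xor operation is hypothetical; this header does not provide it):

    /* Generic AO_compare_and_swap() retry loop in the style of the and/or/xchg
     * fallbacks above.  cas_loop_xor_retold() only illustrates the pattern. */
    #include <atomic_ops.h>

    static AO_t cas_loop_xor_retold(volatile AO_t *p, AO_t mask)
    {
        while (1) {
            AO_t exp = AO_load(p);
            AO_t new = exp ^ mask;
            if (AO_compare_and_swap(p, exp, new))
                return exp;         /* old value, like the *_retold operations */
        }
    }
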
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index f21f7c9588..daa0de88ab 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -28,6 +28,10 @@
#ifndef ETHREAD_PPC_ATOMIC_H
#define ETHREAD_PPC_ATOMIC_H
+#if ETHR_SIZEOF_INT != ETHR_SIZEOF_PTR
+# error "Incompatible size of 'int'"
+#endif
+
typedef struct {
volatile int counter;
} ethr_native_atomic_t;
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
index 104ec287e0..4cf87406d7 100644
--- a/erts/include/internal/pthread/ethr_event.h
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -40,15 +40,15 @@
#if ETHR_SIZEOF_PTR == 8
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
-#define ETHR_EVENT_OFF__ 0x7777777777777777L
-#define ETHR_EVENT_ON__ 0L
+#define ETHR_EVENT_OFF_WAITER__ ((ethr_sint_t) 0xffffffffffffffffL)
+#define ETHR_EVENT_OFF__ ((ethr_sint_t) 0x7777777777777777L)
+#define ETHR_EVENT_ON__ ((ethr_sint_t) 0L)
#elif ETHR_SIZEOF_PTR == 4
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
-#define ETHR_EVENT_OFF__ 0x77777777L
-#define ETHR_EVENT_ON__ 0L
+#define ETHR_EVENT_OFF_WAITER__ ((ethr_sint_t) 0xffffffffL)
+#define ETHR_EVENT_OFF__ ((ethr_sint_t) 0x77777777L)
+#define ETHR_EVENT_ON__ ((ethr_sint_t) 0L)
#else
@@ -77,7 +77,7 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint_t val;
ETHR_WRITE_MEMORY_BARRIER;
val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
@@ -114,7 +114,7 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint_t val;
ETHR_WRITE_MEMORY_BARRIER;
val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
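
The three constants encode the event state in a single pointer-sized atomic: ETHR_EVENT_ON__ (set), ETHR_EVENT_OFF__ (cleared, no waiter) and ETHR_EVENT_OFF_WAITER__ (cleared, a waiter may be blocked). ethr_event_set() swaps in ON and only issues a wake-up when the previous value was OFF_WAITER. The waiter side lives in ethr_event.c and is only partially visible in this diff; the sketch below is an assumption about its general shape, not the real code:

    /* Hedged sketch of the waiter side of the three-state event protocol,
     * using the e->state word of the non-futex variant above.  block_on()
     * stands in for the platform sleep primitive and is not a real function. */
    static void sketch_event_wait(ethr_event *e)
    {
        while (1) {
            ethr_sint_t val = ethr_atomic_read(&e->state);
            if (val == ETHR_EVENT_ON__)
                return;                                   /* already set */
            if (val == ETHR_EVENT_OFF__) {
                /* announce ourselves before going to sleep */
                val = ethr_atomic_cmpxchg(&e->state,
                                          ETHR_EVENT_OFF_WAITER__, /* new */
                                          ETHR_EVENT_OFF__);       /* expected */
                if (val == ETHR_EVENT_ON__)
                    return;
            }
            block_on(e);                                  /* hypothetical */
        }
    }
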
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 2da6472393..8d94fa939f 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -24,6 +24,10 @@
#ifndef ETHR_SPARC32_ATOMIC_H
#define ETHR_SPARC32_ATOMIC_H
+#if ETHR_SIZEOF_LONG != ETHR_SIZEOF_PTR
+# error "Incompatible size of 'long'"
+#endif
+
typedef struct {
volatile long counter;
} ethr_native_atomic_t;
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 69569d82d1..05246a2aa9 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -26,6 +26,10 @@
#include <atomic.h>
+#if ETHR_SIZEOF_LONG != ETHR_SIZEOF_PTR
+# error "Incompatible size of 'long'"
+#endif
+
/* An atomic is an aligned int accessed via locked operations.
*/
typedef struct {
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
index 500459dd6c..47068927a6 100644
--- a/erts/include/internal/win/ethr_atomic.h
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -66,7 +66,6 @@
#pragma intrinsic(_InterlockedCompareExchange_rel)
#endif
-
typedef struct {
volatile LONG value;
} ethr_native_atomic_t;
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
index 4db4cffd3a..38cf80c7df 100644
--- a/erts/lib_src/common/ethr_aux.c
+++ b/erts/lib_src/common/ethr_aux.c
@@ -279,14 +279,6 @@ typedef union {
static ethr_spinlock_t ts_ev_alloc_lock;
static ethr_ts_event *free_ts_ev;
-#if SIZEOF_VOID_P == SIZEOF_INT
-typedef unsigned int EthrPtrSzUInt;
-#elif SIZEOF_VOID_P == SIZEOF_LONG
-typedef unsigned long EthrPtrSzUInt;
-#else
-#error No pointer sized integer type
-#endif
-
static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
{
int i;
@@ -295,9 +287,9 @@ static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp)
+ ETHR_CACHE_LINE_SIZE);
if (!atsev)
return NULL;
- if ((((EthrPtrSzUInt) atsev) & ETHR_CACHE_LINE_MASK) == 0)
+ if ((((ethr_uint_t) atsev) & ETHR_CACHE_LINE_MASK) == 0)
atsev = ((ethr_aligned_ts_event *)
- ((((EthrPtrSzUInt) atsev) & ~ETHR_CACHE_LINE_MASK)
+ ((((ethr_uint_t) atsev) & ~ETHR_CACHE_LINE_MASK)
+ ETHR_CACHE_LINE_SIZE));
for (i = 1; i < size; i++) {
atsev[i-1].ts_ev.next = &atsev[i].ts_ev;
@@ -470,21 +462,21 @@ int ethr_get_main_thr_status(int *on)
/* Atomics */
void
-ethr_atomic_init(ethr_atomic_t *var, long i)
+ethr_atomic_init(ethr_atomic_t *var, ethr_sint_t i)
{
ETHR_ASSERT(var);
ethr_atomic_init__(var, i);
}
void
-ethr_atomic_set(ethr_atomic_t *var, long i)
+ethr_atomic_set(ethr_atomic_t *var, ethr_sint_t i)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
ethr_atomic_set__(var, i);
}
-long
+ethr_sint_t
ethr_atomic_read(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
@@ -493,15 +485,15 @@ ethr_atomic_read(ethr_atomic_t *var)
}
-long
-ethr_atomic_add_read(ethr_atomic_t *var, long incr)
+ethr_sint_t
+ethr_atomic_add_read(ethr_atomic_t *var, ethr_sint_t incr)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
return ethr_atomic_add_read__(var, incr);
}
-long
+ethr_sint_t
ethr_atomic_inc_read(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
@@ -509,7 +501,7 @@ ethr_atomic_inc_read(ethr_atomic_t *var)
return ethr_atomic_inc_read__(var);
}
-long
+ethr_sint_t
ethr_atomic_dec_read(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
@@ -518,7 +510,7 @@ ethr_atomic_dec_read(ethr_atomic_t *var)
}
void
-ethr_atomic_add(ethr_atomic_t *var, long incr)
+ethr_atomic_add(ethr_atomic_t *var, ethr_sint_t incr)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
@@ -541,39 +533,39 @@ ethr_atomic_dec(ethr_atomic_t *var)
ethr_atomic_dec__(var);
}
-long
-ethr_atomic_read_band(ethr_atomic_t *var, long mask)
+ethr_sint_t
+ethr_atomic_read_band(ethr_atomic_t *var, ethr_sint_t mask)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
return ethr_atomic_read_band__(var, mask);
}
-long
-ethr_atomic_read_bor(ethr_atomic_t *var, long mask)
+ethr_sint_t
+ethr_atomic_read_bor(ethr_atomic_t *var, ethr_sint_t mask)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
return ethr_atomic_read_bor__(var, mask);
}
-long
-ethr_atomic_xchg(ethr_atomic_t *var, long new)
+ethr_sint_t
+ethr_atomic_xchg(ethr_atomic_t *var, ethr_sint_t new)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
return ethr_atomic_xchg__(var, new);
}
-long
-ethr_atomic_cmpxchg(ethr_atomic_t *var, long new, long expected)
+ethr_sint_t
+ethr_atomic_cmpxchg(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t expected)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
return ethr_atomic_cmpxchg__(var, new, expected);
}
-long
+ethr_sint_t
ethr_atomic_read_acqb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
@@ -581,7 +573,7 @@ ethr_atomic_read_acqb(ethr_atomic_t *var)
return ethr_atomic_read_acqb__(var);
}
-long
+ethr_sint_t
ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
@@ -590,7 +582,7 @@ ethr_atomic_inc_read_acqb(ethr_atomic_t *var)
}
void
-ethr_atomic_set_relb(ethr_atomic_t *var, long i)
+ethr_atomic_set_relb(ethr_atomic_t *var, ethr_sint_t i)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
@@ -605,7 +597,7 @@ ethr_atomic_dec_relb(ethr_atomic_t *var)
ethr_atomic_dec_relb__(var);
}
-long
+ethr_sint_t
ethr_atomic_dec_read_relb(ethr_atomic_t *var)
{
ETHR_ASSERT(!ethr_not_inited__);
@@ -613,16 +605,16 @@ ethr_atomic_dec_read_relb(ethr_atomic_t *var)
return ethr_atomic_dec_read_relb__(var);
}
-long
-ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, long new, long exp)
+ethr_sint_t
+ethr_atomic_cmpxchg_acqb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
return ethr_atomic_cmpxchg_acqb__(var, new, exp);
}
-long
-ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, long new, long exp)
+ethr_sint_t
+ethr_atomic_cmpxchg_relb(ethr_atomic_t *var, ethr_sint_t new, ethr_sint_t exp)
{
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(var);
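
The removal of EthrPtrSzUInt works because ethr_uint_t is now guaranteed to be pointer-sized, so casting a pointer through it for the cache-line rounding in ts_event_pool() is well defined on every supported platform. The rounding idiom itself, as a hedged standalone sketch:

    /* Round a pointer up to the next ETHR_CACHE_LINE_SIZE boundary via a
     * pointer-sized unsigned integer, mirroring the casts used above.
     * align_up() is illustrative, not an ethread function. */
    #include "ethread.h"   /* ethr_uint_t, ETHR_CACHE_LINE_SIZE/_MASK */

    static void *align_up(void *p)
    {
        ethr_uint_t u = (ethr_uint_t) p;
        if (u & ETHR_CACHE_LINE_MASK)
            u = (u & ~ETHR_CACHE_LINE_MASK) + ETHR_CACHE_LINE_SIZE;
        return (void *) u;
    }
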
diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c
index a2fbf3a454..7bbd74c03f 100644
--- a/erts/lib_src/common/ethr_mutex.c
+++ b/erts/lib_src/common/ethr_mutex.c
@@ -206,16 +206,16 @@ static void hard_debug_chk_q__(struct ethr_mutex_base_ *, int);
#ifdef ETHR_USE_OWN_RWMTX_IMPL__
static void
rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint_t initial,
int q_locked);
static void
rwmutex_unlock_wake(ethr_rwmutex *rwmtx,
int have_w,
- long initial,
+ ethr_sint_t initial,
int transfer_read_lock);
static int
rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint_t initial,
ethr_ts_event *tse,
int start_next_ix,
int check_before_try,
@@ -247,7 +247,7 @@ rwmutex_freqread_rdrs_add(ethr_rwmutex *rwmtx,
ETHR_ASSERT(type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
ETHR_ASSERT(inc == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint_t) 1);
}
}
@@ -266,7 +266,7 @@ rwmutex_freqread_rdrs_inc(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
goto atomic_inc;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 0);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 1);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint_t) 1);
}
}
@@ -287,13 +287,13 @@ rwmutex_freqread_rdrs_dec(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
goto atomic_dec;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint_t) 0);
}
}
#endif
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
rwmutex_freqread_rdrs_dec_read(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
{
int ix;
@@ -308,12 +308,12 @@ rwmutex_freqread_rdrs_dec_read(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
goto atomic_dec_read;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
- return (long) 0;
+ ethr_atomic_set(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint_t) 0);
+ return (ethr_sint_t) 0;
}
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
rwmutex_freqread_rdrs_dec_read_relb(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
{
int ix;
@@ -328,15 +328,15 @@ rwmutex_freqread_rdrs_dec_read_relb(ethr_rwmutex *rwmtx, ethr_ts_event *tse)
goto atomic_dec_read;
ETHR_ASSERT(rwmtx->type == ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ);
ETHR_ASSERT(ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers) == 1);
- ethr_atomic_set_relb(&rwmtx->tdata.ra[ix].data.readers, (long) 0);
- return (long) 0;
+ ethr_atomic_set_relb(&rwmtx->tdata.ra[ix].data.readers, (ethr_sint_t) 0);
+ return (ethr_sint_t) 0;
}
}
-static ETHR_INLINE long
+static ETHR_INLINE ethr_sint_t
rwmutex_freqread_rdrs_read(ethr_rwmutex *rwmtx, int ix)
{
- long res = ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers);
+ ethr_sint_t res = ethr_atomic_read(&rwmtx->tdata.ra[ix].data.readers);
#ifdef ETHR_DEBUG
switch (rwmtx->type) {
case ETHR_RWMUTEX_TYPE_FREQUENT_READ:
@@ -402,12 +402,12 @@ static void
event_wait(struct ethr_mutex_base_ *mtxb,
ethr_ts_event *tse,
int spincount,
- long type,
+ ethr_sint_t type,
int is_rwmtx,
int is_freq_read)
{
int locked = 0;
- long act;
+ ethr_sint_t act;
int need_try_complete_runlock = 0;
int transfer_read_lock = 0;
@@ -453,7 +453,7 @@ event_wait(struct ethr_mutex_base_ *mtxb,
/* Set wait bit */
while (1) {
- long new, exp = act;
+ ethr_sint_t new, exp = act;
need_try_complete_runlock = 0;
transfer_read_lock = 0;
@@ -649,11 +649,11 @@ int check_readers_array(ethr_rwmutex *rwmtx,
static ETHR_INLINE void
write_lock_wait(struct ethr_mutex_base_ *mtxb,
- long initial,
+ ethr_sint_t initial,
int is_rwmtx,
int is_freq_read)
{
- long act = initial;
+ ethr_sint_t act = initial;
int scnt, start_scnt;
ethr_ts_event *tse = NULL;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -763,7 +763,7 @@ mtxb_init(struct ethr_mutex_base_ *mtxb,
static int
mtxb_destroy(struct ethr_mutex_base_ *mtxb)
{
- long act;
+ ethr_sint_t act;
ETHR_MTX_Q_LOCK(&mtxb->qlck);
act = ethr_atomic_read(&mtxb->flgs);
ETHR_MTX_Q_UNLOCK(&mtxb->qlck);
@@ -831,13 +831,13 @@ ethr_mutex_destroy(ethr_mutex *mtx)
}
void
-ethr_mutex_lock_wait__(ethr_mutex *mtx, long initial)
+ethr_mutex_lock_wait__(ethr_mutex *mtx, ethr_sint_t initial)
{
write_lock_wait(&mtx->mtxb, initial, 0, 0);
}
void
-ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
+ethr_mutex_unlock_wake__(ethr_mutex *mtx, ethr_sint_t initial)
{
ethr_ts_event *tse;
@@ -865,7 +865,7 @@ ethr_mutex_unlock_wake__(ethr_mutex *mtx, long initial)
static void
enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
{
- long act;
+ ethr_sint_t act;
/*
* `ethr_cond_signal()' and `ethr_cond_broadcast()' end up here. If `mtx'
@@ -937,7 +937,7 @@ enqueue_mtx(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end)
multi = tse_start != tse_end;
while (1) {
- long new, exp = act;
+ ethr_sint_t new, exp = act;
if (multi || (act & ETHR_RWMTX_W_FLG__))
new = ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__;
@@ -1185,7 +1185,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx)
/* Wait */
woken = 0;
while (1) {
- long act;
+ ethr_sint_t act;
ethr_event_reset(&tse->event);
@@ -1469,7 +1469,7 @@ int check_readers_array(ethr_rwmutex *rwmtx,
ETHR_MEMORY_BARRIER;
do {
- long act = rwmutex_freqread_rdrs_read(rwmtx, ix);
+ ethr_sint_t act = rwmutex_freqread_rdrs_read(rwmtx, ix);
if (act != 0)
return EBUSY;
ix++;
@@ -1483,9 +1483,9 @@ int check_readers_array(ethr_rwmutex *rwmtx,
static void
rwmutex_freqread_rdrs_dec_chk_wakeup(ethr_rwmutex *rwmtx,
ethr_ts_event *tse,
- long initial)
+ ethr_sint_t initial)
{
- long act = initial;
+ ethr_sint_t act = initial;
if ((act & (ETHR_RWMTX_W_FLG__|
ETHR_RWMTX_R_ABRT_UNLCK_FLG__)) == 0) {
@@ -1539,7 +1539,7 @@ static void
rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
ethr_ts_event *tse)
{
- long act;
+ ethr_sint_t act;
/*
* Restore failed increment
*/
@@ -1555,14 +1555,14 @@ rwmutex_freqread_restore_failed_tryrlock(ethr_rwmutex *rwmtx,
static int
rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
- long initial,
+ ethr_sint_t initial,
ethr_ts_event *tse,
int start_next_ix,
int check_before_try,
int try_write_lock)
{
ethr_ts_event *tse_tmp;
- long act = initial;
+ ethr_sint_t act = initial;
int six, res, length;
ETHR_ASSERT((act & ETHR_RWMTX_W_FLG__) == 0);
@@ -1606,8 +1606,8 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
restart:
while (1) {
- long exp = act;
- long new = act+1;
+ ethr_sint_t exp = act;
+ ethr_sint_t new = act+1;
ETHR_ASSERT((act & ETHR_RWMTX_R_ABRT_UNLCK_FLG__) == 0);
@@ -1651,8 +1651,8 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
while (1) {
int finished_abort = 0;
- long exp = act;
- long new = act;
+ ethr_sint_t exp = act;
+ ethr_sint_t new = act;
new--;
if (act & ETHR_RWMTX_R_ABRT_UNLCK_FLG__) {
@@ -1713,7 +1713,7 @@ rwmutex_try_complete_runlock(ethr_rwmutex *rwmtx,
static ETHR_INLINE void
rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint_t act;
/*
* Restore failed increment
*/
@@ -1727,10 +1727,9 @@ rwmutex_incdec_restore_failed_tryrlock(ethr_rwmutex *rwmtx)
#endif
static void
-rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx,
- long initial)
+rwmutex_normal_rlock_wait(ethr_rwmutex *rwmtx, ethr_sint_t initial)
{
- long act = initial, exp;
+ ethr_sint_t act = initial, exp;
int scnt, start_scnt;
ethr_ts_event *tse = NULL;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -1792,7 +1791,7 @@ static int
rwmutex_freqread_rlock(ethr_rwmutex *rwmtx, ethr_ts_event *tse, int trylock)
{
int res = 0;
- long act;
+ ethr_sint_t act;
rwmutex_freqread_rdrs_inc(rwmtx, tse);
@@ -1804,7 +1803,7 @@ rwmutex_freqread_rlock(ethr_rwmutex *rwmtx, ethr_ts_event *tse, int trylock)
int wake_other_readers;
while (1) {
- long exp, new;
+ ethr_sint_t exp, new;
wake_other_readers = 0;
@@ -1862,7 +1861,7 @@ static void
rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
ethr_ts_event *tse)
{
- long act;
+ ethr_sint_t act;
int scnt, start_scnt;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
@@ -1900,21 +1899,23 @@ rwmutex_freqread_rlock_wait(ethr_rwmutex *rwmtx,
}
static void
-rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+rwmutex_normal_rwlock_wait(ethr_rwmutex *rwmtx, ethr_sint_t initial)
{
write_lock_wait(&rwmtx->mtxb, initial, 1, 0);
}
static void
-rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, long initial)
+rwmutex_freqread_rwlock_wait(ethr_rwmutex *rwmtx, ethr_sint_t initial)
{
write_lock_wait(&rwmtx->mtxb, initial, 1, 1);
}
static ETHR_INLINE void
-rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, long act_initial)
+rwlock_wake_set_flags(ethr_rwmutex *rwmtx,
+ ethr_sint_t new_initial,
+ ethr_sint_t act_initial)
{
- long act, act_mask;
+ ethr_sint_t act, act_mask;
int chk_abrt_flg;
ETHR_MEMORY_BARRIER;
@@ -1942,8 +1943,8 @@ rwlock_wake_set_flags(ethr_rwmutex *rwmtx, long new_initial, long act_initial)
act = act_initial;
while (1) {
- long exp = act;
- long new = new_initial + (act & act_mask);
+ ethr_sint_t exp = act;
+ ethr_sint_t new = new_initial + (act & act_mask);
if (chk_abrt_flg && (act & act_mask))
new |= ETHR_RWMTX_R_ABRT_UNLCK_FLG__;
act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
@@ -1960,7 +1961,7 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
int have_w,
ethr_ts_event *tse)
{
- long exp, act, imask;
+ ethr_sint_t exp, act, imask;
exp = have_w ? ETHR_RWMTX_W_FLG__ : 0;
@@ -2012,9 +2013,9 @@ dbg_unlock_wake(ethr_rwmutex *rwmtx,
#endif
static void
-rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
+rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, ethr_sint_t initial, int q_locked)
{
- long act = initial;
+ ethr_sint_t act = initial;
if (!q_locked) {
ethr_ts_event *tse;
@@ -2035,10 +2036,10 @@ rwmutex_transfer_read_lock(ethr_rwmutex *rwmtx, long initial, int q_locked)
}
static void
-rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
+rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, ethr_sint_t initial,
int transfer_read_lock)
{
- long new, act = initial;
+ ethr_sint_t new, act = initial;
ethr_ts_event *tse;
if (transfer_read_lock) {
@@ -2060,7 +2061,7 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
return;
else {
while ((act & ETHR_RWMTX_WAIT_FLGS__) == 0) {
- long exp = act;
+ ethr_sint_t exp = act;
new = exp & ~ETHR_RWMTX_W_FLG__;
act = ethr_atomic_cmpxchg(&rwmtx->mtxb.flgs, new, exp);
if (act == exp)
@@ -2131,7 +2132,7 @@ rwmutex_unlock_wake(ethr_rwmutex *rwmtx, int have_w, long initial,
if (rwmtx->type == ETHR_RWMUTEX_TYPE_NORMAL) {
rs = rwmtx->tdata.rs;
- new = (long) rs;
+ new = (ethr_sint_t) rs;
rwmtx->tdata.rs = 0;
}
else {
@@ -2187,16 +2188,16 @@ alloc_readers_array(int length, ethr_rwmutex_lived lived)
if (!mem)
return NULL;
- if ((((unsigned long) mem) & ETHR_CACHE_LINE_MASK) == 0) {
+ if ((((ethr_uint_t) mem) & ETHR_CACHE_LINE_MASK) == 0) {
ra = (ethr_rwmtx_readers_array__ *) mem;
ra->data.byte_offset = 0;
}
else {
ra = ((ethr_rwmtx_readers_array__ *)
- ((((unsigned long) mem) & ~ETHR_CACHE_LINE_MASK)
+ ((((ethr_uint_t) mem) & ~ETHR_CACHE_LINE_MASK)
+ ETHR_CACHE_LINE_SIZE));
- ra->data.byte_offset = (int) ((unsigned long) ra
- - (unsigned long) mem);
+ ra->data.byte_offset = (int) ((ethr_uint_t) ra
+ - (ethr_uint_t) mem);
}
ra->data.lived = lived;
return ra;
@@ -2324,7 +2325,7 @@ ethr_rwmutex_destroy(ethr_rwmutex *rwmtx)
#endif
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(rwmtx);
if (rwmtx->type != ETHR_RWMUTEX_TYPE_NORMAL) {
- long act = ethr_atomic_read(&rwmtx->mtxb.flgs);
+ ethr_sint_t act = ethr_atomic_read(&rwmtx->mtxb.flgs);
if (act == ETHR_RWMTX_R_FLG__)
rwmutex_try_complete_runlock(rwmtx, act, NULL, 0, 0, 0);
}
@@ -2345,7 +2346,7 @@ int
ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
{
int res = 0;
- long act;
+ ethr_sint_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2369,7 +2370,7 @@ ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
}
}
#else
- long exp = 0;
+ ethr_sint_t exp = 0;
int tries = 0;
while (1) {
@@ -2416,7 +2417,7 @@ ethr_rwmutex_tryrlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2433,7 +2434,7 @@ ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
if (act & (ETHR_RWMTX_W_FLG__|ETHR_RWMTX_W_WAIT_FLG__))
rwmutex_normal_rlock_wait(rwmtx, act);
#else
- long exp = 0;
+ ethr_sint_t exp = 0;
while (1) {
act = ethr_atomic_cmpxchg_acqb(&rwmtx->mtxb.flgs, exp+1, exp);
@@ -2469,7 +2470,7 @@ ethr_rwmutex_rlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_runlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint_t act;
ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(&rwmtx->mtxb);
ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(&rwmtx->mtxb);
@@ -2521,7 +2522,7 @@ int
ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
{
int res = 0;
- long act;
+ ethr_sint_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
@@ -2582,7 +2583,7 @@ ethr_rwmutex_tryrwlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
@@ -2630,7 +2631,7 @@ ethr_rwmutex_rwlock(ethr_rwmutex *rwmtx)
void
ethr_rwmutex_rwunlock(ethr_rwmutex *rwmtx)
{
- long act;
+ ethr_sint_t act;
ETHR_ASSERT(!ethr_not_inited__);
ETHR_ASSERT(rwmtx);
ETHR_ASSERT(rwmtx->initialized == ETHR_RWMUTEX_INITIALIZED);
@@ -2779,7 +2780,7 @@ static void
hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
{
int res;
- long flgs = ethr_atomic_read(&mtxb->flgs);
+ ethr_sint_t flgs = ethr_atomic_read(&mtxb->flgs);
ETHR_MTX_HARD_ASSERT(res == 0);
@@ -2802,7 +2803,7 @@ hard_debug_chk_q__(struct ethr_mutex_base_ *mtxb, int is_rwmtx)
tse = mtxb->q;
do {
- long type;
+ ethr_sint_t type;
ETHR_MTX_HARD_ASSERT(tse->next->prev == tse);
ETHR_MTX_HARD_ASSERT(tse->prev->next == tse);
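
Throughout this file the flag word is updated with the same compare-and-exchange retry shape: keep the last observed value in act, derive new from it, and call ethr_atomic_cmpxchg() until the returned value equals the expected one. Extracted as a hedged sketch (clear_w_flag() is not a function in ethr_mutex.c):

    /* The cmpxchg retry shape used by rwmutex_unlock_wake() and friends. */
    static ethr_sint_t clear_w_flag(ethr_atomic_t *flgs, ethr_sint_t act)
    {
        while (1) {
            ethr_sint_t exp = act;
            ethr_sint_t new = exp & ~ETHR_RWMTX_W_FLG__;
            act = ethr_atomic_cmpxchg(flgs, new, exp);
            if (act == exp)
                return new;          /* we installed the cleared value */
        }
    }
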
diff --git a/erts/lib_src/pthread/ethr_event.c b/erts/lib_src/pthread/ethr_event.c
index 6731c0eb46..ae1d827731 100644
--- a/erts/lib_src/pthread/ethr_event.c
+++ b/erts/lib_src/pthread/ethr_event.c
@@ -24,6 +24,10 @@
#define ETHR_INLINE_FUNC_NAME_(X) X ## __
#define ETHR_EVENT_IMPL__
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include "ethread.h"
#if defined(ETHR_LINUX_FUTEX_IMPL__)
@@ -52,7 +56,7 @@ wait__(ethr_event *e, int spincount)
{
unsigned sc = spincount;
int res;
- long val;
+ ethr_sint_t val;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
if (spincount < 0)
@@ -131,7 +135,7 @@ static ETHR_INLINE int
wait__(ethr_event *e, int spincount)
{
int sc = spincount;
- long val;
+ ethr_sint_t val;
int res, ulres;
int until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
diff --git a/erts/lib_src/pthread/ethread.c b/erts/lib_src/pthread/ethread.c
index ea1d9d43f0..7e5f92655c 100644
--- a/erts/lib_src/pthread/ethread.c
+++ b/erts/lib_src/pthread/ethread.c
@@ -81,14 +81,14 @@ typedef struct {
static void *thr_wrapper(void *vtwd)
{
- long result;
+ ethr_sint_t result;
void *res;
ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
void *(*thr_func)(void *) = twd->thr_func;
void *arg = twd->arg;
ethr_ts_event *tsep = NULL;
- result = (long) ethr_make_ts_event__(&tsep);
+ result = (ethr_sint_t) ethr_make_ts_event__(&tsep);
if (result == 0) {
tsep->iflgs |= ETHR_TS_EV_ETHREAD;
@@ -191,7 +191,7 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
}
#endif
- ethr_atomic_init(&twd.result, -1);
+ ethr_atomic_init(&twd.result, (ethr_sint_t) -1);
twd.tse = ethr_get_ts_event();
twd.thr_func = func;
twd.arg = arg;
@@ -252,7 +252,7 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
/* Wait for child to initialize... */
while (1) {
- long result;
+ ethr_sint_t result;
ethr_event_reset(&twd.tse->event);
result = ethr_atomic_read(&twd.result);
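
The create/init handshake above uses one of these atomics as the result slot: the parent initializes twd.result to -1, the child stores the status of ethr_make_ts_event__() there, and the parent loops until the value changes, sleeping on the ts event in between. A condensed, hedged sketch of that handshake (field names follow ethr_thr_wrap_data__ only loosely and the event wait is reduced to a spin):

    /* Condensed parent/child result handshake; sketch_wrap_data is not the
     * real ethr_thr_wrap_data__ structure. */
    typedef struct {
        ethr_atomic_t result;   /* -1 = child not done, 0 = ok, >0 = error code */
        ethr_ts_event *tse;
    } sketch_wrap_data;

    static void parent_side(sketch_wrap_data *twd)
    {
        ethr_atomic_init(&twd->result, (ethr_sint_t) -1);
        /* ... spawn the child thread ... */
        while (ethr_atomic_read(&twd->result) == (ethr_sint_t) -1)
            ;                   /* the real code blocks on twd->tse->event here */
    }

    static void child_side(sketch_wrap_data *twd, int init_res)
    {
        ethr_atomic_set(&twd->result, (ethr_sint_t) init_res);
        /* ... then wake the parent via its ts event ... */
    }
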
diff --git a/erts/lib_src/win/ethread.c b/erts/lib_src/win/ethread.c
index 69523edf94..63cf99e317 100644
--- a/erts/lib_src/win/ethread.c
+++ b/erts/lib_src/win/ethread.c
@@ -93,20 +93,20 @@ static void thr_exit_cleanup(ethr_tid *tid, void *res)
static unsigned __stdcall thr_wrapper(LPVOID vtwd)
{
ethr_tid my_tid;
- long result;
+ ethr_sint_t result;
void *res;
ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
void *(*thr_func)(void *) = twd->thr_func;
void *arg = twd->arg;
ethr_ts_event *tsep = NULL;
- result = (long) ethr_make_ts_event__(&tsep);
+ result = (ethr_sint_t) ethr_make_ts_event__(&tsep);
if (result == 0) {
tsep->iflgs |= ETHR_TS_EV_ETHREAD;
my_tid = *twd->tid;
if (!TlsSetValue(own_tid_key, (LPVOID) &my_tid)) {
- result = (long) ethr_win_get_errno__();
+ result = (ethr_sint_t) ethr_win_get_errno__();
ethr_free_ts_event__(tsep);
}
else {
@@ -352,7 +352,7 @@ ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
/* Wait for child to initialize... */
while (1) {
- long result;
+ ethr_sint_t result;
int err;
ethr_event_reset(&twd.tse->event);