Diffstat (limited to 'erts/include')
-rw-r--r--   erts/include/internal/ethr_atomics.h                726
-rw-r--r--   erts/include/internal/ethr_mutex.h                   78
-rw-r--r--   erts/include/internal/ethr_optimized_fallbacks.h     74
-rw-r--r--   erts/include/internal/ethread.h                     376
-rw-r--r--   erts/include/internal/ethread_header_config.h.in     36
-rw-r--r--   erts/include/internal/gcc/ethr_atomic.h             222
-rw-r--r--   erts/include/internal/gcc/ethread.h                  10
-rw-r--r--   erts/include/internal/i386/atomic.h                 190
-rw-r--r--   erts/include/internal/i386/ethread.h                  7
-rw-r--r--   erts/include/internal/libatomic_ops/ethr_atomic.h   200
-rw-r--r--   erts/include/internal/ppc32/atomic.h                 94
-rw-r--r--   erts/include/internal/pthread/ethr_event.h           54
-rw-r--r--   erts/include/internal/sparc32/atomic.h              172
-rw-r--r--   erts/include/internal/sparc32/ethread.h               7
-rw-r--r--   erts/include/internal/tile/atomic.h                 104
-rw-r--r--   erts/include/internal/win/ethr_atomic.h             415
-rw-r--r--   erts/include/internal/win/ethr_event.h               16
-rw-r--r--   erts/include/internal/win/ethread.h                   6
18 files changed, 1892 insertions, 895 deletions
diff --git a/erts/include/internal/ethr_atomics.h b/erts/include/internal/ethr_atomics.h
new file mode 100644
index 0000000000..1caf4d0567
--- /dev/null
+++ b/erts/include/internal/ethr_atomics.h
@@ -0,0 +1,726 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: The ethread atomic API
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_ATOMIC_H__
+#define ETHR_ATOMIC_H__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATOMIC_PROTOTYPES__
+#endif
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * No native atomic implementation available. :(
+ * Use fallback...
+ */
+typedef ethr_sint32_t ethr_atomic32_t;
+typedef ethr_sint_t ethr_atomic_t;
+#else
+/*
+ * Map ethread native atomics to ethread API atomics.
+ *
+ * We have at least one native atomic implementation that
+ * can handle integers at least as wide as a pointer.
+ */
+
+/* -- Pointer size atomics -- */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+#if ETHR_SIZEOF_PTR == 8
+# if defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic64_addr
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#elif ETHR_SIZEOF_PTR == 4
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic32_addr
+# ifdef ETHR_HAVE_NATIVE_ATOMIC32
+typedef ethr_native_atomic32_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NATMC_T__ ethr_native_atomic64_t
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#endif
+
+/* -- 32-bit atomics -- */
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint32_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint64_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
+
+#endif
+
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+ethr_sint_t *ethr_atomic_addr(ethr_atomic_t *);
+void ethr_atomic_init(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_set(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_add(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+
+ethr_sint32_t *ethr_atomic32_addr(ethr_atomic32_t *);
+void ethr_atomic32_init(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_set(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read(ethr_atomic32_t *);
+void ethr_atomic32_inc(ethr_atomic32_t *);
+void ethr_atomic32_dec(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_add_read(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_add(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_band(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_bor(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_xchg(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_acqb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read_acqb(ethr_atomic32_t *);
+void ethr_atomic32_set_relb(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_dec_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+#endif
+
+int ethr_init_atomics(void);
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * Fallbacks for atomics used in absence of a native implementation.
+ */
+
+#define ETHR_ATOMIC_ADDR_BITS 10
+#define ETHR_ATOMIC_ADDR_SHIFT 6
+
+typedef struct {
+ union {
+ ethr_spinlock_t lck;
+ char buf[ETHR_CACHE_LINE_SIZE];
+ } u;
+} ethr_atomic_protection_t;
+
+extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \
+(&ethr_atomic_protection__[((((ethr_uint_t) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
+
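+/*
+ * Note: ETHR_ATOMIC_PTR2LCK__() above uses address bits
+ * [ETHR_ATOMIC_ADDR_SHIFT, ETHR_ATOMIC_ADDR_SHIFT+ETHR_ATOMIC_ADDR_BITS)
+ * to pick one of 2^10 cache-line sized lock slots; fallback atomics that
+ * share a 64-byte cache line therefore share one spinlock.
+ */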
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
+do { \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
+ { EXPS; } \
+ ethr_spin_unlock(slp__); \
+} while (0)
+
+#endif
+
+/*
+ * --- Pointer size atomics ---------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_addr)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t *) ETHR_NATMC_ADDR_FUNC__(var);
+#else
+ return (ethr_sint_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, ethr_sint_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) i);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var, ethr_sint_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var,
+ (ETHR_NAINT_T__) new);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
+}
+
+/*
+ * Important memory barrier requirements.
+ *
+ * Each of the following atomic operations *must* supply a memory
+ * barrier of at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var,
+ ethr_sint_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
+
+/*
+ * --- 32-bit atomics ---------------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_addr)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic32_addr(var);
+#else
+ return (ethr_sint32_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_init)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add)(ethr_atomic32_t *var,
+ ethr_sint32_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add_read)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) i);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_band)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_bor)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return
+ (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var,
+ (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_xchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var,
+ (ETHR_NAINT32_T__) new);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
+}
+
+/*
+ * Important memory barrier requirements.
+ *
+ * Each of the following atomic operations *must* supply a memory
+ * barrier of at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_acqb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+
+#endif
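
The header added above is consumed through the ethr_atomic*_ API rather
than through the native operations directly. As a rough usage sketch
(the res_t type and free_resource() are hypothetical, not part of this
commit), a reference count built on the 32-bit flavor pairs a release
barrier on the final decrement with plain operations elsewhere:

    #include "ethr_atomics.h"

    typedef struct { ethr_atomic32_t refc; } res_t;      /* hypothetical */

    static void res_init(res_t *r) { ethr_atomic32_init(&r->refc, 1); }
    static void res_ref(res_t *r)  { ethr_atomic32_inc(&r->refc); }

    static void res_unref(res_t *r)
    {
        /* _relb: all writes to *r are ordered before the count drops */
        if (ethr_atomic32_dec_read_relb(&r->refc) == 0)
            free_resource(r);                            /* hypothetical */
    }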
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index 01855864e3..fadaf1e2a4 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -78,13 +78,13 @@
# error Need a qlock implementation
#endif
-#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
-#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
-#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+#define ETHR_RWMTX_W_FLG__ (((ethr_sint32_t) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((ethr_sint32_t) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((ethr_sint32_t) 1) << 29)
/* frequent read kind */
-#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
-#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((long) 1) << 27)
+#define ETHR_RWMTX_R_FLG__ (((ethr_sint32_t) 1) << 28)
+#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((ethr_sint32_t) 1) << 27)
#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)
/* normal kind */
@@ -106,28 +106,28 @@
#endif
#define ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(MTX) \
- ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic_read(&(MTX)->mtxb.flgs))
+ ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic32_read(&(MTX)->mtxb.flgs))
struct ethr_mutex_base_ {
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
long pre_fence;
#endif
- ethr_atomic_t flgs;
- ETHR_MTX_QLOCK_TYPE__ qlck;
- ethr_ts_event *q;
+ ethr_atomic32_t flgs;
short aux_scnt;
short main_scnt;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
#ifdef ETHR_MTX_HARD_DEBUG_WSQ
int ws;
#endif
#ifdef ETHR_MTX_CHK_EXCL
- ethr_atomic_t exclusive;
+ ethr_atomic32_t exclusive;
#endif
#ifdef ETHR_MTX_CHK_NON_EXCL
- ethr_atomic_t non_exclusive;
+ ethr_atomic32_t non_exclusive;
#endif
#ifdef ETHR_MTX_HARD_DEBUG_LFS
- ethr_atomic_t hdbg_lfs;
+ ethr_atomic32_t hdbg_lfs;
#endif
};
@@ -236,7 +236,7 @@ typedef struct {
typedef union {
struct {
- ethr_atomic_t readers;
+ ethr_atomic32_t readers;
int waiting_readers;
int byte_offset;
ethr_rwmutex_lived lived;
@@ -298,13 +298,13 @@ void ethr_rwmutex_rwunlock(ethr_rwmutex *);
#ifdef ETHR_MTX_HARD_DEBUG_LFS
# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
do { \
- ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+ ethr_atomic32_init(&(MTXB)->hdbg_lfs, 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_inc_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ > 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
@@ -317,15 +317,15 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ >= 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == -1); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
@@ -338,7 +338,7 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_inctest(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -386,12 +386,12 @@ do { \
#endif
# define ETHR_MTX_CHK_EXCL_INIT__(MTXB) \
- ethr_atomic_init(&(MTXB)->exclusive, 0)
+ ethr_atomic32_init(&(MTXB)->exclusive, 0)
# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (!ethr_atomic_read(&(MTXB)->exclusive)) \
+ if (!ethr_atomic32_read(&(MTXB)->exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -399,7 +399,7 @@ do { \
# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (ethr_atomic_read(&(MTXB)->exclusive)) \
+ if (ethr_atomic32_read(&(MTXB)->exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is not exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -407,13 +407,13 @@ do { \
# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB) \
do { \
ETHR_MTX_CHK_EXCL_IS_NOT_EXCL((MTXB)); \
- ethr_atomic_set(&(MTXB)->exclusive, 1); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 1); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB) \
do { \
ETHR_MTX_CHK_EXCL_IS_EXCL((MTXB)); \
- ethr_atomic_set(&(MTXB)->exclusive, 0); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -424,11 +424,11 @@ do { \
#endif
# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB) \
- ethr_atomic_init(&(MTXB)->non_exclusive, 0)
+ ethr_atomic32_init(&(MTXB)->non_exclusive, 0)
# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (!ethr_atomic_read(&(MTXB)->non_exclusive)) \
+ if (!ethr_atomic32_read(&(MTXB)->non_exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is non-exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -436,7 +436,7 @@ do { \
# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- if (ethr_atomic_read(&(MTXB)->non_exclusive)) \
+ if (ethr_atomic32_read(&(MTXB)->non_exclusive)) \
ethr_assert_failed(__FILE__, __LINE__, __func__,\
"is not non-exclusive"); \
ETHR_COMPILER_BARRIER; \
@@ -444,19 +444,19 @@ do { \
# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_inc(&(MTXB)->non_exclusive); \
+ ethr_atomic32_inc(&(MTXB)->non_exclusive); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_add(&(MTXB)->non_exclusive, (NO)); \
+ ethr_atomic32_add(&(MTXB)->non_exclusive, (NO)); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB) \
do { \
ETHR_COMPILER_BARRIER; \
- ethr_atomic_dec(&(MTXB)->non_exclusive); \
+ ethr_atomic32_dec(&(MTXB)->non_exclusive); \
ETHR_COMPILER_BARRIER; \
} while (0)
#else
@@ -501,18 +501,18 @@ do { \
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
-void ethr_mutex_lock_wait__(ethr_mutex *, long);
-void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint32_t);
+void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint32_t);
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
int res;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
res = (act == 0) ? 0 : EBUSY;
#ifdef ETHR_MTX_CHK_EXCL
@@ -531,11 +531,11 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
ethr_mutex_lock_wait__(mtx, act);
@@ -551,7 +551,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_COMPILER_BARRIER;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
@@ -559,7 +559,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
ETHR_MTX_CHK_EXCL_UNSET_EXCL(&mtx->mtxb);
- act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
ethr_mutex_unlock_wake__(mtx, act);
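
The hunks above shrink the mutex flag word to a fixed 32-bit type and
route all accesses through the new ethr_atomic32_* operations; the lock
word protocol itself is unchanged. In simplified form (a sketch reusing
the flag constants defined above; the waiting and waking paths are
omitted):

    #include <errno.h>

    static int sketch_trylock(ethr_atomic32_t *flgs)
    {
        /* 0 -> W_FLG with an acquire barrier; anything else is busy */
        ethr_sint32_t act =
            ethr_atomic32_cmpxchg_acqb(flgs, ETHR_RWMTX_W_FLG__, 0);
        return act == 0 ? 0 : EBUSY;
    }

    static void sketch_unlock(ethr_atomic32_t *flgs)
    {
        /* W_FLG -> 0 with a release barrier; if other (wait) bits were
         * set, the real code calls ethr_mutex_unlock_wake__() instead */
        (void) ethr_atomic32_cmpxchg_relb(flgs, 0, ETHR_RWMTX_W_FLG__);
    }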
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
index 2f9f987d0b..8e04692856 100644
--- a/erts/include/internal/ethr_optimized_fallbacks.h
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -71,36 +71,46 @@ ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
#define ETHR_HAVE_NATIVE_SPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
-typedef ethr_native_atomic_t ethr_native_spinlock_t;
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
- ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+ ETHR_NATMC_FUNC__(init)(lock, 0);
}
static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
- ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == 1);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
- while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- (long) 1, (long) 0) != 0) {
- ETHR_SPIN_BODY;
+ while (ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, 1, 0) != 0) {
+ while (ETHR_NATMC_FUNC__(read)(lock) != 0)
+ ETHR_SPIN_BODY;
}
ETHR_COMPILER_BARRIER;
}
#endif
+#undef ETHR_NATMC_FUNC__
+
#endif
@@ -111,16 +121,26 @@ ethr_native_spin_lock(ethr_native_spinlock_t *lock)
#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
-typedef ethr_native_atomic_t ethr_native_rwlock_t;
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint32_t) 1) << 30)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint64_t) 1) << 62)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
-
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
{
- ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+ ETHR_NATMC_FUNC__(init)(lock, 0);
}
static ETHR_INLINE void
@@ -128,22 +148,24 @@ ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
ETHR_COMPILER_BARRIER;
#ifdef DEBUG
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) >= 0);
#endif
- ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
+ ETHR_NATMC_FUNC__(dec_relb)(lock);
}
static ETHR_INLINE void
ethr_native_read_lock(ethr_native_rwlock_t *lock)
{
- long act, exp = 0;
+ ETHR_NAINT_T__ act, exp = 0;
while (1) {
- act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- exp+1, exp);
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp+1, exp);
if (act == exp)
break;
- ETHR_SPIN_BODY;
- exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
+ while (act & ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ETHR_NATMC_FUNC__(read)(lock);
+ }
+ exp = act;
}
ETHR_COMPILER_BARRIER;
}
@@ -152,18 +174,16 @@ static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
ETHR_COMPILER_BARRIER;
- ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
- == ETHR_WLOCK_FLAG__);
- ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == ETHR_WLOCK_FLAG__);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
}
static ETHR_INLINE void
ethr_native_write_lock(ethr_native_rwlock_t *lock)
{
- long act, exp = 0;
+ ETHR_NAINT_T__ act, exp = 0;
while (1) {
- act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
- exp|ETHR_WLOCK_FLAG__, exp);
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp|ETHR_WLOCK_FLAG__, exp);
if (act == exp)
break;
ETHR_SPIN_BODY;
@@ -173,13 +193,17 @@ ethr_native_write_lock(ethr_native_rwlock_t *lock)
/* Wait for readers to leave */
while (act != ETHR_WLOCK_FLAG__) {
ETHR_SPIN_BODY;
- act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
+ act = ETHR_NATMC_FUNC__(read_acqb)(lock);
}
ETHR_COMPILER_BARRIER;
}
#endif
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_WLOCK_FLAG__
+
#endif
#endif
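
The spin loops rewritten above switch from pure test-and-set to
test-and-test-and-set: the locked cmpxchg is only retried after a plain
read has seen the lock free, which keeps waiters from saturating the bus
with locked cycles. The same pattern with bare GCC builtins (a
standalone sketch, not ethread code):

    #include <sched.h>

    static void tts_spin_lock(volatile int *lock)
    {
        while (__sync_val_compare_and_swap(lock, 0, 1) != 0) {
            while (*lock != 0)    /* cheap unlocked reads while held */
                sched_yield();    /* stand-in for ETHR_SPIN_BODY */
        }
    }

    static void tts_spin_unlock(volatile int *lock)
    {
        __sync_lock_release(lock);    /* release-store of 0 */
    }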
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 53fa1acdc2..4cd95faf6a 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -37,11 +37,6 @@
#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} ethr_timeval;
-
#if defined(DEBUG)
# define ETHR_DEBUG
#endif
@@ -73,7 +68,7 @@ typedef struct {
#endif
/* Assume 64-byte cache line size */
-#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_SIZE ((ethr_uint_t) 64)
#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
@@ -171,6 +166,22 @@ typedef pthread_key_t ethr_tsd_key;
# undef WIN32_LEAN_AND_MEAN
#endif
+#if defined(_MSC_VER)
+
+#if ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
+#endif
+
+#if ETHR_SIZEOF___INT64 == 8
+#define ETHR_HAVE_INT64_T 1
+typedef __int64 ethr_sint64_t;
+typedef unsigned __int64 ethr_uint64_t;
+#endif
+
+#endif
+
struct ethr_join_data_;
/* Types */
@@ -198,12 +209,48 @@ typedef DWORD ethr_tsd_key;
#endif
-#ifdef SIZEOF_LONG
-#if SIZEOF_LONG < ETHR_SIZEOF_PTR
-#error size of long currently needs to be at least the same as size of void *
+#ifndef ETHR_HAVE_INT32_T
+#if ETHR_SIZEOF_INT == 4
+#define ETHR_HAVE_INT32_T 1
+typedef int ethr_sint32_t;
+typedef unsigned int ethr_uint32_t;
+#elif ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
#endif
#endif
+#ifndef ETHR_HAVE_INT64_T
+#if ETHR_SIZEOF_INT == 8
+#define ETHR_HAVE_INT64_T 1
+typedef int ethr_sint64_t;
+typedef unsigned int ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long ethr_sint64_t;
+typedef unsigned long ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long long ethr_sint64_t;
+typedef unsigned long long ethr_uint64_t;
+#endif
+#endif
+
+#if ETHR_SIZEOF_PTR == 4
+#ifndef ETHR_HAVE_INT32_T
+#error "No 32-bit integer type found"
+#endif
+typedef ethr_sint32_t ethr_sint_t;
+typedef ethr_uint32_t ethr_uint_t;
+#elif ETHR_SIZEOF_PTR == 8
+#ifndef ETHR_HAVE_INT64_T
+#error "No 64-bit integer type found"
+#endif
+typedef ethr_sint64_t ethr_sint_t;
+typedef ethr_uint64_t ethr_uint_t;
+#endif
+
/* __builtin_expect() is needed by both native atomics code
* and the fallback code */
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
@@ -386,7 +433,6 @@ typedef struct {
#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
# define ETHR_NEED_SPINLOCK_PROTOTYPES__
# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
-# define ETHR_NEED_ATOMIC_PROTOTYPES__
#endif
int ethr_init(ethr_init_data *);
@@ -399,7 +445,6 @@ void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
@@ -502,312 +547,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-/*
- * Map ethread native atomics to ethread API atomics.
- */
-typedef ethr_native_atomic_t ethr_atomic_t;
-#else
-typedef long ethr_atomic_t;
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-void ethr_atomic_init(ethr_atomic_t *, long);
-void ethr_atomic_set(ethr_atomic_t *, long);
-long ethr_atomic_read(ethr_atomic_t *);
-long ethr_atomic_inc_read(ethr_atomic_t *);
-long ethr_atomic_dec_read(ethr_atomic_t *);
-void ethr_atomic_inc(ethr_atomic_t *);
-void ethr_atomic_dec(ethr_atomic_t *);
-long ethr_atomic_add_read(ethr_atomic_t *, long);
-void ethr_atomic_add(ethr_atomic_t *, long);
-long ethr_atomic_read_band(ethr_atomic_t *, long);
-long ethr_atomic_read_bor(ethr_atomic_t *, long);
-long ethr_atomic_xchg(ethr_atomic_t *, long);
-long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
-long ethr_atomic_read_acqb(ethr_atomic_t *);
-long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
-void ethr_atomic_set_relb(ethr_atomic_t *, long);
-void ethr_atomic_dec_relb(ethr_atomic_t *);
-long ethr_atomic_dec_read_relb(ethr_atomic_t *);
-long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
-long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
-#endif
-
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-
-#ifndef ETHR_HAVE_NATIVE_ATOMICS
-/*
- * Fallbacks for atomics used in absence of a native implementation.
- */
-
-#define ETHR_ATOMIC_ADDR_BITS 10
-#define ETHR_ATOMIC_ADDR_SHIFT 6
-
-typedef struct {
- union {
- ethr_spinlock_t lck;
- char buf[ETHR_CACHE_LINE_SIZE];
- } u;
-} ethr_atomic_protection_t;
-
-extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
-
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- ethr_spin_lock(slp__); \
- { EXPS; } \
- ethr_spin_unlock(slp__); \
-} while (0)
-
-#endif
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_init(var, i);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_set(var, i);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_read(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
- return res;
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_add(var, incr);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_add_return(var, i);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
- return res;
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_inc(var);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_dec(var);
-#else
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_inc_return(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_dec_return(var);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
- long mask)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_and_retold(var, mask);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
- long mask)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_or_retold(var, mask);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_xchg(var, new);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
- return res;
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg(var, new, exp);
-#else
- long res;
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
- {
- res = *var;
- if (__builtin_expect(res == exp, 1))
- *var = new;
- });
- return res;
-#endif
-}
-
-/*
- * Important memory barrier requirements.
- *
- * The following atomic operations *must* supply a memory barrier of
- * at least the type specified by its suffix:
- * _acqb = acquire barrier
- * _relb = release barrier
- */
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_read_acqb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_inc_return_acqb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_set_relb(var, val);
-#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
-#endif
-}
-
-static ETHR_INLINE void
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- ethr_native_atomic_dec_relb(var);
-#else
- ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_dec_return_relb(var);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
-#endif
-}
-
-static ETHR_INLINE long
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
- long new,
- long exp)
-{
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
- return ethr_native_atomic_cmpxchg_relb(var, new, exp);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
-#endif
-}
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
+#include "ethr_atomics.h"
typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
@@ -825,7 +565,7 @@ struct ethr_ts_event_ {
ethr_ts_event *prev;
ethr_event event;
void *udata;
- ethr_atomic_t uaflgs;
+ ethr_atomic32_t uaflgs;
unsigned uflgs;
unsigned iflgs; /* for ethr lib only */
short rgix; /* for ethr lib only */
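
Since the typedef selection above is driven entirely by configure-time
size probes, a mismatch would otherwise only surface when the types are
used. A minimal compile-time width check in the same C89 style (a
sketch; ethread itself relies on the #error directives above instead):

    typedef char ethr_chk_sint32[sizeof(ethr_sint32_t) == 4 ? 1 : -1];
    typedef char ethr_chk_sint64[sizeof(ethr_sint64_t) == 8 ? 1 : -1];
    typedef char ethr_chk_sint[sizeof(ethr_sint_t) == sizeof(void *) ? 1 : -1];

A wrong width makes the array size negative, so the build fails at the
typedef rather than at some later misaligned atomic access.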
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index 5debb44756..f394d790d2 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -20,6 +20,21 @@
/* Define to the size of pointers */
#undef ETHR_SIZEOF_PTR
+/* Define to the size of int */
+#undef ETHR_SIZEOF_INT
+
+/* Define to the size of long */
+#undef ETHR_SIZEOF_LONG
+
+/* Define to the size of long long */
+#undef ETHR_SIZEOF_LONG_LONG
+
+/* Define to the size of __int64 */
+#undef ETHR_SIZEOF___INT64
+
+/* Define if bigendian */
+#undef ETHR_BIGENDIAN
+
/* Define if you want to disable native ethread implementations */
#undef ETHR_DISABLE_NATIVE_IMPLS
@@ -100,6 +115,27 @@
/* Define to the size of AO_t if libatomic_ops is used */
#undef ETHR_SIZEOF_AO_T
+/* Define if you have _InterlockedCompareExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+
+/* Define if you have _InterlockedDecrement64() */
+#undef ETHR_HAVE__INTERLOCKEDDECREMENT64
+
+/* Define if you have _InterlockedIncrement64() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT64
+
+/* Define if you have _InterlockedExchangeAdd64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+
+/* Define if you have _InterlockedExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+
+/* Define if you have _InterlockedAnd64() */
+#undef ETHR_HAVE__INTERLOCKEDAND64
+
+/* Define if you have _InterlockedOr64() */
+#undef ETHR_HAVE__INTERLOCKEDOR64
+
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
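
The new ETHR_HAVE__INTERLOCKED*64 probes correspond one-to-one to MSVC
compiler intrinsics. A sketch of how a Windows backend can wrap one of
them once the probe succeeds (cmpxchg64_sketch is an illustrative name,
not a function the win/ headers define):

    #if defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64)
    #include <intrin.h>
    #pragma intrinsic(_InterlockedCompareExchange64)

    static __forceinline __int64
    cmpxchg64_sketch(volatile __int64 *var, __int64 new, __int64 exp)
    {
        /* returns the previous value of *var */
        return _InterlockedCompareExchange64(var, new, exp);
    }
    #endif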
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index e8e529dd48..16935084b1 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -22,24 +22,35 @@
* Author: Rickard Green
*/
-#ifndef ETHR_GCC_ATOMIC_H__
-#define ETHR_GCC_ATOMIC_H__
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_GCC_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_GCC_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_GCC_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_GCC_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#ifndef ETHR_GCC_ATOMIC_COMMON__
+#define ETHR_GCC_ATOMIC_COMMON__
-#define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-/* Enable immediate read/write on platforms where we know it is safe */
+#define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 0
#if defined(__i386__) || defined(__x86_64__) || defined(__sparc__) \
|| defined(__powerpc__) || defined(__ppc__) || defined(__mips__)
-# undef ETHR_IMMED_ATOMIC_SET_GET_SAFE__
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# undef ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
+# define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 1
#endif
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
-
+#if defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
/*
* According to the documentation this is what we want:
@@ -47,34 +58,73 @@ typedef struct {
* However, __sync_synchronize() is known to erroneously be
* a noop on at least some platforms with some gcc versions.
 * This has supposedly been fixed in some gcc version, but we
- * don't know from which version. Therefore, we use the
- * workaround implemented below on all gcc versions except
- * for gcc 4.2 or above for MIPS, where it's been verified.
+ * don't know from which version. Therefore, we only use
+ * it when it has been verified to work. Otherwise
+ * we use a workaround.
*/
#if defined(__mips__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+/* __sync_synchronize() has been verified to work here */
#define ETHR_MEMORY_BARRIER __sync_synchronize()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __sync_synchronize()
+#elif defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+/* Use fence instructions directly instead of workaround */
+#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
#else
+/* Workaround */
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
- (void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
+ volatile ethr_sint32_t x___ = 0; \
+ (void) __sync_val_compare_and_swap(&x___, (ethr_sint32_t) 0, (ethr_sint32_t) 1); \
} while (0)
-#endif
#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+
+#define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_GCC_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#else
+#error "Unsupported integer size"
+#endif
+
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
var->counter = value;
#else
/*
* Unfortunately no __sync_store() or similar exist in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- long act = 0, exp;
+ ETHR_AINT_T__ act = 0, exp;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, value);
@@ -82,80 +132,86 @@ ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
#endif
}
-#define ethr_native_atomic_init ethr_native_atomic_set
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ ETHR_NATMC_FUNC__(set)(var, value);
+}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
return var->counter;
#else
/*
* Unfortunately no __sync_fetch() or similar exist in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- return __sync_add_and_fetch(&var->counter, (long) 0);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
(void) __sync_add_and_fetch(&var->counter, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
return __sync_add_and_fetch(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) __sync_add_and_fetch(&var->counter, (long) 1);
+ (void) __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) __sync_sub_and_fetch(&var->counter, (long) 1);
+ (void) __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, (long) 1);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return __sync_sub_and_fetch(&var->counter, (long) 1);
+ return __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
return __sync_fetch_and_and(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) __sync_fetch_and_or(&var->counter, mask);
+ return (ETHR_AINT_T__) __sync_fetch_and_or(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
return __sync_val_compare_and_swap(&var->counter, old, new);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
- long exp, act = 0;
+ ETHR_AINT_T__ exp, act = 0;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, new);
@@ -167,22 +223,68 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, (long) 0);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_AINT_T__ val = var->counter;
+ ETHR_COMPILER_BARRIER;
+ return val;
+#else
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
+#endif
}
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
-#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
-#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->counter = i;
+#else
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
+#endif
+}
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(inc_return)(var);
+}
-#endif
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_NATMC_FUNC__(dec)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(dec_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
#endif
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
+
#endif
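
The exchange above is built from __sync_val_compare_and_swap() in a retry loop because the GCC __sync builtin set has no plain atomic exchange (__sync_lock_test_and_set only guarantees acquire semantics and, on some targets, may only store the value 1). A standalone sketch of the same pattern, with illustrative names:

/* CAS-based exchange; mirrors the xchg loop in the diff above. */
static inline long
cas_based_xchg(volatile long *p, long new)
{
    long exp, act = 0;
    do {
        exp = act;
        /* returns the value actually found at *p */
        act = __sync_val_compare_and_swap(p, exp, new);
    } while (act != exp);
    return act; /* previous value of *p */
}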
diff --git a/erts/include/internal/gcc/ethread.h b/erts/include/internal/gcc/ethread.h
index bb378e31e0..392a1aa2b2 100644
--- a/erts/include/internal/gcc/ethread.h
+++ b/erts/include/internal/gcc/ethread.h
@@ -25,6 +25,16 @@
#ifndef ETHREAD_GCC_H__
#define ETHREAD_GCC_H__
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
+
+#endif
#endif
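
The ETHR_ATOMIC_WANT_{32,64}BIT_IMPL__ request macros drive a deliberately guard-free header that may be included twice, emitting one implementation per pass. A minimal sketch of the pattern with hypothetical names (impl.h, WANT_*, MY_*; not part of ethread):

/* impl.h -- no classic include guard; one width per inclusion. */
#undef MY_WIDTH
#if !defined(MY_IMPL32_DONE__) && defined(WANT_32BIT)
#  define MY_IMPL32_DONE__
#  define MY_WIDTH 4
#  undef WANT_32BIT
#elif !defined(MY_IMPL64_DONE__) && defined(WANT_64BIT)
#  define MY_IMPL64_DONE__
#  define MY_WIDTH 8
#  undef WANT_64BIT
#endif

#ifdef MY_WIDTH
#if MY_WIDTH == 4
#  define MY_AINT int
#  define MY_FUNC(X) my32_ ## X
#else
#  define MY_AINT long long
#  define MY_FUNC(X) my64_ ## X
#endif

static inline void MY_FUNC(set)(volatile MY_AINT *p, MY_AINT i) { *p = i; }

#undef MY_AINT
#undef MY_FUNC
#endif /* MY_WIDTH */

The consumer side then reads:

#define WANT_32BIT
#include "impl.h"    /* defines my32_set() */
#define WANT_64BIT
#include "impl.h"    /* defines my64_set() */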
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 52d01aab32..4e402f261a 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -23,14 +23,24 @@
*
* This code requires a 486 or newer processor.
*/
-#ifndef ETHREAD_I386_ATOMIC_H
-#define ETHREAD_I386_ATOMIC_H
-/* An atomic is an aligned long accessed via locked operations.
- */
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_X86_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_X86_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_X86_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_X86_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#ifndef ETHR_X86_ATOMIC_COMMON__
+#define ETHR_X86_ATOMIC_COMMON__
+
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
@@ -40,123 +50,161 @@ typedef struct {
#else
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
+ volatile ethr_sint32_t x___ = 0; \
__asm__ __volatile__("lock; incl %0" : "=m"(x___) : "m"(x___) : "memory"); \
} while (0)
#endif
-#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
-
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_X86_ATOMIC_COMMON__ */
-#ifdef __x86_64__
-#define LONG_SUFFIX "q"
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
#else
-#define LONG_SUFFIX "l"
+#error "Unsupported integer size"
#endif
+/* An atomic is an aligned ETHR_AINT_T__ accessed via locked operations.
+ */
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
__asm__ __volatile__(
- "lock; add" LONG_SUFFIX " %1, %0"
+ "lock; add" ETHR_AINT_SUFFIX__ " %1, %0"
: "=m"(var->counter)
: "ir"(incr), "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; inc" LONG_SUFFIX " %0"
+ "lock; inc" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; dec" LONG_SUFFIX " %0"
+ "lock; dec" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long tmp;
+ ETHR_AINT_T__ tmp;
tmp = incr;
__asm__ __volatile__(
- "lock; xadd" LONG_SUFFIX " %0, %1" /* xadd didn't exist prior to the 486 */
+ "lock; xadd" ETHR_AINT_SUFFIX__ " %0, %1" /* xadd didn't exist prior to the 486 */
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
return tmp + incr;
}
-#define ethr_native_atomic_inc_return(var) ethr_native_atomic_add_return((var), 1)
-#define ethr_native_atomic_dec_return(var) ethr_native_atomic_add_return((var), -1)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) 1);
+}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) -1);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
__asm__ __volatile__(
- "lock; cmpxchg" LONG_SUFFIX " %2, %3"
+ "lock; cmpxchg" ETHR_AINT_SUFFIX__ " %2, %3"
: "=a"(old), "=m"(var->counter)
: "r"(new), "m"(var->counter), "0"(old)
: "cc", "memory"); /* full memory clobber to make this a compiler barrier */
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp & mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp & mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp | mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp | mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long tmp = val;
+ ETHR_AINT_T__ tmp = val;
__asm__ __volatile__(
- "xchg" LONG_SUFFIX " %0, %1"
+ "xchg" ETHR_AINT_SUFFIX__ " %0, %1"
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
@@ -167,57 +215,73 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- long val;
+ ETHR_AINT_T__ val;
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
val = var->counter;
#else
- val = ethr_native_atomic_add_return(var, 0);
+ val = ETHR_NATMC_FUNC__(add_return)(var, 0);
#endif
__asm__ __volatile__("" : : : "memory");
return val;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
__asm__ __volatile__("" : : : "memory");
#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
var->counter = i;
#else
- (void) ethr_native_atomic_xchg(var, i);
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
__asm__ __volatile__("" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("" : : : "memory");
- ethr_native_atomic_dec(var);
+ ETHR_NATMC_FUNC__(dec)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("" : : : "memory");
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
}
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
-#undef LONG_SUFFIX
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHREAD_I386_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
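
The add_return above hinges on xadd exchanging its source register with the memory operand while adding, so the register ends up holding the previous value. A standalone sketch of the 32-bit variant, with illustrative names:

#include <stdint.h>

static inline int32_t
fetch_add32(volatile int32_t *p, int32_t incr)
{
    int32_t prev = incr;
    __asm__ __volatile__(
        "lock; xaddl %0, %1"   /* *p += prev; prev = old *p */
        : "+r"(prev), "+m"(*p)
        : : "memory", "cc");
    return prev;               /* add_return is then prev + incr */
}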
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index ed43e77279..b5a17caefb 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,12 @@
#ifndef ETHREAD_I386_ETHREAD_H
#define ETHREAD_I386_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index a6eb43a0bd..d56693dbf8 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -46,17 +46,39 @@
* - AO_store()
* - AO_compare_and_swap()
*
- * The `AO_t' type also have to be at least as large as
- * `void *' and `long' types.
+ * The `AO_t' type also has to be at least as large as the `void *' type.
*/
#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
#error The AO_t type is too small
#endif
+#if ETHR_SIZEOF_AO_T == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_SIZEOF_AO_T == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
+#else
+#error "Unsupported integer size"
+#endif
+
+#if ETHR_SIZEOF_AO_T == 8
+typedef union {
+ volatile AO_t counter;
+ ethr_sint32_t sint32[2];
+} ETHR_ATMC_T__;
+#else
typedef struct {
volatile AO_t counter;
-} ethr_native_atomic_t;
+} ETHR_ATMC_T__;
+#endif
#define ETHR_MEMORY_BARRIER AO_nop_full()
#ifdef AO_HAVE_nop_write
@@ -72,123 +94,151 @@ typedef struct {
#ifdef AO_NO_DD_ORDERING
# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
#else
-# define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("":::"memory")
+# define ETHR_READ_DEPEND_MEMORY_BARRIER AO_compiler_barrier()
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+#if ETHR_SIZEOF_AO_T == 8
+/*
+ * We also need to provide an ethr_native_atomic32_addr(), since
+ * this 64-bit implementation will be used to implement 32-bit
+ * native atomics.
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ETHR_ATMC_T__ *var)
+{
+ ETHR_ASSERT(((void *) &var->sint32[0]) == ((void *) &var->counter));
+#ifdef ETHR_BIGENDIAN
+ return &var->sint32[1];
+#else
+ return &var->sint32[0];
#endif
+}
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#endif /* ETHR_SIZEOF_AO_T == 8 */
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
AO_store(&var->counter, (AO_t) value);
}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
- ethr_native_atomic_set(var, value);
+ ETHR_NATMC_FUNC__(set)(var, value);
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
- return (long) AO_load(&var->counter);
+ return (ETHR_AINT_T__) AO_load(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
#ifdef AO_HAVE_fetch_and_add
- return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+ return ((ETHR_AINT_T__) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
#else
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp + (AO_t) incr;
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) new;
+ return (ETHR_AINT_T__) new;
}
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void) ethr_native_atomic_add_return(var, incr);
+ (void) ETHR_NATMC_FUNC__(add_return)(var, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_add1
- return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_add1(&var->counter)) + 1;
#else
- return ethr_native_atomic_add_return(var, 1);
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_inc_return(var);
+ (void) ETHR_NATMC_FUNC__(inc_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_sub1
- return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1(&var->counter)) - 1;
#else
- return ethr_native_atomic_add_return(var, -1);
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_dec_return(var);
+ (void) ETHR_NATMC_FUNC__(dec_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp & ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
while (1) {
AO_t exp = AO_load(&var->counter);
AO_t new = exp | ((AO_t) mask);
if (AO_compare_and_swap(&var->counter, exp, new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
return act;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
while (1) {
AO_t exp = AO_load(&var->counter);
if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
- return (long) exp;
+ return (ETHR_AINT_T__) exp;
}
}
@@ -196,97 +246,105 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_load_acquire
- return (long) AO_load_acquire(&var->counter);
+ return (ETHR_AINT_T__) AO_load_acquire(&var->counter);
#else
- long res = ethr_native_atomic_read(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_add1_acquire
- return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_acquire(&var->counter)) + 1;
#else
- long res = ethr_native_atomic_add_return(var, 1);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(add_return)(var, 1);
ETHR_MEMORY_BARRIER;
return res;
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
#ifdef AO_HAVE_store_release
AO_store_release(&var->counter, (AO_t) value);
#else
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_set(var, value);
+ ETHR_NATMC_FUNC__(set)(var, value);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
#ifdef AO_HAVE_fetch_and_sub1_release
- return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_release(&var->counter)) - 1;
#else
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
- (void) ethr_native_atomic_dec_return_relb(var);
+ (void) ETHR_NATMC_FUNC__(dec_return_relb)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
#ifdef AO_HAVE_compare_and_swap_acquire
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
AO_nop_full();
return act;
#else
- long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ETHR_AINT_T__ act = ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
ETHR_MEMORY_BARRIER;
return act;
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
{
#ifdef AO_HAVE_compare_and_swap_release
- long act;
+ ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
- act = (long) AO_load(&var->counter);
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
} while (act == exp);
return act;
#else
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
#endif
}
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
-#endif
+#endif /* !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS) */
+
+#endif /* ETHR_LIBATOMIC_OPS_ATOMIC_H__ */
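
The union above lets one 64-bit AO_t double as a 32-bit atomic; which half aliases the low-order 32 bits depends on byte order, which is what the ETHR_BIGENDIAN test selects. A standalone illustration using GCC's predefined byte-order macros (names are illustrative):

#include <stdint.h>

typedef union {
    volatile uint64_t counter;
    uint32_t half[2];
} atomic64_as_32;

static inline uint32_t *
low_half_addr(atomic64_as_32 *a)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return &a->half[1];  /* big endian: low-order bits in the second word */
#else
    return &a->half[0];  /* little endian: low-order bits in the first word */
#endif
}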
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index f21f7c9588..522f433649 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -28,31 +28,39 @@
#ifndef ETHREAD_PPC_ATOMIC_H
#define ETHREAD_PPC_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
typedef struct {
- volatile int counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+#define ethr_native_atomic32_set(v, i) ethr_native_atomic32_init((v), (i))
-static ETHR_INLINE int
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
-static ETHR_INLINE int
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -69,16 +77,16 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, int incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ethr_native_atomic32_add_return(var, incr);
}
-static ETHR_INLINE int
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -95,16 +103,16 @@ ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_inc_return(var);
+ (void)ethr_native_atomic32_inc_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -121,16 +129,16 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_dec_return(var);
+ (void)ethr_native_atomic32_dec_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -146,10 +154,10 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -165,10 +173,10 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -183,10 +191,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
return tmp;
}
-static ETHR_INLINE int
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
- int old;
+ ethr_sint32_t old;
__asm__ __volatile__(
"eieio\n\t"
@@ -210,20 +220,20 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
{
-    long res = ethr_native_atomic_read(var);
+    ethr_sint32_t res = ethr_native_atomic32_read(var);
ETHR_MEMORY_BARRIER;
return res;
}
-#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
-#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic32_set_relb ethr_native_atomic32_xchg
+#define ethr_native_atomic32_inc_return_acqb ethr_native_atomic32_inc_return
+#define ethr_native_atomic32_dec_relb ethr_native_atomic32_dec_return
+#define ethr_native_atomic32_dec_return_relb ethr_native_atomic32_dec_return
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic32_cmpxchg_acqb ethr_native_atomic32_cmpxchg
+#define ethr_native_atomic32_cmpxchg_relb ethr_native_atomic32_cmpxchg
#endif /* ETHR_TRY_INLINE_FUNCS */
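
The loop bodies between the eieio and isync fragments are elided by the hunk context above; they are lwarx/stwcx. reservation loops. A sketch of what add_return presumably looks like in full, consistent with the visible fragments (an assumption, not copied from the source):

static inline ethr_sint32_t
ppc_add_return32_sketch(volatile ethr_sint32_t *p, ethr_sint32_t incr)
{
    ethr_sint32_t tmp;
    __asm__ __volatile__(
        "eieio\n\t"           /* order earlier stores before the update */
        "1:\n\t"
        "lwarx %0,0,%1\n\t"   /* load word and reserve */
        "add %0,%2,%0\n\t"
        "stwcx. %0,0,%1\n\t"  /* store iff reservation still held */
        "bne- 1b\n\t"         /* reservation lost: retry */
        "isync"               /* acquire-style barrier on success */
        : "=&r"(tmp)
        : "r"(p), "r"(incr)
        : "cc", "memory");
    return tmp;
}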
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
index 104ec287e0..93da8a0429 100644
--- a/erts/include/internal/pthread/ethr_event.h
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -30,31 +30,9 @@
#include <linux/futex.h>
#include <sys/time.h>
-/*
- * Note: Linux futexes operate on 32-bit integers, but
- * ethr_native_atomic_t are 64-bits on 64-bit
- * platforms. This has to be taken into account.
- * Therefore, in each individual value used each
- * byte look the same.
- */
-
-#if ETHR_SIZEOF_PTR == 8
-
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
-#define ETHR_EVENT_OFF__ 0x7777777777777777L
-#define ETHR_EVENT_ON__ 0L
-
-#elif ETHR_SIZEOF_PTR == 4
-
-#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
-#define ETHR_EVENT_OFF__ 0x77777777L
-#define ETHR_EVENT_ON__ 0L
-
-#else
-
-#error ehrm...
-
-#endif
+#define ETHR_EVENT_OFF_WAITER__ ((ethr_sint32_t) -1)
+#define ETHR_EVENT_OFF__ ((ethr_sint32_t) 1)
+#define ETHR_EVENT_ON__ ((ethr_sint32_t) 0)
#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
@@ -65,11 +43,17 @@
#endif
typedef struct {
- ethr_atomic_t futex;
+ ethr_atomic32_t futex;
} ethr_event;
-#define ETHR_FUTEX__(FTX, OP, VAL) \
- (-1 == syscall(__NR_futex, (void *) (FTX), (OP), (int) (VAL), NULL, NULL, 0)\
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, \
+ (void *) ethr_atomic32_addr((FTX)), \
+ (OP), \
+ (int) (VAL), \
+ NULL, \
+ NULL, \
+ 0) \
? errno : 0)
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
@@ -77,9 +61,9 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint32_t val;
ETHR_WRITE_MEMORY_BARRIER;
- val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg(&e->futex, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
if (res != 0)
@@ -90,7 +74,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- ethr_atomic_set(&e->futex, ETHR_EVENT_OFF__);
+ ethr_atomic32_set(&e->futex, ETHR_EVENT_OFF__);
ETHR_MEMORY_BARRIER;
}
@@ -100,7 +84,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
/* --- Posix mutex/cond implementation of events ---------------------------- */
typedef struct {
- ethr_atomic_t state;
+ ethr_atomic32_t state;
pthread_mutex_t mtx;
pthread_cond_t cnd;
} ethr_event;
@@ -114,9 +98,9 @@ typedef struct {
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- long val;
+ ethr_sint32_t val;
ETHR_WRITE_MEMORY_BARRIER;
- val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
+ val = ethr_atomic32_xchg(&e->state, ETHR_EVENT_ON__);
if (val == ETHR_EVENT_OFF_WAITER__) {
int res = pthread_mutex_lock(&e->mtx);
if (res != 0)
@@ -133,7 +117,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static void ETHR_INLINE
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- ethr_atomic_set(&e->state, ETHR_EVENT_OFF__);
+ ethr_atomic32_set(&e->state, ETHR_EVENT_OFF__);
ETHR_MEMORY_BARRIER;
}
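
Only set() and reset() appear in this diff; the waiter side presumably moves the state from ETHR_EVENT_OFF__ to ETHR_EVENT_OFF_WAITER__ before blocking, which is what makes the OFF_WAITER check in set() sufficient to decide whether a futex wake is needed. A hypothetical sketch of that side (the real ethr_event_wait is implemented elsewhere in the ethread library):

#include <errno.h>

static int
event_wait_sketch(ethr_event *e)
{
    ethr_sint32_t state = ethr_atomic32_read(&e->futex);
    if (state == ETHR_EVENT_ON__)
        return 0;                         /* already set; no syscall */
    /* advertise a blocked waiter so setters know to futex-wake */
    state = ethr_atomic32_cmpxchg(&e->futex,
                                  ETHR_EVENT_OFF_WAITER__,
                                  ETHR_EVENT_OFF__);
    while (state != ETHR_EVENT_ON__) {
        int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__,
                               ETHR_EVENT_OFF_WAITER__);
        if (res != 0 && res != EWOULDBLOCK && res != EINTR)
            return res;
        state = ethr_atomic32_read(&e->futex);
    }
    return 0;
}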
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 2da6472393..00380dbf07 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -21,49 +21,86 @@
* Native ethread atomics on SPARC V9.
* Author: Mikael Pettersson.
*/
-#ifndef ETHR_SPARC32_ATOMIC_H
-#define ETHR_SPARC32_ATOMIC_H
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_SPARC_V9_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_SPARC_V9_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#ifndef ETHR_SPARC_V9_ATOMIC_COMMON__
+#define ETHR_SPARC_V9_ATOMIC_COMMON__
#define ETHR_MEMORY_BARRIER \
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
: : : "memory")
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
-
-#if defined(__arch64__)
-#define CASX "casx"
+#endif /* ETHR_SPARC_V9_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_CAS__ "cas"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_CAS__ "casx"
#else
-#define CASX "cas"
+#error "Unsupported integer size"
#endif
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old+incr;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -73,46 +110,46 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, 1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, -1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old & mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -121,17 +158,17 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old | mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -140,17 +177,17 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long old, new;
+ ETHR_AINT_T__ old, new;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad");
do {
old = var->counter;
new = val;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -159,12 +196,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -178,58 +215,63 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
/* TODO: relax acquire barriers */
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_read(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- ethr_native_atomic_set(var, i);
+ ETHR_NATMC_FUNC__(set)(var, i);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
__asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- ethr_native_atomic_dec(var);
+ ETHR_NATMC_FUNC__(dec)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- return ethr_native_atomic_dec_return(var);
+ return ETHR_NATMC_FUNC__(dec_return)(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
- long res = ethr_native_atomic_cmpxchg(var, new, old);
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
__asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
return res;
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
__asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
- return ethr_native_atomic_cmpxchg(var, new, old);
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHR_SPARC32_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_CAS__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
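
For readers unfamiliar with SPARC V9, cas/casx compares the memory word with its second operand and unconditionally leaves the old memory value in the destination register, which is why the asm above ties new to both the output and the "0" input constraint. Expressed in C (illustrative only; the real thing is a single atomic instruction):

static inline ethr_sint32_t
sparc_cas_semantics(volatile ethr_sint32_t *mem,
                    ethr_sint32_t cmp,
                    ethr_sint32_t swap)
{
    ethr_sint32_t old = *mem;  /* performed atomically, as one insn */
    if (old == cmp)
        *mem = swap;
    return old;                /* always the previous memory value */
}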
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index dca113b4d6..aea9794390 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,12 @@
#ifndef ETHREAD_SPARC32_ETHREAD_H
#define ETHREAD_SPARC32_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 69569d82d1..48e4c0c6c8 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -24,92 +24,102 @@
#ifndef ETHREAD_TILE_ATOMIC_H
#define ETHREAD_TILE_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
#include <atomic.h>
/* An atomic is an aligned int accessed via locked operations.
*/
typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __insn_mf()
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_set(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
atomic_exchange_acq(&var->counter, i);
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
atomic_increment(&var->counter);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
atomic_decrement(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ethr_native_atomic32_add_return(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ethr_native_atomic32_add_return(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
return atomic_and_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
return atomic_or_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
return atomic_exchange_acq(&var->counter, val);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
@@ -118,54 +128,58 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
* Atomic ops with at least specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
{
- long res = ethr_native_atomic_read(var);
+ ethr_sint32_t res = ethr_native_atomic32_read(var);
ETHR_MEMORY_BARRIER;
return res;
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
{
- long res = ethr_native_atomic_inc_return(var);
+ ethr_sint32_t res = ethr_native_atomic32_inc_return(var);
ETHR_MEMORY_BARRIER;
return res;
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long val)
+ethr_native_atomic32_set_relb(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_set(var, val);
+ ethr_native_atomic32_set(var, val);
}
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec_relb(ethr_native_atomic32_t *var)
{
ETHR_MEMORY_BARRIER;
- ethr_native_atomic_dec(var);
+ ethr_native_atomic32_dec(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return_relb(ethr_native_atomic32_t *var)
{
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_dec_return(var);
+ return ethr_native_atomic32_dec_return(var);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_acqb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
{
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_relb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
{
ETHR_MEMORY_BARRIER;
- return ethr_native_atomic_cmpxchg(var, new, exp);
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
index 500459dd6c..60def01a7e 100644
--- a/erts/include/internal/win/ethr_atomic.h
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -22,223 +22,394 @@
* Author: Rickard Green
*/
-#ifndef ETHR_WIN_ATOMIC_H__
-#define ETHR_WIN_ATOMIC_H__
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_WIN_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_WIN_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_WIN_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_WIN_ATOMIC64_H__
+#ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+/* _InterlockedCompareExchange64() required... */
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#endif
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+
+#ifndef ETHR_WIN_ATOMIC_COMMON__
+#define ETHR_WIN_ATOMIC_COMMON__
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if defined(_M_IX86) || defined(_M_AMD64) || defined(_M_IA64)
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 1
#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 0
#endif
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
/*
- * No configure test checking for _Interlocked*_{acq,rel} and
- * Interlocked*{Acquire,Release} have been written yet...
+ * No configure test checking for the interlocked acquire/release
+ * versions has been written yet. It should define
+ * ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS if, and
+ * only if, all used interlocked operations with barriers
+ * exist.
*
 * Note that these are pure optimizations for the Itanium
* processor.
*/
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
-#pragma intrinsic(_InterlockedCompareExchange_acq)
+#include <intrin.h>
+#undef ETHR_COMPILER_BARRIER
+#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+#include <emmintrin.h>
+#include <mmintrin.h>
+#pragma intrinsic(_mm_mfence)
+#define ETHR_MEMORY_BARRIER _mm_mfence()
+#pragma intrinsic(_mm_sfence)
+#define ETHR_WRITE_MEMORY_BARRIER _mm_sfence()
+#pragma intrinsic(_mm_lfence)
+#define ETHR_READ_MEMORY_BARRIER _mm_lfence()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+
+#else
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile long x___ = 0; \
+ _InterlockedCompareExchange(&x___, (long) 1, (long) 0); \
+} while (0)
+
#endif
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+
+#endif /* ETHR_WIN_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
+/*
+ * All used operations available as 32-bit intrinsics
+ */
+
+#pragma intrinsic(_InterlockedDecrement)
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd_acq)
+#pragma intrinsic(_InterlockedIncrement_acq)
+#pragma intrinsic(_InterlockedDecrement_rel)
+#pragma intrinsic(_InterlockedCompareExchange_acq)
#pragma intrinsic(_InterlockedCompareExchange_rel)
#endif
+#define ETHR_ILCKD__(X) _Interlocked ## X
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## _acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## _rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+
+/*
+ * _InterlockedCompareExchange64() is required. The other 64-bit
+ * interlocked operations may not be available; where they are
+ * missing, we generate them from the compare exchange below.
+ */
+#pragma intrinsic(_InterlockedCompareExchange64)
+
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) *(PTR)
+#else
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) (__int64) 0
+#endif
+
+#define ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, PTR, NEW, ACT, EXP, OPS, RET) \
+{ \
+ __int64 NEW, ACT, EXP; \
+ ACT = ETHR_OWN_ILCKD_INIT_VAL__(PTR); \
+ do { \
+ EXP = ACT; \
+ { OPS; } \
+ ACT = _InterlockedCompareExchange64(PTR, NEW, EXP); \
+ } while (ACT != EXP); \
+ return RET; \
+}
+
+#define ETHR_OWN_ILCKD_1_IMPL__(FUNC, NEW, ACT, EXP, OPS, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+#define ETHR_OWN_ILCKD_2_IMPL__(FUNC, NEW, ACT, EXP, OPS, ARG, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr, __int64 ARG) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+
+#ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64
+#pragma intrinsic(_InterlockedDecrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedDecrement64, new, act, exp,
+ new = act - 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64
+#pragma intrinsic(_InterlockedIncrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedIncrement64, new, act, exp,
+ new = act + 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, new, act, exp,
+ new = act + arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+#pragma intrinsic(_InterlockedExchange64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchange64, new, act, exp,
+ new = arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDAND64
+#pragma intrinsic(_InterlockedAnd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedAnd64, new, act, exp,
+ new = act & arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDOR64
+#pragma intrinsic(_InterlockedOr64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedOr64, new, act, exp,
+ new = act | arg, arg, act)
+#endif
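
For readability, this is roughly what the fallback expansion of _InterlockedExchangeAdd64 produces, expanded by hand from ETHR_OWN_ILCKD_2_IMPL__ and ETHR_OWN_ILCKD_BODY_IMPL__ above (modulo whitespace):

static __forceinline __int64
_InterlockedExchangeAdd64(__int64 volatile *ptr, __int64 arg)
{
    __int64 new, act, exp;
    act = ETHR_OWN_ILCKD_INIT_VAL__(ptr); /* *ptr, or 0 where plain
                                           * 64-bit reads are unsafe */
    do {
        exp = act;
        { new = act + arg; }
        act = _InterlockedCompareExchange64(ptr, new, exp);
    } while (act != exp);
    return act;                           /* the previous value */
}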
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd64_acq)
+#pragma intrinsic(_InterlockedIncrement64_acq)
+#pragma intrinsic(_InterlockedDecrement64_rel)
+#pragma intrinsic(_InterlockedCompareExchange64_acq)
+#pragma intrinsic(_InterlockedCompareExchange64_rel)
+#endif
+
+#define ETHR_ILCKD__(X) _Interlocked ## X ## 64
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64_acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64_rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+
+#else
+#error "Unsupported integer size"
+#endif
typedef struct {
- volatile LONG value;
-} ethr_native_atomic_t;
+ volatile ETHR_AINT_T__ value;
+} ETHR_ATMC_T__;
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile LONG x___ = 0; \
- _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->value;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
+#else
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+#endif
}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
#else
- (void) InterlockedExchange(&var->value, (LONG) i);
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
return var->value;
#else
- return InterlockedExchangeAdd(&var->value, (LONG) 0);
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
#endif
}
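A note on the fallback branch above: an ExchangeAdd with an addend of zero leaves the variable unchanged but atomically returns its previous value, so it doubles as an atomic read with a full memory barrier on platforms where a plain volatile load is not known to be safe. Hand-expanded for the 32-bit case (illustrative):

    /* Atomic read expressed as a no-op interlocked add. */
    ethr_sint32_t val = _InterlockedExchangeAdd(&var->value, 0);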
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+ (void) ETHR_ILCKD__(ExchangeAdd)(&var->value, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
- return tmp + i;
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, i) + i;
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) InterlockedIncrement(&var->value);
+ (void) ETHR_ILCKD__(Increment)(&var->value);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) InterlockedDecrement(&var->value);
+ (void) ETHR_ILCKD__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return (long) InterlockedIncrement(&var->value);
+ return ETHR_ILCKD__(Increment)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return (long) InterlockedDecrement(&var->value);
+ return ETHR_ILCKD__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) _InterlockedAnd(&var->value, mask);
+ return ETHR_ILCKD__(And)(&var->value, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) _InterlockedOr(&var->value, mask);
+ return ETHR_ILCKD__(Or)(&var->value, mask);
}
-
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+ return ETHR_ILCKD__(CompareExchange)(&var->value, new, old);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
- return (long) InterlockedExchange(&var->value, (LONG) new);
+ return ETHR_ILCKD__(Exchange)(&var->value, new);
}
/*
 * Atomic ops with at least the specified barriers.
*/
-static ETHR_INLINE long
-ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
- return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_AINT_T__ val = var->value;
+ ETHR_COMPILER_BARRIER;
+ return val;
#else
- return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+ return ETHR_ILCKD_ACQ__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
#endif
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
- return (long) InterlockedIncrementAcquire(&var->value);
-#else
- return (long) InterlockedIncrement(&var->value);
-#endif
+ return ETHR_ILCKD_ACQ__(Increment)(&var->value);
}
static ETHR_INLINE void
-ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
- (void) InterlockedExchange(&var->value, (LONG) i);
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->value = i;
+#else
+ (void) ETHR_ILCKD_REL__(Exchange)(&var->value, i);
+#endif
}
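The read_acqb/set_relb pair is meant to be used across threads: the writer stores its data with plain stores and then sets a flag with release semantics, and the reader checks the flag with acquire semantics before touching the data. A hedged sketch using the 64-bit names generated above ('flag' and 'payload' are illustrative, not part of the ethread API):

    static ethr_native_atomic64_t flag;  /* assume ..._init(&flag, 0) has run */
    static int payload;

    void publish(void)
    {
        payload = 42;                             /* plain store */
        ethr_native_atomic64_set_relb(&flag, 1);  /* release: orders the store above */
    }

    int consume(void)
    {
        while (!ethr_native_atomic64_read_acqb(&flag))
            ;                                     /* spin until published */
        return payload;                           /* acquire: payload now visible */
    }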
static ETHR_INLINE void
-ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
- (void) InterlockedDecrementRelease(&var->value);
-#else
- (void) InterlockedDecrement(&var->value);
-#endif
+ (void) ETHR_ILCKD_REL__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
{
-#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
- return (long) InterlockedDecrementRelease(&var->value);
-#else
- return (long) InterlockedDecrement(&var->value);
-#endif
+ return ETHR_ILCKD_REL__(Decrement)(&var->value);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
- return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
-#else
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+ return ETHR_ILCKD_ACQ__(CompareExchange)(&var->value, new, old);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
-
-#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
- return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
-#else
- return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
-#endif
+ return ETHR_ILCKD_REL__(CompareExchange)(&var->value, new, old);
}
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif
+#undef ETHR_ILCKD__
+#undef ETHR_ILCKD_ACQ__
+#undef ETHR_ILCKD_REL__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#undef ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+
+#endif /* _MSC_VER */
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
index af57c20f91..598816b2c6 100644
--- a/erts/include/internal/win/ethr_event.h
+++ b/erts/include/internal/win/ethr_event.h
@@ -21,22 +21,24 @@
* Author: Rickard Green
*/
-#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
-#define ETHR_EVENT_OFF__ ((LONG) 1)
-#define ETHR_EVENT_ON__ ((LONG) 0)
+#define ETHR_EVENT_OFF_WAITER__ ((long) -1)
+#define ETHR_EVENT_OFF__ ((long) 1)
+#define ETHR_EVENT_ON__ ((long) 0)
typedef struct {
- volatile LONG state;
+ volatile long state;
HANDLE handle;
} ethr_event;
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+#pragma intrinsic(_InterlockedExchange)
+
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
{
- /* InterlockedExchange() imply a full memory barrier which is important */
- LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ /* _InterlockedExchange() implies a full memory barrier, which is important */
+ long state = _InterlockedExchange(&e->state, ETHR_EVENT_ON__);
if (state == ETHR_EVENT_OFF_WAITER__) {
if (!SetEvent(e->handle))
ETHR_FATAL_ERROR__(ethr_win_get_errno__());
@@ -46,7 +48,7 @@ ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
{
- /* InterlockedExchange() imply a full memory barrier which is important */
+ /* _InterlockedExchange() implies a full memory barrier, which is important */
- InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+ (void) _InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
}
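For context, the three state values encode a small protocol: ETHR_EVENT_ON__ (0) means the event is set, ETHR_EVENT_OFF__ (1) means it is clear with no thread parked, and ETHR_EVENT_OFF_WAITER__ (-1) means a thread is blocked on the kernel HANDLE, which is why ethr_event_set() only calls SetEvent() after observing that value. The waiter side is not part of this hunk; a rough, hypothetical sketch of what it must do (the real ethr_event_wait() in OTP is more involved):

    /* Hypothetical waiter sketch, NOT the actual ethr_event_wait(). */
    long state = _InterlockedCompareExchange(&e->state,
                                             ETHR_EVENT_OFF_WAITER__,
                                             ETHR_EVENT_OFF__);
    if (state != ETHR_EVENT_ON__) {
        /* registered as waiter; block until a setter calls SetEvent() */
        if (WaitForSingleObject(e->handle, INFINITE) != WAIT_OBJECT_0)
            ETHR_FATAL_ERROR__(ethr_win_get_errno__());
    }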
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
index b52710f6a3..c01b17cf14 100644
--- a/erts/include/internal/win/ethread.h
+++ b/erts/include/internal/win/ethread.h
@@ -25,7 +25,11 @@
#ifndef ETHREAD_WIN_H__
#define ETHREAD_WIN_H__
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
#endif
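The change above illustrates the double-include pattern the rewritten ethr_atomic.h relies on: the same header is included once per desired word size, and a WANT macro tells it which implementation to emit, so 64-bit Windows builds generate both the 32- and 64-bit variants. A generic sketch of the idiom (all names illustrative):

    /* impl.h -- designed to be included once per width; the includer
     * defines WANT_32BIT_IMPL or WANT_64BIT_IMPL before each include. */
    #if defined(WANT_32BIT_IMPL)
    #  undef WANT_32BIT_IMPL
    #  define AINT_T int32_t
    #  define FUNC(X) my_atomic32_ ## X
    #elif defined(WANT_64BIT_IMPL)
    #  undef WANT_64BIT_IMPL
    #  define AINT_T int64_t
    #  define FUNC(X) my_atomic64_ ## X
    #endif

    /* ...width-generic code written in terms of AINT_T and FUNC(X)... */

    #undef AINT_T
    #undef FUNC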