Diffstat (limited to 'erts/include')
-rw-r--r--  erts/include/internal/erl_misc_utils.h            |    9
-rw-r--r--  erts/include/internal/ethr_atomics.h               |  726
-rw-r--r--  erts/include/internal/ethr_internal.h              |   67
-rw-r--r--  erts/include/internal/ethr_mutex.h                 |  674
-rw-r--r--  erts/include/internal/ethr_optimized_fallbacks.h   |  209
-rw-r--r--  erts/include/internal/ethread.h                    | 1503
-rw-r--r--  erts/include/internal/ethread_header_config.h.in   |   93
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h            |  224
-rw-r--r--  erts/include/internal/gcc/ethread.h                |   10
-rw-r--r--  erts/include/internal/i386/atomic.h                |  214
-rw-r--r--  erts/include/internal/i386/ethread.h               |   10
-rw-r--r--  erts/include/internal/i386/rwlock.h                |   12
-rw-r--r--  erts/include/internal/i386/spinlock.h              |   14
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h  |  350
-rw-r--r--  erts/include/internal/libatomic_ops/ethread.h      |   30
-rw-r--r--  erts/include/internal/ppc32/atomic.h               |   98
-rw-r--r--  erts/include/internal/ppc32/ethread.h              |    5
-rw-r--r--  erts/include/internal/ppc32/rwlock.h               |   12
-rw-r--r--  erts/include/internal/ppc32/spinlock.h             |   12
-rw-r--r--  erts/include/internal/pthread/ethr_event.h         |  135
-rw-r--r--  erts/include/internal/sparc32/atomic.h             |  190
-rw-r--r--  erts/include/internal/sparc32/ethread.h            |   10
-rw-r--r--  erts/include/internal/sparc32/rwlock.h             |   12
-rw-r--r--  erts/include/internal/sparc32/spinlock.h           |   12
-rw-r--r--  erts/include/internal/tile/atomic.h                |  131
-rw-r--r--  erts/include/internal/win/ethr_atomic.h            |  415
-rw-r--r--  erts/include/internal/win/ethr_event.h             |   64
-rw-r--r--  erts/include/internal/win/ethread.h                |   35
28 files changed, 3890 insertions(+), 1386 deletions(-)
diff --git a/erts/include/internal/erl_misc_utils.h b/erts/include/internal/erl_misc_utils.h
index 82e9ba3798..507e1726f4 100644
--- a/erts/include/internal/erl_misc_utils.h
+++ b/erts/include/internal/erl_misc_utils.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -34,7 +34,7 @@ typedef struct {
erts_cpu_info_t *erts_cpu_info_create(void);
void erts_cpu_info_destroy(erts_cpu_info_t *cpuinfo);
-void erts_cpu_info_update(erts_cpu_info_t *cpuinfo);
+int erts_cpu_info_update(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_configured(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_online(erts_cpu_info_t *cpuinfo);
int erts_get_cpu_available(erts_cpu_info_t *cpuinfo);
@@ -50,4 +50,9 @@ int erts_unbind_from_cpu_str(char *str);
int erts_milli_sleep(long);
+#ifdef __WIN32__
+int erts_map_win_error_to_errno(DWORD win_error);
+int erts_get_last_win_errno(void);
+#endif
+
#endif /* #ifndef ERL_MISC_UTILS_H_ */
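
The erl_misc_utils.h hunks above change erts_cpu_info_update() from returning void to returning int, and add Win32 error-translation helpers. A minimal caller sketch, under the assumption (not stated in this diff) that a non-zero result means the CPU information actually changed:

#include "erl_misc_utils.h"

/* Hypothetical caller: re-read CPU info and react only when
 * erts_cpu_info_update() reports a change (assumed non-zero). */
static void refresh_cpu_topology(erts_cpu_info_t *cpuinfo)
{
    if (erts_cpu_info_update(cpuinfo)) {
        int online = erts_get_cpu_online(cpuinfo);
        /* e.g. rebind schedulers to the new topology */
        (void) online;
    }
}
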
diff --git a/erts/include/internal/ethr_atomics.h b/erts/include/internal/ethr_atomics.h
new file mode 100644
index 0000000000..1caf4d0567
--- /dev/null
+++ b/erts/include/internal/ethr_atomics.h
@@ -0,0 +1,726 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: The ethread atomic API
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_ATOMIC_H__
+#define ETHR_ATOMIC_H__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+# define ETHR_NEED_ATOMIC_PROTOTYPES__
+#endif
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * No native atomic implementation available. :(
+ * Use fallback...
+ */
+typedef ethr_sint32_t ethr_atomic32_t;
+typedef ethr_sint_t ethr_atomic_t;
+#else
+/*
+ * Map ethread native atomics to ethread API atomics.
+ *
+ * We do at least have a native atomic implementation that
+ * can handle integers of a size larger than or equal to
+ * the size of pointers.
+ */
+
+/* -- Pointer size atomics -- */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+#if ETHR_SIZEOF_PTR == 8
+# if defined(ETHR_HAVE_NATIVE_ATOMIC64)
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic64_addr
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#elif ETHR_SIZEOF_PTR == 4
+# define ETHR_NATMC_ADDR_FUNC__ ethr_native_atomic32_addr
+# ifdef ETHR_HAVE_NATIVE_ATOMIC32
+typedef ethr_native_atomic32_t ethr_atomic_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+# elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic_t;
+# define ETHR_NATMC_T__ ethr_native_atomic64_t
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+# else
+# error "Missing native atomic implementation"
+# endif
+#endif
+
+/* -- 32-bit atomics -- */
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint32_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_atomic32_t;
+# define ETHR_NAINT32_T__ ethr_sint64_t
+# define ETHR_NATMC32_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
+
+#endif
+
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+ethr_sint_t *ethr_atomic_addr(ethr_atomic_t *);
+void ethr_atomic_init(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_set(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_add_read(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_add(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_band(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_bor(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_xchg(ethr_atomic_t *, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_read_acqb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, ethr_sint_t);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_dec_read_relb(ethr_atomic_t *);
+ethr_sint_t ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+ethr_sint_t ethr_atomic_cmpxchg_relb(ethr_atomic_t *, ethr_sint_t, ethr_sint_t);
+
+ethr_sint32_t *ethr_atomic32_addr(ethr_atomic32_t *);
+void ethr_atomic32_init(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_set(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read(ethr_atomic32_t *);
+void ethr_atomic32_inc(ethr_atomic32_t *);
+void ethr_atomic32_dec(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_add_read(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_add(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_band(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_bor(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_xchg(ethr_atomic32_t *, ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_read_acqb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_inc_read_acqb(ethr_atomic32_t *);
+void ethr_atomic32_set_relb(ethr_atomic32_t *, ethr_sint32_t);
+void ethr_atomic32_dec_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_dec_read_relb(ethr_atomic32_t *);
+ethr_sint32_t ethr_atomic32_cmpxchg_acqb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+ethr_sint32_t ethr_atomic32_cmpxchg_relb(ethr_atomic32_t *,
+ ethr_sint32_t,
+ ethr_sint32_t);
+#endif
+
+int ethr_init_atomics(void);
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
+/*
+ * Fallbacks for atomics used in absence of a native implementation.
+ */
+
+#define ETHR_ATOMIC_ADDR_BITS 10
+#define ETHR_ATOMIC_ADDR_SHIFT 6
+
+typedef struct {
+ union {
+ ethr_spinlock_t lck;
+ char buf[ETHR_CACHE_LINE_SIZE];
+ } u;
+} ethr_atomic_protection_t;
+
+extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
+
+#define ETHR_ATOMIC_PTR2LCK__(PTR) \
+(&ethr_atomic_protection__[((((ethr_uint_t) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
+
+
+#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
+do { \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
+ { EXPS; } \
+ ethr_spin_unlock(slp__); \
+} while (0)
+
+#endif
+
+/*
+ * --- Pointer size atomics ---------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_addr)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t *) ETHR_NATMC_ADDR_FUNC__(var);
+#else
+ return (ethr_sint_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(init)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set)(var, (ETHR_NAINT_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, ethr_sint_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(add)(var, (ETHR_NAINT_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, ethr_sint_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(add_return)(var, (ETHR_NAINT_T__) i);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return)(var);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(and_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ ethr_sint_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(or_retold)(var,
+ (ETHR_NAINT_T__) mask);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var, ethr_sint_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(xchg)(var,
+ (ETHR_NAINT_T__) new);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ ethr_sint_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
+}
+
+/*
+ * Important memory barrier requirements.
+ *
+ * Each of the following atomic operations *must* supply a memory
+ * barrier of at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var,
+ ethr_sint_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(set_relb)(var, (ETHR_NAINT_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ ethr_sint_t new,
+ ethr_sint_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint_t) ETHR_NATMC_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT_T__) new,
+ (ETHR_NAINT_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
+#endif
+}
+
+/*
+ * --- 32-bit atomics ---------------------------------------------------------
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_addr)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic32_addr(var);
+#else
+ return (ethr_sint32_t *) var;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_init)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(init)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(ethr_atomic32_t *var, ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set)(var, (ETHR_NAINT32_T__) i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add)(ethr_atomic32_t *var,
+ ethr_sint32_t incr)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(add)(var, (ETHR_NAINT32_T__) incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_add_read)(ethr_atomic32_t *var,
+ ethr_sint32_t i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(add_return)(var, (ETHR_NAINT32_T__) i);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(inc)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec)(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = ++(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return)(var);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = --(*var));
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_band)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(and_retold)(var, (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_bor)(ethr_atomic32_t *var,
+ ethr_sint32_t mask)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return
+ (ethr_sint32_t) ETHR_NATMC32_FUNC__(or_retold)(var,
+ (ETHR_NAINT32_T__) mask);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_xchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(xchg)(var,
+ (ETHR_NAINT32_T__) new);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(cmpxchg)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ ethr_sint32_t res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
+}
+
+/*
+ * Important memory barrier requirements.
+ *
+ * Each of the following atomic operations *must* supply a memory
+ * barrier of at least the type specified by its suffix:
+ * _acqb = acquire barrier
+ * _relb = release barrier
+ */
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(read_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read_acqb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(inc_return_acqb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_inc_read)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(set_relb)(var, (ETHR_NAINT32_T__) val);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ETHR_NATMC32_FUNC__(dec_relb)(var);
+#else
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read_relb)(ethr_atomic32_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t) ETHR_NATMC32_FUNC__(dec_return_relb)(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_dec_read)(var);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_acqb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_acqb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+static ETHR_INLINE ethr_sint32_t
+ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg_relb)(ethr_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return (ethr_sint32_t)
+ ETHR_NATMC32_FUNC__(cmpxchg_relb)(var,
+ (ETHR_NAINT32_T__) new,
+ (ETHR_NAINT32_T__) exp);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic32_cmpxchg)(var, new, exp);
+#endif
+}
+
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_NATMC_ADDR_FUNC__
+
+#undef ETHR_NAINT32_T__
+#undef ETHR_NATMC32_FUNC__
+
+#endif
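
When ETHR_HAVE_NATIVE_ATOMICS is undefined, the header above serializes every operation through a table of 2^10 = 1024 spinlocks, each padded to a cache line; ETHR_ATOMIC_PTR2LCK__ picks the lock by hashing the variable's address. A standalone sketch of that address-to-slot mapping (names here are illustrative, not part of the ethread API):

#include <stdint.h>

#define ADDR_BITS  10   /* 1 << 10 == 1024 lock slots */
#define ADDR_SHIFT 6    /* discard the 64-byte cache-line offset */

/* Same computation as ETHR_ATOMIC_PTR2LCK__: shift away the low
 * (intra-cache-line) bits, then mask down to the table size. */
static unsigned lock_index(const void *p)
{
    return (unsigned) ((((uintptr_t) p) >> ADDR_SHIFT)
                       & ((1u << ADDR_BITS) - 1));
}

/* Variables on the same 64-byte line share one lock:
 * lock_index((void *) 0x1000) == lock_index((void *) 0x103f) */
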
diff --git a/erts/include/internal/ethr_internal.h b/erts/include/internal/ethr_internal.h
new file mode 100644
index 0000000000..e9c3daf783
--- /dev/null
+++ b/erts/include/internal/ethr_internal.h
@@ -0,0 +1,67 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Internal ethread exports
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_INTERNAL_H__
+#define ETHR_INTERNAL_H__
+
+#include "erl_misc_utils.h"
+
+extern ethr_memory_allocators ethr_mem__;
+extern erts_cpu_info_t *ethr_cpu_info__;
+extern size_t ethr_pagesize__;
+extern size_t ethr_min_stack_size__; /* kilo words */
+extern size_t ethr_max_stack_size__; /* kilo words */
+extern int ethr_not_completely_inited__;
+extern int ethr_not_inited__;
+
+extern void *(*ethr_thr_prepare_func__)(void);
+extern void (*ethr_thr_parent_func__)(void *);
+extern void (*ethr_thr_child_func__)(void *);
+
+#define ETHR_PAGE_ALIGN(SZ) \
+ (((((size_t) (SZ)) - 1)/ethr_pagesize__ + 1)*ethr_pagesize__)
+#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
+#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
+
+#undef ETHR_STACK_GUARD_SIZE
+#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
+# define ETHR_STACK_GUARD_SIZE (ethr_pagesize__)
+#endif
+
+/* implemented in lib_src/<thr-lib>/ethread.c */
+int ethr_set_tse__(ethr_ts_event *tsep);
+ethr_ts_event *ethr_get_tse__(void);
+ETHR_PROTO_NORETURN__ ethr_abort__(void);
+#ifdef ETHR_WIN32_THREADS
+int ethr_win_get_errno__(void);
+#endif
+
+/* implemented in lib_src/common/ethread_aux.c */
+int ethr_init_common__(ethr_init_data *id);
+int ethr_late_init_common__(ethr_late_init_data *lid);
+void ethr_run_exit_handlers__(void);
+void ethr_ts_event_destructor__(void *vtsep);
+
+
+#endif
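
The macros above are simple round-up arithmetic: ETHR_PAGE_ALIGN rounds a byte count up to a whole number of pages, while ETHR_B2KW/ETHR_KW2B convert between bytes and "kilo words" (1024 pointer-sized words). A worked, self-contained example assuming 4 KiB pages and 8-byte pointers:

#include <stdio.h>
#include <stddef.h>

static const size_t pagesize = 4096;   /* stand-in for ethr_pagesize__ */

#define PAGE_ALIGN(SZ) ((((size_t)(SZ) - 1) / pagesize + 1) * pagesize)
#define B2KW(B)  (((size_t)(B) - 1) / (sizeof(void *) * 1024) + 1)
#define KW2B(KW) ((size_t)(KW) * sizeof(void *) * 1024)

int main(void)
{
    printf("%zu\n", PAGE_ALIGN(4097)); /* 8192: rounded up to next page */
    printf("%zu\n", PAGE_ALIGN(4096)); /* 4096: already aligned         */
    printf("%zu\n", B2KW(8193));       /* 2: just over one kilo word    */
    printf("%zu\n", KW2B(2));          /* 16384 bytes with 8-byte words */
    return 0;
}
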
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
new file mode 100644
index 0000000000..fadaf1e2a4
--- /dev/null
+++ b/erts/include/internal/ethr_mutex.h
@@ -0,0 +1,674 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_MUTEX_H__
+#define ETHR_MUTEX_H__
+
+#define ETHR_RWMUTEX_INITIALIZED 0x99999999
+#define ETHR_MUTEX_INITIALIZED 0x77777777
+#define ETHR_COND_INITIALIZED 0x55555555
+
+#if 0
+# define ETHR_MTX_HARD_DEBUG
+#endif
+
+#if 0
+# define ETHR_MTX_CHK_EXCL
+#if 1
+# define ETHR_MTX_CHK_NON_EXCL
+#endif
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+# ifdef __GNUC__
+# warning ETHR_MTX_HARD_DEBUG
+# endif
+/*# define ETHR_MTX_HARD_DEBUG_LFS*/
+/*# define ETHR_MTX_HARD_DEBUG_FENCE*/
+/*# define ETHR_MTX_HARD_DEBUG_Q*/
+# define ETHR_MTX_HARD_DEBUG_WSQ
+
+# if !defined(ETHR_MTX_HARD_DEBUG_WSQ) && defined(ETHR_MTX_HARD_DEBUG_Q)
+# define ETHR_MTX_HARD_DEBUG_WSQ
+# endif
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#ifdef ETHR_DEBUG
+# ifndef ETHR_MTX_CHK_EXCL
+# define ETHR_MTX_CHK_EXCL
+# endif
+# ifndef ETHR_MTX_CHK_NON_EXCL
+# define ETHR_MTX_CHK_NON_EXCL
+# endif
+#endif
+
+#if 0
+# define ETHR_MTX_Q_LOCK_SPINLOCK__
+# define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
+#elif defined(ETHR_PTHREADS)
+# define ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__
+# define ETHR_MTX_QLOCK_TYPE__ pthread_mutex_t
+#elif defined(ETHR_WIN32_THREADS)
+# define ETHR_MTX_Q_LOCK_CRITICAL_SECTION__
+# define ETHR_MTX_QLOCK_TYPE__ CRITICAL_SECTION
+#else
+# error Need a qlock implementation
+#endif
+
+#define ETHR_RWMTX_W_FLG__ (((ethr_sint32_t) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((ethr_sint32_t) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((ethr_sint32_t) 1) << 29)
+
+/* frequent read kind */
+#define ETHR_RWMTX_R_FLG__ (((ethr_sint32_t) 1) << 28)
+#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((ethr_sint32_t) 1) << 27)
+#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)
+
+/* normal kind */
+#define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+#define ETHR_RWMTX_WAIT_FLGS__ \
+ (ETHR_RWMTX_W_WAIT_FLG__|ETHR_RWMTX_R_WAIT_FLG__)
+
+#define ETHR_CND_WAIT_FLG__ ETHR_RWMTX_R_WAIT_FLG__
+
+#ifdef ETHR_DEBUG
+#define ETHR_DBG_CHK_UNUSED_FLG_BITS(V) \
+ ETHR_ASSERT(!((V) & ~(ETHR_RWMTX_W_FLG__ \
+ | ETHR_RWMTX_W_WAIT_FLG__ \
+ | ETHR_RWMTX_R_WAIT_FLG__ \
+ | ETHR_RWMTX_RS_MASK__)))
+#else
+#define ETHR_DBG_CHK_UNUSED_FLG_BITS(V)
+#endif
+
+#define ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(MTX) \
+ ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic32_read(&(MTX)->mtxb.flgs))
+
+struct ethr_mutex_base_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long pre_fence;
+#endif
+ ethr_atomic32_t flgs;
+ short aux_scnt;
+ short main_scnt;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ int ws;
+#endif
+#ifdef ETHR_MTX_CHK_EXCL
+ ethr_atomic32_t exclusive;
+#endif
+#ifdef ETHR_MTX_CHK_NON_EXCL
+ ethr_atomic32_t non_exclusive;
+#endif
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+ ethr_atomic32_t hdbg_lfs;
+#endif
+};
+
+#endif
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_mutex_opt;
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_cond_opt;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ struct ethr_mutex_base_ mtxb;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ struct {
+ long pre_fence;
+ } mtxb; /* mtxb allows us to use the same macros as for mutex and rwmutex... */
+#endif
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread */
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ pthread_mutex_t pt_mtx;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ pthread_cond_t pt_cnd;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread */
+
+int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
+int ethr_mutex_init(ethr_mutex *);
+int ethr_mutex_destroy(ethr_mutex *);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+int ethr_mutex_trylock(ethr_mutex *);
+void ethr_mutex_lock(ethr_mutex *);
+void ethr_mutex_unlock(ethr_mutex *);
+#endif
+int ethr_cond_init_opt(ethr_cond *, ethr_cond_opt *);
+int ethr_cond_init(ethr_cond *);
+int ethr_cond_destroy(ethr_cond *);
+void ethr_cond_signal(ethr_cond *);
+void ethr_cond_broadcast(ethr_cond *);
+int ethr_cond_wait(ethr_cond *, ethr_mutex *);
+
+typedef enum {
+ ETHR_RWMUTEX_TYPE_NORMAL,
+ ETHR_RWMUTEX_TYPE_FREQUENT_READ,
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+} ethr_rwmutex_type;
+
+typedef enum {
+ ETHR_RWMUTEX_LONG_LIVED,
+ ETHR_RWMUTEX_SHORT_LIVED,
+ ETHR_RWMUTEX_UNKNOWN_LIVED
+} ethr_rwmutex_lived;
+
+typedef struct {
+ ethr_rwmutex_type type;
+ ethr_rwmutex_lived lived;
+ int main_spincount;
+ int aux_spincount;
+} ethr_rwmutex_opt;
+
+#define ETHR_RWMUTEX_OPT_DEFAULT_INITER \
+ {ETHR_RWMUTEX_TYPE_NORMAL, ETHR_RWMUTEX_UNKNOWN_LIVED, -1, -1}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+typedef union {
+ struct {
+ ethr_atomic32_t readers;
+ int waiting_readers;
+ int byte_offset;
+ ethr_rwmutex_lived lived;
+ } data;
+ char align__[ETHR_CACHE_LINE_SIZE];
+} ethr_rwmtx_readers_array__;
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ struct ethr_mutex_base_ mtxb;
+ ethr_rwmutex_type type;
+ ethr_ts_event *rq_end;
+ union {
+ ethr_rwmtx_readers_array__ *ra;
+ int rs;
+ } tdata;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread_rwlock */
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ pthread_rwlock_t pt_rwlock;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread_rwlock */
+
+int ethr_rwmutex_set_reader_group(int);
+int ethr_rwmutex_init_opt(ethr_rwmutex *, ethr_rwmutex_opt *);
+int ethr_rwmutex_init(ethr_rwmutex *);
+int ethr_rwmutex_destroy(ethr_rwmutex *);
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) \
+ || !defined(ETHR_TRY_INLINE_FUNCS) \
+ || defined(ETHR_MUTEX_IMPL__)
+int ethr_rwmutex_tryrlock(ethr_rwmutex *);
+void ethr_rwmutex_rlock(ethr_rwmutex *);
+void ethr_rwmutex_runlock(ethr_rwmutex *);
+int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwunlock(ethr_rwmutex *);
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+#define ETHR_MTX_HARD_ASSERT(A) \
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
+#else
+#define ETHR_MTX_HARD_ASSERT(A) ((void) 1)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
+do { \
+ ethr_atomic32_init(&(MTXB)->hdbg_lfs, 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
+do { \
+ ethr_sint32_t val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic32_inc_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ > 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
+do { \
+ ethr_sint32_t val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ >= 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
+do { \
+ ethr_sint32_t val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == -1); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
+do { \
+ ethr_sint32_t val__ = ethr_atomic32_inctest(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeadbeefdeadbeefL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeaddeaddeadL
+#else
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeaddeadL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeadL
+#endif
+
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ETHR_MTX_HARD_ASSERT((X)->mtxb.pre_fence == ETHR_MTX_HARD_DEBUG_PRE_FENCE);\
+ ETHR_MTX_HARD_ASSERT((X)->post_fence == ETHR_MTX_HARD_DEBUG_POST_FENCE); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X) \
+do { \
+ (X)->mtxb.pre_fence = ETHR_MTX_HARD_DEBUG_PRE_FENCE; \
+ (X)->post_fence = ETHR_MTX_HARD_DEBUG_POST_FENCE; \
+} while (0)
+#else
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
+#endif
+
+#ifdef ETHR_MTX_CHK_EXCL
+
+#if !defined(ETHR_DEBUG) && defined(__GNUC__)
+#warning "check exclusive is enabled"
+#endif
+
+# define ETHR_MTX_CHK_EXCL_INIT__(MTXB) \
+ ethr_atomic32_init(&(MTXB)->exclusive, 0)
+
+# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (!ethr_atomic32_read(&(MTXB)->exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (ethr_atomic32_read(&(MTXB)->exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is not exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB) \
+do { \
+ ETHR_MTX_CHK_EXCL_IS_NOT_EXCL((MTXB)); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 1); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB) \
+do { \
+ ETHR_MTX_CHK_EXCL_IS_EXCL((MTXB)); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+
+#ifdef ETHR_MTX_CHK_NON_EXCL
+
+#if !defined(ETHR_DEBUG) && defined(__GNUC__)
+#warning "check non-exclusive is enabled"
+#endif
+
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB) \
+ ethr_atomic32_init(&(MTXB)->non_exclusive, 0)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (!ethr_atomic32_read(&(MTXB)->non_exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is non-exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (ethr_atomic32_read(&(MTXB)->non_exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is not non-exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ethr_atomic32_inc(&(MTXB)->non_exclusive); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ethr_atomic32_add(&(MTXB)->non_exclusive, (NO)); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ethr_atomic32_dec(&(MTXB)->non_exclusive); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
+#endif
+
+#else
+# define ETHR_MTX_CHK_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB)
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
+#endif
+
+# define ETHR_MTX_CHK_EXCL_INIT(MTXB) \
+do { \
+ ETHR_MTX_CHK_EXCL_INIT__((MTXB)); \
+ ETHR_MTX_CHK_NON_EXCL_INIT__((MTXB)); \
+} while (0)
+
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_MAX 2000
+#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_BASE 800
+#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_INC 50
+#define ETHR_MTX_DEFAULT_AUX_SPINCOUNT 50
+
+#define ETHR_CND_DEFAULT_MAIN_SPINCOUNT 0
+#define ETHR_CND_DEFAULT_AUX_SPINCOUNT 0
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint32_t);
+void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint32_t);
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ ethr_sint32_t act;
+ int res;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
+
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ res = (act == 0) ? 0 : EBUSY;
+
+#ifdef ETHR_MTX_CHK_EXCL
+ if (res == 0)
+ ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);
+#endif
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
+
+ ETHR_COMPILER_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ ethr_sint32_t act;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
+
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ ethr_mutex_lock_wait__(mtx, act);
+
+ ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
+
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ ethr_sint32_t act;
+ ETHR_COMPILER_BARRIER;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
+
+ ETHR_MTX_CHK_EXCL_UNSET_EXCL(&mtx->mtxb);
+
+ act = ethr_atomic32_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ ethr_mutex_unlock_wake__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#else /* pthread_mutex */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ int res;
+ res = pthread_mutex_trylock(&mtx->pt_mtx);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_lock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_unlock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_mutex */
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT_MAX 2000
+#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT_BASE 800
+#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT_INC 50
+#define ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT 50
+
+#else /* pthread_rwlock */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_rwlock */
+
+int ethr_mutex_lib_init(int);
+int ethr_mutex_lib_late_init(int, int);
+
+#endif /* #ifndef ETHR_MUTEX_H__ */
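
In the ETHR_USE_OWN_MTX_IMPL__ branch above, the uncontended paths are a single cmpxchg on mtxb.flgs: lock/trylock attempt 0 -> ETHR_RWMTX_W_FLG__ with acquire semantics, and unlock attempts the reverse with release semantics, falling into the wait/wake slow paths only when other flag bits are set. Use of the public API itself is conventional; a brief sketch (includes and error handling kept minimal, and assumed rather than taken from this diff):

#include <stdlib.h>
#include "ethread.h"   /* pulls in ethr_mutex.h */

static ethr_mutex mtx;
static ethr_cond cnd;
static int ready = 0;

static void init_sync(void)
{
    if (ethr_mutex_init(&mtx) != 0 || ethr_cond_init(&cnd) != 0)
        abort();
}

static void wait_until_ready(void)
{
    ethr_mutex_lock(&mtx);
    while (!ready)                      /* guard against spurious wakeups */
        (void) ethr_cond_wait(&cnd, &mtx);
    ethr_mutex_unlock(&mtx);
}

static void signal_ready(void)
{
    ethr_mutex_lock(&mtx);
    ready = 1;
    ethr_cond_signal(&cnd);
    ethr_mutex_unlock(&mtx);
}
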
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
new file mode 100644
index 0000000000..8e04692856
--- /dev/null
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -0,0 +1,209 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: "Optimized" fallbacks used when native ops are missing
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
+#define ETHR_OPTIMIZED_FALLBACKS_H__
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+/* --- Optimized spinlocks using pthread spinlocks -------------------------- */
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef pthread_spinlock_t ethr_opt_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE int
+ethr_opt_spinlock_init(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_init((pthread_spinlock_t *) lock, 0);
+}
+
+static ETHR_INLINE int
+ethr_opt_spinlock_destroy(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_destroy((pthread_spinlock_t *) lock);
+}
+
+
+static ETHR_INLINE int
+ethr_opt_spin_unlock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_unlock((pthread_spinlock_t *) lock);
+}
+
+static ETHR_INLINE int
+ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_lock((pthread_spinlock_t *) lock);
+}
+
+#endif
+
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native spinlocks using native atomics -------------------------------- */
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_spinlock_t;
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ ETHR_NATMC_FUNC__(init)(lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == 1);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ while (ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, 1, 0) != 0) {
+ while (ETHR_NATMC_FUNC__(read)(lock) != 0)
+ ETHR_SPIN_BODY;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#undef ETHR_NATMC_FUNC__
+
+#endif
+
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native rwspinlocks using native atomics ------------------------------ */
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+
+#if defined(ETHR_HAVE_NATIVE_ATOMIC32)
+typedef ethr_native_atomic32_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint32_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint32_t) 1) << 30)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#elif defined(ETHR_HAVE_NATIVE_ATOMIC64)
+typedef ethr_native_atomic64_t ethr_native_rwlock_t;
+# define ETHR_NAINT_T__ ethr_sint64_t
+# define ETHR_WLOCK_FLAG__ (((ethr_sint64_t) 1) << 62)
+# define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#else
+# error "Missing native atomic implementation"
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ ETHR_NATMC_FUNC__(init)(lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+#ifdef DEBUG
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) >= 0);
+#endif
+ ETHR_NATMC_FUNC__(dec_relb)(lock);
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ ETHR_NAINT_T__ act, exp = 0;
+ while (1) {
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp+1, exp);
+ if (act == exp)
+ break;
+ while (act & ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ETHR_NATMC_FUNC__(read)(lock);
+ }
+ exp = act;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ETHR_NATMC_FUNC__(read)(lock) == ETHR_WLOCK_FLAG__);
+ ETHR_NATMC_FUNC__(set_relb)(lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ ETHR_NAINT_T__ act, exp = 0;
+ while (1) {
+ act = ETHR_NATMC_FUNC__(cmpxchg_acqb)(lock, exp|ETHR_WLOCK_FLAG__, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = act & ~ETHR_WLOCK_FLAG__;
+ }
+ act |= ETHR_WLOCK_FLAG__;
+ /* Wait for readers to leave */
+ while (act != ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ETHR_NATMC_FUNC__(read_acqb)(lock);
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#undef ETHR_NAINT_T__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_WLOCK_FLAG__
+
+#endif
+
+#endif
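
The rwspinlock fallback above keeps the reader count in the low bits and a writer flag in bit 30 (bit 62 on 64-bit words): readers cmpxchg count -> count+1 and back off while the flag is set; a writer first cmpxchgs the flag in, then spins until the reader count drains. The same protocol sketched with C11 atomics in place of the native-atomic macros (unlock paths omitted; an illustration, not the ethread implementation):

#include <stdatomic.h>
#include <stdint.h>

#define WLOCK_FLAG (((int32_t) 1) << 30)  /* writer bit; readers below */

static _Atomic int32_t rwlock = 0;

static void read_lock(void)
{
    int32_t exp = 0;
    for (;;) {
        if (atomic_compare_exchange_strong(&rwlock, &exp, exp + 1))
            break;                         /* reader count bumped */
        while (exp & WLOCK_FLAG)           /* writer active: wait it out */
            exp = atomic_load(&rwlock);
    }
}

static void write_lock(void)
{
    int32_t exp = 0;
    for (;;) {
        if (atomic_compare_exchange_strong(&rwlock, &exp, exp | WLOCK_FLAG))
            break;                         /* writer flag set */
        exp &= ~WLOCK_FLAG;                /* retry against the readers */
    }
    while (atomic_load(&rwlock) != WLOCK_FLAG)
        ;                                  /* wait for readers to drain */
}
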
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 4e7a38cd5c..4cd95faf6a 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -33,27 +33,15 @@
#include <stdlib.h>
#include "erl_errno.h"
-/*
- * Extra memory barrier requirements:
- * - ethr_atomic_or_old() needs to enforce a memory barrier sufficient
- * for a lock operation.
- * - ethr_atomic_and_old() needs to enforce a memory barrier sufficient
- * for an unlock operation.
- * - ethr_atomic_cmpxchg() needs to enforce a memory barrier sufficient
- * for a lock and unlock operation.
- */
-
-
-#undef ETHR_USE_RWMTX_FALLBACK
#undef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-#undef ETHR_HAVE_OPTIMIZED_LOCKS
-
-typedef struct {
- long tv_sec;
- long tv_nsec;
-} ethr_timeval;
+#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
+#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
#if defined(DEBUG)
+# define ETHR_DEBUG
+#endif
+
+#if defined(ETHR_DEBUG)
# undef ETHR_XCHK
# define ETHR_XCHK 1
#else
@@ -68,47 +56,57 @@ typedef struct {
#elif defined(__WIN32__)
# define ETHR_INLINE __forceinline
#endif
-#if defined(DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
+#if defined(ETHR_DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
|| (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC))
# undef ETHR_INLINE
# define ETHR_INLINE
# undef ETHR_TRY_INLINE_FUNCS
#endif
-#ifdef ETHR_FORCE_INLINE_FUNCS
-# define ETHR_TRY_INLINE_FUNCS
-#endif
-#if !defined(ETHR_DISABLE_NATIVE_IMPLS) \
- && (defined(PURIFY) || defined(VALGRIND) || defined(ERTS_MIXED_CYGWIN_VC))
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && (defined(PURIFY)||defined(VALGRIND))
# define ETHR_DISABLE_NATIVE_IMPLS
#endif
-#define ETHR_RWMUTEX_INITIALIZED 0x99999999
-#define ETHR_MUTEX_INITIALIZED 0x77777777
-#define ETHR_COND_INITIALIZED 0x55555555
+/* Assume 64-byte cache line size */
+#define ETHR_CACHE_LINE_SIZE ((ethr_uint_t) 64)
+#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
-#define ETHR_CACHE_LINE_SIZE 64
+#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / ETHR_CACHE_LINE_SIZE) + 1) * ETHR_CACHE_LINE_SIZE)
-#ifdef ETHR_INLINE_FUNC_NAME_
-# define ETHR_CUSTOM_INLINE_FUNC_NAME_
-#else
+#ifndef ETHR_INLINE_FUNC_NAME_
# define ETHR_INLINE_FUNC_NAME_(X) X
#endif
-#define ETHR_COMPILER_BARRIER ethr_compiler_barrier()
-#ifdef __GNUC__
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER __asm__ __volatile__("":::"memory")
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
#endif
-#ifdef DEBUG
+int ethr_assert_failed(const char *file, int line, const char *func, char *a);
+#ifdef ETHR_DEBUG
#define ETHR_ASSERT(A) \
- ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
-int ethr_assert_failed(char *f, int l, char *a);
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, __func__, #A)))
#else
#define ETHR_ASSERT(A) ((void) 1)
#endif
+#if defined(__GNUC__)
+# define ETHR_PROTO_NORETURN__ void __attribute__((noreturn))
+# define ETHR_IMPL_NORETURN__ void
+#elif defined(__WIN32__) && defined(_MSC_VER)
+# define ETHR_PROTO_NORETURN__ __declspec(noreturn) void
+# define ETHR_IMPL_NORETURN__ __declspec(noreturn) void
+#else
+# define ETHR_PROTO_NORETURN__ void
+# define ETHR_IMPL_NORETURN__ void
+#endif
+
#if defined(ETHR_PTHREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The pthread implementation *
@@ -118,7 +116,9 @@ int ethr_assert_failed(char *f, int l, char *a);
#error "_GNU_SOURCE not defined. Please, compile all files with -D_GNU_SOURCE."
#endif
-#if defined(ETHR_HAVE_MIT_PTHREAD_H)
+#if defined(ETHR_NEED_NPTL_PTHREAD_H)
+#include <nptl/pthread.h>
+#elif defined(ETHR_HAVE_MIT_PTHREAD_H)
#include <pthread/mit/pthread.h>
#elif defined(ETHR_HAVE_PTHREAD_H)
#include <pthread.h>
@@ -128,130 +128,23 @@ int ethr_assert_failed(char *f, int l, char *a);
typedef pthread_t ethr_tid;
-typedef struct ethr_mutex_ ethr_mutex;
-struct ethr_mutex_ {
- pthread_mutex_t pt_mtx;
- int is_rec_mtx;
- ethr_mutex *prev;
- ethr_mutex *next;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-
-typedef struct ethr_cond_ ethr_cond;
-struct ethr_cond_ {
- pthread_cond_t pt_cnd;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
+typedef pthread_key_t ethr_tsd_key;
-#ifndef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-#define ETHR_USE_RWMTX_FALLBACK
-#else
-typedef struct ethr_rwmutex_ ethr_rwmutex;
-struct ethr_rwmutex_ {
- pthread_rwlock_t pt_rwlock;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-#endif
+#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-/* Static initializers */
-#if ETHR_XCHK
-#define ETHR_MUTEX_XCHK_INITER , ETHR_MUTEX_INITIALIZED
-#define ETHR_COND_XCHK_INITER , ETHR_COND_INITIALIZED
-#else
-#define ETHR_MUTEX_XCHK_INITER
-#define ETHR_COND_XCHK_INITER
+#if defined(PURIFY) || defined(VALGRIND)
+# define ETHR_FORCE_PTHREAD_RWLOCK
+# define ETHR_FORCE_PTHREAD_MUTEX
#endif
-#define ETHR_MUTEX_INITER {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-#define ETHR_COND_INITER {PTHREAD_COND_INITIALIZER ETHR_COND_XCHK_INITER}
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE) \
- || defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-# define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
-# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
-# define ETHR_REC_MUTEX_INITER \
- {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, 1, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-# endif
-#else
-# undef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+#if !defined(ETHR_FORCE_PTHREAD_RWLOCK)
+# define ETHR_USE_OWN_RWMTX_IMPL__
#endif
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-# define ETHR_NO_FORKSAFETY 1
+#if !defined(ETHR_FORCE_PTHREAD_MUTEX) && 0
+# define ETHR_USE_OWN_MTX_IMPL__
#endif
-typedef pthread_key_t ethr_tsd_key;
-
-#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- return pthread_mutex_trylock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- return pthread_mutex_lock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- return pthread_mutex_unlock(&mtx->pt_mtx);
-}
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-#endif /* ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
-
#elif defined(ETHR_WIN32_THREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The native win32 threads implementation *
@@ -273,418 +166,89 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
# undef WIN32_LEAN_AND_MEAN
#endif
-/* Types */
-typedef long ethr_tid; /* thread id type */
-typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
-#if ETHR_XCHK
- int is_rec_mtx;
-#endif
-} ethr_mutex;
-
-typedef struct cnd_wait_event__ cnd_wait_event_;
-
-typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
- cnd_wait_event_ *queue;
- cnd_wait_event_ *queue_end;
-} ethr_cond;
-
-#define ETHR_USE_RWMTX_FALLBACK
-
-/* Static initializers */
-
-#define ETHR_MUTEX_INITER {0}
-#define ETHR_COND_INITER {0}
-
-#define ETHR_REC_MUTEX_INITER ETHR_MUTEX_INITER
-
-#define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
-
-typedef DWORD ethr_tsd_key;
-
-#undef ETHR_HAVE_ETHR_SIG_FUNCS
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-int ethr_fake_static_mutex_init(ethr_mutex *mtx);
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return TryEnterCriticalSection(&mtx->cs) ? 0 : EBUSY;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&mtx->cs);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- LeaveCriticalSection(&mtx->cs);
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
+#if defined(_MSC_VER)
-#ifdef ERTS_MIXED_CYGWIN_VC
-
-/* atomics */
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-#    define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Don't trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
-#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+#if ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
#endif
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
-
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile LONG x___ = 0; \
- (void) _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
-
-typedef struct {
- volatile LONG value;
-} ethr_atomic_t;
-
-typedef struct {
- volatile LONG locked;
-} ethr_spinlock_t;
-
-typedef struct {
- volatile LONG counter;
-} ethr_rwlock_t;
-#define ETHR_WLOCK_FLAG__ (((LONG) 1) << 30)
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
+#if ETHR_SIZEOF___INT64 == 8
+#define ETHR_HAVE_INT64_T 1
+typedef __int64 ethr_sint64_t;
+typedef unsigned __int64 ethr_uint64_t;
#endif
- return 0;
-}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
#endif
- return 0;
-}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- *i = var->value;
-#else
- *i = InterlockedExchangeAdd(&var->value, (LONG) 0);
-#endif
- return 0;
-}
+struct ethr_join_data_;
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = InterlockedExchangeAdd(&var->value, (LONG) i);
- *testp += i;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- (void) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- (void) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedDecrement(&var->value);
- return 0;
-}
+/* Types */
+typedef struct {
+ long id;
+ struct ethr_join_data_ *jdata;
+} ethr_tid; /* thread id type */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedAnd() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedAnd(&var->value, mask);
- return 0;
-}
+typedef DWORD ethr_tsd_key;
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedOr() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedOr(&var->value, mask);
- return 0;
-}
+#undef ETHR_HAVE_ETHR_SIG_FUNCS
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedCompareExchange() provides a full
- * memory barrier.
- */
- *old = _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) expected);
- return 0;
-}
+#define ETHR_USE_OWN_RWMTX_IMPL__
+#define ETHR_USE_OWN_MTX_IMPL__
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = (long) InterlockedExchange(&var->value, (LONG) new);
- return 0;
-}
+#define ETHR_YIELD() (Sleep(0), 0)
-/*
- * According to msdn InterlockedExchange() provides a full
- * memory barrier.
- */
+#else /* No supported thread lib found */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->locked = (LONG) 0;
+#ifdef ETHR_NO_SUPP_THR_LIB_NOT_FATAL
+#define ETHR_NO_THREAD_LIB
#else
- (void) InterlockedExchange(&lock->locked, (LONG) 0);
+#error "No supported thread lib found"
#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
#endif
- InterlockedExchange(&lock->locked, (LONG) 0);
-#ifdef DEBUG
- ETHR_ASSERT(old == 1);
-#endif
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
-{
- LONG old;
- do {
- old = InterlockedExchange(&lock->locked, (LONG) 1);
- } while (old != (LONG) 0);
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-/*
- * According to msdn InterlockedIncrement, InterlockedDecrement,
- * and InterlockedExchangeAdd(), _InterlockedAnd, and _InterlockedOr
- * provides full memory barriers.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->counter = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->counter, (LONG) 0);
+#ifndef ETHR_HAVE_INT32_T
+#if ETHR_SIZEOF_INT == 4
+#define ETHR_HAVE_INT32_T 1
+typedef int ethr_sint32_t;
+typedef unsigned int ethr_uint32_t;
+#elif ETHR_SIZEOF_LONG == 4
+#define ETHR_HAVE_INT32_T 1
+typedef long ethr_sint32_t;
+typedef unsigned long ethr_uint32_t;
#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
#endif
- InterlockedDecrement(&lock->counter);
- ETHR_ASSERT(old != 0);
- }
- return 0;
-}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- while (1) {
- LONG old = InterlockedIncrement(&lock->counter);
- if ((old & ETHR_WLOCK_FLAG__) == 0)
- break; /* Got read lock */
- /* Restore and wait for writers to unlock */
- old = InterlockedDecrement(&lock->counter);
- while (old & ETHR_WLOCK_FLAG__) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
+#ifndef ETHR_HAVE_INT64_T
+#if ETHR_SIZEOF_INT == 8
+#define ETHR_HAVE_INT64_T 1
+typedef int ethr_sint64_t;
+typedef unsigned int ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long ethr_sint64_t;
+typedef unsigned long ethr_uint64_t;
+#elif ETHR_SIZEOF_LONG_LONG == 8
+#define ETHR_HAVE_INT64_T 1
+typedef long long ethr_sint64_t;
+typedef unsigned long long ethr_uint64_t;
#endif
- }
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
#endif
- _InterlockedAnd(&lock->counter, ~ETHR_WLOCK_FLAG__);
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- return 0;
-}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- LONG old;
- do {
- old = _InterlockedOr(&lock->counter, ETHR_WLOCK_FLAG__);
- } while (old & ETHR_WLOCK_FLAG__);
- /* We got the write part of the lock; wait for readers to unlock */
- while ((old & ~ETHR_WLOCK_FLAG__) != 0) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
+#if ETHR_SIZEOF_PTR == 4
+#ifndef ETHR_HAVE_INT32_T
+#error "No 32-bit integer type found"
#endif
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* #ifdef ERTS_MIXED_CYGWIN_VC */
-
-#else /* No supported thread lib found */
-
-#ifdef ETHR_NO_SUPP_THR_LIB_NOT_FATAL
-#define ETHR_NO_THREAD_LIB
-#else
-#error "No supported thread lib found"
+typedef ethr_sint32_t ethr_sint_t;
+typedef ethr_uint32_t ethr_uint_t;
+#elif ETHR_SIZEOF_PTR == 8
+#ifndef ETHR_HAVE_INT64_T
+#error "No 64-bit integer type found"
#endif
-
+typedef ethr_sint64_t ethr_sint_t;
+typedef ethr_uint64_t ethr_uint_t;
#endif
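
/*
 * A sketch of what the selection above guarantees: ethr_sint_t and
 * ethr_uint_t are exactly pointer sized. The compile-time check below
 * is hypothetical (not part of the ethread API) and fails to build if
 * the wrong typedef were ever picked.
 */
typedef char ethr_sint_is_ptr_sized_check__[
    sizeof(ethr_sint_t) == ETHR_SIZEOF_PTR ? 1 : -1];
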
/* __builtin_expect() is needed by both native atomics code
@@ -698,6 +262,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# if defined(__GNUC__)
# if defined(ETHR_PREFER_GCC_NATIVE_IMPLS)
# include "gcc/ethread.h"
+# elif defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
+# include "libatomic_ops/ethread.h"
# endif
# ifndef ETHR_HAVE_NATIVE_ATOMICS
# if ETHR_SIZEOF_PTR == 4
@@ -718,115 +284,143 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# endif
# endif
# include "gcc/ethread.h"
+# include "libatomic_ops/ethread.h"
# endif
+# elif defined(ETHR_HAVE_LIBATOMIC_OPS)
+# include "libatomic_ops/ethread.h"
+# elif defined(ETHR_WIN32_THREADS)
+# include "win/ethread.h"
# endif
-#endif /* !defined(ETHR_DISABLE_NATIVE_IMPLS) */
+#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
-#ifdef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-# undef ETHR_HAVE_NATIVE_ATOMICS
-#endif
-#ifdef ETHR_HAVE_OPTIMIZED_LOCKS
-# undef ETHR_HAVE_NATIVE_LOCKS
-#endif
-
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
+#if defined(__GNUC__)
+# ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+# endif
+# ifndef ETHR_SPIN_BODY
+# if defined(__i386__) || defined(__x86_64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
+# elif defined(__ia64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("hint @pause" : : : "memory")
+# elif defined(__sparc__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("membar #LoadLoad")
+# else
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+# endif
+# endif
+#elif defined(ETHR_WIN32_THREADS)
+# ifndef ETHR_COMPILER_BARRIER
+# include <intrin.h>
+# pragma intrinsic(_ReadWriteBarrier)
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+# ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
+# endif
#endif
-typedef struct {
- unsigned open;
- ethr_mutex mtx;
- ethr_cond cnd;
-} ethr_gate;
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Map ethread native atomics to ethread API atomics.
+ * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
+ * i.e. when our lock-based atomic fallback is used, a no-op is sufficient.
*/
-typedef ethr_native_atomic_t ethr_atomic_t;
+#define ETHR_MEMORY_BARRIER do { } while (0)
+#define ETHR_WRITE_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER do { } while (0)
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-/*
- * Map ethread native spinlocks to ethread API spinlocks.
- */
-typedef ethr_native_spinlock_t ethr_spinlock_t;
-/*
- * Map ethread native rwlocks to ethread API rwlocks.
- */
-typedef ethr_native_rwlock_t ethr_rwlock_t;
+#ifndef ETHR_WRITE_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER_IS_FULL
#endif
-
-#ifdef ETHR_USE_RWMTX_FALLBACK
-typedef struct {
- ethr_mutex mtx;
- ethr_cond rcnd;
- ethr_cond wcnd;
- unsigned readers;
- unsigned waiting_readers;
- unsigned waiting_writers;
-#if ETHR_XCHK
- int initialized;
+#ifndef ETHR_READ_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER_IS_FULL
#endif
-} ethr_rwmutex;
+#ifndef ETHR_READ_DEPEND_MEMORY_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER_IS_COMPILER_BARRIER
#endif
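
/*
 * Usage sketch (hypothetical publisher/consumer, assuming only the
 * barrier macros above): the writer orders its data store before the
 * flag store, and the reader orders the flag load before the data
 * load. Where only a full ETHR_MEMORY_BARRIER exists, the weaker
 * macros fall back to it: correct, just potentially more expensive
 * than needed.
 */
static int pub_data__;
static volatile int pub_flag__ = 0;

static void publish__(int val)
{
    pub_data__ = val;
    ETHR_WRITE_MEMORY_BARRIER;  /* data store before flag store */
    pub_flag__ = 1;
}

static int consume__(void)
{
    while (!pub_flag__)
        ;                       /* spin until published */
    ETHR_READ_MEMORY_BARRIER;   /* flag load before data load */
    return pub_data__;
}
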
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-typedef long ethr_atomic_t;
-#endif
+#define ETHR_FATAL_ERROR__(ERR) \
+ ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err);
-#if defined(ETHR_WIN32_THREADS)
-typedef struct {
- CRITICAL_SECTION cs;
-} ethr_spinlock_t;
-typedef struct {
- CRITICAL_SECTION cs;
- unsigned counter;
-} ethr_rwlock_t;
+void ethr_compiler_barrier_fallback(void);
+#ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+#endif
-int ethr_do_spinlock_init(ethr_spinlock_t *lock);
-int ethr_do_rwlock_init(ethr_rwlock_t *lock);
+#ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+#endif
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+#ifndef ETHR_YIELD
+# if defined(ETHR_HAVE_SCHED_YIELD)
+# ifdef ETHR_HAVE_SCHED_H
+# include <sched.h>
+# endif
+# include <errno.h>
+# if defined(ETHR_SCHED_YIELD_RET_INT)
+# define ETHR_YIELD() (sched_yield() < 0 ? errno : 0)
+# else
+# define ETHR_YIELD() (sched_yield(), 0)
+# endif
+# elif defined(ETHR_HAVE_PTHREAD_YIELD)
+# if defined(ETHR_PTHREAD_YIELD_RET_INT)
+# define ETHR_YIELD() pthread_yield()
+# else
+# define ETHR_YIELD() (pthread_yield(), 0)
+# endif
+# else
+# define ETHR_YIELD() (ethr_compiler_barrier(), 0)
+# endif
+#endif
+
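
/*
 * Spin-then-yield sketch (hypothetical helper, assuming the macros
 * above): issue the cheap per-iteration hint via ETHR_SPIN_BODY and,
 * after ETHR_YIELD_AFTER_BUSY_LOOPS busy iterations, give up the
 * processor via ETHR_YIELD(), which returns an errno-style int.
 */
static ETHR_INLINE void
ethr_example_spin_until_set__(volatile int *flagp)
{
    int spins = 0;
    while (!*flagp) {
        ETHR_SPIN_BODY;
        if (++spins >= ETHR_YIELD_AFTER_BUSY_LOOPS) {
            (void) ETHR_YIELD();
            spins = 0;
        }
    }
}
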
+#include "ethr_optimized_fallbacks.h"
-#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-typedef struct {
- pthread_spinlock_t spnlck;
-} ethr_spinlock_t;
typedef struct {
- pthread_spinlock_t spnlck;
- unsigned counter;
-} ethr_rwlock_t;
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+ void *(*thread_create_prepare_func)(void);
+ void (*thread_create_parent_func)(void *);
+ void (*thread_create_child_func)(void *);
+} ethr_init_data;
-#else /* ethr mutex/rwmutex */
+#define ETHR_INIT_DATA_DEFAULT_INITER {NULL, NULL, NULL}
typedef struct {
- ethr_mutex mtx;
-} ethr_spinlock_t;
+ void *(*alloc)(size_t);
+ void *(*realloc)(void *, size_t);
+ void (*free)(void *);
+} ethr_memory_allocator;
+
+#define ETHR_MEM_ALLOC_DEF_INITER__ {NULL, NULL, NULL}
typedef struct {
- ethr_rwmutex rwmtx;
-} ethr_rwlock_t;
+ ethr_memory_allocator std;
+ ethr_memory_allocator sl;
+ ethr_memory_allocator ll;
+} ethr_memory_allocators;
-#endif /* end mutex/rwmutex */
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
+#define ETHR_MEM_ALLOCS_DEF_INITER__ \
+ {ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__}
typedef struct {
- void *(*alloc)(size_t);
- void *(*realloc)(void *, size_t);
- void (*free)(void *);
- void *(*thread_create_prepare_func)(void);
- void (*thread_create_parent_func)(void *);
- void (*thread_create_child_func)(void *);
-} ethr_init_data;
+ ethr_memory_allocators mem;
+ int reader_groups;
+ int main_threads;
+} ethr_late_init_data;
-#define ETHR_INIT_DATA_DEFAULT_INITER {malloc, realloc, free, NULL, NULL, NULL}
+#define ETHR_LATE_INIT_DATA_DEFAULT_INITER \
+ {ETHR_MEM_ALLOCS_DEF_INITER__, 0, 0}
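
/*
 * Initialization sketch (hypothetical embedder code, assuming the
 * structures above and the ethr_init()/ethr_late_init() prototypes
 * declared below): thread-create hooks go to ethr_init(), while
 * allocators, reader groups, and main-thread count go to the new
 * ethr_late_init(). Both are assumed to return 0 or an errno value.
 */
static int start_ethread__(void)
{
    ethr_init_data id = ETHR_INIT_DATA_DEFAULT_INITER;
    ethr_late_init_data lid = ETHR_LATE_INIT_DATA_DEFAULT_INITER;
    int res = ethr_init(&id);
    if (res != 0)
        return res;
    lid.reader_groups = 4;      /* hypothetical tuning values */
    lid.main_threads = 1;
    return ethr_late_init(&lid);
}
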
typedef struct {
int detached; /* boolean (default false) */
@@ -835,18 +429,14 @@ typedef struct {
#define ETHR_THR_OPTS_DEFAULT_INITER {0, -1}
-#if defined(ETHR_CUSTOM_INLINE_FUNC_NAME_) || !defined(ETHR_TRY_INLINE_FUNCS)
-# define ETHR_NEED_MTX_PROTOTYPES__
-# define ETHR_NEED_RWMTX_PROTOTYPES__
-# define ETHR_NEED_SPINLOCK_PROTOTYPES__
-# define ETHR_NEED_ATOMIC_PROTOTYPES__
-#endif
-#if !defined(ETHR_NEED_RWMTX_PROTOTYPES__) && defined(ETHR_USE_RWMTX_FALLBACK)
-# define ETHR_NEED_RWMTX_PROTOTYPES__
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+# define ETHR_NEED_SPINLOCK_PROTOTYPES__
+# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
#endif
int ethr_init(ethr_init_data *);
+int ethr_late_init(ethr_late_init_data *);
int ethr_install_exit_handler(void (*funcp)(void));
int ethr_thr_create(ethr_tid *, void * (*)(void *), void *, ethr_thr_opts *);
int ethr_thr_join(ethr_tid, void **);
@@ -854,79 +444,12 @@ int ethr_thr_detach(ethr_tid);
void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
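
/*
 * Thread lifecycle sketch (hypothetical thr_func__, assuming the
 * prototypes above; a NULL ethr_thr_opts pointer is assumed to select
 * the defaults). Since ethr_tid is now a struct on win32, tids must be
 * compared with ethr_equal_tids() rather than with ==.
 */
static void *thr_func__(void *arg)
{
    return arg;                 /* becomes the join retval */
}

static int run_one_thread__(void)
{
    ethr_tid tid;
    void *retval;
    int res = ethr_thr_create(&tid, thr_func__, NULL, NULL);
    if (res != 0)
        return res;
    return ethr_thr_join(tid, &retval);
}
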
-int ethr_mutex_init(ethr_mutex *);
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-int ethr_rec_mutex_init(ethr_mutex *);
-#endif
-int ethr_mutex_destroy(ethr_mutex *);
-int ethr_mutex_set_forksafe(ethr_mutex *);
-int ethr_mutex_unset_forksafe(ethr_mutex *);
-#ifdef ETHR_NEED_MTX_PROTOTYPES__
-int ethr_mutex_trylock(ethr_mutex *);
-int ethr_mutex_lock(ethr_mutex *);
-int ethr_mutex_unlock(ethr_mutex *);
-#endif
-int ethr_cond_init(ethr_cond *);
-int ethr_cond_destroy(ethr_cond *);
-int ethr_cond_signal(ethr_cond *);
-int ethr_cond_broadcast(ethr_cond *);
-int ethr_cond_wait(ethr_cond *, ethr_mutex *);
-int ethr_cond_timedwait(ethr_cond *, ethr_mutex *, ethr_timeval *);
-
-int ethr_rwmutex_init(ethr_rwmutex *);
-int ethr_rwmutex_destroy(ethr_rwmutex *);
-#ifdef ETHR_NEED_RWMTX_PROTOTYPES__
-int ethr_rwmutex_tryrlock(ethr_rwmutex *);
-int ethr_rwmutex_rlock(ethr_rwmutex *);
-int ethr_rwmutex_runlock(ethr_rwmutex *);
-int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwunlock(ethr_rwmutex *);
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-int ethr_atomic_init(ethr_atomic_t *, long);
-int ethr_atomic_set(ethr_atomic_t *, long);
-int ethr_atomic_read(ethr_atomic_t *, long *);
-int ethr_atomic_inctest(ethr_atomic_t *, long *);
-int ethr_atomic_dectest(ethr_atomic_t *, long *);
-int ethr_atomic_inc(ethr_atomic_t *);
-int ethr_atomic_dec(ethr_atomic_t *);
-int ethr_atomic_addtest(ethr_atomic_t *, long, long *);
-int ethr_atomic_add(ethr_atomic_t *, long);
-int ethr_atomic_and_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_or_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_xchg(ethr_atomic_t *, long, long *);
-int ethr_atomic_cmpxchg(ethr_atomic_t *, long, long, long *);
-#endif
-#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
-int ethr_spinlock_init(ethr_spinlock_t *);
-int ethr_spinlock_destroy(ethr_spinlock_t *);
-int ethr_spin_unlock(ethr_spinlock_t *);
-int ethr_spin_lock(ethr_spinlock_t *);
-
-int ethr_rwlock_init(ethr_rwlock_t *);
-int ethr_rwlock_destroy(ethr_rwlock_t *);
-int ethr_read_unlock(ethr_rwlock_t *);
-int ethr_read_lock(ethr_rwlock_t *);
-int ethr_write_unlock(ethr_rwlock_t *);
-int ethr_write_lock(ethr_rwlock_t *);
-#endif
-
-int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
void *ethr_tsd_get(ethr_tsd_key);
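
/*
 * TSD sketch (assuming the four calls above, and a key already created
 * once with ethr_tsd_key_create()): each thread stores and fetches its
 * own pointer under that key. EINVAL assumes <errno.h> is available,
 * as it is when this header yields via sched_yield().
 */
static int tsd_roundtrip__(ethr_tsd_key key, void *value)
{
    int res = ethr_tsd_set(key, value);
    if (res != 0)
        return res;
    if (ethr_tsd_get(key) != value)
        return EINVAL;          /* would indicate a broken TSD impl */
    return 0;
}
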
-int ethr_gate_init(ethr_gate *);
-int ethr_gate_destroy(ethr_gate *);
-int ethr_gate_close(ethr_gate *);
-int ethr_gate_let_through(ethr_gate *, unsigned);
-int ethr_gate_wait(ethr_gate *);
-int ethr_gate_swait(ethr_gate *, int);
-
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#include <signal.h>
int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset);
@@ -935,534 +458,274 @@ int ethr_sigwait(const sigset_t *set, int *sig);
void ethr_compiler_barrier(void);
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_init(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_set(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
- *i = ethr_native_atomic_read(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ethr_native_atomic_add(var, incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = ethr_native_atomic_add_return(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- ethr_native_atomic_inc(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- ethr_native_atomic_dec(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_inc_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_dec_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_and_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_or_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = ethr_native_atomic_xchg(var, new);
- return 0;
-}
-
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_cmpxchg(var, new, expected);
- return 0;
-}
+#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
+typedef ethr_native_spinlock_t ethr_spinlock_t;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+typedef ethr_opt_spinlock_t ethr_spinlock_t;
+#elif defined(__WIN32__)
+typedef CRITICAL_SECTION ethr_spinlock_t;
+#else
+typedef pthread_mutex_t ethr_spinlock_t;
+#endif
-#endif /* ETHR_HAVE_NATIVE_ATOMICS */
+#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
+int ethr_spinlock_init(ethr_spinlock_t *);
+int ethr_spinlock_destroy(ethr_spinlock_t *);
+void ethr_spin_unlock(ethr_spinlock_t *);
+void ethr_spin_lock(ethr_spinlock_t *);
+#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spinlock_init(lock);
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_init((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ if (!InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION *) lock, INT_MAX))
+ return ethr_win_get_errno__();
+ return 0;
+#else
+ return pthread_mutex_init((pthread_mutex_t *) lock, NULL);
+#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
+ return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_destroy((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ DeleteCriticalSection((CRITICAL_SECTION *) lock);
return 0;
+#else
+ return pthread_mutex_destroy((pthread_mutex_t *) lock);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_unlock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_unlock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ LeaveCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_unlock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
- ethr_native_rwlock_init(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_lock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_lock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ EnterCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_lock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-#endif /* ETHR_HAVE_NATIVE_LOCKS */
-
#endif /* ETHR_TRY_INLINE_FUNCS */
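
/*
 * Spinlock sketch (assuming the inline functions above, and that
 * counter_lock__ was set up once with ethr_spinlock_init()): note the
 * changed contract, where lock/unlock now return void and report
 * failures through ETHR_FATAL_ERROR__ internally.
 */
static ethr_spinlock_t counter_lock__;
static long counter__ = 0;

static long bump_counter__(void)
{
    long val;
    ethr_spin_lock(&counter_lock__);
    val = ++counter__;
    ethr_spin_unlock(&counter_lock__);
    return val;
}
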
-/*
- * Fallbacks for atomics used in absence of optimized implementation.
- */
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-
-/*
- * ETHR_MEMORY_BARRIER orders between locked and atomic accesses only,
- * i.e. when this atomic fallback is used a noop is sufficient.
- */
-#define ETHR_MEMORY_BARRIER
+#include "ethr_atomics.h"
-#define ETHR_ATOMIC_ADDR_BITS 10
-#define ETHR_ATOMIC_ADDR_SHIFT 6
+typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
-typedef struct {
- union {
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- pthread_spinlock_t spnlck;
+#if defined(ETHR_WIN32_THREADS)
+# include "win/ethr_event.h"
#else
- ethr_mutex mtx;
-#endif
- char buf[ETHR_CACHE_LINE_SIZE];
- } u;
-} ethr_atomic_protection_t;
-
-extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.spnlck)
-
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- pthread_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = pthread_spin_lock(slp__); \
- if (res__ != 0) \
- return res__; \
- { EXPS; } \
- return pthread_spin_unlock(slp__); \
-} while (0)
-
-#else /* ethread mutex */
+# include "pthread/ethr_event.h"
+#endif
+
+int ethr_set_main_thr_status(int, int);
+int ethr_get_main_thr_status(int *);
+
+struct ethr_ts_event_ {
+ ethr_ts_event *next;
+ ethr_ts_event *prev;
+ ethr_event event;
+ void *udata;
+ ethr_atomic32_t uaflgs;
+ unsigned uflgs;
+ unsigned iflgs; /* for ethr lib only */
+ short rgix; /* for ethr lib only */
+ short mtix; /* for ethr lib only */
+};
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.mtx)
+#define ETHR_TS_EV_ETHREAD (((unsigned) 1) << 0)
+#define ETHR_TS_EV_INITED (((unsigned) 1) << 1)
+#define ETHR_TS_EV_TMP (((unsigned) 1) << 2)
+#define ETHR_TS_EV_MAIN_THR (((unsigned) 1) << 3)
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_mutex *mtxp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(mtxp__); \
- if (res__ != 0) \
- return res__; \
- { EXPS; } \
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(mtxp__); \
-} while (0)
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp);
+int ethr_free_ts_event__(ethr_ts_event *tsep);
+int ethr_make_ts_event__(ethr_ts_event **tsepp);
-#endif /* end ethread mutex */
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+ethr_ts_event *ethr_get_ts_event(void);
+void ethr_leave_ts_event(ethr_ts_event *);
+#endif
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_PTHREADS)
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
-}
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
-}
+extern pthread_key_t ethr_ts_event_key__;
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *i = (long) *var);
+ ethr_ts_event *tsep = pthread_getspecific(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_make_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *incp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *testp = (long) ++(*incp));
-}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *decp, long *testp)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, *testp = (long) --(*decp));
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *incp,
- long i,
- long *testp)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *incp += i; *testp = *incp);
-}
+#endif
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *incp)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, ++(*incp));
-}
+#elif defined(ETHR_WIN32_THREADS)
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *decp)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, --(*decp));
-}
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var &= mask);
-}
+extern DWORD ethr_ts_event_key__;
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var |= mask);
+ ethr_ts_event *tsep = TlsGetValue(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_get_tmp_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var = new);
-}
-
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(
- var,
- long old_val = *var;
- *old = old_val;
- if (__builtin_expect(old_val == expected, 1))
- *var = new;
- );
- return 0;
+ if (tsep->iflgs & ETHR_TS_EV_TMP) {
+ int res = ethr_free_ts_event__(tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
}
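
/*
 * Thread-specific event sketch (assuming the accessors above): fetch
 * the calling thread's ts_event, use its public udata slot, and hand
 * it back. On win32 a temporary event may be handed out, so every
 * ethr_get_ts_event() must be paired with ethr_leave_ts_event().
 */
static void with_ts_event__(void *tag)
{
    ethr_ts_event *tsep = ethr_get_ts_event();
    tsep->udata = tag;
    ethr_leave_ts_event(tsep);
}
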
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS */
-
-/*
- * Fallbacks for spin locks, and rw spin locks used in absence of
- * optimized implementation.
- */
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
-{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_init(&lock->spnlck, 0);
-#else
- return ethr_mutex_init(&lock->mtx);
#endif
-}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
-{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
-#else
- return ethr_mutex_destroy(&lock->mtx);
#endif
-}
+#include "ethr_mutex.h" /* Need atomic declarations and tse */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
-{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+typedef ethr_native_rwlock_t ethr_rwlock_t;
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(&lock->mtx);
+typedef ethr_rwmutex ethr_rwlock_t;
#endif
-}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
-{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_lock(&lock->spnlck);
-#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(&lock->mtx);
+#ifdef ETHR_NEED_RWSPINLOCK_PROTOTYPES__
+int ethr_rwlock_init(ethr_rwlock_t *);
+int ethr_rwlock_destroy(ethr_rwlock_t *);
+void ethr_read_unlock(ethr_rwlock_t *);
+void ethr_read_lock(ethr_rwlock_t *);
+void ethr_write_unlock(ethr_rwlock_t *);
+void ethr_write_lock(ethr_rwlock_t *);
#endif
-}
-#ifdef ETHR_USE_RWMTX_FALLBACK
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) X
-#else
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) ETHR_INLINE_FUNC_NAME_(X)
-#endif
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_rwlock_init(lock);
+ return 0;
#else
- return ethr_rwmutex_init(&lock->rwmtx);
+ return ethr_rwmutex_init_opt((ethr_rwmutex *) lock, NULL);
#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ return 0;
#else
- return ethr_rwmutex_destroy(&lock->rwmtx);
+ return ethr_rwmutex_destroy((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter--;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_runlock)(&lock->rwmtx);
+ ethr_rwmutex_runlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int locked = 0;
- do {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- if ((lock->counter & ETHR_RWLOCK_WRITERS) == 0) {
- lock->counter++;
- locked = 1;
- }
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- } while (!locked);
- return 0;
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rlock)(&lock->rwmtx);
+ ethr_rwmutex_rlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwunlock)(&lock->rwmtx);
+ ethr_rwmutex_rwunlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- while (1) {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter |= ETHR_RWLOCK_WRITERS;
- if (lock->counter == ETHR_RWLOCK_WRITERS)
- return 0;
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- }
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwlock)(&lock->rwmtx);
+ ethr_rwmutex_rwlock((ethr_rwmutex *) lock);
#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#if defined(ETHR_HAVE_OPTIMIZED_LOCKS) || defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-# define ETHR_HAVE_OPTIMIZED_SPINLOCK
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* #ifndef ETHREAD_H__ */
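
/*
 * rwlock sketch (assuming the inline functions above, and that
 * table_lock__ was set up once with ethr_rwlock_init()): any number of
 * readers may hold the lock concurrently, writers hold it exclusively,
 * and only init/destroy return error codes.
 */
static ethr_rwlock_t table_lock__;

static void *read_slot__(void **slotp)
{
    void *val;
    ethr_read_lock(&table_lock__);
    val = *slotp;
    ethr_read_unlock(&table_lock__);
    return val;
}

static void write_slot__(void **slotp, void *val)
{
    ethr_write_lock(&table_lock__);
    *slotp = val;
    ethr_write_unlock(&table_lock__);
}
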
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index c9fd87c2f6..f394d790d2 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -20,6 +20,21 @@
/* Define to the size of pointers */
#undef ETHR_SIZEOF_PTR
+/* Define to the size of int */
+#undef ETHR_SIZEOF_INT
+
+/* Define to the size of long */
+#undef ETHR_SIZEOF_LONG
+
+/* Define to the size of long long */
+#undef ETHR_SIZEOF_LONG_LONG
+
+/* Define to the size of __int64 */
+#undef ETHR_SIZEOF___INT64
+
+/* Define if bigendian */
+#undef ETHR_BIGENDIAN
+
/* Define if you want to disable native ethread implementations */
#undef ETHR_DISABLE_NATIVE_IMPLS
@@ -29,26 +44,54 @@
/* Define if you have pthreads */
#undef ETHR_PTHREADS
+/* Define if you need the <nptl/pthread.h> header file. */
+#undef ETHR_NEED_NPTL_PTHREAD_H
+
/* Define if you have the <pthread.h> header file. */
#undef ETHR_HAVE_PTHREAD_H
/* Define if the pthread.h header file is in pthread/mit directory. */
#undef ETHR_HAVE_MIT_PTHREAD_H
-/* Define if you have the pthread_mutexattr_settype function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE
+/* Define if you have the pthread_spin_lock function. */
+#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
-/* Define if you have the pthread_mutexattr_setkind_np function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP
+/* Define if you want to force usage of pthread rwlocks */
+#undef ETHR_FORCE_PTHREAD_RWLOCK
-/* Define if you have the pthread_atfork function. */
-#undef ETHR_HAVE_PTHREAD_ATFORK
+/* Define if you have the pthread_rwlockattr_setkind_np() function. */
+#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
-/* Define if you have the pthread_spin_lock function. */
-#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
+/* Define if you have the PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock
+ attribute. */
+#undef ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
-/* Define if you have a pthread_rwlock implementation that can be used */
-#undef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+/* Define if you have a linux futex implementation. */
+#undef ETHR_HAVE_LINUX_FUTEX
+
+/* Define if you have gcc atomic operations */
+#undef ETHR_HAVE_GCC_ATOMIC_OPS
+
+/* Define if you prefer gcc native ethread implementations */
+#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+
+/* Define if you have the <sched.h> header file. */
+#undef ETHR_HAVE_SCHED_H
+
+/* Define if you have the sched_yield() function. */
+#undef ETHR_HAVE_SCHED_YIELD
+
+/* Define if you have the pthread_yield() function. */
+#undef ETHR_HAVE_PTHREAD_YIELD
+
+/* Define if pthread_yield() returns an int. */
+#undef ETHR_PTHREAD_YIELD_RET_INT
+
+/* Define if sched_yield() returns an int. */
+#undef ETHR_SCHED_YIELD_RET_INT
+
+/* Define if you want compatibility with x86 processors before pentium4. */
+#undef ETHR_PRE_PENTIUM4_COMPAT
/* Define if you have the pthread_rwlockattr_setkind_np() function. */
#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
@@ -63,6 +106,36 @@
/* Define if you prefer gcc native ethread implementations */
#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+/* Define if you have libatomic_ops atomic operations */
+#undef ETHR_HAVE_LIBATOMIC_OPS
+
+/* Define if you prefer libatomic_ops native ethread implementations */
+#undef ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS
+
+/* Define to the size of AO_t if libatomic_ops is used */
+#undef ETHR_SIZEOF_AO_T
+
+/* Define if you have _InterlockedCompareExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+
+/* Define if you have _InterlockedDecrement64() */
+#undef ETHR_HAVE__INTERLOCKEDDECREMENT64
+
+/* Define if you have _InterlockedIncrement64() */
+#undef ETHR_HAVE__INTERLOCKEDINCREMENT64
+
+/* Define if you have _InterlockedExchangeAdd64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+
+/* Define if you have _InterlockedExchange64() */
+#undef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+
+/* Define if you have _InterlockedAnd64() */
+#undef ETHR_HAVE__INTERLOCKEDAND64
+
+/* Define if you have _InterlockedOr64() */
+#undef ETHR_HAVE__INTERLOCKEDOR64
+
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index 775030c8d5..16935084b1 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -22,24 +22,35 @@
* Author: Rickard Green
*/
-#ifndef ETHR_GCC_ATOMIC_H__
-#define ETHR_GCC_ATOMIC_H__
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_GCC_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_GCC_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_GCC_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_GCC_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
-#define ETHR_HAVE_NATIVE_ATOMICS 1
+#ifndef ETHR_GCC_ATOMIC_COMMON__
+#define ETHR_GCC_ATOMIC_COMMON__
-#define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-/* Enable immediate read/write on platforms where we know it is safe */
+#define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 0
#if defined(__i386__) || defined(__x86_64__) || defined(__sparc__) \
- || defined(__powerpc__) || defined(__ppc__)
-# undef ETHR_IMMED_ATOMIC_SET_GET_SAFE__
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+ || defined(__powerpc__) || defined(__ppc__) || defined(__mips__)
+# undef ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
+# define ETHR_READ_AND_SET_WITHOUT_SYNC_OP__ 1
#endif
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
-
+#if defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
/*
* According to the documentation this is what we want:
@@ -47,28 +58,73 @@ typedef struct {
* However, __sync_synchronize() is known to erroneously be
* a noop on at least some platforms with some gcc versions.
 * This has supposedly been fixed in some gcc version, but we
- * don't know from which version. Therefore, we use the
- * workaround implemented below on all gcc versions.
+ * don't know from which version. Therefore, we only use
+ * it when it has been verified to work. Otherwise
+ * we use a workaround.
*/
+#if defined(__mips__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
+/* __sync_synchronize() has been verified to work here */
+#define ETHR_MEMORY_BARRIER __sync_synchronize()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __sync_synchronize()
+#elif defined(__x86_64__) || (defined(__i386__) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+/* Use fence instructions directly instead of workaround */
+#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
+#else
+/* Workaround */
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
- (void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
+ volatile ethr_sint32_t x___ = 0; \
+ (void) __sync_val_compare_and_swap(&x___, (ethr_sint32_t) 0, (ethr_sint32_t) 1); \
} while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+
+#define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+
+#endif /* ETHR_GCC_ATOMIC_COMMON__ */
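
/*
 * Why a full barrier is needed here (hypothetical Dekker-style
 * handshake, assuming the ETHR_MEMORY_BARRIER definition above): each
 * thread must have its flag store globally visible before it loads the
 * other thread's flag, an ordering that only a full store-load barrier
 * (mfence, a verified __sync_synchronize(), or the CAS workaround)
 * provides.
 */
static volatile ethr_sint32_t flag0__ = 0, flag1__ = 0;

static int thread0_may_enter__(void)
{
    flag0__ = 1;
    ETHR_MEMORY_BARRIER;        /* store flag0__ before load of flag1__ */
    return flag1__ == 0;
}
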
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#else
+#error "Unsupported integer size"
+#endif
+
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
-#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
var->counter = value;
#else
/*
* Unfortunately no __sync_store() or similar exist in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- long act = 0, exp;
+ ETHR_AINT_T__ act = 0, exp;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, value);
@@ -76,80 +132,86 @@ ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
#endif
}
-#define ethr_native_atomic_init ethr_native_atomic_set
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ ETHR_NATMC_FUNC__(set)(var, value);
+}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+#if ETHR_READ_AND_SET_WITHOUT_SYNC_OP__
return var->counter;
#else
/*
* Unfortunately no __sync_fetch() or similar exist in the gcc atomic
* op interface. We therefore have to simulate it this way...
*/
- return __sync_add_and_fetch(&var->counter, (long) 0);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
#endif
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
(void) __sync_add_and_fetch(&var->counter, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
return __sync_add_and_fetch(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void) __sync_add_and_fetch(&var->counter, (long) 1);
+ (void) __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void) __sync_sub_and_fetch(&var->counter, (long) 1);
+ (void) __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return __sync_add_and_fetch(&var->counter, (long) 1);
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return __sync_sub_and_fetch(&var->counter, (long) 1);
+ return __sync_sub_and_fetch(&var->counter, (ETHR_AINT_T__) 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
return __sync_fetch_and_and(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- return (long) __sync_fetch_and_or(&var->counter, mask);
+ return (ETHR_AINT_T__) __sync_fetch_and_or(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
return __sync_val_compare_and_swap(&var->counter, old, new);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
{
- long exp, act = 0;
+ ETHR_AINT_T__ exp, act = 0;
do {
exp = act;
act = __sync_val_compare_and_swap(&var->counter, exp, new);
@@ -157,8 +219,72 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
return act;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
+{
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_AINT_T__ val = var->counter;
+ ETHR_COMPILER_BARRIER;
+ return val;
+#else
+ return __sync_add_and_fetch(&var->counter, (ETHR_AINT_T__) 0);
#endif
+}
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->counter = i;
+#else
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
#endif
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(inc_return)(var);
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_NATMC_FUNC__(dec)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(dec_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
+
+#endif
+
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
#endif
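
/*
 * Barrier-variant sketch, using the 32-bit expansion of the templates
 * above (i.e. ethr_native_atomic32_set_relb() and
 * ethr_native_atomic32_read_acqb(); ready__ is assumed to have been
 * prepared with ethr_native_atomic32_init()): a release store that
 * publishes data and an acquire read that consumes it. On x86/x86_64
 * both reduce to a plain memory access plus a compiler barrier.
 */
static ethr_native_atomic32_t ready__;
static int payload__;

static void produce__(int val)
{
    payload__ = val;                            /* ordinary store... */
    ethr_native_atomic32_set_relb(&ready__, 1); /* ...released by this */
}

static int consume__(void)
{
    while (!ethr_native_atomic32_read_acqb(&ready__))
        ;                                       /* acquire on success */
    return payload__;
}
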
diff --git a/erts/include/internal/gcc/ethread.h b/erts/include/internal/gcc/ethread.h
index bb378e31e0..392a1aa2b2 100644
--- a/erts/include/internal/gcc/ethread.h
+++ b/erts/include/internal/gcc/ethread.h
@@ -25,6 +25,16 @@
#ifndef ETHREAD_GCC_H__
#define ETHREAD_GCC_H__
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_GCC_ATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "ethr_atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
+
+#endif
#endif
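
/*
 * What the two parameterized inclusions above expand to (sketch): each
 * pass rebinds ETHR_NATMC_FUNC__, ETHR_ATMC_T__, and ETHR_AINT_T__, so
 * the one set of templates in ethr_atomic.h yields a complete function
 * family per word size, e.g.:
 *
 *   void ethr_native_atomic32_inc(ethr_native_atomic32_t *var);
 *   void ethr_native_atomic64_inc(ethr_native_atomic64_t *var);
 *
 * with the 64-bit family only present when ETHR_SIZEOF_PTR == 8.
 */
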
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 90b4c5f773..4e402f261a 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -23,143 +23,265 @@
*
* This code requires a 486 or newer processor.
*/
-#ifndef ETHREAD_I386_ATOMIC_H
-#define ETHREAD_I386_ATOMIC_H
-/* An atomic is an aligned long accessed via locked operations.
- */
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_X86_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_X86_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_X86_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_X86_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
-#ifdef __x86_64__
+#ifndef ETHR_X86_ATOMIC_COMMON__
+#define ETHR_X86_ATOMIC_COMMON__
+
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
+
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
#else
#define ETHR_MEMORY_BARRIER \
do { \
- volatile long x___ = 0; \
+ volatile ethr_sint32_t x___ = 0; \
__asm__ __volatile__("lock; incl %0" : "=m"(x___) : "m"(x___) : "memory"); \
} while (0)
#endif
-#ifdef ETHR_TRY_INLINE_FUNCS
+#endif /* ETHR_X86_ATOMIC_COMMON__ */
-#ifdef __x86_64__
-#define LONG_SUFFIX "q"
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
#else
-#define LONG_SUFFIX "l"
+#error "Unsupported integer size"
#endif
+/* An atomic is an aligned ETHR_AINT_T__ accessed via locked operations.
+ */
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
__asm__ __volatile__(
- "lock; add" LONG_SUFFIX " %1, %0"
+ "lock; add" ETHR_AINT_SUFFIX__ " %1, %0"
: "=m"(var->counter)
: "ir"(incr), "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; inc" LONG_SUFFIX " %0"
+ "lock; inc" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
__asm__ __volatile__(
- "lock; dec" LONG_SUFFIX " %0"
+ "lock; dec" ETHR_AINT_SUFFIX__ " %0"
: "=m"(var->counter)
: "m"(var->counter));
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long tmp;
+ ETHR_AINT_T__ tmp;
tmp = incr;
__asm__ __volatile__(
- "lock; xadd" LONG_SUFFIX " %0, %1" /* xadd didn't exist prior to the 486 */
+ "lock; xadd" ETHR_AINT_SUFFIX__ " %0, %1" /* xadd didn't exist prior to the 486 */
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
return tmp + incr;
}
-#define ethr_native_atomic_inc_return(var) ethr_native_atomic_add_return((var), 1)
-#define ethr_native_atomic_dec_return(var) ethr_native_atomic_add_return((var), -1)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) 1);
+}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_NATMC_FUNC__(add_return)(var, (ETHR_AINT_T__) -1);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
{
__asm__ __volatile__(
- "lock; cmpxchg" LONG_SUFFIX " %2, %3"
+ "lock; cmpxchg" ETHR_AINT_SUFFIX__ " %2, %3"
: "=a"(old), "=m"(var->counter)
: "r"(new), "m"(var->counter), "0"(old)
: "cc", "memory"); /* full memory clobber to make this a compiler barrier */
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp & mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp & mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long tmp, old;
+ ETHR_AINT_T__ tmp, old;
tmp = var->counter;
do {
old = tmp;
- tmp = ethr_native_atomic_cmpxchg(var, tmp | mask, tmp);
+ tmp = ETHR_NATMC_FUNC__(cmpxchg)(var, tmp | mask, tmp);
} while (__builtin_expect(tmp != old, 0));
/* now tmp is the atomic's previous value */
return tmp;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long tmp = val;
+ ETHR_AINT_T__ tmp = val;
__asm__ __volatile__(
- "xchg" LONG_SUFFIX " %0, %1"
+ "xchg" ETHR_AINT_SUFFIX__ " %0, %1"
: "=r"(tmp)
: "m"(var->counter), "0"(tmp));
/* now tmp is the atomic's previous value */
return tmp;
}
-#undef LONG_SUFFIX
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_AINT_T__ val;
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+ val = var->counter;
+#else
+ val = ETHR_NATMC_FUNC__(add_return)(var, 0);
+#endif
+ __asm__ __volatile__("" : : : "memory");
+ return val;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ __asm__ __volatile__("" : : : "memory");
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+ var->counter = i;
+#else
+ (void) ETHR_NATMC_FUNC__(xchg)(var, i);
+#endif
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
+ __asm__ __volatile__("" : : : "memory");
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ __asm__ __volatile__("" : : : "memory");
+ ETHR_NATMC_FUNC__(dec)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+ __asm__ __volatile__("" : : : "memory");
+ return ETHR_NATMC_FUNC__(dec_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHREAD_I386_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_AINT_SUFFIX__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
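
For readers unfamiliar with the xadd idiom the functions above build on: lock; xadd atomically adds the source register into memory and leaves the previous memory value in that register, which is why add_return() returns tmp + incr. A standalone sketch, not part of the ethread API:

#include <stdint.h>

/* Atomically add incr to *p and return the value *before* the add. */
static inline int32_t fetch_add32(volatile int32_t *p, int32_t incr)
{
    int32_t prev = incr;
    __asm__ __volatile__("lock; xaddl %0, %1"
                         : "+r"(prev), "+m"(*p)
                         :
                         : "memory");
    return prev;        /* prev + incr would be the "add_return" flavor */
}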
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index fad8b108fa..b5a17caefb 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,11 +24,17 @@
#ifndef ETHREAD_I386_ETHREAD_H
#define ETHREAD_I386_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_I386_ETHREAD_H */

diff --git a/erts/include/internal/i386/rwlock.h b/erts/include/internal/i386/rwlock.h
index c009be8ef1..be47f459ce 100644
--- a/erts/include/internal/i386/rwlock.h
+++ b/erts/include/internal/i386/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#define ETHR_RWLOCK_OFFSET (1<<24)
diff --git a/erts/include/internal/i386/spinlock.h b/erts/include/internal/i386/spinlock.h
index 2b4832e26a..0325324895 100644
--- a/erts/include/internal/i386/spinlock.h
+++ b/erts/include/internal/i386/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
@@ -46,7 +46,7 @@ ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
* On i386 this needs to be a locked operation
* to avoid Pentium Pro errata 66 and 92.
*/
-#if defined(__x86_64__)
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
__asm__ __volatile__("" : : : "memory");
*(unsigned char*)&lock->lock = 0;
#else
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
new file mode 100644
index 0000000000..d56693dbf8
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -0,0 +1,350 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_ATOMIC_H__
+#define ETHR_LIBATOMIC_OPS_ATOMIC_H__
+
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if (defined(__i386__) && !defined(ETHR_PRE_PENTIUM4_COMPAT)) \
+ || defined(__x86_64__)
+#define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#include "atomic_ops.h"
+
+/*
+ * libatomic_ops can be downloaded from:
+ * http://www.hpl.hp.com/research/linux/atomic_ops/
+ *
+ * These operations need to be defined by libatomic_ops;
+ * otherwise, we won't compile:
+ * - AO_nop_full()
+ * - AO_load()
+ * - AO_store()
+ * - AO_compare_and_swap()
+ *
+ * The `AO_t' type also has to be at least as large as the `void *' type.
+ */
+
+#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
+#error The AO_t type is too small
+#endif
+
+#if ETHR_SIZEOF_AO_T == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_AINT_SUFFIX__ "l"
+#elif ETHR_SIZEOF_AO_T == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_AINT_SUFFIX__ "q"
+#else
+#error "Unsupported integer size"
+#endif
+
+#if ETHR_SIZEOF_AO_T == 8
+typedef union {
+ volatile AO_t counter;
+ ethr_sint32_t sint32[2];
+} ETHR_ATMC_T__;
+#else
+typedef struct {
+ volatile AO_t counter;
+} ETHR_ATMC_T__;
+#endif
+
+#define ETHR_MEMORY_BARRIER AO_nop_full()
+#ifdef AO_HAVE_nop_write
+# define ETHR_WRITE_MEMORY_BARRIER AO_nop_write()
+#else
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_HAVE_nop_read
+# define ETHR_READ_MEMORY_BARRIER AO_nop_read()
+#else
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_NO_DD_ORDERING
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#else
+# define ETHR_READ_DEPEND_MEMORY_BARRIER AO_compiler_barrier()
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+#if ETHR_SIZEOF_AO_T == 8
+/*
+ * We also need to provide an ethr_native_atomic32_addr(), since
+ * this 64-bit implementation will be used to implement 32-bit
+ * native atomics.
+ */
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ETHR_ATMC_T__ *var)
+{
+ ETHR_ASSERT(((void *) &var->sint32[0]) == ((void *) &var->counter));
+#ifdef ETHR_BIGENDIAN
+ return &var->sint32[1];
+#else
+ return &var->sint32[0];
+#endif
+}
+
+#endif /* ETHR_SIZEOF_AO_T == 8 */
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ AO_store(&var->counter, (AO_t) value);
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ ETHR_NATMC_FUNC__(set)(var, value);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__) AO_load(&var->counter);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+#ifdef AO_HAVE_fetch_and_add
+ return ((ETHR_AINT_T__) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+#else
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp + (AO_t) incr;
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (ETHR_AINT_T__) new;
+ }
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ (void) ETHR_NATMC_FUNC__(add_return)(var, incr);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
+{
+#ifdef AO_HAVE_fetch_and_add1
+ return ((ETHR_AINT_T__) AO_fetch_and_add1(&var->counter)) + 1;
+#else
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
+{
+ (void) ETHR_NATMC_FUNC__(inc_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1(&var->counter)) - 1;
+#else
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
+{
+ (void) ETHR_NATMC_FUNC__(dec_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp & ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (ETHR_AINT_T__) exp;
+ }
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp | ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (ETHR_AINT_T__) exp;
+ }
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
+ return (ETHR_AINT_T__) exp;
+ }
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
+{
+#ifdef AO_HAVE_load_acquire
+ return (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#else
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+#ifdef AO_HAVE_fetch_and_add1_acquire
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+#else
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(add_return)(var, 1);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+#ifdef AO_HAVE_store_release
+ AO_store_release(&var->counter, (AO_t) value);
+#else
+ ETHR_MEMORY_BARRIER;
+ ETHR_NATMC_FUNC__(set)(var, value);
+#endif
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1_release
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_release(&var->counter)) - 1;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ETHR_NATMC_FUNC__(dec_return)(var);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ (void) ETHR_NATMC_FUNC__(dec_return_relb)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#ifdef AO_HAVE_compare_and_swap_acquire
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+ } while (act == exp);
+ AO_nop_full();
+ return act;
+#else
+ ETHR_AINT_T__ act = ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
+ ETHR_MEMORY_BARRIER;
+ return act;
+#endif
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#ifdef AO_HAVE_compare_and_swap_release
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, exp);
+#endif
+}
+
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+
+#endif /* !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS) */
+
+#endif /* ETHR_LIBATOMIC_OPS_ATOMIC_H__ */
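
The fallback branches above all share one shape: an optimistic read followed by a compare-and-swap retry loop built from the two mandatory primitives. A standalone sketch using only AO_load() and AO_compare_and_swap():

#include <atomic_ops.h>

/* add_return built from the mandatory primitives alone. */
static AO_t cas_add_return(volatile AO_t *p, AO_t incr)
{
    for (;;) {
        AO_t expected = AO_load(p);
        AO_t desired  = expected + incr;
        /* AO_compare_and_swap() returns non-zero on success. */
        if (AO_compare_and_swap(p, expected, desired))
            return desired;
        /* Another thread won the race; reload and retry. */
    }
}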
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
new file mode 100644
index 0000000000..ee73ba73bc
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_LIBATOMIC_OPS_H__
+#define ETHREAD_LIBATOMIC_OPS_H__
+
+#include "ethr_atomic.h"
+
+#endif
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index 105d874995..522f433649 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -28,31 +28,39 @@
#ifndef ETHREAD_PPC_ATOMIC_H
#define ETHREAD_PPC_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
typedef struct {
- volatile int counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+#define ethr_native_atomic32_set(v, i) ethr_native_atomic32_init((v), (i))
-static ETHR_INLINE int
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
-static ETHR_INLINE int
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -69,16 +77,16 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, int incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ethr_native_atomic32_add_return(var, incr);
}
-static ETHR_INLINE int
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -95,16 +103,16 @@ ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_inc_return(var);
+ (void)ethr_native_atomic32_inc_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -121,16 +129,16 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
/* XXX: could use weaker version here w/o eieio+isync */
- (void)ethr_native_atomic_dec_return(var);
+ (void)ethr_native_atomic32_dec_return(var);
}
-static ETHR_INLINE int
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -146,10 +154,10 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- int old, new;
+ ethr_sint32_t old, new;
__asm__ __volatile__(
"eieio\n\t"
@@ -165,10 +173,10 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
return old;
}
-static ETHR_INLINE int
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
- int tmp;
+ ethr_sint32_t tmp;
__asm__ __volatile__(
"eieio\n\t"
@@ -183,10 +191,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
return tmp;
}
-static ETHR_INLINE int
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
- int old;
+ ethr_sint32_t old;
__asm__ __volatile__(
"eieio\n\t"
@@ -205,6 +215,26 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
return old;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
+{
+ ethr_sint32_t res = ethr_native_atomic32_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+#define ethr_native_atomic32_set_relb ethr_native_atomic32_xchg
+#define ethr_native_atomic32_inc_return_acqb ethr_native_atomic32_inc_return
+#define ethr_native_atomic32_dec_relb ethr_native_atomic32_dec_return
+#define ethr_native_atomic32_dec_return_relb ethr_native_atomic32_dec_return
+
+#define ethr_native_atomic32_cmpxchg_acqb ethr_native_atomic32_cmpxchg
+#define ethr_native_atomic32_cmpxchg_relb ethr_native_atomic32_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_PPC_ATOMIC_H */
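
The asm bodies elided by the diff context above follow the classic PowerPC load-reserve/store-conditional loop, framed by the visible eieio/isync barriers. A sketch of what an add_return body of this style looks like (reconstructed from the barrier framing, not copied from the elided lines):

static ETHR_INLINE ethr_sint32_t
ll_sc_add_return(volatile ethr_sint32_t *p, ethr_sint32_t incr)
{
    ethr_sint32_t tmp;
    __asm__ __volatile__(
        "eieio\n\t"           /* release: order prior memory ops        */
        "1:\n\t"
        "lwarx  %0,0,%1\n\t"  /* load word and set a reservation        */
        "add    %0,%0,%2\n\t"
        "stwcx. %0,0,%1\n\t"  /* store iff the reservation still holds  */
        "bne-   1b\n\t"       /* reservation lost: retry                */
        "isync"               /* acquire: discard speculative execution */
        : "=&r"(tmp)
        : "r"(p), "r"(incr)
        : "cc", "memory");
    return tmp;
}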
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
index d2a72c3dc1..3b619e9d01 100644
--- a/erts/include/internal/ppc32/ethread.h
+++ b/erts/include/internal/ppc32/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2011. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
index 9bdab12826..19ec26ab68 100644
--- a/erts/include/internal/ppc32/rwlock.h
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
index 034c20c143..c8460a3e8a 100644
--- a/erts/include/internal/ppc32/spinlock.h
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
new file mode 100644
index 0000000000..4c29b28536
--- /dev/null
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -0,0 +1,135 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#if defined(ETHR_HAVE_LINUX_FUTEX) && defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Linux futex implementation of ethread events ------------------------- */
+#define ETHR_LINUX_FUTEX_IMPL__
+
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/time.h>
+
+#define ETHR_EVENT_OFF_WAITER__ ((ethr_sint32_t) -1)
+#define ETHR_EVENT_OFF__ ((ethr_sint32_t) 1)
+#define ETHR_EVENT_ON__ ((ethr_sint32_t) 0)
+
+#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE_PRIVATE
+#else
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE
+#endif
+
+typedef struct {
+ ethr_atomic32_t futex;
+} ethr_event;
+
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, \
+ (void *) ethr_atomic32_addr((FTX)), \
+ (OP), \
+ (int) (VAL), \
+ NULL, \
+ NULL, \
+ 0) \
+ ? errno : 0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ ethr_sint32_t val;
+ ETHR_MEMORY_BARRIER;
+ val = ethr_atomic32_xchg(&e->futex, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic32_set(&e->futex, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+typedef struct {
+ ethr_atomic32_t state;
+ pthread_mutex_t mtx;
+ pthread_cond_t cnd;
+} ethr_event;
+
+#define ETHR_EVENT_OFF_WAITER__ -1L
+#define ETHR_EVENT_OFF__ 1L
+#define ETHR_EVENT_ON__ 0L
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ ethr_sint32_t val;
+ ETHR_MEMORY_BARRIER;
+ val = ethr_atomic32_xchg(&e->state, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_cond_signal(&e->cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_mutex_unlock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic32_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
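
The matching wait side is implemented outside this header; roughly, a waiter flags its presence by moving the state from ETHR_EVENT_OFF__ to ETHR_EVENT_OFF_WAITER__, so that ethr_event_set() knows a futex wake is needed, and then sleeps in the futex. A simplified, hypothetical sketch (the real ethr_event_wait() also spins and handles timeouts):

#include <errno.h>

static int simple_event_wait(ethr_event *e)
{
    int res;
    ethr_sint32_t state = ethr_atomic32_read(&e->futex);
    while (state != ETHR_EVENT_ON__) {
        if (state == ETHR_EVENT_OFF__) {
            /* Announce ourselves so set() issues a futex wake. */
            state = ethr_atomic32_cmpxchg(&e->futex,
                                          ETHR_EVENT_OFF_WAITER__,
                                          ETHR_EVENT_OFF__);
            if (state != ETHR_EVENT_OFF__)
                continue;               /* state changed under us      */
        }
        res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__,
                           ETHR_EVENT_OFF_WAITER__);
        if (res != 0 && res != EINTR && res != EWOULDBLOCK)
            return res;                 /* genuine failure             */
        state = ethr_atomic32_read(&e->futex);
    }
    return 0;
}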
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 8fde449a52..00380dbf07 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -21,49 +21,86 @@
* Native ethread atomics on SPARC V9.
* Author: Mikael Pettersson.
*/
-#ifndef ETHR_SPARC32_ATOMIC_H
-#define ETHR_SPARC32_ATOMIC_H
-typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_SPARC_V9_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_SPARC_V9_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_SPARC_V9_ATOMIC64_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#ifndef ETHR_SPARC_V9_ATOMIC_COMMON__
+#define ETHR_SPARC_V9_ATOMIC_COMMON__
#define ETHR_MEMORY_BARRIER \
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
: : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#endif /* ETHR_SPARC_V9_ATOMIC_COMMON__ */
-#if defined(__arch64__)
-#define CASX "casx"
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+#define ETHR_CAS__ "cas"
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+#define ETHR_CAS__ "casx"
#else
-#define CASX "cas"
+#error "Unsupported integer size"
#endif
+typedef struct {
+ volatile ETHR_AINT_T__ counter;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->counter;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ var->counter = i;
+}
+
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
{
var->counter = i;
}
-#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
{
return var->counter;
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old+incr;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -73,46 +110,46 @@ ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
{
- (void)ethr_native_atomic_add_return(var, incr);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, incr);
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ETHR_NATMC_FUNC__(add_return)(var, 1);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, 1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ETHR_NATMC_FUNC__(add_return)(var, -1);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
{
- (void)ethr_native_atomic_add_return(var, -1);
+ (void)ETHR_NATMC_FUNC__(add_return)(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old & mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -121,17 +158,17 @@ ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
{
- long old, tmp;
+ ETHR_AINT_T__ old, tmp;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
do {
old = var->counter;
tmp = old | mask;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(tmp)
: "r"(old), "r"(&var->counter), "0"(tmp)
: "memory");
@@ -140,17 +177,17 @@ ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ val)
{
- long old, new;
+ ETHR_AINT_T__ old, new;
__asm__ __volatile__("membar #LoadLoad|#StoreLoad");
do {
old = var->counter;
new = val;
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -159,12 +196,12 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return old;
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
{
__asm__ __volatile__("membar #LoadLoad|#StoreLoad\n");
__asm__ __volatile__(
- CASX " [%2], %1, %0"
+ ETHR_CAS__ " [%2], %1, %0"
: "=&r"(new)
: "r"(old), "r"(&var->counter), "0"(new)
: "memory");
@@ -172,6 +209,69 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
return new;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+/* TODO: relax acquire barriers */
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(read)(var);
+ __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore" : : : "memory");
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
+ ETHR_NATMC_FUNC__(set)(var, i);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(inc_return)(var);
+ __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
+ ETHR_NATMC_FUNC__(dec)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
+ return ETHR_NATMC_FUNC__(dec_return)(var);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
+{
+ ETHR_AINT_T__ res = ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+ __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
+ return res;
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new, ETHR_AINT_T__ old)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
+ return ETHR_NATMC_FUNC__(cmpxchg)(var, new, old);
+}
+
#endif /* ETHR_TRY_INLINE_FUNCS */
-#endif /* ETHR_SPARC32_ATOMIC_H */
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_CAS__
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
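
The membar choices above encode the usual acquire/release mapping on SPARC V9: the release barrier sits before the store (#LoadStore|#StoreStore), while the acquire side currently issues a full membar after the load (see the TODO about relaxing it). A toy message-passing sketch of how the _relb/_acqb pair is meant to be used (illustrative only, 32-bit flavor):

static int payload;
static ethr_native_atomic32_t ready;

static void producer(void)
{
    payload = 42;
    /* release: the payload store may not sink below the flag store */
    ethr_native_atomic32_set_relb(&ready, 1);
}

static int consumer(void)
{
    while (!ethr_native_atomic32_read_acqb(&ready))
        ; /* acquire: the payload load may not hoist above the flag load */
    return payload; /* guaranteed to observe 42 */
}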
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index 1d55399640..aea9794390 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -24,11 +24,17 @@
#ifndef ETHREAD_SPARC32_ETHREAD_H
#define ETHREAD_SPARC32_ETHREAD_H
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
#include "atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "atomic.h"
+#endif
#include "spinlock.h"
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_SPARC32_ETHREAD_H */
diff --git a/erts/include/internal/sparc32/rwlock.h b/erts/include/internal/sparc32/rwlock.h
index 12448e0b06..465ec96866 100644
--- a/erts/include/internal/sparc32/rwlock.h
+++ b/erts/include/internal/sparc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/sparc32/spinlock.h b/erts/include/internal/sparc32/spinlock.h
index b4fe48b714..493d514210 100644
--- a/erts/include/internal/sparc32/spinlock.h
+++ b/erts/include/internal/sparc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile unsigned char lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 5e4c7ac9fe..48e4c0c6c8 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -24,107 +24,164 @@
#ifndef ETHREAD_TILE_ATOMIC_H
#define ETHREAD_TILE_ATOMIC_H
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
#include <atomic.h>
/* An atomic is an aligned int accessed via locked operations.
*/
typedef struct {
- volatile long counter;
-} ethr_native_atomic_t;
+ volatile ethr_sint32_t counter;
+} ethr_native_atomic32_t;
#define ETHR_MEMORY_BARRIER __insn_mf()
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ethr_sint32_t *
+ethr_native_atomic32_addr(ethr_native_atomic32_t *var)
+{
+ return (ethr_sint32_t *) &var->counter;
+}
static ETHR_INLINE void
-ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_init(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
var->counter = i;
}
static ETHR_INLINE void
-ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+ethr_native_atomic32_set(ethr_native_atomic32_t *var, ethr_sint32_t i)
{
- __insn_mf();
atomic_exchange_acq(&var->counter, i);
}
-static ETHR_INLINE long
-ethr_native_atomic_read(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read(ethr_native_atomic32_t *var)
{
return var->counter;
}
static ETHR_INLINE void
-ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+ethr_native_atomic32_add(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- __insn_mf();
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
-ethr_native_atomic_inc(ethr_native_atomic_t *var)
+ethr_native_atomic32_inc(ethr_native_atomic32_t *var)
{
- __insn_mf();
atomic_increment(&var->counter);
}
static ETHR_INLINE void
-ethr_native_atomic_dec(ethr_native_atomic_t *var)
+ethr_native_atomic32_dec(ethr_native_atomic32_t *var)
{
- __insn_mf();
atomic_decrement(&var->counter);
}
-static ETHR_INLINE long
-ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return(ethr_native_atomic32_t *var, ethr_sint32_t incr)
{
- __insn_mf();
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
-static ETHR_INLINE long
-ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, 1);
+ return ethr_native_atomic32_add_return(var, 1);
}
-static ETHR_INLINE long
-ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return(ethr_native_atomic32_t *var)
{
- return ethr_native_atomic_add_return(var, -1);
+ return ethr_native_atomic32_add_return(var, -1);
}
-static ETHR_INLINE long
-ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_and_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold(ethr_native_atomic32_t *var, ethr_sint32_t mask)
{
- __insn_mf();
return atomic_or_val(&var->counter, mask);
}
-static ETHR_INLINE long
-ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg(ethr_native_atomic32_t *var, ethr_sint32_t val)
{
- __insn_mf();
return atomic_exchange_acq(&var->counter, val);
}
-static ETHR_INLINE long
-ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_read_acqb(ethr_native_atomic32_t *var)
+{
+ ethr_sint32_t res = ethr_native_atomic32_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
+{
+ ethr_sint32_t res = ethr_native_atomic32_inc_return(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic32_set_relb(ethr_native_atomic32_t *var, ethr_sint32_t val)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic32_set(var, val);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic32_dec_relb(ethr_native_atomic32_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic32_dec(var);
+}
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return_relb(ethr_native_atomic32_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic32_dec_return(var);
+}
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_acqb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
+}
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_relb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t exp)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic32_cmpxchg(var, new, exp);
+}
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_TILE_ATOMIC_H */
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
new file mode 100644
index 0000000000..60def01a7e
--- /dev/null
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -0,0 +1,415 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#undef ETHR_INCLUDE_ATOMIC_IMPL__
+#if !defined(ETHR_WIN_ATOMIC32_H__) && defined(ETHR_ATOMIC_WANT_32BIT_IMPL__)
+#define ETHR_WIN_ATOMIC32_H__
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 4
+#undef ETHR_ATOMIC_WANT_32BIT_IMPL__
+#elif !defined(ETHR_WIN_ATOMIC64_H__) && defined(ETHR_ATOMIC_WANT_64BIT_IMPL__)
+#define ETHR_WIN_ATOMIC64_H__
+#ifdef ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE64
+/* _InterlockedCompareExchange64() required... */
+#define ETHR_INCLUDE_ATOMIC_IMPL__ 8
+#endif
+#undef ETHR_ATOMIC_WANT_64BIT_IMPL__
+#endif
+
+#ifdef ETHR_INCLUDE_ATOMIC_IMPL__
+
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+
+#ifndef ETHR_WIN_ATOMIC_COMMON__
+#define ETHR_WIN_ATOMIC_COMMON__
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if defined(_M_IX86) || defined(_M_AMD64) || defined(_M_IA64)
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 1
+#else
+# define ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__ 0
+#endif
+
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 1
+#else
+# define ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__ 0
+#endif
+/*
+ * No configure test checking for the interlocked acquire/release
+ * versions has been written yet. Such a test should define
+ * ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS if, and
+ * only if, all interlocked operations with barriers that are
+ * used actually exist.
+ *
+ * Note that these are purely optimizations for the Itanium
+ * processor.
+ */
+
+#include <intrin.h>
+#undef ETHR_COMPILER_BARRIER
+#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#if defined(_M_AMD64) || (defined(_M_IX86) \
+ && !defined(ETHR_PRE_PENTIUM4_COMPAT))
+#include <emmintrin.h>
+#include <mmintrin.h>
+#pragma intrinsic(_mm_mfence)
+#define ETHR_MEMORY_BARRIER _mm_mfence()
+#pragma intrinsic(_mm_sfence)
+#define ETHR_WRITE_MEMORY_BARRIER _mm_sfence()
+#pragma intrinsic(_mm_lfence)
+#define ETHR_READ_MEMORY_BARRIER _mm_lfence()
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+
+#else
+
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile long x___ = 0; \
+ _InterlockedCompareExchange(&x___, (long) 1, (long) 0); \
+} while (0)
+
+#endif
+
+#endif /* ETHR_WIN_ATOMIC_COMMON__ */
+
+#if ETHR_INCLUDE_ATOMIC_IMPL__ == 4
+
+#define ETHR_HAVE_NATIVE_ATOMIC32 1
+
+/*
+ * All operations used are available as 32-bit intrinsics.
+ */
+
+#pragma intrinsic(_InterlockedDecrement)
+#pragma intrinsic(_InterlockedIncrement)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd_acq)
+#pragma intrinsic(_InterlockedIncrement_acq)
+#pragma intrinsic(_InterlockedDecrement_rel)
+#pragma intrinsic(_InterlockedCompareExchange_acq)
+#pragma intrinsic(_InterlockedCompareExchange_rel)
+#endif
+
+#define ETHR_ILCKD__(X) _Interlocked ## X
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## _acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## _rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic32_t
+#define ETHR_AINT_T__ ethr_sint32_t
+
+#elif ETHR_INCLUDE_ATOMIC_IMPL__ == 8
+
+#define ETHR_HAVE_NATIVE_ATOMIC64 1
+
+/*
+ * _InterlockedCompareExchange64() is required. The others may be
+ * missing; if so, they are generated from it below.
+ */
+#pragma intrinsic(_InterlockedCompareExchange64)
+
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) *(PTR)
+#else
+#define ETHR_OWN_ILCKD_INIT_VAL__(PTR) (__int64) 0
+#endif
+
+#define ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, PTR, NEW, ACT, EXP, OPS, RET) \
+{ \
+ __int64 NEW, ACT, EXP; \
+ ACT = ETHR_OWN_ILCKD_INIT_VAL__(PTR); \
+ do { \
+ EXP = ACT; \
+ { OPS; } \
+ ACT = _InterlockedCompareExchange64(PTR, NEW, EXP); \
+ } while (ACT != EXP); \
+ return RET; \
+}
+
+#define ETHR_OWN_ILCKD_1_IMPL__(FUNC, NEW, ACT, EXP, OPS, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+#define ETHR_OWN_ILCKD_2_IMPL__(FUNC, NEW, ACT, EXP, OPS, ARG, RET) \
+static __forceinline __int64 \
+FUNC(__int64 volatile *ptr, __int64 ARG) \
+ETHR_OWN_ILCKD_BODY_IMPL__(FUNC, ptr, NEW, ACT, EXP, OPS, RET)
+
+
+#ifdef ETHR_HAVE__INTERLOCKEDDECREMENT64
+#pragma intrinsic(_InterlockedDecrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedDecrement64, new, act, exp,
+ new = act - 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDINCREMENT64
+#pragma intrinsic(_InterlockedIncrement64)
+#else
+ETHR_OWN_ILCKD_1_IMPL__(_InterlockedIncrement64, new, act, exp,
+ new = act + 1, new)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGEADD64
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchangeAdd64, new, act, exp,
+ new = act + arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDEXCHANGE64
+#pragma intrinsic(_InterlockedExchange64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedExchange64, new, act, exp,
+ new = arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDAND64
+#pragma intrinsic(_InterlockedAnd64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedAnd64, new, act, exp,
+ new = act & arg, arg, act)
+#endif
+#ifdef ETHR_HAVE__INTERLOCKEDOR64
+#pragma intrinsic(_InterlockedOr64)
+#else
+ETHR_OWN_ILCKD_2_IMPL__(_InterlockedOr64, new, act, exp,
+ new = act | arg, arg, act)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#pragma intrinsic(_InterlockedExchangeAdd64_acq)
+#pragma intrinsic(_InterlockedIncrement64_acq)
+#pragma intrinsic(_InterlockedDecrement64_rel)
+#pragma intrinsic(_InterlockedCompareExchange64_acq)
+#pragma intrinsic(_InterlockedCompareExchange64_rel)
+#endif
+
+#define ETHR_ILCKD__(X) _Interlocked ## X ## 64
+#ifdef ETHR_HAVE_INTERLOCKED_ACQUIRE_RELEASE_BARRIERS
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64_acq
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64_rel
+#else
+#define ETHR_ILCKD_ACQ__(X) _Interlocked ## X ## 64
+#define ETHR_ILCKD_REL__(X) _Interlocked ## X ## 64
+#endif
+
+#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
+#define ETHR_ATMC_T__ ethr_native_atomic64_t
+#define ETHR_AINT_T__ ethr_sint64_t
+
+#else
+#error "Unsupported integer size"
+#endif
+
+typedef struct {
+ volatile ETHR_AINT_T__ value;
+} ETHR_ATMC_T__;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+static ETHR_INLINE ETHR_AINT_T__ *
+ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__ *) &var->value;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(init)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
+#else
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ var->value = i;
+#else
+ (void) ETHR_ILCKD__(Exchange)(&var->value, i);
+#endif
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
+{
+#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+ return var->value;
+#else
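+    /* An interlocked add of zero yields an atomic read with a full
+       memory barrier. */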
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(add)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ (void) ETHR_ILCKD__(ExchangeAdd)(&var->value, incr);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
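+    /* ExchangeAdd() returns the old value; adding i gives the new value. */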
+ return ETHR_ILCKD__(ExchangeAdd)(&var->value, i) + i;
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(inc)(ETHR_ATMC_T__ *var)
+{
+ (void) ETHR_ILCKD__(Increment)(&var->value);
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec)(ETHR_ATMC_T__ *var)
+{
+ (void) ETHR_ILCKD__(Decrement)(&var->value);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_ILCKD__(Increment)(&var->value);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_ILCKD__(Decrement)(&var->value);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(and_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
+{
+ return ETHR_ILCKD__(And)(&var->value, mask);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(or_retold)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ mask)
+{
+ return ETHR_ILCKD__(Or)(&var->value, mask);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_ILCKD__(CompareExchange)(&var->value, new, old);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(xchg)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ new)
+{
+ return ETHR_ILCKD__(Exchange)(&var->value, new);
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
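+/*
+ * Note that when the _acq/_rel intrinsic variants are unavailable,
+ * ETHR_ILCKD_ACQ__() and ETHR_ILCKD_REL__() fall back to the plain
+ * full-barrier intrinsics (see above); stronger than required, but
+ * still correct.
+ */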
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
+{
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
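+    /* Plain loads already have acquire semantics on the platforms
+       where this branch is taken (e.g. x86); only compiler
+       reordering needs to be prevented. */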
+ ETHR_AINT_T__ val = var->value;
+ ETHR_COMPILER_BARRIER;
+ return val;
+#else
+ return ETHR_ILCKD_ACQ__(ExchangeAdd)(&var->value, (ETHR_AINT_T__) 0);
+#endif
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_ILCKD_ACQ__(Increment)(&var->value);
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ i)
+{
+#if ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+ ETHR_COMPILER_BARRIER;
+ var->value = i;
+#else
+ (void) ETHR_ILCKD_REL__(Exchange)(&var->value, i);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(dec_relb)(ETHR_ATMC_T__ *var)
+{
+ (void) ETHR_ILCKD_REL__(Decrement)(&var->value);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
+{
+ return ETHR_ILCKD_REL__(Decrement)(&var->value);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_ILCKD_ACQ__(CompareExchange)(&var->value, new, old);
+}
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ old)
+{
+ return ETHR_ILCKD_REL__(CompareExchange)(&var->value, new, old);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS || ETHR_ATOMIC_IMPL__ */
+
+#undef ETHR_ILCKD__
+#undef ETHR_ILCKD_ACQ__
+#undef ETHR_ILCKD_REL__
+#undef ETHR_NATMC_FUNC__
+#undef ETHR_ATMC_T__
+#undef ETHR_AINT_T__
+#undef ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
+#undef ETHR_READ_ACQB_AND_SET_RELB_COMPILER_BARRIER_ONLY__
+
+#endif /* _MSC_VER */
+
+#endif /* ETHR_INCLUDE_ATOMIC_IMPL__ */
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
new file mode 100644
index 0000000000..598816b2c6
--- /dev/null
+++ b/erts/include/internal/win/ethr_event.h
@@ -0,0 +1,64 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_EVENT_OFF_WAITER__ ((long) -1)
+#define ETHR_EVENT_OFF__ ((long) 1)
+#define ETHR_EVENT_ON__ ((long) 0)
+
+typedef struct {
+ volatile long state;
+ HANDLE handle;
+} ethr_event;
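+
+/*
+ * State sketch: `state' is ETHR_EVENT_ON__ when the event is set,
+ * ETHR_EVENT_OFF__ when it is reset with no waiter, and
+ * ETHR_EVENT_OFF_WAITER__ when a thread is blocked (or about to
+ * block) on `handle'. The transition into the waiter state is
+ * presumably made by the wait implementation, which is not part of
+ * this header; ethr_event_set() thus only needs to call SetEvent()
+ * when it observes a waiter.
+ */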
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+#pragma intrinsic(_InterlockedExchange)
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+    /* _InterlockedExchange() implies a full memory barrier, which is important here */
+ long state = _InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ if (state == ETHR_EVENT_OFF_WAITER__) {
+ if (!SetEvent(e->handle))
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+    /* _InterlockedExchange() implies a full memory barrier, which is important here */
+    (void) _InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS || ETHR_EVENT_IMPL__ */
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
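+
+/*
+ * Typical usage (a sketch; `work_available()' is a hypothetical
+ * helper and error handling is omitted):
+ *
+ *   ethr_event_reset(&e);      // arm the event
+ *   if (!work_available())     // re-check after reset to avoid lost wakeups
+ *       ethr_event_wait(&e);   // block until ethr_event_set(&e)
+ */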
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
new file mode 100644
index 0000000000..c01b17cf14
--- /dev/null
+++ b/erts/include/internal/win/ethread.h
@@ -0,0 +1,35 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_WIN_H__
+#define ETHREAD_WIN_H__
+
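+/*
+ * ethr_atomic.h is included once per word size; the
+ * ETHR_ATOMIC_WANT_*BIT_IMPL__ macros select whether it expands the
+ * 32-bit or the 64-bit native atomic implementation on each pass.
+ */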
+#define ETHR_ATOMIC_WANT_32BIT_IMPL__
+#include "ethr_atomic.h"
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_ATOMIC_WANT_64BIT_IMPL__
+# include "ethr_atomic.h"
+#endif
+
+#endif /* ETHREAD_WIN_H__ */