Diffstat (limited to 'erts/include')
-rw-r--r--  erts/include/erl_native_features_config.h.in         |  21
-rw-r--r--  erts/include/internal/ethr_mutex.h                    |   8
-rw-r--r--  erts/include/internal/ethread.h                       | 142
-rw-r--r--  erts/include/internal/ethread_header_config.h.in      |   2
-rw-r--r--  erts/include/internal/ethread_inline.h                |  49
-rw-r--r--  erts/include/internal/gcc/ethread.h                   |   5
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h     | 343
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_dw_atomic.h  | 567
-rw-r--r--  erts/include/internal/libatomic_ops/ethread.h         |   3
-rw-r--r--  erts/include/internal/ose/ethr_event.h                | 113
-rw-r--r--  erts/include/internal/win/ethr_membar.h               |   8
11 files changed, 1218 insertions(+), 43 deletions(-)
diff --git a/erts/include/erl_native_features_config.h.in b/erts/include/erl_native_features_config.h.in
new file mode 100644
index 0000000000..d1674cb256
--- /dev/null
+++ b/erts/include/erl_native_features_config.h.in
@@ -0,0 +1,21 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Dirty scheduler support */
+#undef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
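Note: the only thing this new header carries is the configure-controlled ERL_NIF_DIRTY_SCHEDULER_SUPPORT flag. A minimal sketch of how dependent C code might test it (the variable name below is illustrative, not part of the patch):

    #include "erl_native_features_config.h"

    #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
    /* dirty-scheduler aware code path is compiled in */
    static const int have_dirty_schedulers = 1;
    #else
    static const int have_dirty_schedulers = 0;
    #endif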
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index 86a1e9fbdf..6c931e0cd4 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -97,7 +97,7 @@ void LeaveCriticalSection(CRITICAL_SECTION *);
#if 0
# define ETHR_MTX_Q_LOCK_SPINLOCK__
# define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
-#elif defined(ETHR_PTHREADS)
+#elif defined(ETHR_PTHREADS) || defined(ETHR_OSE_THREADS)
# define ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__
# define ETHR_MTX_QLOCK_TYPE__ pthread_mutex_t
#elif defined(ETHR_WIN32_THREADS)
@@ -210,7 +210,7 @@ struct ethr_cond_ {
#endif
};
-#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+#elif (defined(ETHR_PTHREADS) || defined(ETHR_OSE_THREADS)) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
typedef struct ethr_mutex_ ethr_mutex;
struct ethr_mutex_ {
@@ -354,7 +354,7 @@ void ethr_rwmutex_rwunlock(ethr_rwmutex *);
#ifdef ETHR_MTX_HARD_DEBUG
#define ETHR_MTX_HARD_ASSERT(A) \
- ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, __func__,#A)))
#else
#define ETHR_MTX_HARD_ASSERT(A) ((void) 1)
#endif
@@ -633,7 +633,7 @@ ETHR_INLINE_MTX_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
#endif /* ETHR_TRY_INLINE_FUNCS */
-#elif defined(ETHR_PTHREADS) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
+#elif (defined(ETHR_PTHREADS) || defined(ETHR_OSE_THREADS)) && !defined(ETHR_DBG_WIN_MTX_WITH_PTHREADS)
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index ab728c65fa..ad5d05704c 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -31,12 +31,18 @@
#endif
#include <stdlib.h>
+#include "ethread_inline.h"
#include "erl_errno.h"
#if defined(DEBUG)
# define ETHR_DEBUG
#endif
+#if defined(__PPC__) || defined (__POWERPC)
+/* OSE compiler should be fixed! */
+#define __ppc__
+#endif
+
#if defined(ETHR_DEBUG)
# undef ETHR_XCHK
# define ETHR_XCHK 1
@@ -46,21 +52,17 @@
# endif
#endif
-#undef ETHR_INLINE
-#if defined(__GNUC__)
-# define ETHR_INLINE __inline__
-#elif defined(__WIN32__)
-# define ETHR_INLINE __forceinline
-#endif
#if defined(ETHR_DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
|| (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC))
# undef ETHR_INLINE
# define ETHR_INLINE
+# undef ETHR_FORCE_INLINE
+# define ETHR_FORCE_INLINE
# undef ETHR_TRY_INLINE_FUNCS
#endif
/* Assume 64-byte cache line size */
-#define ETHR_CACHE_LINE_SIZE 64
+#define ETHR_CACHE_LINE_SIZE ASSUMED_CACHE_LINE_SIZE
#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
@@ -190,6 +192,30 @@ typedef DWORD ethr_tsd_key;
#define ETHR_YIELD() (Sleep(0), 0)
+#elif defined(ETHR_OSE_THREADS)
+
+#include "ose.h"
+#undef NIL
+
+#if defined(ETHR_HAVE_PTHREAD_H)
+#include <pthread.h>
+#endif
+
+typedef struct {
+ PROCESS id;
+ unsigned int tsd_key_index;
+ void *res;
+} ethr_tid;
+
+typedef OSPPDKEY ethr_tsd_key;
+
+#undef ETHR_HAVE_ETHR_SIG_FUNCS
+
+/* Our own RW mutexes are probably faster, but use OSE's mutexes */
+#define ETHR_USE_OWN_RWMTX_IMPL__
+
+#define ETHR_HAVE_THREAD_NAMES
+
#else /* No supported thread lib found */
#ifdef ETHR_NO_SUPP_THR_LIB_NOT_FATAL
@@ -256,19 +282,6 @@ ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
const char *func,
int err);
-#if !defined(__GNUC__)
-# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0
-#elif !defined(__GNUC_MINOR__)
-# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
- ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
-#elif !defined(__GNUC_PATCHLEVEL__)
-# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
- (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
-#else
-# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
- (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
-#endif
-
#if !ETHR_AT_LEAST_GCC_VSN__(2, 96, 0)
#define __builtin_expect(X, Y) (X)
#endif
@@ -351,8 +364,8 @@ extern ethr_runtime_t ethr_runtime__;
# include "sparc64/ethread.h"
# endif
# endif
-# include "gcc/ethread.h"
# include "libatomic_ops/ethread.h"
+# include "gcc/ethread.h"
# endif
# elif defined(ETHR_HAVE_LIBATOMIC_OPS)
# include "libatomic_ops/ethread.h"
@@ -361,9 +374,25 @@ extern ethr_runtime_t ethr_runtime__;
# endif
#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
+#if !defined(ETHR_HAVE_NATIVE_ATOMIC32) && !defined(ETHR_HAVE_NATIVE_ATOMIC64) && !defined(ETHR_DISABLE_NATIVE_IMPLS) && defined(ETHR_SMP_REQUIRE_NATIVE_IMPLS)
+#error "No native ethread implementation found. If you want to use fallbacks you have to disable native ethread support with configure."
+#endif
+
#include "ethr_atomics.h" /* The atomics API */
-#if defined(__GNUC__)
+#if defined (ETHR_OSE_THREADS)
+static ETHR_INLINE void
+ose_yield(void)
+{
+ if (get_ptype(current_process()) == OS_PRI_PROC) {
+ set_pri(get_pri(current_process()));
+ } else {
+ delay(1);
+ }
+}
+#endif
+
+#if defined(__GNUC__) && !defined(ETHR_OSE_THREADS)
# ifndef ETHR_SPIN_BODY
# if defined(__i386__) || defined(__x86_64__)
# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
@@ -379,9 +408,20 @@ extern ethr_runtime_t ethr_runtime__;
# ifndef ETHR_SPIN_BODY
# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
# endif
+#elif defined(ETHR_OSE_THREADS)
+# ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY ose_yield()
+# else
+# error "OSE should use ose_yield()"
+# endif
#endif
+#ifndef ETHR_OSE_THREADS
#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
+#else
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 0
+#endif
+
#ifndef ETHR_SPIN_BODY
# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
@@ -404,13 +444,20 @@ extern ethr_runtime_t ethr_runtime__;
# else
# define ETHR_YIELD() (pthread_yield(), 0)
# endif
+# elif defined(ETHR_OSE_THREADS)
+# define ETHR_YIELD() (ose_yield(), 0)
# else
# define ETHR_YIELD() (ethr_compiler_barrier(), 0)
# endif
#endif
-#ifdef VALGRIND /* mutex as fallback for spinlock for VALGRIND */
+#if defined(VALGRIND) || defined(ETHR_OSE_THREADS)
+/* mutex as fallback for spinlock for VALGRIND and OSE.
+ OSE cannot use spinlocks as processes working on the
+ same execution unit have a tendency to deadlock.
+ */
# undef ETHR_HAVE_NATIVE_SPINLOCKS
+# undef ETHR_HAVE_NATIVE_RWSPINLOCKS
#else
# include "ethr_optimized_fallbacks.h"
#endif
@@ -454,9 +501,19 @@ typedef struct {
typedef struct {
int detached; /* boolean (default false) */
int suggested_stack_size; /* kilo words (default sys dependent) */
+#ifdef ETHR_OSE_THREADS
+ char *name;
+ U32 coreNo;
+#endif
} ethr_thr_opts;
+#if defined(ETHR_OSE_THREADS)
+/* Default ethr name is big as we want to be able to sprintf stuff into it */
+#define ETHR_THR_OPTS_DEFAULT_INITER \
+ {0, -1, "ethread", 0}
+#else
#define ETHR_THR_OPTS_DEFAULT_INITER {0, -1}
+#endif
#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
@@ -474,7 +531,7 @@ void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_tsd_key_create(ethr_tsd_key *);
+int ethr_tsd_key_create(ethr_tsd_key *,char *);
int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
void *ethr_tsd_get(ethr_tsd_key);
@@ -566,8 +623,10 @@ typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
#if defined(ETHR_WIN32_THREADS)
# include "win/ethr_event.h"
-#else
+#elif defined(ETHR_PTHREADS)
# include "pthread/ethr_event.h"
+#elif defined(ETHR_OSE_THREADS)
+# include "ose/ethr_event.h"
#endif
int ethr_set_main_thr_status(int, int);
@@ -657,6 +716,37 @@ ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
#endif
+#elif defined (ETHR_OSE_THREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern ethr_tsd_key ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = *(ethr_ts_event**)ose_get_ppdata(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_get_tmp_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+ if (tsep->iflgs & ETHR_TS_EV_TMP) {
+ int res = ethr_free_ts_event__(tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+#endif
+
#endif
#include "ethr_mutex.h" /* Need atomic declarations and tse */
@@ -693,7 +783,7 @@ static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
{
#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
- return 0;
+ return ethr_native_rwlock_destroy(lock);
#else
return ethr_rwmutex_destroy((ethr_rwmutex *) lock);
#endif
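Note: with the OSE additions above, ethr_thr_opts gains a name and a coreNo field. A hedged sketch of how a caller might fill them in; the wrapper function and its error handling are made up for illustration, and ethr_thr_create() is assumed to keep its existing (tid, func, arg, opts) signature — only the option fields come from this patch:

    #include "ethread.h"

    static int
    start_worker(void *(*thread_func)(void *))   /* entry point passed in by the caller */
    {
        ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
        ethr_tid tid;

    #ifdef ETHR_OSE_THREADS
        opts.name = "ethr_worker";       /* used as the OSE process name (assumed) */
        opts.coreNo = 0;                 /* target core (assumed meaning)          */
    #endif
        opts.suggested_stack_size = 64;  /* kilo words, as documented above        */

        return ethr_thr_create(&tid, thread_func, NULL, &opts);
    }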
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index dd3599f86d..b36322490a 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -235,3 +235,5 @@
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
+/* Assumed cache-line size (in bytes) */
+#undef ASSUMED_CACHE_LINE_SIZE
diff --git a/erts/include/internal/ethread_inline.h b/erts/include/internal/ethread_inline.h
new file mode 100644
index 0000000000..ffb756c84f
--- /dev/null
+++ b/erts/include/internal/ethread_inline.h
@@ -0,0 +1,49 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2014. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ETHREAD_INLINE_H__
+#define ETHREAD_INLINE_H__
+
+#if !defined(__GNUC__)
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0
+#elif !defined(__GNUC_MINOR__)
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#elif !defined(__GNUC_PATCHLEVEL__)
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#else
+# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#endif
+
+#undef ETHR_INLINE
+#if defined(__GNUC__)
+# define ETHR_INLINE __inline__
+# if ETHR_AT_LEAST_GCC_VSN__(3, 1, 1)
+# define ETHR_FORCE_INLINE __inline__ __attribute__((__always_inline__))
+# else
+# define ETHR_FORCE_INLINE __inline__
+# endif
+#elif defined(__WIN32__)
+# define ETHR_INLINE __forceinline
+# define ETHR_FORCE_INLINE __forceinline
+#endif
+
+#endif /* #ifndef ETHREAD_INLINE_H__ */
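Note: ethread_inline.h now owns both the gcc version test and the inline macros that ethread.h used to define. A small sketch of the intended use (the square() helper is made up; it assumes gcc or MSVC so that ETHR_FORCE_INLINE is defined):

    #include "ethread_inline.h"

    /* Force-inlined on gcc >= 3.1.1 and on Win32; plain __inline__ on older gcc. */
    static ETHR_FORCE_INLINE int
    square(int x)
    {
        return x * x;
    }

    #if ETHR_AT_LEAST_GCC_VSN__(4, 1, 2)
    /* 4.1.2 packs as (4 << 24) | (1 << 12) | 2, so any newer gcc also passes */
    #endif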
diff --git a/erts/include/internal/gcc/ethread.h b/erts/include/internal/gcc/ethread.h
index fcfdc39441..365a3535cf 100644
--- a/erts/include/internal/gcc/ethread.h
+++ b/erts/include/internal/gcc/ethread.h
@@ -25,6 +25,9 @@
#ifndef ETHREAD_GCC_H__
#define ETHREAD_GCC_H__
+#if defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP32) \
+ || defined(ETHR_HAVE___SYNC_VAL_COMPARE_AND_SWAP64)
+
#ifndef ETHR_MEMBAR
# include "ethr_membar.h"
#endif
@@ -46,3 +49,5 @@
#endif
#endif
+
+#endif
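Note: the new guard only admits gcc/ethread.h when configure found gcc's __sync compare-and-swap builtins. As a reminder of their semantics, a standalone sketch (not code from this patch):

    /* __sync_val_compare_and_swap() returns the previously stored value,
       so the swap succeeded exactly when the return value equals old. */
    static int
    cas32(volatile int *p, int old, int new_val)
    {
        return __sync_val_compare_and_swap(p, old, new_val) == old;
    }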
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index fb1288c330..734cdf0890 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -32,22 +32,23 @@
* These operations need to be defined by libatomic_ops;
* otherwise, we won't compile:
* - AO_nop_full()
- * - AO_load()
- * - AO_store()
- * - AO_compare_and_swap()
+ * - AO_load() || AO_load_acquire()
+ * - AO_store() || AO_store_release()
+ * - AO_compare_and_swap() || AO_compare_and_swap_acquire()
+ * || AO_compare_and_swap_release() || AO_compare_and_swap_full()
*
*/
#if ETHR_SIZEOF_AO_T == 4
#define ETHR_HAVE_NATIVE_ATOMIC32 1
-#define ETHR_NATIVE_ATOMIC32_IMPL "libatomic_ops"
+#define ETHR_NATIVE_ATOMIC32_IMPL ETHR_NATIVE_IMPL__
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic32_t
#define ETHR_AINT_T__ ethr_sint32_t
#define ETHR_AINT_SUFFIX__ "l"
#elif ETHR_SIZEOF_AO_T == 8
#define ETHR_HAVE_NATIVE_ATOMIC64 1
-#define ETHR_NATIVE_ATOMIC64_IMPL "libatomic_ops"
+#define ETHR_NATIVE_ATOMIC64_IMPL ETHR_NATIVE_IMPL__
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic64_t
#define ETHR_AINT_T__ ethr_sint64_t
@@ -74,6 +75,8 @@ ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
return (ETHR_AINT_T__ *) &var->counter;
}
+#ifdef AO_HAVE_store
+
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
#else
@@ -86,6 +89,24 @@ ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
AO_store(&var->counter, (AO_t) value);
}
+#endif
+
+#ifdef AO_HAVE_store_write
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_WB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_WB 1
+#endif
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_wb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ AO_store_write(&var->counter, (AO_t) value);
+}
+
+#endif
+
#ifdef AO_HAVE_store_release
#if ETHR_SIZEOF_AO_T == 4
@@ -102,6 +123,24 @@ ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
#endif
+#ifdef AO_HAVE_store_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB 1
+#endif
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ AO_store_full(&var->counter, (AO_t) value);
+}
+
+#endif
+
+#ifdef AO_HAVE_load
+
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
#else
@@ -114,6 +153,24 @@ ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
return (ETHR_AINT_T__) AO_load(&var->counter);
}
+#endif
+
+#ifdef AO_HAVE_load_read
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_rb)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__) AO_load_read(&var->counter);
+}
+
+#endif
+
#ifdef AO_HAVE_load_acquire
#if ETHR_SIZEOF_AO_T == 4
@@ -130,6 +187,22 @@ ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
#endif
+#ifdef AO_HAVE_load_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_mb)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__) AO_load_full(&var->counter);
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_add
#if ETHR_SIZEOF_AO_T == 4
@@ -146,6 +219,54 @@ ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
#endif
+#ifdef AO_HAVE_fetch_and_add_acquire
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_ACQB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_ACQB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add_acquire(&var->counter, (AO_t) incr)) + incr;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_add_release
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RELB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add_release(&var->counter, (AO_t) incr)) + incr;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_add_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add_full(&var->counter, (AO_t) incr)) + incr;
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_add1
#if ETHR_SIZEOF_AO_T == 4
@@ -178,6 +299,38 @@ ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
#endif
+#ifdef AO_HAVE_fetch_and_add1_release
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RELB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_relb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_release(&var->counter)) + 1;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_add1_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_mb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_full(&var->counter)) + 1;
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_sub1
#if ETHR_SIZEOF_AO_T == 4
@@ -194,6 +347,22 @@ ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
#endif
+#ifdef AO_HAVE_fetch_and_sub1_acquire
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_ACQB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_ACQB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_acquire(&var->counter)) - 1;
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_sub1_release
#if ETHR_SIZEOF_AO_T == 4
@@ -210,7 +379,60 @@ ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
#endif
-#ifdef AO_HAVE_compare_and_swap
+#ifdef AO_HAVE_fetch_and_sub1_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_mb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_full(&var->counter)) - 1;
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_full) || defined(AO_HAVE_fetch_compare_and_swap_full)
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_mb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#if defined(AO_HAVE_fetch_compare_and_swap_full)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_full(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_full(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+#ifdef AO_HAVE_load_acquire
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+#endif
+ } while (act == exp);
+#ifndef AO_HAVE_load_acquire
+ AO_nop_full();
+#endif
+ return act;
+#endif
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap) || defined(AO_HAVE_fetch_compare_and_swap)
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG 1
@@ -223,18 +445,28 @@ ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
+#if defined(AO_HAVE_fetch_compare_and_swap)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
+#ifdef AO_HAVE_load
act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
} while (act == exp);
return act;
+#endif
}
#endif
-#ifdef AO_HAVE_compare_and_swap_acquire
+#if defined(AO_HAVE_compare_and_swap_acquire) || defined(AO_HAVE_fetch_compare_and_swap_acquire)
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB 1
@@ -247,6 +479,11 @@ ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
+#if defined(AO_HAVE_fetch_compare_and_swap_acquire)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_acquire(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
@@ -261,11 +498,55 @@ ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
AO_nop_full();
#endif
return act;
+#endif
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_read) || defined(AO_HAVE_fetch_compare_and_swap_read)
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_rb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#if defined(AO_HAVE_fetch_compare_and_swap_read)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_read(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_read(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+#if defined(AO_HAVE_load_read)
+ act = (ETHR_AINT_T__) AO_load_read(&var->counter);
+#elif defined(AO_HAVE_load)
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
+ } while (act == exp);
+#ifndef AO_HAVE_load_read
+#ifdef AO_HAVE_nop_read
+ AO_nop_read();
+#else
+ AO_nop_full();
+#endif
+#endif
+ return act;
+#endif
}
#endif
-#ifdef AO_HAVE_compare_and_swap_release
+#if defined(AO_HAVE_compare_and_swap_release) || defined(AO_HAVE_fetch_compare_and_swap_release)
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB 1
@@ -278,13 +559,57 @@ ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
+#if defined(AO_HAVE_fetch_compare_and_swap_release)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_release(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
+#ifdef AO_HAVE_load
act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
+ } while (act == exp);
+ return act;
+#endif
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_write) || defined(AO_HAVE_fetch_compare_and_swap_write)
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_WB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_WB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_wb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#if defined(AO_HAVE_fetch_compare_and_swap_write)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_write(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_write(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+#ifdef AO_HAVE_load
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
} while (act == exp);
return act;
+#endif
}
#endif
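Note: the pattern repeated throughout ethr_atomic.h is to prefer AO_fetch_compare_and_swap*() (which returns the previous value directly) and otherwise emulate it with the boolean AO_compare_and_swap*() plus a re-read loop. A condensed sketch of that emulation, assuming plain AO_compare_and_swap() and AO_load() are available:

    /* On failure AO_compare_and_swap() gives no old value back, so load the
       actual value and retry while it still equals exp (spurious failure). */
    static AO_t
    cmpxchg_emulated(volatile AO_t *p, AO_t exp, AO_t new_val)
    {
        AO_t act;
        do {
            if (AO_compare_and_swap(p, exp, new_val))
                return exp;          /* swapped; the old value was exp    */
            act = AO_load(p);        /* see what is actually stored       */
        } while (act == exp);        /* equal => the failure was spurious */
        return act;                  /* genuinely different old value     */
    }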
diff --git a/erts/include/internal/libatomic_ops/ethr_dw_atomic.h b/erts/include/internal/libatomic_ops/ethr_dw_atomic.h
new file mode 100644
index 0000000000..4dd9f41e96
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_dw_atomic.h
@@ -0,0 +1,567 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native double word atomics using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_DW_ATOMIC_H__
+#define ETHR_LIBATOMIC_OPS_DW_ATOMIC_H__
+
+#if defined(AO_HAVE_double_t) \
+ && (defined(AO_HAVE_double_load_acquire) \
+ || defined(AO_HAVE_double_load)) \
+ && (defined(AO_HAVE_compare_double_and_swap_double) \
+ || defined(AO_HAVE_compare_double_and_swap_double_full) \
+ || defined(AO_HAVE_compare_double_and_swap_double_acquire) \
+ || defined(AO_HAVE_compare_double_and_swap_double_release) \
+ || defined(AO_HAVE_double_compare_and_swap) \
+ || defined(AO_HAVE_double_compare_and_swap_full) \
+ || defined(AO_HAVE_double_compare_and_swap_acquire) \
+ || defined(AO_HAVE_double_compare_and_swap_release))
+
+#if ETHR_SIZEOF_PTR == 4
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint64_t
+#elif ETHR_SIZEOF_PTR == 8 && defined(ETHR_HAVE_INT128_T)
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint128_t
+#endif
+
+typedef union {
+ volatile AO_double_t dw_mem;
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+ ETHR_NATIVE_SU_DW_SINT_T su_dw_sint;
+#endif
+} ethr_native_dw_atomic_t;
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_NATIVE_SU_DW_ATOMIC
+#else
+# define ETHR_HAVE_NATIVE_DW_ATOMIC
+#endif
+
+#define ETHR_NATIVE_DW_ATOMIC_IMPL ETHR_NATIVE_IMPL__
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_NDWA_FUNC__(Func) ethr_native_su_dw_atomic_ ## Func
+# define ETHR_NDWA_RET_3_TYPE__ ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_NDWA_RET_2_TYPE__ ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_NDWA_VAL_ARG_TYPE__ ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_NDWA_DECL_ARG__(Arg)
+# if defined(AO_HAVE_DOUBLE_PTR_STORAGE)
+# define ETHR_NDWA_VAL2AOVAL__(AOV, V) \
+ ((AOV).AO_whole = (double_ptr_storage) (V))
+# define ETHR_NDWA_AOVAL2VAL__(AOV, V) \
+ ((V) = (ETHR_NATIVE_SU_DW_SINT_T) (AOV).AO_whole)
+# define ETHR_NDWA_RETURN_VAL_3__(SUCCESS, AOVAL, VAL) \
+ do { \
+ return (ETHR_NATIVE_SU_DW_SINT_T) (AOVAL).AO_whole; \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_2__(AOVAL, VAL) \
+ do { \
+ return (ETHR_NATIVE_SU_DW_SINT_T) (AOVAL).AO_whole; \
+ } while (0)
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_whole == (AOV2).AO_whole)
+# else
+typedef union {
+ ethr_sint_t sint[2];
+ ETHR_NATIVE_SU_DW_SINT_T dw_sint;
+} ethr_dw_splitter_t;
+# define ETHR_NDWA_VAL2AOVAL__(AOV, V) \
+ do { \
+ ethr_dw_splitter_t tmp__; \
+ tmp__.dw_sint = (V); \
+ (AOV).AO_val1 = (AO_t) tmp__.sint[0]; \
+ (AOV).AO_val2 = (AO_t) tmp__.sint[1]; \
+ } while (0)
+# define ETHR_NDWA_AOVAL2VAL__(AOV, V) \
+ do { \
+ ethr_dw_splitter_t tmp__; \
+ tmp__.sint[0] = (ethr_sint_t) (AOV).AO_val1; \
+ tmp__.sint[1] = (ethr_sint_t) (AOV).AO_val2; \
+ (V) = tmp__.dw_sint; \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_3__(SUCCESS, AOVAL, VAL) \
+ do { \
+ ethr_dw_splitter_t tmp__; \
+ tmp__.sint[0] = (ethr_sint_t) (AOVAL).AO_val1; \
+ tmp__.sint[1] = (ethr_sint_t) (AOVAL).AO_val2; \
+ return tmp__.dw_sint; \
+ } while (0)
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_val1 == (AOV2).AO_val1 \
+ && (AOV1).AO_val2 == (AOV2).AO_val2)
+# endif
+#else
+# define ETHR_NDWA_FUNC__(Func) ethr_native_dw_atomic_ ## Func
+# define ETHR_NDWA_RET_3_TYPE__ int
+# define ETHR_NDWA_RET_2_TYPE__ void
+# define ETHR_NDWA_VAL_ARG_TYPE__ ethr_sint_t *
+# define ETHR_NDWA_DECL_ARG__(Arg) , ETHR_NDWA_VAL_ARG_TYPE__ Arg
+# define ETHR_NDWA_VAL2AOVAL__(AOV, V) \
+ do { \
+ (AOV).AO_val1 = (AO_t) (V)[0]; \
+ (AOV).AO_val2 = (AO_t) (V)[1]; \
+ } while (0)
+# define ETHR_NDWA_AOVAL2VAL__(AOV, V) \
+ do { \
+ (V)[0] = (ethr_sint_t) (AOV).AO_val1; \
+ (V)[1] = (ethr_sint_t) (AOV).AO_val2; \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_3__(SUCCESS, AOVAL, VAL) \
+ do { \
+ (VAL)[0] = (ethr_sint_t) (AOVAL).AO_val1; \
+ (VAL)[1] = (ethr_sint_t) (AOVAL).AO_val2; \
+ return (SUCCESS); \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_2__(AOVAL, VAL) \
+ do { \
+ (VAL)[0] = (ethr_sint_t) (AOVAL).AO_val1; \
+ (VAL)[1] = (ethr_sint_t) (AOVAL).AO_val2; \
+ return; \
+ } while (0)
+# if defined(AO_HAVE_DOUBLE_PTR_STORAGE)
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_whole == (AOV2).AO_whole)
+# else
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_val1 == (AOV2).AO_val1 \
+ && (AOV1).AO_val2 == (AOV2).AO_val2)
+# endif
+#endif
+
+#define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_ADDR
+static ETHR_INLINE ethr_sint_t *
+ethr_native_dw_atomic_addr(ethr_native_dw_atomic_t *var)
+{
+ return (ethr_sint_t *) &var->dw_mem;
+}
+
+#ifdef AO_HAVE_double_load
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_2_TYPE__
+ETHR_NDWA_FUNC__(read)(ethr_native_dw_atomic_t *var
+ ETHR_NDWA_DECL_ARG__(val))
+{
+ AO_double_t act = AO_double_load(&var->dw_mem);
+ ETHR_NDWA_RETURN_VAL_2__(act, val);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_load_read
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_RB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_RB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_2_TYPE__
+ETHR_NDWA_FUNC__(read_rb)(ethr_native_dw_atomic_t *var
+ ETHR_NDWA_DECL_ARG__(val))
+{
+ AO_double_t act = AO_double_load_read(&var->dw_mem);
+ ETHR_NDWA_RETURN_VAL_2__(act, val);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_load_acquire
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_ACQB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_ACQB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_2_TYPE__
+ETHR_NDWA_FUNC__(read_acqb)(ethr_native_dw_atomic_t *var
+ ETHR_NDWA_DECL_ARG__(val))
+{
+ AO_double_t act = AO_double_load_acquire(&var->dw_mem);
+ ETHR_NDWA_RETURN_VAL_2__(act, val);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_store
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET
+#endif
+
+static ETHR_INLINE void
+ETHR_NDWA_FUNC__(set)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ val)
+{
+ AO_double_t new;
+ ETHR_NDWA_VAL2AOVAL__(new, val);
+ AO_double_store(&var->dw_mem, new);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_store_write
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_WB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_WB
+#endif
+
+static ETHR_INLINE void
+ETHR_NDWA_FUNC__(set_wb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ val)
+{
+ AO_double_t new;
+ ETHR_NDWA_VAL2AOVAL__(new, val);
+ AO_double_store_write(&var->dw_mem, new);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_store_release
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_RELB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_RELB
+#endif
+
+static ETHR_INLINE void
+ETHR_NDWA_FUNC__(set_relb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ val)
+{
+ AO_double_t new;
+ ETHR_NDWA_VAL2AOVAL__(new, val);
+ AO_double_store_release(&var->dw_mem, new);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_full) || defined(AO_HAVE_compare_double_and_swap_double_full)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_MB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_MB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_mb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_full)
+ xchgd = AO_double_compare_and_swap_full(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_full)
+ xchgd = AO_compare_double_and_swap_double_full(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load_acquire
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#else
+ ao_act = AO_double_load(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+#ifndef AO_HAVE_double_load_acquire
+ AO_nop_full();
+#endif
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap) || defined(AO_HAVE_compare_double_and_swap_double)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap)
+ xchgd = AO_double_compare_and_swap(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double)
+ xchgd = AO_compare_double_and_swap_double(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load
+ ao_act = AO_double_load(&var->dw_mem);
+#else
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_read) || defined(AO_HAVE_compare_double_and_swap_double_read)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_rb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_read)
+ xchgd = AO_double_compare_and_swap_read(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_read)
+ xchgd = AO_compare_double_and_swap_double_read(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#if defined(AO_HAVE_double_load_read)
+ ao_act = AO_double_load_read(&var->dw_mem);
+#elif defined(AO_HAVE_double_load)
+ ao_act = AO_double_load(&var->dw_mem);
+#else
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+#ifndef AO_HAVE_double_load_read
+#ifdef AO_HAVE_nop_read
+ AO_nop_read();
+#else
+ AO_nop_full();
+#endif
+#endif
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_acquire) || defined(AO_HAVE_compare_double_and_swap_double_acquire)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_ACQB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_ACQB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_acqb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_acquire)
+ xchgd = AO_double_compare_and_swap_acquire(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_acquire)
+ xchgd = AO_compare_double_and_swap_double_acquire(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load_acquire
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#else
+ ao_act = AO_double_load(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+#ifndef AO_HAVE_double_load_acquire
+ AO_nop_full();
+#endif
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_write) || defined(AO_HAVE_compare_double_and_swap_double_write)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_WB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_WB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_wb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_write)
+ xchgd = AO_double_compare_and_swap_write(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_write)
+ xchgd = AO_compare_double_and_swap_double_write(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load
+ ao_act = AO_double_load(&var->dw_mem);
+#else
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_release) || defined(AO_HAVE_compare_double_and_swap_double_release)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RELB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RELB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_relb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_release)
+ xchgd = AO_double_compare_and_swap_release(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_release)
+ xchgd = AO_compare_double_and_swap_double_release(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+ ao_act = AO_double_load(&var->dw_mem);
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#endif /* defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__) */
+
+#endif /* Have AO double functionality ... */
+
+#endif /* ETHR_LIBATOMIC_OPS_DW_ATOMIC_H__ */
+
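Note: both double-word CAS spellings that libatomic_ops may provide are handled above. A hedged sketch of the "split halves" entry point in isolation (assumes AO_HAVE_compare_double_and_swap_double_full; the wrapper name and parameters are illustrative):

    #include "atomic_ops.h"

    /* Try a single double-word CAS, passing the two machine words of the
       old and new values separately, as the cmpxchg wrappers above do.
       Returns non-zero when both words were swapped atomically. */
    static int
    dw_cas_once(volatile AO_double_t *mem,
                AO_t old_lo, AO_t old_hi, AO_t new_lo, AO_t new_hi)
    {
        return AO_compare_double_and_swap_double_full(mem,
                                                      old_lo, old_hi,
                                                      new_lo, new_hi);
    }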
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
index e1fdd588bb..d65ee19b04 100644
--- a/erts/include/internal/libatomic_ops/ethread.h
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -33,9 +33,12 @@
#define AO_USE_PENTIUM4_INSTRS
#endif
+#define ETHR_NATIVE_IMPL__ "libatomic_ops"
+
#include "atomic_ops.h"
#include "ethr_membar.h"
#include "ethr_atomic.h"
+#include "ethr_dw_atomic.h"
#endif
diff --git a/erts/include/internal/ose/ethr_event.h b/erts/include/internal/ose/ethr_event.h
new file mode 100644
index 0000000000..000a600813
--- /dev/null
+++ b/erts/include/internal/ose/ethr_event.h
@@ -0,0 +1,113 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2011. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+//#define USE_PTHREAD_API
+
+#define ETHR_EVENT_OFF_WAITER__ -1L
+#define ETHR_EVENT_OFF__ 1L
+#define ETHR_EVENT_ON__ 0L
+
+#ifdef USE_PTHREAD_API
+
+typedef struct {
+ ethr_atomic32_t state;
+ pthread_mutex_t mtx;
+ pthread_cond_t cnd;
+} ethr_event;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ ethr_sint32_t val;
+ val = ethr_atomic32_xchg_mb(&e->state, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_cond_signal(&e->cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_mutex_unlock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic32_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#else
+
+typedef struct {
+ ethr_atomic32_t state;
+ PROCESS proc;
+} ethr_event;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ ethr_sint32_t val = ethr_atomic32_xchg_mb(&e->state, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+#ifdef DEBUG
+ OSFSEMVAL fsem_val = get_fsem(e->proc);
+
+ /* There is a race in this assert.
+ This is because the state is set before the wait call in wait__.
+ We hope that a delay of 10 ms is enough */
+ if (fsem_val == 0)
+ delay(10);
+ ETHR_ASSERT(get_fsem(e->proc) == -1);
+#endif
+ signal_fsem(e->proc);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic32_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
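Note: ethr_event_set() above flips the state to ETHR_EVENT_ON__ and only signals the OSE fast semaphore when it observes ETHR_EVENT_OFF_WAITER__. A hedged sketch of what the matching wait side (implemented in the C file, not in this header) is expected to do; the ethr_atomic32 calls follow the usual ethread atomics API and wait_fsem() is an assumed OSE primitive:

    /* Sketch only: announce ourselves as a waiter, then block until
       the setter runs signal_fsem() on our process. */
    static int
    event_wait_sketch(ethr_event *e)
    {
        ethr_sint32_t state = ethr_atomic32_read(&e->state);
        if (state == ETHR_EVENT_ON__)
            return 0;                              /* already signalled     */
        state = ethr_atomic32_cmpxchg_mb(&e->state,
                                         ETHR_EVENT_OFF_WAITER__,
                                         ETHR_EVENT_OFF__);
        if (state == ETHR_EVENT_ON__)
            return 0;                              /* raced with a setter   */
        wait_fsem(1);                              /* assumed OSE call      */
        return 0;
    }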
diff --git a/erts/include/internal/win/ethr_membar.h b/erts/include/internal/win/ethr_membar.h
index 8237660b2c..a17f2459fc 100644
--- a/erts/include/internal/win/ethr_membar.h
+++ b/erts/include/internal/win/ethr_membar.h
@@ -63,13 +63,13 @@ do { \
#pragma intrinsic(_mm_sfence)
#pragma intrinsic(_mm_lfence)
-static __forceinline void
+static ETHR_FORCE_INLINE void
ethr_cfence__(void)
{
_ReadWriteBarrier();
}
-static __forceinline void
+static ETHR_FORCE_INLINE void
ethr_mfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
@@ -80,7 +80,7 @@ ethr_mfence__(void)
_mm_mfence();
}
-static __forceinline void
+static ETHR_FORCE_INLINE void
ethr_sfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
@@ -91,7 +91,7 @@ ethr_sfence__(void)
_mm_sfence();
}
-static __forceinline void
+static ETHR_FORCE_INLINE void
ethr_lfence__(void)
{
#if ETHR_SIZEOF_PTR == 4