path: root/erts/include
author     Rickard Green <[email protected]>    2010-06-17 10:23:50 +0200
committer  Rickard Green <[email protected]>    2010-08-10 11:41:14 +0200
commit     300b419486c1ca88e33938f182d5d5a8b90fb73f (patch)
tree       55c37d5fb042bf6b3b5f56d89c9238a7e22f8b29 /erts/include
parent     c1e94fa9a6fe4ae717d35dfbd1b628dc2e06d26a (diff)
Rewrite ethread library
Large parts of the ethread library have been rewritten. The ethread library is an Erlang runtime system internal, portable thread library used by the runtime system itself.

The most notable improvement is a reader-optimized rwlock implementation, which dramatically improves the performance of read-lock/read-unlock operations on multiprocessor systems by avoiding ping-ponging of the rwlock cache lines. The reader-optimized rwlock implementation is used by miscellaneous rwlocks in the runtime system that are known to be read-locked frequently, and can be enabled on ETS tables by passing the `{read_concurrency, true}' option upon table creation. See the documentation of `ets:new/2' for more information.

The ethread library can now also use the libatomic_ops library for atomic memory accesses. This makes it possible for the Erlang runtime system to utilize optimized atomic operations on more platforms than before. Use the `--with-libatomic_ops=PATH' configure command line argument to specify where the libatomic_ops installation is located. The libatomic_ops library can be downloaded from: http://www.hpl.hp.com/research/linux/atomic_ops/

The changed API of the ethread library has also caused modifications in the Erlang runtime system. Preparations for the upcoming "delayed deallocation" feature have also been made, since it depends on the ethread library.

Note: When building for x86, the ethread library will now use instructions that first appeared on the Pentium 4 processor. If you want the runtime system to be compatible with older processors (back to the 486), you need to pass the `--enable-ethread-pre-pentium4-compatibility' configure command line argument when configuring the system.
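The cache-line layout that makes the reader optimization work shows up below in the new `ethr_rwmtx_readers_array__' type in ethr_mutex.h: each reader group gets its own cache-line-sized slot. The following is a minimal standalone sketch of the idea only (illustrative names, GCC __sync builtins; this is not the actual ethread implementation, which is considerably more elaborate):

    /*
     * Reader-optimized rwlock, minimal sketch. Every reader group writes
     * only to its own cache-line-aligned counter, so read-lock/read-unlock
     * on different cores never touch the same cache line. The writer pays
     * instead: it sets the write flag and then scans all reader counters.
     * GCC __sync builtins double as full memory barriers here.
     */
    #define CACHE_LINE_SIZE 64
    #define N_READER_GROUPS 8

    typedef union {
        volatile long counter;
        char pad[CACHE_LINE_SIZE];    /* one reader group per cache line */
    } reader_slot;

    typedef struct {
        reader_slot r[N_READER_GROUPS];
        volatile long writer;         /* 0 or 1 */
    } ro_rwlock;

    static void read_lock(ro_rwlock *l, int grp)
    {
        for (;;) {
            __sync_fetch_and_add(&l->r[grp].counter, 1);
            if (!l->writer)
                return;               /* no writer; read lock held */
            __sync_fetch_and_sub(&l->r[grp].counter, 1);
            while (l->writer)
                ;                     /* back off until writer is done */
        }
    }

    static void read_unlock(ro_rwlock *l, int grp)
    {
        __sync_fetch_and_sub(&l->r[grp].counter, 1);
    }

    static void write_lock(ro_rwlock *l)
    {
        int i;
        while (!__sync_bool_compare_and_swap(&l->writer, 0, 1))
            ;                         /* one writer at a time */
        for (i = 0; i < N_READER_GROUPS; i++)
            while (l->r[i].counter != 0)
                ;                     /* wait for readers to drain */
    }

    static void write_unlock(ro_rwlock *l)
    {
        __sync_synchronize();
        l->writer = 0;
    }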
Diffstat (limited to 'erts/include')
-rw-r--r--  erts/include/internal/erl_misc_utils.h            |    4
-rw-r--r--  erts/include/internal/ethr_internal.h             |   67
-rw-r--r--  erts/include/internal/ethr_mutex.h                |  510
-rw-r--r--  erts/include/internal/ethr_optimized_fallbacks.h  |  185
-rw-r--r--  erts/include/internal/ethread.h                   | 1653
-rw-r--r--  erts/include/internal/ethread_header_config.h.in  |   57
-rw-r--r--  erts/include/internal/gcc/ethr_atomic.h           |   19
-rw-r--r--  erts/include/internal/i386/atomic.h               |   25
-rw-r--r--  erts/include/internal/i386/ethread.h              |    3
-rw-r--r--  erts/include/internal/i386/rwlock.h               |   12
-rw-r--r--  erts/include/internal/i386/spinlock.h             |   14
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h |  292
-rw-r--r--  erts/include/internal/libatomic_ops/ethread.h     |   30
-rw-r--r--  erts/include/internal/ppc32/atomic.h              |   22
-rw-r--r--  erts/include/internal/ppc32/ethread.h             |    3
-rw-r--r--  erts/include/internal/ppc32/rwlock.h              |   12
-rw-r--r--  erts/include/internal/ppc32/spinlock.h            |   12
-rw-r--r--  erts/include/internal/pthread/ethr_event.h        |  151
-rw-r--r--  erts/include/internal/sparc32/atomic.h            |   39
-rw-r--r--  erts/include/internal/sparc32/ethread.h           |    3
-rw-r--r--  erts/include/internal/sparc32/rwlock.h            |   12
-rw-r--r--  erts/include/internal/sparc32/spinlock.h          |   12
-rw-r--r--  erts/include/internal/tile/atomic.h               |   67
-rw-r--r--  erts/include/internal/win/ethr_atomic.h           |  244
-rw-r--r--  erts/include/internal/win/ethr_event.h            |   62
-rw-r--r--  erts/include/internal/win/ethread.h               |   31
26 files changed, 2409 insertions, 1132 deletions
diff --git a/erts/include/internal/erl_misc_utils.h b/erts/include/internal/erl_misc_utils.h
index 82e9ba3798..6b875ff824 100644
--- a/erts/include/internal/erl_misc_utils.h
+++ b/erts/include/internal/erl_misc_utils.h
@@ -50,4 +50,8 @@ int erts_unbind_from_cpu_str(char *str);
int erts_milli_sleep(long);
+#ifdef __WIN32__
+int erts_get_last_win_errno(void);
+#endif
+
#endif /* #ifndef ERL_MISC_UTILS_H_ */
diff --git a/erts/include/internal/ethr_internal.h b/erts/include/internal/ethr_internal.h
new file mode 100644
index 0000000000..e9c3daf783
--- /dev/null
+++ b/erts/include/internal/ethr_internal.h
@@ -0,0 +1,67 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Internal ethread exports
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_INTERNAL_H__
+#define ETHR_INTERNAL_H__
+
+#include "erl_misc_utils.h"
+
+extern ethr_memory_allocators ethr_mem__;
+extern erts_cpu_info_t *ethr_cpu_info__;
+extern size_t ethr_pagesize__;
+extern size_t ethr_min_stack_size__; /* kilo words */
+extern size_t ethr_max_stack_size__; /* kilo words */
+extern int ethr_not_completely_inited__;
+extern int ethr_not_inited__;
+
+extern void *(*ethr_thr_prepare_func__)(void);
+extern void (*ethr_thr_parent_func__)(void *);
+extern void (*ethr_thr_child_func__)(void *);
+
+#define ETHR_PAGE_ALIGN(SZ) \
+ (((((size_t) (SZ)) - 1)/ethr_pagesize__ + 1)*ethr_pagesize__)
+#define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
+#define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)
+
+#undef ETHR_STACK_GUARD_SIZE
+#ifdef ETHR_HAVE_PTHREAD_ATTR_SETGUARDSIZE
+# define ETHR_STACK_GUARD_SIZE (ethr_pagesize__)
+#endif
+
+/* implemented in lib_src/<thr-lib>/ethread.c */
+int ethr_set_tse__(ethr_ts_event *tsep);
+ethr_ts_event *ethr_get_tse__(void);
+ETHR_PROTO_NORETURN__ ethr_abort__(void);
+#ifdef ETHR_WIN32_THREADS
+int ethr_win_get_errno__(void);
+#endif
+
+/* implemented in lib_src/common/ethread_aux.c */
+int ethr_init_common__(ethr_init_data *id);
+int ethr_late_init_common__(ethr_late_init_data *lid);
+void ethr_run_exit_handlers__(void);
+void ethr_ts_event_destructor__(void *vtsep);
+
+
+#endif
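The ETHR_PAGE_ALIGN, ETHR_B2KW (bytes to kilowords) and ETHR_KW2B (kilowords to bytes) macros above all round up to whole units. A small standalone check of that rounding, assuming 4 KB pages and 8-byte pointers (ethr_pagesize__ is a runtime variable in the real library; it is faked here purely for the demonstration):

    #include <assert.h>
    #include <stddef.h>

    static size_t ethr_pagesize__ = 4096;  /* faked; set at runtime in ethread */

    #define ETHR_PAGE_ALIGN(SZ) \
      (((((size_t) (SZ)) - 1)/ethr_pagesize__ + 1)*ethr_pagesize__)
    #define ETHR_B2KW(B) ((((size_t) (B)) - 1)/(sizeof(void *)*1024) + 1)
    #define ETHR_KW2B(KW) (((size_t) (KW))*sizeof(void *)*1024)

    int main(void)
    {
        assert(ETHR_PAGE_ALIGN(5000) == 8192);  /* rounded up to 2 pages */
        assert(ETHR_PAGE_ALIGN(4096) == 4096);  /* exact multiples unchanged */
        assert(ETHR_B2KW(5000) == 1);           /* 5000 bytes fit in 1 kiloword */
        assert(ETHR_KW2B(16) == 16*8*1024);     /* 16 kilowords = 128 KB */
        return 0;
    }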
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
new file mode 100644
index 0000000000..4ce3e75c78
--- /dev/null
+++ b/erts/include/internal/ethr_mutex.h
@@ -0,0 +1,510 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Mutex, rwmutex and condition variable implementation
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_MUTEX_H__
+#define ETHR_MUTEX_H__
+
+#define ETHR_RWMUTEX_INITIALIZED 0x99999999
+#define ETHR_MUTEX_INITIALIZED 0x77777777
+#define ETHR_COND_INITIALIZED 0x55555555
+
+#if 0
+# define ETHR_MTX_HARD_DEBUG
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+# ifdef __GNUC__
+# warning ETHR_MTX_HARD_DEBUG
+# endif
+/*# define ETHR_MTX_HARD_DEBUG_LFS*/
+/*# define ETHR_MTX_HARD_DEBUG_FENCE*/
+/*# define ETHR_MTX_HARD_DEBUG_Q*/
+# define ETHR_MTX_HARD_DEBUG_WSQ
+
+# if !defined(ETHR_MTX_HARD_DEBUG_WSQ) && defined(ETHR_MTX_HARD_DEBUG_Q)
+# define ETHR_MTX_HARD_DEBUG_WSQ
+# endif
+#endif
+
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+
+#if 0
+# define ETHR_MTX_Q_LOCK_SPINLOCK__
+# define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
+#elif defined(ETHR_PTHREADS)
+# define ETHR_MTX_Q_LOCK_PTHREAD_MUTEX__
+# define ETHR_MTX_QLOCK_TYPE__ pthread_mutex_t
+#elif defined(ETHR_WIN32_THREADS)
+# define ETHR_MTX_Q_LOCK_CRITICAL_SECTION__
+# define ETHR_MTX_QLOCK_TYPE__ CRITICAL_SECTION
+#else
+# error Need a qlock implementation
+#endif
+
+#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+
+/* frequent read kind */
+#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
+#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_FLG__ - 1)
+#define ETHR_RWMTX_R_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+/* normal kind */
+#define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+
+#define ETHR_RWMTX_WAIT_FLGS__ \
+ (ETHR_RWMTX_W_WAIT_FLG__|ETHR_RWMTX_R_WAIT_FLG__)
+
+#define ETHR_CND_WAIT_FLG__ ETHR_RWMTX_R_WAIT_FLG__
+
+struct ethr_mutex_base_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long pre_fence;
+#endif
+ ethr_atomic_t flgs;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_WSQ
+ int ws;
+#endif
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+ ethr_atomic_t hdbg_lfs;
+#endif
+};
+
+#endif
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_mutex_opt;
+
+typedef struct {
+ int main_spincount;
+ int aux_spincount;
+} ethr_cond_opt;
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ struct ethr_mutex_base_ mtxb;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ struct {
+ long pre_fence;
+ } mtxb; /* mtxb allows us to use same macro as for mutex and rwmutex... */
+#endif
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
+ short aux_scnt;
+ short main_scnt;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread */
+
+typedef struct ethr_mutex_ ethr_mutex;
+struct ethr_mutex_ {
+ pthread_mutex_t pt_mtx;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+typedef struct ethr_cond_ ethr_cond;
+struct ethr_cond_ {
+ pthread_cond_t pt_cnd;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread */
+
+int ethr_mutex_init_opt(ethr_mutex *, ethr_mutex_opt *);
+int ethr_mutex_init(ethr_mutex *);
+int ethr_mutex_destroy(ethr_mutex *);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+int ethr_mutex_trylock(ethr_mutex *);
+void ethr_mutex_lock(ethr_mutex *);
+void ethr_mutex_unlock(ethr_mutex *);
+#endif
+int ethr_cond_init_opt(ethr_cond *, ethr_cond_opt *);
+int ethr_cond_init(ethr_cond *);
+int ethr_cond_destroy(ethr_cond *);
+void ethr_cond_signal(ethr_cond *);
+void ethr_cond_broadcast(ethr_cond *);
+int ethr_cond_wait(ethr_cond *, ethr_mutex *);
+
+typedef enum {
+ ETHR_RWMUTEX_TYPE_NORMAL,
+ ETHR_RWMUTEX_TYPE_FREQUENT_READ,
+ ETHR_RWMUTEX_TYPE_EXTREMELY_FREQUENT_READ
+} ethr_rwmutex_type;
+
+typedef enum {
+ ETHR_RWMUTEX_LONG_LIVED,
+ ETHR_RWMUTEX_SHORT_LIVED,
+ ETHR_RWMUTEX_UNKNOWN_LIVED
+} ethr_rwmutex_lived;
+
+typedef struct {
+ ethr_rwmutex_type type;
+ ethr_rwmutex_lived lived;
+ int main_spincount;
+ int aux_spincount;
+} ethr_rwmutex_opt;
+
+#define ETHR_RWMUTEX_OPT_DEFAULT_INITER \
+ {ETHR_RWMUTEX_TYPE_NORMAL, ETHR_RWMUTEX_UNKNOWN_LIVED, -1, -1}
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+typedef union {
+ struct {
+ ethr_atomic_t readers;
+ int waiting_readers;
+ int byte_offset;
+ ethr_rwmutex_lived lived;
+ } data;
+ char align__[ETHR_CACHE_LINE_SIZE];
+} ethr_rwmtx_readers_array__;
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ struct ethr_mutex_base_ mtxb;
+ ethr_rwmutex_type type;
+ ethr_ts_event *rq_end;
+ union {
+ ethr_rwmtx_readers_array__ *ra;
+ int rs;
+ } tdata;
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+ long post_fence;
+#endif
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#else /* pthread_rwlock */
+
+typedef struct ethr_rwmutex_ ethr_rwmutex;
+struct ethr_rwmutex_ {
+ pthread_rwlock_t pt_rwlock;
+#if ETHR_XCHK
+ int initialized;
+#endif
+};
+
+#endif /* pthread_rwlock */
+
+int ethr_rwmutex_set_reader_group(int);
+int ethr_rwmutex_init_opt(ethr_rwmutex *, ethr_rwmutex_opt *);
+int ethr_rwmutex_init(ethr_rwmutex *);
+int ethr_rwmutex_destroy(ethr_rwmutex *);
+#if defined(ETHR_USE_OWN_RWMTX_IMPL__) \
+ || !defined(ETHR_TRY_INLINE_FUNCS) \
+ || defined(ETHR_MUTEX_IMPL__)
+int ethr_rwmutex_tryrlock(ethr_rwmutex *);
+void ethr_rwmutex_rlock(ethr_rwmutex *);
+void ethr_rwmutex_runlock(ethr_rwmutex *);
+int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwlock(ethr_rwmutex *);
+void ethr_rwmutex_rwunlock(ethr_rwmutex *);
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG
+#define ETHR_MTX_HARD_ASSERT(A) \
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
+#else
+#define ETHR_MTX_HARD_ASSERT(A) ((void) 1)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_LFS
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
+do { \
+ ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ > 0); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ >= 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
+do { \
+ long val__; \
+ ETHR_COMPILER_BARRIER; \
+ val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == -1); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if ((RES) == 0) \
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK((MTXB)); \
+ else \
+ ETHR_MTX_HARD_ASSERT((RES) == EBUSY); \
+} while (0)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
+do { \
+ long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ETHR_MTX_HARD_ASSERT(val__ == 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB)
+# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES)
+# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB)
+#endif
+
+#ifdef ETHR_MTX_HARD_DEBUG_FENCE
+
+#if ETHR_SIZEOF_PTR == 8
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeadbeefdeadbeefL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeaddeaddeadL
+#else
+# define ETHR_MTX_HARD_DEBUG_PRE_FENCE 0xdeaddeadL
+# define ETHR_MTX_HARD_DEBUG_POST_FENCE 0xdeaddeadL
+#endif
+
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ETHR_MTX_HARD_ASSERT((X)->mtxb.pre_fence == ETHR_MTX_HARD_DEBUG_PRE_FENCE);\
+ ETHR_MTX_HARD_ASSERT((X)->post_fence == ETHR_MTX_HARD_DEBUG_POST_FENCE); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X) \
+do { \
+ (X)->mtxb.pre_fence = ETHR_MTX_HARD_DEBUG_PRE_FENCE; \
+ (X)->post_fence = ETHR_MTX_HARD_DEBUG_POST_FENCE; \
+} while (0)
+#else
+#define ETHR_MTX_HARD_DEBUG_FENCE_CHK(X)
+#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
+#endif
+
+#ifdef ETHR_USE_OWN_MTX_IMPL__
+
+#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_MTX_DEFAULT_AUX_SPINCOUNT 50
+
+#define ETHR_CND_DEFAULT_MAIN_SPINCOUNT 0
+#define ETHR_CND_DEFAULT_AUX_SPINCOUNT 0
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+void ethr_mutex_lock_wait__(ethr_mutex *, long);
+void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ long act;
+ int res;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ res = (act == 0) ? 0 : EBUSY;
+
+ ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ if (act != 0)
+ ethr_mutex_lock_wait__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ long act;
+ ETHR_COMPILER_BARRIER;
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
+
+ act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ if (act != ETHR_RWMTX_W_FLG__)
+ ethr_mutex_unlock_wake__(mtx, act);
+
+ ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#else /* pthread_mutex */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
+{
+ int res;
+ res = pthread_mutex_trylock(&mtx->pt_mtx);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_lock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
+{
+ int res = pthread_mutex_unlock(&mtx->pt_mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_mutex */
+
+#ifdef ETHR_USE_OWN_RWMTX_IMPL__
+
+#define ETHR_RWMTX_DEFAULT_MAIN_SPINCOUNT 1000
+#define ETHR_RWMTX_DEFAULT_AUX_SPINCOUNT 50
+
+#else /* pthread_rwlock */
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE int
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
+ if (res != 0 && res != EBUSY)
+ ETHR_FATAL_ERROR__(res);
+ return res;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
+{
+ int res = pthread_rwlock_unlock(&rwmtx->pt_rwlock);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* pthread_rwlock */
+
+int ethr_mutex_lib_init(int);
+int ethr_mutex_lib_late_init(int, int);
+
+#endif /* #ifndef ETHR_MUTEX_H__ */
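The prototypes above give the whole user-facing API: init/destroy return an error code, while lock/unlock return void and abort internally (via ETHR_FATAL_ERROR__) on failure. A minimal usage sketch under those assumptions; the condition-variable loop follows the usual pattern of re-checking the predicate after every wakeup, which also covers spurious wakeups (the return value of ethr_cond_wait is ignored here):

    #include "ethr_mutex.h"

    static ethr_mutex mtx;
    static ethr_cond cnd;
    static int ready = 0;

    int init(void)
    {
        int res = ethr_mutex_init(&mtx);
        if (res != 0)
            return res;
        return ethr_cond_init(&cnd);
    }

    void producer(void)
    {
        ethr_mutex_lock(&mtx);
        ready = 1;
        ethr_cond_signal(&cnd);      /* wake one waiter */
        ethr_mutex_unlock(&mtx);
    }

    void consumer(void)
    {
        ethr_mutex_lock(&mtx);
        while (!ready)               /* re-check after every wakeup */
            (void) ethr_cond_wait(&cnd, &mtx);
        ethr_mutex_unlock(&mtx);
    }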
diff --git a/erts/include/internal/ethr_optimized_fallbacks.h b/erts/include/internal/ethr_optimized_fallbacks.h
new file mode 100644
index 0000000000..2f9f987d0b
--- /dev/null
+++ b/erts/include/internal/ethr_optimized_fallbacks.h
@@ -0,0 +1,185 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: "Optimized" fallbacks used when native ops are missing
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
+#define ETHR_OPTIMIZED_FALLBACKS_H__
+
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
+#endif
+
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
+/* --- Optimized spinlocks using pthread spinlocks -------------------------- */
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef pthread_spinlock_t ethr_opt_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE int
+ethr_opt_spinlock_init(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_init((pthread_spinlock_t *) lock, 0);
+}
+
+static ETHR_INLINE int
+ethr_opt_spinlock_destroy(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_destroy((pthread_spinlock_t *) lock);
+}
+
+
+static ETHR_INLINE int
+ethr_opt_spin_unlock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_unlock((pthread_spinlock_t *) lock);
+}
+
+static ETHR_INLINE int
+ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
+{
+ return pthread_spin_lock((pthread_spinlock_t *) lock);
+}
+
+#endif
+
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native spinlocks using native atomics -------------------------------- */
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_spinlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ (long) 1, (long) 0) != 0) {
+ ETHR_SPIN_BODY;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Native rwspinlocks using native atomics ------------------------------ */
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
+#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
+
+typedef ethr_native_atomic_t ethr_native_rwlock_t;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+#ifdef DEBUG
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
+#endif
+ ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp+1, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ ETHR_COMPILER_BARRIER;
+ ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
+ == ETHR_WLOCK_FLAG__);
+ ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ long act, exp = 0;
+ while (1) {
+ act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
+ exp|ETHR_WLOCK_FLAG__, exp);
+ if (act == exp)
+ break;
+ ETHR_SPIN_BODY;
+ exp = act & ~ETHR_WLOCK_FLAG__;
+ }
+ act |= ETHR_WLOCK_FLAG__;
+ /* Wait for readers to leave */
+ while (act != ETHR_WLOCK_FLAG__) {
+ ETHR_SPIN_BODY;
+ act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
+ }
+ ETHR_COMPILER_BARRIER;
+}
+
+#endif
+
+#endif
+
+#endif
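The native rwspinlock fallback above packs its whole state into one atomic long: bit 30 (ETHR_WLOCK_FLAG__) is the write flag, and the low bits count active readers. A writer CASes the flag in while readers may still be present, then spins until the reader count drains to zero. A standalone trace of that state word (plain arithmetic, no threading; the flag value matches the definition above):

    #include <assert.h>
    #include <stdio.h>

    #define WLOCK_FLAG (((long) 1) << 30)

    int main(void)
    {
        long w = 0;                  /* unlocked */
        w += 3;                      /* three read_lock()s increment */
        assert((w & WLOCK_FLAG) == 0 && w == 3);
        w |= WLOCK_FLAG;             /* write_lock() CASes the flag in */
        /* the writer now spins until w == WLOCK_FLAG ... */
        w -= 3;                      /* ... i.e. all read_unlock()s ran */
        assert(w == WLOCK_FLAG);     /* readers drained; writer proceeds */
        w = 0;                       /* write_unlock() resets the word */
        printf("state transitions ok\n");
        return 0;
    }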
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 4e7a38cd5c..4a205699bd 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -33,20 +33,9 @@
#include <stdlib.h>
#include "erl_errno.h"
-/*
- * Extra memory barrier requirements:
- * - ethr_atomic_or_old() needs to enforce a memory barrier sufficient
- * for a lock operation.
- * - ethr_atomic_and_old() needs to enforce a memory barrier sufficient
- * for an unlock operation.
- * - ethr_atomic_cmpxchg() needs to enforce a memory barrier sufficient
- * for a lock and unlock operation.
- */
-
-
-#undef ETHR_USE_RWMTX_FALLBACK
#undef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-#undef ETHR_HAVE_OPTIMIZED_LOCKS
+#undef ETHR_HAVE_OPTIMIZED_SPINLOCK
+#undef ETHR_HAVE_OPTIMIZED_RWSPINLOCK
typedef struct {
long tv_sec;
@@ -54,6 +43,10 @@ typedef struct {
} ethr_timeval;
#if defined(DEBUG)
+# define ETHR_DEBUG
+#endif
+
+#if defined(ETHR_DEBUG)
# undef ETHR_XCHK
# define ETHR_XCHK 1
#else
@@ -68,47 +61,57 @@ typedef struct {
#elif defined(__WIN32__)
# define ETHR_INLINE __forceinline
#endif
-#if defined(DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
+#if defined(ETHR_DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \
|| (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC))
# undef ETHR_INLINE
# define ETHR_INLINE
# undef ETHR_TRY_INLINE_FUNCS
#endif
-#ifdef ETHR_FORCE_INLINE_FUNCS
-# define ETHR_TRY_INLINE_FUNCS
-#endif
-#if !defined(ETHR_DISABLE_NATIVE_IMPLS) \
- && (defined(PURIFY) || defined(VALGRIND) || defined(ERTS_MIXED_CYGWIN_VC))
+#if !defined(ETHR_DISABLE_NATIVE_IMPLS) && (defined(PURIFY)||defined(VALGRIND))
# define ETHR_DISABLE_NATIVE_IMPLS
#endif
-#define ETHR_RWMUTEX_INITIALIZED 0x99999999
-#define ETHR_MUTEX_INITIALIZED 0x77777777
-#define ETHR_COND_INITIALIZED 0x55555555
+/* Assume 64-byte cache line size */
+#define ETHR_CACHE_LINE_SIZE 64L
+#define ETHR_CACHE_LINE_MASK (ETHR_CACHE_LINE_SIZE - 1)
-#define ETHR_CACHE_LINE_SIZE 64
+#define ETHR_CACHE_LINE_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / ETHR_CACHE_LINE_SIZE) + 1) * ETHR_CACHE_LINE_SIZE)
-#ifdef ETHR_INLINE_FUNC_NAME_
-# define ETHR_CUSTOM_INLINE_FUNC_NAME_
-#else
+#ifndef ETHR_INLINE_FUNC_NAME_
# define ETHR_INLINE_FUNC_NAME_(X) X
#endif
-#define ETHR_COMPILER_BARRIER ethr_compiler_barrier()
-#ifdef __GNUC__
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER __asm__ __volatile__("":::"memory")
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
#endif
-#ifdef DEBUG
+int ethr_assert_failed(const char *file, int line, const char *func, char *a);
+#ifdef ETHR_DEBUG
#define ETHR_ASSERT(A) \
- ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, #A)))
-int ethr_assert_failed(char *f, int l, char *a);
+ ((void) ((A) ? 1 : ethr_assert_failed(__FILE__, __LINE__, __func__, #A)))
#else
#define ETHR_ASSERT(A) ((void) 1)
#endif
+#if defined(__GNUC__)
+# define ETHR_PROTO_NORETURN__ void __attribute__((noreturn))
+# define ETHR_IMPL_NORETURN__ void
+#elif defined(__WIN32__) && defined(_MSC_VER)
+# define ETHR_PROTO_NORETURN__ __declspec(noreturn) void
+# define ETHR_IMPL_NORETURN__ __declspec(noreturn) void
+#else
+# define ETHR_PROTO_NORETURN__ void
+# define ETHR_IMPL_NORETURN__ void
+#endif
+
#if defined(ETHR_PTHREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The pthread implementation *
@@ -118,7 +121,9 @@ int ethr_assert_failed(char *f, int l, char *a);
#error "_GNU_SOURCE not defined. Please, compile all files with -D_GNU_SOURCE."
#endif
-#if defined(ETHR_HAVE_MIT_PTHREAD_H)
+#if defined(ETHR_NEED_NPTL_PTHREAD_H)
+#include <nptl/pthread.h>
+#elif defined(ETHR_HAVE_MIT_PTHREAD_H)
#include <pthread/mit/pthread.h>
#elif defined(ETHR_HAVE_PTHREAD_H)
#include <pthread.h>
@@ -128,130 +133,23 @@ int ethr_assert_failed(char *f, int l, char *a);
typedef pthread_t ethr_tid;
-typedef struct ethr_mutex_ ethr_mutex;
-struct ethr_mutex_ {
- pthread_mutex_t pt_mtx;
- int is_rec_mtx;
- ethr_mutex *prev;
- ethr_mutex *next;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-
-typedef struct ethr_cond_ ethr_cond;
-struct ethr_cond_ {
- pthread_cond_t pt_cnd;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
+typedef pthread_key_t ethr_tsd_key;
-#ifndef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-#define ETHR_USE_RWMTX_FALLBACK
-#else
-typedef struct ethr_rwmutex_ ethr_rwmutex;
-struct ethr_rwmutex_ {
- pthread_rwlock_t pt_rwlock;
-#if ETHR_XCHK
- int initialized;
-#endif
-};
-#endif
+#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-/* Static initializers */
-#if ETHR_XCHK
-#define ETHR_MUTEX_XCHK_INITER , ETHR_MUTEX_INITIALIZED
-#define ETHR_COND_XCHK_INITER , ETHR_COND_INITIALIZED
-#else
-#define ETHR_MUTEX_XCHK_INITER
-#define ETHR_COND_XCHK_INITER
+#if defined(PURIFY) || defined(VALGRIND)
+# define ETHR_FORCE_PTHREAD_RWLOCK
+# define ETHR_FORCE_PTHREAD_MUTEX
#endif
-#define ETHR_MUTEX_INITER {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-#define ETHR_COND_INITER {PTHREAD_COND_INITIALIZER ETHR_COND_XCHK_INITER}
-
-#if defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE) \
- || defined(ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP)
-# define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
-# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
-# define ETHR_REC_MUTEX_INITER \
- {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, 1, NULL, NULL ETHR_MUTEX_XCHK_INITER}
-# endif
-#else
-# undef ETHR_HAVE_ETHR_REC_MUTEX_INIT
+#if !defined(ETHR_FORCE_PTHREAD_RWLOCK)
+# define ETHR_USE_OWN_RWMTX_IMPL__
#endif
-#ifndef ETHR_HAVE_PTHREAD_ATFORK
-# define ETHR_NO_FORKSAFETY 1
+#if !defined(ETHR_FORCE_PTHREAD_MUTEX) && 0
+# define ETHR_USE_OWN_MTX_IMPL__
#endif
-typedef pthread_key_t ethr_tsd_key;
-
-#define ETHR_HAVE_ETHR_SIG_FUNCS 1
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- return pthread_mutex_trylock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- return pthread_mutex_lock(&mtx->pt_mtx);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- return pthread_mutex_unlock(&mtx->pt_mtx);
-}
-
-#ifdef ETHR_HAVE_PTHREAD_RWLOCK_INIT
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_tryrdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_rdlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_runlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_tryrwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_trywrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_wrlock(&rwmtx->pt_rwlock);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
-{
- return pthread_rwlock_unlock(&rwmtx->pt_rwlock);
-}
-
-#endif /* ETHR_HAVE_PTHREAD_RWLOCK_INIT */
-
-#endif /* ETHR_TRY_INLINE_FUNCS */
-
#elif defined(ETHR_WIN32_THREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The native win32 threads implementation *
@@ -273,409 +171,22 @@ ETHR_INLINE_FUNC_NAME_(ethr_rwmutex_rwunlock)(ethr_rwmutex *rwmtx)
# undef WIN32_LEAN_AND_MEAN
#endif
-/* Types */
-typedef long ethr_tid; /* thread id type */
-typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
-#if ETHR_XCHK
- int is_rec_mtx;
-#endif
-} ethr_mutex;
-
-typedef struct cnd_wait_event__ cnd_wait_event_;
+struct ethr_join_data_;
+/* Types */
typedef struct {
- volatile int initialized;
- CRITICAL_SECTION cs;
- cnd_wait_event_ *queue;
- cnd_wait_event_ *queue_end;
-} ethr_cond;
-
-#define ETHR_USE_RWMTX_FALLBACK
-
-/* Static initializers */
-
-#define ETHR_MUTEX_INITER {0}
-#define ETHR_COND_INITER {0}
-
-#define ETHR_REC_MUTEX_INITER ETHR_MUTEX_INITER
-
-#define ETHR_HAVE_ETHR_REC_MUTEX_INIT 1
+ long id;
+ struct ethr_join_data_ *jdata;
+} ethr_tid; /* thread id type */
typedef DWORD ethr_tsd_key;
#undef ETHR_HAVE_ETHR_SIG_FUNCS
-#ifdef ETHR_TRY_INLINE_FUNCS
-int ethr_fake_static_mutex_init(ethr_mutex *mtx);
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- return TryEnterCriticalSection(&mtx->cs) ? 0 : EBUSY;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
-{
- if (!mtx->initialized) {
- int res = ethr_fake_static_mutex_init(mtx);
- if (res != 0)
- return res;
- }
- EnterCriticalSection(&mtx->cs);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
-{
- LeaveCriticalSection(&mtx->cs);
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#ifdef ERTS_MIXED_CYGWIN_VC
-
-/* atomics */
-
-#ifdef _MSC_VER
-# if _MSC_VER < 1300
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Dont trust really old compilers */
-# else
-# if defined(_M_IX86)
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else /* I.e. IA64 */
-# if _MSC_VER >= 1400
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
-# else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-# endif
-# endif
-# endif
-# if _MSC_VER >= 1400
-# include <intrin.h>
-# undef ETHR_COMPILER_BARRIER
-# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
-# endif
-#pragma intrinsic(_ReadWriteBarrier)
-#pragma intrinsic(_InterlockedAnd)
-#pragma intrinsic(_InterlockedOr)
-#else
-# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
-#endif
-
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
-
-#define ETHR_MEMORY_BARRIER \
-do { \
- volatile LONG x___ = 0; \
- (void) _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
-} while (0)
-
-typedef struct {
- volatile LONG value;
-} ethr_atomic_t;
-
-typedef struct {
- volatile LONG locked;
-} ethr_spinlock_t;
-
-typedef struct {
- volatile LONG counter;
-} ethr_rwlock_t;
-#define ETHR_WLOCK_FLAG__ (((LONG) 1) << 30)
-
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- var->value = (LONG) i;
-#else
- (void) InterlockedExchange(&var->value, (LONG) i);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- *i = var->value;
-#else
- *i = InterlockedExchangeAdd(&var->value, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = InterlockedExchangeAdd(&var->value, (LONG) i);
- *testp += i;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- (void) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- (void) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedIncrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = (long) InterlockedDecrement(&var->value);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedAnd() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedAnd(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedOr() provides a full
- * memory barrier.
- */
- *old = (long) _InterlockedOr(&var->value, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- *
- * According to msdn _InterlockedCompareExchange() provides a full
- * memory barrier.
- */
- *old = _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) expected);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = (long) InterlockedExchange(&var->value, (LONG) new);
- return 0;
-}
-
-/*
- * According to msdn InterlockedExchange() provides a full
- * memory barrier.
- */
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->locked = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->locked, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
-{
- return 0;
-}
-
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedExchange(&lock->locked, (LONG) 0);
-#ifdef DEBUG
- ETHR_ASSERT(old == 1);
-#endif
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
-{
- LONG old;
- do {
- old = InterlockedExchange(&lock->locked, (LONG) 1);
- } while (old != (LONG) 0);
- ETHR_COMPILER_BARRIER;
- return 0;
-}
+#define ETHR_USE_OWN_RWMTX_IMPL__
+#define ETHR_USE_OWN_MTX_IMPL__
-/*
- * According to msdn InterlockedIncrement, InterlockedDecrement,
- * and InterlockedExchangeAdd(), _InterlockedAnd, and _InterlockedOr
- * provides full memory barriers.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- lock->counter = (LONG) 0;
-#else
- (void) InterlockedExchange(&lock->counter, (LONG) 0);
-#endif
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- InterlockedDecrement(&lock->counter);
- ETHR_ASSERT(old != 0);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- while (1) {
- LONG old = InterlockedIncrement(&lock->counter);
- if ((old & ETHR_WLOCK_FLAG__) == 0)
- break; /* Got read lock */
- /* Restore and wait for writers to unlock */
- old = InterlockedDecrement(&lock->counter);
- while (old & ETHR_WLOCK_FLAG__) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- }
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ETHR_COMPILER_BARRIER;
- {
-#ifdef DEBUG
- LONG old =
-#endif
- _InterlockedAnd(&lock->counter, ~ETHR_WLOCK_FLAG__);
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- LONG old;
- do {
- old = _InterlockedOr(&lock->counter, ETHR_WLOCK_FLAG__);
- } while (old & ETHR_WLOCK_FLAG__);
- /* We got the write part of the lock; wait for readers to unlock */
- while ((old & ~ETHR_WLOCK_FLAG__) != 0) {
-#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
- old = lock->counter;
-#else
- old = InterlockedExchangeAdd(&lock->counter, (LONG) 0);
-#endif
- ETHR_ASSERT(old & ETHR_WLOCK_FLAG__);
- }
- ETHR_COMPILER_BARRIER;
- return 0;
-}
-
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* #ifdef ERTS_MIXED_CYGWIN_VC */
+#define ETHR_YIELD() (Sleep(0), 0)
#else /* No supported thread lib found */
@@ -687,6 +198,12 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
#endif
+#ifdef SIZEOF_LONG
+#if SIZEOF_LONG < ETHR_SIZEOF_PTR
+#error size of long currently needs to be at least the same as size of void *
+#endif
+#endif
+
/* __builtin_expect() is needed by both native atomics code
* and the fallback code */
#if !defined(__GNUC__) || (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
@@ -698,6 +215,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# if defined(__GNUC__)
# if defined(ETHR_PREFER_GCC_NATIVE_IMPLS)
# include "gcc/ethread.h"
+# elif defined(ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS)
+# include "libatomic_ops/ethread.h"
# endif
# ifndef ETHR_HAVE_NATIVE_ATOMICS
# if ETHR_SIZEOF_PTR == 4
@@ -718,115 +237,141 @@ ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
# endif
# endif
# include "gcc/ethread.h"
+# include "libatomic_ops/ethread.h"
# endif
+# elif defined(ETHR_WIN32_THREADS)
+# include "win/ethread.h"
# endif
-#endif /* !defined(ETHR_DISABLE_NATIVE_IMPLS) */
-
-#ifdef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-# undef ETHR_HAVE_NATIVE_ATOMICS
-#endif
-#ifdef ETHR_HAVE_OPTIMIZED_LOCKS
-# undef ETHR_HAVE_NATIVE_LOCKS
-#endif
+#endif /* !ETHR_DISABLE_NATIVE_IMPLS */
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
-#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-#define ETHR_HAVE_OPTIMIZED_LOCKS 1
+#if defined(__GNUC__)
+# ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER __asm__ __volatile__("" : : : "memory")
+# endif
+# ifndef ETHR_SPIN_BODY
+# if defined(__i386__) || defined(__x86_64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("rep;nop" : : : "memory")
+# elif defined(__ia64__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("hint @pause" : : : "memory")
+# elif defined(__sparc__)
+# define ETHR_SPIN_BODY __asm__ __volatile__("membar #LoadLoad")
+# else
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+# endif
+# endif
+#elif defined(ETHR_WIN32_THREADS)
+# ifndef ETHR_COMPILER_BARRIER
+# include <intrin.h>
+# pragma intrinsic(_ReadWriteBarrier)
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+# ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY do {YieldProcessor();ETHR_COMPILER_BARRIER;} while(0)
+# endif
#endif
-typedef struct {
- unsigned open;
- ethr_mutex mtx;
- ethr_cond cnd;
-} ethr_gate;
+#define ETHR_YIELD_AFTER_BUSY_LOOPS 50
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Map ethread native atomics to ethread API atomics.
+ * ETHR_*MEMORY_BARRIER orders between locked and atomic accesses only,
+ * i.e. when our lock based atomic fallback is used, a noop is sufficient.
*/
-typedef ethr_native_atomic_t ethr_atomic_t;
+#define ETHR_MEMORY_BARRIER do { } while (0)
+#define ETHR_WRITE_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_MEMORY_BARRIER do { } while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER do { } while (0)
#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
-/*
- * Map ethread native spinlocks to ethread API spinlocks.
- */
-typedef ethr_native_spinlock_t ethr_spinlock_t;
-/*
- * Map ethread native rwlocks to ethread API rwlocks.
- */
-typedef ethr_native_rwlock_t ethr_rwlock_t;
+#ifndef ETHR_WRITE_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_WRITE_MEMORY_BARRIER_IS_FULL
#endif
-
-#ifdef ETHR_USE_RWMTX_FALLBACK
-typedef struct {
- ethr_mutex mtx;
- ethr_cond rcnd;
- ethr_cond wcnd;
- unsigned readers;
- unsigned waiting_readers;
- unsigned waiting_writers;
-#if ETHR_XCHK
- int initialized;
+#ifndef ETHR_READ_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+# define ETHR_READ_MEMORY_BARRIER_IS_FULL
#endif
-} ethr_rwmutex;
+#ifndef ETHR_READ_DEPEND_MEMORY_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_COMPILER_BARRIER
+# define ETHR_READ_DEPEND_MEMORY_BARRIER_IS_COMPILER_BARRIER
#endif
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
-typedef long ethr_atomic_t;
-#endif
+#define ETHR_FATAL_ERROR__(ERR) \
+ ethr_fatal_error__(__FILE__, __LINE__, __func__, (ERR))
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
+ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file,
+ int line,
+ const char *func,
+ int err);
-#if defined(ETHR_WIN32_THREADS)
-typedef struct {
- CRITICAL_SECTION cs;
-} ethr_spinlock_t;
-typedef struct {
- CRITICAL_SECTION cs;
- unsigned counter;
-} ethr_rwlock_t;
+void ethr_compiler_barrier_fallback(void);
+#ifndef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER ethr_compiler_barrier_fallback()
+#endif
-int ethr_do_spinlock_init(ethr_spinlock_t *lock);
-int ethr_do_rwlock_init(ethr_rwlock_t *lock);
+#ifndef ETHR_SPIN_BODY
+# define ETHR_SPIN_BODY ETHR_COMPILER_BARRIER
+#endif
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+#ifndef ETHR_YIELD
+# if defined(ETHR_HAVE_SCHED_YIELD)
+# ifdef ETHR_HAVE_SCHED_H
+# include <sched.h>
+# endif
+# include <errno.h>
+# if defined(ETHR_SCHED_YIELD_RET_INT)
+# define ETHR_YIELD() (sched_yield() < 0 ? errno : 0)
+# else
+# define ETHR_YIELD() (sched_yield(), 0)
+# endif
+# elif defined(ETHR_HAVE_PTHREAD_YIELD)
+# if defined(ETHR_PTHREAD_YIELD_RET_INT)
+# define ETHR_YIELD() pthread_yield()
+# else
+# define ETHR_YIELD() (pthread_yield(), 0)
+# endif
+# else
+# define ETHR_YIELD() (ethr_compiler_barrier(), 0)
+# endif
+#endif
+
+#include "ethr_optimized_fallbacks.h"
-#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-typedef struct {
- pthread_spinlock_t spnlck;
-} ethr_spinlock_t;
typedef struct {
- pthread_spinlock_t spnlck;
- unsigned counter;
-} ethr_rwlock_t;
-#define ETHR_RWLOCK_WRITERS (((unsigned) 1) << 31)
+ void *(*thread_create_prepare_func)(void);
+ void (*thread_create_parent_func)(void *);
+ void (*thread_create_child_func)(void *);
+} ethr_init_data;
-#else /* ethr mutex/rwmutex */
+#define ETHR_INIT_DATA_DEFAULT_INITER {NULL, NULL, NULL}
typedef struct {
- ethr_mutex mtx;
-} ethr_spinlock_t;
+ void *(*alloc)(size_t);
+ void *(*realloc)(void *, size_t);
+ void (*free)(void *);
+} ethr_memory_allocator;
+
+#define ETHR_MEM_ALLOC_DEF_INITER__ {NULL, NULL, NULL}
typedef struct {
- ethr_rwmutex rwmtx;
-} ethr_rwlock_t;
+ ethr_memory_allocator std;
+ ethr_memory_allocator sl;
+ ethr_memory_allocator ll;
+} ethr_memory_allocators;
-#endif /* end mutex/rwmutex */
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
+#define ETHR_MEM_ALLOCS_DEF_INITER__ \
+ {ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__, \
+ ETHR_MEM_ALLOC_DEF_INITER__}
typedef struct {
- void *(*alloc)(size_t);
- void *(*realloc)(void *, size_t);
- void (*free)(void *);
- void *(*thread_create_prepare_func)(void);
- void (*thread_create_parent_func)(void *);
- void (*thread_create_child_func)(void *);
-} ethr_init_data;
+ ethr_memory_allocators mem;
+ int reader_groups;
+ int main_threads;
+} ethr_late_init_data;
-#define ETHR_INIT_DATA_DEFAULT_INITER {malloc, realloc, free, NULL, NULL, NULL}
+#define ETHR_LATE_INIT_DATA_DEFAULT_INITER \
+ {ETHR_MEM_ALLOCS_DEF_INITER__, 0, 0}
typedef struct {
int detached; /* boolean (default false) */
@@ -835,18 +380,15 @@ typedef struct {
#define ETHR_THR_OPTS_DEFAULT_INITER {0, -1}
-#if defined(ETHR_CUSTOM_INLINE_FUNC_NAME_) || !defined(ETHR_TRY_INLINE_FUNCS)
-# define ETHR_NEED_MTX_PROTOTYPES__
-# define ETHR_NEED_RWMTX_PROTOTYPES__
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
# define ETHR_NEED_SPINLOCK_PROTOTYPES__
+# define ETHR_NEED_RWSPINLOCK_PROTOTYPES__
# define ETHR_NEED_ATOMIC_PROTOTYPES__
#endif
-#if !defined(ETHR_NEED_RWMTX_PROTOTYPES__) && defined(ETHR_USE_RWMTX_FALLBACK)
-# define ETHR_NEED_RWMTX_PROTOTYPES__
-#endif
-
int ethr_init(ethr_init_data *);
+int ethr_late_init(ethr_late_init_data *);
int ethr_install_exit_handler(void (*funcp)(void));
int ethr_thr_create(ethr_tid *, void * (*)(void *), void *, ethr_thr_opts *);
int ethr_thr_join(ethr_tid, void **);
@@ -854,65 +396,6 @@ int ethr_thr_detach(ethr_tid);
void ethr_thr_exit(void *);
ethr_tid ethr_self(void);
int ethr_equal_tids(ethr_tid, ethr_tid);
-int ethr_mutex_init(ethr_mutex *);
-#ifdef ETHR_HAVE_ETHR_REC_MUTEX_INIT
-int ethr_rec_mutex_init(ethr_mutex *);
-#endif
-int ethr_mutex_destroy(ethr_mutex *);
-int ethr_mutex_set_forksafe(ethr_mutex *);
-int ethr_mutex_unset_forksafe(ethr_mutex *);
-#ifdef ETHR_NEED_MTX_PROTOTYPES__
-int ethr_mutex_trylock(ethr_mutex *);
-int ethr_mutex_lock(ethr_mutex *);
-int ethr_mutex_unlock(ethr_mutex *);
-#endif
-int ethr_cond_init(ethr_cond *);
-int ethr_cond_destroy(ethr_cond *);
-int ethr_cond_signal(ethr_cond *);
-int ethr_cond_broadcast(ethr_cond *);
-int ethr_cond_wait(ethr_cond *, ethr_mutex *);
-int ethr_cond_timedwait(ethr_cond *, ethr_mutex *, ethr_timeval *);
-
-int ethr_rwmutex_init(ethr_rwmutex *);
-int ethr_rwmutex_destroy(ethr_rwmutex *);
-#ifdef ETHR_NEED_RWMTX_PROTOTYPES__
-int ethr_rwmutex_tryrlock(ethr_rwmutex *);
-int ethr_rwmutex_rlock(ethr_rwmutex *);
-int ethr_rwmutex_runlock(ethr_rwmutex *);
-int ethr_rwmutex_tryrwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwlock(ethr_rwmutex *);
-int ethr_rwmutex_rwunlock(ethr_rwmutex *);
-#endif
-
-#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
-int ethr_atomic_init(ethr_atomic_t *, long);
-int ethr_atomic_set(ethr_atomic_t *, long);
-int ethr_atomic_read(ethr_atomic_t *, long *);
-int ethr_atomic_inctest(ethr_atomic_t *, long *);
-int ethr_atomic_dectest(ethr_atomic_t *, long *);
-int ethr_atomic_inc(ethr_atomic_t *);
-int ethr_atomic_dec(ethr_atomic_t *);
-int ethr_atomic_addtest(ethr_atomic_t *, long, long *);
-int ethr_atomic_add(ethr_atomic_t *, long);
-int ethr_atomic_and_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_or_old(ethr_atomic_t *, long, long *);
-int ethr_atomic_xchg(ethr_atomic_t *, long, long *);
-int ethr_atomic_cmpxchg(ethr_atomic_t *, long, long, long *);
-#endif
-
-#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
-int ethr_spinlock_init(ethr_spinlock_t *);
-int ethr_spinlock_destroy(ethr_spinlock_t *);
-int ethr_spin_unlock(ethr_spinlock_t *);
-int ethr_spin_lock(ethr_spinlock_t *);
-
-int ethr_rwlock_init(ethr_rwlock_t *);
-int ethr_rwlock_destroy(ethr_rwlock_t *);
-int ethr_read_unlock(ethr_rwlock_t *);
-int ethr_read_lock(ethr_rwlock_t *);
-int ethr_write_unlock(ethr_rwlock_t *);
-int ethr_write_lock(ethr_rwlock_t *);
-#endif
int ethr_time_now(ethr_timeval *);
int ethr_tsd_key_create(ethr_tsd_key *);
@@ -920,13 +403,6 @@ int ethr_tsd_key_delete(ethr_tsd_key);
int ethr_tsd_set(ethr_tsd_key, void *);
void *ethr_tsd_get(ethr_tsd_key);
-int ethr_gate_init(ethr_gate *);
-int ethr_gate_destroy(ethr_gate *);
-int ethr_gate_close(ethr_gate *);
-int ethr_gate_let_through(ethr_gate *, unsigned);
-int ethr_gate_wait(ethr_gate *);
-int ethr_gate_swait(ethr_gate *, int);
-
#ifdef ETHR_HAVE_ETHR_SIG_FUNCS
#include <signal.h>
int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset);
@@ -935,534 +411,579 @@ int ethr_sigwait(const sigset_t *set, int *sig);
void ethr_compiler_barrier(void);
-#ifdef ETHR_TRY_INLINE_FUNCS
-
-#ifdef ETHR_HAVE_NATIVE_ATOMICS
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_init(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
-{
- ethr_native_atomic_set(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
-{
- *i = ethr_native_atomic_read(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ethr_native_atomic_add(var, incr);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *var,
- long i,
- long *testp)
-{
- *testp = ethr_native_atomic_add_return(var, i);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
-{
- ethr_native_atomic_inc(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
-{
- ethr_native_atomic_dec(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_inc_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *var, long *testp)
-{
- *testp = ethr_native_atomic_dec_return(var);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_and_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_or_retold(var, mask);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
-{
- *old = ethr_native_atomic_xchg(var, new);
- return 0;
-}
-
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
- long new,
- long expected,
- long *old)
-{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- *old = ethr_native_atomic_cmpxchg(var, new, expected);
- return 0;
-}
+#if defined(ETHR_HAVE_NATIVE_SPINLOCKS)
+typedef ethr_native_spinlock_t ethr_spinlock_t;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+typedef ethr_opt_spinlock_t ethr_spinlock_t;
+#elif defined(__WIN32__)
+typedef CRITICAL_SECTION ethr_spinlock_t;
+#else
+typedef pthread_mutex_t ethr_spinlock_t;
+#endif
-#endif /* ETHR_HAVE_NATIVE_ATOMICS */
+#ifdef ETHR_NEED_SPINLOCK_PROTOTYPES__
+int ethr_spinlock_init(ethr_spinlock_t *);
+int ethr_spinlock_destroy(ethr_spinlock_t *);
+void ethr_spin_unlock(ethr_spinlock_t *);
+void ethr_spin_lock(ethr_spinlock_t *);
+#endif
-#ifdef ETHR_HAVE_NATIVE_LOCKS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spinlock_init(lock);
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_init((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ if (!InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION *) lock, INT_MAX))
+ return ethr_win_get_errno__();
+ return 0;
+#else
+ return pthread_mutex_init((pthread_mutex_t *) lock, NULL);
+#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ return ethr_opt_spinlock_destroy((ethr_opt_spinlock_t *) lock);
+#elif defined(__WIN32__)
+ DeleteCriticalSection((CRITICAL_SECTION *) lock);
+ return 0;
+#else
+ return pthread_mutex_destroy((pthread_mutex_t *) lock);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_unlock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_unlock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ LeaveCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_unlock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
{
+#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
ethr_native_spin_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
-{
- ethr_native_rwlock_init(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
-{
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_read_lock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_unlock(lock);
- return 0;
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
-{
- ethr_native_write_lock(lock);
- return 0;
+#elif defined(ETHR_HAVE_OPTIMIZED_SPINLOCKS)
+ int err = ethr_opt_spin_lock((ethr_opt_spinlock_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#elif defined(__WIN32__)
+ EnterCriticalSection((CRITICAL_SECTION *) lock);
+#else
+ int err = pthread_mutex_lock((pthread_mutex_t *) lock);
+ if (err)
+ ETHR_FATAL_ERROR__(err);
+#endif
}
-#endif /* ETHR_HAVE_NATIVE_LOCKS */
-
#endif /* ETHR_TRY_INLINE_FUNCS */
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
/*
- * Fallbacks for atomics used in absence of optimized implementation.
+ * Map ethread native atomics to ethread API atomics.
*/
-#ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS
+typedef ethr_native_atomic_t ethr_atomic_t;
+#else
+typedef long ethr_atomic_t;
+#endif
+#ifdef ETHR_NEED_ATOMIC_PROTOTYPES__
+void ethr_atomic_init(ethr_atomic_t *, long);
+void ethr_atomic_set(ethr_atomic_t *, long);
+long ethr_atomic_read(ethr_atomic_t *);
+long ethr_atomic_inc_read(ethr_atomic_t *);
+long ethr_atomic_dec_read(ethr_atomic_t *);
+void ethr_atomic_inc(ethr_atomic_t *);
+void ethr_atomic_dec(ethr_atomic_t *);
+long ethr_atomic_add_read(ethr_atomic_t *, long);
+void ethr_atomic_add(ethr_atomic_t *, long);
+long ethr_atomic_read_band(ethr_atomic_t *, long);
+long ethr_atomic_read_bor(ethr_atomic_t *, long);
+long ethr_atomic_xchg(ethr_atomic_t *, long);
+long ethr_atomic_cmpxchg(ethr_atomic_t *, long, long);
+long ethr_atomic_read_acqb(ethr_atomic_t *);
+long ethr_atomic_inc_read_acqb(ethr_atomic_t *);
+void ethr_atomic_set_relb(ethr_atomic_t *, long);
+void ethr_atomic_dec_relb(ethr_atomic_t *);
+long ethr_atomic_dec_read_relb(ethr_atomic_t *);
+long ethr_atomic_cmpxchg_acqb(ethr_atomic_t *, long, long);
+long ethr_atomic_cmpxchg_relb(ethr_atomic_t *, long, long);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+#ifndef ETHR_HAVE_NATIVE_ATOMICS
/*
- * ETHR_MEMORY_BARRIER orders between locked and atomic accesses only,
- * i.e. when this atomic fallback is used a noop is sufficient.
+ * Fallbacks for atomics used in absence of a native implementation.
*/
-#define ETHR_MEMORY_BARRIER
#define ETHR_ATOMIC_ADDR_BITS 10
#define ETHR_ATOMIC_ADDR_SHIFT 6
typedef struct {
union {
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- pthread_spinlock_t spnlck;
-#else
- ethr_mutex mtx;
-#endif
+ ethr_spinlock_t lck;
char buf[ETHR_CACHE_LINE_SIZE];
} u;
} ethr_atomic_protection_t;
extern ethr_atomic_protection_t ethr_atomic_protection__[1 << ETHR_ATOMIC_ADDR_BITS];
-
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-
#define ETHR_ATOMIC_PTR2LCK__(PTR) \
(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.spnlck)
+ & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.lck)
#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
do { \
- pthread_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = pthread_spin_lock(slp__); \
- if (res__ != 0) \
- return res__; \
+ ethr_spinlock_t *slp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
+ ethr_spin_lock(slp__); \
{ EXPS; } \
- return pthread_spin_unlock(slp__); \
+ ethr_spin_unlock(slp__); \
} while (0)
-#else /* ethread mutex */
-
-#define ETHR_ATOMIC_PTR2LCK__(PTR) \
-(&ethr_atomic_protection__[((((unsigned long) (PTR)) >> ETHR_ATOMIC_ADDR_SHIFT) \
- & ((1 << ETHR_ATOMIC_ADDR_BITS) - 1))].u.mtx)
-
-#define ETHR_ATOMIC_OP_FALLBACK_IMPL__(AP, EXPS) \
-do { \
- ethr_mutex *mtxp__ = ETHR_ATOMIC_PTR2LCK__((AP)); \
- int res__ = ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(mtxp__); \
- if (res__ != 0) \
- return res__; \
- { EXPS; } \
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(mtxp__); \
-} while (0)
-
-#endif /* end ethread mutex */
-
-#ifdef ETHR_TRY_INLINE_FUNCS
+#endif
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_init)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_init(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(ethr_atomic_t *var, long i)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = (ethr_atomic_t) i);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set(var, i);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var = i);
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var, long *i)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *i = (long) *var);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inctest)(ethr_atomic_t *incp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *testp = (long) ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_add(var, incr);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
+#endif
+}
+
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_add_read)(ethr_atomic_t *var, long i)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_add_return(var, i);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += i; res = *var);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dectest)(ethr_atomic_t *decp, long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, *testp = (long) --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_inc(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, ++(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_add)(ethr_atomic_t *var, long incr)
-{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *var += incr);
-}
-
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_addtest)(ethr_atomic_t *incp,
- long i,
- long *testp)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, *incp += i; *testp = *incp);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec(var);
+#else
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, --(*var));
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc)(ethr_atomic_t *incp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(incp, ++(*incp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) ++(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(ethr_atomic_t *decp)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(ethr_atomic_t *var)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(decp, --(*decp));
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return(var);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = (long) --(*var));
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_and_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_band)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var &= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_and_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var &= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_atomic_or_old)(ethr_atomic_t *var,
- long mask,
- long *old)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_bor)(ethr_atomic_t *var,
+ long mask)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var |= mask);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_or_retold(var, mask);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var |= mask);
+ return res;
+#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_xchg)(ethr_atomic_t *var,
- long new,
- long *old)
+ long new)
{
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, *old = *var; *var = new);
-}
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_xchg(var, new);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var, res = *var; *var = new);
+ return res;
+#endif
+}
-/*
- * If *var == *old, replace *old with new, else do nothing.
- * In any case return the original value of *var in *old.
- */
-static ETHR_INLINE int
+static ETHR_INLINE long
ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(ethr_atomic_t *var,
long new,
- long expected,
- long *old)
+ long exp)
{
- /*
- * See "Extra memory barrier requirements" note at the top
- * of the file.
- */
- ETHR_ATOMIC_OP_FALLBACK_IMPL__(
- var,
- long old_val = *var;
- *old = old_val;
- if (__builtin_expect(old_val == expected, 1))
- *var = new;
- );
- return 0;
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#else
+ long res;
+ ETHR_ATOMIC_OP_FALLBACK_IMPL__(var,
+ {
+ res = *var;
+ if (__builtin_expect(res == exp, 1))
+ *var = new;
+ });
+ return res;
+#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-#endif /* #ifndef ETHR_HAVE_OPTIMIZED_ATOMIC_OPS */
-
/*
- * Fallbacks for spin locks, and rw spin locks used in absence of
- * optimized implementation.
+ * Important memory barrier requirements.
+ *
+ * The following atomic operations *must* supply a memory barrier of
+ * at least the type specified by their suffixes:
+ * _acqb = acquire barrier
+ * _relb = release barrier
*/
-#ifndef ETHR_HAVE_OPTIMIZED_LOCKS
-#ifdef ETHR_TRY_INLINE_FUNCS
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_read_acqb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_read_acqb(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_read)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_init)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read_acqb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_inc_return_acqb(var);
#else
- return ethr_mutex_init(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_inc_read)(var);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spinlock_destroy)(ethr_spinlock_t *lock)
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_set_relb)(ethr_atomic_t *var, long val)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_set_relb(var, val);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_set)(var, val);
+#endif
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_relb)(ethr_atomic_t *var)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ ethr_native_atomic_dec_relb(var);
#else
- return ethr_mutex_destroy(&lock->mtx);
+ ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec)(var);
#endif
}
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read_relb)(ethr_atomic_t *var)
+{
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_dec_return_relb(var);
+#else
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_dec_read)(var);
+#endif
+}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_unlock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_acqb)(ethr_atomic_t *var,
+ long new,
+ long exp)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_acqb(var, new, exp);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
}
-static ETHR_INLINE int
-ETHR_INLINE_FUNC_NAME_(ethr_spin_lock)(ethr_spinlock_t *lock)
+static ETHR_INLINE long
+ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg_relb)(ethr_atomic_t *var,
+ long new,
+ long exp)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_lock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_ATOMICS
+ return ethr_native_atomic_cmpxchg_relb(var, new, exp);
#else
- return ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(&lock->mtx);
+ return ETHR_INLINE_FUNC_NAME_(ethr_atomic_cmpxchg)(var, new, exp);
#endif
}
-#ifdef ETHR_USE_RWMTX_FALLBACK
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) X
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+typedef struct ethr_ts_event_ ethr_ts_event; /* Needed by ethr_mutex.h */
+
+#if defined(ETHR_WIN32_THREADS)
+# include "win/ethr_event.h"
#else
-#define ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(X) ETHR_INLINE_FUNC_NAME_(X)
+# include "pthread/ethr_event.h"
+#endif
+
+int ethr_set_main_thr_status(int, int);
+int ethr_get_main_thr_status(int *);
+
+struct ethr_ts_event_ {
+ ethr_ts_event *next;
+ ethr_ts_event *prev;
+ ethr_event event;
+ void *udata;
+ ethr_atomic_t uaflgs;
+ unsigned uflgs;
+ unsigned iflgs; /* for ethr lib only */
+ short rgix; /* for ethr lib only */
+ short mtix; /* for ethr lib only */
+};
+
+#define ETHR_TS_EV_ETHREAD (((unsigned) 1) << 0)
+#define ETHR_TS_EV_INITED (((unsigned) 1) << 1)
+#define ETHR_TS_EV_TMP (((unsigned) 1) << 2)
+#define ETHR_TS_EV_MAIN_THR (((unsigned) 1) << 3)
+
+int ethr_get_tmp_ts_event__(ethr_ts_event **tsepp);
+int ethr_free_ts_event__(ethr_ts_event *tsep);
+int ethr_make_ts_event__(ethr_ts_event **tsepp);
+
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+ethr_ts_event *ethr_get_ts_event(void);
+void ethr_leave_ts_event(ethr_ts_event *);
+#endif
+
+#if defined(ETHR_PTHREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern pthread_key_t ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = pthread_getspecific(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_make_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+    /* Nothing to do; the thread retains its ts event until it exits */
+}
+
#endif
+#elif defined(ETHR_WIN32_THREADS)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHREAD_IMPL__)
+
+extern DWORD ethr_ts_event_key__;
+
+static ETHR_INLINE ethr_ts_event *
+ETHR_INLINE_FUNC_NAME_(ethr_get_ts_event)(void)
+{
+ ethr_ts_event *tsep = TlsGetValue(ethr_ts_event_key__);
+ if (!tsep) {
+ int res = ethr_get_tmp_ts_event__(&tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ ETHR_ASSERT(tsep);
+ }
+ return tsep;
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_leave_ts_event)(ethr_ts_event *tsep)
+{
+ if (tsep->iflgs & ETHR_TS_EV_TMP) {
+ int res = ethr_free_ts_event__(tsep);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+#endif
+
+#endif
+
+#include "ethr_mutex.h" /* Need atomic declarations and tse */
+
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+typedef ethr_native_rwlock_t ethr_rwlock_t;
+#else
+typedef ethr_rwmutex ethr_rwlock_t;
+#endif
+
+#ifdef ETHR_NEED_RWSPINLOCK_PROTOTYPES__
+int ethr_rwlock_init(ethr_rwlock_t *);
+int ethr_rwlock_destroy(ethr_rwlock_t *);
+void ethr_read_unlock(ethr_rwlock_t *);
+void ethr_read_lock(ethr_rwlock_t *);
+void ethr_write_unlock(ethr_rwlock_t *);
+void ethr_write_lock(ethr_rwlock_t *);
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_init)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_init(&lock->spnlck, 0);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_rwlock_init(lock);
+ return 0;
#else
- return ethr_rwmutex_init(&lock->rwmtx);
+ return ethr_rwmutex_init_opt((ethr_rwmutex *) lock, NULL);
#endif
}
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_rwlock_destroy)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- return pthread_spin_destroy(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ return 0;
#else
- return ethr_rwmutex_destroy(&lock->rwmtx);
+ return ethr_rwmutex_destroy((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter--;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_runlock)(&lock->rwmtx);
+ ethr_rwmutex_runlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_read_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- int locked = 0;
- do {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- if ((lock->counter & ETHR_RWLOCK_WRITERS) == 0) {
- lock->counter++;
- locked = 1;
- }
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- } while (!locked);
- return 0;
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_read_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rlock)(&lock->rwmtx);
+ ethr_rwmutex_rlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_unlock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- lock->counter = 0;
- return pthread_spin_unlock(&lock->spnlck);
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_unlock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwunlock)(&lock->rwmtx);
+ ethr_rwmutex_rwunlock((ethr_rwmutex *) lock);
#endif
}
-static ETHR_INLINE int
+static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_write_lock)(ethr_rwlock_t *lock)
{
-#if defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
- while (1) {
- int res = pthread_spin_lock(&lock->spnlck);
- if (res != 0)
- return res;
- lock->counter |= ETHR_RWLOCK_WRITERS;
- if (lock->counter == ETHR_RWLOCK_WRITERS)
- return 0;
- res = pthread_spin_unlock(&lock->spnlck);
- if (res != 0)
- return res;
- }
+#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
+ ethr_native_write_lock(lock);
#else
- return ETHR_RWLOCK_RWMTX_FALLBACK_NAME_(ethr_rwmutex_rwlock)(&lock->rwmtx);
+ ethr_rwmutex_rwlock((ethr_rwmutex *) lock);
#endif
}
-#endif /* #ifdef ETHR_TRY_INLINE_FUNCS */
-
-#endif /* ETHR_HAVE_OPTIMIZED_LOCKS */
-
-#if defined(ETHR_HAVE_OPTIMIZED_LOCKS) || defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
-# define ETHR_HAVE_OPTIMIZED_SPINLOCK
-#endif
+#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* #ifndef ETHREAD_H__ */
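The atomic fallback above spreads contention over a table of 1 << 10 spinlocks, indexed by the variable's address with the low 6 bits (one 64-byte cache line) shifted away. A stand-alone sketch of that mapping follows; lock_index, ADDR_BITS and ADDR_SHIFT are illustrative names, not part of the patch:

    #include <stdio.h>

    #define ADDR_BITS  10                 /* ETHR_ATOMIC_ADDR_BITS */
    #define ADDR_SHIFT 6                  /* ETHR_ATOMIC_ADDR_SHIFT */

    /* Same computation as ETHR_ATOMIC_PTR2LCK__: hash the address
     * down to one of 1 << ADDR_BITS protection locks. */
    static unsigned long lock_index(const void *ptr)
    {
        return (((unsigned long) ptr) >> ADDR_SHIFT)
                & ((1UL << ADDR_BITS) - 1);
    }

    int main(void)
    {
        long vars[32];
        int i;
        /* Variables within the same 64-byte line share a lock; the
         * table wraps around after 1024 lines. */
        for (i = 0; i < 32; i += 8)
            printf("&vars[%2d] -> lock %lu\n", i, lock_index(&vars[i]));
        return 0;
    }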
diff --git a/erts/include/internal/ethread_header_config.h.in b/erts/include/internal/ethread_header_config.h.in
index c9fd87c2f6..5debb44756 100644
--- a/erts/include/internal/ethread_header_config.h.in
+++ b/erts/include/internal/ethread_header_config.h.in
@@ -29,26 +29,54 @@
/* Define if you have pthreads */
#undef ETHR_PTHREADS
+/* Define if you need the <nptl/pthread.h> header file. */
+#undef ETHR_NEED_NPTL_PTHREAD_H
+
/* Define if you have the <pthread.h> header file. */
#undef ETHR_HAVE_PTHREAD_H
/* Define if the pthread.h header file is in pthread/mit directory. */
#undef ETHR_HAVE_MIT_PTHREAD_H
-/* Define if you have the pthread_mutexattr_settype function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETTYPE
+/* Define if you have the pthread_spin_lock function. */
+#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
-/* Define if you have the pthread_mutexattr_setkind_np function. */
-#undef ETHR_HAVE_PTHREAD_MUTEXATTR_SETKIND_NP
+/* Define if you want to force usage of pthread rwlocks */
+#undef ETHR_FORCE_PTHREAD_RWLOCK
-/* Define if you have the pthread_atfork function. */
-#undef ETHR_HAVE_PTHREAD_ATFORK
+/* Define if you have the pthread_rwlockattr_setkind_np() function. */
+#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
-/* Define if you have the pthread_spin_lock function. */
-#undef ETHR_HAVE_PTHREAD_SPIN_LOCK
+/* Define if you have the PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP rwlock
+ attribute. */
+#undef ETHR_HAVE_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
-/* Define if you have a pthread_rwlock implementation that can be used */
-#undef ETHR_HAVE_PTHREAD_RWLOCK_INIT
+/* Define if you have a linux futex implementation. */
+#undef ETHR_HAVE_LINUX_FUTEX
+
+/* Define if you have gcc atomic operations */
+#undef ETHR_HAVE_GCC_ATOMIC_OPS
+
+/* Define if you prefer gcc native ethread implementations */
+#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+
+/* Define if you have the <sched.h> header file. */
+#undef ETHR_HAVE_SCHED_H
+
+/* Define if you have the sched_yield() function. */
+#undef ETHR_HAVE_SCHED_YIELD
+
+/* Define if you have the pthread_yield() function. */
+#undef ETHR_HAVE_PTHREAD_YIELD
+
+/* Define if pthread_yield() returns an int. */
+#undef ETHR_PTHREAD_YIELD_RET_INT
+
+/* Define if sched_yield() returns an int. */
+#undef ETHR_SCHED_YIELD_RET_INT
+
+/* Define if you want compatibility with x86 processors before the Pentium 4. */
+#undef ETHR_PRE_PENTIUM4_COMPAT
/* Define if you have the pthread_rwlockattr_setkind_np() function. */
#undef ETHR_HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP
@@ -63,6 +91,15 @@
/* Define if you prefer gcc native ethread implementations */
#undef ETHR_PREFER_GCC_NATIVE_IMPLS
+/* Define if you have libatomic_ops atomic operations */
+#undef ETHR_HAVE_LIBATOMIC_OPS
+
+/* Define if you prefer libatomic_ops native ethread implementations */
+#undef ETHR_PREFER_LIBATOMIC_OPS_NATIVE_IMPLS
+
+/* Define to the size of AO_t if libatomic_ops is used */
+#undef ETHR_SIZEOF_AO_T
+
/* Define if you want to turn on extra sanity checking in the ethread library */
#undef ETHR_XCHK
diff --git a/erts/include/internal/gcc/ethr_atomic.h b/erts/include/internal/gcc/ethr_atomic.h
index 775030c8d5..5fe6e23477 100644
--- a/erts/include/internal/gcc/ethr_atomic.h
+++ b/erts/include/internal/gcc/ethr_atomic.h
@@ -55,6 +55,7 @@ do { \
volatile long x___ = 0; \
(void) __sync_val_compare_and_swap(&x___, (long) 0, (long) 1); \
} while (0)
+#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MEMORY_BARRIER
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
@@ -157,6 +158,24 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
return act;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ return __sync_add_and_fetch(&var->counter, (long) 0);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif
#endif
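The read_acqb added above piggybacks on __sync_add_and_fetch: atomically adding zero returns the current value, and the gcc __sync builtins imply a full memory barrier, which is at least as strong as the acquire barrier the suffix promises. A stand-alone illustration (read_full_barrier is an illustrative name; compile with gcc):

    #include <stdio.h>

    /* Adding 0 atomically is a plain read, but the __sync builtin
     * makes it a full barrier as well -- stronger than the required
     * acquire barrier. */
    static long read_full_barrier(volatile long *p)
    {
        return __sync_add_and_fetch(p, 0L);
    }

    int main(void)
    {
        volatile long flag = 42;
        printf("%ld\n", read_full_barrier(&flag)); /* prints 42 */
        return 0;
    }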
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index 90b4c5f773..f28258059f 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -32,8 +32,11 @@ typedef struct {
volatile long counter;
} ethr_native_atomic_t;
-#ifdef __x86_64__
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("mfence" : : : "memory")
+#define ETHR_WRITE_MEMORY_BARRIER __asm__ __volatile__("sfence" : : : "memory")
+#define ETHR_READ_MEMORY_BARRIER __asm__ __volatile__("lfence" : : : "memory")
+#define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("" : : : "memory")
#else
#define ETHR_MEMORY_BARRIER \
do { \
@@ -42,7 +45,9 @@ do { \
} while (0)
#endif
-#ifdef ETHR_TRY_INLINE_FUNCS
+#define ETHR_ATOMIC_HAVE_INC_DEC_INSTRUCTIONS 1
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#ifdef __x86_64__
#define LONG_SUFFIX "q"
@@ -158,6 +163,22 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
return tmp;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+#define ethr_native_atomic_read_acqb ethr_native_atomic_read
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+#define ethr_native_atomic_set_relb ethr_native_atomic_set
+#else
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#endif
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#undef LONG_SUFFIX
#endif /* ETHR_TRY_INLINE_FUNCS */
diff --git a/erts/include/internal/i386/ethread.h b/erts/include/internal/i386/ethread.h
index fad8b108fa..ed43e77279 100644
--- a/erts/include/internal/i386/ethread.h
+++ b/erts/include/internal/i386/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_I386_ETHREAD_H */
diff --git a/erts/include/internal/i386/rwlock.h b/erts/include/internal/i386/rwlock.h
index c009be8ef1..be47f459ce 100644
--- a/erts/include/internal/i386/rwlock.h
+++ b/erts/include/internal/i386/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#define ETHR_RWLOCK_OFFSET (1<<24)
diff --git a/erts/include/internal/i386/spinlock.h b/erts/include/internal/i386/spinlock.h
index 2b4832e26a..0325324895 100644
--- a/erts/include/internal/i386/spinlock.h
+++ b/erts/include/internal/i386/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -31,7 +31,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
@@ -46,7 +46,7 @@ ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
* On i386 this needs to be a locked operation
* to avoid Pentium Pro errata 66 and 92.
*/
-#if defined(__x86_64__)
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
__asm__ __volatile__("" : : : "memory");
*(unsigned char*)&lock->lock = 0;
#else
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
new file mode 100644
index 0000000000..a6eb43a0bd
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -0,0 +1,292 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_ATOMIC_H__
+#define ETHR_LIBATOMIC_OPS_ATOMIC_H__
+
+#if !defined(ETHR_HAVE_NATIVE_ATOMICS) && defined(ETHR_HAVE_LIBATOMIC_OPS)
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#if (defined(__i386__) && !defined(ETHR_PRE_PENTIUM4_COMPAT)) \
+ || defined(__x86_64__)
+#define AO_USE_PENTIUM4_INSTRS
+#endif
+
+#include "atomic_ops.h"
+
+/*
+ * libatomic_ops can be downloaded from:
+ * http://www.hpl.hp.com/research/linux/atomic_ops/
+ *
+ * These operations need to be defined by libatomic_ops;
+ * otherwise, we won't compile:
+ * - AO_nop_full()
+ * - AO_load()
+ * - AO_store()
+ * - AO_compare_and_swap()
+ *
+ * The `AO_t' type also has to be at least as large as the
+ * `void *' and `long' types.
+ */
+
+#if ETHR_SIZEOF_AO_T < ETHR_SIZEOF_PTR
+#error The AO_t type is too small
+#endif
+
+typedef struct {
+ volatile AO_t counter;
+} ethr_native_atomic_t;
+
+#define ETHR_MEMORY_BARRIER AO_nop_full()
+#ifdef AO_HAVE_nop_write
+# define ETHR_WRITE_MEMORY_BARRIER AO_nop_write()
+#else
+# define ETHR_WRITE_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_HAVE_nop_read
+# define ETHR_READ_MEMORY_BARRIER AO_nop_read()
+#else
+# define ETHR_READ_MEMORY_BARRIER ETHR_MEMORY_BARRIER
+#endif
+#ifdef AO_NO_DD_ORDERING
+# define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_READ_MEMORY_BARRIER
+#else
+# define ETHR_READ_DEPEND_MEMORY_BARRIER __asm__ __volatile__("":::"memory")
+#endif
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long value)
+{
+ AO_store(&var->counter, (AO_t) value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long value)
+{
+ ethr_native_atomic_set(var, value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return (long) AO_load(&var->counter);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
+{
+#ifdef AO_HAVE_fetch_and_add
+ return ((long) AO_fetch_and_add(&var->counter, (AO_t) incr)) + incr;
+#else
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp + (AO_t) incr;
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) new;
+ }
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) ethr_native_atomic_add_return(var, incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1
+ return ((long) AO_fetch_and_add1(&var->counter)) + 1;
+#else
+ return ethr_native_atomic_add_return(var, 1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_inc_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1
+ return ((long) AO_fetch_and_sub1(&var->counter)) - 1;
+#else
+ return ethr_native_atomic_add_return(var, -1);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp & ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ AO_t new = exp | ((AO_t) mask);
+ if (AO_compare_and_swap(&var->counter, exp, new))
+ return (long) exp;
+ }
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long exp)
+{
+ long act;
+ do {
+ if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ while (1) {
+ AO_t exp = AO_load(&var->counter);
+ if (AO_compare_and_swap(&var->counter, exp, (AO_t) new))
+ return (long) exp;
+ }
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_load_acquire
+ return (long) AO_load_acquire(&var->counter);
+#else
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_add1_acquire
+ return ((long) AO_fetch_and_add1_acquire(&var->counter)) + 1;
+#else
+ long res = ethr_native_atomic_add_return(var, 1);
+ ETHR_MEMORY_BARRIER;
+ return res;
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long value)
+{
+#ifdef AO_HAVE_store_release
+ AO_store_release(&var->counter, (AO_t) value);
+#else
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef AO_HAVE_fetch_and_sub1_release
+ return ((long) AO_fetch_and_sub1_release(&var->counter)) - 1;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ (void) ethr_native_atomic_dec_return_relb(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_acquire
+ long act;
+ do {
+ if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ AO_nop_full();
+ return act;
+#else
+ long act = ethr_native_atomic_cmpxchg(var, new, exp);
+ ETHR_MEMORY_BARRIER;
+ return act;
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+#ifdef AO_HAVE_compare_and_swap_release
+ long act;
+ do {
+ if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+ act = (long) AO_load(&var->counter);
+ } while (act == exp);
+ return act;
+#else
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+#endif
+}
+
+
+#endif
+
+#endif
+
+#endif
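The cmpxchg loops above have to reconstruct a value-returning compare-and-swap from libatomic_ops' boolean AO_compare_and_swap. If the CAS fails but a re-read observes `exp' again, returning `exp' would look like success to the caller, so the loop must retry instead. A stand-alone sketch of the same construction, with GCC's __sync_bool_compare_and_swap standing in for AO_compare_and_swap (illustrative; compile with gcc):

    /* Build a cmpxchg that returns the old value on top of a CAS
     * that only reports success/failure. */
    static long cmpxchg_from_bool_cas(volatile long *p, long new_val, long exp)
    {
        long act;
        do {
            if (__sync_bool_compare_and_swap(p, exp, new_val))
                return exp;        /* swap done; old value was exp */
            act = *p;              /* failed; report the value we saw */
        } while (act == exp);      /* raced back to exp? must retry */
        return act;
    }

    int main(void)
    {
        volatile long x = 1;
        long old = cmpxchg_from_bool_cas(&x, 2, 1); /* old == 1, x == 2 */
        return (old == 1 && x == 2) ? 0 : 1;
    }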
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
new file mode 100644
index 0000000000..ee73ba73bc
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_LIBATOMIC_OPS_H__
+#define ETHREAD_LIBATOMIC_OPS_H__
+
+#include "ethr_atomic.h"
+
+#endif
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index 105d874995..f21f7c9588 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -34,7 +34,7 @@ typedef struct {
#define ETHR_MEMORY_BARRIER __asm__ __volatile__("sync" : : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
@@ -205,6 +205,26 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
return old;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_dec_relb ethr_native_atomic_dec_return
+#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_PPC_ATOMIC_H */
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
index d2a72c3dc1..12efc1b653 100644
--- a/erts/include/internal/ppc32/ethread.h
+++ b/erts/include/internal/ppc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
index 9bdab12826..19ec26ab68 100644
--- a/erts/include/internal/ppc32/rwlock.h
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
index 034c20c143..c8460a3e8a 100644
--- a/erts/include/internal/ppc32/spinlock.h
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -34,7 +34,7 @@ typedef struct {
volatile unsigned int lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/pthread/ethr_event.h b/erts/include/internal/pthread/ethr_event.h
new file mode 100644
index 0000000000..104ec287e0
--- /dev/null
+++ b/erts/include/internal/pthread/ethr_event.h
@@ -0,0 +1,151 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#if defined(ETHR_HAVE_LINUX_FUTEX) && defined(ETHR_HAVE_NATIVE_ATOMICS)
+/* --- Linux futex implementation of ethread events ------------------------- */
+#define ETHR_LINUX_FUTEX_IMPL__
+
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/time.h>
+
+/*
+ * Note: Linux futexes operate on 32-bit integers, but
+ *       ethr_native_atomic_t is 64 bits wide on 64-bit
+ *       platforms. This has to be taken into account.
+ *       Therefore, every byte of each value used here
+ *       looks the same.
+ */
+
+#if ETHR_SIZEOF_PTR == 8
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffffffffffL
+#define ETHR_EVENT_OFF__ 0x7777777777777777L
+#define ETHR_EVENT_ON__ 0L
+
+#elif ETHR_SIZEOF_PTR == 4
+
+#define ETHR_EVENT_OFF_WAITER__ 0xffffffffL
+#define ETHR_EVENT_OFF__ 0x77777777L
+#define ETHR_EVENT_ON__ 0L
+
+#else
+
+#error Unsupported pointer size
+
+#endif
+
+#if defined(FUTEX_WAIT_PRIVATE) && defined(FUTEX_WAKE_PRIVATE)
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT_PRIVATE
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE_PRIVATE
+#else
+# define ETHR_FUTEX_WAIT__ FUTEX_WAIT
+# define ETHR_FUTEX_WAKE__ FUTEX_WAKE
+#endif
+
+typedef struct {
+ ethr_atomic_t futex;
+} ethr_event;
+
+#define ETHR_FUTEX__(FTX, OP, VAL) \
+ (-1 == syscall(__NR_futex, (void *) (FTX), (OP), (int) (VAL), NULL, NULL, 0)\
+ ? errno : 0)
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->futex, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAKE__, 1);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->futex, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#elif defined(ETHR_PTHREADS)
+/* --- Posix mutex/cond implementation of events ---------------------------- */
+
+typedef struct {
+ ethr_atomic_t state;
+ pthread_mutex_t mtx;
+ pthread_cond_t cnd;
+} ethr_event;
+
+#define ETHR_EVENT_OFF_WAITER__ -1L
+#define ETHR_EVENT_OFF__ 1L
+#define ETHR_EVENT_ON__ 0L
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ long val;
+ ETHR_WRITE_MEMORY_BARRIER;
+ val = ethr_atomic_xchg(&e->state, ETHR_EVENT_ON__);
+ if (val == ETHR_EVENT_OFF_WAITER__) {
+ int res = pthread_mutex_lock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_cond_signal(&e->cnd);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ res = pthread_mutex_unlock(&e->mtx);
+ if (res != 0)
+ ETHR_FATAL_ERROR__(res);
+ }
+}
+
+static void ETHR_INLINE
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ ethr_atomic_set(&e->state, ETHR_EVENT_OFF__);
+ ETHR_MEMORY_BARRIER;
+}
+
+#endif
+
+#endif
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
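ethr_event_wait itself lives in the ethread C source and is not part of this diff. A plausible sketch of the waiting side implied by the state constants above (event_wait_sketch is hypothetical; it assumes <errno.h> plus the declarations and macros in this header):

    #include <errno.h>

    /* Hypothetical waiter: move the word OFF -> OFF_WAITER so that
     * ethr_event_set() knows it must futex-wake us, then sleep until
     * the word becomes ON. */
    static int event_wait_sketch(ethr_event *e)
    {
        int res;
        long val;
        while (1) {
            val = ethr_atomic_read(&e->futex);
            if (val == ETHR_EVENT_ON__)
                return 0;                      /* event already set */
            if (val == ETHR_EVENT_OFF__) {
                /* cmpxchg(var, new, exp): advertise ourselves */
                val = ethr_atomic_cmpxchg(&e->futex,
                                          ETHR_EVENT_OFF_WAITER__,
                                          ETHR_EVENT_OFF__);
                if (val == ETHR_EVENT_ON__)
                    return 0;                  /* set() beat us to it */
            }
            res = ETHR_FUTEX__(&e->futex, ETHR_FUTEX_WAIT__,
                               ETHR_EVENT_OFF_WAITER__);
            if (res != 0 && res != EWOULDBLOCK && res != EINTR)
                return res;                    /* real error */
        }
    }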
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 8fde449a52..2a995d4465 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -32,7 +32,7 @@ typedef struct {
__asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore\n" \
: : : "memory")
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
#if defined(__arch64__)
#define CASX "casx"
@@ -172,6 +172,43 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
return new;
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_set(var, i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ return ethr_native_atomic_dec_return(var);
+}
+
+#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
+#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHR_SPARC32_ATOMIC_H */
diff --git a/erts/include/internal/sparc32/ethread.h b/erts/include/internal/sparc32/ethread.h
index 1d55399640..dca113b4d6 100644
--- a/erts/include/internal/sparc32/ethread.h
+++ b/erts/include/internal/sparc32/ethread.h
@@ -29,6 +29,7 @@
#include "rwlock.h"
#define ETHR_HAVE_NATIVE_ATOMICS 1
-#define ETHR_HAVE_NATIVE_LOCKS 1
+#define ETHR_HAVE_NATIVE_SPINLOCKS 1
+#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#endif /* ETHREAD_SPARC32_ETHREAD_H */
diff --git a/erts/include/internal/sparc32/rwlock.h b/erts/include/internal/sparc32/rwlock.h
index 12448e0b06..465ec96866 100644
--- a/erts/include/internal/sparc32/rwlock.h
+++ b/erts/include/internal/sparc32/rwlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile int lock;
} ethr_native_rwlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
diff --git a/erts/include/internal/sparc32/spinlock.h b/erts/include/internal/sparc32/spinlock.h
index b4fe48b714..493d514210 100644
--- a/erts/include/internal/sparc32/spinlock.h
+++ b/erts/include/internal/sparc32/spinlock.h
@@ -1,19 +1,19 @@
/*
* %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
+ *
+ * Copyright Ericsson AB 2005-2010. All Rights Reserved.
+ *
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
* compliance with the License. You should have received a copy of the
* Erlang Public License along with this software. If not, it can be
* retrieved online at http://www.erlang.org/.
- *
+ *
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and limitations
* under the License.
- *
+ *
* %CopyrightEnd%
*/
@@ -29,7 +29,7 @@ typedef struct {
volatile unsigned char lock;
} ethr_native_spinlock_t;
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
diff --git a/erts/include/internal/tile/atomic.h b/erts/include/internal/tile/atomic.h
index 5e4c7ac9fe..69569d82d1 100644
--- a/erts/include/internal/tile/atomic.h
+++ b/erts/include/internal/tile/atomic.h
@@ -34,7 +34,7 @@ typedef struct {
#define ETHR_MEMORY_BARRIER __insn_mf()
-#ifdef ETHR_TRY_INLINE_FUNCS
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
static ETHR_INLINE void
ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
@@ -45,7 +45,6 @@ ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
static ETHR_INLINE void
ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
{
- __insn_mf();
atomic_exchange_acq(&var->counter, i);
}
@@ -58,28 +57,24 @@ ethr_native_atomic_read(ethr_native_atomic_t *var)
static ETHR_INLINE void
ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
atomic_add(&var->counter, incr);
}
static ETHR_INLINE void
ethr_native_atomic_inc(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_increment(&var->counter);
}
static ETHR_INLINE void
ethr_native_atomic_dec(ethr_native_atomic_t *var)
{
- __insn_mf();
atomic_decrement(&var->counter);
}
static ETHR_INLINE long
ethr_native_atomic_add_return(ethr_native_atomic_t *var, long incr)
{
- __insn_mf();
return atomic_exchange_and_add(&var->counter, incr) + incr;
}
@@ -98,33 +93,81 @@ ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
static ETHR_INLINE long
ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_and_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
{
- __insn_mf();
return atomic_or_val(&var->counter, mask);
}
static ETHR_INLINE long
ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
{
- __insn_mf();
return atomic_exchange_acq(&var->counter, val);
}
static ETHR_INLINE long
ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long expected)
{
- /* Implement a barrier suitable for a mutex unlock. */
- __insn_mf();
return atomic_compare_and_exchange_val_acq(&var->counter, new, expected);
}
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_read(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_inc_return(var);
+ ETHR_MEMORY_BARRIER;
+ return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long val)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_set(var, val);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long exp)
+{
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long exp)
+{
+ ETHR_MEMORY_BARRIER;
+ return ethr_native_atomic_cmpxchg(var, new, exp);
+}
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_TILE_ATOMIC_H */
diff --git a/erts/include/internal/win/ethr_atomic.h b/erts/include/internal/win/ethr_atomic.h
new file mode 100644
index 0000000000..500459dd6c
--- /dev/null
+++ b/erts/include/internal/win/ethr_atomic.h
@@ -0,0 +1,244 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_WIN_ATOMIC_H__
+#define ETHR_WIN_ATOMIC_H__
+
+#ifdef _MSC_VER
+# if _MSC_VER < 1300
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0 /* Don't trust really old compilers */
+# else
+# if defined(_M_IX86)
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else /* I.e. IA64 */
+# if _MSC_VER >= 1400
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 1
+# else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+# endif
+# endif
+# endif
+# if _MSC_VER >= 1400
+# include <intrin.h>
+# undef ETHR_COMPILER_BARRIER
+# define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
+# endif
+#pragma intrinsic(_ReadWriteBarrier)
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#else
+# define ETHR_IMMED_ATOMIC_SET_GET_SAFE__ 0
+#endif
+
+/*
+ * No configure test checking for the _Interlocked*_{acq,rel} and
+ * Interlocked*{Acquire,Release} functions has been written yet...
+ *
+ * Note that these are pure optimizations for the Itanium
+ * processor.
+ */
+
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+#pragma intrinsic(_InterlockedCompareExchange_acq)
+#endif
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+#pragma intrinsic(_InterlockedCompareExchange_rel)
+#endif
+
+
+typedef struct {
+ volatile LONG value;
+} ethr_native_atomic_t;
+
+/* Full memory barrier via a dummy interlocked compare-exchange on a
+   stack-local; all Interlocked* operations imply a full barrier. */
+#define ETHR_MEMORY_BARRIER \
+do { \
+ volatile LONG x___ = 0; \
+ _InterlockedCompareExchange(&x___, (LONG) 1, (LONG) 0); \
+} while (0)
+
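The ETHR_MEMORY_BARRIER above gets its full-fence effect from the Windows guarantee that every Interlocked* operation implies a full memory barrier; a dummy compare-exchange on a stack-local LONG works with any compiler that can call the Interlocked API. Where a newer toolchain can be assumed, the Win32 MemoryBarrier() macro expresses the same thing more directly; a sketch of both spellings, the second being what the diff effectively does:

    #include <windows.h>

    static void full_fence_direct(void)
    {
        MemoryBarrier();   /* Win32 macro; assumes a toolchain that provides it */
    }

    static void full_fence_dummy_cas(void)
    {
        volatile LONG x = 0;
        /* Any Interlocked* operation implies a full memory barrier. */
        InterlockedCompareExchange(&x, (LONG) 1, (LONG) 0);
    }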
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, long i)
+{
+ var->value = (LONG) i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set(ethr_native_atomic_t *var, long i)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ var->value = (LONG) i;
+#else
+ (void) InterlockedExchange(&var->value, (LONG) i);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+#if ETHR_IMMED_ATOMIC_SET_GET_SAFE__
+ return var->value;
+#else
+ return InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, long incr)
+{
+ (void) InterlockedExchangeAdd(&var->value, (LONG) incr);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, long i)
+{
+ LONG tmp = InterlockedExchangeAdd(&var->value, (LONG) i);
+ return tmp + i;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ (void) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ (void) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedIncrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ return (long) InterlockedDecrement(&var->value);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedAnd(&var->value, mask);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, long mask)
+{
+ return (long) _InterlockedOr(&var->value, mask);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
+{
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+}
+
+
+static ETHR_INLINE long
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, long new)
+{
+ return (long) InterlockedExchange(&var->value, (LONG) new);
+}
+
+/*
+ * Atomic ops with at least specified barriers.
+ */
+
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDEXCHANGEADDACQUIRE
+ return (long) InterlockedExchangeAddAcquire(&var->value, (LONG) 0);
+#else
+ return (long) InterlockedExchangeAdd(&var->value, (LONG) 0);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDINCREMENTACQUIRE
+ return (long) InterlockedIncrementAcquire(&var->value);
+#else
+ return (long) InterlockedIncrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+ /* InterlockedExchange() implies a full memory barrier, which
+    subsumes the release barrier required here. */
+ (void) InterlockedExchange(&var->value, (LONG) i);
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ (void) InterlockedDecrementRelease(&var->value);
+#else
+ (void) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDDECREMENTRELEASE
+ return (long) InterlockedDecrementRelease(&var->value);
+#else
+ return (long) InterlockedDecrement(&var->value);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_ACQ
+ return (long) _InterlockedCompareExchange_acq(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+{
+#ifdef ETHR_HAVE_INTERLOCKEDCOMPAREEXCHANGE_REL
+ return (long) _InterlockedCompareExchange_rel(&var->value, (LONG) new, (LONG) old);
+#else
+ return (long) _InterlockedCompareExchange(&var->value, (LONG) new, (LONG) old);
+#endif
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS || ETHR_AUX_IMPL__ */
+
+#endif /* ETHR_WIN_ATOMIC_H__ */
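The acquire/release split in the operations above is exactly what lock implementations need: an acquire barrier when taking a lock, so reads inside the critical section cannot be hoisted above it, and a release barrier when dropping it, so writes inside the critical section cannot sink below it. A sketch of a spinlock over these native atomics; the demo_* names are hypothetical, not the library's spinlock API:

    /* Hypothetical spinlock built on the native atomics above. */
    typedef struct { ethr_native_atomic_t lck; } demo_spinlock_t;

    static void demo_spin_init(demo_spinlock_t *l)
    {
        ethr_native_atomic_init(&l->lck, 0);
    }

    static void demo_spin_lock(demo_spinlock_t *l)
    {
        /* cmpxchg returns the previous value; 0 means we won the lock.
           The acquire barrier pins the critical section after this point. */
        while (ethr_native_atomic_cmpxchg_acqb(&l->lck, 1, 0) != 0)
            ; /* spin */
    }

    static void demo_spin_unlock(demo_spinlock_t *l)
    {
        /* The release barrier makes all critical-section stores visible
           before the lock is observed as free. */
        ethr_native_atomic_set_relb(&l->lck, 0);
    }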
diff --git a/erts/include/internal/win/ethr_event.h b/erts/include/internal/win/ethr_event.h
new file mode 100644
index 0000000000..af57c20f91
--- /dev/null
+++ b/erts/include/internal/win/ethr_event.h
@@ -0,0 +1,62 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Author: Rickard Green
+ */
+
+#define ETHR_EVENT_OFF_WAITER__ ((LONG) -1)
+#define ETHR_EVENT_OFF__ ((LONG) 1)
+#define ETHR_EVENT_ON__ ((LONG) 0)
+
+typedef struct {
+ volatile LONG state;
+ HANDLE handle;
+} ethr_event;
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_set)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important here */
+ LONG state = InterlockedExchange(&e->state, ETHR_EVENT_ON__);
+ if (state == ETHR_EVENT_OFF_WAITER__) {
+ if (!SetEvent(e->handle))
+ ETHR_FATAL_ERROR__(ethr_win_get_errno__());
+ }
+}
+
+static ETHR_INLINE void
+ETHR_INLINE_FUNC_NAME_(ethr_event_reset)(ethr_event *e)
+{
+ /* InterlockedExchange() implies a full memory barrier, which is important here */
+ InterlockedExchange(&e->state, ETHR_EVENT_OFF__);
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS || ETHR_EVENT_IMPL__ */
+
+int ethr_event_init(ethr_event *e);
+int ethr_event_destroy(ethr_event *e);
+int ethr_event_wait(ethr_event *e);
+int ethr_event_swait(ethr_event *e, int spincount);
+#if !defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_EVENT_IMPL__)
+void ethr_event_set(ethr_event *e);
+void ethr_event_reset(ethr_event *e);
+#endif
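The three state values tell the whole story of this event: ETHR_EVENT_ON__ (0) means set, ETHR_EVENT_OFF__ (1) means cleared with no sleeper, and ETHR_EVENT_OFF_WAITER__ (-1) means cleared with a thread blocked on the kernel HANDLE, which is why ethr_event_set() only pays for the SetEvent() call when it observes the waiter state. The wait side is declared here but implemented elsewhere; a hypothetical sketch of how a waiter pairs with these primitives (the real ethr_event_wait() will differ in detail, e.g. in spinning and error handling):

    /* Hypothetical waiter loop; not the library's implementation. */
    static int demo_event_wait(ethr_event *e)
    {
        while (1) {
            if (e->state == ETHR_EVENT_ON__)
                return 0; /* event already set */
            /* Transition OFF -> OFF_WAITER so that ethr_event_set()
               knows it must call SetEvent(). */
            if (InterlockedCompareExchange(&e->state,
                                           ETHR_EVENT_OFF_WAITER__,
                                           ETHR_EVENT_OFF__) == ETHR_EVENT_ON__)
                return 0;
            /* Block until a setter wakes us via the kernel handle. */
            if (WaitForSingleObject(e->handle, INFINITE) != WAIT_OBJECT_0)
                return -1; /* simplified error handling */
        }
    }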
diff --git a/erts/include/internal/win/ethread.h b/erts/include/internal/win/ethread.h
new file mode 100644
index 0000000000..b52710f6a3
--- /dev/null
+++ b/erts/include/internal/win/ethread.h
@@ -0,0 +1,31 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2010. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native atomics ethread support when using VC++
+ * Author: Rickard Green
+ */
+
+#ifndef ETHREAD_WIN_H__
+#define ETHREAD_WIN_H__
+
+#include "ethr_atomic.h"
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+
+#endif /* ETHREAD_WIN_H__ */
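ETHR_HAVE_NATIVE_ATOMICS is the switch the generic ethread layer keys on: when a platform header like this one defines it, the common code maps its atomics onto the ethr_native_atomic_* operations; otherwise it falls back to emulation, or to libatomic_ops when configured with --with-libatomic_ops=PATH. An illustrative sketch of the selection pattern, not the literal contents of the top-level ethread.h:

    /* Illustrative platform selection; details differ in the real header. */
    #if defined(_WIN32)
    #  include "win/ethread.h"              /* defines ETHR_HAVE_NATIVE_ATOMICS */
    #elif defined(ETHR_HAVE_LIBATOMIC_OPS)
    #  include "libatomic_ops/ethread.h"
    #endif

    #ifdef ETHR_HAVE_NATIVE_ATOMICS
    #  define ETHR_ATOMIC_INC(V) ethr_native_atomic_inc(V)
    #else
    #  define ETHR_ATOMIC_INC(V) ethr_fallback_atomic_inc(V) /* hypothetical fallback */
    #endif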