author    Rickard Green <[email protected]>  2014-11-25 19:34:50 +0100
committer Rickard Green <[email protected]>  2014-11-25 19:34:50 +0100
commit    7c14d7b35f3fe6df4ab44a30ba1e3c6dac00a70b (patch)
tree      d0aa0b00417515de605691f86b5f9c5f8dbf9033 /erts/include/internal
parent    23c0cebf707e61a0f2b9e9eb472d7cf3ad4eb94f (diff)
parent    26e93069d53a44e274bc7c5f871fa8a7bd1dd94b (diff)
Merge branch 'maint'
* maint:
  Implement support for double word atomics using libatomic_ops
  Improve usage of libatomic_ops for word size atomics
  Optimize atomic ops with release barrier for 32-bit PowerPC
Diffstat (limited to 'erts/include/internal')
-rw-r--r--  erts/include/internal/ethread.h                       |   2
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_atomic.h     | 343
-rw-r--r--  erts/include/internal/libatomic_ops/ethr_dw_atomic.h  | 567
-rw-r--r--  erts/include/internal/libatomic_ops/ethread.h         |   3
-rw-r--r--  erts/include/internal/ppc32/atomic.h                  | 146
5 files changed, 1050 insertions, 11 deletions
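
The changes below all key off libatomic_ops' convention of advertising each primitive/barrier combination it can implement through an AO_HAVE_* macro; every new ethread operation is wrapped in a matching #ifdef so it is only defined where the underlying primitive exists. A minimal standalone probe of that convention (not part of the patch; it only assumes a C compiler, an installed <atomic_ops.h>, and a few of the macros tested below):

/* Not part of the patch: print which AO_HAVE_* feature macros this
 * platform's libatomic_ops defines. The program only inspects
 * preprocessor macros, so no extra linking should be needed. */
#include <stdio.h>
#include <atomic_ops.h>

int main(void)
{
#ifdef AO_HAVE_fetch_compare_and_swap_full
    puts("AO_fetch_compare_and_swap_full: available");
#else
    puts("AO_fetch_compare_and_swap_full: not available");
#endif
#ifdef AO_HAVE_compare_and_swap_full
    puts("AO_compare_and_swap_full: available");
#endif
#ifdef AO_HAVE_double_t
    puts("AO_double_t: available (double word atomics may be usable)");
#endif
    return 0;
}
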
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index 72c054b588..ad5d05704c 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -364,8 +364,8 @@ extern ethr_runtime_t ethr_runtime__;
# include "sparc64/ethread.h"
# endif
# endif
-# include "gcc/ethread.h"
# include "libatomic_ops/ethread.h"
+# include "gcc/ethread.h"
# endif
# elif defined(ETHR_HAVE_LIBATOMIC_OPS)
# include "libatomic_ops/ethread.h"
diff --git a/erts/include/internal/libatomic_ops/ethr_atomic.h b/erts/include/internal/libatomic_ops/ethr_atomic.h
index fb1288c330..734cdf0890 100644
--- a/erts/include/internal/libatomic_ops/ethr_atomic.h
+++ b/erts/include/internal/libatomic_ops/ethr_atomic.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2010-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2010-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -32,22 +32,23 @@
* These operations need to be defined by libatomic_ops;
* otherwise, we won't compile:
* - AO_nop_full()
- * - AO_load()
- * - AO_store()
- * - AO_compare_and_swap()
+ * - AO_load() || AO_load_acquire()
+ * - AO_store() || AO_store_release()
+ * - AO_compare_and_swap() || AO_compare_and_swap_acquire()
+ * || AO_compare_and_swap_release() || AO_compare_and_swap_full()
*
*/
#if ETHR_SIZEOF_AO_T == 4
#define ETHR_HAVE_NATIVE_ATOMIC32 1
-#define ETHR_NATIVE_ATOMIC32_IMPL "libatomic_ops"
+#define ETHR_NATIVE_ATOMIC32_IMPL ETHR_NATIVE_IMPL__
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic32_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic32_t
#define ETHR_AINT_T__ ethr_sint32_t
#define ETHR_AINT_SUFFIX__ "l"
#elif ETHR_SIZEOF_AO_T == 8
#define ETHR_HAVE_NATIVE_ATOMIC64 1
-#define ETHR_NATIVE_ATOMIC64_IMPL "libatomic_ops"
+#define ETHR_NATIVE_ATOMIC64_IMPL ETHR_NATIVE_IMPL__
#define ETHR_NATMC_FUNC__(X) ethr_native_atomic64_ ## X
#define ETHR_ATMC_T__ ethr_native_atomic64_t
#define ETHR_AINT_T__ ethr_sint64_t
@@ -74,6 +75,8 @@ ETHR_NATMC_FUNC__(addr)(ETHR_ATMC_T__ *var)
return (ETHR_AINT_T__ *) &var->counter;
}
+#ifdef AO_HAVE_store
+
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET 1
#else
@@ -86,6 +89,24 @@ ETHR_NATMC_FUNC__(set)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
AO_store(&var->counter, (AO_t) value);
}
+#endif
+
+#ifdef AO_HAVE_store_write
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_WB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_WB 1
+#endif
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_wb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ AO_store_write(&var->counter, (AO_t) value);
+}
+
+#endif
+
#ifdef AO_HAVE_store_release
#if ETHR_SIZEOF_AO_T == 4
@@ -102,6 +123,24 @@ ETHR_NATMC_FUNC__(set_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
#endif
+#ifdef AO_HAVE_store_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_SET_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_SET_MB 1
+#endif
+
+static ETHR_INLINE void
+ETHR_NATMC_FUNC__(set_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ value)
+{
+ AO_store_full(&var->counter, (AO_t) value);
+}
+
+#endif
+
+#ifdef AO_HAVE_load
+
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ 1
#else
@@ -114,6 +153,24 @@ ETHR_NATMC_FUNC__(read)(ETHR_ATMC_T__ *var)
return (ETHR_AINT_T__) AO_load(&var->counter);
}
+#endif
+
+#ifdef AO_HAVE_load_read
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_RB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_RB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_rb)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__) AO_load_read(&var->counter);
+}
+
+#endif
+
#ifdef AO_HAVE_load_acquire
#if ETHR_SIZEOF_AO_T == 4
@@ -130,6 +187,22 @@ ETHR_NATMC_FUNC__(read_acqb)(ETHR_ATMC_T__ *var)
#endif
+#ifdef AO_HAVE_load_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_READ_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_READ_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(read_mb)(ETHR_ATMC_T__ *var)
+{
+ return (ETHR_AINT_T__) AO_load_full(&var->counter);
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_add
#if ETHR_SIZEOF_AO_T == 4
@@ -146,6 +219,54 @@ ETHR_NATMC_FUNC__(add_return)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
#endif
+#ifdef AO_HAVE_fetch_and_add_acquire
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_ACQB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_ACQB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return_acqb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add_acquire(&var->counter, (AO_t) incr)) + incr;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_add_release
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_RELB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return_relb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add_release(&var->counter, (AO_t) incr)) + incr;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_add_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_ADD_RETURN_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(add_return_mb)(ETHR_ATMC_T__ *var, ETHR_AINT_T__ incr)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add_full(&var->counter, (AO_t) incr)) + incr;
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_add1
#if ETHR_SIZEOF_AO_T == 4
@@ -178,6 +299,38 @@ ETHR_NATMC_FUNC__(inc_return_acqb)(ETHR_ATMC_T__ *var)
#endif
+#ifdef AO_HAVE_fetch_and_add1_release
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RELB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_RELB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_relb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_release(&var->counter)) + 1;
+}
+
+#endif
+
+#ifdef AO_HAVE_fetch_and_add1_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_INC_RETURN_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(inc_return_mb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_add1_full(&var->counter)) + 1;
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_sub1
#if ETHR_SIZEOF_AO_T == 4
@@ -194,6 +347,22 @@ ETHR_NATMC_FUNC__(dec_return)(ETHR_ATMC_T__ *var)
#endif
+#ifdef AO_HAVE_fetch_and_sub1_acquire
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_ACQB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_ACQB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_acqb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_acquire(&var->counter)) - 1;
+}
+
+#endif
+
#ifdef AO_HAVE_fetch_and_sub1_release
#if ETHR_SIZEOF_AO_T == 4
@@ -210,7 +379,60 @@ ETHR_NATMC_FUNC__(dec_return_relb)(ETHR_ATMC_T__ *var)
#endif
-#ifdef AO_HAVE_compare_and_swap
+#ifdef AO_HAVE_fetch_and_sub1_full
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_DEC_RETURN_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(dec_return_mb)(ETHR_ATMC_T__ *var)
+{
+ return ((ETHR_AINT_T__) AO_fetch_and_sub1_full(&var->counter)) - 1;
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_full) || defined(AO_HAVE_fetch_compare_and_swap_full)
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_MB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_MB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_mb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#if defined(AO_HAVE_fetch_compare_and_swap_full)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_full(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_full(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+#ifdef AO_HAVE_load_acquire
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+#endif
+ } while (act == exp);
+#ifndef AO_HAVE_load_acquire
+ AO_nop_full();
+#endif
+ return act;
+#endif
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap) || defined(AO_HAVE_fetch_compare_and_swap)
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG 1
@@ -223,18 +445,28 @@ ETHR_NATMC_FUNC__(cmpxchg)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
+#if defined(AO_HAVE_fetch_compare_and_swap)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
+#ifdef AO_HAVE_load
act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
} while (act == exp);
return act;
+#endif
}
#endif
-#ifdef AO_HAVE_compare_and_swap_acquire
+#if defined(AO_HAVE_compare_and_swap_acquire) || defined(AO_HAVE_fetch_compare_and_swap_acquire)
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_ACQB 1
@@ -247,6 +479,11 @@ ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
+#if defined(AO_HAVE_fetch_compare_and_swap_acquire)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_acquire(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_acquire(&var->counter, (AO_t) exp, (AO_t) new))
@@ -261,11 +498,55 @@ ETHR_NATMC_FUNC__(cmpxchg_acqb)(ETHR_ATMC_T__ *var,
AO_nop_full();
#endif
return act;
+#endif
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_read) || defined(AO_HAVE_fetch_compare_and_swap_read)
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_RB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_rb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#if defined(AO_HAVE_fetch_compare_and_swap_read)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_read(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_read(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+#if defined(AO_HAVE_load_read)
+ act = (ETHR_AINT_T__) AO_load_read(&var->counter);
+#elif defined(AO_HAVE_load)
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
+ } while (act == exp);
+#ifndef AO_HAVE_load_read
+#ifdef AO_HAVE_nop_read
+ AO_nop_read();
+#else
+ AO_nop_full();
+#endif
+#endif
+ return act;
+#endif
}
#endif
-#ifdef AO_HAVE_compare_and_swap_release
+#if defined(AO_HAVE_compare_and_swap_release) || defined(AO_HAVE_fetch_compare_and_swap_release)
#if ETHR_SIZEOF_AO_T == 4
# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB 1
@@ -278,13 +559,57 @@ ETHR_NATMC_FUNC__(cmpxchg_relb)(ETHR_ATMC_T__ *var,
ETHR_AINT_T__ new,
ETHR_AINT_T__ exp)
{
+#if defined(AO_HAVE_fetch_compare_and_swap_release)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_release(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
ETHR_AINT_T__ act;
do {
if (AO_compare_and_swap_release(&var->counter, (AO_t) exp, (AO_t) new))
return exp;
+#ifdef AO_HAVE_load
act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
+ } while (act == exp);
+ return act;
+#endif
+}
+
+#endif
+
+#if defined(AO_HAVE_compare_and_swap_write) || defined(AO_HAVE_fetch_compare_and_swap_write)
+
+#if ETHR_SIZEOF_AO_T == 4
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_WB 1
+#else
+# define ETHR_HAVE_ETHR_NATIVE_ATOMIC64_CMPXCHG_WB 1
+#endif
+
+static ETHR_INLINE ETHR_AINT_T__
+ETHR_NATMC_FUNC__(cmpxchg_wb)(ETHR_ATMC_T__ *var,
+ ETHR_AINT_T__ new,
+ ETHR_AINT_T__ exp)
+{
+#if defined(AO_HAVE_fetch_compare_and_swap_write)
+ return (ETHR_AINT_T__) AO_fetch_compare_and_swap_write(&var->counter,
+ (AO_t) exp,
+ (AO_t) new);
+#else
+ ETHR_AINT_T__ act;
+ do {
+ if (AO_compare_and_swap_write(&var->counter, (AO_t) exp, (AO_t) new))
+ return exp;
+#ifdef AO_HAVE_load
+ act = (ETHR_AINT_T__) AO_load(&var->counter);
+#else
+ act = (ETHR_AINT_T__) AO_load_acquire(&var->counter);
+#endif
} while (act == exp);
return act;
+#endif
}
#endif
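
The cmpxchg variants added above prefer AO_fetch_compare_and_swap*, which returns the previous value directly; when only the boolean AO_compare_and_swap* family exists, they reconstruct that value with a retry loop. A simplified standalone sketch of that fallback with the ETHR_* macro layer stripped away (the name cmpxchg_mb_fallback is illustrative; only AO_compare_and_swap_full, AO_load and AO_nop_full, all used by the patch itself, are assumed):

/* Sketch of the fallback pattern in ethr_atomic.h above. Returns the
 * value that was in *p before the call: 'exp' when the swap happened,
 * otherwise the conflicting value that was observed. */
#include <atomic_ops.h>

static AO_t
cmpxchg_mb_fallback(volatile AO_t *p, AO_t exp, AO_t new_val)
{
    AO_t act;
    do {
        if (AO_compare_and_swap_full(p, exp, new_val))
            return exp;       /* swapped; the old value equaled 'exp'     */
        act = AO_load(p);     /* failed; observe the conflicting value    */
    } while (act == exp);     /* value raced back to 'exp'? retry the CAS */
    AO_nop_full();            /* keep the full-barrier guarantee          */
    return act;
}
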
diff --git a/erts/include/internal/libatomic_ops/ethr_dw_atomic.h b/erts/include/internal/libatomic_ops/ethr_dw_atomic.h
new file mode 100644
index 0000000000..4dd9f41e96
--- /dev/null
+++ b/erts/include/internal/libatomic_ops/ethr_dw_atomic.h
@@ -0,0 +1,567 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Native double word atomics using libatomic_ops
+ * Author: Rickard Green
+ */
+
+#ifndef ETHR_LIBATOMIC_OPS_DW_ATOMIC_H__
+#define ETHR_LIBATOMIC_OPS_DW_ATOMIC_H__
+
+#if defined(AO_HAVE_double_t) \
+ && (defined(AO_HAVE_double_load_acquire) \
+ || defined(AO_HAVE_double_load)) \
+ && (defined(AO_HAVE_compare_double_and_swap_double) \
+ || defined(AO_HAVE_compare_double_and_swap_double_full) \
+ || defined(AO_HAVE_compare_double_and_swap_double_acquire) \
+ || defined(AO_HAVE_compare_double_and_swap_double_release) \
+ || defined(AO_HAVE_double_compare_and_swap) \
+ || defined(AO_HAVE_double_compare_and_swap_full) \
+ || defined(AO_HAVE_double_compare_and_swap_acquire) \
+ || defined(AO_HAVE_double_compare_and_swap_release))
+
+#if ETHR_SIZEOF_PTR == 4
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint64_t
+#elif ETHR_SIZEOF_PTR == 8 && defined(ETHR_HAVE_INT128_T)
+# define ETHR_NATIVE_SU_DW_SINT_T ethr_sint128_t
+#endif
+
+typedef union {
+ volatile AO_double_t dw_mem;
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+ ETHR_NATIVE_SU_DW_SINT_T su_dw_sint;
+#endif
+} ethr_native_dw_atomic_t;
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_NATIVE_SU_DW_ATOMIC
+#else
+# define ETHR_HAVE_NATIVE_DW_ATOMIC
+#endif
+
+#define ETHR_NATIVE_DW_ATOMIC_IMPL ETHR_NATIVE_IMPL__
+
+#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__)
+
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_NDWA_FUNC__(Func) ethr_native_su_dw_atomic_ ## Func
+# define ETHR_NDWA_RET_3_TYPE__ ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_NDWA_RET_2_TYPE__ ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_NDWA_VAL_ARG_TYPE__ ETHR_NATIVE_SU_DW_SINT_T
+# define ETHR_NDWA_DECL_ARG__(Arg)
+# if defined(AO_HAVE_DOUBLE_PTR_STORAGE)
+# define ETHR_NDWA_VAL2AOVAL__(AOV, V) \
+ ((AOV).AO_whole = (double_ptr_storage) (V))
+# define ETHR_NDWA_AOVAL2VAL__(AOV, V) \
+ ((V) = (ETHR_NATIVE_SU_DW_SINT_T) (AOV).AO_whole)
+# define ETHR_NDWA_RETURN_VAL_3__(SUCCESS, AOVAL, VAL) \
+ do { \
+ return (ETHR_NATIVE_SU_DW_SINT_T) (AOVAL).AO_whole; \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_2__(AOVAL, VAL) \
+ do { \
+ return (ETHR_NATIVE_SU_DW_SINT_T) (AOVAL).AO_whole; \
+ } while (0)
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_whole == (AOV2).AO_whole)
+# else
+typedef union {
+ ethr_sint_t sint[2];
+ ETHR_NATIVE_SU_DW_SINT_T dw_sint;
+} ethr_dw_splitter_t;
+# define ETHR_NDWA_VAL2AOVAL__(AOV, V) \
+ do { \
+ ethr_dw_splitter_t tmp__; \
+ tmp__.dw_sint = (V); \
+ (AOV).AO_val1 = (AO_t) tmp__.sint[0]; \
+ (AOV).AO_val2 = (AO_t) tmp__.sint[1]; \
+ } while (0)
+# define ETHR_NDWA_AOVAL2VAL__(AOV, V) \
+ do { \
+ ethr_dw_splitter_t tmp__; \
+ tmp__.sint[0] = (ethr_sint_t) (AOV).AO_val1; \
+ tmp__.sint[1] = (ethr_sint_t) (AOV).AO_val2; \
+ (V) = tmp__.dw_sint; \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_3__(SUCCESS, AOVAL, VAL) \
+ do { \
+ ethr_dw_splitter_t tmp__; \
+ tmp__.sint[0] = (ethr_sint_t) (AOVAL).AO_val1; \
+ tmp__.sint[1] = (ethr_sint_t) (AOVAL).AO_val2; \
+ return tmp__.dw_sint; \
+ } while (0)
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_val1 == (AOV2).AO_val1 \
+ && (AOV1).AO_val2 == (AOV2).AO_val2)
+# endif
+#else
+# define ETHR_NDWA_FUNC__(Func) ethr_native_dw_atomic_ ## Func
+# define ETHR_NDWA_RET_3_TYPE__ int
+# define ETHR_NDWA_RET_2_TYPE__ void
+# define ETHR_NDWA_VAL_ARG_TYPE__ ethr_sint_t *
+# define ETHR_NDWA_DECL_ARG__(Arg) , ETHR_NDWA_VAL_ARG_TYPE__ Arg
+# define ETHR_NDWA_VAL2AOVAL__(AOV, V) \
+ do { \
+ (AOV).AO_val1 = (AO_t) (V)[0]; \
+ (AOV).AO_val2 = (AO_t) (V)[1]; \
+ } while (0)
+# define ETHR_NDWA_AOVAL2VAL__(AOV, V) \
+ do { \
+ ethr_dw_splitter_t tmp__; \
+ (V)[0] = (ethr_sint_t) (AOV).AO_val1; \
+ (V)[1] = (ethr_sint_t) (AOV).AO_val2; \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_3__(SUCCESS, AOVAL, VAL) \
+ do { \
+ (VAL)[0] = (ethr_sint_t) (AOVAL).AO_val1; \
+ (VAL)[1] = (ethr_sint_t) (AOVAL).AO_val2; \
+ return (SUCCESS); \
+ } while (0)
+# define ETHR_NDWA_RETURN_VAL_2__(AOVAL, VAL) \
+ do { \
+ (VAL)[0] = (ethr_sint_t) (AOVAL).AO_val1; \
+ (VAL)[1] = (ethr_sint_t) (AOVAL).AO_val2; \
+ return; \
+ } while (0)
+# if defined(AO_HAVE_DOUBLE_PTR_STORAGE)
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_whole == (AOV2).AO_whole)
+# else
+# define ETHR_NDWA_AOVAL_EQ__(AOV1, AOV2) \
+ ((AOV1).AO_val1 == (AOV2).AO_val1 \
+ && (AOV1).AO_val2 == (AOV2).AO_val2)
+# endif
+#endif
+
+#define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_ADDR
+static ETHR_INLINE ethr_sint_t *
+ethr_native_dw_atomic_addr(ethr_native_dw_atomic_t *var)
+{
+ return (ethr_sint_t *) &var->dw_mem;
+}
+
+#ifdef AO_HAVE_double_load
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_2_TYPE__
+ETHR_NDWA_FUNC__(read)(ethr_native_dw_atomic_t *var
+ ETHR_NDWA_DECL_ARG__(val))
+{
+ AO_double_t act = AO_double_load(&var->dw_mem);
+ ETHR_NDWA_RETURN_VAL_2__(act, val);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_load_read
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_RB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_RB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_2_TYPE__
+ETHR_NDWA_FUNC__(read_rb)(ethr_native_dw_atomic_t *var
+ ETHR_NDWA_DECL_ARG__(val))
+{
+ AO_double_t act = AO_double_load_read(&var->dw_mem);
+ ETHR_NDWA_RETURN_VAL_2__(act, val);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_load_acquire
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_READ_ACQB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_READ_ACQB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_2_TYPE__
+ETHR_NDWA_FUNC__(read_acqb)(ethr_native_dw_atomic_t *var
+ ETHR_NDWA_DECL_ARG__(val))
+{
+ AO_double_t act = AO_double_load_acquire(&var->dw_mem);
+ ETHR_NDWA_RETURN_VAL_2__(act, val);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_store
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET
+#endif
+
+static ETHR_INLINE void
+ETHR_NDWA_FUNC__(set)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ val)
+{
+ AO_double_t new;
+ ETHR_NDWA_VAL2AOVAL__(new, val);
+ AO_double_store(&var->dw_mem, new);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_store_write
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_WB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_WB
+#endif
+
+static ETHR_INLINE void
+ETHR_NDWA_FUNC__(set_wb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ val)
+{
+ AO_double_t new;
+ ETHR_NDWA_VAL2AOVAL__(new, val);
+ AO_double_store_write(&var->dw_mem, new);
+}
+
+#endif
+
+#ifdef AO_HAVE_double_store_release
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_SET_RELB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_SET_RELB
+#endif
+
+static ETHR_INLINE void
+ETHR_NDWA_FUNC__(set_relb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ val)
+{
+ AO_double_t new;
+ ETHR_NDWA_VAL2AOVAL__(new, val);
+ AO_double_store_release(&var->dw_mem, new);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_full) || defined(AO_HAVE_compare_double_and_swap_double_full)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_MB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_MB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_mb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_full)
+ xchgd = AO_double_compare_and_swap_full(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_full)
+ xchgd = AO_compare_double_and_swap_double_full(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load_acquire
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#else
+ ao_act = AO_double_load(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+#ifndef AO_HAVE_double_load_acquire
+ AO_nop_full();
+#endif
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap) || defined(AO_HAVE_compare_double_and_swap_double)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap)
+ xchgd = AO_double_compare_and_swap(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double)
+ xchgd = AO_compare_double_and_swap_double(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load
+ ao_act = AO_double_load(&var->dw_mem);
+#else
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_read) || defined(AO_HAVE_compare_double_and_swap_double_read)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_rb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_read)
+ xchgd = AO_double_compare_and_swap_read(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_read)
+ xchgd = AO_compare_double_and_swap_double_read(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#if defined(AO_HAVE_double_load_read)
+ ao_act = AO_double_load_read(&var->dw_mem);
+#elif defined(AO_HAVE_double_load)
+ ao_act = AO_double_load(&var->dw_mem);
+#else
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+#ifndef AO_HAVE_double_load_read
+#ifdef AO_HAVE_nop_read
+ AO_nop_read();
+#else
+ AO_nop_full();
+#endif
+#endif
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_acquire) || defined(AO_HAVE_compare_double_and_swap_double_acquire)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_ACQB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_ACQB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_acqb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_acquire)
+ xchgd = AO_double_compare_and_swap_acquire(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_acquire)
+ xchgd = AO_compare_double_and_swap_double_acquire(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load_acquire
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#else
+ ao_act = AO_double_load(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+#ifndef AO_HAVE_double_load_acquire
+ AO_nop_full();
+#endif
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_write) || defined(AO_HAVE_compare_double_and_swap_double_write)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_WB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_WB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_wb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_write)
+ xchgd = AO_double_compare_and_swap_write(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_write)
+ xchgd = AO_compare_double_and_swap_double_write(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+#ifdef AO_HAVE_double_load
+ ao_act = AO_double_load(&var->dw_mem);
+#else
+ ao_act = AO_double_load_acquire(&var->dw_mem);
+#endif
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#if defined(AO_HAVE_double_compare_and_swap_release) || defined(AO_HAVE_compare_double_and_swap_double_release)
+
+#if defined(ETHR_NATIVE_SU_DW_SINT_T)
+# define ETHR_HAVE_ETHR_NATIVE_SU_DW_ATOMIC_CMPXCHG_RELB
+#else
+# define ETHR_HAVE_ETHR_NATIVE_DW_ATOMIC_CMPXCHG_RELB
+#endif
+
+static ETHR_INLINE ETHR_NDWA_RET_3_TYPE__
+ETHR_NDWA_FUNC__(cmpxchg_relb)(ethr_native_dw_atomic_t *var,
+ ETHR_NDWA_VAL_ARG_TYPE__ new,
+ ETHR_NDWA_VAL_ARG_TYPE__ exp)
+{
+ AO_double_t ao_act, ao_new, ao_exp;
+
+ ETHR_NDWA_VAL2AOVAL__(ao_exp, exp);
+ ETHR_NDWA_VAL2AOVAL__(ao_new, new);
+
+ do {
+ int xchgd;
+#if defined(AO_HAVE_double_compare_and_swap_release)
+ xchgd = AO_double_compare_and_swap_release(&var->dw_mem, ao_exp, ao_new);
+#elif defined(AO_HAVE_compare_double_and_swap_double_release)
+ xchgd = AO_compare_double_and_swap_double_release(&var->dw_mem,
+ ao_exp.AO_val1,
+ ao_exp.AO_val2,
+ ao_new.AO_val1,
+ ao_new.AO_val2);
+#endif
+
+ if (xchgd)
+ ETHR_NDWA_RETURN_VAL_3__(1, ao_exp, exp);
+
+ ao_act = AO_double_load(&var->dw_mem);
+
+ } while (ETHR_NDWA_AOVAL_EQ__(ao_exp, ao_act));
+
+ ETHR_NDWA_RETURN_VAL_3__(0, ao_act, exp);
+}
+
+#endif
+
+#endif /* defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_ATOMIC_IMPL__) */
+
+#endif /* Have AO double functionality ... */
+
+#endif /* ETHR_LIBATOMIC_OPS_DW_ATOMIC_H__ */
+
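
On targets where libatomic_ops stores an AO_double_t as two separate words (no AO_HAVE_DOUBLE_PTR_STORAGE), the header above shuttles a double-word integer in and out of the AO_val1/AO_val2 halves through the ethr_dw_splitter_t union. The sketch below shows that round trip outside the ETHR macro machinery; dw_splitter, split64 and join64 are illustrative names, and a 32-bit AO_t is assumed:

/* Not part of the patch: the split/join performed by
 * ETHR_NDWA_VAL2AOVAL__ and ETHR_NDWA_AOVAL2VAL__ above, written out
 * for the case where AO_t is 32 bits and AO_double_t exposes the
 * AO_val1/AO_val2 pair. */
#include <stdint.h>
#include <atomic_ops.h>

#if defined(AO_HAVE_double_t) && !defined(AO_HAVE_DOUBLE_PTR_STORAGE)

typedef union {
    uint32_t half[2];
    uint64_t whole;
} dw_splitter;

static AO_double_t split64(uint64_t v)
{
    dw_splitter s;
    AO_double_t d;
    s.whole = v;                  /* native endianness decides which half */
    d.AO_val1 = (AO_t) s.half[0]; /* ends up in AO_val1 vs AO_val2        */
    d.AO_val2 = (AO_t) s.half[1];
    return d;
}

static uint64_t join64(AO_double_t d)
{
    dw_splitter s;
    s.half[0] = (uint32_t) d.AO_val1;
    s.half[1] = (uint32_t) d.AO_val2;
    return s.whole;
}

#endif
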
diff --git a/erts/include/internal/libatomic_ops/ethread.h b/erts/include/internal/libatomic_ops/ethread.h
index e1fdd588bb..d65ee19b04 100644
--- a/erts/include/internal/libatomic_ops/ethread.h
+++ b/erts/include/internal/libatomic_ops/ethread.h
@@ -33,9 +33,12 @@
#define AO_USE_PENTIUM4_INSTRS
#endif
+#define ETHR_NATIVE_IMPL__ "libatomic_ops"
+
#include "atomic_ops.h"
#include "ethr_membar.h"
#include "ethr_atomic.h"
+#include "ethr_dw_atomic.h"
#endif
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
index 6001620677..b558626b09 100644
--- a/erts/include/internal/ppc32/atomic.h
+++ b/erts/include/internal/ppc32/atomic.h
@@ -91,6 +91,20 @@ ethr_native_atomic32_add_return_acqb(ethr_native_atomic32_t *var, ethr_sint32_t
return res;
}
+
+#ifndef ETHR_PPC_HAVE_NO_LWSYNC
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_ADD_RETURN_RELB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_add_return_relb(ethr_native_atomic32_t *var, ethr_sint32_t incr)
+{
+ ethr_lwsync__();
+ return ethr_native_atomic32_add_return(var, incr);
+}
+
+#endif
+
#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN 1
static ETHR_INLINE ethr_sint32_t
@@ -120,7 +134,19 @@ ethr_native_atomic32_inc_return_acqb(ethr_native_atomic32_t *var)
__asm__ __volatile("isync\n\t" : : : "memory");
return res;
}
-
+
+#ifndef ETHR_PPC_HAVE_NO_LWSYNC
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_INC_RETURN_RELB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_inc_return_relb(ethr_native_atomic32_t *var)
+{
+ ethr_lwsync__();
+ return ethr_native_atomic32_inc_return(var);
+}
+
+#endif
#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN 1
@@ -152,6 +178,19 @@ ethr_native_atomic32_dec_return_acqb(ethr_native_atomic32_t *var)
return res;
}
+#ifndef ETHR_PPC_HAVE_NO_LWSYNC
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_DEC_RETURN_RELB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_dec_return_relb(ethr_native_atomic32_t *var)
+{
+ ethr_lwsync__();
+ return ethr_native_atomic32_dec_return(var);
+}
+
+#endif
+
#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD 1
static ETHR_INLINE ethr_sint32_t
@@ -182,6 +221,19 @@ ethr_native_atomic32_and_retold_acqb(ethr_native_atomic32_t *var, ethr_sint32_t
return res;
}
+#ifndef ETHR_PPC_HAVE_NO_LWSYNC
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_AND_RETOLD_RELB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_and_retold_relb(ethr_native_atomic32_t *var, ethr_sint32_t mask)
+{
+ ethr_lwsync__();
+ return ethr_native_atomic32_and_retold(var, mask);
+}
+
+#endif
+
#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD 1
static ETHR_INLINE ethr_sint32_t
@@ -212,6 +264,18 @@ ethr_native_atomic32_or_retold_acqb(ethr_native_atomic32_t *var, ethr_sint32_t m
return res;
}
+#ifndef ETHR_PPC_HAVE_NO_LWSYNC
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_OR_RETOLD_RELB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_or_retold_relb(ethr_native_atomic32_t *var, ethr_sint32_t mask)
+{
+ ethr_lwsync__();
+ return ethr_native_atomic32_or_retold(var, mask);
+}
+
+#endif
#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG 1
@@ -242,6 +306,19 @@ ethr_native_atomic32_xchg_acqb(ethr_native_atomic32_t *var, ethr_sint32_t val)
return res;
}
+#ifndef ETHR_PPC_HAVE_NO_LWSYNC
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_XCHG_RELB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_xchg_relb(ethr_native_atomic32_t *var, ethr_sint32_t val)
+{
+ ethr_lwsync__();
+ return ethr_native_atomic32_xchg(var, val);
+}
+
+#endif
+
#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG 1
static ETHR_INLINE ethr_sint32_t
@@ -291,6 +368,73 @@ ethr_native_atomic32_cmpxchg_acqb(ethr_native_atomic32_t *var,
return old;
}
+#if !defined(ETHR_DISABLE_LWSYNC_FOR_CMPXCHG_RELB) && !defined(ETHR_PPC_HAVE_NO_LWSYNC)
+
+#define ETHR_HAVE_ETHR_NATIVE_ATOMIC32_CMPXCHG_RELB 1
+
+static ETHR_INLINE ethr_sint32_t
+ethr_native_atomic32_cmpxchg_relb(ethr_native_atomic32_t *var,
+ ethr_sint32_t new,
+ ethr_sint32_t expected)
+{
+ ethr_sint32_t actual;
+
+ /*
+ * We want to implement the release barrier using the
+ * 'lwsync' instruction instead of using the more
+ * expensive 'sync' instruction.
+ *
+ * cmpxchg looks something like this:
+ *
+ * lwarx # Load
+ * ...
+ * if (fail)
+ * goto done;
+ * stwcx # Store
+ * if (fail)
+ * goto done;
+ * ...
+ *
+ * In the case we succeeded, 'lwsync' will have
+ * ordered all previously issued loads and stores
+ * against the successful store to this variable.
+ * That is, everything is fine!
+ *
+ * In the case we did not succeed, we need to order
+ * all previously issued loads and stores against
+ * the load of this variable. 'lwsync' does not
+ * guarantee this. In order to solve this we issue
+ * a 'sync' and redo the load. If the value has
+ * changed to what the user passed as the expected value,
+ * we need to try the cmpxchg operation again, since
+ * this value indicates success.
+ */
+
+ ethr_lwsync__();
+
+ actual = ethr_native_atomic32_cmpxchg(var, new, expected);
+
+#ifndef ETHR_PPC_HAVE_LWSYNC
+ /* We checked for lwsync support at runtime... */
+ if (ETHR_PPC_RUNTIME_CONF_HAVE_NO_LWSYNC__)
+ return actual; /* No need to; ethr_lwsync__() issued a sync... */
+#endif
+
+ /* ethr_lwsync__() issued an lwsync... */
+ if (actual == expected)
+ return actual; /* Successful operation */
+
+ /* Failure... need to issue a sync... */
+ ethr_sync__();
+ actual = ethr_native_atomic32_read(var);
+ if (actual != expected)
+ return actual; /* Fail... */
+ /* Try again... */
+ return ethr_native_atomic32_cmpxchg(var, new, expected);
+}
+
+#endif
+
#endif /* ETHR_TRY_INLINE_FUNCS */
#endif /* ETHREAD_PPC_ATOMIC_H */
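
For comparison, the ordering contract of the new ppc32 *_relb operations can also be expressed with GCC's __atomic builtins; the patch instead places an explicit lwsync (or, where lwsync is unavailable, the full sync that ethr_lwsync__() falls back to) in front of the existing lwarx/stwcx. sequences, which is cheaper than issuing sync on every release operation. The sketch below is only an equivalent formulation, not OTP code, and the function names are illustrative:

/* Not OTP code: the memory-ordering contract of the new ppc32 *_relb
 * operations, written with GCC __atomic builtins for comparison. */
static inline int add_return_relb_equiv(int *p, int incr)
{
    /* all earlier loads and stores are ordered before this update */
    return __atomic_add_fetch(p, incr, __ATOMIC_RELEASE);
}

static inline int cmpxchg_relb_equiv(int *p, int new_val, int expected)
{
    /* in this model, release ordering applies only to the successful
     * store; the patch is more conservative and falls back to a full
     * "sync" plus a re-read when the operation fails */
    __atomic_compare_exchange_n(p, &expected, new_val, 0,
                                __ATOMIC_RELEASE, __ATOMIC_RELAXED);
    return expected;   /* like cmpxchg above: the value observed in *p */
}
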