path: root/erts/include/internal/sparc32
author    Rickard Green <[email protected]>    2010-11-05 15:32:34 +0100
committer Rickard Green <[email protected]>    2010-12-01 10:23:15 +0100
commit    80570513a1f121d89543c4c5b11fa5041cc3df7f (patch)
tree      ceaf0275eb1cf0404fd79914cdcd3cb98297c3cd /erts/include/internal/sparc32
parent    f0fae4bebaa76a7608e09877da62ae84c365388d (diff)
download  otp-80570513a1f121d89543c4c5b11fa5041cc3df7f.tar.gz
          otp-80570513a1f121d89543c4c5b11fa5041cc3df7f.tar.bz2
          otp-80570513a1f121d89543c4c5b11fa5041cc3df7f.zip
Miscellaneous rwmutex bug fixes and improvements
The ERTS internal rwlock implementation could get into an inconsistent state. The bug was very seldom triggered, but could occur under heavy contention. It was introduced in R14B (erts-5.8.1).

The bug was most likely to be triggered when using the read_concurrency option on an ETS table that was frequently accessed from multiple processes doing lots of both writes and reads, i.e., a situation where you typically do not want to use the read_concurrency option in the first place.
Diffstat (limited to 'erts/include/internal/sparc32')
-rw-r--r--  erts/include/internal/sparc32/atomic.h  |  35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 2a995d4465..2da6472393 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -176,38 +176,59 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
* Atomic ops with at least specified barriers.
*/
+/* TODO: relax acquire barriers */
+
static ETHR_INLINE long
ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
{
long res = ethr_native_atomic_read(var);
- __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+ __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore" : : : "memory");
return res;
}
static ETHR_INLINE void
ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
{
- __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
ethr_native_atomic_set(var, i);
}
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+ long res = ethr_native_atomic_inc_return(var);
+ __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
+ return res;
+}
+
static ETHR_INLINE void
ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
{
- __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
ethr_native_atomic_dec(var);
}
static ETHR_INLINE long
ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
{
- __asm__ __volatile__("membar #LoadStore|#StoreStore");
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
return ethr_native_atomic_dec_return(var);
}
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+{
+ long res = ethr_native_atomic_cmpxchg(var, new, old);
+ __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
+ return res;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+{
+ __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
+ return ethr_native_atomic_cmpxchg(var, new, old);
+}
#endif /* ETHR_TRY_INLINE_FUNCS */
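
For context (not part of the patch), below is a minimal sketch of how the acquire/release variants above are meant to be paired, using a simple spin lock as the example. It assumes that ethr_native_atomic_cmpxchg_acqb() returns the value previously stored in *var (as the cmpxchg signature above suggests) and that a statically zero-initialized ethr_native_atomic_t counts as "unlocked"; both are illustrative assumptions, not guarantees made by the header.

/* Illustrative sketch only -- not part of the patch. */
#include "ethread.h"  /* assumed umbrella header exposing the native atomics */

static ethr_native_atomic_t lock; /* assumed zero-initialized == unlocked */

static void
spin_lock(void)
{
    /* Acquire semantics: the membar emitted after the cmpxchg keeps the
     * critical section's loads and stores from being hoisted above the
     * point where the lock is observed as taken. */
    while (ethr_native_atomic_cmpxchg_acqb(&lock, 1, 0) != 0)
        ; /* spin until the previous value was 0 (unlocked) */
}

static void
spin_unlock(void)
{
    /* Release semantics: the membar emitted before the store keeps the
     * critical section's loads and stores from sinking below the unlock. */
    ethr_native_atomic_set_relb(&lock, 0);
}

The pattern mirrors what the patch fixes: an acquire barrier belongs after the atomic operation that takes ownership, and a release barrier belongs before the store that gives it up, which is why the previous plain aliases for cmpxchg_acqb/cmpxchg_relb were insufficient.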