author     Rickard Green <[email protected]>  2010-12-01 10:43:59 +0100
committer  Rickard Green <[email protected]>  2010-12-01 10:43:59 +0100
commit     bfa69f08f63978db55e31931db1c6c6af3e7b4ae (patch)
tree       9ee57f4c6bba94e48137d4a11ea8a394e2ca1b68 /erts/include/internal
parent     f74637e1f2f177c896e7b3641ec1d77ed14aa2e4 (diff)
parent     80570513a1f121d89543c4c5b11fa5041cc3df7f (diff)
Merge branch 'rickard/rwmutex-bug/OTP-8925' into dev

* rickard/rwmutex-bug/OTP-8925:
  Miscellaneous rwmutex bug fixes and improvements
  Don't use more reader groups than schedulers
  New test suite containing stress tests of the rwmutex implementation

Conflicts:
    erts/emulator/beam/erl_init.c
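One of the fixes listed above caps the number of reader groups used by the rwmutex implementation at the number of schedulers, since extra groups only add memory and bookkeeping cost without adding parallelism. That clamp is not part of the diff shown below (it lands in erl_init.c and the ethread library); the following is only a minimal sketch of the idea, and effective_reader_groups, requested and no_schedulers are hypothetical names that do not appear in the commit:

/* Hypothetical sketch only -- not code from this commit. Illustrates
 * "Don't use more reader groups than schedulers": the requested
 * reader-group count is clamped to the scheduler count (and to at
 * least one group). */
static int
effective_reader_groups(int requested, int no_schedulers)
{
    if (requested > no_schedulers)
	return no_schedulers;	/* more groups than schedulers is wasted */
    if (requested < 1)
	return 1;		/* always keep at least one group */
    return requested;
}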
Diffstat (limited to 'erts/include/internal')
-rw-r--r--  erts/include/internal/ethr_mutex.h     | 145
-rw-r--r--  erts/include/internal/i386/atomic.h    |  49
-rw-r--r--  erts/include/internal/sparc32/atomic.h |  35
3 files changed, 214 insertions, 15 deletions
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index 8d9d5e3d08..636fdc1e2f 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -33,6 +33,13 @@
 # define ETHR_MTX_HARD_DEBUG
 #endif
 
+#if 0
+# define ETHR_MTX_CHK_EXCL
+#if 1
+# define ETHR_MTX_CHK_NON_EXCL
+#endif
+#endif
+
 #ifdef ETHR_MTX_HARD_DEBUG
 # ifdef __GNUC__
 # warning ETHR_MTX_HARD_DEBUG
@@ -49,6 +56,15 @@
 
 #if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
 
+#ifdef ETHR_DEBUG
+# ifndef ETHR_MTX_CHK_EXCL
+# define ETHR_MTX_CHK_EXCL
+# endif
+# ifndef ETHR_MTX_CHK_NON_EXCL
+# define ETHR_MTX_CHK_NON_EXCL
+# endif
+#endif
+
 #if 0
 # define ETHR_MTX_Q_LOCK_SPINLOCK__
 # define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
@@ -68,8 +84,8 @@
 
 /* frequent read kind */
 #define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
-#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_FLG__ - 1)
-#define ETHR_RWMTX_R_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((long) 1) << 27)
+#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)
 
 /* normal kind */
 #define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
@@ -91,6 +107,12 @@ struct ethr_mutex_base_ {
 #ifdef ETHR_MTX_HARD_DEBUG_WSQ
     int ws;
 #endif
+#ifdef ETHR_MTX_CHK_EXCL
+    ethr_atomic_t exclusive;
+#endif
+#ifdef ETHR_MTX_CHK_NON_EXCL
+    ethr_atomic_t non_exclusive;
+#endif
 #ifdef ETHR_MTX_HARD_DEBUG_LFS
     ethr_atomic_t hdbg_lfs;
 #endif
@@ -344,6 +366,116 @@ do { \
 #define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
 #endif
 
+#ifdef ETHR_MTX_CHK_EXCL
+
+#if !defined(ETHR_DEBUG) && defined(__GNUC__)
+#warning "check exclusive is enabled"
+#endif
+
+# define ETHR_MTX_CHK_EXCL_INIT__(MTXB) \
+    ethr_atomic_init(&(MTXB)->exclusive, 0)
+
+# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB) \
+do { \
+    ETHR_COMPILER_BARRIER; \
+    if (!ethr_atomic_read(&(MTXB)->exclusive)) \
+	ethr_assert_failed(__FILE__, __LINE__, __func__,\
+			   "is exclusive"); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB) \
+do { \
+    ETHR_COMPILER_BARRIER; \
+    if (ethr_atomic_read(&(MTXB)->exclusive)) \
+	ethr_assert_failed(__FILE__, __LINE__, __func__,\
+			   "is not exclusive"); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB) \
+do { \
+    ETHR_MTX_CHK_EXCL_IS_NOT_EXCL((MTXB)); \
+    ethr_atomic_set(&(MTXB)->exclusive, 1); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB) \
+do { \
+    ETHR_MTX_CHK_EXCL_IS_EXCL((MTXB)); \
+    ethr_atomic_set(&(MTXB)->exclusive, 0); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+
+#ifdef ETHR_MTX_CHK_NON_EXCL
+
+#if !defined(ETHR_DEBUG) && defined(__GNUC__)
+#warning "check non-exclusive is enabled"
+#endif
+
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB) \
+    ethr_atomic_init(&(MTXB)->non_exclusive, 0)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB) \
+do { \
+    ETHR_COMPILER_BARRIER; \
+    if (!ethr_atomic_read(&(MTXB)->non_exclusive)) \
+	ethr_assert_failed(__FILE__, __LINE__, __func__,\
+			   "is non-exclusive"); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB) \
+do { \
+    ETHR_COMPILER_BARRIER; \
+    if (ethr_atomic_read(&(MTXB)->non_exclusive)) \
+	ethr_assert_failed(__FILE__, __LINE__, __func__,\
+			   "is not non-exclusive"); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB) \
+do { \
+    ETHR_COMPILER_BARRIER; \
+    ethr_atomic_inc(&(MTXB)->non_exclusive); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO) \
+do { \
+    ETHR_COMPILER_BARRIER; \
+    ethr_atomic_add(&(MTXB)->non_exclusive, (NO)); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB) \
+do { \
+    ETHR_COMPILER_BARRIER; \
+    ethr_atomic_dec(&(MTXB)->non_exclusive); \
+    ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
+#endif
+
+#else
+# define ETHR_MTX_CHK_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB)
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
+#endif
+
+# define ETHR_MTX_CHK_EXCL_INIT(MTXB) \
+do { \
+    ETHR_MTX_CHK_EXCL_INIT__((MTXB)); \
+    ETHR_MTX_CHK_NON_EXCL_INIT__((MTXB)); \
+} while (0)
+
+
 #ifdef ETHR_USE_OWN_MTX_IMPL__
 
 #define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_MAX 2000
@@ -369,6 +501,11 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
     act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
     res = (act == 0) ? 0 : EBUSY;
 
+#ifdef ETHR_MTX_CHK_EXCL
+    if (res == 0)
+	ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);
+#endif
+
     ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
     ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
 
@@ -386,6 +523,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
     if (act != 0)
 	ethr_mutex_lock_wait__(mtx, act);
 
+    ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);
+
     ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
     ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
 
@@ -400,6 +539,8 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
     ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
     ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
 
+    ETHR_MTX_CHK_EXCL_UNSET_EXCL(&mtx->mtxb);
+
     act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
     if (act != ETHR_RWMTX_W_FLG__)
 	ethr_mutex_unlock_wake__(mtx, act);
diff --git a/erts/include/internal/i386/atomic.h b/erts/include/internal/i386/atomic.h
index f28258059f..52d01aab32 100644
--- a/erts/include/internal/i386/atomic.h
+++ b/erts/include/internal/i386/atomic.h
@@ -167,15 +167,52 @@ ethr_native_atomic_xchg(ethr_native_atomic_t *var, long val)
  * Atomic ops with at least specified barriers.
  */
 
-#define ethr_native_atomic_read_acqb ethr_native_atomic_read
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
+static ETHR_INLINE long
+ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
+{
+    long val;
+#if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
+    val = var->counter;
+#else
+    val = ethr_native_atomic_add_return(var, 0);
+#endif
+    __asm__ __volatile__("" : : : "memory");
+    return val;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
+{
+    __asm__ __volatile__("" : : : "memory");
 #if defined(__x86_64__) || !defined(ETHR_PRE_PENTIUM4_COMPAT)
-#define ethr_native_atomic_set_relb ethr_native_atomic_set
+    var->counter = i;
 #else
-#define ethr_native_atomic_set_relb ethr_native_atomic_xchg
+    (void) ethr_native_atomic_xchg(var, i);
 #endif
-#define ethr_native_atomic_dec_relb ethr_native_atomic_dec
-#define ethr_native_atomic_dec_return_relb ethr_native_atomic_dec_return
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+    long res = ethr_native_atomic_inc_return(var);
+    __asm__ __volatile__("" : : : "memory");
+    return res;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
+{
+    __asm__ __volatile__("" : : : "memory");
+    ethr_native_atomic_dec(var);
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
+{
+    __asm__ __volatile__("" : : : "memory");
+    return ethr_native_atomic_dec_return(var);
+}
+
 #define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
 #define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
 
diff --git a/erts/include/internal/sparc32/atomic.h b/erts/include/internal/sparc32/atomic.h
index 2a995d4465..2da6472393 100644
--- a/erts/include/internal/sparc32/atomic.h
+++ b/erts/include/internal/sparc32/atomic.h
@@ -176,38 +176,59 @@ ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, long new, long old)
  * Atomic ops with at least specified barriers.
  */
 
+/* TODO: relax acquire barriers */
+
 static ETHR_INLINE long
 ethr_native_atomic_read_acqb(ethr_native_atomic_t *var)
 {
     long res = ethr_native_atomic_read(var);
-    __asm__ __volatile__("membar #StoreLoad|#StoreStore");
+    __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreLoad|#StoreStore" : : : "memory");
     return res;
 }
 
 static ETHR_INLINE void
 ethr_native_atomic_set_relb(ethr_native_atomic_t *var, long i)
 {
-    __asm__ __volatile__("membar #LoadStore|#StoreStore");
+    __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
     ethr_native_atomic_set(var, i);
 }
 
+static ETHR_INLINE long
+ethr_native_atomic_inc_return_acqb(ethr_native_atomic_t *var)
+{
+    long res = ethr_native_atomic_inc_return(var);
+    __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
+    return res;
+}
+
 static ETHR_INLINE void
 ethr_native_atomic_dec_relb(ethr_native_atomic_t *var)
 {
-    __asm__ __volatile__("membar #LoadStore|#StoreStore");
+    __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
     ethr_native_atomic_dec(var);
 }
 
 static ETHR_INLINE long
 ethr_native_atomic_dec_return_relb(ethr_native_atomic_t *var)
 {
-    __asm__ __volatile__("membar #LoadStore|#StoreStore");
+    __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
     return ethr_native_atomic_dec_return(var);
 }
 
-#define ethr_native_atomic_inc_return_acqb ethr_native_atomic_inc_return
-#define ethr_native_atomic_cmpxchg_acqb ethr_native_atomic_cmpxchg
-#define ethr_native_atomic_cmpxchg_relb ethr_native_atomic_cmpxchg
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_acqb(ethr_native_atomic_t *var, long new, long old)
+{
+    long res = ethr_native_atomic_cmpxchg(var, new, old);
+    __asm__ __volatile__("membar #LoadLoad|#LoadStore" : : : "memory");
+    return res;
+}
+
+static ETHR_INLINE long
+ethr_native_atomic_cmpxchg_relb(ethr_native_atomic_t *var, long new, long old)
+{
+    __asm__ __volatile__("membar #LoadStore|#StoreStore" : : : "memory");
+    return ethr_native_atomic_cmpxchg(var, new, old);
+}
 
 #endif /* ETHR_TRY_INLINE_FUNCS */
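A note on the barrier changes above (editorial illustration, not part of the commit): the i386 wrappers pair each operation with a compiler barrier, and the SPARC V9 wrappers pair it with a membar, so that an *_acqb operation behaves as an acquire (later accesses cannot be hoisted above it) and a *_relb operation behaves as a release (earlier accesses cannot sink below it). The toy spinlock below shows the intended pairing under those semantics; toy_spinlock_t, toy_spin_lock and toy_spin_unlock are hypothetical names, and the include is illustrative since these wrappers are internal to ethread.

#include "ethread.h"	/* illustrative include; the wrappers are ethread-internal */

/* Toy spinlock, illustration only (not code from this commit): take the
 * lock with an acquire-barrier cmpxchg, release it with a release-barrier
 * store, using the wrappers defined in the headers above. The flag is
 * assumed to be initialized to 0 (free). */
typedef struct {
    ethr_native_atomic_t flag;	/* 0 = free, 1 = held */
} toy_spinlock_t;

static void
toy_spin_lock(toy_spinlock_t *l)
{
    /* cmpxchg returns the previously observed value; 0 means we took the
     * lock. The acquire barrier keeps the critical section from being
     * reordered before the lock acquisition. */
    while (ethr_native_atomic_cmpxchg_acqb(&l->flag, 1, 0) != 0)
	;
}

static void
toy_spin_unlock(toy_spinlock_t *l)
{
    /* The release barrier keeps the critical section from being
     * reordered after the unlocking store. */
    ethr_native_atomic_set_relb(&l->flag, 0);
}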