Diffstat (limited to 'erts/include/internal/ethr_mutex.h')
-rw-r--r--   erts/include/internal/ethr_mutex.h   212
1 file changed, 186 insertions(+), 26 deletions(-)
diff --git a/erts/include/internal/ethr_mutex.h b/erts/include/internal/ethr_mutex.h
index 8d9d5e3d08..fadaf1e2a4 100644
--- a/erts/include/internal/ethr_mutex.h
+++ b/erts/include/internal/ethr_mutex.h
@@ -33,6 +33,13 @@
# define ETHR_MTX_HARD_DEBUG
#endif
+#if 0
+# define ETHR_MTX_CHK_EXCL
+#if 1
+# define ETHR_MTX_CHK_NON_EXCL
+#endif
+#endif
+
#ifdef ETHR_MTX_HARD_DEBUG
# ifdef __GNUC__
# warning ETHR_MTX_HARD_DEBUG
@@ -49,6 +56,15 @@
#if defined(ETHR_USE_OWN_RWMTX_IMPL__) || defined(ETHR_USE_OWN_MTX_IMPL__)
+#ifdef ETHR_DEBUG
+# ifndef ETHR_MTX_CHK_EXCL
+# define ETHR_MTX_CHK_EXCL
+# endif
+# ifndef ETHR_MTX_CHK_NON_EXCL
+# define ETHR_MTX_CHK_NON_EXCL
+# endif
+#endif
+
#if 0
# define ETHR_MTX_Q_LOCK_SPINLOCK__
# define ETHR_MTX_QLOCK_TYPE__ ethr_spinlock_t
@@ -62,14 +78,14 @@
# error Need a qlock implementation
#endif
-#define ETHR_RWMTX_W_FLG__ (((long) 1) << 31)
-#define ETHR_RWMTX_W_WAIT_FLG__ (((long) 1) << 30)
-#define ETHR_RWMTX_R_WAIT_FLG__ (((long) 1) << 29)
+#define ETHR_RWMTX_W_FLG__ (((ethr_sint32_t) 1) << 31)
+#define ETHR_RWMTX_W_WAIT_FLG__ (((ethr_sint32_t) 1) << 30)
+#define ETHR_RWMTX_R_WAIT_FLG__ (((ethr_sint32_t) 1) << 29)
/* frequent read kind */
-#define ETHR_RWMTX_R_FLG__ (((long) 1) << 28)
-#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_FLG__ - 1)
-#define ETHR_RWMTX_R_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
+#define ETHR_RWMTX_R_FLG__ (((ethr_sint32_t) 1) << 28)
+#define ETHR_RWMTX_R_ABRT_UNLCK_FLG__ (((ethr_sint32_t) 1) << 27)
+#define ETHR_RWMTX_R_PEND_UNLCK_MASK__ (ETHR_RWMTX_R_ABRT_UNLCK_FLG__ - 1)
/* normal kind */
#define ETHR_RWMTX_RS_MASK__ (ETHR_RWMTX_R_WAIT_FLG__ - 1)
@@ -79,20 +95,39 @@
#define ETHR_CND_WAIT_FLG__ ETHR_RWMTX_R_WAIT_FLG__
+#ifdef ETHR_DEBUG
+#define ETHR_DBG_CHK_UNUSED_FLG_BITS(V) \
+ ETHR_ASSERT(!((V) & ~(ETHR_RWMTX_W_FLG__ \
+ | ETHR_RWMTX_W_WAIT_FLG__ \
+ | ETHR_RWMTX_R_WAIT_FLG__ \
+ | ETHR_RWMTX_RS_MASK__)))
+#else
+#define ETHR_DBG_CHK_UNUSED_FLG_BITS(V)
+#endif
+
+#define ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(MTX) \
+ ETHR_DBG_CHK_UNUSED_FLG_BITS(ethr_atomic32_read(&(MTX)->mtxb.flgs))
+
struct ethr_mutex_base_ {
#ifdef ETHR_MTX_HARD_DEBUG_FENCE
long pre_fence;
#endif
- ethr_atomic_t flgs;
- ETHR_MTX_QLOCK_TYPE__ qlck;
- ethr_ts_event *q;
+ ethr_atomic32_t flgs;
short aux_scnt;
short main_scnt;
+ ETHR_MTX_QLOCK_TYPE__ qlck;
+ ethr_ts_event *q;
#ifdef ETHR_MTX_HARD_DEBUG_WSQ
int ws;
#endif
+#ifdef ETHR_MTX_CHK_EXCL
+ ethr_atomic32_t exclusive;
+#endif
+#ifdef ETHR_MTX_CHK_NON_EXCL
+ ethr_atomic32_t non_exclusive;
+#endif
#ifdef ETHR_MTX_HARD_DEBUG_LFS
- ethr_atomic_t hdbg_lfs;
+ ethr_atomic32_t hdbg_lfs;
#endif
};
@@ -201,7 +236,7 @@ typedef struct {
typedef union {
struct {
- ethr_atomic_t readers;
+ ethr_atomic32_t readers;
int waiting_readers;
int byte_offset;
ethr_rwmutex_lived lived;
@@ -263,13 +298,13 @@ void ethr_rwmutex_rwunlock(ethr_rwmutex *);
#ifdef ETHR_MTX_HARD_DEBUG_LFS
# define ETHR_MTX_HARD_DEBUG_LFS_INIT(MTXB) \
do { \
- ethr_atomic_init(&(MTXB)->hdbg_lfs, 0); \
+ ethr_atomic32_init(&(MTXB)->hdbg_lfs, 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_inc_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_inc_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ > 0); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRLOCK(MTXB, RES) \
@@ -282,15 +317,15 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ >= 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(MTXB) \
do { \
- long val__; \
+ ethr_sint32_t val__; \
ETHR_COMPILER_BARRIER; \
- val__ = ethr_atomic_dec_read(&(MTXB)->hdbg_lfs); \
+ val__ = ethr_atomic32_dec_read(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == -1); \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(MTXB, RES) \
@@ -303,7 +338,7 @@ do { \
} while (0)
# define ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(MTXB) \
do { \
- long val__ = ethr_atomic_inctest(&(MTXB)->hdbg_lfs); \
+ ethr_sint32_t val__ = ethr_atomic32_inctest(&(MTXB)->hdbg_lfs); \
ETHR_MTX_HARD_ASSERT(val__ == 0); \
ETHR_COMPILER_BARRIER; \
} while (0)
@@ -344,6 +379,116 @@ do { \
#define ETHR_MTX_HARD_DEBUG_FENCE_INIT(X)
#endif
+#ifdef ETHR_MTX_CHK_EXCL
+
+#if !defined(ETHR_DEBUG) && defined(__GNUC__)
+#warning "check exclusive is enabled"
+#endif
+
+# define ETHR_MTX_CHK_EXCL_INIT__(MTXB) \
+ ethr_atomic32_init(&(MTXB)->exclusive, 0)
+
+# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (!ethr_atomic32_read(&(MTXB)->exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (ethr_atomic32_read(&(MTXB)->exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is not exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB) \
+do { \
+ ETHR_MTX_CHK_EXCL_IS_NOT_EXCL((MTXB)); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 1); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB) \
+do { \
+ ETHR_MTX_CHK_EXCL_IS_EXCL((MTXB)); \
+ ethr_atomic32_set(&(MTXB)->exclusive, 0); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+
+#ifdef ETHR_MTX_CHK_NON_EXCL
+
+#if !defined(ETHR_DEBUG) && defined(__GNUC__)
+#warning "check non-exclusive is enabled"
+#endif
+
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB) \
+ ethr_atomic32_init(&(MTXB)->non_exclusive, 0)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (!ethr_atomic32_read(&(MTXB)->non_exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is non-exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ if (ethr_atomic32_read(&(MTXB)->non_exclusive)) \
+ ethr_assert_failed(__FILE__, __LINE__, __func__,\
+ "is not non-exclusive"); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ethr_atomic32_inc(&(MTXB)->non_exclusive); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ethr_atomic32_add(&(MTXB)->non_exclusive, (NO)); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB) \
+do { \
+ ETHR_COMPILER_BARRIER; \
+ ethr_atomic32_dec(&(MTXB)->non_exclusive); \
+ ETHR_COMPILER_BARRIER; \
+} while (0)
+#else
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
+#endif
+
+#else
+# define ETHR_MTX_CHK_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_EXCL(MTXB)
+# define ETHR_MTX_CHK_NON_EXCL_INIT__(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_IS_NOT_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL_NO(MTXB, NO)
+# define ETHR_MTX_CHK_EXCL_SET_NON_EXCL(MTXB)
+# define ETHR_MTX_CHK_EXCL_UNSET_NON_EXCL(MTXB)
+#endif
+
+# define ETHR_MTX_CHK_EXCL_INIT(MTXB) \
+do { \
+ ETHR_MTX_CHK_EXCL_INIT__((MTXB)); \
+ ETHR_MTX_CHK_NON_EXCL_INIT__((MTXB)); \
+} while (0)
+
+
#ifdef ETHR_USE_OWN_MTX_IMPL__
#define ETHR_MTX_DEFAULT_MAIN_SPINCOUNT_MAX 2000
@@ -356,21 +501,28 @@ do { \
#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_MUTEX_IMPL__)
-void ethr_mutex_lock_wait__(ethr_mutex *, long);
-void ethr_mutex_unlock_wake__(ethr_mutex *, long);
+void ethr_mutex_lock_wait__(ethr_mutex *, ethr_sint32_t);
+void ethr_mutex_unlock_wake__(ethr_mutex *, ethr_sint32_t);
static ETHR_INLINE int
ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
int res;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
res = (act == 0) ? 0 : EBUSY;
+#ifdef ETHR_MTX_CHK_EXCL
+ if (res == 0)
+ ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);
+#endif
+
ETHR_MTX_HARD_DEBUG_LFS_TRYRWLOCK(&mtx->mtxb, res);
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
ETHR_COMPILER_BARRIER;
return res;
@@ -379,15 +531,19 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_trylock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
- act = ethr_atomic_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
+ act = ethr_atomic32_cmpxchg_acqb(&mtx->mtxb.flgs, ETHR_RWMTX_W_FLG__, 0);
if (act != 0)
ethr_mutex_lock_wait__(mtx, act);
+ ETHR_MTX_CHK_EXCL_SET_EXCL(&mtx->mtxb);
+
ETHR_MTX_HARD_DEBUG_LFS_RWLOCK(&mtx->mtxb);
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
ETHR_COMPILER_BARRIER;
}
@@ -395,16 +551,20 @@ ETHR_INLINE_FUNC_NAME_(ethr_mutex_lock)(ethr_mutex *mtx)
static ETHR_INLINE void
ETHR_INLINE_FUNC_NAME_(ethr_mutex_unlock)(ethr_mutex *mtx)
{
- long act;
+ ethr_sint32_t act;
ETHR_COMPILER_BARRIER;
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
ETHR_MTX_HARD_DEBUG_LFS_RWUNLOCK(&mtx->mtxb);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
+
+ ETHR_MTX_CHK_EXCL_UNSET_EXCL(&mtx->mtxb);
- act = ethr_atomic_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
+ act = ethr_atomic32_cmpxchg_relb(&mtx->mtxb.flgs, 0, ETHR_RWMTX_W_FLG__);
if (act != ETHR_RWMTX_W_FLG__)
ethr_mutex_unlock_wake__(mtx, act);
ETHR_MTX_HARD_DEBUG_FENCE_CHK(mtx);
+ ETHR_MTX_DBG_CHK_UNUSED_FLG_BITS(mtx);
}
#endif /* ETHR_TRY_INLINE_FUNCS */
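
For context, a minimal sketch of how the exclusive-ownership check macros introduced by this patch are intended to be exercised. The function and mutex names below are illustrative only and not part of the patch; it assumes the ethread headers are on the include path and that the mutex has already been initialized. When ETHR_MTX_CHK_EXCL is not defined, the check macros expand to nothing, so the same code compiles in both configurations.

    #include "ethread.h"          /* pulls in ethr_mutex.h internally (assumption) */

    /* Hypothetical helper: update state guarded by `mtx`.
     * ethr_mutex_lock() sets mtxb.exclusive (via ETHR_MTX_CHK_EXCL_SET_EXCL)
     * and ethr_mutex_unlock() clears it (via ETHR_MTX_CHK_EXCL_UNSET_EXCL),
     * so the assertion below fires if the lock is not actually held. */
    static void
    update_shared_state(ethr_mutex *mtx)
    {
        ethr_mutex_lock(mtx);
        ETHR_MTX_CHK_EXCL_IS_EXCL(&mtx->mtxb);   /* assert: we own it exclusively */
        /* ... critical section ... */
        ethr_mutex_unlock(mtx);
    }

The checks are only compiled in when ETHR_MTX_CHK_EXCL (and, for reader counts, ETHR_MTX_CHK_NON_EXCL) is defined, which the patch enables automatically under ETHR_DEBUG for the own mutex/rwmutex implementations.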