Diffstat (limited to 'erts/emulator/beam/erl_thr_progress.c')
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 76
1 file changed, 47 insertions(+), 29 deletions(-)
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 7148b756e7..2a9f276e02 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2011-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2011-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -95,9 +95,9 @@
#define ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL 100
-#define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31)
-#define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30)
-#define ERTS_THR_PRGR_LFLG_WAITING_UM (((erts_aint32_t) 1) << 29)
+#define ERTS_THR_PRGR_LFLG_BLOCK ((erts_aint32_t) (1U << 31))
+#define ERTS_THR_PRGR_LFLG_NO_LEADER ((erts_aint32_t) (1U << 30))
+#define ERTS_THR_PRGR_LFLG_WAITING_UM ((erts_aint32_t) (1U << 29))
#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
| ERTS_THR_PRGR_LFLG_BLOCK \
| ERTS_THR_PRGR_LFLG_WAITING_UM))
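Aside from the cosmetics, the rewritten shift macros fix undefined behavior: erts_aint32_t is a signed 32-bit type, and left-shifting a signed 1 into the sign bit (bit 31) is undefined in C. Shifting an unsigned 1 is well defined, and converting the result back to the signed type is merely implementation-defined. A standalone sketch of the difference, with my_aint32_t as a hypothetical stand-in for erts_aint32_t:

#include <stdint.h>
#include <stdio.h>

typedef int32_t my_aint32_t;                /* stand-in: assumed signed, 32-bit */

/* old form: (((my_aint32_t) 1) << 31) is UB -- signed shift into the sign bit */
#define FLAG_OK ((my_aint32_t) (1U << 31))  /* new form: unsigned shift, then convert */

int main(void)
{
    my_aint32_t flags = FLAG_OK;
    printf("flag = 0x%08x\n", (unsigned) flags);  /* prints flag = 0x80000000 */
    return 0;
}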
@@ -142,8 +142,8 @@ init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
#warning "Thread progress state debug is on"
#endif
-#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER (((erts_aint32_t) 1) << 0)
-#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE (((erts_aint32_t) 1) << 1)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER ((erts_aint32_t) (1U << 0))
+#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE ((erts_aint32_t) (1U << 1))
#define ERTS_THR_PROGRESS_STATE_DEBUG_INIT(ID) \
erts_atomic32_init_nob(&intrnl->thr[(ID)].data.state_debug, \
@@ -179,10 +179,10 @@ do { \
#endif /* ERTS_THR_PROGRESS_STATE_DEBUG */
-#define ERTS_THR_PRGR_BLCKR_INVALID (~((erts_aint32_t) 0))
-#define ERTS_THR_PRGR_BLCKR_UNMANAGED (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_BLCKR_INVALID ((erts_aint32_t) (~0U))
+#define ERTS_THR_PRGR_BLCKR_UNMANAGED ((erts_aint32_t) (1U << 31))
-#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING ((erts_aint32_t) (1U << 31))
#define ERTS_THR_PRGR_BM_BITS 32
#define ERTS_THR_PRGR_BM_SHIFT 5
@@ -321,13 +321,23 @@ tmp_thr_prgr_data(ErtsSchedulerData *esdp)
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
if (!tpd) {
- /*
- * We only allocate the part up to the wakeup_request field
- * which is the first field only used by registered threads
- */
- tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
- offsetof(ErtsThrPrgrData, wakeup_request));
- init_tmp_thr_prgr_data(tpd);
+ /*
+ * We only allocate the part up to the wakeup_request field which is
+ * the first field only used by registered threads
+ */
+ size_t alloc_size = offsetof(ErtsThrPrgrData, wakeup_request);
+
+ /* We may land here as a result of unmanaged_delay being called from
+ * the lock counting module, which in turn might be called from within
+ * the allocator, so we use plain malloc to avoid deadlocks. */
+ tpd =
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ malloc(alloc_size);
+#else
+ erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA, alloc_size);
+#endif
+
+ init_tmp_thr_prgr_data(tpd);
}
return tpd;
@@ -337,8 +347,13 @@ static ERTS_INLINE void
return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
if (tpd->is_temporary) {
- erts_tsd_set(erts_thr_prgr_data_key__, NULL);
- erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ free(tpd);
+#else
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+#endif
}
}
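The two hunks above form a pair: the allocation site and the free site must agree on which allocator was used, under the same #ifdef, so memory from plain malloc is never handed to the custom allocator's free. The new comment explains the motivation: with lock counting enabled, unmanaged_delay can be reached from inside the ERTS allocator itself, so re-entering erts_alloc could deadlock, while plain malloc breaks the cycle. A reduced sketch of the pattern, where the struct layout and custom_alloc/custom_free are hypothetical stand-ins for the real types and erts_alloc/erts_free:

#include <stddef.h>
#include <stdlib.h>

struct thr_prgr_data {
    int   id;
    int   is_temporary;
    /* Everything from here on is only used by registered threads, so a
     * temporary thread only needs the bytes preceding wakeup_request. */
    void *wakeup_request;
};

/* hypothetical stand-ins for erts_alloc()/erts_free() */
static void *custom_alloc(size_t size) { return malloc(size); }
static void  custom_free(void *p)      { free(p); }

static struct thr_prgr_data *alloc_tmp_data(void)
{
    size_t size = offsetof(struct thr_prgr_data, wakeup_request);
#ifdef USE_PLAIN_MALLOC  /* plays the role of ERTS_ENABLE_LOCK_COUNT */
    /* plain malloc cannot re-enter the custom allocator, so it is safe
     * even when we got here from inside that allocator */
    return malloc(size);
#else
    return custom_alloc(size);
#endif
}

static void free_tmp_data(struct thr_prgr_data *tpd)
{
    /* must mirror the allocation path exactly */
#ifdef USE_PLAIN_MALLOC
    free(tpd);
#else
    custom_free(tpd);
#endif
}

int main(void)
{
    struct thr_prgr_data *tpd = alloc_tmp_data();
    if (tpd) {
        tpd->is_temporary = 1;  /* fields before wakeup_request are valid */
        free_tmp_data(tpd);
    }
    return 0;
}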
@@ -502,7 +517,7 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
if (tpd) {
if (!tpd->is_temporary)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Double register of thread\n",
__FILE__, __LINE__, __func__);
is_blocking = tpd->is_blocking;
@@ -524,7 +539,7 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
#endif
ASSERT(tpd->id >= 0);
if (tpd->id >= intrnl->unmanaged.no)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Too many unmanaged registered threads\n",
__FILE__, __LINE__, __func__);
@@ -547,7 +562,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
if (tpd) {
if (!tpd->is_temporary)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Double register of thread\n",
__FILE__, __LINE__, __func__);
is_blocking = tpd->is_blocking;
@@ -568,7 +583,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
tpd->id = erts_atomic32_inc_read_nob(&intrnl->misc.data.managed_id);
ASSERT(tpd->id >= 0);
if (tpd->id >= intrnl->managed.no)
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"%s:%d:%s(): Too many managed registered threads\n",
__FILE__, __LINE__, __func__);
@@ -700,6 +715,7 @@ leader_update(ErtsThrPrgrData *tpd)
tpd->leader_state.chk_next_ix = no_managed;
erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
(erts_aint32_t) new_umrefc_ix);
+ tpd->leader_state.umrefc_ix.current = new_umrefc_ix;
ETHR_MEMBAR(ETHR_StoreLoad);
refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
ASSERT(refc >= 0);
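The single added line above keeps the leader's locally cached copy of umrefc_ix.current in sync with the shared atomic it has just written; without it, later leader-side decisions would see a stale index. A reduced C11 sketch of the publish, mirror, then fence-and-check sequence, with hypothetical names and plain C atomics standing in for the erts_atomic/ETHR_MEMBAR primitives:

#include <stdatomic.h>

struct leader_state { int current_ix; };   /* leader-local mirror */

static atomic_int  shared_current_ix;      /* ~ intrnl->misc.data.umrefc_ix.current */
static atomic_long refc[2];                /* ~ intrnl->umrefc[ix].refc */

static long switch_epoch(struct leader_state *ls, int new_ix)
{
    int old_ix = ls->current_ix;
    atomic_store_explicit(&shared_current_ix, new_ix, memory_order_relaxed);
    ls->current_ix = new_ix;        /* the added line: keep the mirror in sync */
    /* StoreLoad barrier: the store above must be visible before we read the
     * old epoch's reference count (~ ETHR_MEMBAR(ETHR_StoreLoad)) */
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&refc[old_ix], memory_order_relaxed);
}

int main(void)
{
    struct leader_state ls = { 0 };
    return (int) switch_epoch(&ls, 1);  /* switch from index 0 to 1 */
}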
@@ -969,8 +985,10 @@ erts_thr_progress_unmanaged_continue__(ErtsThrPrgrDelayHandle handle)
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
ERTS_LC_ASSERT(tpd && tpd->is_delaying);
- tpd->is_delaying = 0;
- return_tmp_thr_prgr_data(tpd);
+ tpd->is_delaying--;
+ ASSERT(tpd->is_delaying >= 0);
+ if (!tpd->is_delaying)
+ return_tmp_thr_prgr_data(tpd);
#endif
ASSERT(!erts_thr_progress_is_managed_thread());
@@ -995,7 +1013,7 @@ erts_thr_progress_unmanaged_delay__(void)
#ifdef ERTS_ENABLE_LOCK_CHECK
{
ErtsThrPrgrData *tpd = tmp_thr_prgr_data(NULL);
- tpd->is_delaying = 1;
+ tpd->is_delaying++;
}
#endif
return (ErtsThrPrgrDelayHandle) umrefc_ix;
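Together with the previous hunk, this turns is_delaying from a flag into a nesting counter under ERTS_ENABLE_LOCK_CHECK: delay sections may now nest, and the temporary per-thread data is returned only when the outermost continue runs. A reduced sketch of the counter discipline, with hypothetical types and release_tmp_data standing in for return_tmp_thr_prgr_data:

#include <assert.h>
#include <stdio.h>

struct tpd { int is_delaying; };

static void release_tmp_data(struct tpd *t)  /* ~ return_tmp_thr_prgr_data */
{
    (void) t;
    puts("temporary data released");
}

static void delay(struct tpd *t) { t->is_delaying++; }

static void cont(struct tpd *t)
{
    t->is_delaying--;
    assert(t->is_delaying >= 0);
    if (!t->is_delaying)          /* only the outermost continue releases */
        release_tmp_data(t);
}

int main(void)
{
    struct tpd t = { 0 };
    delay(&t); delay(&t);  /* nested delay sections */
    cont(&t);              /* inner continue: no release yet */
    cont(&t);              /* outer continue: releases */
    return 0;
}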
@@ -1033,7 +1051,7 @@ has_reached_wakeup(ErtsThrPrgrVal wakeup)
limit += 1;
if (!erts_thr_progress_has_passed__(limit, wakeup))
- erl_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_ABORT_EXIT,
"Invalid wakeup request value found:"
" current=%b64u, wakeup=%b64u, limit=%b64u",
current, wakeup, limit);
@@ -1102,7 +1120,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
ix = erts_atomic32_inc_read_nob(&mwd->len) - 1;
#if ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE
if (ix >= intrnl->managed.no)
- erl_exit(ERTS_ABORT_EXIT, "Internal error: Too many wakeup requests\n");
+ erts_exit(ERTS_ABORT_EXIT, "Internal error: Too many wakeup requests\n");
#endif
mwd->id[ix] = tpd->id;
@@ -1186,7 +1204,7 @@ wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
int hbase = hix << ERTS_THR_PRGR_BM_SHIFT;
int hbit;
for (hbit = 0; hbit < ERTS_THR_PRGR_BM_BITS; hbit++) {
- if (hmask & (1 << hbit)) {
+ if (hmask & (1U << hbit)) {
erts_aint_t lmask;
int lix = hbase + hbit;
ASSERT(0 <= lix && lix < umwd->low_sz);
@@ -1195,7 +1213,7 @@ wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
int lbase = lix << ERTS_THR_PRGR_BM_SHIFT;
int lbit;
for (lbit = 0; lbit < ERTS_THR_PRGR_BM_BITS; lbit++) {
- if (lmask & (1 << lbit)) {
+ if (lmask & (1U << lbit)) {
int id = lbase + lbit;
wakeup_unmanaged(id);
}
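These last two hunks apply the same signed-shift fix as the flag macros: hbit and lbit run up to 31, and 1 << 31 with a signed int is undefined behavior, so the masks are built with 1U. The surrounding loops walk a two-level bitmap in which the high map marks non-empty low words and each low-word bit maps to a thread id. A standalone sketch of that walk, assuming BM_BITS == 32 and BM_SHIFT == 5 as in the defines above; the wake action is faked with printf and the real code reads the masks with atomic operations:

#include <stdio.h>

#define BM_BITS  32
#define BM_SHIFT 5
#define LOW_SZ   32                 /* supports ids 0..1023 */
#define HIGH_SZ  1                  /* LOW_SZ / BM_BITS */

static unsigned high[HIGH_SZ];
static unsigned low[LOW_SZ];

static void mark(int id)
{
    int lix = id >> BM_SHIFT;
    low[lix]              |= 1U << (id  & (BM_BITS - 1));
    high[lix >> BM_SHIFT] |= 1U << (lix & (BM_BITS - 1));
}

static void wake_all(void)
{
    int hix, hbit, lbit;
    for (hix = 0; hix < HIGH_SZ; hix++) {
        unsigned hmask = high[hix];
        for (hbit = 0; hbit < BM_BITS; hbit++) {
            if (hmask & (1U << hbit)) {   /* 1U: with int, bit 31 would be UB */
                int lix = (hix << BM_SHIFT) + hbit;
                unsigned lmask = low[lix];
                for (lbit = 0; lbit < BM_BITS; lbit++) {
                    if (lmask & (1U << lbit))
                        printf("wake id %d\n", (lix << BM_SHIFT) + lbit);
                }
            }
        }
    }
}

int main(void)
{
    mark(3); mark(40); mark(1000);
    wake_all();
    return 0;
}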