Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.c | 3
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 7
-rw-r--r--  erts/emulator/beam/beam_bp.c | 3
-rw-r--r--  erts/emulator/beam/beam_emu.c | 2
-rw-r--r--  erts/emulator/beam/bif.c | 6
-rw-r--r--  erts/emulator/beam/bif.tab | 5
-rw-r--r--  erts/emulator/beam/big.c | 11
-rw-r--r--  erts/emulator/beam/code_ix.c | 3
-rw-r--r--  erts/emulator/beam/dist.c | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 3
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 11
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 7
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 57
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 5
-rw-r--r--  erts/emulator/beam/erl_async.c | 6
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 489
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 6
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 3
-rw-r--r--  erts/emulator/beam/erl_db.c | 63
-rw-r--r--  erts/emulator/beam/erl_db.h | 5
-rw-r--r--  erts/emulator/beam/erl_db_hash.c | 24
-rw-r--r--  erts/emulator/beam/erl_db_hash.h | 4
-rw-r--r--  erts/emulator/beam/erl_dirty_bif.tab | 5
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c | 29
-rw-r--r--  erts/emulator/beam/erl_fun.c | 3
-rw-r--r--  erts/emulator/beam/erl_gc.c | 3
-rw-r--r--  erts/emulator/beam/erl_init.c | 11
-rw-r--r--  erts/emulator/beam/erl_instrument.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 285
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 50
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 961
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 998
-rw-r--r--  erts/emulator/beam/erl_lock_flags.c | 59
-rw-r--r--  erts/emulator/beam/erl_lock_flags.h | 78
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 6
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 6
-rw-r--r--  erts/emulator/beam/erl_nif.c | 3
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 44
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 4
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 37
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 8
-rw-r--r--  erts/emulator/beam/erl_process.c | 33
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 209
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 180
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 3
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_smp.h | 133
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 33
-rw-r--r--  erts/emulator/beam/erl_threads.h | 406
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 61
-rw-r--r--  erts/emulator/beam/erl_trace.c | 10
-rw-r--r--  erts/emulator/beam/erl_utils.h | 1
-rw-r--r--  erts/emulator/beam/export.c | 3
-rw-r--r--  erts/emulator/beam/global.h | 3
-rw-r--r--  erts/emulator/beam/io.c | 223
-rw-r--r--  erts/emulator/beam/module.c | 3
-rw-r--r--  erts/emulator/beam/ops.tab | 8
-rw-r--r--  erts/emulator/beam/register.c | 3
-rw-r--r--  erts/emulator/beam/safe_hash.c | 23
-rw-r--r--  erts/emulator/beam/safe_hash.h | 7
-rw-r--r--  erts/emulator/beam/sys.h | 8
-rw-r--r--  erts/emulator/beam/utils.c | 1
62 files changed, 2933 insertions(+), 1742 deletions(-)
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index 2055c29190..c2d78aaccb 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -442,7 +442,8 @@ init_atom_table(void)
erts_smp_atomic_init_nob(&atom_put_ops, 0);
#endif
- erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab");
+ erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) atom_hash;
f.cmp = (HCMP_FUN) atom_cmp;
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 007bf99b6e..14ddb74324 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -97,7 +97,8 @@ init_purge_state(void)
{
purge_state.module = THE_NON_VALUE;
- erts_smp_mtx_init(&purge_state.mtx, "purge_state");
+ erts_smp_mtx_init(&purge_state.mtx, "purge_state", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
purge_state.pending_purge_lambda =
erts_export_put(am_erts_code_purger, am_pending_purge_lambda, 3);
@@ -118,7 +119,9 @@ init_purge_state(void)
void
erts_beam_bif_load_init(void)
{
- erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas");
+ erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+
release_literal_areas.first = NULL;
release_literal_areas.last = NULL;
erts_smp_atomic_init_nob(&erts_copy_literal_area__,
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index b9453c1d9a..950639f7ae 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -165,7 +165,8 @@ erts_bp_init(void) {
erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index");
+ erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#endif
}
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 79d751d13e..bc83699951 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -6829,7 +6829,7 @@ apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg)
}
if (is_not_nil(tmp)) { /* Must be well-formed list */
- p->freason = EXC_UNDEF;
+ p->freason = EXC_BADARG;
return NULL;
}
reg[arity] = fun;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 40dd4129d2..890277a3ba 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -62,9 +62,6 @@ static erts_smp_atomic32_t msacc;
static Export *await_sched_wall_time_mod_trap;
static erts_smp_atomic32_t sched_wall_time;
-static erts_smp_mtx_t ports_snapshot_mtx;
-erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
-
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
/*
@@ -5138,9 +5135,6 @@ void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
void erts_init_bif(void)
{
- erts_smp_mtx_init(&ports_snapshot_mtx, "ports_snapshot");
- erts_smp_atomic_init_nob(&erts_dead_ports_ptr, (erts_aint_t) NULL);
-
/*
* bif_return_trap/2 is a hidden BIF that bifs that need to
* yield the calling process traps to.
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index a8bbf5f8c1..962b00ae7b 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -437,7 +437,10 @@ bif erts_debug:dump_links/1
#
# Lock counter bif's
#
-bif erts_debug:lock_counters/1
+bif erts_debug:lcnt_control/2
+bif erts_debug:lcnt_control/1
+bif erts_debug:lcnt_collect/0
+bif erts_debug:lcnt_clear/0
#
# New Bifs in R8.
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 7128b8ed23..5eaf262cd8 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -1293,8 +1293,11 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
*r++ = ~c ^ *y++;
x++;
}
- while(xl--)
- *r++ = ~*x++;
+ while(xl--) {
+ DSUBb(*x,0,b,c);
+ *r++ = ~c;
+ x++;
+ }
}
else {
ErtsDigit b1, b2;
@@ -1312,7 +1315,9 @@ static dsize_t I_bxor(ErtsDigit* x, dsize_t xl, short xsgn,
x++; y++;
}
while(xl--) {
- *r++ = *x++;
+ DSUBb(*x,0,b1,c1);
+ *r++ = c1;
+ x++;
}
}
}
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index ec6267711b..8a3d1b20b4 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -57,7 +57,8 @@ void erts_code_ix_init(void)
*/
erts_smp_atomic32_init_nob(&the_active_code_index, 0);
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
- erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
+ erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_tsd_key_create(&has_code_write_permission,
"erts_has_code_write_permission");
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 982f1066df..09fdb897f5 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -3223,7 +3223,8 @@ static ErtsNodesMonitor *nodes_monitors_end;
static void
init_nodes_monitors(void)
{
- erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors");
+ erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
nodes_monitors = NULL;
nodes_monitors_end = NULL;
}
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 169e1e423d..c7ab444c96 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -3819,7 +3819,8 @@ hdbg_init(void)
hdbg_mblks[ERL_ALC_HDBG_MAX_MBLK-1].next = NULL;
free_hdbg_mblks = &hdbg_mblks[0];
used_hdbg_mblks = NULL;
- erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug");
+ erts_mtx_init(&hdbg_mblk_mtx, "erts_alloc_hard_debug", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
}
static void *check_memory_fence(void *ptr,
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 7b5cbe2178..97a1cf1308 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -344,7 +344,8 @@ ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
static erts_smp_spinlock_t NAME##_lck; \
ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_smp_spin_lock(&NAME##_lck), \
erts_smp_spin_unlock(&NAME##_lck))
@@ -358,7 +359,8 @@ ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
static erts_mtx_t NAME##_lck; \
ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_mtx_init(NAME##_lck, #NAME "_alloc_lock"), \
+ erts_mtx_init(NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_mtx_lock(&NAME##_lck), \
erts_mtx_unlock(&NAME##_lck))
@@ -371,7 +373,8 @@ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
#define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \
static erts_spinlock_t NAME##_lck; \
ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \
- erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock"),\
+ erts_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
erts_spin_lock(&NAME##_lck), \
erts_spin_unlock(&NAME##_lck))
@@ -448,7 +451,7 @@ NAME##_free(TYPE *p) \
}
#ifdef DEBUG
-#define ERTS_PRE_ALLOC_SIZE(SZ) 2
+#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? (SZ)/10 + 10 : 100)
#define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T))
#else
#define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) > 1 ? (SZ) : 1)
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 8a23a1526e..50a1d97dd5 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -368,6 +368,13 @@ type SSB SHORT_LIVED PROCESSES ssb
+endif
++if lcnt
+
+type LCNT_CARRIER STANDARD SYSTEM lcnt_lock_info_carrier
+type LCNT_VECTOR SHORT_LIVED SYSTEM lcnt_sample_vector
+
++endif
+
type DEBUG SHORT_LIVED SYSTEM debugging
type DDLL_PROCESS STANDARD SYSTEM ddll_processes
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 6fddba4b34..af86ad0548 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -6135,16 +6135,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (init->ts) {
allctr->thread_safe = 1;
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x_opt(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no),
- ERTS_LCNT_LT_ALLOC);
-#else
- erts_mtx_init_x(&allctr->mutex,
- "alcu_allocator",
- make_small(allctr->alloc_no));
-#endif /*ERTS_ENABLE_LOCK_COUNT*/
+ erts_mtx_init(&allctr->mutex, "alcu_allocator", make_small(allctr->alloc_no),
+ ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
#ifdef DEBUG
allctr->debug.saved_tid = 0;
@@ -6324,7 +6316,8 @@ erts_alcu_init(AlcUInit_t *init)
carrier_alignment = sizeof(Unit_t);
#endif
- erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms");
+ erts_mtx_init(&init_atoms_mtx, "alcu_init_atoms", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
atoms_initialized = 0;
initialized = 1;
@@ -6592,3 +6585,45 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void lcnt_enable_allocator_lock_count(Allctr_t *allocator, int enable) {
+ if(!allocator->thread_safe) {
+ return;
+ }
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&allocator->mutex.lcnt,
+ "alcu_allocator", make_small(allocator->alloc_no),
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ } else {
+ erts_lcnt_uninstall(&allocator->mutex.lcnt);
+ }
+}
+
+static void lcnt_update_thread_spec_locks(ErtsAllocatorThrSpec_t *tspec, int enable) {
+ if(tspec->enabled) {
+ int i;
+
+ for(i = 0; i < tspec->size; i++) {
+ lcnt_enable_allocator_lock_count(tspec->allctr[i], enable);
+ }
+ }
+}
+
+void erts_lcnt_update_allocator_locks(int enable) {
+ int i;
+
+ for(i = ERTS_ALC_A_MIN; i < ERTS_ALC_A_MAX; i++) {
+ ErtsAllocatorInfo_t *ai = &erts_allctrs_info[i];
+
+ if(ai->enabled && ai->alloc_util) {
+ if(ai->thr_spec) {
+ lcnt_update_thread_spec_locks((ErtsAllocatorThrSpec_t*)ai->extra, enable);
+ } else {
+ lcnt_enable_allocator_lock_count((Allctr_t*)ai->extra, enable);
+ }
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index e889980fa4..73c467aa0a 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -220,6 +220,10 @@ void* erts_alcu_literal_32_sys_realloc(Allctr_t*, void *ptr, Uint *size_p, Uint
void erts_alcu_literal_32_sys_dealloc(Allctr_t*, void *ptr, Uint size, int superalign);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_allocator_locks(int enable);
+#endif
+
#endif /* !ERL_ALLOC_UTIL__ */
#if defined(GET_ERL_ALLOC_UTIL_IMPL) && !defined(ERL_ALLOC_UTIL_IMPL__)
@@ -673,7 +677,6 @@ void erts_alcu_assert_failed(char* expr, char* file, int line, char *func);
int is_sbc_blk(Block_t*);
#endif
-
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
&& !defined(ERL_ALLOC_UTIL_IMPL__) */
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 84254af0c2..9a93034fcb 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -194,7 +194,8 @@ erts_init_async(void)
ptr += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
async->init.data.no_initialized = 0;
- erts_mtx_init(&async->init.data.mtx, "async_init_mtx");
+ erts_mtx_init(&async->init.data.mtx, "async_init_mtx", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_cnd_init(&async->init.data.cnd);
erts_atomic_init_nob(&async->init.data.id, 0);
@@ -213,7 +214,8 @@ erts_init_async(void)
for (i = 1; i <= erts_no_schedulers; i++) {
ErtsAsyncReadyQ *arq = async_ready_q(i);
#if ERTS_USE_ASYNC_READY_ENQ_MTX
- erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx");
+ erts_mtx_init(&arq->x.data.enq_mtx, "async_enq_mtx", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
#endif
erts_thr_q_finalize_dequeue_state_init(&arq->fin_deq);
qinit.arg = (void *) (SWord) i;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index e2773475b0..e5d7efcc72 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3544,24 +3544,32 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_runtime) {
- UWord u1, u2, dummy;
+ ErtsMonotonicTime u1, u2;
Eterm b1, b2;
- elapsed_time_both(&u1,&dummy,&u2,&dummy);
- b1 = erts_make_integer(u1,BIF_P);
- b2 = erts_make_integer(u2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ elapsed_time_both(&u1, NULL, &u2, NULL);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, u1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, u2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, u1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, u2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_run_queue) {
res = erts_run_queues_len(NULL, 1, 0, 0);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
- UWord w1, w2;
+ ErtsMonotonicTime w1, w2;
Eterm b1, b2;
+ Uint hsz;
wall_clock_elapsed_time_both(&w1, &w2);
- b1 = erts_make_integer((Uint) w1,BIF_P);
- b2 = erts_make_integer((Uint) w2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, w1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, w2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, w1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, w2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_io) {
@@ -4444,48 +4452,120 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
#ifdef ERTS_ENABLE_LOCK_COUNT
+
+typedef struct {
+ /* info->location_count may increase between size calculation and term
+ * building, so we cap it at the value sampled in lcnt_build_result_vector.
+ *
+ * Shrinking is safe though. */
+ int max_location_count;
+ erts_lcnt_lock_info_t *info;
+} lcnt_sample_t;
+
+typedef struct lcnt_sample_vector_ {
+ lcnt_sample_t *elements;
+ size_t size;
+} lcnt_sample_vector_t;
+
+static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
+ erts_lcnt_lock_info_t *iterator;
+ lcnt_sample_vector_t result;
+ size_t allocated_entries;
+
+ allocated_entries = 64;
+ result.size = 0;
+
+ result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
+ allocated_entries * sizeof(lcnt_sample_t));
+
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(list, &iterator)) {
+ erts_lcnt_retain_lock_info(iterator);
+
+ result.elements[result.size].max_location_count = iterator->location_count;
+ result.elements[result.size].info = iterator;
+
+ result.size++;
+
+ if(result.size >= allocated_entries) {
+ allocated_entries *= 2;
+
+ result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
+ allocated_entries * sizeof(lcnt_sample_t));
+ }
+ }
+
+ return result;
+}
+
+static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
+ size_t i;
+
+ for(i = 0; i < vector->size; i++) {
+ erts_lcnt_release_lock_info(vector->elements[i].info);
+ }
+
+ erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
+}
+
+/* The size of an integer is not guaranteed to be constant since we're walking
+ * over live data, and may cross over into bignum territory between size calc
+ * and the actual build. This takes care of that through always assuming the
+ * worst, but needs to be fixed up with HRelease once the final term has been
+ * built. */
+static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
+ Eterm res = THE_NON_VALUE;
+
+ if(szp) {
+ *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
+ }
+
+ if(hpp) {
+ if (IS_USMALL(0, ui)) {
+ res = make_small(ui);
+ } else {
+ res = erts_uint64_to_big(ui, hpp);
+ }
+ }
+
+ return res;
+}
+
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- Uint tries = 0, colls = 0;
- unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
- unsigned int line = 0;
unsigned int i;
-
+ const char *file;
+
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
-
+
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
- * { .. histogram .. }]
- */
+ * [{{file, line},
+ {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }] */
- tries = (Uint) ethr_atomic_read(&stats->tries);
- colls = (Uint) ethr_atomic_read(&stats->colls);
-
- line = stats->line;
- timer_s = stats->timer.s;
- timer_ns = stats->timer.ns;
- timer_n = stats->timer_n;
-
- af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
- uil = erts_bld_uint( hpp, szp, line);
+ file = stats->file ? stats->file : "undefined";
+
+ af = erts_atom_put((byte *)file, strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
+ uil = erts_bld_uint( hpp, szp, stats->line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
-
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
- uits = erts_bld_uint( hpp, szp, timer_s);
- uitns = erts_bld_uint( hpp, szp, timer_ns);
- uitn = erts_bld_uint( hpp, szp, timer_n);
+ uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
+ uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));
+
+ uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
+ uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
+ uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
- vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
}
+
thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
@@ -4494,185 +4574,266 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
return res;
}
-static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
+static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
+ Eterm id = info->id;
+
+ if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_TYPE_PROCLOCK) {
+ /* Use registered names as id's for process locks if available. Thread
+ * progress is delayed since we may be running on a dirty scheduler. */
+ ErtsThrPrgrDelayHandle delay_handle;
+ Process *process;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+
+ process = erts_proc_lookup(info->id);
+ if (process && process->common.u.alive.reg) {
+ id = process->common.u.alive.reg->name;
+ }
+
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
+ const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
+ id = erts_atom_put((byte*)name, strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ }
+
+ return id;
+}
+
+static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
+ erts_lcnt_lock_info_t *info = sample->info;
+
Eterm name, type, id, stats = NIL, t;
- Process *proc = NULL;
- char *ltype;
+ const char *lock_desc;
int i;
+
+ /* term: [{name, id, type, stats()}] */
+
+ ASSERT(info->name);
- /* term:
- * [{name, id, type, stats()}]
- */
-
- ASSERT(lock->name);
-
- ltype = erts_lcnt_lock_type(lock->flag);
-
- ASSERT(ltype);
-
- type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
-
- if (lock->flag & ERTS_LCNT_LT_ALLOC) {
- /* use allocator types names as id's for allocator locks */
- ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
- /* use registered names as id's for process locks if available */
- proc = erts_proc_lookup(lock->id);
- if (proc && proc->common.u.alive.reg) {
- id = proc->common.u.alive.reg->name;
- } else {
- /* otherwise use process id */
- id = lock->id;
- }
+ lock_desc = erts_lock_flags_get_type_name(info->flags);
+
+ type = erts_atom_put((byte*)lock_desc, strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte*)info->name, strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);
+
+ /* Only attempt to resolve ids when actually emitting the term. This ought
+ * to be safe since all immediates are the same size. */
+ if(hpp != NULL) {
+ id = lcnt_pretty_print_lock_id(info);
} else {
- id = lock->id;
+ id = NIL;
}
- for (i = 0; i < lock->n_stats; i++) {
- stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
+ for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
+ stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
}
t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
- res = erts_bld_cons( hpp, szp, t, res);
+ res = erts_bld_cons(hpp, szp, t, res);
return res;
}
-static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
+static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
+ lcnt_sample_vector_t *current_locks,
+ lcnt_sample_vector_t *deleted_locks, Eterm res) {
+ const char *str_duration = "duration";
+ const char *str_locks = "locks";
+
Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
- erts_lcnt_lock_t *lock = NULL;
- char *str_duration = "duration";
- char *str_locks = "locks";
-
- /* term:
- * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
- */
-
+ size_t i;
+
+ /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */
+
/* duration tuple */
- dts = erts_bld_uint( hpp, szp, data->duration.s);
- dtns = erts_bld_uint( hpp, szp, data->duration.ns);
+ dts = bld_unstable_uint64(hpp, szp, duration->s);
+ dtns = bld_unstable_uint64(hpp, szp, duration->ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
-
+
adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
-
aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
-
- for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < current_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
}
-
- for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < deleted_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
}
-
+
tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
-
- res = erts_bld_cons( hpp, szp, tloc, res);
- res = erts_bld_cons( hpp, szp, tdur, res);
+
+ res = erts_bld_cons(hpp, szp, tloc, res);
+ res = erts_bld_cons(hpp, szp, tdur, res);
return res;
-}
+}
+
+static struct {
+ const char *name;
+ erts_lock_flags_t flag;
+} lcnt_category_map[] = {
+ {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
+ {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
+ {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
+ {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
+ {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
+ {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
+ {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
+ {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
+ {NULL, 0}
+ };
+
+static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
+ int i = 0;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
+ return lcnt_category_map[i].flag;
+ }
+ }
+
+ return 0;
+}
+
+static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
+ Eterm res;
+ int i;
+
+ res = NIL;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(mask & lcnt_category_map[i].flag) {
+ Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name,
+ strlen(lcnt_category_map[i].name),
+ ERTS_ATOM_ENC_UTF8, 0);
+
+ res = erts_bld_cons(hpp, szp, category, res);
+ }
+ }
+
+ return res;
+}
+
#endif
-BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
+BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
{
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Eterm res = NIL;
-#endif
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
+#else
+ erts_lcnt_clear_counters();
+ BIF_RET(am_ok);
+#endif
+}
- if (BIF_ARG_1 == am_enabled) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- BIF_RET(am_true);
+BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
+{
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
#else
- BIF_RET(am_false);
-#endif
- }
-#ifdef ERTS_ENABLE_LOCK_COUNT
+ lcnt_sample_vector_t current_locks, deleted_locks;
+ erts_lcnt_data_t data;
- else if (BIF_ARG_1 == am_info) {
- erts_lcnt_data_t *data;
- Uint hsize = 0;
- Uint *szp;
- Eterm* hp;
+ Eterm *term_heap_start, *term_heap_end;
+ Uint term_heap_size = 0;
+ Eterm result;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ data = erts_lcnt_get_data();
- erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- data = erts_lcnt_get_data();
+ current_locks = lcnt_build_sample_vector(data.current_locks);
+ deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
- /* calculate size */
+ lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
+ &current_locks, &deleted_locks, NIL);
- szp = &hsize;
- lcnt_build_result_term(NULL, szp, data, NIL);
+ term_heap_start = HAlloc(BIF_P, term_heap_size);
+ term_heap_end = term_heap_start;
- /* alloc and build */
+ result = lcnt_build_result_term(&term_heap_end, NULL,
+ &data.duration, &current_locks, &deleted_locks, NIL);
- hp = HAlloc(BIF_P, hsize);
+ HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
- res = lcnt_build_result_term(&hp, NULL, data, res);
-
- erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+ lcnt_destroy_sample_vector(&current_locks);
+ lcnt_destroy_sample_vector(&deleted_locks);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(res);
- } else if (BIF_ARG_1 == am_clear) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ BIF_RET(result);
+#endif
+}
- erts_lcnt_clear_counters();
+BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t mask;
+ Eterm *term_heap_block;
+ Uint term_heap_size;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ mask = erts_lcnt_get_category_mask();
+ term_heap_size = 0;
- BIF_RET(am_ok);
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* ptr = tuple_val(BIF_ARG_1);
-
- if ((arityval(ptr[0]) == 2) && (ptr[2] == am_false || ptr[2] == am_true)) {
- int lock_opt = 0, enable = (ptr[2] == am_true) ? 1 : 0;
- if (ERTS_IS_ATOM_STR("copy_save", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_COPYSAVE;
- } else if (ERTS_IS_ATOM_STR("process_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PROCLOCK;
- } else if (ERTS_IS_ATOM_STR("port_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PORTLOCK;
- } else if (ERTS_IS_ATOM_STR("suspend", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_SUSPEND;
- } else if (ERTS_IS_ATOM_STR("location", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_LOCATION;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
+ lcnt_build_category_list(NULL, &term_heap_size, mask);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ term_heap_block = HAlloc(BIF_P, term_heap_size);
- if (enable) res = erts_lcnt_set_rt_opt(lock_opt) ? am_true : am_false;
- else res = erts_lcnt_clear_rt_opt(lock_opt) ? am_true : am_false;
-
-#ifdef ERTS_SMP
- if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PORTLOCK) {
- erts_lcnt_enable_io_lock_count(enable);
- } else if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PROCLOCK) {
- erts_lcnt_enable_proc_lock_count(enable);
- }
+ BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
+ } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ if(erts_lcnt_get_preserve_info()) {
+ BIF_RET(am_true);
+ }
+
+ BIF_RET(am_false);
+ }
#endif
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t category_mask = 0;
+ Eterm categories = BIF_ARG_2;
+
+ if(!(is_list(categories) || is_nil(categories))) {
+ BIF_ERROR(BIF_P, BADARG);
}
- }
-#endif
+ while(is_list(categories)) {
+ Eterm *cell = list_val(categories);
+ erts_lock_flags_t category;
+
+ category = lcnt_atom_to_lock_category(CAR(cell));
+
+ if(!category) {
+ Eterm *hp = HAlloc(BIF_P, 4);
+
+ BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
+ }
+
+ category_mask |= category;
+ categories = CDR(cell);
+ }
+
+ erts_lcnt_set_category_mask(category_mask);
+
+ BIF_RET(am_ok);
+ } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ int enabled = (BIF_ARG_2 == am_true);
+
+ if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ erts_lcnt_set_preserve_info(enabled);
+
+ BIF_RET(am_ok);
+ }
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index fc6fb5f868..2f8adc87d5 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -392,7 +392,8 @@ init_magic_ref_tables(void)
erts_snprintf(&tblp->name[0], sizeof(tblp->name),
"magic_ref_table_0");
hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
- erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table");
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
hash_funcs.hash = nsched_mreft_hash;
hash_funcs.cmp = nsched_mreft_cmp;
@@ -402,7 +403,8 @@ init_magic_ref_tables(void)
erts_snprintf(&tblp->name[0], sizeof(tblp->name),
"magic_ref_table_%d", i);
hash_init(0, &tblp->hash, &tblp->name[0], 1, hash_funcs);
- erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table");
+ erts_rwmtx_init(&tblp->rwmtx, "magic_ref_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
}
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index 50f33b2014..f8b2fa744f 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -1706,7 +1706,8 @@ erts_init_cpu_topology(void)
{
int ix;
- erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info");
+ erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA,
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 17e0f2aeec..b83134c79c 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -575,9 +575,7 @@ delete_owned_table(Process *p, DbTable *tb)
table_dec_refc(tb, 1);
}
-
-static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
- char *rwname, char* fixname)
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
{
#ifdef ERTS_SMP
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
@@ -587,9 +585,10 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
#endif
#ifdef ERTS_SMP
- erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
- rwname, tb->common.the_name);
- erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
+ erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
}
@@ -1753,8 +1752,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
/* Note, 'type' is *read only* from now on... */
#endif
erts_smp_refc_init(&tb->common.fix_count, 0);
- db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
- "db_tab", "db_tab_fix");
+ db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
tb->common.keypos = keypos;
tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
@@ -3391,8 +3389,9 @@ void init_db(ErtsDbSpinCount db_spin_count)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
- "meta_name_tab", make_small(i));
+ erts_smp_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ "meta_name_tab", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB);
}
#endif
@@ -4334,3 +4333,47 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
return list;
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable) {
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&tb->common.rwlock.lcnt, "db_tab",
+ tb->common.the_name, ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ erts_lcnt_install_new_lock_info(&tb->common.fixlock.lcnt, "db_tab_fix",
+ tb->common.the_name, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(&tb->common.rwlock.lcnt);
+ erts_lcnt_uninstall(&tb->common.fixlock.lcnt);
+ }
+
+ if(IS_HASH_TABLE(tb->common.status)) {
+ erts_lcnt_enable_db_hash_lock_count(&tb->hash, enable);
+ }
+}
+
+static void lcnt_update_db_locks_per_sched(void *enable) {
+ ErtsSchedulerData *esdp;
+ DbTable *head;
+
+ esdp = erts_get_scheduler_data();
+ head = esdp->ets_tables.clist;
+
+ if(head) {
+ DbTable *iterator = head;
+
+ do {
+ if(is_table_alive(iterator)) {
+ erts_lcnt_enable_db_lock_count(iterator, !!enable);
+ }
+
+ iterator = iterator->common.all.next;
+ } while (iterator != head);
+ }
+}
+
+void erts_lcnt_update_db_locks(int enable) {
+ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers,
+ &lcnt_update_db_locks_per_sched, (void*)(UWord)enable);
+}
+
+#endif /* ERTS_ENABLE_LOCK_COUNT */
\ No newline at end of file
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 4ff9f224e8..d83126b3a2 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -129,6 +129,11 @@ extern erts_smp_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
Uint erts_db_get_max_tabs(void);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable);
+void erts_lcnt_update_db_locks(int enable);
+#endif
+
#endif /* ERL_DB_H__ */
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 0addfaa3c7..ae9322dfd3 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -675,8 +675,8 @@ int db_create_hash(Process *p, DbTable *tbl)
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_smp_rwmtx_init_opt_x(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
- "db_hash_slot", tb->common.the_name);
+ erts_smp_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
}
/* This important property is needed to guarantee the two buckets
* involved in a grow/shrink operation it protected by the same lock:
@@ -3206,3 +3206,23 @@ Eterm erts_ets_hash_sizeof_ext_segtab(void)
return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable) {
+ int i;
+
+ if(tb->locks == NULL) {
+ return;
+ }
+
+ for(i = 0; i < DB_HASH_LOCK_CNT; i++) {
+ erts_lcnt_ref_t *ref = &tb->locks->lck_vec[i].lck.lcnt;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(ref, "db_hash_slot", tb->common.the_name,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DB);
+ } else {
+ erts_lcnt_uninstall(ref);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index f491c85d95..523ed7860e 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -103,4 +103,8 @@ typedef struct {
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
Eterm erts_ets_hash_sizeof_ext_segtab(void);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable);
+#endif
+
#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_dirty_bif.tab b/erts/emulator/beam/erl_dirty_bif.tab
index 69421dcfcc..10c76d2579 100644
--- a/erts/emulator/beam/erl_dirty_bif.tab
+++ b/erts/emulator/beam/erl_dirty_bif.tab
@@ -46,6 +46,11 @@
dirty-cpu erts_debug:dirty_cpu/2
dirty-io erts_debug:dirty_io/2
+# lcnt_control/1 doesn't need to be dirty.
+dirty-cpu erts_debug:lcnt_control/2
+dirty-cpu erts_debug:lcnt_collect/0
+dirty-cpu erts_debug:lcnt_clear/0
+
# --- TEST of Dirty BIF functionality ---
# Functions below will execute on dirty schedulers when emulator has
# been configured for testing dirty schedulers. This is used for test
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 0e6aadf568..742c428f2a 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -55,7 +55,7 @@ fatal_error(int err, char *func)
struct ErlDrvMutex_ {
ethr_mutex mtx;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
#endif
char *name;
};
@@ -68,7 +68,7 @@ struct ErlDrvCond_ {
struct ErlDrvRWLock_ {
ethr_rwmutex rwmtx;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
#endif
char *name;
};
@@ -146,7 +146,8 @@ void erl_drv_thr_init(void)
sizeof(char *)*ERL_DRV_TSD_KEYS_INC);
for (i = 0; i < ERL_DRV_TSD_KEYS_INC; i++)
used_tsd_keys[i] = NULL;
- erts_mtx_init(&tsd_mtx, "drv_tsd");
+ erts_mtx_init(&tsd_mtx, "drv_tsd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
}
/*
@@ -176,7 +177,8 @@ erl_drv_mutex_create(char *name)
dmtx->name = no_name;
}
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&dmtx->lcnt, dmtx->name, ERTS_LCNT_LT_MUTEX);
+ erts_lcnt_init_ref_x(&dmtx->lcnt, dmtx->name, NIL,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
}
return dmtx;
@@ -191,7 +193,7 @@ erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&dmtx->lcnt);
+ erts_lcnt_uninstall(&dmtx->lcnt);
#endif
res = dmtx ? ethr_mutex_destroy(&dmtx->mtx) : EINVAL;
if (res != 0)
@@ -368,7 +370,8 @@ erl_drv_rwlock_create(char *name)
drwlck->name = no_name;
}
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&drwlck->lcnt, drwlck->name, ERTS_LCNT_LT_RWMUTEX);
+ erts_lcnt_init_ref_x(&drwlck->lcnt, drwlck->name, NIL,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
}
return drwlck;
@@ -383,7 +386,7 @@ erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&drwlck->lcnt);
+ erts_lcnt_uninstall(&drwlck->lcnt);
#endif
res = drwlck ? ethr_rwmutex_destroy(&drwlck->rwmtx) : EINVAL;
if (res != 0)
@@ -411,7 +414,7 @@ erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
res = ethr_rwmutex_tryrlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LCNT_LO_READ);
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
return res;
#else
@@ -426,7 +429,7 @@ erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_rlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -442,7 +445,7 @@ erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_runlock(&drwlck->rwmtx);
#endif
@@ -457,7 +460,7 @@ erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
res = ethr_rwmutex_tryrwlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
return res;
#else
@@ -472,7 +475,7 @@ erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwlock(&drwlck->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -488,7 +491,7 @@ erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwunlock(&drwlck->rwmtx);
#endif
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index d18016c42e..535f677bb3 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -63,7 +63,8 @@ erts_init_fun_table(void)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab");
+ erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) fun_hash;
f.cmp = (HCMP_FUN) fun_cmp;
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 3c8bdaa62e..8cb977a7f3 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -274,7 +274,8 @@ erts_init_gc(void)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
init_gc_info(&dirty_gc.info);
#endif
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 6172595552..5206d7564f 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2357,8 +2357,12 @@ erl_start(int argc, char **argv)
#ifdef ERTS_SMP
erts_start_schedulers();
- /* Let system specific code decide what to do with the main thread... */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
+#endif
+
+ /* Let system specific code decide what to do with the main thread... */
erts_sys_main_thread(); /* May or may not return! */
#else
{
@@ -2373,6 +2377,11 @@ erl_start(int argc, char **argv)
erts_sched_init_time_sup(esdp);
erts_ets_sched_spec_data_init(esdp);
erts_aux_work_timeout_late_init(esdp);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_post_startup();
+#endif
+
process_main(esdp->x_reg_array, esdp->f_reg_array);
}
#endif
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index 4d4defd8b5..634509f880 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -1200,7 +1200,8 @@ erts_instr_init(int stat, int map_stat)
stats = erts_alloc(ERTS_ALC_T_INSTR_INFO, sizeof(struct stats_));
- erts_mtx_init(&instr_mutex, "instr");
+ erts_mtx_init(&instr_mutex, "instr", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
mem_anchor = NULL;
@@ -1223,7 +1224,8 @@ erts_instr_init(int stat, int map_stat)
if (map_stat) {
- erts_mtx_init(&instr_x_mutex, "instr_x");
+ erts_mtx_init(&instr_x_mutex, "instr_x", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_instr_memory_map = 1;
erts_instr_stat = 1;
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index f270d8baef..cf091ee43f 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -104,7 +104,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "db_tab", "address" },
{ "proc_status", "pid" },
{ "proc_trace", "pid" },
- { "ports_snapshot", NULL },
{ "db_tab_fix", "address" },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
@@ -161,6 +160,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
+#ifdef ERTS_SMP
+ { "pollsets_lock", NULL },
+#endif
{ "alcu_allocator", "index" },
{ "mseg", NULL },
#ifdef ERTS_SMP
@@ -173,7 +175,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "get_time", NULL },
{ "get_corrected_time", NULL },
{ "breakpoints", NULL },
- { "pollsets_lock", NULL },
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
@@ -199,41 +200,20 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#define ERTS_LOCK_ORDER_SIZE \
(sizeof(erts_lock_order)/sizeof(erts_lc_lock_order_t))
-#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
- (((LCKD_FLG) & (ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)) \
- && ((LCK_FLG) \
- & ERTS_LC_FLG_LT_ALL \
- & ~(ERTS_LC_FLG_LT_SPINLOCK|ERTS_LC_FLG_LT_RWSPINLOCK)))
+#define LOCK_IS_TYPE_ORDER_VIOLATION(LCK_FLG, LCKD_FLG) \
+ (((LCKD_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_SPINLOCK \
+ && \
+ ((LCK_FLG) & ERTS_LOCK_FLAGS_MASK_TYPE) != ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
static __decl_noreturn void __noreturn lc_abort(void);
-static char *
-lock_type(Uint16 flags)
+static const char *rw_op_str(erts_lock_options_t options)
{
- switch (flags & ERTS_LC_FLG_LT_ALL) {
- case ERTS_LC_FLG_LT_SPINLOCK: return "[spinlock]";
- case ERTS_LC_FLG_LT_RWSPINLOCK: return "[rw(spin)lock]";
- case ERTS_LC_FLG_LT_MUTEX: return "[mutex]";
- case ERTS_LC_FLG_LT_RWMUTEX: return "[rwmutex]";
- case ERTS_LC_FLG_LT_PROCLOCK: return "[proclock]";
- default: return "";
+ if(options == ERTS_LOCK_OPTIONS_WRITE) {
+ ERTS_INTERNAL_ERROR("Only write flag present");
}
-}
-static char *
-rw_op_str(Uint16 flags)
-{
- switch (flags & ERTS_LC_FLG_LO_READ_WRITE) {
- case ERTS_LC_FLG_LO_READ_WRITE:
- return " (rw)";
- case ERTS_LC_FLG_LO_READ:
- return " (r)";
- case ERTS_LC_FLG_LO_WRITE:
- ERTS_INTERNAL_ERROR("Only write flag present");
- default:
- break;
- }
- return "";
+ return erts_lock_options_get_short_desc(options);
}
typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
@@ -244,7 +224,8 @@ struct erts_lc_locked_lock_t_ {
Sint16 id;
char *file;
unsigned int line;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
};
typedef struct {
@@ -431,7 +412,7 @@ make_my_locked_locks(void)
}
static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
+new_locked_lock(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
@@ -441,12 +422,13 @@ new_locked_lock(erts_lc_lock_t *lck, Uint16 op_flags,
l_lck->extra = lck->extra;
l_lck->file = file;
l_lck->line = line;
- l_lck->flags = lck->flags | op_flags;
+ l_lck->flags = lck->flags;
+ l_lck->taken_options = options;
return l_lck;
}
static void
-raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
+raw_print_lock(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags,
char* file, unsigned int line, char *suffix)
{
char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
@@ -458,16 +440,16 @@ raw_print_lock(char *prefix, Sint16 id, Wterm extra, Uint16 flags,
erts_fprintf(stderr,"%p",_unchecked_boxed_val(extra));
else
erts_fprintf(stderr,"%T",extra);
- erts_fprintf(stderr,"%s",lock_type(flags));
+ erts_fprintf(stderr,"[%s]",erts_lock_flags_get_type_name(flags));
if (file)
erts_fprintf(stderr,"(%s:%d)",file,line);
- erts_fprintf(stderr,"'%s%s",rw_op_str(flags),suffix);
+ erts_fprintf(stderr,"'(%s)%s",rw_op_str(flags),suffix);
}
static void
-print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix)
+print_lock2(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags, char *suffix)
{
raw_print_lock(prefix, id, extra, flags, NULL, 0, suffix);
}
@@ -522,9 +504,9 @@ uninitialized_lock(void)
static void
lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "%s%s", prefix, rw_op_str(op_flags));
+ erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
print_lock(" ", lck, " lock which is already locked by thread!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -532,9 +514,9 @@ lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
static void
unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
- Uint16 op_flags)
+ erts_lock_options_t options)
{
- erts_fprintf(stderr, "Unlocking%s ", rw_op_str(op_flags));
+ erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
print_curr_locks(l_lcks);
lc_abort();
@@ -745,84 +727,128 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
+static int compare_locked_by_id(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+{
+ if(locked_lock->id < comparand->id) {
+ return -1;
+ } else if(locked_lock->id > comparand->id) {
+ return 1;
+ }
-static int
-find_lock(erts_lc_locked_lock_t **l_lcks, erts_lc_lock_t *lck)
+ return 0;
+}
+
+static int compare_locked_by_id_extra(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
+ int order = compare_locked_by_id(locked_lock, comparand);
+
+ if(order) {
+ return order;
+ } else if(locked_lock->extra < comparand->extra) {
+ return -1;
+ } else if(locked_lock->extra > comparand->extra) {
+ return 1;
+ }
- if (l_lck) {
- if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & lck->flags) == lck->flags)
- return 1;
- return 0;
- }
- else if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra < lck->extra)) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id > lck->id
- || (l_lck->id == lck->id
- && l_lck->extra >= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id < lck->id
- || (l_lck->id == lck->id
- && l_lck->extra <= lck->extra)) {
- *l_lcks = l_lck;
- if (l_lck->id == lck->id
- && l_lck->extra == lck->extra
- && ((l_lck->flags & lck->flags) == lck->flags))
- return 1;
- return 0;
- }
- }
- }
+ return 0;
+}
+
+typedef int (*locked_compare_func)(erts_lc_locked_lock_t *, erts_lc_lock_t *);
+
+/* Searches through a list of taken locks, bailing when it hits an entry whose
+ * order relative to the search template is the opposite of the one at the
+ * start of the search. (*closest_neighbor) is either set to the exact match,
+ * or the one closest to it in the sort order. */
+static int search_locked_list(locked_compare_func compare,
+ erts_lc_locked_lock_t *locked_locks,
+ erts_lc_lock_t *search_template,
+ erts_lc_locked_lock_t **closest_neighbor)
+{
+ erts_lc_locked_lock_t *iterator = locked_locks;
+
+ (*closest_neighbor) = iterator;
+
+ if(iterator) {
+ int relative_order = compare(iterator, search_template);
+
+ if(relative_order < 0) {
+ while((iterator = iterator->next) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order >= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ } else if(relative_order > 0) {
+ while((iterator = iterator->prev) != NULL) {
+ relative_order = compare(iterator, search_template);
+
+ if(relative_order <= 0) {
+ (*closest_neighbor) = iterator;
+ break;
+ }
+ }
+ }
+
+ return relative_order == 0;
}
+
return 0;
}
+/* Searches for a lock in the given list that matches search_template, and sets
+ * (*locked_locks) to the closest lock in the sort order. */
static int
-find_id(erts_lc_locked_lock_t **l_lcks, Sint16 id)
-{
- erts_lc_locked_lock_t *l_lck = *l_lcks;
-
- if (l_lck) {
- if (l_lck->id == id)
- return 1;
- else if (l_lck->id < id) {
- for (l_lck = l_lck->next; l_lck; l_lck = l_lck->next) {
- if (l_lck->id >= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
- else {
- for (l_lck = l_lck->prev; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id <= id) {
- *l_lcks = l_lck;
- if (l_lck->id == id)
- return 1;
- return 0;
- }
- }
- }
+find_lock(erts_lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ int found_lock;
+
+ found_lock = search_locked_list(compare_locked_by_id_extra,
+ (*locked_locks),
+ search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ if(found_lock) {
+ erts_lock_options_t relevant_options;
+ erts_lock_flags_t relevant_flags;
+
+ /* We only care about the options and flags that are set in the
+ * template. */
+ relevant_options = (closest_neighbor->taken_options & search_template->taken_options);
+ relevant_flags = (closest_neighbor->flags & search_template->flags);
+
+ return search_template->taken_options == relevant_options &&
+ search_template->flags == relevant_flags;
}
+
return 0;
}
+/* Searches for a lock in the given list by id, and sets (*locked_locks) to the
+ * closest lock in the sort order. */
+static int
+find_id(erts_lc_locked_lock_t **locked_locks, Sint16 id)
+{
+ erts_lc_locked_lock_t *closest_neighbor;
+ erts_lc_lock_t search_template;
+ int found_lock;
+
+ search_template.id = id;
+
+ found_lock = search_locked_list(compare_locked_by_id,
+ (*locked_locks),
+ &search_template,
+ &closest_neighbor);
+
+ (*locked_locks) = closest_neighbor;
+
+ return found_lock;
+}
+
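
For reference, here is a standalone sketch of the comparator-driven search introduced above, using a simplified node type rather than erts_lc_locked_lock_t (the type and function names below are illustrative only, not part of ERTS). The list is assumed to be sorted on (id, extra), and the walk proceeds forward or backward from the current position depending on the sign of the first comparison.

    #include <stddef.h>

    typedef struct node {
        int id;
        long extra;
        struct node *prev;
        struct node *next;
    } node_t;

    typedef int (*node_compare_func)(node_t *, node_t *);

    /* Returns non-zero when an exact match is found; (*closest) is set to the
     * match, or to the entry nearest to the template in the sort order. */
    static int search_sorted_list(node_compare_func compare, node_t *start,
                                  node_t *tmpl, node_t **closest)
    {
        node_t *it = start;
        int order;

        *closest = it;

        if (it == NULL)
            return 0;

        order = compare(it, tmpl);

        if (order < 0) {
            /* Current entry sorts before the template; walk forward. */
            while ((it = it->next) != NULL) {
                order = compare(it, tmpl);
                if (order >= 0) {
                    *closest = it;
                    break;
                }
            }
        } else if (order > 0) {
            /* Current entry sorts after the template; walk backward. */
            while ((it = it->prev) != NULL) {
                order = compare(it, tmpl);
                if (order <= 0) {
                    *closest = it;
                    break;
                }
            }
        }

        return order == 0;
    }
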
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
@@ -918,17 +944,17 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
void
-erts_lc_check_no_locked_of_type(Uint16 flags)
+erts_lc_check_no_locked_of_type(erts_lock_flags_t type)
{
erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
if (l_lcks) {
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
- if (l_lck->flags & flags) {
+ if ((l_lck->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == type) {
erts_fprintf(stderr,
"Locked lock of type %s found which isn't "
"allowed here!\n",
- lock_type(l_lck->flags));
+ erts_lock_flags_get_type_name(l_lck->flags));
print_curr_locks(l_lcks);
lc_abort();
}
@@ -937,7 +963,7 @@ erts_lc_check_no_locked_of_type(Uint16 flags)
}
int
-erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
#ifdef ERTS_LC_DO_NOT_FORCE_BUSY_TRYLOCK_ON_LOCK_ORDER_VIOLATION
return 0;
@@ -986,7 +1012,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
break;
}
}
@@ -1008,7 +1034,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags)
#endif
}
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1021,7 +1047,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, op_flags, file, line) : NULL;
+ l_lck = locked ? new_locked_lock(lck, options, file, line) : NULL;
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1039,7 +1065,7 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, op_flags);
+ lock_twice("Trylocking", l_lcks, lck, options);
if (locked) {
l_lck->next = tl_lck->next;
l_lck->prev = tl_lck;
@@ -1062,14 +1088,14 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
}
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
if (!find_lock(&l_lck, lck))
required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->required.last) {
ASSERT(!l_lcks->required.first);
l_lck->next = l_lck->prev = NULL;
@@ -1109,7 +1135,7 @@ void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
}
}
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
@@ -1137,7 +1163,7 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
lc_free((void *) l_lck);
}
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
erts_lc_locked_locks_t *l_lcks;
@@ -1150,7 +1176,7 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
return;
l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, op_flags, file, line);
+ l_lck = new_locked_lock(lck, options, file, line);
if (!l_lcks->locked.last) {
ASSERT(!l_lcks->locked.first);
@@ -1166,12 +1192,12 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
l_lcks->locked.last = l_lck;
}
else if (l_lcks->locked.last->id == lck->id && l_lcks->locked.last->extra == lck->extra)
- lock_twice("Locking", l_lcks, lck, op_flags);
+ lock_twice("Locking", l_lcks, lck, options);
else
lock_order_violation(l_lcks, lck);
}
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1192,8 +1218,8 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->flags & ERTS_LC_FLG_LO_ALL) != op_flags)
- unlock_op_mismatch(l_lcks, lck, op_flags);
+ if ((l_lck->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
+ unlock_op_mismatch(l_lcks, lck, options);
if (l_lck->prev)
l_lck->prev->next = l_lck->next;
else
@@ -1210,7 +1236,7 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
unlock_of_not_locked(l_lcks, lck);
}
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags)
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
erts_lc_locked_locks_t *l_lcks;
erts_lc_locked_lock_t *l_lck;
@@ -1274,23 +1300,25 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck)
}
void
-erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags)
+erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = (UWord) &lck->extra;
ASSERT(is_not_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
void
-erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra)
+erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra)
{
lck->id = erts_lc_get_lock_order_id(name);
lck->extra = extra;
ASSERT(is_immed(lck->extra));
lck->flags = flags;
+ lck->taken_options = 0;
lck->inited = ERTS_LC_INITITALIZED;
}
@@ -1304,6 +1332,7 @@ erts_lc_destroy_lock(erts_lc_lock_t *lck)
lck->id = -1;
lck->extra = THE_NON_VALUE;
lck->flags = 0;
+ lck->taken_options = 0;
}
void
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 18296d1fec..8c754a8dfa 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -36,6 +36,8 @@
#ifdef ERTS_ENABLE_LOCK_CHECK
+#include "erl_lock_flags.h"
+
#ifndef ERTS_ENABLE_LOCK_POSITION
/* Enable in order for _x variants of mtx functions to be used. */
#define ERTS_ENABLE_LOCK_POSITION 1
@@ -44,36 +46,14 @@
typedef struct {
int inited;
Sint16 id;
- Uint16 flags;
+ erts_lock_flags_t flags;
+ erts_lock_options_t taken_options;
UWord extra;
} erts_lc_lock_t;
#define ERTS_LC_INITITALIZED 0x7f7f7f7f
-
-#define ERTS_LC_FLG_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LC_FLG_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LC_FLG_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LC_FLG_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LC_FLG_LT_PROCLOCK (((Uint16) 1) << 4)
-
-#define ERTS_LC_FLG_LO_READ (((Uint16) 1) << 5)
-#define ERTS_LC_FLG_LO_WRITE (((Uint16) 1) << 6)
-
-#define ERTS_LC_FLG_LO_READ_WRITE (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-#define ERTS_LC_FLG_LT_ALL (ERTS_LC_FLG_LT_SPINLOCK \
- | ERTS_LC_FLG_LT_RWSPINLOCK \
- | ERTS_LC_FLG_LT_MUTEX \
- | ERTS_LC_FLG_LT_RWMUTEX \
- | ERTS_LC_FLG_LT_PROCLOCK)
-
-#define ERTS_LC_FLG_LO_ALL (ERTS_LC_FLG_LO_READ \
- | ERTS_LC_FLG_LO_WRITE)
-
-
-#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), (X)}
+#define ERTS_LC_LOCK_INIT(ID, X, F) {ERTS_LC_INITITALIZED, (ID), (F), 0, (X)}
void erts_lc_init(void);
void erts_lc_late_init(void);
@@ -83,31 +63,31 @@ void erts_lc_check(erts_lc_lock_t *have, int have_len,
void erts_lc_check_exact(erts_lc_lock_t *have, int have_len);
void erts_lc_have_locks(int *resv, erts_lc_lock_t *lcks, int len);
void erts_lc_have_lock_ids(int *resv, int *ids, int len);
-void erts_lc_check_no_locked_of_type(Uint16 flags);
-int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_check_no_locked_of_type(erts_lock_flags_t flags);
+int erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_lock_flg_x(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
-void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
+void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
int erts_lc_trylock_force_busy(erts_lc_lock_t *lck);
void erts_lc_trylock_x(int locked, erts_lc_lock_t *lck,
char* file, unsigned int line);
void erts_lc_lock_x(erts_lc_lock_t *lck, char* file, unsigned int line);
void erts_lc_unlock(erts_lc_lock_t *lck);
void erts_lc_might_unlock(erts_lc_lock_t *lck);
-void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, Uint16 flags);
-void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, Uint16 flags, Eterm extra);
+void erts_lc_init_lock(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags);
+void erts_lc_init_lock_x(erts_lc_lock_t *lck, char *name, erts_lock_flags_t flags, Eterm extra);
void erts_lc_destroy_lock(erts_lc_lock_t *lck);
void erts_lc_fail(char *fmt, ...);
int erts_lc_assert_failed(char *file, int line, char *assertion);
void erts_lc_set_thread_name(char *thread_name);
void erts_lc_pll(void);
-void erts_lc_require_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags,
+void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line);
-void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
+void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options);
void erts_lc_require_lock(erts_lc_lock_t *lck, char *file, unsigned int line);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
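
The matching rule used by find_lock in erl_lock_check.c can be summed up in a small standalone sketch (the type and function names here are illustrative, not part of the ERTS API): a held lock satisfies a search template when every option bit and every flag bit set in the template is also set on the held lock, while bits not set in the template are ignored.

    #include <stdint.h>

    typedef uint16_t lock_bits_t; /* stand-in for erts_lock_flags_t / erts_lock_options_t */

    /* Non-zero when the held lock carries at least the options and flags
     * requested by the template. */
    static int template_matches(lock_bits_t held_options, lock_bits_t held_flags,
                                lock_bits_t tmpl_options, lock_bits_t tmpl_flags)
    {
        return (held_options & tmpl_options) == tmpl_options &&
               (held_flags & tmpl_flags) == tmpl_flags;
    }
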
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 678bc43f04..d2e8f47d59 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -18,51 +18,37 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- */
-
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-/* Needed for VxWorks va_arg */
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
#include "sys.h"
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "global.h"
#include "erl_lock_count.h"
-#include "ethread.h"
-#include "erl_term.h"
-#include "atom.h"
-#include <stdio.h>
-
-/* globals, dont access these without locks or blocks */
+#include "erl_thr_progress.h"
-ethr_mutex lcnt_data_lock;
-erts_lcnt_data_t *erts_lcnt_data;
-Uint16 erts_lcnt_rt_options;
-erts_lcnt_time_t timer_start;
-const char *str_undefined = "undefined";
+#include "erl_node_tables.h"
+#include "erl_alloc_util.h"
+#include "erl_check_io.h"
+#include "erl_poll.h"
+#include "erl_db.h"
-static ethr_tsd_key lcnt_thr_data_key;
-static int lcnt_n_thr;
-static erts_lcnt_thread_data_t *lcnt_thread_data[2048];
+#define LCNT_MAX_CARRIER_ENTRIES 255
-/* local functions */
+/* - Locals that are shared with the header implementation - */
-static ERTS_INLINE void lcnt_lock(void) {
- ethr_mutex_lock(&lcnt_data_lock);
-}
+#ifdef DEBUG
+int lcnt_initialization_completed__;
+#endif
-static ERTS_INLINE void lcnt_unlock(void) {
- ethr_mutex_unlock(&lcnt_data_lock);
-}
+erts_lock_flags_t lcnt_category_mask__;
+ethr_tsd_key lcnt_thr_data_key__;
-const int log2_tab64[64] = {
+const int lcnt_log2_tab64__[64] = {
63, 0, 58, 1, 59, 47, 53, 2,
60, 39, 48, 27, 54, 33, 42, 3,
61, 51, 37, 40, 49, 18, 28, 20,
@@ -72,635 +58,624 @@ const int log2_tab64[64] = {
56, 45, 25, 31, 35, 16, 9, 12,
44, 24, 15, 8, 23, 7, 6, 5};
-static ERTS_INLINE int lcnt_log2(Uint64 v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
- return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
-}
-
-static char* lcnt_lock_type(Uint16 flag) {
- switch(flag & ERTS_LCNT_LT_ALL) {
- case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
- case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
- case ERTS_LCNT_LT_MUTEX: return "mutex";
- case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
- case ERTS_LCNT_LT_PROCLOCK: return "proclock";
- default: return "";
- }
-}
+/* - Local variables - */
-static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
- ethr_atomic_set(&stats->tries, 0);
- ethr_atomic_set(&stats->colls, 0);
- stats->timer.s = 0;
- stats->timer.ns = 0;
- stats->timer_n = 0;
- stats->file = (char *)str_undefined;
- stats->line = 0;
- sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
-}
+typedef struct lcnt_static_lock_ref_ {
+ erts_lcnt_ref_t *reference;
-static void lcnt_time(erts_lcnt_time_t *time) {
- /*
- * erts_sys_hrtime() is the highest resolution
- * we could find, it may or may not be monotonic...
- */
- ErtsMonotonicTime mtime = erts_sys_hrtime();
- time->s = (unsigned long) (mtime / 1000000000LL);
- time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
-}
+ erts_lock_flags_t flags;
+ const char *name;
+ Eterm id;
-static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
- long ds;
- long dns;
+ struct lcnt_static_lock_ref_ *next;
+} lcnt_static_lock_ref_t;
- ds = t1->s - t0->s;
- dns = t1->ns - t0->ns;
+static ethr_atomic_t lcnt_static_lock_registry;
- /* the difference should not be able to get bigger than 1 sec in ns*/
+static erts_lcnt_lock_info_list_t lcnt_current_lock_list;
+static erts_lcnt_lock_info_list_t lcnt_deleted_lock_list;
- if (dns < 0) {
- ds -= 1;
- dns += 1000000000LL;
- }
+static erts_lcnt_time_t lcnt_timer_start;
- ASSERT(ds >= 0);
+static int lcnt_preserve_info;
- d->s = ds;
- d->ns = dns;
-}
+/* local functions */
+
+static void lcnt_clear_stats(erts_lcnt_lock_info_t *info) {
+ size_t i;
+
+ for(i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
-/* difference d must be non-negative */
+ sys_memzero(&stats->wait_time_histogram, sizeof(stats->wait_time_histogram));
-static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- t->s += d->s;
- t->ns += d->ns;
+ stats->total_time_waited.s = 0;
+ stats->total_time_waited.ns = 0;
- t->s += t->ns / 1000000000LL;
- t->ns = t->ns % 1000000000LL;
+ stats->times_waited = 0;
+
+ stats->file = NULL;
+ stats->line = 0;
+
+ ethr_atomic_set(&stats->attempts, 0);
+ ethr_atomic_set(&stats->collisions, 0);
+ }
+
+ info->location_count = 1;
}
-static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
- erts_lcnt_thread_data_t *eltd;
+static lcnt_thread_data_t__ *lcnt_thread_data_alloc(void) {
+ lcnt_thread_data_t__ *eltd =
+ (lcnt_thread_data_t__*)malloc(sizeof(lcnt_thread_data_t__));
- eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
- if (!eltd) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ if(!eltd) {
+ ERTS_INTERNAL_ERROR("Failed to allocate lcnt thread data.");
}
+
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
- eltd->id = lcnt_n_thr++;
- /* set thread data to array */
- lcnt_thread_data[eltd->id] = eltd;
-
return eltd;
}
-static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
- return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
-}
+/* - List operations -
+ *
+ * Info entries are kept in a doubly linked list where each entry is locked
+ * together with its neighbors instead of under a global lock. Deletion is
+ * quick, but insertion is still serialized since the head is a de facto global lock.
+ *
+ * We rely on ad-hoc spinlocks to avoid "recursing" into this module. */
-/* debug */
+#define LCNT_SPINLOCK_YIELD_ITERATIONS 50
-#if 0
-static char* lock_opt(Uint16 flag) {
- if ((flag & ERTS_LCNT_LO_WRITE) && (flag & ERTS_LCNT_LO_READ)) return "rw";
- if (flag & ERTS_LCNT_LO_READ ) return "r ";
- if (flag & ERTS_LCNT_LO_WRITE) return " w";
- return "--";
-}
+#define LCNT_SPINLOCK_HELPER_INIT \
+ Uint failed_spin_count = 0;
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
- erts_aint_t w_state, r_state;
- char *type;
+#define LCNT_SPINLOCK_HELPER_YIELD \
+ do { \
+ failed_spin_count++; \
+ if(!(failed_spin_count % LCNT_SPINLOCK_YIELD_ITERATIONS)) { \
+ erts_thr_yield(); \
+ } else { \
+ ERTS_SPIN_BODY; \
+ } \
+ } while(0)
- if (strcmp(lock->name, "run_queue") != 0) return;
- type = lcnt_lock_type(lock->flag);
- r_state = ethr_atomic_read(&lock->r_state);
- w_state = ethr_atomic_read(&lock->w_state);
+static void lcnt_unlock_list_entry(erts_lcnt_lock_info_t *info) {
+ ethr_atomic32_set_relb(&info->lock, 0);
+}
- if (lock->flag & flag) {
- erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- type,
- lock->id);
- }
+static int lcnt_try_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ return ethr_atomic32_cmpxchg_acqb(&info->lock, 1, 0) == 0;
}
-#endif
-static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- unsigned int i;
- erts_lcnt_lock_stats_t *stats = NULL;
+static void lcnt_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
- }
+ while(!lcnt_try_lock_list_entry(info)) {
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- return &lock->stats[0];
}
-static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
- int idx;
- unsigned long r;
+static void lcnt_lock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
- idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
- } else {
- r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
- if (r) idx = lcnt_log2(r);
- else idx = 0;
+ for(;;) {
+ if(!lcnt_try_lock_list_entry(info))
+ goto retry_after_entry_failed;
+ if(!lcnt_try_lock_list_entry(info->next))
+ goto retry_after_next_failed;
+ if(!lcnt_try_lock_list_entry(info->prev))
+ goto retry_after_prev_failed;
+
+ return;
+
+ retry_after_prev_failed:
+ lcnt_unlock_list_entry(info->next);
+ retry_after_next_failed:
+ lcnt_unlock_list_entry(info);
+ retry_after_entry_failed:
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
- erts_lcnt_time_t *time_wait) {
+static void lcnt_unlock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ lcnt_unlock_list_entry(info->prev);
+ lcnt_unlock_list_entry(info->next);
+ lcnt_unlock_list_entry(info);
+}
- ethr_atomic_inc(&stats->tries);
+static void lcnt_insert_list_entry(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t *info) {
+ erts_lcnt_lock_info_t *next, *prev;
- if (lock_in_conflict)
- ethr_atomic_inc(&stats->colls);
+ prev = &list->head;
- if (time_wait) {
- lcnt_time_add(&(stats->timer), time_wait);
- stats->timer_n++;
- lcnt_update_stats_hist(&stats->hist,time_wait);
- }
-}
+ lcnt_lock_list_entry(prev);
-/* interface */
+ next = prev->next;
-void erts_lcnt_init() {
- erts_lcnt_thread_data_t *eltd = NULL;
+ lcnt_lock_list_entry(next);
- /* init lock */
- if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
+ info->next = next;
+ info->prev = prev;
- /* init tsd */
- lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key, "lcnt_data");
+ prev->next = info;
+ next->prev = info;
- lcnt_lock();
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
+}
+
+static void lcnt_insert_list_carrier(erts_lcnt_lock_info_list_t *list,
+ erts_lcnt_lock_info_carrier_t *carrier) {
+ erts_lcnt_lock_info_t *next, *prev;
+ size_t i;
- erts_lcnt_rt_options = ERTS_LCNT_OPT_LOCATION | ERTS_LCNT_OPT_PROCLOCK;
- eltd = lcnt_thread_data_alloc();
- ethr_tsd_set(lcnt_thr_data_key, eltd);
+ for(i = 0; i < carrier->entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[i];
- /* init lcnt structure */
- erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
- if (!erts_lcnt_data) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ info->prev = &carrier->entries[i - 1];
+ info->next = &carrier->entries[i + 1];
}
- erts_lcnt_data->current_locks = erts_lcnt_list_init();
- erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
- lcnt_unlock();
+ prev = &list->head;
+
+ lcnt_lock_list_entry(prev);
+
+ next = prev->next;
+
+ lcnt_lock_list_entry(next);
+
+ next->prev = &carrier->entries[carrier->entry_count - 1];
+ carrier->entries[carrier->entry_count - 1].next = next;
+ prev->next = &carrier->entries[0];
+ carrier->entries[0].prev = prev;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
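
The per-entry neighbor locking used by the list operations above follows a common try-lock-all-or-back-off pattern. Below is a minimal standalone sketch using C11 atomics rather than the ethread primitives the actual code uses; entry_t and the function names are illustrative, and each entry's flag is assumed to start out as ATOMIC_FLAG_INIT.

    #include <stdatomic.h>
    #include <sched.h>

    typedef struct entry {
        atomic_flag lock; /* initialize with ATOMIC_FLAG_INIT */
        struct entry *prev;
        struct entry *next;
    } entry_t;

    static int try_lock_entry(entry_t *e) {
        /* Succeeds only if the flag was previously clear. */
        return !atomic_flag_test_and_set_explicit(&e->lock, memory_order_acquire);
    }

    static void unlock_entry(entry_t *e) {
        atomic_flag_clear_explicit(&e->lock, memory_order_release);
    }

    /* Locks an entry together with both neighbors; if any try-lock fails, the
     * locks taken so far are released and the whole sequence is retried, which
     * avoids deadlock without a global list lock. */
    static void lock_entry_with_neighbors(entry_t *e) {
        for (;;) {
            if (try_lock_entry(e)) {
                if (try_lock_entry(e->next)) {
                    if (try_lock_entry(e->prev))
                        return;
                    unlock_entry(e->next);
                }
                unlock_entry(e);
            }
            sched_yield(); /* back off before retrying */
        }
    }

    static void unlock_entry_with_neighbors(entry_t *e) {
        unlock_entry(e->prev);
        unlock_entry(e->next);
        unlock_entry(e);
    }
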
-void erts_lcnt_late_init() {
- /* set start timer and zero statistics */
- erts_lcnt_clear_counters();
- erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
+static void lcnt_init_list(erts_lcnt_lock_info_list_t *list) {
+ /* Ensure that ref_count operations explode when touching the sentinels in
+ * DEBUG mode. */
+ ethr_atomic_init(&(list->head.ref_count), -1);
+ ethr_atomic_init(&(list->tail.ref_count), -1);
+
+ ethr_atomic32_init(&(list->head.lock), 0);
+ (list->head).next = &list->tail;
+ (list->head).prev = &list->tail;
+
+ ethr_atomic32_init(&(list->tail.lock), 0);
+ (list->tail).next = &list->head;
+ (list->tail).prev = &list->head;
}
-/* list operations */
+/* - Carrier operations - */
-/* BEGIN ASSUMPTION: lcnt_data_lock taken */
+int lcnt_thr_progress_unmanaged_delay__(void) {
+ return erts_thr_progress_unmanaged_delay();
+}
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
- erts_lcnt_lock_list_t *list;
+void lcnt_thr_progress_unmanaged_continue__(int handle) {
+ return erts_thr_progress_unmanaged_continue(handle);
+}
- list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
- if (!list) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
- return list;
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
+ erts_free(ERTS_ALC_T_LCNT_CARRIER, (void*)carrier);
}
-static void lcnt_list_free(erts_lcnt_lock_t *head) {
- erts_lcnt_lock_t *lock, *next;
+static void lcnt_thr_prg_cleanup_carrier(void *data) {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t entry_count, i;
+
+ /* carrier->entry_count will be replaced with garbage if it's deallocated
+ * on the final iteration, so we'll tuck it away to get a clean exit. */
+ entry_count = carrier->entry_count;
- lock = head;
+ for(i = 0; i < entry_count; i++) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) >= (entry_count - i));
- while(lock != NULL) {
- next = lock->next;
- free(lock);
- lock = next;
+ erts_lcnt_release_lock_info(&carrier->entries[i]);
}
}
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_t *tail = NULL;
+static void lcnt_schedule_carrier_cleanup(void *data) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* We can't issue cleanup jobs on anything other than normal schedulers, so
+ * we move to the first scheduler if required. */
- tail = list->tail;
- if (tail) {
- tail->next = lock;
- lock->prev = tail;
+ if(!esdp || esdp->type != ERTS_SCHED_NORMAL) {
+ erts_schedule_misc_aux_work(1, &lcnt_schedule_carrier_cleanup, data);
} else {
- list->head = lock;
- lock->prev = NULL;
- ASSERT(!lock->next);
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t carrier_size;
+
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * carrier->entry_count;
+
+ erts_schedule_thr_prgr_later_cleanup_op(&lcnt_thr_prg_cleanup_carrier,
+ data, (ErtsThrPrgrLaterOp*)&carrier->release_entries, carrier_size);
}
- lock->next = NULL;
- list->tail = lock;
+}
- list->n++;
+static void lcnt_info_deallocate(erts_lcnt_lock_info_t *info) {
+ lcnt_release_carrier__(info->carrier);
}
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- if (lock->next) lock->next->prev = lock->prev;
- if (lock->prev) lock->prev->next = lock->next;
- if (list->head == lock) list->head = lock->next;
- if (list->tail == lock) list->tail = lock->prev;
+static void lcnt_info_dispose(erts_lcnt_lock_info_t *info) {
+ ASSERT(ethr_atomic_read(&info->ref_count) == 0);
+
+ if(lcnt_preserve_info) {
+ ethr_atomic_set(&info->ref_count, 1);
+
+ /* Move straight to deallocation the next time around. */
+ info->dispose = &lcnt_info_deallocate;
- lock->prev = NULL;
- lock->next = NULL;
- list->n--;
+ lcnt_insert_list_entry(&lcnt_deleted_lock_list, info);
+ } else {
+ lcnt_info_deallocate(info);
+ }
}
-/* END ASSUMPTION: lcnt_data_lock taken */
+static void lcnt_lock_info_init_helper(erts_lcnt_lock_info_t *info) {
+ ethr_atomic_init(&info->ref_count, 1);
+ ethr_atomic32_init(&info->lock, 0);
+
+ ethr_atomic_init(&info->r_state, 0);
+ ethr_atomic_init(&info->w_state, 0);
-/* lock operations */
+ info->dispose = &lcnt_info_dispose;
-/* interface to erl_threads.h */
-/* only lock on init and destroy, all others should use atomics */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, NIL);
+ lcnt_clear_stats(info);
}
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
- int i;
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int entry_count) {
+ erts_lcnt_lock_info_carrier_t *result;
+ size_t carrier_size, i;
- if (flag & ERTS_LCNT_LT_DISABLE) {
- ERTS_LCNT_CLEAR_FLAG(lock);
- return;
- }
+ ASSERT(entry_count > 0 && entry_count <= LCNT_MAX_CARRIER_ENTRIES);
+ ASSERT(lcnt_initialization_completed__);
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = flag;
- lock->name = name;
- lock->id = id;
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * entry_count;
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
+ result = (erts_lcnt_lock_info_carrier_t*)erts_alloc(ERTS_ALC_T_LCNT_CARRIER, carrier_size);
+ result->entry_count = entry_count;
- lock->n_stats = 1;
+ ethr_atomic_init(&result->ref_count, entry_count);
- for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- lcnt_clear_stats(&lock->stats[i]);
- }
+ for(i = 0; i < entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &result->entries[i];
- lcnt_lock();
- erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
- lcnt_unlock();
-}
+ lcnt_lock_info_init_helper(info);
-/* init empty, instead of zero struct
- * used by process locks probes
- */
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock) {
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = 0;
- lock->name = NULL;
- lock->id = NIL;
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
- lock->n_stats = 0;
- sys_memzero(lock->stats, sizeof(lock->stats));
-}
-/* destroy lock */
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- lcnt_lock();
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
- erts_lcnt_lock_t *deleted_lock;
- /* copy structure and insert the copy */
- deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
- if (!deleted_lock) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
- deleted_lock->next = NULL;
- deleted_lock->prev = NULL;
- erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+ info->carrier = result;
}
- /* delete original */
- erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
- ERTS_LCNT_CLEAR_FLAG(lock);
- lcnt_unlock();
+ return result;
}
-/* lock */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t swapped_carrier;
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- erts_aint_t r_state = 0, w_state = 0;
- erts_lcnt_thread_data_t *eltd;
+#ifdef DEBUG
+ int i;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ /* Verify that all locks share the same categories/static property; all
+ * other flags are fair game. */
+ for(i = 1; i < carrier->entry_count; i++) {
+ const erts_lock_flags_t SIGNIFICANT_DIFF_MASK =
+ ERTS_LOCK_FLAGS_MASK_CATEGORY | ERTS_LOCK_FLAGS_PROPERTY_STATIC;
- eltd = lcnt_get_thread_data();
- ASSERT(eltd);
+ erts_lcnt_lock_info_t *previous, *current;
- w_state = ethr_atomic_read(&lock->w_state);
+ previous = &carrier->entries[i - 1];
+ current = &carrier->entries[i];
- if (option & ERTS_LCNT_LO_WRITE) {
- r_state = ethr_atomic_read(&lock->r_state);
- ethr_atomic_inc( &lock->w_state);
- }
- if (option & ERTS_LCNT_LO_READ) {
- ethr_atomic_inc( &lock->r_state);
+ ASSERT(!((previous->flags ^ current->flags) & SIGNIFICANT_DIFF_MASK));
}
+#endif
- /* we cannot acquire w_lock if either w or r are taken */
- /* we cannot acquire r_lock if w_lock is taken */
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)carrier, (ethr_sint_t)NULL);
- if ((w_state > 0) || (r_state > 0)) {
- eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
- }
- eltd->timer_set++;
+ if(swapped_carrier != (ethr_sint_t)NULL) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == carrier->entry_count);
+ ethr_atomic_set(&carrier->ref_count, 0);
+#endif
+
+ lcnt_deallocate_carrier__(carrier);
} else {
- eltd->lock_in_conflict = 0;
+ lcnt_insert_list_carrier(&lcnt_current_lock_list, carrier);
}
}
-void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- erts_aint_t w_state;
- erts_lcnt_thread_data_t *eltd;
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref) {
+ ethr_sint_t previous_carrier, swapped_carrier;
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ previous_carrier = ethr_atomic_read(ref);
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)NULL, previous_carrier);
- w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc(&lock->w_state);
- eltd = lcnt_get_thread_data();
+ if(previous_carrier && previous_carrier == swapped_carrier) {
+ lcnt_schedule_carrier_cleanup((void*)previous_carrier);
+ }
+}
- ASSERT(eltd);
+/* - Static lock registry -
+ *
+ * Since static locks can be trusted to never disappear, we can track them
+ * pretty cheaply and won't need to bother writing an "erts_lcnt_update_xx"
+ * variant. */
+
+static void lcnt_init_static_lock_registry(void) {
+ ethr_atomic_init(&lcnt_static_lock_registry, (ethr_sint_t)NULL);
+}
+
+static void lcnt_update_static_locks(void) {
+ lcnt_static_lock_ref_t *iterator =
+ (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
+
+ while(iterator != NULL) {
+ if(!erts_lcnt_check_enabled(iterator->flags)) {
+ erts_lcnt_uninstall(iterator->reference);
+ } else if(!erts_lcnt_check_ref_installed(iterator->reference)) {
+ erts_lcnt_lock_info_carrier_t *carrier = erts_lcnt_create_lock_info_carrier(1);
+
+ erts_lcnt_init_lock_info_idx(carrier, 0, iterator->name, iterator->id, iterator->flags);
- if (w_state > 0) {
- eltd->lock_in_conflict = 1;
- /* only set the timer if nobody else has it
- * This should only happen when proc_locks aquires several locks
- * 'atomicly'. All other locks will block the thread if w_state > 0
- * i.e. locked.
- */
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
+ erts_lcnt_install(iterator->reference, carrier);
}
- eltd->timer_set++;
- } else {
- eltd->lock_in_conflict = 0;
+
+ iterator = iterator->next;
}
}
-/* if a lock wasn't really a lock operation, bad bad process locks */
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags) {
+ lcnt_static_lock_ref_t *lock = malloc(sizeof(lcnt_static_lock_ref_t));
+ int retry_insertion;
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC);
+
+ lock->reference = reference;
+ lock->flags = flags;
+ lock->name = name;
+ lock->id = id;
+
+ do {
+ ethr_sint_t swapped_head;
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
- /* should check if this thread was "waiting" */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ lock->next = (lcnt_static_lock_ref_t*)ethr_atomic_read(&lcnt_static_lock_registry);
- ethr_atomic_dec(&lock->w_state);
+ swapped_head = ethr_atomic_cmpxchg_acqb(
+ &lcnt_static_lock_registry,
+ (ethr_sint_t)lock,
+ (ethr_sint_t)lock->next);
+
+ retry_insertion = (swapped_head != (ethr_sint_t)lock->next);
+ } while(retry_insertion);
}
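
The registration above is a lock-free push onto a singly linked list, retried until the compare-and-swap of the list head succeeds. An equivalent standalone sketch with C11 atomics (the node type and names are illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct reg_node {
        struct reg_node *next;
        /* payload omitted */
    } reg_node_t;

    static _Atomic(reg_node_t *) registry_head = NULL;

    /* Pushes a node onto the registry; never blocks, and never loses a node
     * even when several threads register concurrently. */
    static void registry_push(reg_node_t *node) {
        reg_node_t *expected = atomic_load(&registry_head);

        do {
            node->next = expected;
        } while (!atomic_compare_exchange_weak_explicit(&registry_head, &expected,
                                                        node,
                                                        memory_order_release,
                                                        memory_order_relaxed));
    }
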
-/*
- * erts_lcnt_lock_post
- *
- * Used when we get a lock (i.e. directly after a lock operation)
- * if the timer was set then we had to wait for the lock
- * lock_post will calculate the wait time.
- */
+/* - Initialization - */
+
+void erts_lcnt_pre_thr_init() {
+ /* Ensure that the dependency hack mentioned in the header doesn't
+ * explode at runtime. */
+ ERTS_CT_ASSERT(sizeof(LcntThrPrgrLaterOp) >= sizeof(ErtsThrPrgrLaterOp));
+ ERTS_CT_ASSERT(ERTS_THR_PRGR_DHANDLE_MANAGED ==
+ (ErtsThrPrgrDelayHandle)LCNT_THR_PRGR_DHANDLE_MANAGED);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0);
+ lcnt_init_list(&lcnt_current_lock_list);
+ lcnt_init_list(&lcnt_deleted_lock_list);
+
+ lcnt_init_static_lock_registry();
}
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- erts_lcnt_thread_data_t *eltd;
- erts_lcnt_time_t timer;
- erts_lcnt_time_t time_wait;
- erts_lcnt_lock_stats_t *stats;
-#ifdef DEBUG
- erts_aint_t flowstate;
-#endif
+void erts_lcnt_post_thr_init() {
+ /* ASSUMPTION: this is safe since it runs prior to the creation of other
+     * threads (directly after ethread init). */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ ethr_tsd_key_create(&lcnt_thr_data_key__, "lcnt_data");
+
+ erts_lcnt_thread_setup();
+}
+
+void erts_lcnt_late_init() {
+ /* Set start timer and zero all statistics */
+ erts_lcnt_clear_counters();
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
#ifdef DEBUG
- if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc(&lock->flowstate);
- }
+ /* It's safe to use erts_alloc and thread progress past this point. */
+ lcnt_initialization_completed__ = 1;
#endif
+}
- eltd = lcnt_get_thread_data();
-
- ASSERT(eltd);
+void erts_lcnt_post_startup(void) {
+ /* Default to capturing everything to match the behavior of the old lock
+ * counter build. */
+ erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_MASK_CATEGORY);
+}
- /* if lock was in conflict, time it */
- stats = lcnt_get_lock_stats(lock, file, line);
- if (eltd->timer_set) {
- lcnt_time(&timer);
+void erts_lcnt_thread_setup() {
+ lcnt_thread_data_t__ *eltd = lcnt_thread_data_alloc();
- lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
- lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
- eltd->timer_set--;
- ASSERT(eltd->timer_set >= 0);
- } else {
- lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
- }
+ ASSERT(eltd);
+ ethr_tsd_set(lcnt_thr_data_key__, eltd);
}
-/* unlock */
+void erts_lcnt_thread_exit_handler() {
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
+ if (eltd) {
+ free(eltd);
+ }
}
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+/* - BIF interface - */
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info) {
#ifdef DEBUG
- {
- erts_aint_t w_state;
- erts_aint_t flowstate;
-
- /* flowstate */
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 1);
- ethr_atomic_dec(&lock->flowstate);
-
- /* write state */
- w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0);
- }
+ ASSERT(ethr_atomic_inc_read_acqb(&info->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&info->ref_count);
#endif
- ethr_atomic_dec(&lock->w_state);
}
-/* trylock */
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info) {
+ ethr_sint_t count;
+
+ /* We need to acquire the lock before decrementing ref_count to avoid
+ * racing with list iteration; there's a short window between reading the
+ * reference to info and increasing its ref_count. */
+ lcnt_lock_list_entry_with_neighbors(info);
+
+ count = ethr_atomic_dec_read(&info->ref_count);
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- /* Determine lock_state via res instead of state */
- if (res != EBUSY) {
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
+ ASSERT(count >= 0);
+
+ if(count > 0) {
+ lcnt_unlock_list_entry_with_neighbors(info);
} else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+ (info->next)->prev = info->prev;
+ (info->prev)->next = info->next;
+
+ lcnt_unlock_list_entry_with_neighbors(info);
+
+ info->dispose(info);
}
}
+erts_lock_flags_t erts_lcnt_get_category_mask() {
+ return lcnt_category_mask__;
+}
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
- /* Determine lock_state via res instead of state */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (res != EBUSY) {
-#ifdef DEBUG
- {
- erts_aint_t flowstate;
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
- }
+#ifdef ERTS_ENABLE_KERNEL_POLL
+/* erl_poll/erl_check_io only exports one of these variants at a time, and we
+ * may need to use either one depending on emulator startup flags. */
+void erts_lcnt_update_pollset_locks_nkp(int);
+void erts_lcnt_update_pollset_locks_kp(int);
+
+void erts_lcnt_update_cio_locks_nkp(int);
+void erts_lcnt_update_cio_locks_kp(int);
#endif
- ethr_atomic_inc(&lock->w_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
- } else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
+ erts_lock_flags_t changed_categories;
+
+ ASSERT(!(mask & ~ERTS_LOCK_FLAGS_MASK_CATEGORY));
+ ASSERT(lcnt_initialization_completed__);
+
+ changed_categories = (lcnt_category_mask__ ^ mask);
+ lcnt_category_mask__ = mask;
+
+ if(changed_categories) {
+ lcnt_update_static_locks();
}
-}
-/* thread operations */
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION) {
+ erts_lcnt_update_distribution_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ }
-void erts_lcnt_thread_setup(void) {
- erts_lcnt_thread_data_t *eltd;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ erts_lcnt_update_allocator_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR);
+ }
- lcnt_lock();
- /* lock for thread id global update */
- eltd = lcnt_thread_data_alloc();
- lcnt_unlock();
- ASSERT(eltd);
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-}
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_PROCESS) {
+ erts_lcnt_update_process_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
+ }
-void erts_lcnt_thread_exit_handler() {
- erts_lcnt_thread_data_t *eltd;
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ if(erts_use_kernel_poll) {
+ erts_lcnt_update_pollset_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_update_pollset_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+#else
+ erts_lcnt_update_pollset_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+#endif
- eltd = ethr_tsd_get(lcnt_thr_data_key);
+ erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
- if (eltd) {
- free(eltd);
+ if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_DB) {
+ erts_lcnt_update_db_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_DB);
}
}
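
As an illustrative call (using only the category flags handled above, since the mask must contain category bits only), restricting lock counting to DB and process locks would look like this:

    erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_CATEGORY_DB |
                                ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
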
-/* bindings for bifs */
-
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options |= opt;
- return prev;
+void erts_lcnt_set_preserve_info(int enable) {
+ lcnt_preserve_info = enable;
}
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
- Uint16 prev;
- prev = (erts_lcnt_rt_options & opt);
- erts_lcnt_rt_options &= ~opt;
- return prev;
+int erts_lcnt_get_preserve_info() {
+ return lcnt_preserve_info;
}
void erts_lcnt_clear_counters(void) {
- erts_lcnt_lock_t *lock;
- erts_lcnt_lock_list_t *list;
- erts_lcnt_lock_stats_t *stats;
- int i;
+ erts_lcnt_lock_info_t *iterator;
- lcnt_lock();
+ lcnt_time__(&lcnt_timer_start);
- list = erts_lcnt_data->current_locks;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
+ lcnt_clear_stats(iterator);
+ }
- for (lock = list->head; lock != NULL; lock = lock->next) {
- for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- stats = &lock->stats[i];
- lcnt_clear_stats(stats);
- }
- lock->n_stats = 1;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_deleted_lock_list, &iterator)) {
+ erts_lcnt_release_lock_info(iterator);
}
+}
- lock = erts_lcnt_data->deleted_locks->head;
- erts_lcnt_data->deleted_locks->head = NULL;
- erts_lcnt_data->deleted_locks->tail = NULL;
- erts_lcnt_data->deleted_locks->n = 0;
+erts_lcnt_data_t erts_lcnt_get_data(void) {
+ erts_lcnt_time_t timer_stop;
+ erts_lcnt_data_t result;
- lcnt_time(&timer_start);
+ lcnt_time__(&timer_stop);
- lcnt_unlock();
+ result.timer_start = lcnt_timer_start;
- /* free deleted locks */
- lcnt_list_free(lock);
+ result.current_locks = &lcnt_current_lock_list;
+ result.deleted_locks = &lcnt_deleted_lock_list;
+
+ lcnt_time_diff__(&result.duration, &timer_stop, &result.timer_start);
+
+ return result;
}
-erts_lcnt_data_t *erts_lcnt_get_data(void) {
- erts_lcnt_time_t timer_stop;
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator) {
+ erts_lcnt_lock_info_t *current, *next;
- lcnt_lock();
+ current = *iterator ? *iterator : &list->head;
- lcnt_time(&timer_stop);
- lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start);
+ ASSERT(current != &list->tail);
- lcnt_unlock();
+ lcnt_lock_list_entry(current);
- return erts_lcnt_data;
-}
+ next = current->next;
+
+ if(next != &list->tail) {
+ erts_lcnt_retain_lock_info(next);
+ }
+
+ lcnt_unlock_list_entry(current);
+
+ if(current != &list->head) {
+ erts_lcnt_release_lock_info(current);
+ }
+
+ *iterator = next;
-char *erts_lcnt_lock_type(Uint16 type) {
- return lcnt_lock_type(type);
+ return next != &list->tail;
}
-#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* #ifdef ERTS_ENABLE_LOCK_COUNT */
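
Putting the pieces together, enabling counting on a single lock reference amounts to installing a one-entry carrier, mirroring lcnt_update_static_locks above. A hedged usage sketch follows; the wrapper names are illustrative, while the erts_lcnt_* calls are the ones declared in erl_lock_count.h.

    static void enable_counting(erts_lcnt_ref_t *ref, const char *name, Eterm id,
                                erts_lock_flags_t flags)
    {
        if (!erts_lcnt_check_ref_installed(ref)) {
            erts_lcnt_lock_info_carrier_t *carrier;

            carrier = erts_lcnt_create_lock_info_carrier(1);
            erts_lcnt_init_lock_info_idx(carrier, 0, name, id, flags);
            erts_lcnt_install(ref, carrier);
        }
    }

    static void disable_counting(erts_lcnt_ref_t *ref)
    {
        /* Nops if nothing is installed; the carrier is reclaimed through the
         * thread-progress cleanup path. */
        erts_lcnt_uninstall(ref);
    }
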
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 6caffbfe86..89d95a73cf 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -18,64 +18,51 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- * Abstract:
- * Locks statistics internal representation.
- *
- * Conceptual representation,
- * - set name
- * | - id (the unique lock)
- * | | - lock type
- * | | - statistics
- * | | | - location (file and line number)
- * | | | - tries
- * | | | - collisions (including trylock busy)
- * | | | - timer (time spent in waiting for lock)
- * | | | - n_timer (collisions excluding trylock busy)
- * | | | - histogram
- * | | | | - # 0 = log2(lock wait_time ns)
- * | | | | - ...
- * | | | | - # n = log2(lock wait_time ns)
- *
- * Each instance of a lock is the unique lock, i.e. set and id in that set.
- * For each lock there is a set of statistics with where and what impact
- * the lock aqusition had.
- *
- * Runtime options
- * - suspend, used when internal lock-counting can't be applied. For instance
- * when allocating a term for the outside and halloc needs to be used.
- * Default: off.
- * - location, reserved and not used.
- * - proclock, disable proclock counting. Used when performance might be an
- * issue. Accessible from erts_debug:lock_counters({process_locks, bool()}).
- * Default: off.
- * - copysave, enable saving of destroyed locks (and thereby its statistics).
- * If memory constraints is an issue this need to be disabled.
- * Accessible from erts_debug:lock_counters({copy_save, bool()}).
- * Default: off.
+/**
+ * @description Statistics for locks.
+ * @file erl_lock_count.h
+ *
+ * @author Björn-Egil Dahlberg
+ * @author John Högberg
+ *
+ * Conceptual representation:
*
+ * - set name
+ * | - id (the unique lock)
+ * | | - lock type
+ * | | - statistics
+ * | | | - location (file and line number)
+ * | | | - attempts
+ * | | | - collisions (including trylock busy)
+ * | | | - timer (time spent waiting for the lock)
+ * | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
+ *
+ * Each lock instance is uniquely identified by its set (name) and its id
+ * within that set. For each lock there is a set of statistics describing
+ * where, and with what impact, the lock was acquired.
*/
-#include "sys.h"
-
#ifndef ERTS_LOCK_COUNT_H__
#define ERTS_LOCK_COUNT_H__
#ifdef ERTS_ENABLE_LOCK_COUNT
#ifndef ERTS_ENABLE_LOCK_POSITION
-/* Enable in order for _x variants of mtx functions to be used. */
+/** @brief Controls whether _x variants of mtx functions are used. */
#define ERTS_ENABLE_LOCK_POSITION 1
#endif
+#include "sys.h"
#include "ethread.h"
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+#include "erl_term.h"
+#include "erl_lock_flags.h"
+
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (5)
-/* histogram */
#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
@@ -85,154 +72,857 @@
#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
#endif
-#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
-#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
-#define ERTS_LCNT_LT_MUTEX (((Uint16) 1) << 2)
-#define ERTS_LCNT_LT_RWMUTEX (((Uint16) 1) << 3)
-#define ERTS_LCNT_LT_PROCLOCK (((Uint16) 1) << 4)
-#define ERTS_LCNT_LT_ALLOC (((Uint16) 1) << 5)
+typedef struct {
+ unsigned long s;
+ unsigned long ns;
+} erts_lcnt_time_t;
+
+typedef struct {
+    /** @brief log2 array of nanosecond occurrences */
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
+} erts_lcnt_hist_t;
-#define ERTS_LCNT_LO_READ (((Uint16) 1) << 6)
-#define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7)
+typedef struct {
+ /** @brief In which file the lock was taken. May be NULL. */
+ const char *file;
+ /** @brief Line number in \c file */
+ unsigned int line;
-#define ERTS_LCNT_LT_DISABLE (((Uint16) 1) << 8)
+ /* "attempts" and "collisions" need to be atomic since try_lock busy does
+ * not acquire a lock and there is no post action to rectify the
+ * situation. */
-#define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \
- | ERTS_LCNT_LO_WRITE )
+ ethr_atomic_t attempts;
+ ethr_atomic_t collisions;
-#define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \
- | ERTS_LCNT_LT_RWSPINLOCK \
- | ERTS_LCNT_LT_MUTEX \
- | ERTS_LCNT_LT_RWMUTEX \
- | ERTS_LCNT_LT_PROCLOCK )
+ erts_lcnt_time_t total_time_waited;
+ Uint64 times_waited;
-#define ERTS_LCNT_LOCK_TYPE(lock) ((lock)->flag & ERTS_LCNT_LT_ALL)
-#define ERTS_LCNT_IS_LOCK_INVALID(lock) (!((lock)->flag & ERTS_LCNT_LT_ALL))
-#define ERTS_LCNT_CLEAR_FLAG(lock) ((lock)->flag = 0)
+ erts_lcnt_hist_t wait_time_histogram;
+} erts_lcnt_lock_stats_t;
-/* runtime options */
+typedef struct lcnt_lock_info_t_ {
+ erts_lock_flags_t flags;
+ const char *name;
+ /** @brief Id if possible, must be an immediate */
+ Eterm id;
-#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0)
-#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
-#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
-#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 3)
-#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 4)
+    /* The first entry is reserved as a fallback for when location information
+     * is missing, or when the lock is used in more than (MAX_LOCK_LOCATIONS
+     * - 1) different places. */
+ erts_lcnt_lock_stats_t location_stats[ERTS_LCNT_MAX_LOCK_LOCATIONS];
+ unsigned int location_count;
-typedef struct {
- unsigned long s;
- unsigned long ns;
-} erts_lcnt_time_t;
+ /* -- Everything below is internal to this module ---------------------- */
+
+    /* Lock states; rw locks use both states, other locks only use w_state */
-extern erts_lcnt_time_t timer_start;
+ /** @brief Write state. 0 = not taken, otherwise n threads waiting */
+ ethr_atomic_t w_state;
+ /** @brief Read state. 0 = not taken, > 0 -> writes will wait */
+ ethr_atomic_t r_state;
+
+ struct lcnt_lock_info_t_ *prev;
+ struct lcnt_lock_info_t_ *next;
+
+ /** @brief Used in place of erts_refc_t to avoid a circular dependency. */
+ ethr_atomic_t ref_count;
+ ethr_atomic32_t lock;
+
+ /** @brief Deletion hook called once \c ref_count reaches 0; may defer
+ * deletion by modifying \c ref_count. */
+ void (*dispose)(struct lcnt_lock_info_t_ *);
+
+ struct lcnt_lock_info_carrier_ *carrier;
+} erts_lcnt_lock_info_t;
+
+typedef struct lcnt_lock_info_list_ {
+ erts_lcnt_lock_info_t head;
+ erts_lcnt_lock_info_t tail;
+} erts_lcnt_lock_info_list_t;
typedef struct {
- Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */
-} erts_lcnt_hist_t;
+ erts_lcnt_time_t timer_start; /**< Time of last clear */
+ erts_lcnt_time_t duration; /**< Time since last clear */
-typedef struct erts_lcnt_lock_stats_s {
- /* "tries" and "colls" needs to be atomic since
- * trylock busy does not acquire a lock and there
- * is no post action to rectify the situation
- */
+ erts_lcnt_lock_info_list_t *current_locks;
+ erts_lcnt_lock_info_list_t *deleted_locks;
+} erts_lcnt_data_t;
- char *file; /* which file the lock was taken */
- unsigned int line; /* line number in file */
+typedef struct lcnt_lock_info_carrier_ erts_lcnt_lock_info_carrier_t;
- ethr_atomic_t tries; /* n tries to get lock */
- ethr_atomic_t colls; /* n collisions of tries to get lock */
+typedef ethr_atomic_t erts_lcnt_ref_t;
- unsigned long timer_n; /* #times waited for lock */
- erts_lcnt_time_t timer; /* total wait time for lock */
- erts_lcnt_hist_t hist;
-} erts_lcnt_lock_stats_t;
+/* -- Globals -------------------------------------------------------------- */
+
+/** @brief Checks whether counting is enabled for any of the given
+ * categories. */
+#define erts_lcnt_check_enabled(flags) \
+ (lcnt_category_mask__ & flags)
+
+/* -- Lock operations ------------------------------------------------------
+ *
+ * All of these will nop if there's nothing "installed" on the given reference,
+ * in order to transparently support enable/disable at runtime. */
+
+/** @brief Records that a lock is being acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock
+ * @param option Notes whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Records that a lock has been acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_lock_post
+ * @param file The name of the file where the lock was acquired.
+ * @param line The line at which the lock was acquired. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line);
+
+/** @brief Records that a lock has been released. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref);
+
+/** @copydoc erts_lcnt_unlock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option);
+
+/** @brief Rectifies the case where a lock wasn't actually a lock operation.
+ *
+ * Only used for process locks at the moment. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref);
+
+/** @brief Records the result of a trylock, placing the queried lock status in
+ * \c result. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result);
+
+/** @copydoc erts_lcnt_trylock
+ * @param option Whether the lock is a read or write lock. */
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option);
+
+/* Indexed variants of the standard lock operations, for use when a single
+ * reference contains many counters (e.g., process locks).
+ *
+ * erts_lcnt_open_ref must be used to safely extract the installed carrier,
+ * which must be released with erts_lcnt_close_ref on success.
+ *
+ * Refer to \c erts_lcnt_lock for example usage. */
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line);
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option);
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result);
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option);
+
+/* -- Reference operations ------------------------------------------------- */
+
+/** @brief Registers a lock counter reference; this must be called prior to
+ * using any other functions in this module. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref);
+
+/** @brief As \c erts_lcnt_init_ref, but also enables lock counting right
+ * away if appropriate to reduce noise.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags);
+
+/** @brief Checks whether counting is enabled on the given reference. */
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref);
+
+/** @brief Convenience macro to re/enable counting on an already initialized
+ * reference. Don't forget to specify the lock type in \c flags! */
+#define erts_lcnt_install_new_lock_info(ref, name, id, flags) \
+    do { \
+        if(!erts_lcnt_check_ref_installed(ref)) { \
+            erts_lcnt_lock_info_carrier_t *__carrier; \
+            __carrier = erts_lcnt_create_lock_info_carrier(1); \
+            erts_lcnt_init_lock_info_idx(__carrier, 0, name, id, flags); \
+            erts_lcnt_install(ref, __carrier); \
+        } \
+    } while(0)
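+
+/* A usage sketch (illustrative only; "my_lock" is a hypothetical erts_mtx_t
+ * and the flag combination is just an example):
+ *
+ *     erts_lcnt_install_new_lock_info(&my_lock.lcnt, "my_lock", NIL,
+ *         ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ */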
+
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int count);
+
+/** @brief Initializes the lock info at the given index.
+ * @param id An immediate erlang term with whatever extra data you want to
+ * identify this lock with.
+ * @param flags The flags the lock itself was initialized with. Keep in mind
+ * that all locks in a carrier must share the same category/static property. */
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags);
+
+/** @brief Atomically installs the given lock counters. Nops (and releases the
+ * provided carrier) if something was already installed. */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier);
+
+/** @brief Atomically removes the currently installed lock counters. Nops if
+ * nothing was installed. */
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref);
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result);
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier);
+
+/* -- Module initialization ------------------------------------------------ */
+
+void erts_lcnt_pre_thr_init(void);
+void erts_lcnt_post_thr_init(void);
+void erts_lcnt_late_init(void);
+
+/** @brief Called after everything in the system has been initialized, including
+ * the schedulers. This is mainly a backwards compatibility shim for matching
+ * the old lcnt behavior where all lock counting was enabled by default. */
+void erts_lcnt_post_startup(void);
+
+void erts_lcnt_thread_setup(void);
+void erts_lcnt_thread_exit_handler(void);
+
+/* -- BIF interface -------------------------------------------------------- */
-/* rw locks uses both states, other locks only uses w_state */
-typedef struct erts_lcnt_lock_s {
- char *name; /* lock name */
- Uint16 flag; /* lock type */
- Eterm id; /* id if possible */
+/** @brief Safely iterates through all entries in the given list.
+ *
+ * The referenced item will be valid until the next call to
+ * \c erts_lcnt_iterate_list, after which it may be destroyed; call
+ * erts_lcnt_retain_lock_info if you wish to hang on to it beyond that point.
+ *
+ * Iteration can be cancelled by calling erts_lcnt_release_lock_info on the
+ * iterator and breaking out of the loop.
+ *
+ * @param iterator The iteration variable; set the pointee to NULL to start
+ * iteration.
+ * @return 1 while the iterator is valid, 0 at the end of the list. */
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator);
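+
+/* A minimal iteration sketch (illustrative; "list" is assumed to be one of
+ * the lists returned by erts_lcnt_get_data()):
+ *
+ *     erts_lcnt_lock_info_t *iterator = NULL;
+ *
+ *     while(erts_lcnt_iterate_list(list, &iterator)) {
+ *         (inspect *iterator here; it remains valid until the next
+ *          erts_lcnt_iterate_list call unless it is retained)
+ *     }
+ */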
+
+/** @brief Clears the counter state of all locks, and releases all locks
+ * preserved through erts_lcnt_set_preserve_info (if any). */
+void erts_lcnt_clear_counters(void);
+
+/** @brief Retrieves the global lock counter state.
+ *
+ * Note that the lists may be modified while you're mucking around with them.
+ * Always use \c erts_lcnt_iterate_list to enumerate them. */
+erts_lcnt_data_t erts_lcnt_get_data(void);
+
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info);
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info);
+
+/** @brief Sets whether to preserve the info of destroyed/uninstalled locks.
+ *
+ * This option does not distinguish between a lock being destroyed and lock
+ * counting simply being disabled, so erts_lcnt_set_category_mask must not be
+ * used while this option is active. */
+void erts_lcnt_set_preserve_info(int enable);
+
+int erts_lcnt_get_preserve_info(void);
+
+/** @brief Updates the category mask, enabling or disabling counting on the
+ * affected locks as necessary.
+ *
+ * This is not guaranteed to find all existing locks; only those that are
+ * flagged as static locks and those reachable through other means can be
+ * altered. */
+void erts_lcnt_set_category_mask(erts_lock_flags_t mask);
+
+erts_lock_flags_t erts_lcnt_get_category_mask(void);
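+
+/* For example (illustrative), counting only process and I/O locks would
+ * amount to:
+ *
+ *     erts_lcnt_set_category_mask(ERTS_LOCK_FLAGS_CATEGORY_PROCESS |
+ *                                 ERTS_LOCK_FLAGS_CATEGORY_IO);
+ */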
+
+/* -- Inline implementation ------------------------------------------------ */
+
+/* The following is a hack to get the things we need from erl_thr_progress.h,
+ * which we can't #include without dependency hell breaking loose.
+ *
+ * The size of LcntThrPrgrLaterOp and value of the constant are verified at
+ * compile-time in erts_lcnt_pre_thr_init. */
+
+int lcnt_thr_progress_unmanaged_delay__(void);
+void lcnt_thr_progress_unmanaged_continue__(int handle);
+typedef struct { Uint64 _[4]; } LcntThrPrgrLaterOp;
+#define LCNT_THR_PRGR_DHANDLE_MANAGED -1
+
+struct lcnt_lock_info_carrier_ {
+ ethr_atomic_t ref_count;
+
+ LcntThrPrgrLaterOp release_entries;
+
+ unsigned char entry_count;
+ erts_lcnt_lock_info_t entries[];
+};
+
+typedef struct {
+ erts_lcnt_time_t timer; /* timer */
+ int timer_set; /* bool */
+ int lock_in_conflict; /* bool */
+} lcnt_thread_data_t__;
+
+extern const int lcnt_log2_tab64__[];
+
+extern ethr_tsd_key lcnt_thr_data_key__;
+extern erts_lock_flags_t lcnt_category_mask__;
#ifdef DEBUG
- ethr_atomic_t flowstate;
+extern int lcnt_initialization_completed__;
#endif
- /* lock states */
- ethr_atomic_t w_state; /* 0 not taken, otherwise n threads waiting */
- ethr_atomic_t r_state; /* 0 not taken, > 0 -> writes will wait */
+void lcnt_register_static_lock__(erts_lcnt_ref_t *reference, const char *name, Eterm id,
+ erts_lock_flags_t flags);
- /* statistics */
- unsigned int n_stats;
- erts_lcnt_lock_stats_t stats[ERTS_LCNT_MAX_LOCK_LOCATIONS]; /* first entry is "undefined"*/
+void lcnt_deallocate_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
- /* chains for list handling */
- /* data is hold by lcnt_lock */
- struct erts_lcnt_lock_s *prev;
- struct erts_lcnt_lock_s *next;
-} erts_lcnt_lock_t;
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v);
-typedef struct {
- erts_lcnt_lock_t *head;
- erts_lcnt_lock_t *tail;
- unsigned long n;
-} erts_lcnt_lock_list_t;
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited);
-typedef struct {
- erts_lcnt_time_t duration; /* time since last clear */
- erts_lcnt_lock_list_t *current_locks;
- erts_lcnt_lock_list_t *deleted_locks;
-} erts_lcnt_data_t;
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited);
-typedef struct {
- int id;
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line);
- erts_lcnt_time_t timer; /* timer */
- int timer_set; /* bool */
- int lock_in_conflict; /* bool */
-} erts_lcnt_thread_data_t;
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state);
-/* globals */
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time);
-extern Uint16 erts_lcnt_rt_options;
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d);
-/* function declerations */
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0);
-void erts_lcnt_init(void);
-void erts_lcnt_late_init(void);
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
-/* thread operations */
-void erts_lcnt_thread_setup(void);
-void erts_lcnt_thread_exit_handler(void);
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier);
+
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void lcnt_time__(erts_lcnt_time_t *time) {
+ /*
+     * erts_sys_hrtime() is the highest resolution time source
+     * we could find; it may or may not be monotonic...
+ */
+ ErtsMonotonicTime mtime = erts_sys_hrtime();
+ time->s = (unsigned long) (mtime / 1000000000LL);
+ time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
+}
+
+/* difference d must be non-negative */
+
+ERTS_GLB_INLINE
+void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
+ t->s += d->s;
+ t->ns += d->ns;
+
+ t->s += t->ns / 1000000000LL;
+ t->ns = t->ns % 1000000000LL;
+}
+
+ERTS_GLB_INLINE
+void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
+ long ds;
+ long dns;
+
+ ds = t1->s - t0->s;
+ dns = t1->ns - t0->ns;
+
+    /* the nanosecond difference should never exceed one second */
+
+ if (dns < 0) {
+ ds -= 1;
+ dns += 1000000000LL;
+ }
+
+ ASSERT(ds >= 0);
+
+ d->s = ds;
+ d->ns = dns;
+}
+
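+/* Returns the index of the most significant set bit, i.e. floor(log2(v)) for
+ * v >= 1, computed with a de Bruijn multiplication and lookup table; for
+ * example, lcnt_log2__(1000) is 9. */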
+ERTS_GLB_INLINE
+int lcnt_log2__(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return lcnt_log2_tab64__[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
+
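+/* Buckets a wait time into a power-of-two histogram slot: waits shorter than
+ * (1 << ERTS_LCNT_HISTOGRAM_RSHIFT) ns land in slot 0, each following slot
+ * covers a doubling of the wait time, and waits of a second or more (or above
+ * ERTS_LCNT_HISTOGRAM_MAX_NS) go into the last slot. */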
+ERTS_GLB_INLINE
+void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited) {
+ int idx;
+
+ if(time_waited->s > 0 || time_waited->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ unsigned long r = time_waited->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+
+ idx = r ? lcnt_log2__(r) : 0;
+ }
+
+ hist->ns[idx]++;
+}
+
+ERTS_GLB_INLINE
+void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited) {
+ ethr_atomic_inc(&stats->attempts);
+
+ if(lock_in_conflict) {
+ ethr_atomic_inc(&stats->collisions);
+ }
+
+ if(time_waited) {
+ stats->times_waited++;
+
+ lcnt_time_add__(&stats->total_time_waited, time_waited);
+ lcnt_update_wait_histogram__(&stats->wait_time_histogram, time_waited);
+ }
+}
+
+/* If we were installed while the lock was held, r/w_state will be 0 and we
+ * can't tell which unlock or unacquire operation was the last. To get around
+ * this we assume that all excess operations go *towards* zero rather than down
+ * to zero, eventually becoming consistent with the actual state once the lock
+ * is fully released.
+ *
+ * Conflicts might not be counted until the recorded state is fully consistent
+ * with the actual state, but there should be no other ill effects. */
+
+ERTS_GLB_INLINE
+void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {
+ ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state);
+
+    /* We cannot assume that state is >= -1 here; unlock and unacquire might
+ * bring it below -1 and race to increment it back. */
+
+ if(state < 0) {
+ ethr_atomic_inc_acqb(l_state);
+ }
+}
+
+ERTS_GLB_INLINE
+erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line) {
+ unsigned int i;
+
+ ASSERT(info->location_count >= 1 && info->location_count <= ERTS_LCNT_MAX_LOCK_LOCATIONS);
+
+ for(i = 0; i < info->location_count; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
+
+ if(stats->file == file && stats->line == line) {
+ return stats;
+ }
+ }
-/* list operations (local) */
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void);
+ if(info->location_count < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[info->location_count];
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock);
+ stats->file = file;
+ stats->line = line;
-/* lock operations (global) */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag);
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id);
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock);
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock);
+ info->location_count++;
-void erts_lcnt_lock(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option);
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock);
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line);
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock);
+ return stats;
+ }
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock);
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option);
+ return &info->location_stats[0];
+}
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option);
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res);
+ERTS_GLB_INLINE
+lcnt_thread_data_t__ *lcnt_get_thread_data__(void) {
+ lcnt_thread_data_t__ *eltd = (lcnt_thread_data_t__ *)ethr_tsd_get(lcnt_thr_data_key__);
-/* bif interface */
-Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
-Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);
-void erts_lcnt_clear_counters(void);
-char *erts_lcnt_lock_type(Uint16 type);
-erts_lcnt_data_t *erts_lcnt_get_data(void);
+ ASSERT(eltd);
+
+ return eltd;
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result) {
+ if(ERTS_LIKELY(!erts_lcnt_check_ref_installed(ref))) {
+ return 0;
+ }
+
+ ASSERT(lcnt_initialization_completed__);
+
+ (*handle) = lcnt_thr_progress_unmanaged_delay__();
+ (*result) = (erts_lcnt_lock_info_carrier_t*)ethr_atomic_read(ref);
+
+ if(*result) {
+ if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_retain_carrier__(*result);
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 1;
+ } else if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_thr_progress_unmanaged_continue__(*handle);
+ }
+
+ return 0;
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier) {
+ if(handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
+ lcnt_release_carrier__(carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref(erts_lcnt_ref_t *ref) {
+ ethr_atomic_init(ref, (ethr_sint_t)NULL);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_ref_x(erts_lcnt_ref_t *ref, const char *name,
+ Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_init_ref(ref);
+
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC) {
+ lcnt_register_static_lock__(ref, name, id, flags);
+ }
+
+ if(erts_lcnt_check_enabled(flags)) {
+ erts_lcnt_install_new_lock_info(ref, name, id, flags);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+int erts_lcnt_check_ref_installed(erts_lcnt_ref_t *ref) {
+ return (!!*ethr_atomic_addr(ref));
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_post_x_idx(carrier, 0, file, line);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_lock_unacquire_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock(erts_lcnt_ref_t *ref) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_idx(carrier, 0);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_unlock_opt_idx(carrier, 0, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_idx(carrier, 0, result);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_FORCE_INLINE
+void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
+ erts_lcnt_trylock_opt_idx(carrier, 0, result, option);
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_sint_t w_state, r_state;
+
+ w_state = ethr_atomic_inc_read(&info->w_state) - 1;
+ r_state = ethr_atomic_read(&info->r_state);
+
+ /* We cannot acquire w_lock if either w or r are taken */
+ eltd->lock_in_conflict = (w_state > 0) || (r_state > 0);
+ } else {
+ ethr_sint_t w_state = ethr_atomic_read(&info->w_state);
+
+ /* We cannot acquire r_lock if w_lock is taken */
+ eltd->lock_in_conflict = (w_state > 0);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ if(eltd->lock_in_conflict) {
+ /* Only set the timer if nobody else has it. This should only happen
+ * when proc_locks acquires several locks "atomically." All other locks
+ * will block the thread when locked (w_state > 0) */
+ if(eltd->timer_set == 0) {
+ lcnt_time__(&eltd->timer);
+ }
+
+ eltd->timer_set++;
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_post_x_idx(carrier, index, NULL, 0);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
+ erts_lcnt_lock_stats_t *stats;
+
+ ASSERT(index < carrier->entry_count);
+
+ /* If the lock was in conflict, update the time spent waiting. */
+ stats = lcnt_get_lock_stats__(info, file, line);
+ if(eltd->timer_set) {
+ erts_lcnt_time_t time_wait;
+ erts_lcnt_time_t timer;
+
+ lcnt_time__(&timer);
+
+ lcnt_time_diff__(&time_wait, &timer, &eltd->timer);
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, &time_wait);
+
+ eltd->timer_set--;
+
+ ASSERT(eltd->timer_set >= 0);
+ } else {
+ lcnt_update_stats__(stats, eltd->lock_in_conflict, NULL);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_unlock_opt_idx(carrier, index, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ lcnt_dec_lock_state__(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ lcnt_dec_lock_state__(&info->r_state);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ lcnt_dec_lock_state__(&info->w_state);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result) {
+ ASSERT(index < carrier->entry_count);
+
+ erts_lcnt_trylock_opt_idx(carrier, index, result, ERTS_LOCK_OPTIONS_WRITE);
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, erts_lock_options_t option) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(index < carrier->entry_count);
+
+ ASSERT((option & ERTS_LOCK_OPTIONS_READ) || (option & ERTS_LOCK_OPTIONS_WRITE));
+
+ if(result != EBUSY) {
+ if(option & ERTS_LOCK_OPTIONS_WRITE) {
+ ethr_atomic_inc(&info->w_state);
+ }
+
+ if(option & ERTS_LOCK_OPTIONS_READ) {
+ ASSERT(info->flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE);
+ ethr_atomic_inc(&info->r_state);
+ }
+
+ lcnt_update_stats__(&info->location_stats[0], 0, NULL);
+ } else {
+ ethr_atomic_inc(&info->location_stats[0].attempts);
+ ethr_atomic_inc(&info->location_stats[0].collisions);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_init_lock_info_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
+ const char *name, Eterm id, erts_lock_flags_t flags) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[index];
+
+ ASSERT(is_immed(id));
+
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_TYPE);
+ ASSERT(flags & ERTS_LOCK_FLAGS_MASK_CATEGORY);
+
+ info->flags = flags;
+ info->name = name;
+ info->id = id;
+}
+
+ERTS_GLB_INLINE
+void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+#ifdef DEBUG
+ ASSERT(ethr_atomic_inc_read_acqb(&carrier->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&carrier->ref_count);
+#endif
+}
+
+ERTS_GLB_INLINE
+void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t count = ethr_atomic_dec_read_relb(&carrier->ref_count);
+
+ ASSERT(count >= 0);
+
+ if(count == 0) {
+ lcnt_deallocate_carrier__(carrier);
+ }
+}
+
+#endif
#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
#endif /* ifndef ERTS_LOCK_COUNT_H__ */
diff --git a/erts/emulator/beam/erl_lock_flags.c b/erts/emulator/beam/erl_lock_flags.c
new file mode 100644
index 0000000000..e0a0e95c09
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.c
@@ -0,0 +1,59 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "erl_lock_flags.h"
+
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags) {
+ switch(flags & ERTS_LOCK_FLAGS_MASK_TYPE) {
+ case ERTS_LOCK_FLAGS_TYPE_PROCLOCK:
+ return "proclock";
+ case ERTS_LOCK_FLAGS_TYPE_MUTEX:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_mutex";
+ }
+
+ return "mutex";
+ case ERTS_LOCK_FLAGS_TYPE_SPINLOCK:
+ if(flags & ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE) {
+ return "rw_spinlock";
+ }
+
+ return "spinlock";
+ default:
+ return "garbage";
+ }
+}
+
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options) {
+ switch(options) {
+ case ERTS_LOCK_OPTIONS_RDWR:
+ return "rw";
+ case ERTS_LOCK_OPTIONS_READ:
+ return "r";
+ case ERTS_LOCK_OPTIONS_WRITE:
+ return "w";
+ default:
+ return "none";
+ }
+}
diff --git a/erts/emulator/beam/erl_lock_flags.h b/erts/emulator/beam/erl_lock_flags.h
new file mode 100644
index 0000000000..d711f69456
--- /dev/null
+++ b/erts/emulator/beam/erl_lock_flags.h
@@ -0,0 +1,78 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2017. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERTS_LOCK_FLAGS_H__
+#define ERTS_LOCK_FLAGS_H__
+
+#define ERTS_LOCK_OPTIONS_READ (1 << 1)
+#define ERTS_LOCK_OPTIONS_WRITE (1 << 2)
+
+#define ERTS_LOCK_OPTIONS_RDWR (ERTS_LOCK_OPTIONS_READ | ERTS_LOCK_OPTIONS_WRITE)
+
+/* Property/category are bitfields to simplify their use in masks. */
+#define ERTS_LOCK_FLAGS_MASK_CATEGORY (0xFFC0)
+#define ERTS_LOCK_FLAGS_MASK_PROPERTY (0x0030)
+
+/* Type is a plain number. */
+#define ERTS_LOCK_FLAGS_MASK_TYPE (0x000F)
+
+#define ERTS_LOCK_FLAGS_TYPE_SPINLOCK (1)
+#define ERTS_LOCK_FLAGS_TYPE_MUTEX (2)
+#define ERTS_LOCK_FLAGS_TYPE_PROCLOCK (3)
+
+/* "Static" guarantees that the lock will never be destroyed once created. */
+#define ERTS_LOCK_FLAGS_PROPERTY_STATIC (1 << 4)
+#define ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE (1 << 5)
+
+#define ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR (1 << 6)
+#define ERTS_LOCK_FLAGS_CATEGORY_PROCESS (1 << 7)
+#define ERTS_LOCK_FLAGS_CATEGORY_IO (1 << 8)
+#define ERTS_LOCK_FLAGS_CATEGORY_DB (1 << 9)
+#define ERTS_LOCK_FLAGS_CATEGORY_DEBUG (1 << 10)
+#define ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER (1 << 11)
+#define ERTS_LOCK_FLAGS_CATEGORY_GENERIC (1 << 12)
+#define ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION (1 << 13)
+
+#define ERTS_LOCK_TYPE_SPINLOCK \
+ (ERTS_LOCK_FLAGS_TYPE_SPINLOCK)
+#define ERTS_LOCK_TYPE_RWSPINLOCK \
+ (ERTS_LOCK_TYPE_SPINLOCK | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_MUTEX \
+ (ERTS_LOCK_FLAGS_TYPE_MUTEX)
+#define ERTS_LOCK_TYPE_RWMUTEX \
+ (ERTS_LOCK_TYPE_MUTEX | \
+ ERTS_LOCK_FLAGS_PROPERTY_READ_WRITE)
+#define ERTS_LOCK_TYPE_PROCLOCK \
+ (ERTS_LOCK_FLAGS_CATEGORY_PROCESS | \
+ ERTS_LOCK_FLAGS_TYPE_PROCLOCK)
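+
+/* The flags handed to the lock counter combine one type with a category and
+ * any applicable properties, e.g. (illustrative) a statically allocated
+ * rwlock in the distribution layer would carry:
+ *
+ *     ERTS_LOCK_TYPE_RWMUTEX |
+ *     ERTS_LOCK_FLAGS_PROPERTY_STATIC |
+ *     ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION
+ */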
+
+/* -- -- */
+
+typedef unsigned short erts_lock_flags_t;
+typedef unsigned short erts_lock_options_t;
+
+/** @brief Gets the type name of the lock, honoring the RW flag if supplied. */
+const char *erts_lock_flags_get_type_name(erts_lock_flags_t flags);
+
+/** @brief Gets a short-form description of the given lock options. (rw/r/w) */
+const char *erts_lock_options_get_short_desc(erts_lock_options_t options);
+
+#endif /* ERTS_LOCK_FLAGS_H__ */
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 2d70f0d874..6c477be615 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -76,7 +76,8 @@ void erts_msacc_early_init(void) {
#ifndef ERTS_MSACC_ALWAYS_ON
erts_msacc_enabled = 0;
#endif
- erts_rwmtx_init(&msacc_mutex,"msacc_list_mutex");
+ erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef USE_THREADS
erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
#else
@@ -109,7 +110,8 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
#ifdef USE_THREADS
erts_rwmtx_rwlock(&msacc_mutex);
if (!managed) {
- erts_mtx_init(&msacc->mtx,"msacc_unmanaged_mutex");
+ erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
msacc->next = msacc_unmanaged;
msacc_unmanaged = msacc;
msacc_unmanaged_count++;
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 19bb7d5b31..f2a660f085 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -583,8 +583,10 @@ void erts_mtrace_init(char *receiver, char *nodename)
byte ip_addr[4];
Uint16 port;
- erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf");
- erts_mtx_init(&mtrace_op_mutex, "mtrace_op");
+ erts_mtx_init(&mtrace_buf_mutex, "mtrace_buf", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
+ erts_mtx_init(&mtrace_op_mutex, "mtrace_op", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
socket_desc = erts_sock_open();
if (socket_desc == ERTS_SOCK_INVALID_SOCKET) {
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index cdce6abafd..d3c5af3a83 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -2457,7 +2457,8 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
erts_refc_inc(&resource->type->refc, 2);
if (type->down) {
resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
- erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors");
+ erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
+ ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
resource->monitors->root = NULL;
resource->monitors->pending_failed_fire = 0;
resource->monitors->is_dying = 0;
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 3c5945d48d..deadf435e9 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -85,21 +85,20 @@ dist_table_cmp(void *dep1, void *dep2)
static void*
dist_table_alloc(void *dep_tmpl)
{
- Eterm chnl_nr;
Eterm sysname;
DistEntry *dep;
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
sysname = ((DistEntry *) dep_tmpl)->sysname;
- chnl_nr = make_small((Uint) atom_val(sysname));
dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry));
dist_entries++;
dep->prev = NULL;
erts_smp_refc_init(&dep->refc, -1);
- erts_smp_rwmtx_init_opt_x(&dep->rwmtx, &rwmtx_opt, "dist_entry", chnl_nr);
+ erts_smp_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->sysname = sysname;
dep->cid = NIL;
dep->connection_id = 0;
@@ -107,12 +106,14 @@ dist_table_alloc(void *dep_tmpl)
dep->flags = 0;
dep->version = 0;
- erts_smp_mtx_init_x(&dep->lnk_mtx, "dist_entry_links", chnl_nr);
+ erts_smp_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->node_links = NULL;
dep->nlinks = NULL;
dep->monitors = NULL;
- erts_smp_mtx_init_x(&dep->qlock, "dist_entry_out_queue", chnl_nr);
+ erts_smp_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname,
+ ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
dep->qflgs = 0;
dep->qsize = 0;
dep->out_queue.first = NULL;
@@ -760,8 +761,10 @@ void erts_init_node_tables(int dd_sec)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table");
- erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table");
+ erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
f.hash = (H_FUN) dist_table_hash;
f.cmp = (HCMP_FUN) dist_table_cmp;
@@ -818,6 +821,33 @@ int erts_lc_is_de_rlocked(DistEntry *dep)
#endif
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+
+static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) {
+ DistEntry *dep = (DistEntry*)dep_raw;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&dep->rwmtx.lcnt, "dist_entry", dep->sysname,
+ ERTS_LOCK_TYPE_RWMUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->lnk_mtx.lcnt, "dist_entry_links", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ erts_lcnt_install_new_lock_info(&dep->qlock.lcnt, "dist_entry_out_queue", dep->sysname,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
+ } else {
+ erts_lcnt_uninstall(&dep->rwmtx.lcnt);
+ erts_lcnt_uninstall(&dep->lnk_mtx.lcnt);
+ erts_lcnt_uninstall(&dep->qlock.lcnt);
+ }
+}
+
+void erts_lcnt_update_distribution_locks(int enable) {
+ erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count,
+ (void*)(UWord)enable);
+ erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+}
+#endif
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* The following is only supposed to be used for testing, and debugging. *
* *
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 489da1ba17..91bcb4fce1 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -195,6 +195,10 @@ int erts_lc_is_de_rwlocked(DistEntry *);
int erts_lc_is_de_rlocked(DistEntry *);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_update_distribution_locks(int enable);
+#endif
+
ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep);
ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep);
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index a044de3fee..1ab1e47254 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -852,10 +852,11 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
}
static ERTS_INLINE void
-abort_nosuspend_task(Port *pp,
- ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp,
- int bpq_data)
+abort_signal_task(Port *pp,
+ int abort_type,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
@@ -863,18 +864,28 @@ abort_nosuspend_task(Port *pp,
if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
else {
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data);
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
- ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ abort_type,
&tdp->psig.data);
aborted_proc2port_data(pp, size);
}
}
+
+static ERTS_INLINE void
+abort_nosuspend_task(Port *pp,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
+{
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, type, tdp, bpq_data);
+}
+
static ErtsPortTaskHandleList *
get_free_nosuspend_handles(Port *pp)
{
@@ -1613,8 +1624,9 @@ abort_nosuspend:
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
+
+ ASSERT(ptp);
+ port_task_free(ptp);
return 0;
@@ -1625,12 +1637,15 @@ fail:
erts_port_dec_refc(pp);
#endif
+ if (ptp) {
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
+ ptp->type, &ptp->u.alive.td, 0);
+ port_task_free(ptp);
+ }
+
if (ns_pthlp)
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
- if (ptp)
- port_task_free(ptp);
-
return -1;
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 9cca62ffaf..39f403b443 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -188,13 +188,7 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
#ifdef ERTS_SMP
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x_opt(&ptsp->mtx, lock_str, instr_id,
- ((erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
- ? 0 : ERTS_LCNT_LT_DISABLE));
-#else
- erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
-#endif
+ erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
#endif
}
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index fc2b34e70f..359fd83522 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -841,7 +841,9 @@ erts_late_init_process(void)
{
int ix;
- erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
+ erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
Eterm atom;
char *atom_str;
@@ -2010,12 +2012,13 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
int id, self = 0;
if (ignore_self) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
- if (esdp)
- self = (int) esdp->no;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+
+ /* ignore_self is meaningless on dirty schedulers since aux work can
+ * only run on normal schedulers, and their ids do not translate. */
+ if(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ self = (int)esdp->no;
+ }
}
ASSERT(0 < max_sched && max_sched <= erts_no_schedulers);
@@ -6232,13 +6235,17 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
* id if the esdp->no <-> ix+1 mapping change.
*/
- erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
+ erts_smp_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_smp_cnd_init(&rq->cnd);
#ifdef ERTS_DIRTY_SCHEDULERS
#ifdef ERTS_SMP
- if (ERTS_RUNQ_IX_IS_DIRTY(ix))
- erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list");
+ if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
+ erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list",
+ make_small(ix + 1),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
+ }
rq->sleepers.list = NULL;
#endif
#endif
@@ -6431,7 +6438,8 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
- erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
+ erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
balance_info.forced_check_balance = 0;
balance_info.halftime = 1;
balance_info.full_reds_history_index = 0;
@@ -7493,7 +7501,8 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi)
static void
init_scheduler_suspend(void)
{
- erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd");
+ erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
schdlr_sspnd.online.normal = 1;
schdlr_sspnd.curr_online.normal = 1;
schdlr_sspnd.active.normal = 1;
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index c0e7380ed0..ff124d5ba7 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -112,21 +112,13 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
-#ifdef ERTS_ENABLE_LOCK_COUNT
-static void lcnt_enable_proc_lock_count(Process *proc, int enable);
-#endif
-
void
erts_init_proc_lock(int cpus)
{
int i;
for (i = 0; i < ERTS_NO_OF_PIX_LOCKS; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_mtx_init_x(&erts_pix_locks[i].u.mtx,
- "pix_lock", make_small(i));
-#else
- erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
-#endif
+ erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
}
#if ERTS_PROC_LOCK_OWN_IMPL
erts_thr_install_exit_handler(cleanup_tse);
@@ -944,7 +936,7 @@ erts_pid2proc_opt(Process *c_p,
erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
- erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
+ erts_lcnt_proc_lock_unacquire(&proc->lock, lcnt_locks);
#endif
managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
@@ -1062,32 +1054,38 @@ erts_proc_lock_init(Process *p)
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
+ erts_mtx_init(&p->lock.main, "proc_main", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
+ erts_mtx_init(&p->lock.link, "proc_link", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
+ erts_mtx_init(&p->lock.msgq, "proc_msgq", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.btm, "proc_btm", p->common.id);
+ erts_mtx_init(&p->lock.btm, "proc_btm", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.btm.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.btm.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
+ erts_mtx_init(&p->lock.status, "proc_status", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
- erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id);
+ erts_mtx_init(&p->lock.trace, "proc_trace", p->common.id,
+ ERTS_LOCK_FLAGS_CATEGORY_PROCESS);
ethr_mutex_lock(&p->lock.trace.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.trace.lc);
@@ -1124,117 +1122,70 @@ erts_proc_lock_fin(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_proc_lock_count(int enable) {
- int ix, max = erts_ptab_max(&erts_proc);
- Process *proc = NULL;
- for (ix = 0; ix < max; ++ix) {
- if ((proc = erts_pix2proc(ix)) != NULL)
- lcnt_enable_proc_lock_count(proc, enable);
- } /* for all processes */
-}
-
void erts_lcnt_proc_lock_init(Process *p) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) {
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_main));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_msgq));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_btm));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_status));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_trace));
- } else { /* now the common case */
- Eterm pid = (p->common.id != ERTS_INVALID_PID) ? p->common.id : NIL;
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status),"proc_status",ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_trace), "proc_trace", ERTS_LCNT_LT_PROCLOCK, pid);
- } /* the lock names should really be aligned to four characters */
+ erts_lcnt_init_ref(&p->lock.lcnt_carrier);
+
+ if(erts_lcnt_check_enabled(ERTS_LOCK_FLAGS_CATEGORY_PROCESS)) {
+ erts_lcnt_enable_proc_lock_count(p, 1);
+ }
} /* logic reversed */
void erts_lcnt_proc_lock_destroy(Process *p) {
- erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_btm));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_trace));
+ erts_lcnt_uninstall(&p->lock.lcnt_carrier);
}
-static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
- if (enable) {
- if (!ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_init(proc);
- }
- }
- else {
- if (ERTS_LCNT_LOCK_TYPE(&(proc->lock.lcnt_main))) {
- erts_lcnt_proc_lock_destroy(proc);
- }
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable) {
+ if(proc->common.id == ERTS_INVALID_PID) {
+ /* Locks without an id are more trouble than they're worth; there's no
+ * way to look them up and we can't track them with _STATIC since it's
+ * too early to tell whether we're a system process (proc->static_flags
+         * hasn't been set yet). */
+ } else if(!enable) {
+ erts_lcnt_proc_lock_destroy(proc);
+ } else if(!erts_lcnt_check_ref_installed(&proc->lock.lcnt_carrier)) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+
+ carrier = erts_lcnt_create_lock_info_carrier(ERTS_LCNT_PROCLOCK_COUNT);
+
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN,
+ "proc_main", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK,
+ "proc_link", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ,
+ "proc_msgq", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM,
+ "proc_btm", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS,
+ "proc_status",proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+ erts_lcnt_init_lock_info_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE,
+ "proc_trace", proc->common.id, ERTS_LOCK_TYPE_PROCLOCK);
+
+ erts_lcnt_install(&proc->lock.lcnt_carrier, carrier);
}
}
-void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock(&(lock->lcnt_trace)); }
-}
+void erts_lcnt_update_process_locks(int enable) {
+ int i, max;
-void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
- char *file, unsigned int line) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) {
- erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
- }
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
- }
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
- }
- if (locks & ERTS_PROC_LOCK_BTM) {
- erts_lcnt_lock_post_x(&(lock->lcnt_btm), file, line);
- }
- if (locks & ERTS_PROC_LOCK_STATUS) {
- erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
- }
- if (locks & ERTS_PROC_LOCK_TRACE) {
- erts_lcnt_lock_post_x(&(lock->lcnt_trace), file, line);
- }
-}
+ max = erts_ptab_max(&erts_proc);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_unaquire(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_unaquire(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock_unaquire(&(lock->lcnt_trace)); }
-}
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Process *proc;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ proc = erts_pix2proc(i);
+
+ if(proc != NULL) {
+ erts_lcnt_enable_proc_lock_count(proc, enable);
+ }
-void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_unlock(&(lock->lcnt_main)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_unlock(&(lock->lcnt_msgq)); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_unlock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_unlock(&(lock->lcnt_status)); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_unlock(&(lock->lcnt_trace)); }
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
+ }
}
-void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
- if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
- if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_trylock(&(lock->lcnt_main), res); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
- if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_trylock(&(lock->lcnt_msgq), res); }
- if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_trylock(&(lock->lcnt_btm), res); }
- if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_trylock(&(lock->lcnt_status), res); }
- if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_trylock(&(lock->lcnt_trace), res); }
-} /* reversed logic */
+
#endif /* ERTS_ENABLE_LOCK_COUNT */
@@ -1249,7 +1200,7 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_lock_x(&lck,file,line);
@@ -1282,7 +1233,7 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_trylock_x(locked, &lck, file, line);
@@ -1314,7 +1265,7 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unlock(&lck);
@@ -1349,7 +1300,7 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_might_unlock(&lck);
@@ -1397,7 +1348,7 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
erts_lc_require_lock(&lck, file, line);
@@ -1444,7 +1395,7 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_TRACE) {
lck.id = lc_id.proc_lock_trace;
erts_lc_unrequire_lock(&lck);
@@ -1493,7 +1444,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
+ ERTS_LOCK_TYPE_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
lck.id = lc_id.proc_lock_main;
@@ -1524,7 +1475,7 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
- ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
+ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LOCK_TYPE_PROCLOCK)
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
@@ -1739,22 +1690,22 @@ erts_proc_lc_my_proc_locks(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t locks[6] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_btm,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LOCK_TYPE_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK)};
+ ERTS_LOCK_TYPE_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t locks[6] = {p->lock.main.lc,
p->lock.link.lc,
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 6e704b185d..023ba4d4ae 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -78,13 +78,19 @@ typedef struct erts_proc_lock_t_ {
ErtsProcLocks flags;
#endif
erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt_main;
- erts_lcnt_lock_t lcnt_link;
- erts_lcnt_lock_t lcnt_msgq;
- erts_lcnt_lock_t lcnt_btm;
- erts_lcnt_lock_t lcnt_status;
- erts_lcnt_lock_t lcnt_trace;
+#if defined(ERTS_ENABLE_LOCK_COUNT) && !ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ /* Each erts_mtx_t has its own lock counter ^ */
+
+ #define ERTS_LCNT_PROCLOCK_IDX_MAIN 0
+ #define ERTS_LCNT_PROCLOCK_IDX_LINK 1
+ #define ERTS_LCNT_PROCLOCK_IDX_MSGQ 2
+ #define ERTS_LCNT_PROCLOCK_IDX_BTM 3
+ #define ERTS_LCNT_PROCLOCK_IDX_STATUS 4
+ #define ERTS_LCNT_PROCLOCK_IDX_TRACE 5
+
+ #define ERTS_LCNT_PROCLOCK_COUNT 6
+
+ erts_lcnt_ref_t lcnt_carrier;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
@@ -245,14 +251,170 @@ typedef struct erts_proc_lock_t_ {
void erts_lcnt_proc_lock_init(Process *p);
void erts_lcnt_proc_lock_destroy(Process *p);
+
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, char *file, unsigned int line);
-void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks);
+ERTS_GLB_INLINE
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res);
-void erts_lcnt_enable_proc_lock_count(int enable);
+void erts_lcnt_enable_proc_lock_count(Process *proc, int enable);
+void erts_lcnt_update_process_locks(int enable);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
+ char *file, unsigned int line) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, file, line);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_post_x_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, file, line);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_lock_unacquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_unacquire_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_unlock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+}
+
+ERTS_GLB_INLINE
+void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
+ erts_lcnt_lock_info_carrier_t *carrier;
+ int handle;
+
+ if(erts_lcnt_open_ref(&lock->lcnt_carrier, &handle, &carrier)) {
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MAIN, res);
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_LINK, res);
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_MSGQ, res);
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_BTM, res);
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_STATUS, res);
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_trylock_idx(carrier, ERTS_LCNT_PROCLOCK_IDX_TRACE, res);
+ }
+
+ erts_lcnt_close_ref(handle, carrier);
+ }
+} /* reversed logic */
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERTS_ENABLE_LOCK_COUNT*/
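The inline hooks above are only the counting side; a minimal sketch of how they are meant to bracket an acquisition (the blocking helper is hypothetical and not part of this patch):

    /* Sketch: note the attempt, take the locks, then record the result.
     * proclock_acquire_blocking() is a stand-in for the real acquisition
     * path in erl_process_lock.c. */
    static void proclock_lock_sketch(Process *p, ErtsProcLocks locks,
                                     char *file, unsigned int line)
    {
        erts_lcnt_proc_lock(&p->lock, locks);    /* record the attempt */
        proclock_acquire_blocking(p, locks);     /* hypothetical helper */
        erts_lcnt_proc_lock_post_x(&p->lock, locks, file, line);
        /* an abandoned attempt would instead call
         * erts_lcnt_proc_lock_unacquire(&p->lock, locks); */
    }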
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index c3d59cb3a8..b3bcb3af3f 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -372,7 +372,8 @@ erts_ptab_init_table(ErtsPTab *ptab,
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
+ erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
last_data_init_nob(ptab, ~((Uint64) 0));
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index cab4bd73db..96238318c9 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -161,7 +161,7 @@ enqueue_remote_managed_thread(erts_sspa_chunk_header_t *chdr,
if ((i & 1) == 0)
itmp = itmp2;
else {
- enq = (erts_sspa_blk_t *) itmp;
+ enq = (erts_sspa_blk_t *) itmp2;
itmp = erts_atomic_read_acqb(&enq->next_atmc);
ASSERT(itmp != ERTS_AINT_NULL);
}
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 181736b009..696bdbdaf1 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -128,14 +128,14 @@ ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y);
#define ERTS_SMP_HAVE_REC_MTX_INIT 1
ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx);
#endif
-ERTS_GLB_INLINE void erts_smp_mtx_init_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line);
@@ -153,18 +153,15 @@ ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd,
ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd);
ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name);
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -181,11 +178,10 @@ ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx);
ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx);
-ERTS_GLB_INLINE void erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock);
ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -194,11 +190,10 @@ ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file,
ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock);
ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -1062,34 +1057,18 @@ erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx)
#endif
ERTS_GLB_INLINE void
-erts_smp_mtx_init_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_mtx_init_x(mtx, name, extra);
+ erts_mtx_init(mtx, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked_x(erts_smp_mtx_t *mtx, char *name, Eterm extra)
+erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_mtx_init_locked_x_opt(mtx, name, extra, 0);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init(mtx, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name)
-{
-#ifdef ERTS_SMP
- erts_mtx_init_locked(mtx, name);
+ erts_mtx_init_locked(mtx, name, extra, flags);
#endif
}
@@ -1211,39 +1190,25 @@ erts_smp_rwmtx_set_reader_group(int no)
}
ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_opt_x(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt_x(rwmtx, opt, name, extra);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init_x(erts_smp_rwmtx_t *rwmtx, char *name, Eterm extra)
+erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwmtx_init_x(rwmtx, name, extra);
+ erts_smp_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
#endif
}
ERTS_GLB_INLINE void
erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
- erts_smp_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef ERTS_SMP
- erts_rwmtx_init_opt(rwmtx, opt, name);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, char *name)
+ erts_smp_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwmtx_init(rwmtx, name);
+ erts_rwmtx_init_opt(rwmtx, opt, name, extra, flags);
#endif
}
@@ -1379,20 +1344,10 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
}
ERTS_GLB_INLINE void
-erts_smp_spinlock_init_x(erts_smp_spinlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_spinlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name)
+erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_spinlock_init(lock, name);
+ erts_spinlock_init(lock, name, extra, flags);
#else
(void)lock;
#endif
@@ -1445,20 +1400,10 @@ erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock)
}
ERTS_GLB_INLINE void
-erts_smp_rwlock_init_x(erts_smp_rwlock_t *lock, char *name, Eterm extra)
-{
-#ifdef ERTS_SMP
- erts_rwlock_init_x(lock, name, extra);
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name)
+erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef ERTS_SMP
- erts_rwlock_init(lock, name);
+ erts_rwlock_init(lock, name, extra, flags);
#else
(void)lock;
#endif
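For callers, the consolidated erts_smp_* initializers replace the removed _x variants; a minimal migration sketch (the lock and its name are illustrative, not taken from this patch):

    erts_smp_mtx_t my_tab_lock;

    /* Old: erts_smp_mtx_init_x(&my_tab_lock, "my_tab", NIL);
     * New: the identifying term and the lock flags are always supplied. */
    erts_smp_mtx_init(&my_tab_lock, "my_tab", NIL,
                      ERTS_LOCK_FLAGS_PROPERTY_STATIC |
                      ERTS_LOCK_FLAGS_CATEGORY_GENERIC);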
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 700ed90def..2a9f276e02 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -321,13 +321,23 @@ tmp_thr_prgr_data(ErtsSchedulerData *esdp)
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(esdp);
if (!tpd) {
- /*
- * We only allocate the part up to the wakeup_request field
- * which is the first field only used by registered threads
- */
- tpd = erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA,
- offsetof(ErtsThrPrgrData, wakeup_request));
- init_tmp_thr_prgr_data(tpd);
+ /*
+ * We only allocate the part up to the wakeup_request field which is
+ * the first field only used by registered threads
+ */
+ size_t alloc_size = offsetof(ErtsThrPrgrData, wakeup_request);
+
+ /* We may land here as a result of unmanaged_delay being called from
+ * the lock counting module, which in turn might be called from within
+ * the allocator, so we use plain malloc to avoid deadlocks. */
+ tpd =
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ malloc(alloc_size);
+#else
+ erts_alloc(ERTS_ALC_T_T_THR_PRGR_DATA, alloc_size);
+#endif
+
+ init_tmp_thr_prgr_data(tpd);
}
return tpd;
@@ -337,8 +347,13 @@ static ERTS_INLINE void
return_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
{
if (tpd->is_temporary) {
- erts_tsd_set(erts_thr_prgr_data_key__, NULL);
- erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+ erts_tsd_set(erts_thr_prgr_data_key__, NULL);
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ free(tpd);
+#else
+ erts_free(ERTS_ALC_T_T_THR_PRGR_DATA, tpd);
+#endif
}
}
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 9612b70469..8b5c17d739 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -259,13 +259,16 @@
#include "sys.h"
+#include "erl_lock_flags.h"
+#include "erl_term.h"
+
#ifdef USE_THREADS
#define ETHR_TRY_INLINE_FUNCS
#include "ethread.h"
+
#include "erl_lock_check.h"
#include "erl_lock_count.h"
-#include "erl_term.h"
#if defined(__GLIBC__) && (__GLIBC__ << 16) + __GLIBC_MINOR__ < (2 << 16) + 4
/*
@@ -307,9 +310,11 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
-
} erts_mtx_t;
typedef ethr_cond erts_cnd_t;
@@ -320,7 +325,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwmtx_t;
@@ -365,7 +373,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_spinlock_t;
@@ -376,7 +387,10 @@ typedef struct {
erts_lc_lock_t lc;
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_t lcnt;
+ erts_lcnt_ref_t lcnt;
+#endif
+#ifdef DEBUG
+ erts_lock_flags_t flags;
#endif
} erts_rwlock_t;
@@ -479,11 +493,14 @@ ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void));
ERTS_GLB_INLINE erts_tid_t erts_thr_self(void);
ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len);
ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y);
-ERTS_GLB_INLINE void erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra);
-ERTS_GLB_INLINE void erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
-ERTS_GLB_INLINE void erts_mtx_init_locked_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt);
-ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name);
-ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name);
+ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
+ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_mtx_trylock_x(erts_mtx_t *mtx, char *file,
@@ -502,18 +519,15 @@ ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx);
ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd);
ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no);
-ERTS_GLB_INLINE void erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra);
-ERTS_GLB_INLINE void erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name);
+ erts_rwmtx_opt_t *opt,
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_init(erts_rwmtx_t *rwmtx,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx);
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE int erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line);
@@ -603,16 +617,10 @@ ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp
ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
erts_aint64_t mask,
erts_aint64_t set);
-
-ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
- char *name,
- Eterm extra,
- Uint16 opt);
-ERTS_GLB_INLINE void erts_spinlock_init_x(erts_spinlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock);
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -621,11 +629,10 @@ ERTS_GLB_INLINE void erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigne
ERTS_GLB_INLINE void erts_spin_lock(erts_spinlock_t *lock);
#endif
ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock);
-ERTS_GLB_INLINE void erts_rwlock_init_x(erts_rwlock_t *lock,
- char *name,
- Eterm extra);
ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock,
- char *name);
+ char *name,
+ Eterm extra,
+ erts_lock_flags_t flags);
ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock);
ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock);
#ifdef ERTS_ENABLE_LOCK_POSITION
@@ -2159,97 +2166,41 @@ erts_equal_tids(erts_tid_t x, erts_tid_t y)
}
ERTS_GLB_INLINE void
-erts_mtx_init_x(erts_mtx_t *mtx, char *name, Eterm extra)
+erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX, extra);
-#endif
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "initialize mutex");
+ }
-ERTS_GLB_INLINE void
-erts_mtx_init_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
-#endif
+ flags |= ERTS_LOCK_TYPE_MUTEX;
+#ifdef DEBUG
+ mtx->flags = flags;
#endif
-}
-
-ERTS_GLB_INLINE void
-erts_mtx_init_locked_x_opt(erts_mtx_t *mtx, char *name, Eterm extra, Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX | opt, extra);
-#endif
- ethr_mutex_lock(&mtx->mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
+ erts_lc_init_lock_x(&mtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
-erts_mtx_init(erts_mtx_t *mtx, char *name)
+erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
-#endif
-}
+ erts_mtx_init(mtx, name, extra, flags);
-ERTS_GLB_INLINE void
-erts_mtx_init_locked(erts_mtx_t *mtx, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_mutex_init(&mtx->mtx);
- if (res)
- erts_thr_fatal_error(res, "initialize mutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&mtx->lc, name, ERTS_LC_FLG_LT_MUTEX);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&mtx->lcnt, name, ERTS_LCNT_LT_MUTEX);
-#endif
ethr_mutex_lock(&mtx->mtx);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_trylock(1, &mtx->lc);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock(&mtx->lcnt, 1);
-#endif
+ #ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &mtx->lc);
+ #endif
+ #ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_trylock(&mtx->lcnt, 1);
+ #endif
#endif
}
@@ -2258,11 +2209,14 @@ erts_mtx_destroy(erts_mtx_t *mtx)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&mtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&mtx->lcnt);
+ erts_lcnt_uninstall(&mtx->lcnt);
#endif
res = ethr_mutex_destroy(&mtx->mtx);
if (res != 0) {
@@ -2359,7 +2313,8 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = 0;
+    lc.flags = ERTS_LOCK_TYPE_MUTEX;
+    lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2459,7 +2414,7 @@ erts_rwmtx_set_reader_group(int no)
#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_check_no_locked_of_type(ERTS_LOCK_TYPE_RWMUTEX);
#endif
res = ethr_rwmutex_set_reader_group(no);
if (res != 0)
@@ -2468,57 +2423,32 @@ erts_rwmtx_set_reader_group(int no)
}
ERTS_GLB_INLINE void
-erts_rwmtx_init_opt_x(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name,
- Eterm extra)
-{
+erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt,
+ char *name, Eterm extra, erts_lock_flags_t flags) {
#ifdef USE_THREADS
int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (name && name[0] == '\0')
- erts_lcnt_init_lock_x(&rwmtx->lcnt, NULL, ERTS_LCNT_LT_RWMUTEX, extra);
- else
- erts_lcnt_init_lock_x(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX, extra);
-#endif
-#endif
-}
+ if (res != 0) {
+ erts_thr_fatal_error(res, "initialize rwmutex");
+ }
-ERTS_GLB_INLINE void
-erts_rwmtx_init_x(erts_rwmtx_t *rwmtx,
- char *name,
- Eterm extra)
-{
- erts_rwmtx_init_opt_x(rwmtx, NULL, name, extra);
-}
+ flags |= ERTS_LOCK_TYPE_RWMUTEX;
+#ifdef DEBUG
+ rwmtx->flags = flags;
+#endif
-ERTS_GLB_INLINE void
-erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx,
- erts_rwmtx_opt_t *opt,
- char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
- if (res != 0)
- erts_thr_fatal_error(res, "initialize rwmutex");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&rwmtx->lc, name, ERTS_LC_FLG_LT_RWMUTEX);
+ erts_lc_init_lock_x(&rwmtx->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&rwmtx->lcnt, name, ERTS_LCNT_LT_RWMUTEX);
-#endif
+ erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
-erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name)
-{
- erts_rwmtx_init_opt(rwmtx, NULL, name);
+erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra,
+ erts_lock_flags_t flags) {
+ erts_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
}
ERTS_GLB_INLINE void
@@ -2526,11 +2456,14 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&rwmtx->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&rwmtx->lcnt);
+ erts_lcnt_uninstall(&rwmtx->lcnt);
#endif
res = ethr_rwmutex_destroy(&rwmtx->rwmtx);
if (res != 0) {
@@ -2558,7 +2491,7 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2567,13 +2500,13 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_READ);
#endif
return res;
@@ -2592,13 +2525,13 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_rlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2612,10 +2545,10 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_rwmutex_runlock(&rwmtx->rwmtx);
#endif
@@ -2633,7 +2566,7 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
- if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE))
+ if (erts_lc_trylock_force_busy_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR))
return EBUSY; /* Make sure caller can handle the situation without
causing a lock order violation */
#endif
@@ -2642,13 +2575,13 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_trylock_flg_x(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_trylock_flg(res == 0, &rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_trylock_opt(&rwmtx->lcnt, res, ERTS_LOCK_OPTIONS_RDWR);
#endif
return res;
@@ -2667,13 +2600,13 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwlock(&rwmtx->rwmtx);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2687,10 +2620,10 @@ erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
#endif
@@ -2728,7 +2661,8 @@ erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -2742,7 +2676,8 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = mtx->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWMUTEX;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3075,59 +3010,26 @@ erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
/* spinlock */
ERTS_GLB_INLINE void
-erts_spinlock_init_x(erts_spinlock_t *lock, char *name, Eterm extra)
+erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK, extra);
-#endif
-#else
- (void)lock;
-#endif
-}
+ if (res) {
+ erts_thr_fatal_error(res, "init spinlock");
+ }
-ERTS_GLB_INLINE void
-erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, Eterm extra,
- Uint16 opt)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK|opt, extra);
+ flags |= ERTS_LOCK_TYPE_SPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-#else
- (void)lock;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_spinlock_init(erts_spinlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_spinlock_init(&lock->slck);
- if (res)
- erts_thr_fatal_error(res, "init spinlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_SPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_SPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
@@ -3135,11 +3037,14 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_spinlock_destroy(&lock->slck);
if (res != 0) {
@@ -3207,7 +3112,8 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = 0;
+ lc.flags = ERTS_LOCK_TYPE_SPINLOCK;
+ lc.taken_options = 0;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3218,39 +3124,26 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
/* rwspinlock */
ERTS_GLB_INLINE void
-erts_rwlock_init_x(erts_rwlock_t *lock, char *name, Eterm extra)
+erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
#ifdef USE_THREADS
int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock_x(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK, extra);
-#endif
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock_x(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK, extra);
-#endif
-#else
- (void)lock;
+ if (res) {
+ erts_thr_fatal_error(res, "init rwlock");
+ }
+
+ flags |= ERTS_LOCK_TYPE_RWSPINLOCK;
+#ifdef DEBUG
+ lock->flags = flags;
#endif
-}
-ERTS_GLB_INLINE void
-erts_rwlock_init(erts_rwlock_t *lock, char *name)
-{
-#ifdef USE_THREADS
- int res = ethr_rwlock_init(&lock->rwlck);
- if (res)
- erts_thr_fatal_error(res, "init rwlock");
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_init_lock(&lock->lc, name, ERTS_LC_FLG_LT_RWSPINLOCK);
+ erts_lc_init_lock_x(&lock->lc, name, flags, extra);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_init_lock(&lock->lcnt, name, ERTS_LCNT_LT_RWSPINLOCK);
-#endif
-#else
- (void)lock;
+ erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
+#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
@@ -3258,11 +3151,14 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
int res;
+
+ ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_destroy_lock(&lock->lc);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_destroy_lock(&lock->lcnt);
+ erts_lcnt_uninstall(&lock->lcnt);
#endif
res = ethr_rwlock_destroy(&lock->rwlck);
if (res != 0) {
@@ -3286,10 +3182,10 @@ erts_read_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_unlock(&lock->rwlck);
#else
@@ -3307,13 +3203,13 @@ erts_read_lock(erts_rwlock_t *lock)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_READ,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ);
#endif
ethr_read_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -3329,10 +3225,10 @@ erts_write_unlock(erts_rwlock_t *lock)
{
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_unlock(&lock->rwlck);
#else
@@ -3350,13 +3246,13 @@ erts_write_lock(erts_rwlock_t *lock)
#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
- erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
+ erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_RDWR,file,line);
#else
- erts_lc_lock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
+ erts_lc_lock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR);
#endif
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_lock_opt(&lock->lcnt, ERTS_LCNT_LO_READ_WRITE);
+ erts_lcnt_lock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR);
#endif
ethr_write_lock(&lock->rwlck);
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -3373,7 +3269,8 @@ erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_READ;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
@@ -3387,7 +3284,8 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK)
int res;
erts_lc_lock_t lc = lock->lc;
- lc.flags = ERTS_LC_FLG_LO_READ|ERTS_LC_FLG_LO_WRITE;
+ lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK;
+ lc.taken_options = ERTS_LOCK_OPTIONS_RDWR;
erts_lc_have_locks(&res, &lc, 1);
return res;
#else
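The DEBUG-only flags field added above lets erts_mtx_destroy() and friends assert that statically initialized locks are never torn down; a minimal sketch of the intended usage (the structure and its fields are illustrative):

    /* A short-lived lock: tagged with an identifying term and a category,
     * but not ERTS_LOCK_FLAGS_PROPERTY_STATIC, so destroying it later
     * passes the ASSERT in erts_mtx_destroy(). */
    erts_mtx_init(&obj->mtx, "my_obj_lock", obj->id,
                  ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
    /* ... use the lock ... */
    erts_mtx_destroy(&obj->mtx);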
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 3084a8db75..f6bb52dde1 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -954,8 +954,10 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
- erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
- erts_smp_mtx_init(&erts_get_time_mtx, "get_time");
+ erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
+ erts_smp_mtx_init(&erts_get_time_mtx, "get_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
time_sup.r.o.correction = time_correction;
time_sup.r.o.warp_mode = time_warp_mode;
@@ -1120,8 +1122,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx,
- &rwmtx_opts, "get_corrected_time");
+ erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts,
+ "get_corrected_time", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
cdatap = &time_sup.inf.c.parmon.cdata;
@@ -1286,56 +1289,62 @@ erts_finalize_time_offset(void)
/* info functions */
void
-elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff)
+elapsed_time_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff, ErtsMonotonicTime *ms_sys_diff)
{
- UWord prev_total_user, prev_total_sys;
- UWord total_user, total_sys;
+ ErtsMonotonicTime prev_total_user, prev_total_sys;
+ ErtsMonotonicTime total_user, total_sys;
SysTimes now;
sys_times(&now);
- total_user = (now.tms_utime * 1000) / SYS_CLK_TCK;
- total_sys = (now.tms_stime * 1000) / SYS_CLK_TCK;
+ total_user = (ErtsMonotonicTime) ((now.tms_utime * 1000) / SYS_CLK_TCK);
+ total_sys = (ErtsMonotonicTime) ((now.tms_stime * 1000) / SYS_CLK_TCK);
if (ms_user != NULL)
*ms_user = total_user;
if (ms_sys != NULL)
*ms_sys = total_sys;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
+ if (ms_user_diff || ms_sys_diff) {
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
- prev_total_user = (t_start.tms_utime * 1000) / SYS_CLK_TCK;
- prev_total_sys = (t_start.tms_stime * 1000) / SYS_CLK_TCK;
- t_start = now;
+ prev_total_user = (ErtsMonotonicTime) ((t_start.tms_utime * 1000) / SYS_CLK_TCK);
+ prev_total_sys = (ErtsMonotonicTime) ((t_start.tms_stime * 1000) / SYS_CLK_TCK);
+ t_start = now;
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
- if (ms_user_diff != NULL)
- *ms_user_diff = total_user - prev_total_user;
+ if (ms_user_diff != NULL)
+ *ms_user_diff = total_user - prev_total_user;
- if (ms_sys_diff != NULL)
- *ms_sys_diff = total_sys - prev_total_sys;
+ if (ms_sys_diff != NULL)
+ *ms_sys_diff = total_sys - prev_total_sys;
+ }
}
/* wall clock routines */
void
-wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
+wall_clock_elapsed_time_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_diff)
{
ErtsMonotonicTime now, elapsed;
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
now = time_sup.r.o.get_time();
update_last_mtime(NULL, now);
elapsed = ERTS_MONOTONIC_TO_MSEC(now);
- *ms_total = (UWord) elapsed;
- *ms_diff = (UWord) (elapsed - prev_wall_clock_elapsed);
- prev_wall_clock_elapsed = elapsed;
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ *ms_total = elapsed;
+
+ if (ms_diff) {
+ erts_smp_mtx_lock(&erts_timeofday_mtx);
+
+ *ms_diff = elapsed - prev_wall_clock_elapsed;
+ prev_wall_clock_elapsed = elapsed;
+
+ erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ }
}
/* get current time */
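With the reworked locking above, callers that only want the running totals skip the erts_timeofday_mtx round-trip entirely; for example:

    ErtsMonotonicTime user, sys;

    /* NULL diff pointers: no mutex is taken, only the totals are read. */
    elapsed_time_both(&user, &sys, NULL, NULL);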
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 4b06c55770..db7d0ac449 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -336,7 +336,8 @@ void erts_init_trace(void) {
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers");
+ erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
#ifdef HAVE_ERTS_NOW_CPU
erts_cpu_timestamp = 0;
@@ -2625,7 +2626,8 @@ init_sys_msg_dispatcher(void)
sys_message_queue = NULL;
sys_message_queue_end = NULL;
erts_smp_cnd_init(&smq_cnd);
- erts_smp_mtx_init(&smq_mtx, "sys_msg_q");
+ erts_smp_mtx_init(&smq_mtx, "sys_msg_q", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_smp_thr_create(&sys_msg_dispatcher_tid,
sys_msg_dispatcher_func,
NULL,
@@ -3185,7 +3187,9 @@ static void init_tracer_nif()
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx");
+
+ erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
erts_tracer_nif_clear();
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 07cf4f6903..3d28b05752 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -131,6 +131,7 @@ Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+#define erts_bld_monotonic_time erts_bld_sint64
Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
#define erts_bld_tuple2(H,S,E1,E2) erts_bld_tuple(H,S,2,E1,E2)
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 57f5ba5436..828c833ffc 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -182,7 +182,8 @@ init_export_table(void)
HashFunctions f;
int i;
- erts_smp_mtx_init(&export_staging_lock, "export_tab");
+ erts_smp_mtx_init(&export_staging_lock, "export_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
erts_smp_atomic_init_nob(&total_entries_bytes, 0);
f.hash = (H_FUN) export_hash;
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index fc95535ec3..2105ee7a6c 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1183,7 +1183,8 @@ void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon);
Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
-void erts_lcnt_enable_io_lock_count(int enable);
+void erts_lcnt_update_driver_locks(int enable);
+void erts_lcnt_update_port_locks(int enable);
#endif
/* driver_tab.c */
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index d25e53ada0..b609f6de39 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -258,14 +258,7 @@ static ERTS_INLINE void port_init_instr(Port *prt
#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
- char *lock_str = "port_lock";
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Uint16 opt = ((erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)
- ? 0 : ERTS_LCNT_LT_DISABLE);
-#else
- Uint16 opt = 0;
-#endif
- erts_mtx_init_locked_x_opt(prt->lock, lock_str, id, opt);
+ erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
#endif
erts_port_task_init_sched(&prt->sched, id);
@@ -1968,7 +1961,6 @@ int
erts_port_output_async(Port *prt, Eterm from, Eterm list)
{
- ErtsPortOpResult res;
ErtsProc2PortSigData *sigdp;
erts_driver_t *drv = prt->drv_ptr;
size_t size;
@@ -2102,26 +2094,18 @@ erts_port_output_async(Port *prt, Eterm from, Eterm list)
sigdp->u.output.size = size;
port_sig_callback = port_sig_output;
}
- sigdp->flags = 0;
ns_pthp = NULL;
task_flags = 0;
- res = erts_schedule_proc2port_signal(NULL,
- prt,
- ERTS_INVALID_PID,
- NULL,
- sigdp,
- task_flags,
- ns_pthp,
- port_sig_callback);
+ erts_schedule_proc2port_signal(NULL,
+ prt,
+ ERTS_INVALID_PID,
+ NULL,
+ sigdp,
+ task_flags,
+ ns_pthp,
+ port_sig_callback);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
- return 1;
- }
return 1;
bad_value:
@@ -2554,10 +2538,6 @@ erts_port_output(Process *c_p,
port_sig_callback);
if (res != ERTS_PORT_OP_SCHEDULED) {
- if (drv->outputv)
- cleanup_scheduled_outputv(evp, cbin);
- else
- cleanup_scheduled_output(buf);
return res;
}
@@ -2736,21 +2716,14 @@ erts_port_exit(Process *c_p,
&bp->off_heap);
}
- res = erts_schedule_proc2port_signal(c_p,
- prt,
- c_p ? c_p->common.id : from,
- refp,
- sigdp,
- 0,
- NULL,
- port_sig_exit);
-
- if (res == ERTS_PORT_OP_DROPPED) {
- if (bp)
- free_message_buffer(bp);
- }
-
- return res;
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ NULL,
+ port_sig_exit);
}
static ErtsPortOpResult
@@ -3419,9 +3392,8 @@ void erts_init_io(int port_tab_size,
else if (port_tab_size < ERTS_MIN_PORTS)
port_tab_size = ERTS_MIN_PORTS;
- erts_smp_rwmtx_init_opt(&erts_driver_list_lock,
- &drv_list_rwmtx_opts,
- "driver_list");
+ erts_smp_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
driver_list = NULL;
erts_smp_tsd_key_create(&driver_list_lock_status_key,
"erts_driver_list_lock_status_key");
@@ -3458,67 +3430,94 @@ void erts_init_io(int port_tab_size,
}
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
-static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable)
{
if (dp->lock) {
- if (enable)
- erts_lcnt_init_lock_x(&dp->lock->lcnt,
- "driver_lock",
- ERTS_LCNT_LT_MUTEX,
- erts_atom_put((byte*)dp->name,
- sys_strlen(dp->name),
- ERTS_ATOM_ENC_LATIN1,
- 1));
-
- else
- erts_lcnt_destroy_lock(&dp->lock->lcnt);
+ if (enable) {
+ Eterm name_as_atom = erts_atom_put((byte*)dp->name, sys_strlen(dp->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+ erts_lcnt_install_new_lock_info(&dp->lock->lcnt, "driver_lock", name_as_atom,
+ ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ } else {
+ erts_lcnt_uninstall(&dp->lock->lcnt);
+ }
}
}
-static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+static void lcnt_enable_port_lock_count(Port *prt, int enable)
{
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
- if (!enable) {
- erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_destroy_lock(&prt->lock->lcnt);
+
+ if(enable) {
+ ErlDrvPDL pdl = prt->port_data_lock;
+
+ erts_lcnt_install_new_lock_info(&prt->sched.mtx.lcnt, "port_sched_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+
+ if(pdl) {
+ erts_lcnt_install_new_lock_info(&pdl->mtx.lcnt, "port_data_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_install_new_lock_info(&prt->lock->lcnt, "port_lock",
+ prt->common.id, ERTS_LOCK_TYPE_MUTEX | ERTS_LOCK_FLAGS_CATEGORY_IO);
+ }
+ } else {
+ erts_lcnt_uninstall(&prt->sched.mtx.lcnt);
+
+ if(prt->port_data_lock) {
+ erts_lcnt_uninstall(&prt->port_data_lock->mtx.lcnt);
+ }
+
+ if(state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ erts_lcnt_uninstall(&prt->lock->lcnt);
+ }
}
- else {
- erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
- "port_sched_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
- if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
- erts_lcnt_init_lock_x(&prt->lock->lcnt,
- "port_lock",
- ERTS_LCNT_LT_MUTEX,
- prt->common.id);
+}
+
+void erts_lcnt_update_driver_locks(int enable) {
+ erts_driver_t *driver;
+
+ lcnt_enable_driver_lock_count(&vanilla_driver, enable);
+ lcnt_enable_driver_lock_count(&spawn_driver, enable);
+#ifndef __WIN32__
+ lcnt_enable_driver_lock_count(&forker_driver, enable);
+#endif
+ lcnt_enable_driver_lock_count(&fd_driver, enable);
+
+ erts_rwmtx_rlock(&erts_driver_list_lock);
+
+ for (driver = driver_list; driver; driver = driver->next) {
+ lcnt_enable_driver_lock_count(driver, enable);
}
+
+ erts_rwmtx_runlock(&erts_driver_list_lock);
}
-void erts_lcnt_enable_io_lock_count(int enable) {
- erts_driver_t *dp;
- int ix, max = erts_ptab_max(&erts_port);
- Port *prt;
+void erts_lcnt_update_port_locks(int enable) {
+ int i, max;
- for (ix = 0; ix < max; ix++) {
- if ((prt = erts_pix2port(ix)) != NULL) {
- lcnt_enable_port_lock_count(prt, enable);
+ max = erts_ptab_max(&erts_port);
+
+ for(i = 0; i < max; i++) {
+ int delay_handle;
+ Port *port;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+ port = erts_pix2port(i);
+
+ if(port != NULL) {
+ lcnt_enable_port_lock_count(port, enable);
}
- } /* for all ports */
- lcnt_enable_drv_lock_count(&vanilla_driver, enable);
- lcnt_enable_drv_lock_count(&spawn_driver, enable);
-#ifndef __WIN32__
- lcnt_enable_drv_lock_count(&forker_driver, enable);
-#endif
- lcnt_enable_drv_lock_count(&fd_driver, enable);
- /* enable lock counting in all drivers */
- for (dp = driver_list; dp; dp = dp->next) {
- lcnt_enable_drv_lock_count(dp, enable);
+ if(delay_handle != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ }
}
-} /* enable/disable lock counting of ports */
+}
+
#endif /* defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) */
/*
* Buffering of data when using line oriented I/O on ports
@@ -3701,7 +3700,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
ERTS_SMP_CHK_NO_PROC_LOCKS;
ASSERT(!prt || prt->common.id == sender);
-#ifdef ERTS_SMP
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ASSERT(!prt || erts_lc_is_port_locked(prt));
#endif
@@ -4930,10 +4929,9 @@ erts_port_control(Process* c_p,
0,
NULL,
port_sig_control);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_control(binp, bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -5223,10 +5221,9 @@ erts_port_call(Process* c_p,
0,
NULL,
port_sig_call);
- if (res != ERTS_PORT_OP_SCHEDULED) {
- cleanup_scheduled_call(bufp);
+ if (res != ERTS_PORT_OP_SCHEDULED)
return ERTS_PORT_OP_BADARG;
- }
+
return res;
}
@@ -7093,7 +7090,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id);
+ erts_mtx_init(&pdl->mtx, "port_data_lock", pp->common.id, ERTS_LOCK_FLAGS_CATEGORY_IO);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -8260,22 +8257,16 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->flags = de->driver_flags;
drv->handle = handle;
#ifdef ERTS_SMP
- if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING)
- drv->lock = NULL;
- else {
- drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
- sizeof(erts_mtx_t));
- erts_mtx_init_x(drv->lock,
- "driver_lock",
-#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- erts_atom_put((byte *) drv->name,
- sys_strlen(drv->name),
- ERTS_ATOM_ENC_LATIN1,
- 1)
-#else
- NIL
-#endif
- );
+ if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) {
+ drv->lock = NULL;
+ } else {
+ Eterm driver_id = erts_atom_put((byte *) drv->name,
+ sys_strlen(drv->name),
+ ERTS_ATOM_ENC_LATIN1, 1);
+
+ drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, sizeof(erts_mtx_t));
+
+ erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
#endif
drv->entry = de;
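The split update functions above are the new entry points for toggling I/O lock counting at runtime; a minimal sketch of how a caller (for instance the lock-counting control code) is expected to use them:

    #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
        erts_lcnt_update_driver_locks(1);  /* install counters on all drivers */
        erts_lcnt_update_port_locks(1);    /* ...and on all live ports */
    #endif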
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index 8ab6c713d6..7987cb2eb5 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -120,7 +120,8 @@ void init_module_table(void)
}
for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i));
+ erts_smp_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i),
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
erts_smp_atomic_init_nob(&tot_module_bytes, 0);
}
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index cdf9cb58b9..44613c7d85 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -1174,9 +1174,9 @@ bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action
%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action
-i_bs_get_binary_imm2 f x I I I d
-i_bs_get_binary2 f x I s I d
-i_bs_get_binary_all2 f x I I d
+i_bs_get_binary_imm2 f x I I I x
+i_bs_get_binary2 f x I s I x
+i_bs_get_binary_all2 f x I I x
i_bs_get_binary_all_reuse x f I
# Fetching float from binaries.
@@ -1186,7 +1186,7 @@ bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
%macro: i_bs_get_float2 BsGetFloat2 -fail_action
-i_bs_get_float2 f x I s I d
+i_bs_get_float2 f x I s I x
# Miscellanous
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 7f60710124..bf3267cff1 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -145,7 +145,8 @@ void init_register_table(void)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab");
+ erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab", NIL,
+ ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
f.hash = (H_FUN) reg_hash;
f.cmp = (HCMP_FUN) reg_cmp;
diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 30b26a7296..527c9efeca 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -155,7 +155,8 @@ int safe_hash_table_sz(SafeHash *h)
** Init a pre allocated or static hash structure
** and allocate buckets. NOT SAFE
*/
-SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
+SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_flags_t flags,
+ int size, SafeHashFunctions fun)
{
int i, bytes;
@@ -170,7 +171,8 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size,
erts_smp_atomic_init_nob(&h->is_rehashing, 0);
erts_smp_atomic_init_nob(&h->nitems, 0);
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
- erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
+ erts_smp_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL,
+ flags);
}
return h;
}
@@ -273,5 +275,22 @@ void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_ar
}
}
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int enable) {
+ int i;
+
+ for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) {
+ erts_smp_mtx_t *lock = &h->lock_vec[i].mtx;
+
+ if(enable) {
+ erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL,
+ ERTS_LOCK_TYPE_MUTEX | flags);
+ } else {
+ erts_lcnt_uninstall(&lock->lcnt);
+ }
+ }
+}
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+
#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h
index 285103cb17..dde48a6de8 100644
--- a/erts/emulator/beam/safe_hash.h
+++ b/erts/emulator/beam/safe_hash.h
@@ -28,6 +28,7 @@
#include "sys.h"
#include "erl_alloc.h"
+#include "erl_lock_flags.h"
typedef unsigned long SafeHashValue;
@@ -85,7 +86,7 @@ typedef struct
/* A: Lockless atomics */
} SafeHash;
-SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, int, SafeHashFunctions);
+SafeHash* safe_hash_init(ErtsAlcType_t, SafeHash*, char*, erts_lock_flags_t, int, SafeHashFunctions);
void safe_hash_get_info(SafeHashInfo*, SafeHash*);
int safe_hash_table_sz(SafeHash *);
@@ -96,5 +97,9 @@ void* safe_hash_erase(SafeHash*, void*);
void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+void erts_lcnt_enable_hash_lock_count(SafeHash*, erts_lock_flags_t, int);
+#endif
+
#endif /* __SAFE_HASH_H__ */
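Callers of safe_hash_init() now pass the lock category up front and can toggle counting later through the new helper; a minimal sketch (the allocator type, table size and function table are illustrative placeholders):

    SafeHash my_tab;

    safe_hash_init(ERTS_ALC_T_UNDEF, &my_tab, "my_tab",
                   ERTS_LOCK_FLAGS_CATEGORY_GENERIC, 256, my_funs);
    #ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_enable_hash_lock_count(&my_tab, ERTS_LOCK_FLAGS_CATEGORY_GENERIC, 1);
    #endif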
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index d752ea4330..b6c77794d2 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -784,10 +784,10 @@ Preload* sys_preloaded(void);
unsigned char* sys_preload_begin(Preload*);
void sys_preload_end(Preload*);
int sys_get_key(int);
-void elapsed_time_both(UWord *ms_user, UWord *ms_sys,
- UWord *ms_user_diff, UWord *ms_sys_diff);
-void wall_clock_elapsed_time_both(UWord *ms_total,
- UWord *ms_diff);
+void elapsed_time_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys,
+ ErtsMonotonicTime *ms_user_diff, ErtsMonotonicTime *ms_sys_diff);
+void wall_clock_elapsed_time_both(ErtsMonotonicTime *ms_total,
+ ErtsMonotonicTime *ms_diff);
void get_time(int *hour, int *minute, int *second);
void get_date(int *year, int *month, int *day);
void get_localtime(int *year, int *month, int *day,
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 457cada745..0fb25c2082 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -42,6 +42,7 @@
#include "dist.h"
#include "erl_printf.h"
#include "erl_threads.h"
+#include "erl_lock_count.h"
#include "erl_smp.h"
#include "erl_time.h"
#include "erl_thr_progress.h"