Diffstat (limited to 'erts/emulator/beam/erl_lock_count.c')

-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 832
1 file changed, 352 insertions(+), 480 deletions(-)
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index aee9796171..2cf59aa367 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2008-2016. All Rights Reserved.
+ * Copyright Ericsson AB 2008-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,51 +18,30 @@
* %CopyrightEnd%
*/
-/*
- * Description: Statistics for locks.
- *
- * Author: Björn-Egil Dahlberg
- * Date: 2008-07-03
- */
-
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
-/* Needed for VxWorks va_arg */
-#include "sys.h"
-
#ifdef ERTS_ENABLE_LOCK_COUNT
+#include "sys.h"
+
#include "erl_lock_count.h"
-#include "ethread.h"
-#include "erl_term.h"
-#include "atom.h"
-#include <stdio.h>
+#include "erl_thr_progress.h"
-/* globals, dont access these without locks or blocks */
+#define LCNT_MAX_CARRIER_ENTRIES 255
-ethr_mutex lcnt_data_lock;
-erts_lcnt_data_t *erts_lcnt_data;
-Uint16 erts_lcnt_rt_options;
-erts_lcnt_time_t timer_start;
-const char *str_undefined = "undefined";
+/* - Exported global - */
-static ethr_tsd_key lcnt_thr_data_key;
-static int lcnt_n_thr;
-static erts_lcnt_thread_data_t *lcnt_thread_data[2048];
+Uint16 erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK | ERTS_LCNT_OPT_LOCATION;
-/* local functions */
+/* - Locals that are shared with the header implementation - */
-static ERTS_INLINE void lcnt_lock(void) {
- ethr_mutex_lock(&lcnt_data_lock);
-}
+int lcnt_initialization_completed__ = 0;
-static ERTS_INLINE void lcnt_unlock(void) {
- ethr_mutex_unlock(&lcnt_data_lock);
-}
+ethr_tsd_key lcnt_thr_data_key__;
-const int log2_tab64[64] = {
+const int lcnt_log2_tab64__[64] = {
63, 0, 58, 1, 59, 47, 53, 2,
60, 39, 48, 27, 54, 33, 42, 3,
61, 51, 37, 40, 49, 18, 28, 20,
@@ -72,99 +51,52 @@ const int log2_tab64[64] = {
56, 45, 25, 31, 35, 16, 9, 12,
44, 24, 15, 8, 23, 7, 6, 5};
-static ERTS_INLINE int lcnt_log2(Uint64 v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
- return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
-}
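(The removed helper presumably moves into the header implementation; its lookup table is kept above under the new name lcnt_log2_tab64__.) This is the classic de Bruijn-style branch-free floor(log2): the shift cascade smears the highest set bit into every lower position, v - (v >> 1) then isolates that bit, and multiplying by the magic constant places a unique 6-bit pattern in the top bits, which indexes the table. A standalone sketch of the same technique, for reference (plain C, illustrative only):

    #include <stdint.h>

    /* Same 64-entry table as lcnt_log2_tab64__ above. */
    static const int log2_tab64[64] = {
        63,  0, 58,  1, 59, 47, 53,  2,
        60, 39, 48, 27, 54, 33, 42,  3,
        61, 51, 37, 40, 49, 18, 28, 20,
        55, 30, 34, 11, 43, 14, 22,  4,
        62, 57, 46, 52, 38, 26, 32, 41,
        50, 36, 17, 19, 29, 10, 13, 21,
        56, 45, 25, 31, 35, 16,  9, 12,
        44, 24, 15,  8, 23,  7,  6,  5};

    static int log2_64(uint64_t v) {
        /* Smear the highest set bit into every lower position... */
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16; v |= v >> 32;
        /* ...isolate it, then hash it to a unique 6-bit table index. */
        return log2_tab64[((v - (v >> 1)) * 0x07EDD5E59A4E28C2ull) >> 58];
    }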
-
-static char* lcnt_lock_type(Uint16 flag) {
- switch(flag & ERTS_LCNT_LT_ALL) {
- case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
- case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
- case ERTS_LCNT_LT_MUTEX: return "mutex";
- case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
- case ERTS_LCNT_LT_PROCLOCK: return "proclock";
- default: return "";
- }
-}
+/* - Local variables - */
-static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
- ethr_atomic_set(&stats->tries, 0);
- ethr_atomic_set(&stats->colls, 0);
- stats->timer.s = 0;
- stats->timer.ns = 0;
- stats->timer_n = 0;
- stats->file = (char *)str_undefined;
- stats->line = 0;
- sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
-}
+static erts_lcnt_lock_info_list_t lcnt_current_lock_list;
+static erts_lcnt_lock_info_list_t lcnt_deleted_lock_list;
-static void lcnt_time(erts_lcnt_time_t *time) {
- /*
- * erts_sys_hrtime() is the highest resolution
- * we could find, it may or may not be monotonic...
- */
- ErtsMonotonicTime mtime = erts_sys_hrtime();
- time->s = (unsigned long) (mtime / 1000000000LL);
- time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
-}
+static erts_lcnt_time_t lcnt_timer_start;
-static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
- long ds;
- long dns;
+/* local functions */
- ds = t1->s - t0->s;
- dns = t1->ns - t0->ns;
+static void lcnt_clear_stats(erts_lcnt_lock_info_t *info) {
+ size_t i;
- /* the difference should not be able to get bigger than 1 sec in ns*/
+ for(i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
+ erts_lcnt_lock_stats_t *stats = &info->location_stats[i];
- if (dns < 0) {
- ds -= 1;
- dns += 1000000000LL;
- }
+ sys_memzero(&stats->wait_time_histogram, sizeof(stats->wait_time_histogram));
- ASSERT(ds >= 0);
+ stats->total_time_waited.s = 0;
+ stats->total_time_waited.ns = 0;
- d->s = ds;
- d->ns = dns;
-}
+ stats->times_waited = 0;
-/* difference d must be non-negative */
+ stats->file = NULL;
+ stats->line = 0;
-static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- t->s += d->s;
- t->ns += d->ns;
+ ethr_atomic_set(&stats->attempts, 0);
+ ethr_atomic_set(&stats->collisions, 0);
+ }
- t->s += t->ns / 1000000000LL;
- t->ns = t->ns % 1000000000LL;
+ info->location_count = 1;
}
-static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
- erts_lcnt_thread_data_t *eltd;
+static lcnt_thread_data_t__ *lcnt_thread_data_alloc(void) {
+ lcnt_thread_data_t__ *eltd =
+ (lcnt_thread_data_t__*)malloc(sizeof(lcnt_thread_data_t__));
- eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
- if (!eltd) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ if(!eltd) {
+ ERTS_INTERNAL_ERROR("Failed to allocate lcnt thread data.");
}
+
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
- eltd->id = lcnt_n_thr++;
- /* set thread data to array */
- lcnt_thread_data[eltd->id] = eltd;
-
return eltd;
}
-static erts_lcnt_thread_data_t *lcnt_get_thread_data(void) {
- return (erts_lcnt_thread_data_t *)ethr_tsd_get(lcnt_thr_data_key);
-}
-
/* debug */
#if 0
@@ -175,472 +107,391 @@ static char* lock_opt(Uint16 flag) {
return "--";
}
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
- erts_aint_t w_state, r_state;
+static void print_lock_x(erts_lcnt_lock_info_t *info, Uint16 flag, char *action) {
+ ethr_sint_t w_state, r_state;
char *type;
- if (strcmp(lock->name, "run_queue") != 0) return;
- type = lcnt_lock_type(lock->flag);
- r_state = ethr_atomic_read(&lock->r_state);
- w_state = ethr_atomic_read(&lock->w_state);
+ if (strcmp(info->name, "run_queue") != 0) return;
+ type = erts_lcnt_lock_type(info->flag);
+ r_state = ethr_atomic_read(&info->r_state);
+ w_state = ethr_atomic_read(&info->w_state);
- if (lock->flag & flag) {
+ if (info->flag & flag) {
erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
action,
- lock->name,
+ info->name,
r_state,
w_state,
type,
- lock->id);
+ info->id);
}
}
#endif
-static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- unsigned int i;
- erts_lcnt_lock_stats_t *stats = NULL;
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
- }
- }
- return &lock->stats[0];
+/* - List operations -
+ *
+ * Info entries are kept in a doubly linked list where each entry is locked
+ * with its neighbors rather than a global lock. Deletion is rather quick, but
+ * insertion is still serial since the head becomes a de facto global lock.
+ *
+ * We rely on ad-hoc spinlocks to avoid "recursing" into this module. */
+
+#define LCNT_SPINLOCK_YIELD_ITERATIONS 50
+
+#define LCNT_SPINLOCK_HELPER_INIT \
+ Uint failed_spin_count = 0;
+
+#define LCNT_SPINLOCK_HELPER_YIELD \
+ do { \
+ failed_spin_count++; \
+ if(!(failed_spin_count % LCNT_SPINLOCK_YIELD_ITERATIONS)) { \
+ erts_thr_yield(); \
+ } else { \
+ ERTS_SPIN_BODY; \
+ } \
+ } while(0)
+
+static void lcnt_unlock_list_entry(erts_lcnt_lock_info_t *info) {
+ ethr_atomic32_set_relb(&info->lock, 0);
}
-static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
- int idx;
- unsigned long r;
+static int lcnt_try_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ return ethr_atomic32_cmpxchg_acqb(&info->lock, 1, 0) == 0;
+}
- if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
- idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
- } else {
- r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
- if (r) idx = lcnt_log2(r);
- else idx = 0;
+static void lcnt_lock_list_entry(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
+
+ while(!lcnt_try_lock_list_entry(info)) {
+ LCNT_SPINLOCK_HELPER_YIELD;
}
- hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
- erts_lcnt_time_t *time_wait) {
+static void lcnt_lock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ LCNT_SPINLOCK_HELPER_INIT;
- ethr_atomic_inc(&stats->tries);
+ for(;;) {
+ if(!lcnt_try_lock_list_entry(info))
+ goto retry_after_entry_failed;
+ if(!lcnt_try_lock_list_entry(info->next))
+ goto retry_after_next_failed;
+ if(!lcnt_try_lock_list_entry(info->prev))
+ goto retry_after_prev_failed;
- if (lock_in_conflict)
- ethr_atomic_inc(&stats->colls);
+ return;
- if (time_wait) {
- lcnt_time_add(&(stats->timer), time_wait);
- stats->timer_n++;
- lcnt_update_stats_hist(&stats->hist,time_wait);
+ retry_after_prev_failed:
+ lcnt_unlock_list_entry(info->next);
+ retry_after_next_failed:
+ lcnt_unlock_list_entry(info);
+ retry_after_entry_failed:
+ LCNT_SPINLOCK_HELPER_YIELD;
}
}
-/* interface */
+static void lcnt_unlock_list_entry_with_neighbors(erts_lcnt_lock_info_t *info) {
+ lcnt_unlock_list_entry(info->prev);
+ lcnt_unlock_list_entry(info->next);
+ lcnt_unlock_list_entry(info);
+}
-void erts_lcnt_init() {
- erts_lcnt_thread_data_t *eltd = NULL;
+static void lcnt_insert_list_entry(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t *info) {
+ erts_lcnt_lock_info_t *next, *prev;
- /* init lock */
- if (ethr_mutex_init(&lcnt_data_lock) != 0) abort();
+ prev = &list->head;
- /* init tsd */
- lcnt_n_thr = 0;
- ethr_tsd_key_create(&lcnt_thr_data_key, "lcnt_data");
+ lcnt_lock_list_entry(prev);
- lcnt_lock();
+ next = prev->next;
- erts_lcnt_rt_options = ERTS_LCNT_OPT_LOCATION | ERTS_LCNT_OPT_PROCLOCK;
- eltd = lcnt_thread_data_alloc();
- ethr_tsd_set(lcnt_thr_data_key, eltd);
+ lcnt_lock_list_entry(next);
- /* init lcnt structure */
- erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
- if (!erts_lcnt_data) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- erts_lcnt_data->current_locks = erts_lcnt_list_init();
- erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
+ info->next = next;
+ info->prev = prev;
- lcnt_unlock();
+ prev->next = info;
+ next->prev = info;
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
-void erts_lcnt_late_init() {
- /* set start timer and zero statistics */
- erts_lcnt_clear_counters();
- erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
-}
+static void lcnt_insert_list_carrier(erts_lcnt_lock_info_list_t *list,
+ erts_lcnt_lock_info_carrier_t *carrier) {
+ erts_lcnt_lock_info_t *next, *prev;
+ size_t i;
-/* list operations */
+ for(i = 0; i < carrier->entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &carrier->entries[i];
-/* BEGIN ASSUMPTION: lcnt_data_lock taken */
+ info->prev = &carrier->entries[i - 1];
+ info->next = &carrier->entries[i + 1];
+ }
-erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
- erts_lcnt_lock_list_t *list;
+ prev = &list->head;
- list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
- if (!list) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- list->head = NULL;
- list->tail = NULL;
- list->n = 0;
- return list;
-}
+ lcnt_lock_list_entry(prev);
-static void lcnt_list_free(erts_lcnt_lock_t *head) {
- erts_lcnt_lock_t *lock, *next;
+ next = prev->next;
- lock = head;
+ lcnt_lock_list_entry(next);
- while(lock != NULL) {
- next = lock->next;
- free(lock);
- lock = next;
- }
+ next->prev = &carrier->entries[carrier->entry_count - 1];
+ carrier->entries[carrier->entry_count - 1].next = next;
+
+ prev->next = &carrier->entries[0];
+ carrier->entries[0].prev = prev;
+
+ lcnt_unlock_list_entry(next);
+ lcnt_unlock_list_entry(prev);
}
-void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_t *tail = NULL;
+static void lcnt_init_list(erts_lcnt_lock_info_list_t *list) {
+ /* Ensure that ref_count operations explode when touching the sentinels in
+ * DEBUG mode. */
+ ethr_atomic_init(&(list->head.ref_count), -1);
+ ethr_atomic_init(&(list->tail.ref_count), -1);
- tail = list->tail;
- if (tail) {
- tail->next = lock;
- lock->prev = tail;
- } else {
- list->head = lock;
- lock->prev = NULL;
- ASSERT(!lock->next);
- }
- lock->next = NULL;
- list->tail = lock;
+ ethr_atomic32_init(&(list->head.lock), 0);
+ (list->head).next = &list->tail;
+ (list->head).prev = &list->tail;
- list->n++;
+ ethr_atomic32_init(&(list->tail.lock), 0);
+ (list->tail).next = &list->head;
+ (list->tail).prev = &list->head;
}
-void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) {
- if (lock->next) lock->next->prev = lock->prev;
- if (lock->prev) lock->prev->next = lock->next;
- if (list->head == lock) list->head = lock->next;
- if (list->tail == lock) list->tail = lock->prev;
+/* - Carrier operations - */
- lock->prev = NULL;
- lock->next = NULL;
- list->n--;
+int lcnt_thr_progress_unmanaged_delay__(void) {
+ return erts_thr_progress_unmanaged_delay();
}
-/* END ASSUMPTION: lcnt_data_lock taken */
+void lcnt_thr_progress_unmanaged_continue__(int handle) {
+    erts_thr_progress_unmanaged_continue(handle);
+}
-/* lock operations */
+static void lcnt_deallocate_carrier_malloc(erts_lcnt_lock_info_carrier_t *carrier) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
+ free(carrier);
+}
-/* interface to erl_threads.h */
-/* only lock on init and destroy, all others should use atomics */
-void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, NIL);
+static void lcnt_deallocate_carrier_erts(erts_lcnt_lock_info_carrier_t *carrier) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == 0);
+ erts_free(ERTS_ALC_T_LCNT_CARRIER, (void*)carrier);
}
-void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
- int i;
+static void lcnt_thr_prg_cleanup_carrier(void *data) {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t entry_count, i;
- if (flag & ERTS_LCNT_LT_DISABLE) {
- ERTS_LCNT_CLEAR_FLAG(lock);
- return;
+ /* carrier->entry_count will be replaced with garbage if it's deallocated
+ * on the final iteration, so we'll tuck it away to get a clean exit. */
+ entry_count = carrier->entry_count;
+
+ for(i = 0; i < entry_count; i++) {
+ ASSERT(ethr_atomic_read(&carrier->ref_count) >= (entry_count - i));
+
+ erts_lcnt_release_lock_info(&carrier->entries[i]);
}
+}
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = flag;
- lock->name = name;
- lock->id = id;
+static void lcnt_schedule_carrier_cleanup(void *data) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
+ /* We can't issue cleanup jobs on anything other than normal schedulers, so
+ * we move to the first scheduler if required. */
+
+ if(!esdp || esdp->type != ERTS_SCHED_NORMAL) {
+ erts_schedule_misc_aux_work(1, &lcnt_schedule_carrier_cleanup, data);
+ } else {
+ erts_lcnt_lock_info_carrier_t *carrier = data;
+ size_t carrier_size;
- lock->n_stats = 1;
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * carrier->entry_count;
- for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- lcnt_clear_stats(&lock->stats[i]);
+ erts_schedule_thr_prgr_later_cleanup_op(&lcnt_thr_prg_cleanup_carrier,
+ data, (ErtsThrPrgrLaterOp*)&carrier->release_entries, carrier_size);
}
+}
- lcnt_lock();
- erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
- lcnt_unlock();
+static void lcnt_info_deallocate(erts_lcnt_lock_info_t *info) {
+ lcnt_release_carrier__(info->carrier);
}
-/* init empty, instead of zero struct
- * used by process locks probes
- */
-void erts_lcnt_init_lock_empty(erts_lcnt_lock_t *lock) {
- lock->next = NULL;
- lock->prev = NULL;
- lock->flag = 0;
- lock->name = NULL;
- lock->id = NIL;
- ethr_atomic_init(&lock->r_state, 0);
- ethr_atomic_init(&lock->w_state, 0);
-#ifdef DEBUG
- ethr_atomic_init(&lock->flowstate, 0);
-#endif
- lock->n_stats = 0;
- sys_memzero(lock->stats, sizeof(lock->stats));
-}
-/* destroy lock */
-void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- lcnt_lock();
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
- erts_lcnt_lock_t *deleted_lock;
- /* copy structure and insert the copy */
- deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
- if (!deleted_lock) {
- ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
- }
- memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
- deleted_lock->next = NULL;
- deleted_lock->prev = NULL;
- erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
- }
- /* delete original */
- erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
- ERTS_LCNT_CLEAR_FLAG(lock);
+static void lcnt_info_dispose(erts_lcnt_lock_info_t *info) {
+ ASSERT(ethr_atomic_read(&info->ref_count) == 0);
- lcnt_unlock();
+ if(erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
+ ethr_atomic_set(&info->ref_count, 1);
+
+ /* Move straight to deallocation the next time around. */
+ info->dispose = &lcnt_info_deallocate;
+
+ lcnt_insert_list_entry(&lcnt_deleted_lock_list, info);
+ } else {
+ lcnt_info_deallocate(info);
+ }
}
-/* lock */
+static void lcnt_lock_info_init_helper(erts_lcnt_lock_info_t *info) {
+#ifdef DEBUG
+ ethr_atomic_init(&info->flowstate, 0);
+#endif
-void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- erts_aint_t r_state = 0, w_state = 0;
- erts_lcnt_thread_data_t *eltd;
+ ethr_atomic_init(&info->ref_count, 1);
+ ethr_atomic32_init(&info->lock, 0);
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ ethr_atomic_init(&info->r_state, 0);
+ ethr_atomic_init(&info->w_state, 0);
- eltd = lcnt_get_thread_data();
- ASSERT(eltd);
+ info->dispose = &lcnt_info_dispose;
- w_state = ethr_atomic_read(&lock->w_state);
+ lcnt_clear_stats(info);
+}
- if (option & ERTS_LCNT_LO_WRITE) {
- r_state = ethr_atomic_read(&lock->r_state);
- ethr_atomic_inc( &lock->w_state);
- }
- if (option & ERTS_LCNT_LO_READ) {
- ethr_atomic_inc( &lock->r_state);
- }
+erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int entry_count) {
+ erts_lcnt_lock_info_carrier_t *result;
+ size_t carrier_size, i;
+
+ ASSERT(entry_count > 0 && entry_count <= LCNT_MAX_CARRIER_ENTRIES);
- /* we cannot acquire w_lock if either w or r are taken */
- /* we cannot acquire r_lock if w_lock is taken */
+ carrier_size = sizeof(erts_lcnt_lock_info_carrier_t) +
+ sizeof(erts_lcnt_lock_info_t) * entry_count;
- if ((w_state > 0) || (r_state > 0)) {
- eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
- }
- eltd->timer_set++;
+ if(lcnt_initialization_completed__) {
+ result = (erts_lcnt_lock_info_carrier_t*)erts_alloc(ERTS_ALC_T_LCNT_CARRIER, carrier_size);
+ result->deallocate = &lcnt_deallocate_carrier_erts;
} else {
- eltd->lock_in_conflict = 0;
+ result = (erts_lcnt_lock_info_carrier_t*)malloc(carrier_size);
+ result->deallocate = &lcnt_deallocate_carrier_malloc;
}
-}
-void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
- erts_aint_t w_state;
- erts_lcnt_thread_data_t *eltd;
+ ethr_atomic_init(&result->ref_count, entry_count);
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ result->entry_count = entry_count;
- w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc(&lock->w_state);
- eltd = lcnt_get_thread_data();
+ for(i = 0; i < entry_count; i++) {
+ erts_lcnt_lock_info_t *info = &result->entries[i];
- ASSERT(eltd);
+ lcnt_lock_info_init_helper(info);
- if (w_state > 0) {
- eltd->lock_in_conflict = 1;
- /* only set the timer if nobody else has it
- * This should only happen when proc_locks aquires several locks
- * 'atomicly'. All other locks will block the thread if w_state > 0
- * i.e. locked.
- */
- if (eltd->timer_set == 0) {
- lcnt_time(&eltd->timer);
- }
- eltd->timer_set++;
- } else {
- eltd->lock_in_conflict = 0;
+ info->carrier = result;
}
-}
-
-/* if a lock wasn't really a lock operation, bad bad process locks */
-
-void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
- /* should check if this thread was "waiting" */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- ethr_atomic_dec(&lock->w_state);
+ return result;
}
-/*
- * erts_lcnt_lock_post
- *
- * Used when we get a lock (i.e. directly after a lock operation)
- * if the timer was set then we had to wait for the lock
- * lock_post will calculate the wait time.
- */
+void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier) {
+ ethr_sint_t swapped_carrier;
-void erts_lcnt_lock_post(erts_lcnt_lock_t *lock) {
- erts_lcnt_lock_post_x(lock, (char*)str_undefined, 0);
-}
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)carrier, (ethr_sint_t)NULL);
-void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
- erts_lcnt_thread_data_t *eltd;
- erts_lcnt_time_t timer;
- erts_lcnt_time_t time_wait;
- erts_lcnt_lock_stats_t *stats;
+ if(swapped_carrier != (ethr_sint_t)NULL) {
#ifdef DEBUG
- erts_aint_t flowstate;
+ ASSERT(ethr_atomic_read(&carrier->ref_count) == carrier->entry_count);
+ ethr_atomic_set(&carrier->ref_count, 0);
#endif
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
+ carrier->deallocate(carrier);
+ } else {
+ lcnt_insert_list_carrier(&lcnt_current_lock_list, carrier);
+ }
+}
+
+void erts_lcnt_uninstall(erts_lcnt_ref_t *ref) {
+ ethr_sint_t previous_carrier, swapped_carrier;
-#ifdef DEBUG
- if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc(&lock->flowstate);
+ previous_carrier = ethr_atomic_read(ref);
+ swapped_carrier = ethr_atomic_cmpxchg_mb(ref, (ethr_sint_t)NULL, previous_carrier);
+
+ if(previous_carrier && previous_carrier == swapped_carrier) {
+ lcnt_schedule_carrier_cleanup((void*)previous_carrier);
}
-#endif
+}
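Taken together, create/install/uninstall give lock implementations a publication lifecycle that is safe against concurrent readers. A hedged sketch of how a caller might use it (my_lock_t and the entry set-up step are hypothetical, not OTP API):

    /* Illustrative only: a wrapper that publishes lcnt info for one lock. */
    typedef struct {
        erts_lcnt_ref_t lcnt;    /* assumed initialized to NULL/0 */
        /* ... the lock itself ... */
    } my_lock_t;

    static void my_lock_init(my_lock_t *lock) {
        erts_lcnt_lock_info_carrier_t *carrier;

        carrier = erts_lcnt_create_lock_info_carrier(1);
        /* (Fill in name/id/flags on carrier->entries[0] here.) */
        erts_lcnt_install(&lock->lcnt, carrier);
    }

    static void my_lock_destroy(my_lock_t *lock) {
        /* Swings the ref back to NULL; the carrier is freed later through
         * thread progress, once unmanaged readers are known to be done. */
        erts_lcnt_uninstall(&lock->lcnt);
    }

Note how erts_lcnt_install() frees the carrier rather than leaking it when it loses the installation race, and erts_lcnt_uninstall() only schedules cleanup if it was the thread that actually swapped the carrier out.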
- eltd = lcnt_get_thread_data();
+/* - Initialization - */
- ASSERT(eltd);
+void erts_lcnt_pre_thr_init() {
+ /* Ensure that the dependency hack mentioned in the header doesn't
+ * explode at runtime. */
+ ERTS_CT_ASSERT(sizeof(LcntThrPrgrLaterOp) >= sizeof(ErtsThrPrgrLaterOp));
+ ERTS_CT_ASSERT(ERTS_THR_PRGR_DHANDLE_MANAGED ==
+ (ErtsThrPrgrDelayHandle)LCNT_THR_PRGR_DHANDLE_MANAGED);
- /* if lock was in conflict, time it */
- stats = lcnt_get_lock_stats(lock, file, line);
- if (eltd->timer_set) {
- lcnt_time(&timer);
+ lcnt_init_list(&lcnt_current_lock_list);
+ lcnt_init_list(&lcnt_deleted_lock_list);
+}
- lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
- lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
- eltd->timer_set--;
- ASSERT(eltd->timer_set >= 0);
- } else {
- lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
- }
+void erts_lcnt_post_thr_init() {
+ /* ASSUMPTION: this is safe since it runs prior to the creation of other
+ * threads (directly after ethread init). */
+ ethr_tsd_key_create(&lcnt_thr_data_key__, "lcnt_data");
+
+ erts_lcnt_thread_setup();
}
-/* unlock */
+void erts_lcnt_late_init() {
+ /* Set start timer and zero all statistics */
+ erts_lcnt_clear_counters();
+ erts_thr_install_exit_handler(erts_lcnt_thread_exit_handler);
-void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_dec(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state);
+ /* It's safe to use erts_alloc and thread progress past this point. */
+ lcnt_initialization_completed__ = 1;
}
-void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
-#ifdef DEBUG
- {
- erts_aint_t w_state;
- erts_aint_t flowstate;
-
- /* flowstate */
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 1);
- ethr_atomic_dec(&lock->flowstate);
-
- /* write state */
- w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0);
- }
-#endif
- ethr_atomic_dec(&lock->w_state);
+void erts_lcnt_thread_setup() {
+ lcnt_thread_data_t__ *eltd = lcnt_thread_data_alloc();
+
+ ASSERT(eltd);
+
+ ethr_tsd_set(lcnt_thr_data_key__, eltd);
}
-/* trylock */
+void erts_lcnt_thread_exit_handler() {
+ lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
-void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) {
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- /* Determine lock_state via res instead of state */
- if (res != EBUSY) {
- if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state);
- if (option & ERTS_LCNT_LO_READ ) ethr_atomic_inc(&lock->r_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
- } else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
+ if (eltd) {
+ free(eltd);
}
}
+/* - BIF interface - */
-void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
- /* Determine lock_state via res instead of state */
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
- if (ERTS_LCNT_IS_LOCK_INVALID(lock)) return;
- if (res != EBUSY) {
+void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info) {
#ifdef DEBUG
- {
- erts_aint_t flowstate;
- flowstate = ethr_atomic_read(&lock->flowstate);
- ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
- }
+ ASSERT(ethr_atomic_inc_read_acqb(&info->ref_count) >= 2);
+#else
+ ethr_atomic_inc_acqb(&info->ref_count);
#endif
- ethr_atomic_inc(&lock->w_state);
- lcnt_update_stats(&(lock->stats[0]), 0, NULL);
- } else {
- ethr_atomic_inc(&lock->stats[0].tries);
- ethr_atomic_inc(&lock->stats[0].colls);
- }
}
-/* thread operations */
+void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info) {
+ ethr_sint_t count;
-void erts_lcnt_thread_setup(void) {
- erts_lcnt_thread_data_t *eltd;
+ /* We need to acquire the lock before decrementing ref_count to avoid
+ * racing with list iteration; there's a short window between reading the
+ * reference to info and increasing its ref_count. */
+ lcnt_lock_list_entry_with_neighbors(info);
- lcnt_lock();
- /* lock for thread id global update */
- eltd = lcnt_thread_data_alloc();
- lcnt_unlock();
- ASSERT(eltd);
- ethr_tsd_set(lcnt_thr_data_key, eltd);
-}
+ count = ethr_atomic_dec_read(&info->ref_count);
-void erts_lcnt_thread_exit_handler() {
- erts_lcnt_thread_data_t *eltd;
+ ASSERT(count >= 0);
- eltd = ethr_tsd_get(lcnt_thr_data_key);
+ if(count > 0) {
+ lcnt_unlock_list_entry_with_neighbors(info);
+ } else {
+ (info->next)->prev = info->prev;
+ (info->prev)->next = info->next;
- if (eltd) {
- free(eltd);
+ lcnt_unlock_list_entry_with_neighbors(info);
+
+ info->dispose(info);
}
}
-/* bindings for bifs */
-
Uint16 erts_lcnt_set_rt_opt(Uint16 opt) {
Uint16 prev;
prev = (erts_lcnt_rt_options & opt);
@@ -656,51 +507,72 @@ Uint16 erts_lcnt_clear_rt_opt(Uint16 opt) {
}
void erts_lcnt_clear_counters(void) {
- erts_lcnt_lock_t *lock;
- erts_lcnt_lock_list_t *list;
- erts_lcnt_lock_stats_t *stats;
- int i;
+ erts_lcnt_lock_info_t *iterator;
- lcnt_lock();
+ lcnt_time__(&lcnt_timer_start);
- list = erts_lcnt_data->current_locks;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
+ lcnt_clear_stats(iterator);
+ }
- for (lock = list->head; lock != NULL; lock = lock->next) {
- for( i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
- stats = &lock->stats[i];
- lcnt_clear_stats(stats);
- }
- lock->n_stats = 1;
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(&lcnt_deleted_lock_list, &iterator)) {
+ erts_lcnt_release_lock_info(iterator);
}
+}
+
+erts_lcnt_data_t erts_lcnt_get_data(void) {
+ erts_lcnt_time_t timer_stop;
+ erts_lcnt_data_t result;
- lock = erts_lcnt_data->deleted_locks->head;
- erts_lcnt_data->deleted_locks->head = NULL;
- erts_lcnt_data->deleted_locks->tail = NULL;
- erts_lcnt_data->deleted_locks->n = 0;
+ lcnt_time__(&timer_stop);
- lcnt_time(&timer_start);
+ result.timer_start = lcnt_timer_start;
- lcnt_unlock();
+ result.current_locks = &lcnt_current_lock_list;
+ result.deleted_locks = &lcnt_deleted_lock_list;
- /* free deleted locks */
- lcnt_list_free(lock);
+ lcnt_time_diff__(&result.duration, &timer_stop, &result.timer_start);
+
+ return result;
}
-erts_lcnt_data_t *erts_lcnt_get_data(void) {
- erts_lcnt_time_t timer_stop;
+const char *erts_lcnt_lock_type(Uint16 type) {
+ switch(type & ERTS_LCNT_LT_ALL) {
+ case ERTS_LCNT_LT_SPINLOCK: return "spinlock";
+ case ERTS_LCNT_LT_RWSPINLOCK: return "rw_spinlock";
+ case ERTS_LCNT_LT_MUTEX: return "mutex";
+ case ERTS_LCNT_LT_RWMUTEX: return "rw_mutex";
+ case ERTS_LCNT_LT_PROCLOCK: return "proclock";
+ default: return "";
+ }
+}
- lcnt_lock();
+int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator) {
+ erts_lcnt_lock_info_t *current, *next;
- lcnt_time(&timer_stop);
- lcnt_time_diff(&(erts_lcnt_data->duration), &timer_stop, &timer_start);
+ current = *iterator ? *iterator : &list->head;
- lcnt_unlock();
+ ASSERT(current != &list->tail);
- return erts_lcnt_data;
-}
+ lcnt_lock_list_entry(current);
+
+ next = current->next;
+
+ if(next != &list->tail) {
+ erts_lcnt_retain_lock_info(next);
+ }
+
+ lcnt_unlock_list_entry(current);
+
+ if(current != &list->head) {
+ erts_lcnt_release_lock_info(current);
+ }
+
+ *iterator = next;
-char *erts_lcnt_lock_type(Uint16 type) {
- return lcnt_lock_type(type);
+ return next != &list->tail;
}
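The iterator retains the next entry before releasing the current one, so entries cannot disappear mid-iteration; the flip side is that a caller breaking out of the loop early still holds a reference and must release it itself. A hedged sketch (should_stop() is hypothetical; erts_lcnt_clear_counters() above shows the run-to-completion form):

    erts_lcnt_lock_info_t *iterator = NULL;

    while(erts_lcnt_iterate_list(&lcnt_current_lock_list, &iterator)) {
        if(should_stop(iterator)) {
            /* iterate_list retained this entry on our behalf. */
            erts_lcnt_release_lock_info(iterator);
            break;
        }
    }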
-#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */
+#endif /* #ifdef ERTS_ENABLE_LOCK_COUNT */