/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2008-2017. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

/**
 * @description Statistics for locks.
 * @file erl_lock_count.h
 *
 * @author Björn-Egil Dahlberg
 * @author John Högberg
 *
 * Conceptual representation:
 *
 *  - set name
 *  | - id (the unique lock)
 *  | | - lock type
 *  | | - statistics
 *  | | | - location (file and line number)
 *  | | | - attempts
 *  | | | - collisions (including trylock busy)
 *  | | | - timer (time spent in waiting for lock)
 *  | | | - n_timer (collisions excluding trylock busy)
 *  | | | - histogram
 *  | | | | - # 0 = log2(lock wait_time ns)
 *  | | | | - ...
 *  | | | | - # n = log2(lock wait_time ns)
 *
 * Each instance of a lock is the unique lock, i.e. set and id in that set.
 * For each lock there is a set of statistics describing where and with what
 * impact the lock acquisition had.
 *
 * Runtime options:
 *  - location, reserved and not used.
 *  - proclock, disable proclock counting. Used when performance might be an
 *    issue. Accessible from erts_debug:lock_counters({process_locks, bool()}).
 *    Default: off.
 *  - copysave, enable saving of destroyed locks (and thereby their
 *    statistics). If memory constraints are an issue this needs to be
 *    disabled. Accessible from
 *    erts_debug:lock_counters({copy_save, bool()}).
 *    Default: off.
 */

#ifndef ERTS_LOCK_COUNT_H__
#define ERTS_LOCK_COUNT_H__

#ifdef ERTS_ENABLE_LOCK_COUNT

#ifndef ERTS_ENABLE_LOCK_POSITION
/** @brief Controls whether _x variants of mtx functions are used. */
#define ERTS_ENABLE_LOCK_POSITION 1
#endif

#include "sys.h"
#include "ethread.h"

#include "erl_term.h"

/* Max number of distinct (file, line) call sites tracked per lock; slot 0 is
 * reserved as a catch-all (see erts_lcnt_lock_info_t below). */
#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)

/* histogram */
#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
#if 0 || defined(ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT)
/* With OS monotonic time support we keep full ns resolution (30 slots);
 * otherwise the low 10 bits are shifted away before the log2 bucketing. */
#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
#define ERTS_LCNT_HISTOGRAM_RSHIFT (0)
#else
#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20)
#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
#endif

/* Lock types (LT) and lock options (LO); combined into the "flag" field of
 * erts_lcnt_lock_info_t. */
#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
#define ERTS_LCNT_LT_MUTEX (((Uint16) 1) << 2)
#define ERTS_LCNT_LT_RWMUTEX (((Uint16) 1) << 3)
#define ERTS_LCNT_LT_PROCLOCK (((Uint16) 1) << 4)
#define ERTS_LCNT_LT_ALLOC (((Uint16) 1) << 5)

#define ERTS_LCNT_LO_READ (((Uint16) 1) << 6)
#define ERTS_LCNT_LO_WRITE (((Uint16) 1) << 7)

#define ERTS_LCNT_LT_DISABLE (((Uint16) 1) << 8)

#define ERTS_LCNT_LO_READ_WRITE ( ERTS_LCNT_LO_READ \
                                | ERTS_LCNT_LO_WRITE )

#define ERTS_LCNT_LT_ALL ( ERTS_LCNT_LT_SPINLOCK \
                         | ERTS_LCNT_LT_RWSPINLOCK \
                         | ERTS_LCNT_LT_MUTEX \
                         | ERTS_LCNT_LT_RWMUTEX \
                         | ERTS_LCNT_LT_PROCLOCK )

/* Runtime options */
#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 3)
#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 4)

/* Split wall-clock value; seconds plus a sub-second nanosecond remainder. */
typedef struct {
    unsigned long s;
    unsigned long ns;
} erts_lcnt_time_t;

typedef struct {
    /** @brief log2 array of nanosecond occurrences */
    Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
} erts_lcnt_hist_t;

/* Per-call-site statistics for a single lock. */
typedef struct {
    /** @brief In which file the lock was taken. May be NULL. */
    const char *file;
    /** @brief Line number in \c file */
    unsigned int line;

    /* "attempts" and "collisions" need to be atomic since try_lock busy does
     * not acquire a lock and there is no post action to rectify the
     * situation. */

    ethr_atomic_t attempts;
    ethr_atomic_t collisions;

    erts_lcnt_time_t total_time_waited;
    Uint64 times_waited;

    erts_lcnt_hist_t wait_time_histogram;
} erts_lcnt_lock_stats_t;

typedef struct lcnt_lock_info_t_ {
    const char *name; /**< Lock name */
    Uint16 flag; /**< Lock type */
    Eterm id; /**< Id if possible, must be an immediate */

    /* The first entry is reserved as a fallback for when location information
     * is missing, and when the lock is used in more than (MAX_LOCK_LOCATIONS
     * - 1) different places. */
    erts_lcnt_lock_stats_t location_stats[ERTS_LCNT_MAX_LOCK_LOCATIONS];
    unsigned int location_count;

    /* -- Everything below is internal to this module ---------------------- */

    /* Lock states; rw locks uses both states, other locks only uses w_state */
    /** @brief Write state. 0 = not taken, otherwise n threads waiting */
    ethr_atomic_t w_state;
    /** @brief Read state. 0 = not taken, > 0 -> writes will wait */
    ethr_atomic_t r_state;

#ifdef LCNT_DEBUG_LOCK_FLOW
    /** @brief Tracks lock/unlock operations. This will explode if the lock is
     * held at the time lock counting is installed.
     *
     * Avoid enabling existing locks at runtime while running in this
     * configuration. */
    ethr_atomic_t flowstate;
#endif

    struct lcnt_lock_info_t_ *prev;
    struct lcnt_lock_info_t_ *next;

    /** @brief Used in place of erts_refc_t to avoid a circular dependency. */
    ethr_atomic_t ref_count;
    ethr_atomic32_t lock;

    /** @brief Deletion hook called once \c ref_count reaches 0; may defer
     * deletion by modifying \c ref_count. */
    void (*dispose)(struct lcnt_lock_info_t_ *);

    struct lcnt_lock_info_carrier_ *carrier;
} erts_lcnt_lock_info_t;

/* Doubly-linked list of lock info entries, delimited by sentinel head/tail
 * nodes. */
typedef struct lcnt_lock_info_list_ {
    erts_lcnt_lock_info_t head;
    erts_lcnt_lock_info_t tail;
} erts_lcnt_lock_info_list_t;

typedef struct {
    erts_lcnt_time_t timer_start; /**< Time of last clear */
    erts_lcnt_time_t duration; /**< Time since last clear */

    erts_lcnt_lock_info_list_t *current_locks;
    erts_lcnt_lock_info_list_t *deleted_locks;
} erts_lcnt_data_t;

typedef struct lcnt_lock_info_carrier_ erts_lcnt_lock_info_carrier_t;

typedef ethr_atomic_t erts_lcnt_ref_t;

/* -- Globals -------------------------------------------------------------- */

extern Uint16 erts_lcnt_rt_options;

/* -- Lock operations ------------------------------------------------------
 *
 * All of these will nop if there's nothing "installed" on the given
 * reference, in order to transparently support enable/disable at runtime. */

/** @brief Records that a lock is being acquired. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock(erts_lcnt_ref_t *ref);

/** @copydoc erts_lcnt_lock
 * @param option Notes whether the lock is a read or write lock. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, Uint16 option);

/** @brief Records that a lock has been acquired. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_post(erts_lcnt_ref_t *ref);

/** @copydoc erts_lcnt_lock_post
 * @param file The name of the file where the lock was acquired.
 * @param line The line at which the lock was acquired. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line);

/** @brief Records that a lock has been released. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_unlock(erts_lcnt_ref_t *ref);

/** @copydoc erts_lcnt_unlock
 * @param option Whether the lock is a read or write lock. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, Uint16 option);

/** @brief Rectifies the case where a lock wasn't actually a lock operation.
 *
 * Only used for process locks at the moment. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref);

/** @brief Records the result of a trylock, placing the queried lock status in
 * \c result. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result);

/** @copydoc erts_lcnt_trylock
 * @param option Whether the lock is a read or write lock. */
ERTS_GLB_FORCE_INLINE
void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, Uint16 option);

/* Indexed variants of the standard lock operations, for use when a single
 * reference contains many counters (eg. process locks).
 *
 * erts_lcnt_open_ref must be used to safely extract the installed carrier,
 * which must released with erts_lcnt_close_reference on success.
 *
 * Refer to \c erts_lcnt_lock for example usage. */

ERTS_GLB_FORCE_INLINE
int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result);

ERTS_GLB_FORCE_INLINE
void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier);

ERTS_GLB_INLINE
void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
ERTS_GLB_INLINE
void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, Uint16 option);

ERTS_GLB_INLINE
void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
ERTS_GLB_INLINE
void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line);

ERTS_GLB_INLINE
void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);

ERTS_GLB_INLINE
void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index);
ERTS_GLB_INLINE
void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, Uint16 option);

ERTS_GLB_INLINE
void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result);
ERTS_GLB_INLINE
void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, Uint16 option);

/* -- Reference operations ------------------------------------------------- */

/* Allocates a carrier holding \c count lock info entries; ownership passes to
 * erts_lcnt_install once installed. */
erts_lcnt_lock_info_carrier_t *erts_lcnt_create_lock_info_carrier(int count);

/** @brief Fills in the name and lock type of the given index. */
#define erts_lcnt_init_lock_info_idx(carrier, index, name, flag) \
    erts_lcnt_init_lock_info_x_idx(carrier, index, name, flag, NIL)

/** @copydoc erts_lcnt_init_lock_info_idx
 * @param id An immediate erlang term with whatever extra data you want to
 * identify this lock with. */
ERTS_GLB_INLINE
void erts_lcnt_init_lock_info_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
    const char *name, Uint16 flag, Eterm id);

/** @brief Initializes a lock counter reference; this must be called prior to
 * using any other functions in this module. */
#define erts_lcnt_init_ref(ref) ethr_atomic_init(ref, (ethr_sint_t)NULL);

/** @brief Atomically installs the given lock counters. Nops (and releases the
 * provided carrier) if something was already installed. */
void erts_lcnt_install(erts_lcnt_ref_t *ref, erts_lcnt_lock_info_carrier_t *carrier);

/** @brief Atomically removes the currently installed lock counters. Nops if
 * nothing was installed. */
void erts_lcnt_uninstall(erts_lcnt_ref_t *ref);

/** @brief Convenience macro to install a single lock counter of the given
 * name and type. */
#define erts_lcnt_install_new_lock_info(reference, name, flag) \
    erts_lcnt_install_new_lock_info_x(reference, name, flag, NIL)

/** @copydoc erts_lcnt_install_new_lock_info
 * @param id An immediate erlang term with whatever extra data you want to
 * identify this lock with. */
#define erts_lcnt_install_new_lock_info_x(reference, name, flag, id) \
    do { \
        erts_lcnt_lock_info_carrier_t *__carrier; \
        __carrier = erts_lcnt_create_lock_info_carrier(1); \
        erts_lcnt_init_lock_info_x_idx(__carrier, 0, name, flag, id); \
        erts_lcnt_install(reference, __carrier); \
    } while(0)

/* -- Module initialization ------------------------------------------------ */

void erts_lcnt_pre_thr_init(void);
void erts_lcnt_post_thr_init(void);
void erts_lcnt_late_init(void);

void erts_lcnt_thread_setup(void);
void erts_lcnt_thread_exit_handler(void);

/* -- BIF interface -------------------------------------------------------- */

/** @brief Safely iterates through all entries in the given list.
 *
 * The referenced item will be valid until the next call to
 * \c erts_lcnt_iterate_list after which point it may be destroyed; call
 * erts_lcnt_retain_lock_info if you wish to hang on to it beyond that point.
 *
 * Iteration can be cancelled by calling erts_lcnt_release_lock_info on the
 * iterator and breaking out of the loop.
 *
 * @param iterator The iteration variable; set the pointee to NULL to start
 * iteration.
 * @return 1 while the iterator is valid, 0 at the end of the list. */
int erts_lcnt_iterate_list(erts_lcnt_lock_info_list_t *list, erts_lcnt_lock_info_t **iterator);

/** @brief Clears the counter state of all locks, and releases all locks
 * preserved through ERTS_LCNT_OPT_COPYSAVE (if any). */
void erts_lcnt_clear_counters(void);

/** @brief Retrieves the global lock counter state.
 *
 * Note that the lists may be modified while you're mucking around with them.
 * Always use \c erts_lcnt_iterate_list to enumerate them. */
erts_lcnt_data_t erts_lcnt_get_data(void);

void erts_lcnt_retain_lock_info(erts_lcnt_lock_info_t *info);
void erts_lcnt_release_lock_info(erts_lcnt_lock_info_t *info);

Uint16 erts_lcnt_set_rt_opt(Uint16 opt);
Uint16 erts_lcnt_clear_rt_opt(Uint16 opt);

const char *erts_lcnt_lock_type(Uint16 type);

/* -- Inline implementation ------------------------------------------------ */

/* The following is a hack to get the things we need from erl_thr_progress.h,
 * which we can't #include without dependency hell breaking loose.
 *
 * The size of LcntThrPrgrLaterOp and value of the constant are verified at
 * compile-time in erts_lcnt_pre_thr_init. */

int lcnt_thr_progress_unmanaged_delay__(void);
void lcnt_thr_progress_unmanaged_continue__(int handle);
typedef struct { Uint64 _[4]; } LcntThrPrgrLaterOp;
#define LCNT_THR_PRGR_DHANDLE_MANAGED -1

struct lcnt_lock_info_carrier_ {
    ethr_atomic_t ref_count;

    void (*deallocate)(struct lcnt_lock_info_carrier_ *);

    LcntThrPrgrLaterOp release_entries;

    unsigned char entry_count;
    erts_lcnt_lock_info_t entries[];
};

/* Per-thread bookkeeping used while an acquisition is in flight. */
typedef struct {
    erts_lcnt_time_t timer; /* time of the first conflicting acquire */
    int timer_set; /* nesting count; incremented per conflicting acquire,
                    * decremented in the corresponding lock_post */
    int lock_in_conflict; /* bool */
} lcnt_thread_data_t__;

extern ethr_tsd_key lcnt_thr_data_key__;

/** @brief Some operations (eg erts_alloc or erts_thr_progress_unmanaged_delay)
 * are unsafe in the early stages of initialization, so we're using this flag
 * to know when we can move over to normal operation. */
extern int lcnt_initialization_completed__;

extern const int lcnt_log2_tab64__[];

ERTS_GLB_INLINE
int lcnt_log2__(Uint64 v);

ERTS_GLB_INLINE
void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited);

ERTS_GLB_INLINE
void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited);

ERTS_GLB_INLINE
erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line);

ERTS_GLB_INLINE
void lcnt_dec_lock_state__(ethr_atomic_t *l_state);

ERTS_GLB_INLINE
void lcnt_time__(erts_lcnt_time_t *time);

ERTS_GLB_INLINE
void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d);

ERTS_GLB_INLINE
void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0);

ERTS_GLB_INLINE
void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier);

ERTS_GLB_INLINE
void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier);

ERTS_GLB_INLINE
lcnt_thread_data_t__ *lcnt_get_thread_data__(void);

#if ERTS_GLB_INLINE_INCL_FUNC_DEF

ERTS_GLB_INLINE
void lcnt_time__(erts_lcnt_time_t *time) {
    /*
     * erts_sys_hrtime() is the highest resolution
     * we could find, it may or may not be monotonic...
     */
    ErtsMonotonicTime mtime = erts_sys_hrtime();
    time->s = (unsigned long) (mtime / 1000000000LL);
    time->ns = (unsigned long) (mtime - 1000000000LL*time->s);
}

/* difference d must be non-negative */
ERTS_GLB_INLINE
void lcnt_time_add__(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
    t->s += d->s;
    t->ns += d->ns;

    /* Carry any whole-second overflow from the ns field into s. */
    t->s += t->ns / 1000000000LL;
    t->ns = t->ns % 1000000000LL;
}

ERTS_GLB_INLINE
void lcnt_time_diff__(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_time_t *t0) {
    long ds;
    long dns;

    ds = t1->s - t0->s;
    dns = t1->ns - t0->ns;

    /* the difference should not be able to get bigger than 1 sec in ns*/

    if (dns < 0) {
        ds -= 1;
        dns += 1000000000LL;
    }

    ASSERT(ds >= 0);

    d->s = ds;
    d->ns = dns;
}

/* Floor of log2(v) via a de Bruijn multiply/lookup; rounds v up to one less
 * than the next power of two first. */
ERTS_GLB_INLINE
int lcnt_log2__(Uint64 v) {
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;

    return lcnt_log2_tab64__[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
}

ERTS_GLB_INLINE
void lcnt_update_wait_histogram__(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_waited) {
    int idx;

    /* Waits of a second or more, or beyond MAX_NS, saturate into the last
     * slot. */
    if(time_waited->s > 0 || time_waited->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
        idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
    } else {
        unsigned long r = time_waited->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;

        idx = r ? lcnt_log2__(r) : 0;
    }

    hist->ns[idx]++;
}

ERTS_GLB_INLINE
void lcnt_update_stats__(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_waited) {
    ethr_atomic_inc(&stats->attempts);

    if(lock_in_conflict) {
        ethr_atomic_inc(&stats->collisions);
    }

    /* time_waited == NULL means no wait time was recorded for this attempt. */
    if(time_waited) {
        stats->times_waited++;

        lcnt_time_add__(&stats->total_time_waited, time_waited);
        lcnt_update_wait_histogram__(&stats->wait_time_histogram, time_waited);
    }
}

/* If we were installed while the lock was held, r/w_state will be 0 and we
 * can't tell which unlock or unacquire operation was the last. To get around
 * this we assume that all excess operations go *towards* zero rather than down
 * to zero, eventually becoming consistent with the actual state once the lock
 * is fully released.
 *
 * Conflicts might not be counted until the recorded state is fully consistent
 * with the actual state, but there should be no other ill effects. */

ERTS_GLB_INLINE
void lcnt_dec_lock_state__(ethr_atomic_t *l_state) {
    ethr_sint_t state = ethr_atomic_dec_read_acqb(l_state);

    /* We can not assume that state is >= -1 here; unlock and unacquire might
     * bring it below -1 and race to increment it back. */

    if(state < 0) {
        ethr_atomic_inc_acqb(l_state);
    }
}

/* Returns the stats entry for the given call site, allocating a new entry if
 * location tracking is enabled and there is room; otherwise falls back to the
 * reserved catch-all entry at index 0. */
ERTS_GLB_INLINE
erts_lcnt_lock_stats_t *lcnt_get_lock_stats__(erts_lcnt_lock_info_t *info, char *file, unsigned int line) {
    ASSERT(info->location_count >= 1 && info->location_count <= ERTS_LCNT_MAX_LOCK_LOCATIONS);

    if(erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
        unsigned int i;

        for(i = 0; i < info->location_count; i++) {
            erts_lcnt_lock_stats_t *stats = &info->location_stats[i];

            /* Pointer comparison on file is intentional; callers pass string
             * literals (e.g. __FILE__), so identical sites share a pointer. */
            if(stats->file == file && stats->line == line) {
                return stats;
            }
        }

        if(info->location_count < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
            erts_lcnt_lock_stats_t *stats = &info->location_stats[info->location_count];

            stats->file = file;
            stats->line = line;

            info->location_count++;

            return stats;
        }
    }

    return &info->location_stats[0];
}

ERTS_GLB_INLINE
lcnt_thread_data_t__ *lcnt_get_thread_data__(void) {
    lcnt_thread_data_t__ *eltd = (lcnt_thread_data_t__ *)ethr_tsd_get(lcnt_thr_data_key__);

    ASSERT(eltd);

    return eltd;
}

ERTS_GLB_FORCE_INLINE
int erts_lcnt_open_ref(erts_lcnt_ref_t *ref, int *handle, erts_lcnt_lock_info_carrier_t **result) {
    /* Cheap non-atomic peek first; bail out when nothing is installed or
     * while initialization is still in progress. */
    if(!*ethr_atomic_addr(ref) || !lcnt_initialization_completed__) {
        return 0;
    }

    (*handle) = lcnt_thr_progress_unmanaged_delay__();
    (*result) = (erts_lcnt_lock_info_carrier_t*)ethr_atomic_read(ref);

    if(*result) {
        /* Unmanaged threads must pin the carrier with a reference; managed
         * threads are protected by thread progress instead. */
        if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
            lcnt_retain_carrier__(*result);
            lcnt_thr_progress_unmanaged_continue__(*handle);
        }

        return 1;
    } else if(*handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
        lcnt_thr_progress_unmanaged_continue__(*handle);
    }

    return 0;
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_close_ref(int handle, erts_lcnt_lock_info_carrier_t *carrier) {
    if(handle != LCNT_THR_PRGR_DHANDLE_MANAGED) {
        lcnt_release_carrier__(carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock(erts_lcnt_ref_t *ref) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_lock_idx(carrier, 0);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_opt(erts_lcnt_ref_t *ref, Uint16 option) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_lock_opt_idx(carrier, 0, option);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_post(erts_lcnt_ref_t *ref) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_lock_post_idx(carrier, 0);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_post_x(erts_lcnt_ref_t *ref, char *file, unsigned int line) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_lock_post_x_idx(carrier, 0, file, line);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_lock_unacquire(erts_lcnt_ref_t *ref) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_lock_unacquire_idx(carrier, 0);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_unlock(erts_lcnt_ref_t *ref) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_unlock_idx(carrier, 0);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_unlock_opt(erts_lcnt_ref_t *ref, Uint16 option) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_unlock_opt_idx(carrier, 0, option);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_trylock(erts_lcnt_ref_t *ref, int result) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_trylock_idx(carrier, 0, result);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_FORCE_INLINE
void erts_lcnt_trylock_opt(erts_lcnt_ref_t *ref, int result, Uint16 option) {
    erts_lcnt_lock_info_carrier_t *carrier;
    int handle;

    if(erts_lcnt_open_ref(ref, &handle, &carrier)) {
        erts_lcnt_trylock_opt_idx(carrier, 0, result, option);

        erts_lcnt_close_ref(handle, carrier);
    }
}

ERTS_GLB_INLINE
void erts_lcnt_lock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
    erts_lcnt_lock_opt_idx(carrier, index, ERTS_LCNT_LO_WRITE);
}

ERTS_GLB_INLINE
void erts_lcnt_lock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, Uint16 option) {
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();

    ASSERT(index < carrier->entry_count);

    ASSERT((option & ERTS_LCNT_LO_READ) || (option & ERTS_LCNT_LO_WRITE));

    if(option & ERTS_LCNT_LO_WRITE) {
        ethr_sint_t w_state, r_state;

        w_state = ethr_atomic_inc_read(&info->w_state) - 1;
        r_state = ethr_atomic_read(&info->r_state);

        /* We cannot acquire w_lock if either w or r are taken */
        eltd->lock_in_conflict = (w_state > 0) || (r_state > 0);
    } else {
        ethr_sint_t w_state = ethr_atomic_read(&info->w_state);

        /* We cannot acquire r_lock if w_lock is taken */
        eltd->lock_in_conflict = (w_state > 0);
    }

    if(option & ERTS_LCNT_LO_READ) {
        ethr_atomic_inc(&info->r_state);
    }

    if(eltd->lock_in_conflict) {
        /* Only set the timer if nobody else has it. This should only happen
         * when proc_locks acquires several locks "atomically." All other locks
         * will block the thread when locked (w_state > 0) */
        if(eltd->timer_set == 0) {
            lcnt_time__(&eltd->timer);
        }

        eltd->timer_set++;
    }
}

ERTS_GLB_INLINE
void erts_lcnt_lock_post_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
    erts_lcnt_lock_post_x_idx(carrier, index, NULL, 0);
}

ERTS_GLB_INLINE
void erts_lcnt_lock_post_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, char *file, unsigned int line) {
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    lcnt_thread_data_t__ *eltd = lcnt_get_thread_data__();
    erts_lcnt_lock_stats_t *stats;

    ASSERT(index < carrier->entry_count);

#ifdef LCNT_DEBUG_LOCK_FLOW
    if(!(info->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
        ASSERT(ethr_atomic_inc_read(&info->flowstate) == 1);
    }
#endif

    /* If the lock was in conflict, update the time spent waiting. */
    stats = lcnt_get_lock_stats__(info, file, line);

    if(eltd->timer_set) {
        erts_lcnt_time_t time_wait;
        erts_lcnt_time_t timer;

        lcnt_time__(&timer);

        lcnt_time_diff__(&time_wait, &timer, &eltd->timer);
        lcnt_update_stats__(stats, eltd->lock_in_conflict, &time_wait);

        eltd->timer_set--;

        ASSERT(eltd->timer_set >= 0);
    } else {
        lcnt_update_stats__(stats, eltd->lock_in_conflict, NULL);
    }
}

ERTS_GLB_INLINE
void erts_lcnt_unlock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
#ifdef LCNT_DEBUG_LOCK_FLOW
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    ASSERT(ethr_atomic_dec_read(&info->flowstate) == 0);
#endif

    ASSERT(index < carrier->entry_count);

    erts_lcnt_unlock_opt_idx(carrier, index, ERTS_LCNT_LO_WRITE);
}

ERTS_GLB_INLINE
void erts_lcnt_unlock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, Uint16 option) {
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    ASSERT(index < carrier->entry_count);

    ASSERT((option & ERTS_LCNT_LO_READ) || (option & ERTS_LCNT_LO_WRITE));

    if(option & ERTS_LCNT_LO_WRITE) {
        lcnt_dec_lock_state__(&info->w_state);
    }

    if(option & ERTS_LCNT_LO_READ) {
        lcnt_dec_lock_state__(&info->r_state);
    }
}
/* Backs out of a write acquisition that turned out not to be a real lock
 * operation (see erts_lcnt_lock_unacquire). */
ERTS_GLB_INLINE
void erts_lcnt_lock_unacquire_idx(erts_lcnt_lock_info_carrier_t *carrier, int index) {
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    ASSERT(index < carrier->entry_count);

    lcnt_dec_lock_state__(&info->w_state);
}

ERTS_GLB_INLINE
void erts_lcnt_trylock_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result) {
#ifdef LCNT_DEBUG_LOCK_FLOW
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    ASSERT(result == EBUSY || ethr_atomic_inc_read(&info->flowstate) == 1);
#endif

    ASSERT(index < carrier->entry_count);

    erts_lcnt_trylock_opt_idx(carrier, index, result, ERTS_LCNT_LO_WRITE);
}

/* Records a trylock outcome: on success the lock state is bumped and an
 * attempt is counted; on EBUSY both an attempt and a collision are counted
 * against the catch-all stats entry (slot 0). */
ERTS_GLB_INLINE
void erts_lcnt_trylock_opt_idx(erts_lcnt_lock_info_carrier_t *carrier, int index, int result, Uint16 option) {
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    ASSERT(index < carrier->entry_count);

    ASSERT((option & ERTS_LCNT_LO_READ) || (option & ERTS_LCNT_LO_WRITE));

    if(result != EBUSY) {
        if(option & ERTS_LCNT_LO_WRITE) {
            ethr_atomic_inc(&info->w_state);
        }

        if(option & ERTS_LCNT_LO_READ) {
            ethr_atomic_inc(&info->r_state);
        }

        lcnt_update_stats__(&info->location_stats[0], 0, NULL);
    } else {
        ethr_atomic_inc(&info->location_stats[0].attempts);
        ethr_atomic_inc(&info->location_stats[0].collisions);
    }
}

/* Sets the identity of an entry in a (not yet installed) carrier. */
ERTS_GLB_INLINE
void erts_lcnt_init_lock_info_x_idx(erts_lcnt_lock_info_carrier_t *carrier, int index,
    const char *name, Uint16 flag, Eterm id) {
    erts_lcnt_lock_info_t *info = &carrier->entries[index];

    ASSERT(is_immed(id));

    info->flag = flag;
    info->name = name;
    info->id = id;
}

ERTS_GLB_INLINE
void lcnt_retain_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
#ifdef DEBUG
    /* >= 2: a retain is only legal while someone else already holds a
     * reference, so the count can never go 0 -> 1 here. */
    ASSERT(ethr_atomic_inc_read_acqb(&carrier->ref_count) >= 2);
#else
    ethr_atomic_inc_acqb(&carrier->ref_count);
#endif
}

ERTS_GLB_INLINE
void lcnt_release_carrier__(erts_lcnt_lock_info_carrier_t *carrier) {
    ethr_sint_t count = ethr_atomic_dec_read_relb(&carrier->ref_count);

    ASSERT(count >= 0);

    /* Last reference out deallocates through the carrier's own hook. */
    if(count == 0) {
        carrier->deallocate(carrier);
    }
}

#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */

#endif /* ifdef ERTS_ENABLE_LOCK_COUNT */

#endif /* ifndef ERTS_LOCK_COUNT_H__ */