author     Björn-Egil Dahlberg <[email protected]>  2010-02-02 15:28:11 +0100
committer  Björn Gustavsson <[email protected]>      2010-02-08 18:05:13 +0100
commit     628aa0e68a0632905a28f66e71ae10fb15f12fbe (patch)
tree       6b0729873a43f1e99354013a06955624ee2c93b1 /erts/emulator/beam
parent     cfff4e99860a4d21a42645b20f76188dde704e54 (diff)
Add runtime option to enable/disable lcnt stats
Add erts_debug:lock_counters({copy_save, bool()}). This option enables or disables saving of statistics for destroyed processes and ETS tables. Enabling it may consume a lot of memory. Also add id numbering for lock classes, which is otherwise undefined.
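The new option is driven from the Erlang side through erts_debug:lock_counters/1. Below is a minimal usage sketch, not part of the commit; the module and function names are hypothetical, and it assumes an emulator built with lock counting so that erts_debug:lock_counters(enabled) returns true. Per the BIF code further down, the tuple calls return the previous setting of the option and clear returns ok.

    %% Hypothetical helper: run Fun with copy_save enabled so that statistics
    %% for processes and ETS tables destroyed during the run are kept, then
    %% restore the previous setting.
    -module(lcnt_copy_save).
    -export([with_copy_save/1]).

    with_copy_save(Fun) ->
        true = erts_debug:lock_counters(enabled),           %% lock counting compiled in?
        Prev = erts_debug:lock_counters({copy_save, true}), %% returns previous setting
        ok   = erts_debug:lock_counters(clear),             %% start from zeroed counters
        try
            Fun()
        after
            erts_debug:lock_counters({copy_save, Prev})
        end.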
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/erl_bif_info.c    |  92
-rw-r--r--  erts/emulator/beam/erl_db.c          |  13
-rw-r--r--  erts/emulator/beam/erl_db_hash.c     |   6
-rw-r--r--  erts/emulator/beam/erl_lock_count.c  |  58
-rw-r--r--  erts/emulator/beam/erl_lock_count.h  |  15
-rw-r--r--  erts/emulator/beam/erl_process.c     |   6
-rw-r--r--  erts/emulator/beam/io.c              |   2
7 files changed, 110 insertions(+), 82 deletions(-)
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 5ff1f794df..a34d400ed8 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3704,12 +3704,11 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
ASSERT(ltype);
type = am_atom_put(ltype, strlen(ltype));
-
name = am_atom_put(lock->name, strlen(lock->name));
if (lock->flag & ERTS_LCNT_LT_ALLOC) {
/* use allocator types names as id's for allocator locks */
- ltype = ERTS_ALC_A2AD(signed_val(lock->id));
+ ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
id = am_atom_put(ltype, strlen(ltype));
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
@@ -3778,17 +3777,28 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
{
#ifdef ERTS_ENABLE_LOCK_COUNT
Eterm res = NIL;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_block_system(0);
+#endif
+
+
+ if (BIF_ARG_1 == am_enabled) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ }
+#ifdef ERTS_ENABLE_LOCK_COUNT
- if (BIF_ARG_1 == am_info) {
+ else if (BIF_ARG_1 == am_info) {
erts_lcnt_data_t *data;
Uint hsize = 0;
Uint *szp;
Eterm* hp;
- erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
data = erts_lcnt_get_data();
/* calculate size */
@@ -3803,29 +3813,65 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
res = lcnt_build_result_term(&hp, NULL, data, res);
erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- goto done;
+ BIF_RET(res);
} else if (BIF_ARG_1 == am_clear) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+
erts_lcnt_clear_counters();
- res = am_ok;
- goto done;
+
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(am_ok);
} else if (is_tuple(BIF_ARG_1)) {
- Uint prev = 0;
Eterm* tp = tuple_val(BIF_ARG_1);
+
switch (arityval(tp[0])) {
case 2:
- if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
+ if (ERTS_IS_ATOM_STR("copy_save", tp[1])) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
if (tp[2] == am_true) {
- prev = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK);
- if (prev) res = am_true;
- else res = am_false;
- goto done;
+
+ res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
+
} else if (tp[2] == am_false) {
- prev = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_PROCLOCK);
- if (prev) res = am_true;
- else res = am_false;
- goto done;
+
+ res = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false;
+
+ } else {
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_ERROR(BIF_P, BADARG);
}
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(res);
+
+ } else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) {
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ if (tp[2] == am_true) {
+
+ res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
+
+ } else if (tp[2] == am_false) {
+
+ res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false;
+
+ } else {
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ erts_smp_release_system();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(res);
}
break;
@@ -3834,16 +3880,8 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
}
}
- erts_smp_release_system();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
#endif
BIF_ERROR(BIF_P, BADARG);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-done:
- erts_smp_release_system();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
-#endif
}
void
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index b02150008f..e07a76835b 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -261,13 +261,8 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
erts_refc_init(&tb->common.ref, 1);
erts_refc_init(&tb->common.fixref, 0);
#ifdef ERTS_SMP
-# ifdef ERTS_ENABLE_LOCK_COUNT
erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
-# else
- erts_smp_rwmtx_init(&tb->common.rwlock, rwname);
- erts_smp_mtx_init(&tb->common.fixlock, fixname);
-# endif
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
#endif
}
@@ -2597,19 +2592,11 @@ void init_db(void)
#ifdef ERTS_SMP
for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
-#else
- erts_smp_spinlock_init(&meta_main_tab_locks[i].lck, "meta_main_tab_slot");
-#endif
}
erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
-#else
- erts_smp_rwmtx_init(&meta_name_tab_rwlocks[i].lck, "meta_name_tab");
-#endif
}
#endif
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index dea45053df..dbe13204d9 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -628,11 +628,7 @@ int db_create_hash(Process *p, DbTable *tbl)
(DbTable *) tb,
sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- #ifdef ERTS_ENABLE_LOCK_COUNT
- erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", tb->common.the_name);
- #else
- erts_rwmtx_init(&tb->locks->lck_vec[i].lck, "db_hash_slot");
- #endif
+ erts_rwmtx_init_x(&tb->locks->lck_vec[i].lck, "db_hash_slot", make_small(i));
}
/* This important property is needed to guarantee that the buckets
* involved in a grow/shrink operation it protected by the same lock:
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 6211983f4b..32fddce38b 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -162,7 +162,6 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
long int colls, tries, w_state, r_state;
erts_lcnt_lock_stats_t *stats = NULL;
- float rate;
char *type;
int i;
@@ -170,8 +169,6 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
ethr_atomic_read(&lock->r_state, &r_state);
ethr_atomic_read(&lock->w_state, &w_state);
- if (tries > 0) rate = (float)(colls/(float)tries)*100;
- else rate = 0.0f;
if (lock->flag & flag) {
erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n",
@@ -181,26 +178,13 @@ static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char
w_state,
lock->id,
extra);
-
- for(i = 0; i < lock->n_stats; i++) {
- stats = &(lock->stats[i]);
- ethr_atomic_read(&stats->tries, &tries);
- ethr_atomic_read(&stats->colls, &colls);
- fprintf(stderr, "%69s:%5d [tries %9ld] [colls %9ld] [timer_n %8ld] [timer %4ld s %6ld us]\r\n",
- stats->file,
- stats->line,
- tries,
- colls,
- stats->timer_n,
- stats->timer.s,
- (unsigned long)stats->timer.ns/1000);
- }
- fprintf(stderr, "\r\n");
}
}
static void print_lock(erts_lcnt_lock_t *lock, char *action) {
- print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
+ if (strcmp(lock->name, "proc_main") == 0) {
+ print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
+ }
}
#endif
@@ -230,8 +214,8 @@ static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflic
ethr_atomic_inc(&stats->tries);
- /* beware of trylock */
- if (lock_in_conflict) ethr_atomic_inc(&stats->colls);
+ if (lock_in_conflict)
+ ethr_atomic_inc(&stats->colls);
if (time_wait) {
lcnt_time_add(&(stats->timer), time_wait);
@@ -366,6 +350,7 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
for (i = 0; i < ERTS_LCNT_MAX_LOCK_LOCATIONS; i++) {
lcnt_clear_stats(&lock->stats[i]);
}
+
erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
lcnt_unlock();
@@ -373,18 +358,20 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
erts_lcnt_lock_t *deleted_lock;
-
- /* copy structure and insert the copy */
- deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
lcnt_lock();
- memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
- deleted_lock->next = NULL;
- deleted_lock->prev = NULL;
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) {
+ /* copy structure and insert the copy */
+
+ deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
+ memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
- erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+ deleted_lock->next = NULL;
+ deleted_lock->prev = NULL;
+ erts_lcnt_list_insert(erts_lcnt_data->deleted_locks, deleted_lock);
+ }
/* delete original */
erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock);
@@ -416,9 +403,10 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
/* we cannot acquire w_lock if either w or r are taken */
/* we cannot acquire r_lock if w_lock is taken */
- if ((w_state > 0) || (r_state > 0)){
+ if ((w_state > 0) || (r_state > 0)) {
eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0) lcnt_time(&eltd->timer);
+ if (eltd->timer_set == 0)
+ lcnt_time(&eltd->timer);
eltd->timer_set++;
} else {
eltd->lock_in_conflict = 0;
@@ -445,7 +433,8 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
* 'atomicly'. All other locks will block the thread if w_state > 0
* i.e. locked.
*/
- if (eltd->timer_set == 0) lcnt_time(&eltd->timer);
+ if (eltd->timer_set == 0)
+ lcnt_time(&eltd->timer);
eltd->timer_set++;
} else {
@@ -494,24 +483,23 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
eltd = lcnt_get_thread_data();
ASSERT(eltd);
-
+
/* if lock was in conflict, time it */
stats = lcnt_get_lock_stats(lock, file, line);
if (eltd->timer_set) {
lcnt_time(&timer);
-
- eltd->timer_set--;
lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
+ eltd->timer_set--;
ASSERT(eltd->timer_set >= 0);
} else {
lcnt_update_stats(stats, eltd->lock_in_conflict, NULL);
}
-
+
}
/* unlock */
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 8564c36203..86c85226e4 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -39,6 +39,20 @@
* Each instance of a lock is the unique lock, i.e. set and id in that set.
* For each lock there is a set of statistics with where and what impact
* the lock aqusition had.
+ *
+ * Runtime options
+ * - suspend, used when internal lock-counting can't be applied. For instance
+ * when allocating a term for the outside and halloc needs to be used.
+ * Default: off.
+ * - location, reserved and not used.
+ * - proclock, disable proclock counting. Used when performance might be an
+ * issue. Accessible from erts_debug:lock_counters({process_locks, bool()}).
+ * Default: off.
+ * - copysave, enable saving of destroyed locks (and thereby their statistics).
+ *   If memory constraints are an issue, this needs to be disabled.
+ * Accessible from erts_debug:lock_counters({copy_save, bool()}).
+ * Default: off.
+ *
*/
#include "sys.h"
@@ -74,6 +88,7 @@
#define ERTS_LCNT_OPT_SUSPEND (((Uint16) 1) << 0)
#define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1)
#define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2)
+#define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 3)
typedef struct {
unsigned long s;
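As an illustration of the runtime options documented in the erl_lock_count.h comment above, a small helper (hypothetical, not part of OTP) could disable process-lock counting to reduce overhead, enable copy_save, and hand back the raw counter data. The exact shape of the term returned by erts_debug:lock_counters(info) is not spelled out here.

    %% Hypothetical helper tying the proclock and copysave options to the BIF.
    -module(lcnt_measure).
    -export([measure/1]).

    measure(Fun) ->
        erts_debug:lock_counters({process_locks, false}), %% proclock counting off
        erts_debug:lock_counters({copy_save, true}),      %% keep destroyed-lock stats
        ok = erts_debug:lock_counters(clear),
        Fun(),
        erts_debug:lock_counters(info).                   %% raw lock statistics term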
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 2789691c55..bf08bc7a86 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2095,7 +2095,11 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
rq->ix = ix;
erts_smp_atomic_init(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
- erts_smp_mtx_init(&rq->mtx, "run_queue");
+ /* make sure that the "extra" id corresponds to the scheduler's
+  * id if the esdp->no <-> ix+1 mapping changes.
+  */
+
+ erts_smp_mtx_init_x(&rq->mtx, "run_queue", make_small(ix + 1));
erts_smp_cnd_init(&rq->cnd);
erts_smp_atomic_init(&rq->spin_waiter, 0);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 61985271e6..34afe5656c 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1227,7 +1227,7 @@ void init_io(void)
erts_smp_atomic_init(&erts_port[i].refc, 0);
erts_port[i].lock = NULL;
erts_port[i].xports = NULL;
- erts_smp_spinlock_init(&erts_port[i].state_lck, "port_state");
+ erts_smp_spinlock_init_x(&erts_port[i].state_lck, "port_state", make_small(i));
#endif
erts_port[i].tracer_proc = NIL;
erts_port[i].trace_flags = 0;
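The id numbering added above (run queues, port state locks, ETS meta and slot locks) makes individual instances of a lock class distinguishable in the collected data. A sketch of how that might be inspected, assuming the lcnt tool from the tools application is available; the module and call sequence are illustrative, not part of this commit.

    %% Illustrative only: collect counters and inspect the run_queue class,
    %% whose instances are now identified by the scheduler index.
    -module(lcnt_ids).
    -export([inspect_run_queues/0]).

    inspect_run_queues() ->
        {ok, _} = lcnt:start(),
        lcnt:collect(),             %% snapshots erts_debug:lock_counters(info)
        lcnt:inspect(run_queue).    %% per-instance breakdown, keyed by the new ids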