Diffstat (limited to 'erts/emulator/beam/erl_db.c')
-rw-r--r--   erts/emulator/beam/erl_db.c   968
 1 file changed, 521 insertions(+), 447 deletions(-)
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 68d984014f..c009a3bde8 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2017. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -44,12 +44,40 @@
#include "erl_binary.h"
-erts_smp_atomic_t erts_ets_misc_mem_size;
+erts_atomic_t erts_ets_misc_mem_size;
/*
** Utility macros
*/
+#define DB_BIF_GET_TABLE(TB, WHAT, KIND, BIF_IX) \
+ DB_GET_TABLE(TB, BIF_ARG_1, WHAT, KIND, BIF_IX, NULL, BIF_P)
+
+#define DB_TRAP_GET_TABLE(TB, TID, WHAT, KIND, BIF_EXP) \
+ DB_GET_TABLE(TB, TID, WHAT, KIND, 0, BIF_EXP, BIF_P)
+
+#define DB_GET_TABLE(TB, TID, WHAT, KIND, BIF_IX, BIF_EXP, PROC) \
+do { \
+ Uint freason__; \
+ if (!(TB = db_get_table(PROC, TID, WHAT, KIND, &freason__))) { \
+ return db_bif_fail(PROC, freason__, BIF_IX, BIF_EXP); \
+ } \
+}while(0)
+
+static BIF_RETTYPE db_bif_fail(Process* p, Uint freason,
+ Uint bif_ix, Export* bif_exp)
+{
+ if (freason == TRAP) {
+ if (!bif_exp)
+ bif_exp = bif_export[bif_ix];
+ p->arity = bif_exp->info.mfa.arity;
+ p->i = (BeamInstr*) bif_exp->addressv[erts_active_code_ix()];
+ }
+ p->freason = freason;
+ return THE_NON_VALUE;
+}
+
+
/* Get a key from any table structure and a tagged object */
#define TERM_GETKEY(tb, obj) db_getkey((tb)->common.keypos, (obj))
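The macro trio above replaces the old "db_get_table() or BIF_ERROR(BADARG)" pattern used throughout this file. A minimal sketch of the intended call shape follows; the BIF name ets_example_1 and its BIF_ets_example_1 index are hypothetical, but the pattern mirrors the converted BIFs later in this patch. On failure db_get_table() now also reports a reason: BADARG raises as before, while TRAP makes db_bif_fail() point the process back at the BIF's export entry so it yields and re-executes the same call.

    /* Hypothetical BIF, illustration only */
    BIF_RETTYPE ets_example_1(BIF_ALIST_1)
    {
        DbTable* tb;

        /* Expands to db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ, &freason);
         * on failure it returns db_bif_fail(BIF_P, freason, BIF_ets_example_1, NULL),
         * which either raises BADARG or sets up a trap back to this BIF. */
        DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_example_1);

        /* ... use tb ... */

        db_unlock(tb, LCK_READ);
        BIF_RET(am_true);
    }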
@@ -61,15 +89,9 @@ enum DbIterSafety {
ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */
ITER_SAFE /* No need to fixate at all */
};
-#ifdef ERTS_SMP
# define ITERATION_SAFETY(Proc,Tab) \
((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
: (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
-#else
-# define ITERATION_SAFETY(Proc,Tab) \
- ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) \
- ? ITER_SAFE : ITER_SAFE_LOCKED)
-#endif
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
@@ -195,7 +217,7 @@ static void delete_sched_table(Process *c_p, DbTable *tb);
static void table_dec_refc(DbTable *tb, erts_aint_t min_val)
{
- if (erts_smp_refc_dectest(&tb->common.refc, min_val) == 0)
+ if (erts_refc_dectest(&tb->common.refc, min_val) == 0)
schedule_free_dbtable(tb);
}
@@ -209,21 +231,21 @@ static ERTS_INLINE void
make_btid(DbTable *tb)
{
Binary *btid = erts_create_magic_indirection(db_table_tid_destructor);
- erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
- erts_smp_atomic_init_nob(tbref, (erts_aint_t) tb);
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ erts_atomic_init_nob(tbref, (erts_aint_t) tb);
tb->common.btid = btid;
/*
* Table and magic indirection refer to each other,
* and table is referred once by being alive...
*/
- erts_smp_refc_init(&tb->common.refc, 2);
+ erts_refc_init(&tb->common.refc, 2);
erts_refc_inc(&btid->intern.refc, 1);
}
static ERTS_INLINE DbTable* btid2tab(Binary* btid)
{
- erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
- return (DbTable *) erts_smp_atomic_read_nob(tbref);
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ return (DbTable *) erts_atomic_read_nob(tbref);
}
static DbTable *
@@ -231,7 +253,7 @@ tid2tab(Eterm tid)
{
DbTable *tb;
Binary *btid;
- erts_smp_atomic_t *tbref;
+ erts_atomic_t *tbref;
if (!is_internal_magic_ref(tid))
return NULL;
@@ -239,8 +261,8 @@ tid2tab(Eterm tid)
if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor)
return NULL;
- tbref = erts_smp_binary_to_magic_indirection(btid);
- tb = (DbTable *) erts_smp_atomic_read_nob(tbref);
+ tbref = erts_binary_to_magic_indirection(btid);
+ tb = (DbTable *) erts_atomic_read_nob(tbref);
ASSERT(!tb || tb->common.btid == btid);
@@ -250,11 +272,11 @@ tid2tab(Eterm tid)
static ERTS_INLINE int
is_table_alive(DbTable *tb)
{
- erts_smp_atomic_t *tbref;
+ erts_atomic_t *tbref;
DbTable *rtb;
- tbref = erts_smp_binary_to_magic_indirection(tb->common.btid);
- rtb = (DbTable *) erts_smp_atomic_read_nob(tbref);
+ tbref = erts_binary_to_magic_indirection(tb->common.btid);
+ rtb = (DbTable *) erts_atomic_read_nob(tbref);
ASSERT(!rtb || rtb == tb);
@@ -264,11 +286,7 @@ is_table_alive(DbTable *tb)
static ERTS_INLINE int
is_table_named(DbTable *tb)
{
-#ifdef ERTS_SMP
return tb->common.type & DB_NAMED_TABLE;
-#else
- return tb->common.status & DB_NAMED_TABLE;
-#endif
}
@@ -277,8 +295,8 @@ tid_clear(Process *c_p, DbTable *tb)
{
DbTable *rtb;
Binary *btid = tb->common.btid;
- erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
- rtb = (DbTable *) erts_smp_atomic_xchg_nob(tbref, (erts_aint_t) NULL);
+ erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid);
+ rtb = (DbTable *) erts_atomic_xchg_nob(tbref, (erts_aint_t) NULL);
ASSERT(!rtb || tb == rtb);
if (rtb) {
table_dec_refc(tb, 1);
@@ -293,17 +311,22 @@ make_tid(Process *c_p, DbTable *tb)
return erts_mk_magic_ref(&hp, &c_p->off_heap, tb->common.btid);
}
+Eterm
+erts_db_make_tid(Process *c_p, DbTableCommon *tb)
+{
+ return make_tid(c_p, (DbTable*)tb);
+}
+
+
/*
** The meta hash table of all NAMED ets tables
*/
-#ifdef ERTS_SMP
-# define META_NAME_TAB_LOCK_CNT 16
+# define META_NAME_TAB_LOCK_CNT 256
union {
- erts_smp_rwmtx_t lck;
- byte _cache_line_alignment[64];
+ erts_rwmtx_t lck;
+ byte align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))];
}meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT];
-#endif
static struct meta_name_tab_entry {
union {
Eterm name_atom;
@@ -319,13 +342,11 @@ static unsigned meta_name_tab_mask;
static ERTS_INLINE
struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
- erts_smp_rwmtx_t** lockp)
+ erts_rwmtx_t** lockp)
{
unsigned bix = atom_val(name) & meta_name_tab_mask;
struct meta_name_tab_entry* bucket = &meta_name_tab[bix];
-#ifdef ERTS_SMP
*lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck;
-#endif
return bucket;
}
@@ -333,8 +354,7 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3, /* record write access */
- LCK_NONE=4
+ LCK_WRITE_REC=3 /* record write access */
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -344,9 +364,6 @@ int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
int erts_ets_always_compress;
static int db_max_tabs;
-static Eterm ms_delete_all;
-static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
- of all objects */
/*
** Forward decls, static functions
@@ -358,18 +375,19 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data);
static void free_heir_data(DbTable*);
static SWord free_fixations_locked(Process* p, DbTable *tb);
+static void delete_all_objects_continue(Process* p, DbTable* tb);
static SWord free_table_continue(Process *p, DbTable *tb, SWord reds);
static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb);
-static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1);
static Eterm table_info(Process* p, DbTable* tb, Eterm What);
-static BIF_RETTYPE ets_select1(Process* p, Eterm arg1);
-static BIF_RETTYPE ets_select2(Process* p, Eterm arg1, Eterm arg2);
-static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
+static BIF_RETTYPE ets_select1(Process* p, int bif_ix, Eterm arg1);
+static BIF_RETTYPE ets_select2(Process* p, DbTable*, Eterm tid, Eterm ms);
+static BIF_RETTYPE ets_select3(Process* p, DbTable*, Eterm tid, Eterm ms, Sint chunk_size);
/*
@@ -390,16 +408,14 @@ free_dbtable(void *vtb)
{
DbTable *tb = (DbTable *) vtb;
#ifdef HARDDEBUG
- if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
+ if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
- erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
+ erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
tb->common.fixations);
}
#endif
-#ifdef ERTS_SMP
- erts_smp_rwmtx_destroy(&tb->common.rwlock);
- erts_smp_mtx_destroy(&tb->common.fixlock);
-#endif
+ erts_rwmtx_destroy(&tb->common.rwlock);
+ erts_mtx_destroy(&tb->common.fixlock);
ASSERT(is_immed(tb->common.heir_data));
if (tb->common.btid)
@@ -419,8 +435,8 @@ static void schedule_free_dbtable(DbTable* tb)
* Caller is *not* allowed to access the specialized part
* (hash or tree) of *tb after this function has returned.
*/
- ASSERT(erts_smp_refc_read(&tb->common.refc, 0) == 0);
- ASSERT(erts_smp_refc_read(&tb->common.fix_count, 0) == 0);
+ ASSERT(erts_refc_read(&tb->common.refc, 0) == 0);
+ ASSERT(erts_refc_read(&tb->common.fix_count, 0) == 0);
erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
(void *) tb,
&tb->release.data,
@@ -434,8 +450,8 @@ save_sched_table(Process *c_p, DbTable *tb)
DbTable *first;
ASSERT(esdp);
- esdp->ets_tables.count++;
- erts_smp_refc_inc(&tb->common.refc, 1);
+ erts_atomic_inc_nob(&esdp->ets_tables.count);
+ erts_refc_inc(&tb->common.refc, 1);
first = esdp->ets_tables.clist;
if (!first) {
@@ -458,8 +474,8 @@ remove_sched_table(ErtsSchedulerData *esdp, DbTable *tb)
ASSERT(erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid))
== (Uint32) esdp->no);
- ASSERT(esdp->ets_tables.count > 0);
- esdp->ets_tables.count--;
+ ASSERT(erts_atomic_read_nob(&esdp->ets_tables.count) > 0);
+ erts_atomic_dec_nob(&esdp->ets_tables.count);
eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
if (eaydp->ongoing) {
@@ -525,11 +541,11 @@ save_owned_table(Process *c_p, DbTable *tb)
{
DbTable *first;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
- erts_smp_refc_inc(&tb->common.refc, 1);
+ erts_refc_inc(&tb->common.refc, 1);
if (!first) {
tb->common.owned.next = tb->common.owned.prev = tb;
@@ -541,13 +557,13 @@ save_owned_table(Process *c_p, DbTable *tb)
tb->common.owned.prev->common.owned.next = tb;
first->common.owned.prev = tb;
}
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
}
static ERTS_INLINE void
delete_owned_table(Process *p, DbTable *tb)
{
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
if (tb->common.owned.next == tb) {
DbTable* old;
ASSERT(tb->common.owned.prev == tb);
@@ -570,38 +586,33 @@ delete_owned_table(Process *p, DbTable *tb)
if (tb == first)
erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
table_dec_refc(tb, 1);
}
static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
{
-#ifdef ERTS_SMP
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
if (use_frequent_read_lock)
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
-#endif
-#ifdef ERTS_SMP
- erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
+ erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
- erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix",
+ erts_mtx_init(&tb->common.fixlock, "db_tab_fix",
tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
-#endif
}
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
-#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
tb->common.is_thread_safe = 1;
} else {
- erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ erts_rwmtx_rlock(&tb->common.rwlock);
ASSERT(!tb->common.is_thread_safe);
}
}
@@ -610,14 +621,13 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
switch (kind) {
case LCK_WRITE:
case LCK_WRITE_REC:
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
break;
default:
- erts_smp_rwmtx_rlock(&tb->common.rwlock);
+ erts_rwmtx_rlock(&tb->common.rwlock);
}
ASSERT(tb->common.is_thread_safe);
}
-#endif
}
static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
@@ -627,16 +637,15 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
* DbTable structure. That is, ONLY the SMP case is allowed
* to follow the tb pointer!
*/
-#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
ASSERT(tb->common.is_thread_safe);
tb->common.is_thread_safe = 0;
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
}
else {
ASSERT(!tb->common.is_thread_safe);
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
}
}
else {
@@ -644,13 +653,39 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
switch (kind) {
case LCK_WRITE:
case LCK_WRITE_REC:
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+ erts_rwmtx_rwunlock(&tb->common.rwlock);
break;
default:
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
}
}
-#endif
+}
+
+static ERTS_INLINE int db_is_exclusive(DbTable* tb, db_lock_kind_t kind)
+{
+ return kind != LCK_READ && tb->common.is_thread_safe;
+}
+
+static DbTable* handle_lacking_permission(Process* p, DbTable* tb,
+ db_lock_kind_t kind,
+ Uint* freason_p)
+{
+ if (tb->common.status & DB_BUSY) {
+ if (!db_is_exclusive(tb, kind)) {
+ db_unlock(tb, kind);
+ db_lock(tb, LCK_WRITE);
+ }
+ delete_all_objects_continue(p, tb);
+ db_unlock(tb, LCK_WRITE);
+ tb = NULL;
+ *freason_p = TRAP;
+ }
+ else if (p->common.id != tb->common.owner) {
+ db_unlock(tb, kind);
+ tb = NULL;
+ *freason_p = BADARG;
+ }
+ return tb;
}
static ERTS_INLINE
@@ -658,10 +693,10 @@ DbTable* db_get_table_aux(Process *p,
Eterm id,
int what,
db_lock_kind_t kind,
- int meta_already_locked)
+ int meta_already_locked,
+ Uint* freason_p)
{
DbTable *tb;
- erts_smp_rwmtx_t *mtl = NULL;
/*
* IMPORTANT: Only scheduler threads are allowed
@@ -671,13 +706,13 @@ DbTable* db_get_table_aux(Process *p,
ASSERT(erts_get_scheduler_data());
if (is_atom(id)) {
+ erts_rwmtx_t *mtl;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
- erts_smp_rwmtx_rlock(mtl);
+ erts_rwmtx_rlock(mtl);
else{
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
|| erts_lc_rwmtx_is_rwlocked(mtl));
- mtl = NULL;
}
tb = NULL;
if (bucket->pu.tb != NULL) {
@@ -696,20 +731,29 @@ DbTable* db_get_table_aux(Process *p,
}
}
}
+ if (!meta_already_locked)
+ erts_rwmtx_runlock(mtl);
}
else
tb = tid2tab(id);
if (tb) {
db_lock(tb, kind);
- if ((tb->common.status & what) == 0
- && p->common.id != tb->common.owner) {
- db_unlock(tb, kind);
- tb = NULL;
- }
+#ifdef ETS_DBG_FORCE_TRAP
+ if (erts_atomic_read_nob(&tb->common.dbg_force_trap) &&
+ erts_atomic_add_read_nob(&tb->common.dbg_force_trap, 2) & 2) {
+ db_unlock(tb, kind);
+ tb = NULL;
+ *freason_p = TRAP;
+ }
+ else
+#endif
+ if (ERTS_UNLIKELY(!(tb->common.status & what)))
+ tb = handle_lacking_permission(p, tb, kind, freason_p);
}
- if (mtl)
- erts_smp_rwmtx_runlock(mtl);
+ else
+ *freason_p = BADARG;
+
return tb;
}
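The ETS_DBG_FORCE_TRAP branch above appears to force every other lookup to pretend it must trap, which exercises the new yielding paths in debug builds. A standalone sketch of the counter arithmetic (plain C, outside ERTS; the real initial value is whatever erts_ets_dbg_force_trap holds, 1 here only for the demo):

    #include <stdio.h>

    int main(void)
    {
        long dbg_force_trap = 1;            /* nonzero: feature enabled */
        for (int call = 1; call <= 6; call++) {
            dbg_force_trap += 2;            /* erts_atomic_add_read_nob(.., 2) */
            printf("lookup %d: %s\n", call,
                   (dbg_force_trap & 2) ? "forced trap" : "proceed");
        }
        return 0;                           /* bit 1 flips on every call */
    }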
@@ -717,20 +761,21 @@ static ERTS_INLINE
DbTable* db_get_table(Process *p,
Eterm id,
int what,
- db_lock_kind_t kind)
+ db_lock_kind_t kind,
+ Uint* freason_p)
{
- return db_get_table_aux(p, id, what, kind, 0);
+ return db_get_table_aux(p, id, what, kind, 0, freason_p);
}
static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
- erts_smp_rwmtx_t* rwlock;
+ erts_rwmtx_t* rwlock;
struct meta_name_tab_entry* new_entry;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
if (!have_lock)
- erts_smp_rwmtx_rwlock(rwlock);
+ erts_rwmtx_rwlock(rwlock);
if (bucket->pu.tb == NULL) { /* empty */
new_entry = bucket;
@@ -778,27 +823,25 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
done:
if (!have_lock)
- erts_smp_rwmtx_rwunlock(rwlock);
+ erts_rwmtx_rwunlock(rwlock);
return ret;
}
static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
- erts_smp_rwmtx_t* rwlock;
+ erts_rwmtx_t* rwlock;
Eterm name_atom = tb->common.the_name;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
ASSERT(is_table_named(tb));
-#ifdef ERTS_SMP
- if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+ if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwlock(rwlock);
+ erts_rwmtx_rwlock(rwlock);
db_lock(tb, LCK_WRITE);
}
-#endif
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
if (bucket->pu.tb == NULL) {
goto done;
@@ -851,7 +894,7 @@ static int remove_named_tab(DbTable *tb, int have_lock)
done:
if (!have_lock)
- erts_smp_rwmtx_rwunlock(rwlock);
+ erts_rwmtx_rwunlock(rwlock);
return ret;
}
@@ -860,11 +903,11 @@ done:
*/
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
- erts_smp_refc_inc(&tb->common.fix_count, 1);
+ erts_refc_inc(&tb->common.fix_count, 1);
}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
{
- if (erts_smp_refc_dectest(&tb->common.fix_count, 0) == 0) {
+ if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
}
@@ -887,9 +930,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
#endif
kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, kind, BIF_ets_safe_fixtable_2);
if (BIF_ARG_2 == am_true) {
fix_table_locked(BIF_P, tb);
@@ -919,11 +960,7 @@ BIF_RETTYPE ets_first_1(BIF_ALIST_1)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_first_1);
cret = tb->common.meth->db_first(BIF_P, tb, &ret);
@@ -946,11 +983,7 @@ BIF_RETTYPE ets_next_2(BIF_ALIST_2)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_next_2);
cret = tb->common.meth->db_next(BIF_P, tb, BIF_ARG_2, &ret);
@@ -973,11 +1006,7 @@ BIF_RETTYPE ets_last_1(BIF_ALIST_1)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_last_1);
cret = tb->common.meth->db_last(BIF_P, tb, &ret);
@@ -1000,11 +1029,7 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_prev_2);
cret = tb->common.meth->db_prev(BIF_P,tb,BIF_ARG_2,&ret);
@@ -1022,21 +1047,15 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
BIF_RETTYPE ets_take_2(BIF_ALIST_2)
{
DbTable* tb;
-#ifdef DEBUG
int cret;
-#endif
Eterm ret;
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC);
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
-#ifdef DEBUG
- cret =
-#endif
- tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
- ASSERT(cret == DB_ERROR_NONE);
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_take_2);
+
+ cret = tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
+
+ ASSERT(cret == DB_ERROR_NONE); (void)cret;
db_unlock(tb, LCK_WRITE_REC);
BIF_RET(ret);
}
@@ -1054,9 +1073,8 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
DeclareTmpHeap(cell,2,BIF_P);
DbUpdateHandle handle;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_element_3);
+
UseTmpHeap(2,BIF_P);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
goto bail_out;
@@ -1127,9 +1145,9 @@ bail_out:
}
static BIF_RETTYPE
-do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
+do_update_counter(Process *p, DbTable* tb,
+ Eterm arg2, Eterm arg3, Eterm arg4)
{
- DbTable* tb;
int cret = DB_ERROR_BADITEM;
Eterm upop_list;
int list_size;
@@ -1145,10 +1163,6 @@ do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
Eterm* hstart;
Eterm* hend;
- if ((tb = db_get_table(p, arg1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
-
UseTmpHeap(5, p);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
@@ -1322,7 +1336,11 @@ bail_out:
*/
BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
{
- return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
+ DbTable* tb;
+
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_counter_3);
+
+ return do_update_counter(BIF_P, tb, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
}
/*
@@ -1334,10 +1352,14 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
*/
BIF_RETTYPE ets_update_counter_4(BIF_ALIST_4)
{
+ DbTable* tb;
+
if (is_not_tuple(BIF_ARG_4)) {
BIF_ERROR(BIF_P, BADARG);
}
- return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_counter_4);
+
+ return do_update_counter(BIF_P, tb, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
}
@@ -1358,9 +1380,8 @@ BIF_RETTYPE ets_insert_2(BIF_ALIST_2)
kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL)
? LCK_WRITE : LCK_WRITE_REC);
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_2);
+
if (BIF_ARG_2 == NIL) {
db_unlock(tb, kind);
BIF_RET(am_true);
@@ -1426,11 +1447,9 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
/* More than one object, use LCK_WRITE to keep atomicity */
kind = LCK_WRITE;
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind);
- if (tb == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
- meth = tb->common.meth;
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_new_2);
+
+ meth = tb->common.meth;
for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
if (is_not_tuple(CAR(list_val(lst)))
|| (arityval(*tuple_val(CAR(list_val(lst))))
@@ -1465,9 +1484,8 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
/* Only one object (or NIL)
*/
kind = LCK_WRITE_REC;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_new_2);
+
if (BIF_ARG_2 == NIL) {
db_unlock(tb, kind);
BIF_RET(am_true);
@@ -1505,7 +1523,8 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
DbTable* tb;
Eterm ret;
Eterm old_name;
- erts_smp_rwmtx_t *lck1, *lck2;
+ erts_rwmtx_t *lck1, *lck2;
+ Uint freason;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1528,7 +1547,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
if (lck1 == lck2)
lck2 = NULL;
else if (lck1 > lck2) {
- erts_smp_rwmtx_t *tmp = lck1;
+ erts_rwmtx_t *tmp = lck1;
lck1 = lck2;
lck2 = tmp;
}
@@ -1546,13 +1565,13 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
}
}
- erts_smp_rwmtx_rwlock(lck1);
+ erts_rwmtx_rwlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwlock(lck2);
+ erts_rwmtx_rwlock(lck2);
- tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1, &freason);
if (!tb)
- goto badarg;
+ goto fail;
if (is_table_named(tb)) {
if (!insert_named_tab(BIF_ARG_2, tb, 1))
@@ -1568,17 +1587,22 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
tb->common.the_name = BIF_ARG_2;
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwunlock(lck1);
+ erts_rwmtx_rwunlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwunlock(lck2);
+ erts_rwmtx_rwunlock(lck2);
BIF_RET(ret);
- badarg:
+
+badarg:
+ freason = BADARG;
+
+fail:
if (tb)
db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwunlock(lck1);
+ erts_rwmtx_rwunlock(lck1);
if (lck2)
- erts_smp_rwmtx_rwunlock(lck2);
- BIF_ERROR(BIF_P, BADARG);
+ erts_rwmtx_rwunlock(lck2);
+
+ return db_bif_fail(BIF_P, freason, BIF_ets_rename_2, NULL);
}
@@ -1598,12 +1622,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Uint32 status;
Sint keypos;
int is_named, is_compressed;
-#ifdef ERTS_SMP
int is_fine_locked, frequent_read;
-#endif
-#ifdef DEBUG
int cret;
-#endif
DbTableMethod* meth;
if (is_not_atom(BIF_ARG_1)) {
@@ -1616,10 +1636,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
status = DB_SET | DB_PROTECTED;
keypos = 1;
is_named = 0;
-#ifdef ERTS_SMP
is_fine_locked = 0;
frequent_read = 0;
-#endif
heir = am_none;
heir_data = (UWord) am_undefined;
is_compressed = erts_ets_always_compress;
@@ -1647,30 +1665,18 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = signed_val(tp[2]);
}
else if (tp[1] == am_write_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
is_fine_locked = 1;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_read_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
frequent_read = 1;
} else if (tp[2] == am_false) {
frequent_read = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_heir && tp[2] == am_none) {
@@ -1712,11 +1718,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
if (IS_HASH_TABLE(status)) {
meth = &db_hash;
-#ifdef ERTS_SMP
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
}
-#endif
}
else if (IS_TREE_TABLE(status)) {
meth = &db_tree;
@@ -1725,10 +1729,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-#ifdef ERTS_SMP
if (frequent_read && !(status & DB_PRIVATE))
status |= DB_FREQ_READ;
-#endif
/* we create table outside any table lock
* and take the unusual cost of destroying the table if it
@@ -1737,36 +1739,34 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable init_tb;
- erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
+ erts_atomic_init_nob(&init_tb.common.memory_size, 0);
tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb, sizeof(DbTable));
- erts_smp_atomic_init_nob(&tb->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
+ erts_atomic_init_nob(&tb->common.memory_size,
+ erts_atomic_read_nob(&init_tb.common.memory_size));
}
tb->common.meth = meth;
tb->common.the_name = BIF_ARG_1;
tb->common.status = status;
-#ifdef ERTS_SMP
- tb->common.type = status & ERTS_ETS_TABLE_TYPES;
+ tb->common.type = status;
/* Note, 'type' is *read only* from now on... */
-#endif
- erts_smp_refc_init(&tb->common.fix_count, 0);
+ erts_refc_init(&tb->common.fix_count, 0);
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
tb->common.keypos = keypos;
tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
- erts_smp_atomic_init_nob(&tb->common.nitems, 0);
+ erts_atomic_init_nob(&tb->common.nitems, 0);
tb->common.fixing_procs = NULL;
tb->common.compress = is_compressed;
-
-#ifdef DEBUG
- cret =
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_atomic_init_nob(&tb->common.dbg_force_trap, erts_ets_dbg_force_trap);
#endif
- meth->db_create(BIF_P, tb);
- ASSERT(cret == DB_ERROR_NONE);
+
+ cret = meth->db_create(BIF_P, tb);
+ ASSERT(cret == DB_ERROR_NONE); (void)cret;
make_btid(tb);
@@ -1776,20 +1776,21 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
ret = make_tid(BIF_P, tb);
save_sched_table(BIF_P, tb);
+ save_owned_table(BIF_P, tb);
if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
tid_clear(BIF_P, tb);
+ delete_owned_table(BIF_P, tb);
db_lock(tb,LCK_WRITE);
free_heir_data(tb);
- tb->common.meth->db_free_table(tb);
+ tb->common.meth->db_free_empty_table(tb);
db_unlock(tb,LCK_WRITE);
table_dec_refc(tb, 0);
BIF_ERROR(BIF_P, BADARG);
}
BIF_P->flags |= F_USING_DB; /* So we can remove tb if p dies */
- save_owned_table(BIF_P, tb);
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1801,6 +1802,34 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_RET(ret);
}
+/*
+** Retrieves the tid() of a named ets table.
+*/
+BIF_RETTYPE ets_whereis_1(BIF_ALIST_1)
+{
+ DbTable* tb;
+ Eterm res;
+ Uint freason;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG)
+ BIF_RET(am_undefined);
+ else {
+ /* ToDo: Could we avoid this? */
+ return db_bif_fail(BIF_P, freason, BIF_ets_whereis_1, NULL);
+ }
+ }
+
+ res = make_tid(BIF_P, tb);
+ db_unlock(tb, LCK_READ);
+
+ BIF_RET(res);
+}
+
/*
** The lookup BIF
*/
@@ -1812,9 +1841,7 @@ BIF_RETTYPE ets_lookup_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_lookup_2);
cret = tb->common.meth->db_get(BIF_P, tb, BIF_ARG_2, &ret);
@@ -1842,9 +1869,7 @@ BIF_RETTYPE ets_member_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_member_2);
cret = tb->common.meth->db_member(tb, BIF_ARG_2, &ret);
@@ -1875,9 +1900,7 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_lookup_element_3);
if (is_not_small(BIF_ARG_3) || ((index = signed_val(BIF_ARG_3)) < 1)) {
db_unlock(tb, LCK_READ);
@@ -1915,9 +1938,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_delete_1);
/*
* Clear all access bits to prevent any ets operation to access the
@@ -1940,7 +1961,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
* Process 'rp' might be exiting, but our table lock prevents it
* from terminating as it cannot complete erts_db_process_exiting().
*/
- ASSERT(!(ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)));
+ ASSERT(!(ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)));
delete_owned_table(rp, tb);
BIF_P->flags |= F_USING_DB;
@@ -1959,7 +1980,8 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tid_clear(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
- if (free_table_continue(BIF_P, tb, reds) < 0) {
+ reds = free_table_continue(BIF_P, tb, reds);
+ if (reds < 0) {
/*
* Package the DbTable* pointer into a bignum so that it can be safely
* passed through a trap. We used to pass the DbTable* pointer directly
@@ -1988,6 +2010,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
Eterm to_pid = BIF_ARG_2;
Eterm from_pid;
DbTable* tb = NULL;
+ Uint freason;
if (!is_internal_pid(to_pid)) {
goto badarg;
@@ -1997,10 +2020,11 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
goto badarg;
}
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->common.id) {
- goto badarg;
- }
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, &freason)) == NULL)
+ goto fail;
+ if (tb->common.owner != BIF_P->common.id)
+ goto badarg;
+
from_pid = tb->common.owner;
if (to_pid == from_pid) {
goto badarg; /* or should we be idempotent? return false maybe */
@@ -2014,14 +2038,17 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
db_unlock(tb,LCK_WRITE);
send_ets_transfer_message(BIF_P, to_proc, &to_locks,
tb, BIF_ARG_3);
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
UnUseTmpHeap(5,BIF_P);
BIF_RET(am_true);
badarg:
- if (to_proc != NULL && to_proc != BIF_P) erts_smp_proc_unlock(to_proc, to_locks);
+ freason = BADARG;
+fail:
+ if (to_proc != NULL && to_proc != BIF_P) erts_proc_unlock(to_proc, to_locks);
if (tb != NULL) db_unlock(tb, LCK_WRITE);
- BIF_ERROR(BIF_P, BADARG);
+
+ return db_bif_fail(BIF_P, freason, BIF_ets_give_away_3, NULL);
}
BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
@@ -2072,11 +2099,13 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
}
}
- if (tail != NIL
- || (tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->common.id) {
+ if (tail != NIL)
+ goto badarg;
+
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_setopts_2);
+
+ if (tb->common.owner != BIF_P->common.id)
goto badarg;
- }
if (heir_data != THE_NON_VALUE) {
free_heir_data(tb);
@@ -2100,23 +2129,84 @@ badarg:
}
/*
-** BIF to erase a whole table and release all memory it holds
-*/
-BIF_RETTYPE ets_delete_all_objects_1(BIF_ALIST_1)
+ * Common for delete_all_objects and select_delete(DeleteAll).
+ */
+BIF_RETTYPE ets_internal_delete_all_2(BIF_ALIST_2)
{
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
+ Eterm nitems;
DbTable* tb;
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_internal_delete_all_2);
+
+ if (BIF_ARG_2 == am_undefined) {
+ nitems = erts_make_integer(erts_atomic_read_nob(&tb->common.nitems),
+ BIF_P);
- tb->common.meth->db_delete_all_objects(BIF_P, tb);
+ reds = tb->common.meth->db_delete_all_objects(BIF_P, tb, reds);
+
+ ASSERT(!(tb->common.status & DB_BUSY));
+
+ if (reds < 0) {
+ /*
+ * Oboy, need to trap AND need to be atomic.
+ * Solved by cooperative trapping where every process trying to
+ * access this table (including this process) will "fail" to lookup
+ * the table and instead pitch in deleting objects
+ * (in delete_all_objects_continue) and then trap to self.
+ */
+ ASSERT((tb->common.status & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC))
+ ==
+ (tb->common.type & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC)));
+ tb->common.status &= ~(DB_PRIVATE|DB_PROTECTED|DB_PUBLIC);
+ tb->common.status |= DB_BUSY;
+ db_unlock(tb, LCK_WRITE);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_ets_internal_delete_all_2], BIF_P,
+ BIF_ARG_1, nitems);
+ }
+ else {
+ /* Done, no trapping needed */
+ BUMP_REDS(BIF_P, (initial_reds - reds));
+ }
+
+ }
+ else {
+ /*
+ * The table lookup succeeded and second argument is nitems
+ * and not 'undefined', which means we have trapped at least once
+ * and are now done.
+ */
+ nitems = BIF_ARG_2;
+ }
db_unlock(tb, LCK_WRITE);
+ BIF_RET(nitems);
+}
- BIF_RET(am_true);
+static void delete_all_objects_continue(Process* p, DbTable* tb)
+{
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(p);
+ SWord reds = initial_reds;
+
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+
+ if ((tb->common.status & (DB_DELETE|DB_BUSY)) != DB_BUSY)
+ return;
+
+ reds = tb->common.meth->db_delete_all_objects(p, tb, reds);
+
+ if (reds < 0) {
+ BUMP_ALL_REDS(p);
+ }
+ else {
+ tb->common.status |= tb->common.type & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC);
+ tb->common.status &= ~DB_BUSY;
+ BUMP_REDS(p, (initial_reds - reds));
+ }
}
/*
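Read together with handle_lacking_permission() earlier in the patch: while DB_BUSY is set, the access bits in 'status' are cleared, so every db_get_table() call on the table (from any process, including the one that started the delete) falls into that helper, deletes some objects on its own reduction budget, and traps. A condensed sketch of what such a caller ends up doing (illustrative only; it restates the two pieces above rather than adding new behaviour):

    /* Illustrative restatement, not part of the patch */
    static DbTable* help_delete_all_and_trap(Process* p, DbTable* tb,
                                             db_lock_kind_t kind, Uint* freason_p)
    {
        if (!db_is_exclusive(tb, kind)) {    /* deletion needs the write lock */
            db_unlock(tb, kind);
            db_lock(tb, LCK_WRITE);
        }
        delete_all_objects_continue(p, tb);  /* spend this caller's reductions */
        db_unlock(tb, LCK_WRITE);
        *freason_p = TRAP;                   /* caller yields and retries its BIF */
        return NULL;
    }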
@@ -2132,9 +2222,7 @@ BIF_RETTYPE ets_delete_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_delete_2);
cret = tb->common.meth->db_erase(tb,BIF_ARG_2,&ret);
@@ -2161,9 +2249,8 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_delete_object_2);
+
if (is_not_tuple(BIF_ARG_2) ||
(arityval(*tuple_val(BIF_ARG_2)) < tb->common.keypos)) {
db_unlock(tb, LCK_WRITE_REC);
@@ -2186,7 +2273,7 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
/*
** This is for trapping, cannot be called directly.
*/
-static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
+static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1)
{
Process *p = BIF_P;
Eterm a1 = BIF_ARG_1;
@@ -2196,15 +2283,14 @@ static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
Eterm ret;
Eterm *tptr;
db_lock_kind_t kind = LCK_WRITE_REC;
-
+
CHECK_TABLES();
ASSERT(is_tuple(a1));
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
- BIF_ERROR(p,BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
+ &ets_select_delete_continue_exp);
cret = tb->common.meth->db_select_delete_continue(p,tb,a1,&ret);
@@ -2228,7 +2314,10 @@ static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
}
-BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
+/*
+ * ets:select_delete/2 without special case for "delete-all".
+ */
+BIF_RETTYPE ets_internal_select_delete_2(BIF_ALIST_2)
{
BIF_RETTYPE result;
DbTable* tb;
@@ -2238,20 +2327,8 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
CHECK_TABLES();
- if(eq(BIF_ARG_2, ms_delete_all)) {
- int nitems;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
- nitems = erts_smp_atomic_read_nob(&tb->common.nitems);
- tb->common.meth->db_delete_all_objects(BIF_P, tb);
- db_unlock(tb, LCK_WRITE);
- BIF_RET(erts_make_integer(nitems,BIF_P));
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_internal_select_delete_2);
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
@@ -2294,7 +2371,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
*/
struct ErtsEtsAllReq_ {
- erts_smp_atomic32_t refc;
+ erts_atomic32_t refc;
Process *proc;
ErtsOIRefStorage ref;
ErtsEtsAllReqList list[1]; /* one per scheduler */
@@ -2368,7 +2445,7 @@ ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp,
ASSERT(!*tablepp);
/* Max heap size needed... */
- sz = esdp->ets_tables.count;
+ sz = erts_atomic_read_nob(&esdp->ets_tables.count);
sz *= ERTS_MAGIC_REF_THING_SIZE + 2;
sz += 3 + ERTS_REF_THING_SIZE;
hfragp = new_message_buffer(sz);
@@ -2427,7 +2504,7 @@ ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp,
erts_proc_dec_refc(reqp->proc);
- if (erts_smp_atomic32_dec_read_nob(&reqp->refc) == 0)
+ if (erts_atomic32_dec_read_nob(&reqp->refc) == 0)
erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp);
*reqpp = NULL;
@@ -2452,7 +2529,8 @@ erts_handle_yielded_ets_all_request(ErtsSchedulerData *esdp,
if (!eaydp->queue)
return 0; /* All work completed! */
- if (yc < ERTS_ETS_ALL_TB_YCNT_START && yc > esdp->ets_tables.count)
+ if (yc < ERTS_ETS_ALL_TB_YCNT_START &&
+ yc > erts_atomic_read_nob(&esdp->ets_tables.count))
return 1; /* Yield! */
eaydp->ongoing = ongoing = eaydp->queue;
@@ -2515,25 +2593,22 @@ BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0)
Eterm ref = erts_make_ref(BIF_P);
ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ,
ERTS_ETS_ALL_REQ_SIZE);
- erts_smp_atomic32_init_nob(&req->refc,
+ erts_atomic32_init_nob(&req->refc,
(erts_aint32_t) erts_no_schedulers);
erts_oiref_storage_save(&req->ref, ref);
req->proc = BIF_P;
erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
handle_ets_all_request,
(void *) req);
-#endif
handle_ets_all_request((void *) req);
BIF_RET(ref);
}
-
/*
** db_slot(Db, Slot) -> [Items].
*/
@@ -2545,9 +2620,8 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_slot_2);
+
/* The slot number is checked in table specific code. */
cret = tb->common.meth->db_slot(BIF_P, tb, BIF_ARG_2, &ret);
db_unlock(tb, LCK_READ);
@@ -2567,41 +2641,53 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
BIF_RETTYPE ets_match_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_match_1, BIF_ARG_1);
}
BIF_RETTYPE ets_match_2(BIF_ALIST_2)
{
+ DbTable* tb;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_2);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, tb, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
BIF_RETTYPE ets_match_3(BIF_ALIST_3)
{
+ DbTable* tb;
Eterm ms;
+ Sint chunk_size;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_3);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, tb, BIF_ARG_1, ms, chunk_size);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2609,34 +2695,35 @@ BIF_RETTYPE ets_match_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_3(BIF_ALIST_3)
{
- return ets_select3(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ DbTable* tb;
+ Sint chunk_size;
+
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_3);
+
+ return ets_select3(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, chunk_size);
}
static BIF_RETTYPE
-ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
+ets_select3(Process* p, DbTable* tb, Eterm tid, Eterm ms, Sint chunk_size)
{
BIF_RETTYPE result;
- DbTable* tb;
int cret;
Eterm ret;
- Sint chunk_size;
enum DbIterSafety safety;
CHECK_TABLES();
- /* Chunk size strictly greater than 0 */
- if (is_not_small(arg3) || (chunk_size = signed_val(arg3)) <= 0) {
- BIF_ERROR(p, BADARG);
- }
- if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(p, tb, arg1,
- arg2, chunk_size,
+ cret = tb->common.meth->db_select_chunk(p, tb, tid,
+ ms, chunk_size,
0 /* not reversed */,
&ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
@@ -2682,9 +2769,8 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
+ &ets_select_continue_exp);
cret = tb->common.meth->db_select_continue(p, tb, a1,
&ret);
@@ -2714,10 +2800,10 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
BIF_RETTYPE ets_select_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_select_1, BIF_ARG_1);
}
-static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
+static BIF_RETTYPE ets_select1(Process *p, int bif_ix, Eterm arg1)
{
BIF_RETTYPE result;
DbTable* tb;
@@ -2739,10 +2825,10 @@ static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
BIF_ERROR(p, BADARG);
}
tptr = tuple_val(arg1);
- if (arityval(*tptr) < 1 ||
- (tb = db_get_table(p, tptr[1], DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+ if (arityval(*tptr) < 1)
+ BIF_ERROR(p, BADARG);
+
+ DB_GET_TABLE(tb, tptr[1], DB_READ, LCK_READ, bif_ix, NULL, p);
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
@@ -2778,33 +2864,27 @@ static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
BIF_RETTYPE ets_select_2(BIF_ALIST_2)
{
- return ets_select2(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ DbTable* tb;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_2);
+ return ets_select2(BIF_P, tb, BIF_ARG_1, BIF_ARG_2);
}
static BIF_RETTYPE
-ets_select2(Process* p, Eterm arg1, Eterm arg2)
+ets_select2(Process* p, DbTable* tb, Eterm tid, Eterm ms)
{
BIF_RETTYPE result;
- DbTable* tb;
int cret;
enum DbIterSafety safety;
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
-
- if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(p, tb, arg1, arg2, 0, &ret);
+ cret = tb->common.meth->db_select(p, tb, tid, ms, 0, &ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2847,9 +2927,9 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
+ &ets_select_count_continue_exp);
cret = tb->common.meth->db_select_count_continue(p, tb, a1, &ret);
@@ -2884,13 +2964,9 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_count_2);
+
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
@@ -2940,9 +3016,8 @@ static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
- BIF_ERROR(p,BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
+ &ets_select_replace_continue_exp);
cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret);
@@ -2976,9 +3051,7 @@ BIF_RETTYPE ets_select_replace_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_select_replace_2);
if (tb->common.status & DB_BAG) {
/* Bag implementation presented both semantic consistency
@@ -3029,13 +3102,8 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
Sint chunk_size;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_reverse_3);
/* Chunk size strictly greater than 0 */
if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
@@ -3073,7 +3141,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_reverse_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_select_reverse_1, BIF_ARG_1);
}
BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
@@ -3085,13 +3153,9 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_reverse_2);
+
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
@@ -3123,45 +3187,63 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
/*
-** ets:match_object(Continuation), ets:match_object(Table, Pattern), ets:match_object(Table,Pattern,ChunkSize)
+** ets:match_object(Continuation)
*/
BIF_RETTYPE ets_match_object_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_match_object_1, BIF_ARG_1);
}
+/*
+** ets:match_object(Table, Pattern)
+*/
BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
{
+ DbTable* tb;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_object_2);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, tb, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
+/*
+** ets:match_object(Table,Pattern,ChunkSize)
+*/
BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
{
+ DbTable* tb;
+ Sint chunk_size;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_object_3);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, tb, BIF_ARG_1, ms, chunk_size);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -3175,22 +3257,24 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table,
am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed,
am_write_concurrency,
- am_read_concurrency};
+ am_read_concurrency,
+ am_id};
Eterm results[sizeof(fields)/sizeof(Eterm)];
DbTable* tb;
Eterm res;
int i;
Eterm* hp;
+ Uint freason;
/*Process* rp = NULL;*/
/* If/when we implement lockless private tables:
Eterm owner;
*/
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
BIF_RET(am_undefined);
- }
- BIF_ERROR(BIF_P, BADARG);
+ else
+ return db_bif_fail(BIF_P, freason, BIF_ets_info_1, NULL);
}
/* If/when we implement lockless private tables:
@@ -3211,7 +3295,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL
|| tb->common.owner != owner) {
if (BIF_P != rp)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
@@ -3225,7 +3309,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
db_unlock(tb, LCK_READ);
/*if (rp != NULL && rp != BIF_P)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/
hp = HAlloc(BIF_P, 5*sizeof(fields)/sizeof(Eterm));
res = NIL;
@@ -3247,12 +3331,13 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret = THE_NON_VALUE;
+ Uint freason;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
BIF_RET(am_undefined);
- }
- BIF_ERROR(BIF_P, BADARG);
+ else
+ return db_bif_fail(BIF_P, freason, BIF_ets_info_2, NULL);
}
ret = table_info(BIF_P, tb, BIF_ARG_2);
db_unlock(tb, LCK_READ);
@@ -3340,15 +3425,13 @@ int erts_ets_rwmtx_spin_count = -1;
void init_db(ErtsDbSpinCount db_spin_count)
{
int i;
- Eterm *hp;
unsigned bits;
size_t size;
-#ifdef ERTS_SMP
int max_spin_count = (1 << 15) - 1; /* internal limit */
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
switch (db_spin_count) {
case ERTS_DB_SPNCNT_NONE:
@@ -3388,13 +3471,12 @@ void init_db(ErtsDbSpinCount db_spin_count)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
- erts_smp_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+ erts_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
"meta_name_tab", make_small(i),
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB);
}
-#endif
- erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0);
+ erts_atomic_init_nob(&erts_ets_misc_mem_size, 0);
db_initialize_util();
if (user_requested_db_max_tabs < DB_DEF_MAX_TABS)
@@ -3408,7 +3490,11 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_max_tabs, ((Uint)1)<<SMALL_BITS);
}
- meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */
+ /*
+ * We no longer have any hard limit on the number of tables,
+ * but we use 'db_max_tabs' to determine the size of the name hash table.
+ */
+ meta_name_tab_mask = (((Uint) 1)<<bits) - 1;
size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
meta_name_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
ERTS_ETS_MISC_MEM_ADD(size);
@@ -3423,35 +3509,28 @@ void init_db(ErtsDbSpinCount db_spin_count)
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
- am_ets, am_atom_put("delete_trap",11), 1,
- &ets_select_delete_1);
+ am_ets, ERTS_MAKE_AM("select_delete_trap"), 1,
+ &ets_select_delete_trap_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_count_continue_exp,
- am_ets, am_atom_put("count_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("count_trap"), 1,
&ets_select_count_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_replace_continue_exp,
- am_ets, am_atom_put("replace_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("replace_trap"), 1,
&ets_select_replace_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_continue_exp,
- am_ets, am_atom_put("select_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("select_trap"), 1,
&ets_select_trap_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_delete_continue_exp,
- am_ets, am_atom_put("delete_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("delete_trap"), 1,
&ets_delete_trap);
-
- hp = ms_delete_all_buff;
- ms_delete_all = CONS(hp, am_true, NIL);
- hp += 2;
- ms_delete_all = TUPLE3(hp,am_Underscore,NIL,ms_delete_all);
- hp +=4;
- ms_delete_all = CONS(hp, ms_delete_all,NIL);
}
void
@@ -3463,7 +3542,7 @@ erts_ets_sched_spec_data_init(ErtsSchedulerData *esdp)
eaydp->tab = NULL;
eaydp->queue = NULL;
esdp->ets_tables.clist = NULL;
- esdp->ets_tables.count = 0;
+ erts_atomic_init_nob(&esdp->ets_tables.count, 0);
}
@@ -3495,14 +3574,14 @@ retry:
if (tb->common.owner != p->common.id) {
if (to_proc != NULL ) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
}
db_unlock(tb,LCK_WRITE);
return !0; /* ok, someone already gave my table away */
}
if (tb->common.heir != to_pid) { /* someone changed the heir */
if (to_proc != NULL ) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
}
if (to_pid == p->common.id || to_pid == am_none) {
return 0; /* no real heir, table still mine */
@@ -3515,7 +3594,7 @@ retry:
}
if (to_proc->common.u.alive.started_interval
!= tb->common.heir_started_interval) {
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
@@ -3532,7 +3611,7 @@ retry:
heir_data = tpv[1];
}
send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data);
- erts_smp_proc_unlock(to_proc, to_locks);
+ erts_proc_unlock(to_proc, to_locks);
return !0;
}
@@ -3568,7 +3647,8 @@ send_ets_transfer_message(Process *c_p, Process *proc,
hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp);
sender = c_p->common.id;
msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy);
- erts_queue_message(proc, *locks, mp, msg, sender);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_proc_message(c_p, proc, *locks, mp, msg);
}
@@ -3583,21 +3663,17 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
db_lock(tb, LCK_WRITE_REC);
if (!(tb->common.status & DB_DELETE)) {
erts_aint_t diff;
- #ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
+ erts_mtx_lock(&tb->common.fixlock);
ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
diff = -((erts_aint_t) fix->counter);
- erts_smp_refc_add(&tb->common.fix_count,diff,0);
+ erts_refc_add(&tb->common.fix_count,diff,0);
fix->counter = 0;
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
- #ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
+ erts_mtx_unlock(&tb->common.fixlock);
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
work += db_unfix_table_hash(&(tb->hash));
}
@@ -3654,9 +3730,9 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
switch (state->op) {
case GET_OWNED_TABLE: {
DbTable* tb;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
if (!tb) {
/* Done with owned tables; now fixations */
@@ -3747,10 +3823,8 @@ static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
- erts_smp_refc_inc(&tb->common.fix_count,1);
+ erts_mtx_lock(&tb->common.fixlock);
+ erts_refc_inc(&tb->common.fix_count,1);
fix = tb->common.fixing_procs;
if (fix == NULL) {
tb->common.time.monotonic
@@ -3763,9 +3837,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
ASSERT(fixed_tabs_find(NULL, fix));
++(fix->counter);
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
+ erts_mtx_unlock(&tb->common.fixlock);
return;
}
}
@@ -3778,9 +3850,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix->counter = 1;
fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
+ erts_mtx_unlock(&tb->common.fixlock);
p->flags |= F_USING_DB;
fixed_tabs_insert(p, fix);
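/*
 * A reduced sketch of the counted-fixation bookkeeping in fix_table_locked:
 * one entry per (process, table) pair, created on the first fixation and
 * counted up on repeated fixations, all under the table's fixation mutex.
 * The linked list and plain pthread mutex are simplified stand-ins for the
 * erts red-black trees and erts_mtx API.
 */
#include <pthread.h>
#include <stdlib.h>

struct fixation_sketch {
    const void *proc;               /* fixating process, used as lookup key */
    long counter;                   /* nested fixations by that process */
    struct fixation_sketch *next;   /* simple list instead of an rb-tree */
};

struct fixed_table_sketch {
    pthread_mutex_t fixlock;
    struct fixation_sketch *fixing_procs;
};

static void fix_table_sketch(struct fixed_table_sketch *tb, const void *proc)
{
    struct fixation_sketch *fix;
    pthread_mutex_lock(&tb->fixlock);
    for (fix = tb->fixing_procs; fix; fix = fix->next) {
        if (fix->proc == proc) {        /* already fixed by this process */
            fix->counter++;
            pthread_mutex_unlock(&tb->fixlock);
            return;
        }
    }
    fix = malloc(sizeof(*fix));         /* first fixation by this process */
    if (!fix)
        abort();
    fix->proc = proc;
    fix->counter = 1;
    fix->next = tb->fixing_procs;
    tb->fixing_procs = fix;
    pthread_mutex_unlock(&tb->fixlock);
}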
@@ -3793,20 +3863,16 @@ static void unfix_table_locked(Process* p, DbTable* tb,
{
DbFixation* fix;
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
+ erts_mtx_lock(&tb->common.fixlock);
fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
if (fix) {
- erts_smp_refc_dec(&tb->common.fix_count,0);
+ erts_refc_dec(&tb->common.fix_count,0);
--(fix->counter);
ASSERT(fix->counter >= 0);
if (fix->counter == 0) {
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
+ erts_mtx_unlock(&tb->common.fixlock);
fixed_tabs_delete(p, fix);
erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
@@ -3817,22 +3883,19 @@ static void unfix_table_locked(Process* p, DbTable* tb,
goto unlocked;
}
}
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
+ erts_mtx_unlock(&tb->common.fixlock);
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
- && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
-#ifdef ERTS_SMP
+ && erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
- erts_smp_rwmtx_runlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+ erts_rwmtx_runlock(&tb->common.rwlock);
+ erts_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
- if (tb->common.status & DB_DELETE) return;
+ if (tb->common.status & (DB_DELETE|DB_BUSY))
+ return;
}
-#endif
db_unfix_table_hash(&(tb->hash));
}
}
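/*
 * A generic sketch of the lock-upgrade step in unfix_table_locked: the read
 * lock is released and the write lock taken (there is no atomic upgrade),
 * so any state that could have changed in the window must be re-checked,
 * which is why the code above re-tests DB_DELETE|DB_BUSY after upgrading.
 * Plain pthread rwlocks stand in for the erts rwmtx API.
 */
#include <pthread.h>
#include <stdbool.h>

struct locked_table_sketch {
    pthread_rwlock_t lock;
    bool deleted;                    /* may be set by another thread */
};

/* Caller holds the read lock. Returns true with the write lock held and the
 * table still valid, false (unlocked) if the table went away meanwhile. */
static bool upgrade_to_write_lock(struct locked_table_sketch *t)
{
    pthread_rwlock_unlock(&t->lock); /* release the read lock ... */
    pthread_rwlock_wrlock(&t->lock); /* ... then reacquire for writing */
    if (t->deleted) {                /* re-validate after the unlocked gap */
        pthread_rwlock_unlock(&t->lock);
        return false;
    }
    return true;
}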
@@ -3854,9 +3917,8 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
ASSERT(ctx->tb->common.status & DB_DELETE);
diff = -((erts_aint_t) fix->counter);
- erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0);
+ erts_refc_add(&ctx->tb->common.fix_count, diff, 0);
-#ifdef ERTS_SMP
if (fix->procs.p != ctx->p) { /* Fixated by other process */
fix->counter = 0;
@@ -3872,7 +3934,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
*/
}
else
-#endif
{
fixed_tabs_delete(fix->procs.p, fix);
@@ -3885,7 +3946,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
ctx->cnt++;
}
-#ifdef ERTS_SMP
int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
{
ASSERT(fix->counter == 0);
@@ -3897,13 +3957,12 @@ int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
return 1;
}
-#endif
static SWord free_fixations_locked(Process* p, DbTable *tb)
{
struct free_fixations_ctx ctx;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
ctx.p = p;
ctx.tb = tb;
@@ -3983,7 +4042,8 @@ static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
ASSERT(*ptr == make_pos_bignum_header(1));
- if (free_table_continue(BIF_P, tb, reds) < 0) {
+ reds = free_table_continue(BIF_P, tb, reds);
+ if (reds < 0) {
BUMP_ALL_REDS(BIF_P);
BIF_TRAP1(&ets_delete_continue_exp, BIF_P, cont);
}
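/*
 * A simplified, self-contained sketch of the trap-and-continue pattern used
 * by ets_delete_trap: spend at most a fixed work budget per call, report
 * whether the job finished, and have the caller re-invoke (here a plain
 * loop, in the emulator a BIF_TRAP1 back into the continuation export)
 * until done. Names and numbers are illustrative only.
 */
#include <stdio.h>

/* Returns the budget left (>= 0) when finished, or -1 if the budget ran out. */
static long free_some_rows(long *rows_left, long budget)
{
    while (*rows_left > 0 && budget > 0) {
        --*rows_left;                /* stand-in for freeing one table chunk */
        --budget;
    }
    return (*rows_left == 0) ? budget : -1;
}

int main(void)
{
    long rows = 2500;
    long left;
    while ((left = free_some_rows(&rows, 1000)) < 0)
        printf("trapping, %ld rows still to free\n", rows);
    printf("done, %ld units of budget left\n", left);
    return 0;
}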
@@ -4046,7 +4106,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
int use_monotonic;
if (What == am_size) {
- ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems));
+ ret = make_small(erts_atomic_read_nob(&tb->common.nitems));
} else if (What == am_type) {
if (tb->common.status & DB_SET) {
ret = am_set;
@@ -4059,7 +4119,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_bag;
}
} else if (What == am_memory) {
- Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ Uint words = (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint));
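/*
 * The am_memory result above rounds a byte count up to whole machine words.
 * A minimal sketch of that ceiling division, using size_t in place of the
 * emulator's Uint:
 */
#include <stddef.h>

static size_t bytes_to_words(size_t bytes)
{
    return (bytes + sizeof(size_t) - 1) / sizeof(size_t);  /* round up */
}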
@@ -4089,14 +4149,17 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = is_table_named(tb) ? am_true : am_false;
} else if (What == am_compressed) {
ret = tb->common.compress ? am_true : am_false;
+ } else if (What == am_id) {
+ ret = make_tid(p, tb);
}
+
/*
* For debugging purposes
*/
else if (What == am_data) {
print_table(ERTS_PRINT_STDOUT, NULL, 1, tb);
ret = am_true;
- } else if (What == am_atom_put("fixed",5)) {
+ } else if (ERTS_IS_ATOM_STR("fixed",What)) {
if (IS_FIXED(tb))
ret = am_true;
else
@@ -4105,9 +4168,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
= ERTS_IS_ATOM_STR("safe_fixed_monotonic_time",
What))
|| ERTS_IS_ATOM_STR("safe_fixed", What)) {
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
+ erts_mtx_lock(&tb->common.fixlock);
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
@@ -4149,10 +4210,8 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else {
ret = am_false;
}
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
- } else if (What == am_atom_put("stats",5)) {
+ erts_mtx_unlock(&tb->common.fixlock);
+ } else if (ERTS_IS_ATOM_STR("stats",What)) {
if (IS_HASH_TABLE(tb->common.status)) {
FloatDef f;
DbHashStats stats;
@@ -4175,7 +4234,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
std_dev_exp = make_float(hp);
PUT_DOUBLE(f, hp);
hp += FLOAT_SIZE_OBJECT;
- ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)),
+ ret = TUPLE7(hp, make_small(erts_atomic_read_nob(&tb->hash.nactive)),
avg, std_dev_real, std_dev_exp,
make_small(stats.min_chain_len),
make_small(stats.max_chain_len),
@@ -4207,9 +4266,9 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
tb->common.meth->db_print(to, to_arg, show, tb);
- erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems));
+ erts_print(to, to_arg, "Objects: %d\n", (int)erts_atomic_read_nob(&tb->common.nitems));
erts_print(to, to_arg, "Words: %bpu\n",
- (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size)
+ (Uint) ((erts_atomic_read_nob(&tb->common.memory_size)
+ sizeof(Uint)
- 1)
/ sizeof(Uint)));
@@ -4249,9 +4308,9 @@ void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler */
Uint
erts_get_ets_misc_mem_size(void)
{
- ERTS_SMP_MEMORY_BARRIER;
+ ERTS_THR_MEMORY_BARRIER;
/* Memory not allocated in ets_alloc */
- return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size);
+ return (Uint) erts_atomic_read_nob(&erts_ets_misc_mem_size);
}
/* SMP Note: May only be used when system is locked */
@@ -4260,7 +4319,7 @@ erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
{
int ix;
- ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(erts_thr_progress_is_blocking());
for (ix = 0; ix < erts_no_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
@@ -4292,6 +4351,18 @@ erts_db_get_max_tabs()
return db_max_tabs;
}
+Uint erts_ets_table_count(void)
+{
+ Uint tb_count = 0;
+ Uint six;
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsSchedulerData *esdp = &erts_aligned_scheduler_data[six].esd;
+ tb_count += erts_atomic_read_nob(&esdp->ets_tables.count);
+ }
+ return tb_count;
+}
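/*
 * erts_ets_table_count() above sums one relaxed atomic counter kept per
 * scheduler. A generic sketch of the same pattern with C11 atomics: each
 * scheduler thread updates only its own slot, and readers sum all slots on
 * demand, getting a snapshot rather than an instantaneously consistent
 * value. The fixed array size is illustrative only.
 */
#include <stdatomic.h>
#include <stddef.h>

#define N_SCHEDULERS_SKETCH 8

static atomic_long table_count_sketch[N_SCHEDULERS_SKETCH];

static void table_created_sketch(size_t scheduler_ix)
{
    atomic_fetch_add_explicit(&table_count_sketch[scheduler_ix], 1,
                              memory_order_relaxed);
}

static long total_table_count_sketch(void)
{
    long sum = 0;
    for (size_t i = 0; i < N_SCHEDULERS_SKETCH; i++)
        sum += atomic_load_explicit(&table_count_sketch[i],
                                    memory_order_relaxed);
    return sum;
}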
+
/*
* For testing of meta tables only.
*
@@ -4312,7 +4383,7 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
while (index >= atom_table_size()) {
char tmp[20];
erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
- erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
+ erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
}
list = CONS(hp, make_atom(index), list);
hp += 2;
@@ -4365,5 +4436,8 @@ void erts_lcnt_update_db_locks(int enable) {
erts_schedule_multi_misc_aux_work(0, erts_no_schedulers,
&lcnt_update_db_locks_per_sched, (void*)(UWord)enable);
}
-#endif /* ERTS_ENABLE_LOCK_COUNT */
\ No newline at end of file
+#endif /* ERTS_ENABLE_LOCK_COUNT */
+#ifdef ETS_DBG_FORCE_TRAP
+erts_aint_t erts_ets_dbg_force_trap = 0;
+#endif