Diffstat (limited to 'erts/emulator/beam/erl_db_hash.c')
-rw-r--r--  erts/emulator/beam/erl_db_hash.c  1278
1 file changed, 656 insertions(+), 622 deletions(-)
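
The tables touched by this patch are linear hash tables (the first hunk adds a Wikipedia reference for the scheme). As a reading aid, below is a minimal, standalone C sketch of the bucket-index computation that hash_to_ix() performs in the code further down; hash_to_bucket and its parameters are hypothetical names for this sketch only, and it deliberately leaves out the ERTS atomics, fine-grained locking and segment tables.

/*
 * Illustrative sketch only -- not part of this patch or of ERTS.
 * It mirrors the index computation in hash_to_ix(): mask the hash with the
 * current size mask (szm); if that lands in a bucket that has not been
 * activated (split) yet, fall back to the previous, smaller mask so the key
 * is found where it was originally stored.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long HashValue;

unsigned long hash_to_bucket(HashValue hval,
                             unsigned long szm,     /* size mask, 2^k - 1 */
                             unsigned long nactive) /* nr of active buckets */
{
    unsigned long ix = hval & szm;
    if (ix >= nactive) {
        ix &= szm >> 1;         /* previous, smaller mask */
        assert(ix < nactive);
    }
    return ix;
}

int main(void)
{
    /* 8-bucket mask but only 6 buckets activated so far:
     * hash 7 maps to bucket 7 & 3 = 3 until bucket 7 is split in. */
    printf("%lu\n", hash_to_bucket(7, 0x7, 6));  /* prints 3 */
    return 0;
}

Masking with the smaller szm >> 1 is what lets the table grow one bucket at a time: keys whose home bucket has not been split yet remain reachable through the old, half-size mask.
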
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index cf928a9035..42d7909a08 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1998-2017. All Rights Reserved.
+ * Copyright Ericsson AB 1998-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
/*
** Implementation of unordered ETS tables.
** The tables are implemented as linear dynamic hash tables.
+** https://en.wikipedia.org/wiki/Linear_hashing
*/
/* SMP:
@@ -109,22 +110,18 @@
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
-#ifdef ERTS_SMP
# define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED)
-#else
-# define DB_USING_FINE_LOCKING(TB) 0
-#endif
#ifdef ETHR_ORDERED_READ_DEPEND
-#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))
+#define SEGTAB(tb) ((struct segment**) erts_atomic_read_nob(&(tb)->segtab))
#else
#define SEGTAB(tb) \
(DB_USING_FINE_LOCKING(tb) \
- ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \
- : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)))
+ ? ((struct segment**) erts_atomic_read_ddrb(&(tb)->segtab)) \
+ : ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)))
#endif
-#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive))
-#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
+#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive))
+#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))
#define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP)
@@ -142,111 +139,129 @@
static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
{
Uint mask = (DB_USING_FINE_LOCKING(tb)
- ? erts_smp_atomic_read_acqb(&tb->szm)
- : erts_smp_atomic_read_nob(&tb->szm));
+ ? erts_atomic_read_acqb(&tb->szm)
+ : erts_atomic_read_nob(&tb->szm));
Uint ix = hval & mask;
- if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) {
+ if (ix >= erts_atomic_read_nob(&tb->nactive)) {
ix &= mask>>1;
- ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive));
+ ASSERT(ix < erts_atomic_read_nob(&tb->nactive));
}
return ix;
}
-/* Remember a slot containing a pseudo-deleted item (INVALID_HASH)
- * Return false if we got raced by unfixing thread
- * and the object should be deleted for real.
- */
-static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
- erts_aint_t fixated_by_me)
+
+static ERTS_INLINE FixedDeletion* alloc_fixdel(DbTableHash* tb)
{
- erts_aint_t was_next;
- erts_aint_t exp_next;
FixedDeletion* fixd = (FixedDeletion*) erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- sizeof(FixedDeletion));
+ (DbTable *) tb,
+ sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
- fixd->slot = ix;
- was_next = erts_smp_atomic_read_acqb(&tb->fixdel);
+ return fixd;
+}
+
+static ERTS_INLINE void free_fixdel(DbTableHash* tb, FixedDeletion* fixd)
+{
+ erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb,
+ fixd, sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+}
+
+static ERTS_INLINE int link_fixdel(DbTableHash* tb,
+ FixedDeletion* fixd,
+ erts_aint_t fixated_by_me)
+{
+ erts_aint_t was_next;
+ erts_aint_t exp_next;
+
+ was_next = erts_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic insertion in linked list: */
if (NFIXED(tb) <= fixated_by_me) {
- erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb,
- fixd, sizeof(FixedDeletion));
+ free_fixdel(tb, fixd);
return 0; /* raced by unfixer */
}
exp_next = was_next;
fixd->next = (FixedDeletion*) exp_next;
- was_next = erts_smp_atomic_cmpxchg_mb(&tb->fixdel,
+ was_next = erts_atomic_cmpxchg_mb(&tb->fixdel,
(erts_aint_t) fixd,
exp_next);
}while (was_next != exp_next);
return 1;
}
+/* Remember a slot containing a pseudo-deleted item
+ * Return false if we got raced by unfixing thread
+ * and the object should be deleted for real.
+ */
+static int add_fixed_deletion(DbTableHash* tb, int ix,
+ erts_aint_t fixated_by_me)
+{
+ FixedDeletion* fixd = alloc_fixdel(tb);
+ fixd->slot = ix;
+ fixd->all = 0;
+ return link_fixdel(tb, fixd, fixated_by_me);
+}
+
+
+static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
+{
+ return p->pseudo_deleted;
+}
-#define MAX_HASH 0xEFFFFFFFUL
-#define INVALID_HASH 0xFFFFFFFFUL
/* optimised version of make_hash (normal case? atomic key) */
#define MAKE_HASH(term) \
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_internal_hash(term, 0)) % MAX_HASH)
+ make_internal_hash(term, 0)) & MAX_HASH_MASK)
-#ifdef ERTS_SMP
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
/* Fine grained read lock */
-static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval)
{
if (tb->common.is_thread_safe) {
return NULL;
} else {
- erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ erts_rwmtx_t* lck = GET_LOCK(tb,hval);
ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_smp_rwmtx_rlock(lck);
+ erts_rwmtx_rlock(lck);
return lck;
}
}
/* Fine grained write lock */
-static ERTS_INLINE erts_smp_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
+static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval)
{
if (tb->common.is_thread_safe) {
return NULL;
} else {
- erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval);
+ erts_rwmtx_t* lck = GET_LOCK(tb,hval);
ASSERT(tb->common.type & DB_FINE_LOCKED);
- erts_smp_rwmtx_rwlock(lck);
+ erts_rwmtx_rwlock(lck);
return lck;
}
}
-static ERTS_INLINE void RUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck)
{
if (lck != NULL) {
- erts_smp_rwmtx_runlock(lck);
+ erts_rwmtx_runlock(lck);
}
}
-static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
+static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck)
{
if (lck != NULL) {
- erts_smp_rwmtx_rwunlock(lck);
+ erts_rwmtx_rwunlock(lck);
}
}
-#else /* ERTS_SMP */
-# define RLOCK_HASH(tb,hval) NULL
-# define WLOCK_HASH(tb,hval) NULL
-# define RUNLOCK_HASH(lck) ((void)lck)
-# define WUNLOCK_HASH(lck) ((void)lck)
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_LOCK_CHECK
# define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd))
-# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
-# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rwlocked(lck))
-# define IS_TAB_WLOCKED(tb) erts_smp_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
+# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval)))
+# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck))
+# define IS_TAB_WLOCKED(tb) erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock)
#else
# define IS_HASH_RLOCKED(tb,hval) (1)
# define IS_HASH_WLOCKED(tb,hval) (1)
@@ -259,47 +274,44 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
** Slot READ locks updated accordingly, unlocked if EOT.
*/
static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
- erts_smp_rwmtx_t** lck_ptr)
+ erts_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
RUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix);
return ix;
-#else
- return (++ix < NACTIVE(tb)) ? ix : 0;
-#endif
}
/* Same as next_slot but with WRITE locking */
static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
- erts_smp_rwmtx_t** lck_ptr)
+ erts_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
WUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix);
return ix;
-#else
- return next_slot(tb,ix,lck_ptr);
-#endif
}
-/*
- * Some special binary flags
- */
-#define BIN_FLAG_ALL_OBJECTS BIN_FLAG_USR1
-
static ERTS_INLINE void free_term(DbTableHash *tb, HashDbTerm* p)
{
db_free_term((DbTable*)tb, p, offsetof(HashDbTerm, dbterm));
}
+static ERTS_INLINE void free_term_list(DbTableHash *tb, HashDbTerm* p)
+{
+ while (p) {
+ HashDbTerm* next = p->next;
+ free_term(tb, p);
+ p = next;
+ }
+}
+
+
/*
* Local types
*/
@@ -309,9 +321,6 @@ struct mp_prefound {
};
struct mp_info {
- int all_objects; /* True if complete objects are always
- * returned from the match_spec (can use
- * copy_shallow on the return value) */
int something_can_match; /* The match_spec is not "impossible" */
int key_given;
struct mp_prefound dlists[10]; /* Default list of "pre-found" buckets */
@@ -331,9 +340,7 @@ struct segment {
/* An extended segment table */
struct ext_segtab {
-#ifdef ERTS_SMP
ErtsThrPrgrLaterOp lop;
-#endif
struct segment** prev_segtab; /* Used when table is shrinking */
int prev_nsegs; /* Size of prev_segtab */
int nsegs; /* Size of this segtab */
@@ -347,9 +354,9 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb,
struct segment** segtab)
{
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
+ erts_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab);
else
- erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
+ erts_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab);
}
/* Used by select_replace on analyze_pattern */
@@ -361,8 +368,8 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete
static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix);
static void alloc_seg(DbTableHash *tb);
static int free_seg(DbTableHash *tb, int free_records);
-static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
- HashDbTerm *list);
+static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
+ HashDbTerm *list);
static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
HashValue hval, HashDbTerm *list);
static void shrink(DbTableHash* tb, int nitems);
@@ -423,7 +430,7 @@ static void db_print_hash(fmtfn_t to,
void *to_arg,
int show,
DbTable *tbl);
-static int db_free_table_hash(DbTable *tbl);
+static int db_free_empty_table_hash(DbTable *tbl);
static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds);
@@ -432,7 +439,7 @@ static void db_foreach_offheap_hash(DbTable *,
void (*)(ErlOffHeap *, void *),
void *);
-static int db_delete_all_objects_hash(Process* p, DbTable* tbl);
+static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds);
#ifdef HARDDEBUG
static void db_check_table_hash(DbTableHash *tb);
#endif
@@ -457,7 +464,8 @@ static ERTS_INLINE void try_shrink(DbTableHash* tb)
static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
Eterm key, HashValue hval)
{
- if (b->hvalue != hval) return 0;
+ if (b->hvalue != hval || is_pseudo_deleted(b))
+ return 0;
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
@@ -470,7 +478,8 @@ static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
static ERTS_INLINE int has_key(DbTableHash* tb, HashDbTerm* b,
Eterm key, HashValue hval)
{
- if (b->hvalue != hval && b->hvalue != INVALID_HASH) return 0;
+ if (b->hvalue != hval)
+ return 0;
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
@@ -534,7 +543,7 @@ DbTableMethod db_hash =
db_select_replace_continue_hash,
db_take_hash,
db_delete_all_objects_hash,
- db_free_table_hash,
+ db_free_empty_table_hash,
db_free_table_continue_hash,
db_print_hash,
db_foreach_offheap_hash,
@@ -559,7 +568,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
{
/*int tries = 0;*/
DEBUG_WAIT();
- if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ if (erts_atomic_cmpxchg_relb(&tb->fixdel,
(erts_aint_t) fixdel,
(erts_aint_t) NULL) != (erts_aint_t) NULL) {
/* Oboy, must join lists */
@@ -568,13 +577,13 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
erts_aint_t exp_tail;
while (last->next != NULL) last = last->next;
- was_tail = erts_smp_atomic_read_acqb(&tb->fixdel);
+ was_tail = erts_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic list insertion */
exp_tail = was_tail;
last->next = (FixedDeletion*) exp_tail;
/*++tries;*/
DEBUG_WAIT();
- was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel,
+ was_tail = erts_atomic_cmpxchg_relb(&tb->fixdel,
(erts_aint_t) fixdel,
exp_tail);
}while (was_tail != exp_tail);
@@ -590,52 +599,58 @@ SWord db_unfix_table_hash(DbTableHash *tb)
FixedDeletion* fixdel;
SWord work = 0;
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
- || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
- && !tb->common.is_thread_safe));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
+ || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock)
+ && !tb->common.is_thread_safe));
restart:
- fixdel = (FixedDeletion*) erts_smp_atomic_xchg_mb(&tb->fixdel,
- (erts_aint_t) NULL);
- while (fixdel != NULL) {
- FixedDeletion *fx = fixdel;
- int ix = fx->slot;
- HashDbTerm **bp;
- HashDbTerm *b;
- erts_smp_rwmtx_t* lck = WLOCK_HASH(tb,ix);
-
- if (IS_FIXED(tb)) { /* interrupted by fixer */
- WUNLOCK_HASH(lck);
- restore_fixdel(tb,fixdel);
- if (!IS_FIXED(tb)) {
- goto restart; /* unfixed again! */
- }
- return work;
- }
- if (ix < NACTIVE(tb)) {
- bp = &BUCKET(tb, ix);
- b = *bp;
-
- while (b != NULL) {
- if (b->hvalue == INVALID_HASH) {
- *bp = b->next;
- free_term(tb, b);
- work++;
- b = *bp;
- } else {
- bp = &b->next;
- b = b->next;
- }
- }
- }
- /* else slot has been joined and purged by shrink() */
- WUNLOCK_HASH(lck);
- fixdel = fx->next;
- erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- (void *) fx,
- sizeof(FixedDeletion));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
- work++;
+ fixdel = (FixedDeletion*) erts_atomic_xchg_mb(&tb->fixdel,
+ (erts_aint_t) NULL);
+ while (fixdel) {
+ FixedDeletion *free_me;
+
+ do {
+ HashDbTerm **bp;
+ HashDbTerm *b;
+ HashDbTerm *free_us = NULL;
+ erts_rwmtx_t* lck;
+
+ lck = WLOCK_HASH(tb, fixdel->slot);
+
+ if (IS_FIXED(tb)) { /* interrupted by fixer */
+ WUNLOCK_HASH(lck);
+ restore_fixdel(tb,fixdel);
+ if (!IS_FIXED(tb)) {
+ goto restart; /* unfixed again! */
+ }
+ return work;
+ }
+ if (fixdel->slot < NACTIVE(tb)) {
+ bp = &BUCKET(tb, fixdel->slot);
+ b = *bp;
+
+ while (b != NULL) {
+ if (is_pseudo_deleted(b)) {
+ HashDbTerm* nxt = b->next;
+ b->next = free_us;
+ free_us = b;
+ work++;
+ b = *bp = nxt;
+ } else {
+ bp = &b->next;
+ b = b->next;
+ }
+ }
+ }
+ /* else slot has been joined and purged by shrink() */
+ WUNLOCK_HASH(lck);
+ free_term_list(tb, free_us);
+
+ }while (fixdel->all && fixdel->slot-- > 0);
+
+ free_me = fixdel;
+ fixdel = fixdel->next;
+ free_fixdel(tb, free_me);
+ work++;
}
/* ToDo: Maybe try grow/shrink the table as well */
@@ -647,10 +662,10 @@ int db_create_hash(Process *p, DbTable *tbl)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK);
- erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ);
- erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
- erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
+ erts_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK);
+ erts_atomic_init_nob(&tb->nactive, FIRST_SEGSZ);
+ erts_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL);
+ erts_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL);
SET_SEGTAB(tb, tb->first_segtab);
tb->nsegs = NSEG_1;
tb->nslots = FIRST_SEGSZ;
@@ -659,32 +674,30 @@ int db_create_hash(Process *p, DbTable *tbl)
SIZEOF_SEGMENT(FIRST_SEGSZ));
sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
-#ifdef ERTS_SMP
- erts_smp_atomic_init_nob(&tb->is_resizing, 0);
+ erts_atomic_init_nob(&tb->is_resizing, 0);
if (tb->common.type & DB_FINE_LOCKED) {
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
int i;
if (tb->common.type & DB_FREQ_READ)
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+ rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
- (DbTable *) tb,
- sizeof(DbTableHashFineLocks));
+ tb->locks = (DbTableHashFineLocks*) erts_db_alloc(ERTS_ALC_T_DB_SEG, /* Other type maybe? */
+ (DbTable *) tb,
+ sizeof(DbTableHashFineLocks));
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
- erts_smp_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
+ erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt,
"db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
}
/* This important property is needed to guarantee the two buckets
* involved in a grow/shrink operation it protected by the same lock:
*/
- ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
+ ASSERT(erts_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0);
}
else { /* coarse locking */
tb->locks = NULL;
}
ERTS_THR_MEMORY_BARRIER;
-#endif /* ERST_SMP */
return DB_ERROR_NONE;
}
@@ -692,22 +705,12 @@ static int db_first_hash(Process *p, DbTable *tbl, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
Uint ix = 0;
- erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix);
+ erts_rwmtx_t* lck = RLOCK_HASH(tb,ix);
HashDbTerm* list;
- for (;;) {
- list = BUCKET(tb,ix);
- if (list != NULL) {
- if (list->hvalue == INVALID_HASH) {
- list = next(tb,&ix,&lck,list);
- }
- break;
- }
- if ((ix=next_slot(tb,ix,&lck)) == 0) {
- list = NULL;
- break;
- }
- }
+ list = BUCKET(tb,ix);
+ list = next_live(tb, &ix, &lck, list);
+
if (list != NULL) {
*ret = db_copy_key(p, tbl, &list->dbterm);
RUNLOCK_HASH(lck);
@@ -725,7 +728,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
HashValue hval;
Uint ix;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
@@ -744,13 +747,13 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
}
/* Key found */
- b = next(tb, &ix, &lck, b);
+ b = next_live(tb, &ix, &lck, b->next);
if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
while (b != 0) {
if (!has_live_key(tb, b, key, hval)) {
break;
}
- b = next(tb, &ix, &lck, b);
+ b = next_live(tb, &ix, &lck, b->next);
}
}
if (b == NULL) {
@@ -772,7 +775,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
HashDbTerm** bp;
HashDbTerm* b;
HashDbTerm* q;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int nitems;
int ret = DB_ERROR_NONE;
@@ -797,8 +800,9 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
*/
if (tb->common.status & DB_SET) {
HashDbTerm* bnext = b->next;
- if (b->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc_nob(&tb->common.nitems);
+ if (is_pseudo_deleted(b)) {
+ erts_atomic_inc_nob(&tb->common.nitems);
+ b->pseudo_deleted = 0;
}
else if (key_clash_fail) {
ret = DB_ERROR_BADKEY;
@@ -806,14 +810,14 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
}
q = replace_dbterm(tb, b, obj);
q->next = bnext;
- q->hvalue = hval; /* In case of INVALID_HASH */
+ ASSERT(q->hvalue == hval);
*bp = q;
goto Ldone;
}
else if (key_clash_fail) { /* && (DB_BAG || DB_DUPLICATE_BAG) */
q = b;
do {
- if (q->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(q)) {
ret = DB_ERROR_BADKEY;
goto Ldone;
}
@@ -825,9 +829,10 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
q = b;
do {
if (db_eq(&tb->common,obj,&q->dbterm)) {
- if (q->hvalue == INVALID_HASH) {
- erts_smp_atomic_inc_nob(&tb->common.nitems);
- q->hvalue = hval;
+ if (is_pseudo_deleted(q)) {
+ erts_atomic_inc_nob(&tb->common.nitems);
+ q->pseudo_deleted = 0;
+ ASSERT(q->hvalue == hval);
if (q != b) { /* must move to preserve key insertion order */
*qp = q->next;
q->next = b;
@@ -845,9 +850,10 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
Lnew:
q = new_dbterm(tb, obj);
q->hvalue = hval;
+ q->pseudo_deleted = 0;
q->next = b;
*bp = q;
- nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
+ nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
@@ -872,7 +878,7 @@ get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
while (b2 && has_key(tb, b2, key, hval)) {
- if (b2->hvalue != INVALID_HASH)
+ if (!is_pseudo_deleted(b2))
sz += b2->dbterm.size + 2;
b2 = b2->next;
@@ -891,7 +897,7 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
HashValue hval;
int ix;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
@@ -917,7 +923,7 @@ static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
HashValue hval;
int ix;
HashDbTerm* b1;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
ix = hash_to_ix(tb, hval);
@@ -946,7 +952,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
HashValue hval;
int ix;
HashDbTerm* b1;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int retval;
hval = MAKE_HASH(key);
@@ -968,7 +974,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
while(b2 != NULL && has_key(tb,b2,key,hval)) {
if (ndex > arityval(b2->dbterm.tpl[0])
- && b2->hvalue != INVALID_HASH) {
+ && !is_pseudo_deleted(b2)) {
retval = DB_ERROR_BADITEM;
goto done;
}
@@ -976,7 +982,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
}
b = b1;
while(b != b2) {
- if (b->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(b)) {
Eterm *hp;
Eterm copy = db_copy_element_from_ets(&tb->common, p,
&b->dbterm, ndex, &hp, 2);
@@ -1011,7 +1017,8 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ HashDbTerm* free_us = NULL;
+ erts_rwmtx_t* lck;
int nitems_diff = 0;
hval = MAKE_HASH(key);
@@ -1026,16 +1033,17 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
if (nitems_diff == -1 && IS_FIXED(tb)
&& add_fixed_deletion(tb, ix, 0)) {
/* Pseudo remove (no need to keep several of same key) */
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
continue;
}
}
else {
- if (nitems_diff && b->hvalue != INVALID_HASH)
+ if (nitems_diff && !is_pseudo_deleted(b))
break;
}
bp = &b->next;
@@ -1043,9 +1051,10 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
*ret = am_true;
return DB_ERROR_NONE;
}
@@ -1060,7 +1069,8 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ HashDbTerm* free_us = NULL;
+ erts_rwmtx_t* lck;
int nitems_diff = 0;
int nkeys = 0;
Eterm key;
@@ -1078,13 +1088,14 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
if (db_eq(&tb->common,object, &b->dbterm)) {
--nitems_diff;
if (nkeys==1 && IS_FIXED(tb) && add_fixed_deletion(tb,ix,0)) {
- b->hvalue = INVALID_HASH; /* Pseudo remove */
+ b->pseudo_deleted = 1;
bp = &b->next;
b = b->next;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
}
if (tb->common.status & (DB_DUPLICATE_BAG)) {
continue;
@@ -1093,7 +1104,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
}
}
- else if (nitems_diff && b->hvalue != INVALID_HASH) {
+ else if (nitems_diff && !is_pseudo_deleted(b)) {
break;
}
bp = &b->next;
@@ -1101,9 +1112,10 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
*ret = am_true;
return DB_ERROR_NONE;
}
@@ -1112,7 +1124,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
Sint slot;
int retval;
int nactive;
@@ -1139,28 +1151,19 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
/*
- * This is just here so I can take care of the return value
- * that is to be sent during a trap (the BIF_TRAP macros explicitly returns)
- */
-static BIF_RETTYPE bif_trap1(Export *bif,
- Process *p,
- Eterm p1)
-{
- BIF_TRAP1(bif, p, p1);
-}
-
-
-/*
* Match traversal callbacks
*/
+typedef struct match_callbacks_t_ match_callbacks_t;
+struct match_callbacks_t_
+{
/* Called when no match is possible.
* context_ptr: Pointer to context
* ret: Pointer to traversal function term return.
*
* Both the direct return value and 'ret' are used as the traversal function return values.
*/
-typedef int (*mtraversal_on_nothing_can_match_t)(void* context_ptr, Eterm* ret);
+ int (*on_nothing_can_match)(match_callbacks_t* ctx, Eterm* ret);
/* Called for each match result.
* context_ptr: Pointer to context
@@ -1171,8 +1174,8 @@ typedef int (*mtraversal_on_nothing_can_match_t)(void* context_ptr, Eterm* ret);
*
* Should return 1 for successful match, 0 otherwise.
*/
-typedef int (*mtraversal_on_match_res_t)(void* context_ptr, Sint slot_ix, HashDbTerm*** current_ptr_ptr,
- Eterm match_res);
+ int (*on_match_res)(match_callbacks_t* ctx, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr, Eterm match_res);
/* Called when either we've matched enough elements in this cycle or EOT was reached.
* context_ptr: Pointer to context
@@ -1185,8 +1188,8 @@ typedef int (*mtraversal_on_match_res_t)(void* context_ptr, Sint slot_ix, HashDb
* Both the direct return value and 'ret' are used as the traversal function return values.
* If *mpp is set to NULL, it won't be deallocated (useful for trapping.)
*/
-typedef int (*mtraversal_on_loop_ended_t)(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret);
+ int (*on_loop_ended)(match_callbacks_t* ctx, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret);
/* Called when it's time to trap
* context_ptr: Pointer to context
@@ -1198,7 +1201,11 @@ typedef int (*mtraversal_on_loop_ended_t)(void* context_ptr, Sint slot_ix, Sint
* Both the direct return value and 'ret' are used as the traversal function return values.
* If *mpp is set to NULL, it won't be deallocated (useful for trapping.)
*/
-typedef int (*mtraversal_on_trap_t)(void* context_ptr, Sint slot_ix, Sint got, Binary** mpp, Eterm* ret);
+ int (*on_trap)(match_callbacks_t* ctx, Sint slot_ix, Sint got, Binary** mpp,
+ Eterm* ret);
+
+};
+
/*
* Begin hash table match traversal
@@ -1211,11 +1218,7 @@ static int match_traverse(Process* p, DbTableHash* tb,
Eterm** hpp, /* Heap */
int lock_for_write, /* Set to 1 if we're going to delete or
modify existing terms */
- mtraversal_on_nothing_can_match_t on_nothing_can_match,
- mtraversal_on_match_res_t on_match_res,
- mtraversal_on_loop_ended_t on_loop_ended,
- mtraversal_on_trap_t on_trap,
- void* context_ptr, /* State for callbacks above */
+ match_callbacks_t* ctx,
Eterm* ret)
{
Sint slot_ix; /* Slot index */
@@ -1227,18 +1230,13 @@ static int match_traverse(Process* p, DbTableHash* tb,
unsigned current_list_pos = 0; /* Prefound buckets list index */
Eterm match_res;
Sint got = 0; /* Matched terms counter */
- erts_smp_rwmtx_t* lck; /* Slot lock */
+ erts_rwmtx_t* lck; /* Slot lock */
int ret_value;
-#ifdef ERTS_SMP
- erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
= (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
- void (*unlock_hash_function)(erts_smp_rwmtx_t*)
+ void (*unlock_hash_function)(erts_rwmtx_t*)
= (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
-#else
- #define lock_hash_function(tb, hval) NULL
- #define unlock_hash_function(lck) ((void)lck)
-#endif
- Sint (*next_slot_function)(DbTableHash*, Uint, erts_smp_rwmtx_t**)
+ Sint (*next_slot_function)(DbTableHash*, Uint, erts_rwmtx_t**)
= (lock_for_write ? next_slot_w : next_slot);
if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi))
@@ -1250,14 +1248,10 @@ static int match_traverse(Process* p, DbTableHash* tb,
if (!mpi.something_can_match) {
/* Can't possibly match anything */
- ret_value = on_nothing_can_match(context_ptr, ret);
+ ret_value = ctx->on_nothing_can_match(ctx, ret);
goto done;
}
- if (mpi.all_objects) {
- mpi.mp->intern.flags |= BIN_FLAG_ALL_OBJECTS;
- }
-
/*
* Look for initial slot / bucket
*/
@@ -1273,7 +1267,8 @@ static int match_traverse(Process* p, DbTableHash* tb,
}
slot_ix = next_slot_function(tb,slot_ix,&lck);
if (slot_ix == 0) {
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left,
+ &mpi.mp, ret);
goto done;
}
}
@@ -1291,11 +1286,11 @@ static int match_traverse(Process* p, DbTableHash* tb,
*/
for(;;) {
if (*current_ptr != NULL) {
- if ((*current_ptr)->hvalue != INVALID_HASH) {
- match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0,
+ if (!is_pseudo_deleted(*current_ptr)) {
+ match_res = db_match_dbterm(&tb->common, p, mpi.mp,
&(*current_ptr)->dbterm, hpp, 2);
saved_current = *current_ptr;
- if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
++got;
}
--iterations_left;
@@ -1309,7 +1304,7 @@ static int match_traverse(Process* p, DbTableHash* tb,
else if (mpi.key_given) { /* Key is bound */
unlock_hash_function(lck);
if (current_list_pos == mpi.num_lists) {
- ret_value = on_loop_ended(context_ptr, -1, got, iterations_left, &mpi.mp, ret);
+ ret_value = ctx->on_loop_ended(ctx, -1, got, iterations_left, &mpi.mp, ret);
goto done;
} else {
slot_ix = mpi.lists[current_list_pos].ix;
@@ -1328,24 +1323,20 @@ static int match_traverse(Process* p, DbTableHash* tb,
unlock_hash_function(lck);
break;
}
- if (iterations_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
+ if (iterations_left <= 0) {
unlock_hash_function(lck);
- ret_value = on_trap(context_ptr, slot_ix, got, &mpi.mp, ret);
+ ret_value = ctx->on_trap(ctx, slot_ix, got, &mpi.mp, ret);
goto done;
}
current_ptr = &BUCKET(tb,slot_ix);
}
}
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, &mpi.mp, ret);
done:
/* We should only jump directly to this label if
- * we've already called on_nothing_can_match / on_loop_ended / on_trap
+ * we've already called ctx->nothing_can_match / loop_ended / trap
*/
if (mpi.mp != NULL) {
erts_bin_free(mpi.mp);
@@ -1356,10 +1347,6 @@ done:
}
return ret_value;
-#ifndef SMP
-#undef lock_hash_function
-#undef unlock_hash_function
-#endif
}
/*
@@ -1374,30 +1361,21 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
Binary** mpp, /* Existing match program */
int lock_for_write, /* Set to 1 if we're going to delete or
modify existing terms */
- mtraversal_on_match_res_t on_match_res,
- mtraversal_on_loop_ended_t on_loop_ended,
- mtraversal_on_trap_t on_trap,
- void* context_ptr, /* For callbacks */
+ match_callbacks_t* ctx,
Eterm* ret)
{
- int all_objects = (*mpp)->intern.flags & BIN_FLAG_ALL_OBJECTS;
HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
* the 'next' pointer in the previous term
*/
HashDbTerm* saved_current; /* Helper to avoid double skip on match */
Eterm match_res;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int ret_value;
-#ifdef ERTS_SMP
- erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
+ erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
= (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
- void (*unlock_hash_function)(erts_smp_rwmtx_t*)
+ void (*unlock_hash_function)(erts_rwmtx_t*)
= (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
-#else
- #define lock_hash_function(tb, hval) NULL
- #define unlock_hash_function(lck) ((void)lck)
-#endif
- Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_smp_rwmtx_t** lck_ptr)
+ Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_rwmtx_t** lck_ptr)
= (lock_for_write ? next_slot_w : next_slot);
if (got < 0) {
@@ -1409,7 +1387,7 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
|| (chunk_size && got >= chunk_size))
{
/* Already got all or enough in the match_list */
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, mpp, ret);
goto done;
}
@@ -1427,11 +1405,11 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
current_ptr = &BUCKET(tb,slot_ix);
for(;;) {
if (*current_ptr != NULL) {
- if ((*current_ptr)->hvalue != INVALID_HASH) {
- match_res = db_match_dbterm(&tb->common, p, *mpp, all_objects,
+ if (!is_pseudo_deleted(*current_ptr)) {
+ match_res = db_match_dbterm(&tb->common, p, *mpp,
&(*current_ptr)->dbterm, hpp, 2);
saved_current = *current_ptr;
- if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
++got;
}
--iterations_left;
@@ -1451,20 +1429,16 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
unlock_hash_function(lck);
break;
}
- if (iterations_left <= 0 || MBUF(p)) {
- /*
- * We have either reached our limit, or just created some heap fragments.
- * Since many heap fragments will make the GC slower, trap and GC now.
- */
+ if (iterations_left <= 0) {
unlock_hash_function(lck);
- ret_value = on_trap(context_ptr, slot_ix, got, mpp, ret);
+ ret_value = ctx->on_trap(ctx, slot_ix, got, mpp, ret);
goto done;
}
current_ptr = &BUCKET(tb,slot_ix);
}
}
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, mpp, ret);
done:
/* We should only jump directly to this label if
@@ -1472,10 +1446,6 @@ done:
*/
return ret_value;
-#ifndef SMP
-#undef lock_hash_function
-#undef unlock_hash_function
-#endif
}
@@ -1485,7 +1455,7 @@ done:
* as well as their continuation-handling counterparts.
*/
-static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function,
+static ERTS_INLINE int on_simple_trap(Export* trap_function,
Process* p,
DbTableHash* tb,
Eterm tid,
@@ -1504,20 +1474,24 @@ static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function,
BUMP_ALL_REDS(p);
if (IS_USMALL(0, got)) {
- hp = HAlloc(p, base_halloc_sz + 5);
+ hp = HAllocX(p, base_halloc_sz + 5, ERTS_MAGIC_REF_THING_SIZE);
egot = make_small(got);
}
else {
- hp = HAlloc(p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 5);
+ hp = HAllocX(p, base_halloc_sz + BIG_UINT_HEAP_SIZE + 5,
+ ERTS_MAGIC_REF_THING_SIZE);
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
if (is_first_trap) {
+ if (is_atom(tid))
+ tid = erts_db_make_tid(p, &tb->common);
mpb = erts_db_make_match_prog_ref(p, *mpp, &hp);
*mpp = NULL; /* otherwise the caller will destroy it */
}
else {
+ ASSERT(!is_atom(tid));
mpb = prev_continuation_tptr[3];
}
@@ -1527,16 +1501,16 @@ static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function,
make_small(slot_ix),
mpb,
egot);
- *ret = bif_trap1(trap_function, p, continuation);
+ ERTS_BIF_PREP_TRAP1(*ret, trap_function, p, continuation);
return DB_ERROR_NONE;
}
-static ERTS_INLINE int unpack_simple_mtraversal_continuation(Eterm continuation,
- Eterm** tptr_ptr,
- Eterm* tid_ptr,
- Sint* slot_ix_p,
- Binary** mpp,
- Sint* got_p)
+static ERTS_INLINE int unpack_simple_continuation(Eterm continuation,
+ Eterm** tptr_ptr,
+ Eterm* tid_ptr,
+ Sint* slot_ix_p,
+ Binary** mpp,
+ Sint* got_p)
{
Eterm* tptr;
ASSERT(is_tuple(continuation));
@@ -1571,6 +1545,7 @@ static ERTS_INLINE int unpack_simple_mtraversal_continuation(Eterm continuation,
#define MAX_SELECT_CHUNK_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
@@ -1578,77 +1553,86 @@ typedef struct {
Sint chunk_size;
Eterm match_list;
Eterm* prev_continuation_tptr;
-} mtraversal_select_chunk_context_t;
+} select_chunk_context_t;
-static int mtraversal_select_chunk_on_nothing_can_match(void* context_ptr, Eterm* ret) {
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
- *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+static int select_chunk_on_nothing_can_match(match_callbacks_t* ctx_base, Eterm* ret)
+{
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
+ *ret = (ctx->chunk_size > 0 ? am_EOT : NIL);
return DB_ERROR_NONE;
}
-static int mtraversal_select_chunk_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_chunk_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
if (is_value(match_res)) {
- sc_context_ptr->match_list = CONS(sc_context_ptr->hp, match_res, sc_context_ptr->match_list);
+ ctx->match_list = CONS(ctx->hp, match_res, ctx->match_list);
return 1;
}
return 0;
}
-static int mtraversal_select_chunk_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
Eterm mpb;
if (iterations_left == MAX_SELECT_CHUNK_ITERATIONS) {
/* We didn't get to iterate a single time, which means EOT */
- ASSERT(sc_context_ptr->match_list == NIL);
- *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+ ASSERT(ctx->match_list == NIL);
+ *ret = (ctx->chunk_size > 0 ? am_EOT : NIL);
return DB_ERROR_NONE;
}
else {
ASSERT(iterations_left < MAX_SELECT_CHUNK_ITERATIONS);
- BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
- if (sc_context_ptr->chunk_size) {
+ BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (ctx->chunk_size) {
Eterm continuation;
Eterm rest = NIL;
Sint rest_size = 0;
- if (got > sc_context_ptr->chunk_size) { /* Split list in return value and 'rest' */
- Eterm tmp = sc_context_ptr->match_list;
- rest = sc_context_ptr->match_list;
- while (got-- > sc_context_ptr->chunk_size + 1) {
+ if (got > ctx->chunk_size) { /* Split list in return value and 'rest' */
+ Eterm tmp = ctx->match_list;
+ rest = ctx->match_list;
+ while (got-- > ctx->chunk_size + 1) {
tmp = CDR(list_val(tmp));
++rest_size;
}
++rest_size;
- sc_context_ptr->match_list = CDR(list_val(tmp));
+ ctx->match_list = CDR(list_val(tmp));
CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
been in 'user space' */
}
if (rest != NIL || slot_ix >= 0) { /* Need more calls */
- sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3 + 7 + ERTS_MAGIC_REF_THING_SIZE);
- mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &sc_context_ptr->hp);
+ Eterm tid = ctx->tid;
+ ctx->hp = HAllocX(ctx->p,
+ 3 + 7 + ERTS_MAGIC_REF_THING_SIZE,
+ ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &ctx->hp);
+ if (is_atom(tid))
+ tid = erts_db_make_tid(ctx->p,
+ &ctx->tb->common);
continuation = TUPLE6(
- sc_context_ptr->hp,
- sc_context_ptr->tid,
+ ctx->hp,
+ tid,
make_small(slot_ix),
- make_small(sc_context_ptr->chunk_size),
+ make_small(ctx->chunk_size),
mpb, rest,
make_small(rest_size));
*mpp = NULL; /* Otherwise the caller will destroy it */
- sc_context_ptr->hp += 7;
- *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, continuation);
+ ctx->hp += 7;
+ *ret = TUPLE2(ctx->hp, ctx->match_list, continuation);
return DB_ERROR_NONE;
} else { /* All data is exhausted */
- if (sc_context_ptr->match_list != NIL) { /* No more data to search but still a
+ if (ctx->match_list != NIL) { /* No more data to search but still a
result to return to the caller */
- sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3);
- *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, am_EOT);
+ ctx->hp = HAlloc(ctx->p, 3);
+ *ret = TUPLE2(ctx->hp, ctx->match_list, am_EOT);
return DB_ERROR_NONE;
} else { /* Reached the end of the ttable with no data to return */
*ret = am_EOT;
@@ -1656,78 +1640,88 @@ static int mtraversal_select_chunk_on_loop_ended(void* context_ptr, Sint slot_ix
}
}
}
- *ret = sc_context_ptr->match_list;
+ *ret = ctx->match_list;
return DB_ERROR_NONE;
}
}
-static int mtraversal_select_chunk_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_chunk_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
Eterm mpb;
Eterm continuation;
Eterm* hp;
- BUMP_ALL_REDS(sc_context_ptr->p);
+ BUMP_ALL_REDS(ctx->p);
- if (sc_context_ptr->prev_continuation_tptr == NULL) {
+ if (ctx->prev_continuation_tptr == NULL) {
+ Eterm tid = ctx->tid;
/* First time we're trapping */
- hp = HAlloc(sc_context_ptr->p, 7 + ERTS_MAGIC_REF_THING_SIZE);
- mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &hp);
+ hp = HAllocX(ctx->p, 7 + ERTS_MAGIC_REF_THING_SIZE,
+ ERTS_MAGIC_REF_THING_SIZE);
+ if (is_atom(tid))
+ tid = erts_db_make_tid(ctx->p, &ctx->tb->common);
+ mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &hp);
continuation = TUPLE6(
hp,
- sc_context_ptr->tid,
+ tid,
make_small(slot_ix),
- make_small(sc_context_ptr->chunk_size),
+ make_small(ctx->chunk_size),
mpb,
- sc_context_ptr->match_list,
+ ctx->match_list,
make_small(got));
*mpp = NULL; /* otherwise the caller will destroy it */
}
else {
/* Not the first time we're trapping; reuse continuation terms */
- hp = HAlloc(sc_context_ptr->p, 7);
+ hp = HAlloc(ctx->p, 7);
continuation = TUPLE6(
hp,
- sc_context_ptr->prev_continuation_tptr[1],
+ ctx->prev_continuation_tptr[1],
make_small(slot_ix),
- sc_context_ptr->prev_continuation_tptr[3],
- sc_context_ptr->prev_continuation_tptr[4],
- sc_context_ptr->match_list,
+ ctx->prev_continuation_tptr[3],
+ ctx->prev_continuation_tptr[4],
+ ctx->match_list,
make_small(got));
}
- *ret = bif_trap1(&ets_select_continue_exp, sc_context_ptr->p, continuation);
+ ERTS_BIF_PREP_TRAP1(*ret, &ets_select_continue_exp, ctx->p,
+ continuation);
return DB_ERROR_NONE;
}
-static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reverse, Eterm *ret) {
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern,
+ int reverse, Eterm *ret)
+{
return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret);
}
-static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size,
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
int reverse, Eterm *ret)
{
- mtraversal_select_chunk_context_t sc_context;
- sc_context.p = p;
- sc_context.tb = &tbl->hash;
- sc_context.tid = tid;
- sc_context.hp = NULL;
- sc_context.chunk_size = chunk_size;
- sc_context.match_list = NIL;
- sc_context.prev_continuation_tptr = NULL;
+ select_chunk_context_t ctx;
+
+ ctx.base.on_nothing_can_match = select_chunk_on_nothing_can_match;
+ ctx.base.on_match_res = select_chunk_on_match_res;
+ ctx.base.on_loop_ended = select_chunk_on_loop_ended;
+ ctx.base.on_trap = select_chunk_on_trap,
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.hp = NULL;
+ ctx.chunk_size = chunk_size;
+ ctx.match_list = NIL;
+ ctx.prev_continuation_tptr = NULL;
return match_traverse(
- sc_context.p, sc_context.tb,
+ ctx.p, ctx.tb,
pattern, NULL,
- sc_context.chunk_size,
+ ctx.chunk_size,
MAX_SELECT_CHUNK_ITERATIONS,
- &sc_context.hp, 0,
- mtraversal_select_chunk_on_nothing_can_match,
- mtraversal_select_chunk_on_match_res,
- mtraversal_select_chunk_on_loop_ended,
- mtraversal_select_chunk_on_trap,
- &sc_context, ret);
+ &ctx.hp, 0,
+ &ctx.base, ret);
}
/*
@@ -1736,47 +1730,50 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm patte
*
*/
-static int mtraversal_select_chunk_continue_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static
+int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
Eterm continuation;
Eterm rest = NIL;
Eterm* hp;
ASSERT(iterations_left <= MAX_SELECT_CHUNK_ITERATIONS);
- BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
- if (sc_context_ptr->chunk_size) {
+ BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (ctx->chunk_size) {
Sint rest_size = 0;
- if (got > sc_context_ptr->chunk_size) {
+ if (got > ctx->chunk_size) {
/* Cannot write destructively here,
the list may have
been in user space */
- hp = HAlloc(sc_context_ptr->p, (got - sc_context_ptr->chunk_size) * 2);
- while (got-- > sc_context_ptr->chunk_size) {
- rest = CONS(hp, CAR(list_val(sc_context_ptr->match_list)), rest);
+ hp = HAlloc(ctx->p, (got - ctx->chunk_size) * 2);
+ while (got-- > ctx->chunk_size) {
+ rest = CONS(hp, CAR(list_val(ctx->match_list)), rest);
hp += 2;
- sc_context_ptr->match_list = CDR(list_val(sc_context_ptr->match_list));
+ ctx->match_list = CDR(list_val(ctx->match_list));
++rest_size;
}
}
if (rest != NIL || slot_ix >= 0) {
- hp = HAlloc(sc_context_ptr->p, 3 + 7);
+ hp = HAlloc(ctx->p, 3 + 7);
continuation = TUPLE6(
hp,
- sc_context_ptr->prev_continuation_tptr[1],
+ ctx->prev_continuation_tptr[1],
make_small(slot_ix),
- sc_context_ptr->prev_continuation_tptr[3],
- sc_context_ptr->prev_continuation_tptr[4],
+ ctx->prev_continuation_tptr[3],
+ ctx->prev_continuation_tptr[4],
rest,
make_small(rest_size));
hp += 7;
- *ret = TUPLE2(hp, sc_context_ptr->match_list, continuation);
+ *ret = TUPLE2(hp, ctx->match_list, continuation);
return DB_ERROR_NONE;
} else {
- if (sc_context_ptr->match_list != NIL) {
- hp = HAlloc(sc_context_ptr->p, 3);
- *ret = TUPLE2(hp, sc_context_ptr->match_list, am_EOT);
+ if (ctx->match_list != NIL) {
+ hp = HAlloc(ctx->p, 3);
+ *ret = TUPLE2(hp, ctx->match_list, am_EOT);
return DB_ERROR_NONE;
} else {
*ret = am_EOT;
@@ -1784,15 +1781,17 @@ static int mtraversal_select_chunk_continue_on_loop_ended(void* context_ptr, Sin
}
}
}
- *ret = sc_context_ptr->match_list;
+ *ret = ctx->match_list;
return DB_ERROR_NONE;
}
/*
* This is called when select traps
*/
-static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
- mtraversal_select_chunk_context_t sc_context = {0};
+static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
+ Eterm* ret)
+{
+ select_chunk_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -1827,21 +1826,21 @@ static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
match_list = tptr[5];
/* Proceed */
- sc_context.p = p;
- sc_context.tb = &tbl->hash;
- sc_context.tid = tid;
- sc_context.hp = NULL;
- sc_context.chunk_size = chunk_size;
- sc_context.match_list = match_list;
- sc_context.prev_continuation_tptr = tptr;
+ ctx.base.on_match_res = select_chunk_on_match_res;
+ ctx.base.on_loop_ended = select_chunk_continue_on_loop_ended;
+ ctx.base.on_trap = select_chunk_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.hp = NULL;
+ ctx.chunk_size = chunk_size;
+ ctx.match_list = match_list;
+ ctx.prev_continuation_tptr = tptr;
return match_traverse_continue(
- sc_context.p, sc_context.tb, sc_context.chunk_size,
- iterations_left, &sc_context.hp, slot_ix, got, &mp, 0,
- mtraversal_select_chunk_on_match_res, /* Reuse callback */
- mtraversal_select_chunk_continue_on_loop_ended,
- mtraversal_select_chunk_on_trap, /* Reuse callback */
- &sc_context, ret);
+ ctx.p, ctx.tb, ctx.chunk_size,
+ iterations_left, &ctx.hp, slot_ix, got, &mp, 0,
+ &ctx.base, ret);
badparam:
*ret = NIL;
@@ -1860,75 +1859,83 @@ badparam:
#define MAX_SELECT_COUNT_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
- Eterm* hp;
Eterm* prev_continuation_tptr;
-} mtraversal_select_count_context_t;
+} select_count_context_t;
-static int mtraversal_select_count_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+static int select_count_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int mtraversal_select_count_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_count_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
return (match_res == am_true);
}
-static int mtraversal_select_count_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_count_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
+ select_count_context_t* ctx = (select_count_context_t*) ctx_base;
ASSERT(iterations_left <= MAX_SELECT_COUNT_ITERATIONS);
- BUMP_REDS(scnt_context_ptr->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
- *ret = erts_make_integer(got, scnt_context_ptr->p);
+ BUMP_REDS(ctx->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
+ *ret = erts_make_integer(got, ctx->p);
return DB_ERROR_NONE;
}
-static int mtraversal_select_count_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_count_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
- return on_mtraversal_simple_trap(
+ select_count_context_t* ctx = (select_count_context_t*) ctx_base;
+ return on_simple_trap(
&ets_select_count_continue_exp,
- scnt_context_ptr->p,
- scnt_context_ptr->tb,
- scnt_context_ptr->tid,
- scnt_context_ptr->prev_continuation_tptr,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
slot_ix, got, mpp, ret);
}
-static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
- mtraversal_select_count_context_t scnt_context = {0};
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ select_count_context_t ctx;
Sint iterations_left = MAX_SELECT_COUNT_ITERATIONS;
Sint chunk_size = 0;
- scnt_context.p = p;
- scnt_context.tb = &tbl->hash;
- scnt_context.tid = tid;
- scnt_context.hp = NULL;
- scnt_context.prev_continuation_tptr = NULL;
+ ctx.base.on_nothing_can_match = select_count_on_nothing_can_match;
+ ctx.base.on_match_res = select_count_on_match_res;
+ ctx.base.on_loop_ended = select_count_on_loop_ended;
+ ctx.base.on_trap = select_count_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
return match_traverse(
- scnt_context.p, scnt_context.tb,
+ ctx.p, ctx.tb,
pattern, NULL,
chunk_size, iterations_left, NULL, 0,
- mtraversal_select_count_on_nothing_can_match,
- mtraversal_select_count_on_match_res,
- mtraversal_select_count_on_loop_ended,
- mtraversal_select_count_on_trap,
- &scnt_context, ret);
+ &ctx.base, ret);
}
/*
* This is called when select_count traps
*/
-static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
- mtraversal_select_count_context_t scnt_context = {0};
+static int db_select_count_continue_hash(Process* p, DbTable* tbl,
+ Eterm continuation, Eterm* ret)
+{
+ select_count_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -1937,25 +1944,24 @@ static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continu
Sint chunk_size = 0;
*ret = NIL;
- if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
- scnt_context.p = p;
- scnt_context.tb = &tbl->hash;
- scnt_context.tid = tid;
- scnt_context.hp = NULL;
- scnt_context.prev_continuation_tptr = tptr;
+ ctx.base.on_match_res = select_count_on_match_res;
+ ctx.base.on_loop_ended = select_count_on_loop_ended;
+ ctx.base.on_trap = select_count_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
return match_traverse_continue(
- scnt_context.p, scnt_context.tb, chunk_size,
+ ctx.p, ctx.tb, chunk_size,
MAX_SELECT_COUNT_ITERATIONS,
NULL, slot_ix, got, &mp, 0,
- mtraversal_select_count_on_match_res, /* Reuse callback */
- mtraversal_select_count_on_loop_ended, /* Reuse callback */
- mtraversal_select_count_on_trap, /* Reuse callback */
- &scnt_context, ret);
+ &ctx.base, ret);
}
#undef MAX_SELECT_COUNT_ITERATIONS
@@ -1970,108 +1976,119 @@ static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continu
#define MAX_SELECT_DELETE_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
- Eterm* hp;
Eterm* prev_continuation_tptr;
erts_aint_t fixated_by_me;
Uint last_pseudo_delete;
-} mtraversal_select_delete_context_t;
+ HashDbTerm* free_us;
+} select_delete_context_t;
-static int mtraversal_select_delete_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+static int select_delete_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_delete_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
HashDbTerm** current_ptr = *current_ptr_ptr;
- mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
HashDbTerm* del;
if (match_res != am_true)
return 0;
- if (NFIXED(sd_context_ptr->tb) > sd_context_ptr->fixated_by_me) { /* fixated by others? */
- if (slot_ix != sd_context_ptr->last_pseudo_delete) {
- if (!add_fixed_deletion(sd_context_ptr->tb, slot_ix, sd_context_ptr->fixated_by_me))
+ if (NFIXED(ctx->tb) > ctx->fixated_by_me) { /* fixated by others? */
+ if (slot_ix != ctx->last_pseudo_delete) {
+ if (!add_fixed_deletion(ctx->tb, slot_ix, ctx->fixated_by_me))
goto do_erase;
- sd_context_ptr->last_pseudo_delete = slot_ix;
+ ctx->last_pseudo_delete = slot_ix;
}
- (*current_ptr)->hvalue = INVALID_HASH;
+ (*current_ptr)->pseudo_deleted = 1;
}
else {
do_erase:
del = *current_ptr;
*current_ptr = (*current_ptr)->next; // replace pointer to term using next
- free_term(sd_context_ptr->tb, del);
+ del->next = ctx->free_us;
+ ctx->free_us = del;
}
- erts_smp_atomic_dec_nob(&sd_context_ptr->tb->common.nitems);
+ erts_atomic_dec_nob(&ctx->tb->common.nitems);
return 1;
}
-static int mtraversal_select_delete_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_delete_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
+ free_term_list(ctx->tb, ctx->free_us);
+ ctx->free_us = NULL;
ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS);
- BUMP_REDS(sd_context_ptr->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
+ BUMP_REDS(ctx->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
if (got) {
- try_shrink(sd_context_ptr->tb);
+ try_shrink(ctx->tb);
}
- *ret = erts_make_integer(got, sd_context_ptr->p);
+ *ret = erts_make_integer(got, ctx->p);
return DB_ERROR_NONE;
}
-static int mtraversal_select_delete_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_delete_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
- return on_mtraversal_simple_trap(
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
+ free_term_list(ctx->tb, ctx->free_us);
+ ctx->free_us = NULL;
+ return on_simple_trap(
&ets_select_delete_continue_exp,
- sd_context_ptr->p,
- sd_context_ptr->tb,
- sd_context_ptr->tid,
- sd_context_ptr->prev_continuation_tptr,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
slot_ix, got, mpp, ret);
}
-static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
- mtraversal_select_delete_context_t sd_context = {0};
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ select_delete_context_t ctx;
Sint chunk_size = 0;
- sd_context.p = p;
- sd_context.tb = &tbl->hash;
- sd_context.tid = tid;
- sd_context.hp = NULL;
- sd_context.prev_continuation_tptr = NULL;
-#ifdef ERTS_SMP
- sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
-#else
- sd_context.fixated_by_me = 0;
-#endif
- sd_context.last_pseudo_delete = (Uint) -1;
+ ctx.base.on_nothing_can_match = select_delete_on_nothing_can_match;
+ ctx.base.on_match_res = select_delete_on_match_res;
+ ctx.base.on_loop_ended = select_delete_on_loop_ended;
+ ctx.base.on_trap = select_delete_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
+ ctx.fixated_by_me = ctx.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
+ ctx.last_pseudo_delete = (Uint) -1;
+ ctx.free_us = NULL;
return match_traverse(
- sd_context.p, sd_context.tb,
+ ctx.p, ctx.tb,
pattern, NULL,
chunk_size,
MAX_SELECT_DELETE_ITERATIONS, NULL, 1,
- mtraversal_select_delete_on_nothing_can_match,
- mtraversal_select_delete_on_match_res,
- mtraversal_select_delete_on_loop_ended,
- mtraversal_select_delete_on_trap,
- &sd_context, ret);
+ &ctx.base, ret);
}
/*
* This is called when select_delete traps
*/
-static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
- mtraversal_select_delete_context_t sd_context = {0};
+static int db_select_delete_continue_hash(Process* p, DbTable* tbl,
+ Eterm continuation, Eterm* ret)
+{
+ select_delete_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -2079,27 +2096,27 @@ static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm contin
Sint slot_ix;
Sint chunk_size = 0;
- if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
- sd_context.p = p;
- sd_context.tb = &tbl->hash;
- sd_context.tid = tid;
- sd_context.hp = NULL;
- sd_context.prev_continuation_tptr = tptr;
- sd_context.fixated_by_me = ONLY_WRITER(p, sd_context.tb) ? 0 : 1; /* TODO: something nicer */
- sd_context.last_pseudo_delete = (Uint) -1;
+ ctx.base.on_match_res = select_delete_on_match_res;
+ ctx.base.on_loop_ended = select_delete_on_loop_ended;
+ ctx.base.on_trap = select_delete_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
+ ctx.fixated_by_me = ONLY_WRITER(p, ctx.tb) ? 0 : 1; /* TODO: something nicer */
+ ctx.last_pseudo_delete = (Uint) -1;
+ ctx.free_us = NULL;
return match_traverse_continue(
- sd_context.p, sd_context.tb, chunk_size,
+ ctx.p, ctx.tb, chunk_size,
MAX_SELECT_DELETE_ITERATIONS,
NULL, slot_ix, got, &mp, 1,
- mtraversal_select_delete_on_match_res, /* Reuse callback */
- mtraversal_select_delete_on_loop_ended, /* Reuse callback */
- mtraversal_select_delete_on_trap, /* Reuse callback */
- &sd_context, ret);
+ &ctx.base, ret);
}
#undef MAX_SELECT_DELETE_ITERATIONS
@@ -2114,24 +2131,26 @@ static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm contin
#define MAX_SELECT_REPLACE_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
- Eterm* hp;
Eterm* prev_continuation_tptr;
-} mtraversal_select_replace_context_t;
+} select_replace_context_t;
-static int mtraversal_select_replace_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+static int select_replace_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_replace_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
- mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
- DbTableHash* tb = sr_context_ptr->tb;
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
+ DbTableHash* tb = ctx->tb;
HashDbTerm* new;
HashDbTerm* next;
HashValue hval;
@@ -2147,6 +2166,7 @@ static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_i
new = new_dbterm(tb, match_res);
new->next = next;
new->hvalue = hval;
+ new->pseudo_deleted = 0;
free_term(tb, **current_ptr_ptr);
**current_ptr_ptr = new; /* replace 'next' pointer in previous object */
*current_ptr_ptr = &((**current_ptr_ptr)->next); /* advance to next object */
@@ -2155,35 +2175,37 @@ static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_i
return 0;
}
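[select_replace_on_match_res() swaps the matched object for its replacement in place: the new term inherits the old term's next pointer (and now gets pseudo_deleted cleared explicitly), the previous node is redirected to it through the pointer-to-pointer cursor, and the cursor advances past it. A hedged standalone sketch of that splice, with a hypothetical node type and no ETS term handling:]

    #include <stdlib.h>

    struct term { struct term *next; int value; };

    static void replace_current(struct term ***current_pp, int new_value)
    {
        struct term *old  = **current_pp;
        struct term *copy = malloc(sizeof *copy);

        if (copy == NULL)
            return;                    /* out of memory: keep the old node */
        copy->value = new_value;
        copy->next  = old->next;       /* inherit the position in the chain */
        **current_pp = copy;           /* previous node now points at the copy */
        *current_pp  = &copy->next;    /* advance the cursor past it */
        free(old);
    }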
-static int mtraversal_select_replace_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_replace_on_loop_ended(match_callbacks_t* ctx_base, Sint slot_ix,
+ Sint got, Sint iterations_left,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
ASSERT(iterations_left <= MAX_SELECT_REPLACE_ITERATIONS);
/* the more objects we've replaced, the more reductions we've consumed */
- BUMP_REDS(sr_context_ptr->p,
+ BUMP_REDS(ctx->p,
MIN(MAX_SELECT_REPLACE_ITERATIONS * 2,
(MAX_SELECT_REPLACE_ITERATIONS - iterations_left) + (int)got));
- *ret = erts_make_integer(got, sr_context_ptr->p);
+ *ret = erts_make_integer(got, ctx->p);
return DB_ERROR_NONE;
}
-static int mtraversal_select_replace_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_replace_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
- return on_mtraversal_simple_trap(
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
+ return on_simple_trap(
&ets_select_replace_continue_exp,
- sr_context_ptr->p,
- sr_context_ptr->tb,
- sr_context_ptr->tid,
- sr_context_ptr->prev_continuation_tptr,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
slot_ix, got, mpp, ret);
}
static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret)
{
- mtraversal_select_replace_context_t sr_context = {0};
+ select_replace_context_t ctx;
Sint chunk_size = 0;
/* Bag implementation presented both semantic consistency and performance issues,
@@ -2191,22 +2213,21 @@ static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pat
*/
ASSERT(!(tbl->hash.common.status & DB_BAG));
- sr_context.p = p;
- sr_context.tb = &tbl->hash;
- sr_context.tid = tid;
- sr_context.hp = NULL;
- sr_context.prev_continuation_tptr = NULL;
+ ctx.base.on_nothing_can_match = select_replace_on_nothing_can_match;
+ ctx.base.on_match_res = select_replace_on_match_res;
+ ctx.base.on_loop_ended = select_replace_on_loop_ended;
+ ctx.base.on_trap = select_replace_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
return match_traverse(
- sr_context.p, sr_context.tb,
+ ctx.p, ctx.tb,
pattern, db_match_keeps_key,
chunk_size,
MAX_SELECT_REPLACE_ITERATIONS, NULL, 1,
- mtraversal_select_replace_on_nothing_can_match,
- mtraversal_select_replace_on_match_res,
- mtraversal_select_replace_on_loop_ended,
- mtraversal_select_replace_on_trap,
- &sr_context, ret);
+ &ctx.base, ret);
}
/*
@@ -2214,7 +2235,7 @@ static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pat
*/
static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret)
{
- mtraversal_select_replace_context_t sr_context = {0};
+ select_replace_context_t ctx;
Eterm* tptr;
Eterm tid ;
Binary* mp;
@@ -2223,26 +2244,25 @@ static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm conti
Sint chunk_size = 0;
*ret = NIL;
- if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
/* Proceed */
- sr_context.p = p;
- sr_context.tb = &tbl->hash;
- sr_context.tid = tid;
- sr_context.hp = NULL;
- sr_context.prev_continuation_tptr = tptr;
+ ctx.base.on_match_res = select_replace_on_match_res;
+ ctx.base.on_loop_ended = select_replace_on_loop_ended;
+ ctx.base.on_trap = select_replace_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
return match_traverse_continue(
- sr_context.p, sr_context.tb, chunk_size,
+ ctx.p, ctx.tb, chunk_size,
MAX_SELECT_REPLACE_ITERATIONS,
NULL, slot_ix, got, &mp, 1,
- mtraversal_select_replace_on_match_res, /* Reuse callback */
- mtraversal_select_replace_on_loop_ended, /* Reuse callback */
- mtraversal_select_replace_on_trap, /* Reuse callback */
- &sr_context, ret);
+ &ctx.base, ret);
}
@@ -2250,8 +2270,9 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashDbTerm **bp, *b;
+ HashDbTerm *free_us = NULL;
HashValue hval = MAKE_HASH(key);
- erts_smp_rwmtx_t *lck = WLOCK_HASH(tb, hval);
+ erts_rwmtx_t *lck = WLOCK_HASH(tb, hval);
int ix = hash_to_ix(tb, hval);
int nitems_diff = 0;
@@ -2267,12 +2288,13 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
&& add_fixed_deletion(tb, ix, 0)) {
/* Pseudo remove (no need to keep several of same key) */
bp = &b->next;
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
b = b->next;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
}
}
break;
@@ -2280,9 +2302,10 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
}
WUNLOCK_HASH(lck);
if (nitems_diff) {
- erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff);
+ erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
return DB_ERROR_NONE;
}
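[db_take_hash() above shows both delete paths of the new scheme: while the table is fixed, a matching node is only flagged with pseudo_deleted = 1 (after recording the slot via add_fixed_deletion) so concurrent iterators keep a stable chain; otherwise it is unlinked and pushed onto the local free_us list for release after the lock is dropped. A compressed sketch of that decision, with hypothetical types:]

    struct term { struct term *next; int pseudo_deleted; };

    static void delete_at(struct term **bp, int table_is_fixed, struct term **free_us)
    {
        struct term *b = *bp;
        if (table_is_fixed) {
            b->pseudo_deleted = 1;   /* keep the node; readers skip it, unfix cleans up */
        } else {
            *bp = b->next;           /* unlink for real */
            b->next = *free_us;      /* defer the free until after unlocking */
            *free_us = b;
        }
    }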
@@ -2296,25 +2319,51 @@ void db_initialize_hash(void)
}
-int db_mark_all_deleted_hash(DbTable *tbl)
+static SWord db_mark_all_deleted_hash(DbTable *tbl, SWord reds)
{
+ const int LOOPS_PER_REDUCTION = 8;
DbTableHash *tb = &tbl->hash;
- HashDbTerm* list;
+ FixedDeletion* fixdel;
+ SWord loops = reds * LOOPS_PER_REDUCTION;
int i;
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
- for (i = 0; i < NACTIVE(tb); i++) {
- if ((list = BUCKET(tb,i)) != NULL) {
- add_fixed_deletion(tb, i, 0);
- do {
- list->hvalue = INVALID_HASH;
- list = list->next;
- }while(list != NULL);
- }
+ fixdel = (FixedDeletion*) erts_atomic_read_nob(&tb->fixdel);
+ if (fixdel && fixdel->trap) {
+ /* Continue after trap */
+ ASSERT(fixdel->all);
+ ASSERT(fixdel->slot < NACTIVE(tb));
+ i = fixdel->slot;
}
- erts_smp_atomic_set_nob(&tb->common.nitems, 0);
- return DB_ERROR_NONE;
+ else {
+ /* First call */
+ int ok;
+ fixdel = alloc_fixdel(tb);
+ ok = link_fixdel(tb, fixdel, 0);
+ ASSERT(ok); (void)ok;
+ i = 0;
+ }
+
+ do {
+ HashDbTerm* b;
+ for (b = BUCKET(tb,i); b; b = b->next)
+ b->pseudo_deleted = 1;
+ } while (++i < NACTIVE(tb) && --loops > 0);
+
+ if (i < NACTIVE(tb)) {
+ /* Yield */
+ fixdel->slot = i;
+ fixdel->all = 0;
+ fixdel->trap = 1;
+ return -1;
+ }
+
+ fixdel->slot = NACTIVE(tb) - 1;
+ fixdel->all = 1;
+ fixdel->trap = 0;
+ erts_atomic_set_nob(&tb->common.nitems, 0);
+ return loops < 0 ? 0 : loops / LOOPS_PER_REDUCTION;
}
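[Callee-side sketch of the yielding scheme used by db_mark_all_deleted_hash(): turn the reduction budget into a loop budget, remember where to resume if it runs out, and report either "trapped" (-1) or the unused reductions. The state struct below is hypothetical, not the ERTS FixedDeletion record, and nslots > 0 is assumed (as NACTIVE(tb) always is):]

    struct sweep_state { int next_slot; int trapped; };

    static long sweep(void (*visit_slot)(int), int nslots,
                      struct sweep_state *st, long reds)
    {
        const int SLOTS_PER_REDUCTION = 8;   /* same spirit as LOOPS_PER_REDUCTION */
        long budget = reds * SLOTS_PER_REDUCTION;
        int i = st->trapped ? st->next_slot : 0;

        do {
            visit_slot(i);
        } while (++i < nslots && --budget > 0);

        if (i < nslots) {            /* out of budget: remember position and yield */
            st->next_slot = i;
            st->trapped = 1;
            return -1;
        }
        st->trapped = 0;
        return budget < 0 ? 0 : budget / SLOTS_PER_REDUCTION;
    }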
@@ -2327,7 +2376,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
-#ifdef ERTS_SMP
i = tbl->common.is_thread_safe;
/* If crash dumping we set table to thread safe in order to
avoid taking any locks */
@@ -2337,9 +2385,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
db_calc_stats_hash(&tbl->hash, &stats);
tbl->common.is_thread_safe = i;
-#else
- db_calc_stats_hash(&tbl->hash, &stats);
-#endif
erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len);
erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len);
@@ -2361,7 +2406,7 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
continue;
erts_print(to, to_arg, "%d: [", i);
while(list != 0) {
- if (list->hvalue == INVALID_HASH)
+ if (is_pseudo_deleted(list))
erts_print(to, to_arg, "*");
if (tb->common.compress) {
Eterm key = GETKEY(tb, list->dbterm.tpl);
@@ -2380,9 +2425,9 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
}
}
-/* release all memory occupied by a single table */
-static int db_free_table_hash(DbTable *tbl)
+static int db_free_empty_table_hash(DbTable *tbl)
{
+ ASSERT(NITEMS(tbl) == 0);
while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0)
;
return 0;
@@ -2391,24 +2436,20 @@ static int db_free_table_hash(DbTable *tbl)
static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
{
DbTableHash *tb = &tbl->hash;
- FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel);
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE));
+ FixedDeletion* fixdel = (FixedDeletion*) erts_atomic_read_acqb(&tb->fixdel);
+ ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE));
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
fixdel = fx->next;
- erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- (void *) fx,
- sizeof(FixedDeletion));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ free_fixdel(tb, fx);
if (--reds < 0) {
- erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
+ erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
return reds; /* Not done */
}
}
- erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
+ erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
while(tb->nslots != 0) {
reds -= EXT_SEGSZ/64 + free_seg(tb, 1);
@@ -2420,7 +2461,6 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
return reds; /* Not done */
}
}
-#ifdef ERTS_SMP
if (tb->locks != NULL) {
int i;
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
@@ -2430,8 +2470,7 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
(void*)tb->locks, sizeof(DbTableHashFineLocks));
tb->locks = NULL;
}
-#endif
- ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
+ ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
return reds; /* Done */
}
@@ -2462,7 +2501,6 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
mpi->num_lists = 0;
mpi->key_given = 1;
mpi->something_can_match = 0;
- mpi->all_objects = 1;
mpi->mp = NULL;
for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
@@ -2515,7 +2553,6 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
- mpi->all_objects = 0;
}
++i;
if (!(mpi->key_given)) {
@@ -2530,7 +2567,7 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
if (!db_has_variable(key)) { /* Bound key */
int ix, search_slot;
HashDbTerm** bp;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
hval = MAKE_HASH(key);
lck = RLOCK_HASH(tb,hval);
ix = hash_to_ix(tb, hval);
@@ -2634,14 +2671,12 @@ static void alloc_seg(DbTableHash *tb)
tb->nslots += EXT_SEGSZ;
}
-#ifdef ERTS_SMP
static void dealloc_ext_segtab(void* lop_data)
{
struct ext_segtab* est = (struct ext_segtab*) lop_data;
erts_free(ERTS_ALC_T_DB_SEG, est);
}
-#endif
/* Shrink table by freeing the top segment
** free_records: 1=free any records in segment, 0=assume segment is empty
@@ -2680,7 +2715,6 @@ static int free_seg(DbTableHash *tb, int free_records)
SET_SEGTAB(tb, est->prev_segtab);
tb->nsegs = est->prev_nsegs;
-#ifdef ERTS_SMP
if (!tb->common.is_thread_safe) {
/*
* Table is doing a graceful shrink operation and we must avoid
@@ -2698,7 +2732,6 @@ static int free_seg(DbTableHash *tb, int free_records)
sz);
}
else
-#endif
erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
SIZEOF_EXT_SEGTAB(est->nsegs));
}
@@ -2733,7 +2766,7 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
if (!sz) {
ptr = ptr1;
while(ptr != ptr2) {
- if (ptr->hvalue != INVALID_HASH)
+ if (!is_pseudo_deleted(ptr))
sz += ptr->dbterm.size + 2;
ptr = ptr->next;
}
@@ -2744,7 +2777,7 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
ptr = ptr1;
while(ptr != ptr2) {
- if (ptr->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(ptr)) {
copy = db_copy_object_from_ets(&tb->common, &ptr->dbterm, &hp, &MSO(p));
list = CONS(hp, copy, list);
hp += 2;
@@ -2759,22 +2792,18 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
-#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
else
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
-#endif
return 1;
}
static ERTS_INLINE void
done_resizing(DbTableHash* tb)
{
-#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
erts_atomic_set_relb(&tb->is_resizing, 0);
-#endif
}
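[Standalone sketch of the try-lock idiom in begin_resizing()/done_resizing(): an atomic exchange returns the previous value, so reading back 0 means this thread flipped the flag and may resize, while 1 means another thread is already at it. C11 atomics stand in for the erts_atomic_* calls:]

    #include <stdatomic.h>

    static atomic_int is_resizing;   /* zero-initialized: nobody is resizing */

    static int try_begin_resizing(void)
    {
        /* returns nonzero iff this caller acquired the right to resize */
        return !atomic_exchange_explicit(&is_resizing, 1, memory_order_acquire);
    }

    static void end_resizing(void)
    {
        atomic_store_explicit(&is_resizing, 0, memory_order_release);
    }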
/* Grow table with one or more new buckets.
@@ -2785,7 +2814,7 @@ static void grow(DbTableHash* tb, int nitems)
HashDbTerm** pnext;
HashDbTerm** to_pnext;
HashDbTerm* p;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int nactive;
int from_ix, to_ix;
int szm;
@@ -2807,7 +2836,7 @@ static void grow(DbTableHash* tb, int nitems)
}
ASSERT(nactive < tb->nslots);
- szm = erts_smp_atomic_read_nob(&tb->szm);
+ szm = erts_atomic_read_nob(&tb->szm);
if (nactive <= szm) {
from_ix = nactive & (szm >> 1);
} else {
@@ -2818,7 +2847,7 @@ static void grow(DbTableHash* tb, int nitems)
to_ix = nactive;
lck = WLOCK_HASH(tb, from_ix);
- ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix));
+ ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix));
/* Now a final double check (with the from_ix lock held)
* that we did not get raced by a table fixer.
*/
@@ -2826,12 +2855,12 @@ static void grow(DbTableHash* tb, int nitems)
WUNLOCK_HASH(lck);
goto abort;
}
- erts_smp_atomic_set_nob(&tb->nactive, ++nactive);
+ erts_atomic_set_nob(&tb->nactive, ++nactive);
if (from_ix == 0) {
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->szm, szm);
+ erts_atomic_set_relb(&tb->szm, szm);
else
- erts_smp_atomic_set_nob(&tb->szm, szm);
+ erts_atomic_set_nob(&tb->szm, szm);
}
done_resizing(tb);
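[In grow(), the bucket feeding the newly activated slot is from_ix = nactive & (szm >> 1): under the narrower mask that is where all keys destined for to_ix = nactive have been folded. A hedged sketch of the split for the branch shown above where the mask does not need to widen (szm is assumed to be the power-of-two-minus-one index mask; hypothetical item type, no locking, no pseudo-deleted handling):]

    struct item { struct item *next; unsigned hash; };

    static void split_bucket(struct item **table, unsigned nactive, unsigned szm)
    {
        unsigned from_ix = nactive & (szm >> 1);
        unsigned to_ix   = nactive;
        struct item **from_p = &table[from_ix];
        struct item **to_p;

        table[to_ix] = (struct item *) 0;   /* fresh, empty destination bucket */
        to_p = &table[to_ix];

        while (*from_p) {
            struct item *p = *from_p;
            if ((p->hash & szm) == to_ix) {   /* wider mask says: new bucket */
                *from_p = p->next;            /* unlink from the old chain */
                p->next = (struct item *) 0;
                *to_p = p;                    /* append, preserving order */
                to_p = &p->next;
            } else {
                from_p = &p->next;            /* stays in from_ix */
            }
        }
    }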
@@ -2841,7 +2870,7 @@ static void grow(DbTableHash* tb, int nitems)
p = *pnext;
to_pnext = &BUCKET(tb, to_ix);
while (p != NULL) {
- if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
+ if (is_pseudo_deleted(p)) { /* rare but possible with fine locking */
*pnext = p->next;
free_term(tb, p);
p = *pnext;
@@ -2879,7 +2908,7 @@ static void shrink(DbTableHash* tb, int nitems)
HashDbTerm** src_bp;
HashDbTerm** dst_bp;
HashDbTerm** bp;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int src_ix, dst_ix, low_szm;
int nactive;
int loop_limit = 5;
@@ -2892,13 +2921,13 @@ static void shrink(DbTableHash* tb, int nitems)
goto abort; /* already done (race) */
}
src_ix = nactive - 1;
- low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1;
+ low_szm = erts_atomic_read_nob(&tb->szm) >> 1;
dst_ix = src_ix & low_szm;
ASSERT(dst_ix < src_ix);
ASSERT(nactive > FIRST_SEGSZ);
lck = WLOCK_HASH(tb, dst_ix);
- ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix));
+ ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix));
/* Double check for racing table fixers */
if (IS_FIXED(tb)) {
WUNLOCK_HASH(lck);
@@ -2914,7 +2943,7 @@ static void shrink(DbTableHash* tb, int nitems)
* as we must step through "src" anyway to purge pseudo deleted.
*/
while(*bp != NULL) {
- if ((*bp)->hvalue == INVALID_HASH) {
+ if (is_pseudo_deleted(*bp)) {
HashDbTerm* deleted = *bp;
*bp = deleted->next;
free_term(tb, deleted);
@@ -2927,9 +2956,9 @@ static void shrink(DbTableHash* tb, int nitems)
*src_bp = NULL;
nactive = src_ix;
- erts_smp_atomic_set_nob(&tb->nactive, nactive);
+ erts_atomic_set_nob(&tb->nactive, nactive);
if (dst_ix == 0) {
- erts_smp_atomic_set_relb(&tb->szm, low_szm);
+ erts_atomic_set_relb(&tb->szm, low_szm);
}
WUNLOCK_HASH(lck);
@@ -2964,15 +2993,15 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key,
/* It return the next live object in a table, NULL if no more */
/* In-bucket: RLOCKED */
/* Out-bucket: RLOCKED unless NULL */
-static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
- HashDbTerm *list)
+static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr,
+ HashDbTerm *list)
{
int i;
- ERTS_SMP_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
+ ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
- for (list = list->next; list != NULL; list = list->next) {
- if (list->hvalue != INVALID_HASH)
+ for ( ; list != NULL; list = list->next) {
+ if (!is_pseudo_deleted(list))
return list;
}
@@ -2981,7 +3010,7 @@ static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr,
list = BUCKET(tb,i);
while (list != NULL) {
- if (list->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(list)) {
*iptr = i;
return list;
}
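[Sketch of the scan next_live() performs: finish the current chain, then walk the following buckets, returning the first entry whose pseudo_deleted flag is clear. The layout below is a plain array of chains with a hypothetical node type; the lock hand-over the real function does while advancing between buckets is omitted:]

    struct term { struct term *next; int pseudo_deleted; };

    static struct term *next_live_sketch(struct term **buckets, unsigned nactive,
                                         unsigned *iptr, struct term *list)
    {
        unsigned i;

        for ( ; list != NULL; list = list->next)
            if (!list->pseudo_deleted)
                return list;

        for (i = *iptr + 1; i < nactive; i++) {
            for (list = buckets[i]; list != NULL; list = list->next) {
                if (!list->pseudo_deleted) {
                    *iptr = i;
                    return list;
                }
            }
        }
        return NULL;   /* no more live objects */
    }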
@@ -2999,7 +3028,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
DbTableHash *tb = &tbl->hash;
HashValue hval;
HashDbTerm **bp, *b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int flags = 0;
ASSERT(tb->common.status & DB_SET);
@@ -3014,7 +3043,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
break;
}
if (has_key(tb, b, key, hval)) {
- if (b->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(b)) {
goto Ldone;
}
break;
@@ -3044,18 +3073,20 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
HashDbTerm *q = new_dbterm(tb, obj);
q->hvalue = hval;
+ q->pseudo_deleted = 0;
q->next = NULL;
*bp = b = q;
flags |= DB_INC_TRY_GROW;
} else {
HashDbTerm *q, *next = b->next;
- ASSERT(b->hvalue == INVALID_HASH);
+ ASSERT(is_pseudo_deleted(b));
q = replace_dbterm(tb, b, obj);
q->next = next;
- q->hvalue = hval;
+ ASSERT(q->hvalue == hval);
+ q->pseudo_deleted = 0;
*bp = b = q;
- erts_smp_atomic_inc_nob(&tb->common.nitems);
+ erts_atomic_inc_nob(&tb->common.nitems);
}
HRelease(p, hend, htop);
@@ -3081,24 +3112,24 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
DbTableHash *tb = &tbl->hash;
HashDbTerm **bp = (HashDbTerm **) handle->bp;
HashDbTerm *b = *bp;
- erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck;
+ erts_rwmtx_t* lck = (erts_rwmtx_t*) handle->lck;
HashDbTerm* free_me = NULL;
- ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */
+ ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */
ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress && handle->flags & DB_MUST_RESIZE));
if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
if (IS_FIXED(tb) && add_fixed_deletion(tb, hash_to_ix(tb, b->hvalue),
0)) {
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
} else {
*bp = b->next;
free_me = b;
}
WUNLOCK_HASH(lck);
- erts_smp_atomic_dec_nob(&tb->common.nitems);
+ erts_atomic_dec_nob(&tb->common.nitems);
try_shrink(tb);
} else {
if (handle->flags & DB_MUST_RESIZE) {
@@ -3107,7 +3138,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
}
if (handle->flags & DB_INC_TRY_GROW) {
int nactive;
- int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
+ int nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
WUNLOCK_HASH(lck);
nactive = NACTIVE(tb);
@@ -3128,16 +3159,19 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
return;
}
-static int db_delete_all_objects_hash(Process* p, DbTable* tbl)
+static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds)
{
if (IS_FIXED(tbl)) {
- db_mark_all_deleted_hash(tbl);
+ reds = db_mark_all_deleted_hash(tbl, reds);
} else {
- db_free_table_hash(tbl);
+ reds = db_free_table_continue_hash(tbl, reds);
+ if (reds < 0)
+ return reds;
+
db_create_hash(p, tbl);
- erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0);
+ erts_atomic_set_nob(&tbl->hash.common.nitems, 0);
}
- return 0;
+ return reds;
}
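[Caller-side sketch of how a reduction-counted operation like the one above is driven: the continue-style function is re-invoked with a fresh budget until it reports a non-negative (finished) result, mirroring the loop in db_free_empty_table_hash(). The work function here is a toy stand-in, not the ERTS scheduler interface:]

    /* Does at most `reds` units of work per call; returns -1 when it has to
     * trap, otherwise the unused part of the budget. */
    static long work_step(long *remaining, long reds)
    {
        while (*remaining > 0 && reds > 0) {
            --*remaining;
            --reds;
        }
        return (*remaining > 0) ? -1 : reds;
    }

    static void drive_to_completion(long *remaining, long reds_per_slice)
    {
        while (work_step(remaining, reds_per_slice) < 0)
            ;   /* a real scheduler would let other processes run between slices */
    }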
void db_foreach_offheap_hash(DbTable *tbl,
@@ -3165,7 +3199,7 @@ void db_foreach_offheap_hash(DbTable *tbl,
void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
{
HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
+ erts_rwmtx_t* lck;
int sum = 0;
int sq_sum = 0;
int kept_items = 0;
@@ -3180,7 +3214,7 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
len = 0;
for (b = BUCKET(tb,ix); b!=NULL; b=b->next) {
len++;
- if (b->hvalue == INVALID_HASH)
+ if (is_pseudo_deleted(b))
++kept_items;
}
sum += len;
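[The sum and sq_sum accumulators declared above suggest mean and standard-deviation computations over the per-bucket chain lengths. A sketch of that final step, assuming a population standard deviation over n sampled buckets (the exact DbHashStats fields are not shown here):]

    #include <math.h>

    static void chain_stats(int sum, int sq_sum, int n,
                            double *avg, double *std_dev)
    {
        *avg = (double) sum / n;
        *std_dev = sqrt((double) sq_sum / n - (*avg) * (*avg));
    }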