author     Sverker Eriksson <[email protected]>   2017-03-22 15:41:31 +0100
committer  Sverker Eriksson <[email protected]>   2017-03-22 15:41:31 +0100
commit     b4ac8b2b32f094217d0533ee139273923c3a8af7 (patch)
tree       d03faaf73e8caa32c112cfb7224cdb1efdd23ec4 /erts
parent     f3e86bb18b9b60504e756e3018f8777fdd0d8bc5 (diff)
parent     772f04770bdec7341f63bf4fddb7b893b4ffdd27 (diff)
Merge branch 'sverker/ets-table-identifiers/OTP-14094'
* sverker/ets-table-identifiers:
  observer: Polish crashdump viewer for ETS
  observer: Polish Table Viewer tab
  stdlib: Remove ets_SUITE:memory_check_summary
  erts: Improve reduction count during table cleanup
  erts: Cleanup table status bits
  erts: Remove now redundant 'id' from DbTableCommon
  erts: Remove meta_main_tab
  erts: Pass tid argument down to trapping functions
  erts: Print table id as ref in crashdump and break menu
  erts: Replace meta_pid_to{_fixed}_tab with linked lists
  erts: Correct erl_rbtree comments about yielding
  erts: Add ERTS_RBT_YIELD_STAT_INIT to erl_rbtree
  Fix node_container_SUITE list_to_ref/1
  Implement ets:all() using scheduler specific data
  Rename fixation count in ets table to avoid confusion
  Introduce references as table identifiers
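The last item is the user-visible core of this merge: unnamed tables are now identified by references instead of integers, while named tables keep returning their atom name. A minimal illustrative shell session (not part of this diff; the exact reference value is made up and will differ per run):

    1> T = ets:new(my_tab, []).                % unnamed table: the id is now a reference
    #Ref<0.1050061942.2524250116.255525>
    2> is_reference(T).
    true
    3> ets:insert(T, {key, val}), ets:lookup(T, key).
    [{key,val}]
    4> ets:new(my_named_tab, [named_table]).   % named tables still return the atom
    my_named_tab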
Diffstat (limited to 'erts')
-rw-r--r--   erts/emulator/beam/bif.c               126
-rw-r--r--   erts/emulator/beam/bif.tab               3
-rw-r--r--   erts/emulator/beam/break.c               2
-rw-r--r--   erts/emulator/beam/erl_alloc.types       1
-rw-r--r--   erts/emulator/beam/erl_bif_info.c        8
-rw-r--r--   erts/emulator/beam/erl_db.c           1953
-rw-r--r--   erts/emulator/beam/erl_db.h             46
-rw-r--r--   erts/emulator/beam/erl_db_hash.c       242
-rw-r--r--   erts/emulator/beam/erl_db_hash.h        12
-rw-r--r--   erts/emulator/beam/erl_db_tree.c        63
-rw-r--r--   erts/emulator/beam/erl_db_util.h        93
-rw-r--r--   erts/emulator/beam/erl_init.c            1
-rw-r--r--   erts/emulator/beam/erl_lock_check.c      6
-rw-r--r--   erts/emulator/beam/erl_node_tables.c    41
-rw-r--r--   erts/emulator/beam/erl_process.c        88
-rw-r--r--   erts/emulator/beam/erl_process.h        32
-rw-r--r--   erts/emulator/beam/erl_rbtree.h         66
-rw-r--r--   erts/preloaded/ebin/erlang.beam        bin 106104 -> 106220 bytes
-rw-r--r--   erts/preloaded/src/erlang.erl            8
19 files changed, 1555 insertions, 1236 deletions
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 614bedab12..2d37f977c0 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -4254,6 +4254,132 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
+{
+ /*
+ * A valid reference is of the form
+ * "#Ref<N.X.Y.Z>" where N, X, Y, and Z are
+ * 32-bit integers (i.e., max 10 characters).
+ */
+ Eterm *hp;
+ Eterm res;
+ Uint32 refn[ERTS_MAX_REF_NUMBERS];
+ int n = 0;
+ Uint ints[1 + ERTS_MAX_REF_NUMBERS] = {0};
+ char* cp;
+ Sint i;
+ DistEntry *dep = NULL;
+ char buf[5 /* #Ref< */
+ + (1 + ERTS_MAX_REF_NUMBERS)*(10 + 1) /* N.X.Y.Z> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminator */
+
+ cp = &buf[0];
+ if (*cp++ != '#') goto bad;
+ if (*cp++ != 'R') goto bad;
+ if (*cp++ != 'e') goto bad;
+ if (*cp++ != 'f') goto bad;
+ if (*cp++ != '<') goto bad;
+
+ for (i = 0; i < sizeof(ints)/sizeof(Uint); i++) {
+ if (*cp < '0' || *cp > '9') goto bad;
+
+ while (*cp >= '0' && *cp <= '9') {
+ ints[i] = 10*ints[i] + (*cp - '0');
+ cp++;
+ }
+
+ n++;
+ if (ints[i] > ~((Uint32) 0)) goto bad;
+ if (*cp == '>') break;
+ if (*cp++ != '.') goto bad;
+ }
+
+ if (*cp++ != '>') goto bad;
+ if (*cp != '\0') goto bad;
+
+ if (n < 2) goto bad;
+
+ for (n = 0; i > 0; i--)
+ refn[n++] = (Uint32) ints[i];
+
+ ASSERT(n <= ERTS_MAX_REF_NUMBERS);
+
+ dep = erts_channel_no_to_dist_entry(ints[0]);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ ErtsMagicBinary *mb;
+ Uint32 sid;
+ if (refn[0] > MAX_REFERENCE) goto bad;
+ if (n != ERTS_REF_NUMBERS) goto bad;
+ sid = erts_get_ref_numbers_thr_id(refn);
+ if (sid > erts_no_schedulers) goto bad;
+ mb = erts_magic_ref_lookup_bin(refn);
+ if (mb) {
+ hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_mk_magic_ref(&hp, &BIF_P->off_heap,
+ (Binary *) mb);
+ }
+ else {
+ hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE);
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ res = make_internal_ref(hp);
+ }
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+ Uint hsz;
+ int j;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ hsz = EXTERNAL_THING_HEAD_SIZE;
+#if defined(ARCH_64)
+ hsz += n/2 + 1;
+#else
+ hsz += n;
+#endif
+
+ etp = (ExternalThing *) HAlloc(BIF_P, hsz);
+ etp->header = make_external_ref_header(n/2);
+ etp->next = BIF_P->off_heap.first;
+ etp->node = enp;
+ i = 0;
+#if defined(ARCH_64)
+ etp->data.ui32[i] = n;
+#endif
+ for (j = 0; j < n; j++) {
+ etp->data.ui32[i] = refn[j];
+ i++;
+ }
+
+ BIF_P->off_heap.first = (struct erl_off_heap_header*) etp;
+ res = make_external_ref(etp);
+ }
+
+ erts_deref_dist_entry(dep);
+ BIF_RET(res);
+
+ bad:
+ if (dep)
+ erts_deref_dist_entry(dep);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
+
/**********************************************************************/
BIF_RETTYPE group_leader_0(BIF_ALIST_0)
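The list_to_ref_1 BIF added above gives Erlang code an inverse of ref_to_list/1 for the textual "#Ref<N.X.Y.Z>" form (intended, like list_to_pid/1, mainly for debugging). A small hedged usage sketch, not part of this diff; the printed reference value is illustrative:

    1> R = make_ref().
    #Ref<0.2125962923.2875588611.93280>
    2> L = ref_to_list(R).
    "#Ref<0.2125962923.2875588611.93280>"
    3> R =:= list_to_ref(L).
    true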
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index ce2cffa498..6f50297fc5 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -88,6 +88,7 @@ bif erlang:list_to_binary/1
bif erlang:list_to_float/1
bif erlang:list_to_integer/1
bif erlang:list_to_pid/1
+bif erlang:list_to_ref/1
bif erlang:list_to_tuple/1
bif erlang:loaded/0
bif erlang:localtime/0
@@ -324,7 +325,7 @@ bif erlang:match_spec_test/3
# Bifs in ets module.
#
-bif ets:all/0
+bif ets:internal_request_all/0
bif ets:new/2
bif ets:delete/1
bif ets:delete/2
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 4dad5736c5..0b40d70cb7 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -512,6 +512,8 @@ do_break(void)
erts_free_read_env(mode);
#endif /* __WIN32__ */
+ ASSERT(erts_smp_thr_progress_is_blocking());
+
erts_printf("\n"
"BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n"
" (v)ersion (k)ill (D)b-tables (d)istribution\n");
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 3d5de72ee7..32f84c8593 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -227,6 +227,7 @@ type DB_DMC_ERROR ETS ETS db_dmc_error
type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info
type DB_TERM ETS ETS db_term
type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state
+type ETS_ALL_REQ SHORT_LIVED ETS ets_all_request
type INSTR_INFO LONG_LIVED SYSTEM instr_info
type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 06a73ffea5..025f99330a 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3634,10 +3634,6 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(TUPLE2(hp, make_small((Uint) words),
erts_ets_hash_sizeof_ext_segtab()));
}
- else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) {
- /* Used by ets_SUITE (stdlib) */
- BIF_RET(erts_ets_get_meta_state(BIF_P));
- }
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
/* Used by driver_SUITE (emulator) */
Uint sz, *szp;
@@ -4398,10 +4394,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
BIF_RET(am_ok);
}
- else if (ERTS_IS_ATOM_STR("DbTable_meta", BIF_ARG_1)) {
- /* Used by ets_SUITE (stdlib) */
- BIF_RET(erts_ets_restore_meta_state(BIF_P, BIF_ARG_2));
- }
else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 9f077dd407..2f188b5391 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -19,9 +19,7 @@
*/
/*
- * This file contains the bif interface functions and
- * the handling of the "meta tables" ie the tables of
- * db tables.
+ * This file contains the 'ets' bif interface functions.
*/
/*
@@ -43,6 +41,7 @@
#include "erl_db.h"
#include "bif.h"
#include "big.h"
+#include "erl_binary.h"
erts_smp_atomic_t erts_ets_misc_mem_size;
@@ -74,62 +73,226 @@ enum DbIterSafety {
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
+/*
+ * "fixed_tabs": list of all fixed tables for a process
+ */
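For orientation: a fixation is what ets:safe_fixtable/2 creates, pinning a table so that a first/next traversal stays safe while other processes insert or delete concurrently. A hedged Erlang-level sketch of the usage this per-process bookkeeping supports (traverse/1 and walk/2 are illustrative helper names, not part of this diff):

    traverse(Tab) ->
        ets:safe_fixtable(Tab, true),      % adds a DbFixation for this process
        try
            walk(ets:first(Tab), Tab)
        after
            ets:safe_fixtable(Tab, false)  % releases the fixation again
        end.

    walk('$end_of_table', _Tab) -> ok;
    walk(Key, Tab) -> walk(ets:next(Tab, Key), Tab).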
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix);
+#endif
-/*
-** The main meta table, containing all ets tables.
-*/
-#ifdef ERTS_SMP
+static void fixed_tabs_insert(Process* p, DbFixation* fix)
+{
+ DbFixation* first = erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
+
+ if (!first) {
+ fix->tabs.next = fix->tabs.prev = fix;
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix);
+ }
+ else {
+ ASSERT(!fixed_tabs_find(first, fix));
+ fix->tabs.prev = first->tabs.prev;
+ fix->tabs.next = first;
+ fix->tabs.prev->tabs.next = fix;
+ first->tabs.prev = fix;
+ }
+}
+
+static void fixed_tabs_delete(Process *p, DbFixation* fix)
+{
+ if (fix->tabs.next == fix) {
+ DbFixation* old;
+ ASSERT(fix->tabs.prev == fix);
+ old = erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, NULL);
+ ASSERT(old == fix); (void)old;
+ }
+ else {
+ DbFixation *first = (DbFixation*) erts_psd_get(p, ERTS_PSD_ETS_FIXED_TABLES);
-#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
-#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
-#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+ ASSERT(fixed_tabs_find(first, fix));
+ fix->tabs.prev->tabs.next = fix->tabs.next;
+ fix->tabs.next->tabs.prev = fix->tabs.prev;
-typedef union {
- erts_smp_rwmtx_t rwmtx;
- byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
- sizeof(erts_smp_rwmtx_t))];
-} erts_meta_main_tab_lock_t;
+ if (fix == first)
+ erts_psd_set(p, ERTS_PSD_ETS_FIXED_TABLES, fix->tabs.next);
+ }
+}
-static erts_meta_main_tab_lock_t *meta_main_tab_locks;
+#ifdef DEBUG
+static int fixed_tabs_find(DbFixation* first, DbFixation* fix)
+{
+ DbFixation* p;
+ if (!first) {
+ first = (DbFixation*) erts_psd_get(fix->procs.p, ERTS_PSD_ETS_FIXED_TABLES);
+ }
+ p = first;
+ do {
+ if (p == fix)
+ return 1;
+ ASSERT(p->procs.p == fix->procs.p);
+ ASSERT(p->tabs.next->tabs.prev == p);
+ p = p->tabs.next;
+ } while (p != first);
+ return 0;
+}
#endif
-static struct {
- union {
- DbTable *tb; /* Only directly readable if slot is ALIVE */
- UWord next_free; /* (index<<2)|1 if slot is FREE */
- }u;
-} *meta_main_tab;
-/* A slot in meta_main_tab can have three states:
- * FREE : Free to use for new table. Part of linked free-list.
- * ALIVE: Contains a table
- * DEAD : Contains a table that is being removed.
+
+/*
+ * fixing_procs: tree of all processes fixating a table
*/
-#define IS_SLOT_FREE(i) (meta_main_tab[(i)].u.next_free & 1)
-#define IS_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free & 2)
-#define IS_SLOT_ALIVE(i) (!(meta_main_tab[(i)].u.next_free & (1|2)))
-#define GET_NEXT_FREE_SLOT(i) (meta_main_tab[(i)].u.next_free >> 2)
-#define SET_NEXT_FREE_SLOT(i,next) (meta_main_tab[(i)].u.next_free = ((next)<<2)|1)
-#define MARK_SLOT_DEAD(i) (meta_main_tab[(i)].u.next_free |= 2)
-#define GET_ANY_SLOT_TAB(i) ((DbTable*)(meta_main_tab[(i)].u.next_free & ~(1|2))) /* dead or alive */
+#define ERTS_RBT_PREFIX fixing_procs
+#define ERTS_RBT_T DbFixation
+#define ERTS_RBT_KEY_T Process*
+#define ERTS_RBT_FLAGS_T int
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->procs.parent = NULL; \
+ (T)->procs.right = NULL; \
+ (T)->procs.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_RED(T) ((T)->procs.is_red = 1)
+#define ERTS_RBT_IS_BLACK(T) (!(T)->procs.is_red)
+#define ERTS_RBT_SET_BLACK(T) ((T)->procs.is_red = 0)
+#define ERTS_RBT_GET_FLAGS(T) ((T)->procs.is_red)
+#define ERTS_RBT_SET_FLAGS(T, F) ((T)->procs.is_red = (F))
+#define ERTS_RBT_GET_PARENT(T) ((T)->procs.parent)
+#define ERTS_RBT_SET_PARENT(T, P) ((T)->procs.parent = (P))
+#define ERTS_RBT_GET_RIGHT(T) ((T)->procs.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->procs.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->procs.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->procs.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->procs.p)
+#define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
+
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_WANT_FOREACH_DESTROY
+#ifdef DEBUG
+# define ERTS_RBT_WANT_LOOKUP
+#endif
+#define ERTS_RBT_UNDEF
-static ERTS_INLINE erts_smp_rwmtx_t *
-get_meta_main_tab_lock(unsigned slot)
+#include "erl_rbtree.h"
+
+#ifdef HARDDEBUG
+# error Do something useful with CHECK_TABLES maybe
+#else
+# define CHECK_TABLES()
+#endif
+
+
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data);
+static void schedule_free_dbtable(DbTable* tb);
+static void delete_sched_table(Process *c_p, DbTable *tb);
+
+static void table_dec_refc(DbTable *tb, erts_aint_t min_val)
+{
+ if (erts_smp_refc_dectest(&tb->common.refc, min_val) == 0)
+ schedule_free_dbtable(tb);
+}
+
+static int
+db_table_tid_destructor(Binary *unused)
+{
+ return 1;
+}
+
+static ERTS_INLINE void
+make_btid(DbTable *tb)
+{
+ Binary *btid = erts_create_magic_indirection(db_table_tid_destructor);
+ erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
+ erts_smp_atomic_init_nob(tbref, (erts_aint_t) tb);
+ tb->common.btid = btid;
+ /*
+ * Table and magic indirection refer to each other,
+ * and the table is referred to once by being alive...
+ */
+ erts_smp_refc_init(&tb->common.refc, 2);
+ erts_refc_inc(&btid->refc, 1);
+}
+
+static ERTS_INLINE DbTable* btid2tab(Binary* btid)
+{
+ erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
+ return (DbTable *) erts_smp_atomic_read_nob(tbref);
+}
+
+static DbTable *
+tid2tab(Eterm tid)
+{
+ DbTable *tb;
+ Binary *btid;
+ erts_smp_atomic_t *tbref;
+ if (!is_internal_magic_ref(tid))
+ return NULL;
+
+ btid = erts_magic_ref2bin(tid);
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor)
+ return NULL;
+
+ tbref = erts_smp_binary_to_magic_indirection(btid);
+ tb = (DbTable *) erts_smp_atomic_read_nob(tbref);
+
+ ASSERT(!tb || tb->common.btid == btid);
+
+ return tb;
+}
+
+static ERTS_INLINE int
+is_table_alive(DbTable *tb)
+{
+ erts_smp_atomic_t *tbref;
+ DbTable *rtb;
+
+ tbref = erts_smp_binary_to_magic_indirection(tb->common.btid);
+ rtb = (DbTable *) erts_smp_atomic_read_nob(tbref);
+
+ ASSERT(!rtb || rtb == tb);
+
+ return !!rtb;
+}
+
+static ERTS_INLINE int
+is_table_named(DbTable *tb)
{
#ifdef ERTS_SMP
- return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
+ return tb->common.type & DB_NAMED_TABLE;
#else
- return NULL;
+ return tb->common.status & DB_NAMED_TABLE;
#endif
}
-static erts_smp_spinlock_t meta_main_tab_main_lock;
-static Uint meta_main_tab_first_free; /* Index of first free slot */
-static int meta_main_tab_cnt; /* Number of active tables */
-static int meta_main_tab_top; /* Highest ever used slot + 1 */
-static Uint meta_main_tab_slot_mask; /* The slot index part of an unnamed table id */
-static Uint meta_main_tab_seq_incr;
-static Uint meta_main_tab_seq_cnt = 0; /* To give unique(-ish) table identifiers */
+
+static ERTS_INLINE void
+tid_clear(Process *c_p, DbTable *tb)
+{
+ DbTable *rtb;
+ Binary *btid = tb->common.btid;
+ erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid);
+ rtb = (DbTable *) erts_smp_atomic_xchg_nob(tbref, (erts_aint_t) NULL);
+ ASSERT(!rtb || tb == rtb);
+ if (rtb) {
+ table_dec_refc(tb, 1);
+ delete_sched_table(c_p, tb);
+ }
+}
+
+static ERTS_INLINE Eterm
+make_tid(Process *c_p, DbTable *tb)
+{
+ Eterm *hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ return erts_mk_magic_ref(&hp, &c_p->off_heap, tb->common.btid);
+}
+
/*
** The meta hash table of all NAMED ets tables
@@ -181,8 +344,6 @@ int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
int erts_ets_always_compress;
static int db_max_tabs;
-static DbTable *meta_pid_to_tab; /* Pid mapped to owned tables */
-static DbTable *meta_pid_to_fixed_tab; /* Pid mapped to fixed tables */
static Eterm ms_delete_all;
static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
of all objects */
@@ -195,12 +356,9 @@ static void fix_table_locked(Process* p, DbTable* tb);
static void unfix_table_locked(Process* p, DbTable* tb, db_lock_kind_t* kind);
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data);
static void free_heir_data(DbTable*);
-static void free_fixations_locked(DbTable *tb);
+static SWord free_fixations_locked(Process* p, DbTable *tb);
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab);
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds);
static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb);
static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
@@ -235,23 +393,16 @@ free_dbtable(void *vtb)
erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable),
tb->common.fixations);
}
- erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n",
- tb->common.id);
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
-
-
- erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
- print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
#endif
#ifdef ERTS_SMP
erts_smp_rwmtx_destroy(&tb->common.rwlock);
erts_smp_mtx_destroy(&tb->common.fixlock);
#endif
ASSERT(is_immed(tb->common.heir_data));
+
+ if (tb->common.btid && erts_refc_dectest(&tb->common.btid->refc, 0) == 0)
+ erts_bin_free(tb->common.btid);
+
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
}
@@ -266,13 +417,163 @@ static void schedule_free_dbtable(DbTable* tb)
* Caller is *not* allowed to access the specialized part
* (hash or tree) of *tb after this function has returned.
*/
- ASSERT(erts_smp_refc_read(&tb->common.ref, 0) == 0);
+ ASSERT(erts_smp_refc_read(&tb->common.refc, 0) == 0);
+ ASSERT(erts_smp_refc_read(&tb->common.fix_count, 0) == 0);
erts_schedule_thr_prgr_later_cleanup_op(free_dbtable,
(void *) tb,
&tb->release.data,
sizeof(DbTable));
}
+static ERTS_INLINE void
+save_sched_table(Process *c_p, DbTable *tb)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ DbTable *first;
+
+ ASSERT(esdp);
+ esdp->ets_tables.count++;
+ erts_smp_refc_inc(&tb->common.refc, 1);
+
+ first = esdp->ets_tables.clist;
+ if (!first) {
+ tb->common.all.next = tb->common.all.prev = tb;
+ esdp->ets_tables.clist = tb;
+ }
+ else {
+ tb->common.all.prev = first->common.all.prev;
+ tb->common.all.next = first;
+ tb->common.all.prev->common.all.next = tb;
+ first->common.all.prev = tb;
+ }
+}
+
+static ERTS_INLINE void
+remove_sched_table(ErtsSchedulerData *esdp, DbTable *tb)
+{
+ ErtsEtsAllYieldData *eaydp;
+ ASSERT(esdp);
+ ASSERT(erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid))
+ == (Uint32) esdp->no);
+
+ ASSERT(esdp->ets_tables.count > 0);
+ esdp->ets_tables.count--;
+
+ eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ if (eaydp->ongoing) {
+ /* ets:all() op process list from last to first... */
+ if (eaydp->tab == tb) {
+ if (eaydp->tab == esdp->ets_tables.clist)
+ eaydp->tab = NULL;
+ else
+ eaydp->tab = tb->common.all.prev;
+ }
+ }
+
+ if (tb->common.all.next == tb) {
+ ASSERT(tb->common.all.prev == tb);
+ ASSERT(esdp->ets_tables.clist == tb);
+ esdp->ets_tables.clist = NULL;
+ }
+ else {
+#ifdef DEBUG
+ DbTable *tmp = esdp->ets_tables.clist;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.all.next;
+ } while (tmp != esdp->ets_tables.clist);
+ ASSERT(tmp == tb);
+#endif
+ tb->common.all.prev->common.all.next = tb->common.all.next;
+ tb->common.all.next->common.all.prev = tb->common.all.prev;
+
+ if (esdp->ets_tables.clist == tb)
+ esdp->ets_tables.clist = tb->common.all.next;
+
+ }
+
+ table_dec_refc(tb, 0);
+}
+
+static void
+scheduled_remove_sched_table(void *vtb)
+{
+ remove_sched_table(erts_get_scheduler_data(), (DbTable *) vtb);
+}
+
+static void
+delete_sched_table(Process *c_p, DbTable *tb)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ Uint32 sid;
+
+ ASSERT(esdp);
+
+ ASSERT(tb->common.btid);
+ sid = erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ if (sid == (Uint32) esdp->no)
+ remove_sched_table(esdp, tb);
+ else
+ erts_schedule_misc_aux_work((int) sid, scheduled_remove_sched_table, tb);
+}
+
+static ERTS_INLINE void
+save_owned_table(Process *c_p, DbTable *tb)
+{
+ DbTable *first;
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+
+ erts_smp_refc_inc(&tb->common.refc, 1);
+
+ if (!first) {
+ tb->common.owned.next = tb->common.owned.prev = tb;
+ erts_psd_set(c_p, ERTS_PSD_ETS_OWNED_TABLES, tb);
+ }
+ else {
+ tb->common.owned.prev = first->common.owned.prev;
+ tb->common.owned.next = first;
+ tb->common.owned.prev->common.owned.next = tb;
+ first->common.owned.prev = tb;
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+}
+
+static ERTS_INLINE void
+delete_owned_table(Process *p, DbTable *tb)
+{
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (tb->common.owned.next == tb) {
+ DbTable* old;
+ ASSERT(tb->common.owned.prev == tb);
+ old = erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, NULL);
+ ASSERT(old == tb); (void)old;
+ }
+ else {
+ DbTable *first = (DbTable*) erts_psd_get(p, ERTS_PSD_ETS_OWNED_TABLES);
+#ifdef DEBUG
+ DbTable *tmp = first;
+ do {
+ if (tmp == tb) break;
+ tmp = tmp->common.owned.next;
+ } while (tmp != first);
+ ASSERT(tmp == tb);
+#endif
+ tb->common.owned.prev->common.owned.next = tb->common.owned.next;
+ tb->common.owned.next->common.owned.prev = tb->common.owned.prev;
+
+ if (tb == first)
+ erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ table_dec_refc(tb, 1);
+}
+
+
static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
char *rwname, char* fixname)
{
@@ -294,7 +595,6 @@ static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
@@ -327,8 +627,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
* to follow the tb pointer!
*/
#ifdef ERTS_SMP
- ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
-
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
ASSERT(tb->common.is_thread_safe);
@@ -354,20 +652,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
#endif
}
-
-static ERTS_INLINE void db_meta_lock(DbTable* tb, db_lock_kind_t kind)
-{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
- /* As long as we only lock for READ we don't have to lock at all. */
-}
-
-static ERTS_INLINE void db_meta_unlock(DbTable* tb, db_lock_kind_t kind)
-{
- ASSERT(tb == meta_pid_to_tab || tb == meta_pid_to_fixed_tab);
- ASSERT(kind != LCK_WRITE);
-}
-
static ERTS_INLINE
DbTable* db_get_table_aux(Process *p,
Eterm id,
@@ -375,7 +659,7 @@ DbTable* db_get_table_aux(Process *p,
db_lock_kind_t kind,
int meta_already_locked)
{
- DbTable *tb = NULL;
+ DbTable *tb;
erts_smp_rwmtx_t *mtl = NULL;
/*
@@ -385,23 +669,7 @@ DbTable* db_get_table_aux(Process *p,
*/
ASSERT(erts_get_scheduler_data());
- if (is_small(id)) {
- Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
- if (!meta_already_locked) {
- mtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rlock(mtl);
- }
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- else {
- erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
- || erts_lc_rwmtx_is_rwlocked(test_mtl));
- }
-#endif
- if (slot < db_max_tabs && IS_SLOT_ALIVE(slot))
- tb = meta_main_tab[slot].u.tb;
- }
- else if (is_atom(id)) {
+ if (is_atom(id)) {
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
erts_smp_rwmtx_rlock(mtl);
@@ -410,7 +678,7 @@ DbTable* db_get_table_aux(Process *p,
|| erts_lc_rwmtx_is_rwlocked(mtl));
mtl = NULL;
}
-
+ tb = NULL;
if (bucket->pu.tb != NULL) {
if (is_atom(bucket->u.name_atom)) { /* single */
if (bucket->u.name_atom == id)
@@ -428,11 +696,13 @@ DbTable* db_get_table_aux(Process *p,
}
}
}
+ else
+ tb = tid2tab(id);
+
if (tb) {
db_lock(tb, kind);
- if (tb->common.id != id
- || ((tb->common.status & what) == 0
- && p->common.id != tb->common.owner)) {
+ if ((tb->common.status & what) == 0
+ && p->common.id != tb->common.owner) {
db_unlock(tb, kind);
tb = NULL;
}
@@ -451,18 +721,6 @@ DbTable* db_get_table(Process *p,
return db_get_table_aux(p, id, what, kind, 0);
}
-/* Requires meta_main_tab_locks[slot] locked.
-*/
-static ERTS_INLINE void free_slot(int slot)
-{
- ASSERT(!IS_SLOT_FREE(slot));
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- SET_NEXT_FREE_SLOT(slot,meta_main_tab_first_free);
- meta_main_tab_first_free = slot;
- meta_main_tab_cnt--;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-}
-
static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
{
int ret = 0;
@@ -527,9 +785,10 @@ static int remove_named_tab(DbTable *tb, int have_lock)
{
int ret = 0;
erts_smp_rwmtx_t* rwlock;
- Eterm name_atom = tb->common.id;
+ Eterm name_atom = tb->common.the_name;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
+ ASSERT(is_table_named(tb));
#ifdef ERTS_SMP
if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
@@ -600,11 +859,11 @@ done:
*/
static ERTS_INLINE void local_fix_table(DbTable* tb)
{
- erts_smp_refc_inc(&tb->common.ref, 1);
+ erts_smp_refc_inc(&tb->common.fix_count, 1);
}
static ERTS_INLINE void local_unfix_table(DbTable* tb)
{
- if (erts_smp_refc_dectest(&tb->common.ref, 0) == 0) {
+ if (erts_smp_refc_dectest(&tb->common.fix_count, 0) == 0) {
ASSERT(IS_HASH_TABLE(tb->common.status));
db_unfix_table_hash(&(tb->hash));
}
@@ -1244,6 +1503,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret;
+ Eterm old_name;
erts_smp_rwmtx_t *lck1, *lck2;
#ifdef HARDDEBUG
@@ -1260,12 +1520,10 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
(void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
- if (is_small(BIF_ARG_1)) {
- Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
- lck2 = get_meta_main_tab_lock(slot);
- }
- else if (is_atom(BIF_ARG_1)) {
- (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+ if (is_atom(BIF_ARG_1)) {
+ old_name = BIF_ARG_1;
+ named_tab:
+ (void) meta_name_tab_bucket(old_name, &lck2);
if (lck1 == lck2)
lck2 = NULL;
else if (lck1 > lck2) {
@@ -1275,7 +1533,16 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
}
}
else {
- BIF_ERROR(BIF_P, BADARG);
+ tb = tid2tab(BIF_ARG_1);
+ if (!tb)
+ BIF_ERROR(BIF_P, BADARG);
+ else {
+ if (is_table_named(tb)) {
+ old_name = tb->common.the_name;
+ goto named_tab;
+ }
+ lck2 = NULL;
+ }
}
erts_smp_rwmtx_rwlock(lck1);
@@ -1286,21 +1553,19 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
if (!tb)
goto badarg;
- if (is_not_atom(tb->common.id)) { /* Not a named table */
- tb->common.the_name = BIF_ARG_2;
- goto done;
- }
-
- if (!insert_named_tab(BIF_ARG_2, tb, 1))
- goto badarg;
+ if (is_table_named(tb)) {
+ if (!insert_named_tab(BIF_ARG_2, tb, 1))
+ goto badarg;
- if (!remove_named_tab(tb, 1))
- erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.id);
-
- tb->common.id = tb->common.the_name = BIF_ARG_2;
+ if (!remove_named_tab(tb, 1))
+ erts_exit(ERTS_ERROR_EXIT,"Could not find named tab %s", tb->common.the_name);
+ ret = BIF_ARG_2;
+ }
+ else { /* Not a named table */
+ ret = BIF_ARG_1;
+ }
+ tb->common.the_name = BIF_ARG_2;
- done:
- ret = tb->common.id;
db_unlock(tb, LCK_WRITE);
erts_smp_rwmtx_rwunlock(lck1);
if (lck2)
@@ -1324,7 +1589,6 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
BIF_RETTYPE ets_new_2(BIF_ALIST_2)
{
DbTable* tb = NULL;
- int slot;
Eterm list;
Eterm val;
Eterm ret;
@@ -1339,9 +1603,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
#ifdef DEBUG
int cret;
#endif
- DeclareTmpHeap(meta_tuple,3,BIF_P);
DbTableMethod* meth;
- erts_smp_rwmtx_t *mmtl;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
@@ -1350,7 +1612,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
- status = DB_NORMAL | DB_SET | DB_PROTECTED;
+ status = DB_SET | DB_PROTECTED;
keypos = 1;
is_named = 0;
#ifdef ERTS_SMP
@@ -1433,6 +1695,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
else if (val == am_named_table) {
is_named = 1;
+ status |= DB_NAMED_TABLE;
}
else if (val == am_compressed) {
is_compressed = 1;
@@ -1487,7 +1750,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
#endif
- erts_smp_refc_init(&tb->common.ref, 0);
+ erts_smp_refc_init(&tb->common.fix_count, 0);
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
"db_tab", "db_tab_fix");
tb->common.keypos = keypos;
@@ -1496,7 +1759,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
erts_smp_atomic_init_nob(&tb->common.nitems, 0);
- tb->common.fixations = NULL;
+ tb->common.fixing_procs = NULL;
tb->common.compress = is_compressed;
#ifdef DEBUG
@@ -1505,87 +1768,36 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
meth->db_create(BIF_P, tb);
ASSERT(cret == DB_ERROR_NONE);
- erts_smp_spin_lock(&meta_main_tab_main_lock);
+ make_btid(tb);
- if (meta_main_tab_cnt >= db_max_tabs) {
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
- erts_send_error_to_logger_str(BIF_P->group_leader,
- "** Too many db tables **\n");
- free_heir_data(tb);
- tb->common.meth->db_free_table(tb);
- free_dbtable((void *) tb);
- BIF_ERROR(BIF_P, SYSTEM_LIMIT);
- }
-
- slot = meta_main_tab_first_free;
- ASSERT(slot>=0 && slot<db_max_tabs);
- meta_main_tab_first_free = GET_NEXT_FREE_SLOT(slot);
- meta_main_tab_cnt++;
- if (slot >= meta_main_tab_top) {
- ASSERT(slot == meta_main_tab_top);
- meta_main_tab_top = slot + 1;
- }
-
- if (is_named) {
- ret = BIF_ARG_1;
- }
- else {
- ret = make_small(slot | meta_main_tab_seq_cnt);
- meta_main_tab_seq_cnt += meta_main_tab_seq_incr;
- ASSERT((unsigned_val(ret) & meta_main_tab_slot_mask) == slot);
- }
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-
- tb->common.id = ret;
- tb->common.slot = slot; /* store slot for erase */
+ if (is_named)
+ ret = BIF_ARG_1;
+ else
+ ret = make_tid(BIF_P, tb);
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- meta_main_tab[slot].u.tb = tb;
- ASSERT(IS_SLOT_ALIVE(slot));
- erts_smp_rwmtx_rwunlock(mmtl);
+ save_sched_table(BIF_P, tb);
if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
- mmtl = get_meta_main_tab_lock(slot);
- erts_smp_rwmtx_rwlock(mmtl);
- free_slot(slot);
- erts_smp_rwmtx_rwunlock(mmtl);
+ tid_clear(BIF_P, tb);
db_lock(tb,LCK_WRITE);
free_heir_data(tb);
tb->common.meth->db_free_table(tb);
- schedule_free_dbtable(tb);
db_unlock(tb,LCK_WRITE);
+ table_dec_refc(tb, 0);
BIF_ERROR(BIF_P, BADARG);
}
BIF_P->flags |= F_USING_DB; /* So we can remove tb if p dies */
+ save_owned_table(BIF_P, tb);
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id,
BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
- erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
- erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n",
- erts_smp_atomic_read_nob(&meta_pid_to_fixed_tab->common.memory_size));
#endif
- UseTmpHeap(3,BIF_P);
-
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(slot)),
- 0) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Could not update ets metadata.");
- }
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
-
- UnUseTmpHeap(3,BIF_P);
-
BIF_RET(ret);
}
@@ -1690,9 +1902,9 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
*/
BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
{
- int trap;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
DbTable* tb;
- erts_smp_rwmtx_t *mmtl;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1715,7 +1927,6 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status |= DB_DELETE;
if (tb->common.owner != BIF_P->common.id) {
- DeclareTmpHeap(meta_tuple,3,BIF_P);
/*
* The table is being deleted by a process other than its owner.
@@ -1723,50 +1934,33 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
* current process will be killed (e.g. by an EXIT signal), we will
* now transfer the ownership to the current process.
*/
- UseTmpHeap(3,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
- BIF_P->flags |= F_USING_DB;
- tb->common.owner = BIF_P->common.id;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,
- BIF_P->common.id,
- make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(3,BIF_P);
- }
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- /*
- * We keep our increased refc over this op in order to
- * prevent the table from disapearing.
- */
- db_unlock(tb, LCK_WRITE);
- erts_smp_rwmtx_rwlock(mmtl);
- db_lock(tb, LCK_WRITE);
+ Process *rp = erts_proc_lookup_raw(tb->common.owner);
+ /*
+ * Process 'rp' might be exiting, but our table lock prevents it
+ * from terminating as it cannot complete erts_db_process_exiting().
+ */
+ ASSERT(!(ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)));
+
+ delete_owned_table(rp, tb);
+ BIF_P->flags |= F_USING_DB;
+ tb->common.owner = BIF_P->common.id;
+ save_owned_table(BIF_P, tb);
}
-#endif
- /* We must keep the slot, to be found by db_proc_dead() if process dies */
- MARK_SLOT_DEAD(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
- if (is_atom(tb->common.id))
+
+ tid_clear(BIF_P, tb);
+
+ if (is_table_named(tb))
remove_named_tab(tb, 0);
/* disable inheritance */
free_heir_data(tb);
tb->common.heir = am_none;
- free_fixations_locked(tb);
-
- trap = free_table_cont(BIF_P, tb, 1, 1);
+ reds -= free_fixations_locked(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
- if (trap) {
+
+ if (free_table_continue(BIF_P, tb, reds) < 0) {
/*
* Package the DbTable* pointer into a bignum so that it can be safely
* passed through a trap. We used to pass the DbTable* pointer directly
@@ -1776,9 +1970,11 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
Eterm *hp = HAlloc(BIF_P, 2);
hp[0] = make_pos_bignum_header(1);
hp[1] = (Eterm) tb;
+ BUMP_ALL_REDS(BIF_P);
BIF_TRAP1(&ets_delete_continue_exp, BIF_P, make_big(hp));
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
@@ -1790,7 +1986,6 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
{
Process* to_proc = NULL;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,BIF_P);
Eterm to_pid = BIF_ARG_2;
Eterm from_pid;
DbTable* tb = NULL;
@@ -1812,26 +2007,14 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
goto badarg; /* or should we be idempotent? return false maybe */
}
- UseTmpHeap(5,BIF_P);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
-
+ delete_owned_table(BIF_P, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
+ save_owned_table(to_proc, tb);
db_unlock(tb,LCK_WRITE);
- erts_send_message(BIF_P, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER,
- tb->common.id,
- from_pid,
- BIF_ARG_3),
- 0);
+ send_ets_transfer_message(BIF_P, to_proc, &to_locks,
+ tb, BIF_ARG_3);
erts_smp_proc_unlock(to_proc, to_locks);
UnUseTmpHeap(5,BIF_P);
BIF_RET(am_true);
@@ -2074,7 +2257,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_delete(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P,tb);
@@ -2101,46 +2284,254 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
return result;
}
-/*
-** Return a list of tables on this node
-*/
-BIF_RETTYPE ets_all_0(BIF_ALIST_0)
+/*
+ * ets:all/0
+ *
+ * ets:all() calls ets:internal_request_all/0 which
+ * requests information about all tables from
+ * each scheduler thread. Each scheduler replies
+ * to the calling process with information about
+ * existing tables created on that specific scheduler.
+ */
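A hedged sketch of the Erlang-side wrapper this protocol implies (the actual stdlib code is outside this hunk; collect/3 is an illustrative helper name): the caller gets a request reference back and then expects one {Ref, Tables} reply per scheduler:

    all() ->
        Ref = ets:internal_request_all(),
        collect(Ref, erlang:system_info(schedulers), []).

    collect(_Ref, 0, Acc) ->
        Acc;
    collect(Ref, N, Acc) ->
        receive
            {Ref, Tables} -> collect(Ref, N - 1, Tables ++ Acc)
        end.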
+
+struct ErtsEtsAllReq_ {
+ erts_smp_atomic32_t refc;
+ Process *proc;
+ ErtsOIRefStorage ref;
+ ErtsEtsAllReqList list[1]; /* one per scheduler */
+};
+
+#define ERTS_ETS_ALL_REQ_SIZE \
+ (sizeof(ErtsEtsAllReq) \
+ + (sizeof(ErtsEtsAllReqList) \
+ * (erts_no_schedulers - 1)))
+
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllData;
+
+/* Tables handled before yielding */
+#define ERTS_ETS_ALL_TB_YCNT 200
+/*
+ * Min yield count required before starting
+ * an operation that will require yield.
+ */
+#define ERTS_ETS_ALL_TB_YCNT_START 10
+
+#ifdef DEBUG
+/* Test yielding... */
+#undef ERTS_ETS_ALL_TB_YCNT
+#undef ERTS_ETS_ALL_TB_YCNT_START
+#define ERTS_ETS_ALL_TB_YCNT 10
+#define ERTS_ETS_ALL_TB_YCNT_START 1
+#endif
+
+static int
+ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp,
+ ErlHeapFragment **hfragpp, DbTable **tablepp,
+ int *yield_count_p)
{
- DbTable* tb;
- Eterm previous;
- int i;
- Eterm* hp;
- Eterm* hendp;
- int t_tabs_cnt;
- int t_top;
-
- erts_smp_spin_lock(&meta_main_tab_main_lock);
- t_tabs_cnt = meta_main_tab_cnt;
- t_top = meta_main_tab_top;
- erts_smp_spin_unlock(&meta_main_tab_main_lock);
-
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
-
- previous = NIL;
- for(i = 0; i < t_top; i++) {
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(i);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(i)) {
- if (hp == hendp) {
- /* Racing table creator, grab some more heap space */
- t_tabs_cnt = 10;
- hp = HAlloc(BIF_P, 2*t_tabs_cnt);
- hendp = hp + 2*t_tabs_cnt;
- }
- tb = meta_main_tab[i].u.tb;
- previous = CONS(hp, tb->common.id, previous);
- hp += 2;
- }
- erts_smp_rwmtx_runlock(mmtl);
+ ErtsEtsAllReq *reqp = *reqpp;
+ ErlHeapFragment *hfragp = *hfragpp;
+ int ycount = *yield_count_p;
+ DbTable *tb, *first;
+ Uint sz;
+ Eterm list, msg, ref, *hp;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+
+ /*
+ * - save_sched_table() inserts at end of circular list.
+ *
+ * - This function scans from the end so we know that
+ * the amount of tables to scan wont grow even if we
+ * yield.
+ *
+ * - remove_sched_table() updates the table we yielded
+ * on if it removes it.
+ */
+
+ if (hfragp) {
+ /* Restart of a yielded operation... */
+ ASSERT(hfragp->used_size < hfragp->alloc_size);
+ ohp = &hfragp->off_heap;
+ hp = &hfragp->mem[hfragp->used_size];
+ list = *hp;
+ hfragp->used_size = hfragp->alloc_size;
+ first = esdp->ets_tables.clist;
+ tb = *tablepp;
+ }
+ else {
+ /* A new operation... */
+ ASSERT(!*tablepp);
+
+ /* Max heap size needed... */
+ sz = esdp->ets_tables.count;
+ sz *= ERTS_MAGIC_REF_THING_SIZE + 2;
+ sz += 3 + ERTS_REF_THING_SIZE;
+ hfragp = new_message_buffer(sz);
+
+ hp = &hfragp->mem[0];
+ ohp = &hfragp->off_heap;
+ list = NIL;
+ first = esdp->ets_tables.clist;
+ tb = first ? first->common.all.prev : NULL;
+ }
+
+ if (tb) {
+ while (1) {
+ if (is_table_alive(tb)) {
+ Eterm tid;
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ list = CONS(hp, tid, list);
+ hp += 2;
+ }
+
+ if (tb == first)
+ break;
+
+ tb = tb->common.all.prev;
+
+ if (--ycount <= 0) {
+ sz = hp - &hfragp->mem[0];
+ ASSERT(hfragp->alloc_size > sz + 1);
+ *hp = list;
+ hfragp->used_size = sz;
+ *hfragpp = hfragp;
+ *reqpp = reqp;
+ *tablepp = tb;
+ *yield_count_p = 0;
+ return 1; /* Yield! */
+ }
+ }
+ }
+
+ ref = erts_oiref_storage_make_ref(&reqp->ref, &hp);
+ msg = TUPLE2(hp, ref, list);
+ hp += 3;
+
+ sz = hp - &hfragp->mem[0];
+ ASSERT(sz <= hfragp->alloc_size);
+
+ hfragp = erts_resize_message_buffer(hfragp, sz, &msg, 1);
+
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = hfragp;
+
+ erts_queue_message(reqp->proc, 0, mp, msg, am_system);
+
+ erts_proc_dec_refc(reqp->proc);
+
+ if (erts_smp_atomic32_dec_read_nob(&reqp->refc) == 0)
+ erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp);
+
+ *reqpp = NULL;
+ *hfragpp = NULL;
+ *tablepp = NULL;
+ *yield_count_p = ycount;
+
+ return 0;
+}
+
+int
+erts_handle_yielded_ets_all_request(ErtsSchedulerData *esdp,
+ ErtsEtsAllYieldData *eaydp)
+{
+ int ix = (int) esdp->no - 1;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+
+ while (1) {
+ if (!eaydp->ongoing) {
+ ErtsEtsAllReq *ongoing;
+
+ if (!eaydp->queue)
+ return 0; /* All work completed! */
+
+ if (yc < ERTS_ETS_ALL_TB_YCNT_START && yc > esdp->ets_tables.count)
+ return 1; /* Yield! */
+
+ eaydp->ongoing = ongoing = eaydp->queue;
+ if (ongoing->list[ix].next == ongoing)
+ eaydp->queue = NULL;
+ else {
+ ongoing->list[ix].next->list[ix].prev = ongoing->list[ix].prev;
+ ongoing->list[ix].prev->list[ix].next = ongoing->list[ix].next;
+ eaydp->queue = ongoing->list[ix].next;
+ }
+ ASSERT(!eaydp->hfrag);
+ ASSERT(!eaydp->tab);
+ }
+
+ if (ets_all_reply(esdp, &eaydp->ongoing, &eaydp->hfrag, &eaydp->tab, &yc))
+ return 1; /* Yield! */
+ }
+}
+
+static void
+handle_ets_all_request(void *vreq)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsEtsAllYieldData *eayp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ ErtsEtsAllReq *req = (ErtsEtsAllReq *) vreq;
+
+ if (!eayp->ongoing && !eayp->queue) {
+ /* No ets:all() operations ongoing... */
+ ErlHeapFragment *hf = NULL;
+ DbTable *tb = NULL;
+ int yc = ERTS_ETS_ALL_TB_YCNT;
+ if (ets_all_reply(esdp, &req, &hf, &tb, &yc)) {
+ /* Yielded... */
+ ASSERT(hf);
+ eayp->ongoing = req;
+ eayp->hfrag = hf;
+ eayp->tab = tb;
+ erts_notify_new_aux_yield_work(esdp);
+ }
+ }
+ else {
+ /* Ongoing ets:all() operations; queue up this request... */
+ int ix = (int) esdp->no - 1;
+ if (!eayp->queue) {
+ req->list[ix].next = req;
+ req->list[ix].prev = req;
+ eayp->queue = req;
+ }
+ else {
+ req->list[ix].next = eayp->queue;
+ req->list[ix].prev = eayp->queue->list[ix].prev;
+ eayp->queue->list[ix].prev = req;
+ req->list[ix].prev->list[ix].next = req;
+ }
}
- HRelease(BIF_P, hendp, hp);
- BIF_RET(previous);
+}
+
+BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0)
+{
+ Eterm ref = erts_make_ref(BIF_P);
+ ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ,
+ ERTS_ETS_ALL_REQ_SIZE);
+ erts_smp_atomic32_init_nob(&req->refc,
+ (erts_aint32_t) erts_no_schedulers);
+ erts_oiref_storage_save(&req->ref, ref);
+ req->proc = BIF_P;
+ erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers);
+
+#ifdef ERTS_SMP
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ handle_ets_all_request,
+ (void *) req);
+#endif
+
+ handle_ets_all_request((void *) req);
+ BIF_RET(ref);
}
@@ -2245,7 +2636,7 @@ ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(p, tb,
+ cret = tb->common.meth->db_select_chunk(p, tb, arg1,
arg2, chunk_size,
0 /* not reversed */,
&ret);
@@ -2414,8 +2805,7 @@ ets_select2(Process* p, Eterm arg1, Eterm arg2)
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(p, tb, arg2,
- 0, &ret);
+ cret = tb->common.meth->db_select(p, tb, arg1, arg2, 0, &ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2506,7 +2896,7 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_count(BIF_P,tb,BIF_ARG_2, &ret);
+ cret = tb->common.meth->db_select_count(BIF_P,tb, BIF_ARG_1, BIF_ARG_2, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
fix_table_locked(BIF_P, tb);
@@ -2560,7 +2950,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(BIF_P,tb,
+ cret = tb->common.meth->db_select_chunk(BIF_P,tb, BIF_ARG_1,
BIF_ARG_2, chunk_size,
1 /* reversed */, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2610,7 +3000,7 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(BIF_P,tb,BIF_ARG_2,
+ cret = tb->common.meth->db_select(BIF_P,tb, BIF_ARG_1, BIF_ARG_2,
1 /*reversed*/, &ret);
if (DID_TRAP(BIF_P,ret) && safety != ITER_SAFE) {
@@ -2701,7 +3091,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
*/
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
BIF_ERROR(BIF_P, BADARG);
@@ -2763,7 +3153,7 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
Eterm ret = THE_NON_VALUE;
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) {
+ if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
BIF_RET(am_undefined);
}
BIF_ERROR(BIF_P, BADARG);
@@ -2853,7 +3243,6 @@ int erts_ets_rwmtx_spin_count = -1;
void init_db(ErtsDbSpinCount db_spin_count)
{
- DbTable init_tb;
int i;
Eterm *hp;
unsigned bits;
@@ -2902,16 +3291,6 @@ void init_db(ErtsDbSpinCount db_spin_count)
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
- meta_main_tab_locks =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES,
- sizeof(erts_meta_main_tab_lock_t)
- * ERTS_META_MAIN_TAB_LOCK_TAB_SIZE);
-
- for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
- erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
- "meta_main_tab_slot", make_small(i));
- }
- erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
"meta_name_tab", make_small(i));
@@ -2931,20 +3310,6 @@ void init_db(ErtsDbSpinCount db_spin_count)
erts_exit(ERTS_ERROR_EXIT,"Max limit for ets tabled too high %u (max %u).",
db_max_tabs, ((Uint)1)<<SMALL_BITS);
}
- meta_main_tab_slot_mask = (((Uint)1)<<bits) - 1;
- meta_main_tab_seq_incr = (((Uint)1)<<bits);
-
- size = sizeof(*meta_main_tab)*db_max_tabs;
- meta_main_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
- ERTS_ETS_MISC_MEM_ADD(size);
-
- meta_main_tab_cnt = 0;
- meta_main_tab_top = 0;
- for (i=1; i<db_max_tabs; i++) {
- SET_NEXT_FREE_SLOT(i-1,i);
- }
- SET_NEXT_FREE_SLOT(db_max_tabs-1, (Uint)-1);
- meta_main_tab_first_free = 0;
meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */
size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
@@ -2959,70 +3324,6 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_initialize_hash();
db_initialize_tree();
- /*TT*/
- /* Create meta table invertion. */
- erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_tab->common.id = NIL;
- meta_pid_to_tab->common.the_name = am_true;
- meta_pid_to_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_tab->common.type
- = meta_pid_to_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_tab->common.keypos = 1;
- meta_pid_to_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_tab->common.nitems, 0);
- meta_pid_to_tab->common.slot = -1;
- meta_pid_to_tab->common.meth = &db_hash;
- meta_pid_to_tab->common.compress = 0;
-
- erts_smp_refc_init(&meta_pid_to_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_tab, "meta_pid_to_tab", "meta_pid_to_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_tab) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables.");
- }
-
- erts_smp_atomic_set_nob(&init_tb.common.memory_size, 0);
- meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
- &init_tb,
- sizeof(DbTable));
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size,
- erts_smp_atomic_read_nob(&init_tb.common.memory_size));
-
- meta_pid_to_fixed_tab->common.id = NIL;
- meta_pid_to_fixed_tab->common.the_name = am_true;
- meta_pid_to_fixed_tab->common.status = (DB_NORMAL | DB_BAG | DB_PUBLIC | DB_FINE_LOCKED);
-#ifdef ERTS_SMP
- meta_pid_to_fixed_tab->common.type
- = meta_pid_to_fixed_tab->common.status & ERTS_ETS_TABLE_TYPES;
- /* Note, 'type' is *read only* from now on... */
- meta_pid_to_fixed_tab->common.is_thread_safe = 0;
-#endif
- meta_pid_to_fixed_tab->common.keypos = 1;
- meta_pid_to_fixed_tab->common.owner = NIL;
- erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.nitems, 0);
- meta_pid_to_fixed_tab->common.slot = -1;
- meta_pid_to_fixed_tab->common.meth = &db_hash;
- meta_pid_to_fixed_tab->common.compress = 0;
-
- erts_smp_refc_init(&meta_pid_to_fixed_tab->common.ref, 0);
- /* Neither rwlock or fixlock used
- db_init_lock(meta_pid_to_fixed_tab, "meta_pid_to_fixed_tab", "meta_pid_to_fixed_tab_FIX");*/
-
- if (db_create_hash(NULL, meta_pid_to_fixed_tab) != DB_ERROR_NONE) {
- erts_exit(ERTS_ERROR_EXIT,"Unable to create ets metadata tables.");
- }
-
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
am_ets, am_atom_put("delete_trap",11), 1,
@@ -3051,81 +3352,18 @@ void init_db(ErtsDbSpinCount db_spin_count)
ms_delete_all = CONS(hp, ms_delete_all,NIL);
}
-#define ARRAY_CHUNK 100
-
-typedef enum {
- ErtsDbProcCleanupProgressTables,
- ErtsDbProcCleanupProgressFixations,
- ErtsDbProcCleanupProgressDone,
-} ErtsDbProcCleanupProgress;
-
-typedef enum {
- ErtsDbProcCleanupOpGetTables,
- ErtsDbProcCleanupOpDeleteTables,
- ErtsDbProcCleanupOpGetFixations,
- ErtsDbProcCleanupOpDeleteFixations,
- ErtsDbProcCleanupOpDone
-} ErtsDbProcCleanupOperation;
-
-typedef struct {
- ErtsDbProcCleanupProgress progress;
- ErtsDbProcCleanupOperation op;
- struct {
- Eterm arr[ARRAY_CHUNK];
- int size;
- int ix;
- int clean_ix;
- } slots;
-} ErtsDbProcCleanupState;
-
-
-static void
-proc_exit_cleanup_tables_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
+void
+erts_ets_sched_spec_data_init(ErtsSchedulerData *esdp)
{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
+ ErtsEtsAllYieldData *eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
+ eaydp->ongoing = NULL;
+ eaydp->hfrag = NULL;
+ eaydp->tab = NULL;
+ eaydp->queue = NULL;
+ esdp->ets_tables.clist = NULL;
+ esdp->ets_tables.count = 0;
}
-static void
-proc_exit_cleanup_fixations_meta_data(Eterm pid, ErtsDbProcCleanupState *state)
-{
- ASSERT(state->slots.clean_ix <= state->slots.ix);
- if (state->slots.clean_ix < state->slots.ix) {
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (state->slots.size < ARRAY_CHUNK
- && state->slots.ix == state->slots.size) {
- Eterm dummy;
- db_erase_hash(meta_pid_to_fixed_tab,pid,&dummy);
- }
- else {
- int ix;
- /* Need to erase each explicitly */
- for (ix = state->slots.clean_ix; ix < state->slots.ix; ix++)
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- pid,
- state->slots.arr[ix]);
- }
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- state->slots.clean_ix = state->slots.ix;
- }
-}
/* In: Table LCK_WRITE
** Return TRUE : ok, table not mine and NOT locked anymore.
@@ -3135,7 +3373,6 @@ static int give_away_to_heir(Process* p, DbTable* tb)
{
Process* to_proc;
ErtsProcLocks to_locks = ERTS_PROC_LOCK_MAIN;
- DeclareTmpHeap(buf,5,p);
Eterm to_pid;
UWord heir_data;
@@ -3179,19 +3416,12 @@ retry:
erts_smp_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
- UseTmpHeap(5,p);
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab, tb->common.owner,
- make_small(tb->common.slot));
+ delete_owned_table(p, tb);
to_proc->flags |= F_USING_DB;
tb->common.owner = to_pid;
-
- db_put_hash(meta_pid_to_tab,
- TUPLE2(buf,to_pid,make_small(tb->common.slot)),
- 0);
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- UnUseTmpHeap(5,p);
+ save_owned_table(to_proc, tb);
+
db_unlock(tb,LCK_WRITE);
heir_data = tb->common.heir_data;
if (!is_immed(heir_data)) {
@@ -3199,17 +3429,100 @@ retry:
ASSERT(arityval(*tpv) == 1);
heir_data = tpv[1];
}
- erts_send_message(p, to_proc, &to_locks,
- TUPLE4(buf,
- am_ETS_TRANSFER,
- tb->common.id,
- p->common.id,
- heir_data),
- 0);
+ send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data);
erts_smp_proc_unlock(to_proc, to_locks);
return !0;
}
+static void
+send_ets_transfer_message(Process *c_p, Process *proc,
+ ErtsProcLocks *locks,
+ DbTable *tb, Eterm heir_data)
+{
+ Uint hsz, hd_sz;
+ ErtsMessage *mp;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ Eterm tid, hd_copy, msg, sender;
+
+ hsz = 5;
+ if (!is_table_named(tb))
+ hsz += ERTS_MAGIC_REF_THING_SIZE;
+ if (is_immed(heir_data))
+ hd_sz = 0;
+ else {
+ hd_sz = size_object(heir_data);
+ hsz += hd_sz;
+ }
+
+ mp = erts_alloc_message_heap(proc, locks, hsz, &hp, &ohp);
+ if (is_table_named(tb))
+ tid = tb->common.the_name;
+ else
+ tid = erts_mk_magic_ref(&hp, ohp, tb->common.btid);
+ if (!hd_sz)
+ hd_copy = heir_data;
+ else
+ hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp);
+ sender = c_p->common.id;
+ msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy);
+ erts_queue_message(proc, *locks, mp, msg, sender);
+}
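send_ets_transfer_message() above builds the same {'ETS-TRANSFER', Tid, FromPid, HeirData} tuple that ets:give_away/3 and heir inheritance deliver to the new owner. A minimal hedged sketch of the receiving side (not part of this diff):

    Heir = spawn(fun() ->
                     receive
                         {'ETS-TRANSFER', Tab, From, Gift} ->
                             io:format("now own ~p from ~p: ~p~n", [Tab, From, Gift])
                     end
                 end),
    T = ets:new(tab, []),
    true = ets:give_away(T, Heir, my_gift).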
+
+
+/* Auto-release fixation from exiting process */
+static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
+{
+ DbTable* tb = btid2tab(fix->tabs.btid);
+ SWord work = 0;
+
+ ASSERT(fix->procs.p == p); (void)p;
+ if (tb) {
+ db_lock(tb, LCK_WRITE_REC);
+ if (!(tb->common.status & DB_DELETE)) {
+ erts_aint_t diff;
+ #ifdef ERTS_SMP
+ erts_smp_mtx_lock(&tb->common.fixlock);
+ #endif
+
+ ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
+
+ diff = -((erts_aint_t) fix->counter);
+ erts_smp_refc_add(&tb->common.fix_count,diff,0);
+ fix->counter = 0;
+
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
+
+ #ifdef ERTS_SMP
+ erts_smp_mtx_unlock(&tb->common.fixlock);
+ #endif
+ if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
+ work += db_unfix_table_hash(&(tb->hash));
+ }
+
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(tb, sizeof(DbFixation), 0);
+ }
+ else {
+ ASSERT(fix->counter == 0);
+ }
+ db_unlock(tb, LCK_WRITE_REC);
+ }
+ else {
+ ASSERT(fix->counter == 0);
+ }
+
+ if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) {
+ erts_bin_free(fix->tabs.btid);
+ }
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ ++work;
+
+ return work;
+}
+
+
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
@@ -3223,276 +3536,160 @@ retry:
int
erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
{
- ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate;
+ typedef struct {
+ enum {
+ GET_OWNED_TABLE,
+ FREE_OWNED_TABLE,
+ UNFIX_TABLES,
+ }op;
+ DbTable *tb;
+ } CleanupState;
+ CleanupState *state = (CleanupState *) c_p->u.terminate;
Eterm pid = c_p->common.id;
- ErtsDbProcCleanupState default_state;
- int ret;
+ CleanupState default_state;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(c_p);
+ SWord reds = initial_reds;
if (!state) {
state = &default_state;
- state->progress = ErtsDbProcCleanupProgressTables;
- state->op = ErtsDbProcCleanupOpGetTables;
+ state->op = GET_OWNED_TABLE;
+ state->tb = NULL;
}
- while (!0) {
+ do {
switch (state->op) {
- case ErtsDbProcCleanupOpGetTables:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_tab, LCK_READ);
- if (ret == DB_ERROR_BADKEY) {
- /* Done with tables; now fixations */
- state->progress = ErtsDbProcCleanupProgressFixations;
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets table metadata");
- }
-
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteTables;
- /* Fall through */
-
- case ErtsDbProcCleanupOpDeleteTables:
-
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (!IS_SLOT_FREE(ix)) {
- tb = GET_ANY_SLOT_TAB(ix);
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int do_yield;
- db_lock(tb, LCK_WRITE);
- /* Ownership may have changed since
- we looked up the table. */
- if (tb->common.owner != pid) {
- do_yield = 0;
- db_unlock(tb, LCK_WRITE);
- }
- else if (tb->common.heir != am_none
- && tb->common.heir != pid
- && give_away_to_heir(c_p, tb)) {
- do_yield = 0;
- }
- else {
- int first_call;
-#ifdef HARDDEBUG
- erts_fprintf(stderr,
- "erts_db_process_exiting(); Table: %T, "
- "Process: %T\n",
- tb->common.id, pid);
-#endif
- first_call = (tb->common.status & DB_DELETE) == 0;
- if (first_call) {
- /* Clear all access bits. */
- tb->common.status &= ~(DB_PROTECTED
- | DB_PUBLIC
- | DB_PRIVATE);
- tb->common.status |= DB_DELETE;
-
- if (is_atom(tb->common.id))
- remove_named_tab(tb, 0);
-
- free_heir_data(tb);
- free_fixations_locked(tb);
- }
-
- do_yield = free_table_cont(c_p, tb, first_call, 0);
- db_unlock(tb, LCK_WRITE);
- }
- if (do_yield)
- goto yield;
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ case GET_OWNED_TABLE: {
+ DbTable* tb;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (!tb) {
+ /* Done with owned tables; now fixations */
+ state->op = UNFIX_TABLES;
+ break;
+ }
- proc_exit_cleanup_tables_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetTables;
- break;
+ ASSERT(tb != state->tb);
+ state->tb = tb;
+ db_lock(tb, LCK_WRITE);
+ /*
+ * Ownership may have changed since we looked up the table.
+ */
+ if (tb->common.owner != pid) {
+ db_unlock(tb, LCK_WRITE);
+ break;
+ }
+ if (tb->common.heir != am_none
+ && tb->common.heir != pid
+ && give_away_to_heir(c_p, tb)) {
+ break;
+ }
+ tid_clear(c_p, tb);
+ /* Clear all access bits. */
+ tb->common.status &= ~(DB_PROTECTED | DB_PUBLIC | DB_PRIVATE);
+ tb->common.status |= DB_DELETE;
+
+ if (is_table_named(tb))
+ remove_named_tab(tb, 0);
+
+ free_heir_data(tb);
+ reds -= free_fixations_locked(c_p, tb);
+ db_unlock(tb, LCK_WRITE);
+ state->op = FREE_OWNED_TABLE;
+ break;
+ }
+ case FREE_OWNED_TABLE:
+ reds = free_table_continue(c_p, state->tb, reds);
+ if (reds < 0)
+ goto yield;
- case ErtsDbProcCleanupOpGetFixations:
- state->slots.size = ARRAY_CHUNK;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_READ);
- ret = db_get_element_array(meta_pid_to_fixed_tab,
- pid,
- 2,
- state->slots.arr,
- &state->slots.size);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_READ);
-
- if (ret == DB_ERROR_BADKEY) {
- /* Done */
- state->progress = ErtsDbProcCleanupProgressDone;
- state->op = ErtsDbProcCleanupOpDone;
- break;
- } else if (ret != DB_ERROR_NONE) {
- ERTS_DB_INTERNAL_ERROR("Inconsistent ets fix table metadata");
- }
+ state->op = GET_OWNED_TABLE;
+ break;
- state->slots.ix = 0;
- state->slots.clean_ix = 0;
- state->op = ErtsDbProcCleanupOpDeleteFixations;
- /* Fall through */
+ case UNFIX_TABLES: {
+ DbFixation* fix;
- case ErtsDbProcCleanupOpDeleteFixations:
+ fix = (DbFixation*) erts_psd_get(c_p, ERTS_PSD_ETS_FIXED_TABLES);
- while (state->slots.ix < state->slots.size) {
- DbTable *tb = NULL;
- Sint ix = unsigned_val(state->slots.arr[state->slots.ix]);
- erts_smp_rwmtx_t *mmtl = get_meta_main_tab_lock(ix);
- erts_smp_rwmtx_rlock(mmtl);
- if (IS_SLOT_ALIVE(ix)) {
- tb = meta_main_tab[ix].u.tb;
- ASSERT(tb);
- }
- erts_smp_rwmtx_runlock(mmtl);
- if (tb) {
- int reds = 0;
-
- db_lock(tb, LCK_WRITE_REC);
- if (!(tb->common.status & DB_DELETE)) {
- DbFixation** pp;
-
- #ifdef ERTS_SMP
- erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
- reds = 10;
-
- for (pp = &tb->common.fixations; *pp != NULL;
- pp = &(*pp)->next) {
- if ((*pp)->pid == pid) {
- DbFixation* fix = *pp;
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_smp_refc_add(&tb->common.ref,diff,0);
- *pp = fix->next;
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
- break;
- }
- }
- #ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
- if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
- db_unfix_table_hash(&(tb->hash));
- reds += 40;
- }
- }
- db_unlock(tb, LCK_WRITE_REC);
- BUMP_REDS(c_p, reds);
- }
- state->slots.ix++;
- if (ERTS_BIF_REDS_LEFT(c_p) <= 0)
- goto yield;
- }
+ if (!fix) {
+ /* Done */
- proc_exit_cleanup_fixations_meta_data(pid, state);
- state->op = ErtsDbProcCleanupOpGetFixations;
- break;
+ if (state != &default_state)
+ erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
+ c_p->u.terminate = NULL;
- case ErtsDbProcCleanupOpDone:
+ BUMP_REDS(c_p, (initial_reds - reds));
+ return 0;
+ }
- if (state != &default_state)
- erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.terminate = NULL;
- return 0;
+ fixed_tabs_delete(c_p, fix);
+ reds -= proc_cleanup_fixed_table(c_p, fix);
+ break;
+ }
default:
ERTS_DB_INTERNAL_ERROR("Bad internal state");
- }
- }
-
- yield:
+ }
- switch (state->progress) {
- case ErtsDbProcCleanupProgressTables:
- proc_exit_cleanup_tables_meta_data(pid, state);
- break;
- case ErtsDbProcCleanupProgressFixations:
- proc_exit_cleanup_fixations_meta_data(pid, state);
- break;
- default:
- break;
- }
+ } while (reds > 0);
- ASSERT(c_p->u.terminate == (void *) state
- || state == &default_state);
+ yield:
if (state == &default_state) {
c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
- sizeof(ErtsDbProcCleanupState));
- sys_memcpy(c_p->u.terminate,
- (void*) state,
- sizeof(ErtsDbProcCleanupState));
+ sizeof(CleanupState));
+ sys_memcpy(c_p->u.terminate, (void*) state, sizeof(CleanupState));
}
+ else
+ ASSERT(state == c_p->u.terminate);
return !0;
}
+
/* SMP note: table only needs to be LCK_READ locked */
static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
- DeclareTmpHeap(meta_tuple,3,p);
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
- erts_smp_refc_inc(&tb->common.ref,1);
- fix = tb->common.fixations;
+ erts_smp_refc_inc(&tb->common.fix_count,1);
+ fix = tb->common.fixing_procs;
if (fix == NULL) {
tb->common.time.monotonic
= erts_get_monotonic_time(erts_proc_sched_data(p));
tb->common.time.offset = erts_get_time_offset();
}
else {
- for (; fix != NULL; fix = fix->next) {
- if (fix->pid == p->common.id) {
- ++(fix->counter);
+ fix = fixing_procs_rbt_lookup(fix, p);
+ if (fix) {
+ ASSERT(fixed_tabs_find(NULL, fix));
+ ++(fix->counter);
+
#ifdef ERTS_SMP
- erts_smp_mtx_unlock(&tb->common.fixlock);
+ erts_smp_mtx_unlock(&tb->common.fixlock);
#endif
- return;
- }
+ return;
}
}
fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION,
tb, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
- fix->pid = p->common.id;
+ fix->tabs.btid = tb->common.btid;
+ erts_refc_inc(&fix->tabs.btid->refc, 2);
+ fix->procs.p = p;
fix->counter = 1;
- fix->next = tb->common.fixations;
- tb->common.fixations = fix;
+ fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
+
#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
#endif
- p->flags |= F_USING_DB;
- UseTmpHeap(3,p);
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- if (db_put_hash(meta_pid_to_fixed_tab,
- TUPLE2(meta_tuple,
- p->common.id,
- make_small(tb->common.slot)),
- 0) != DB_ERROR_NONE) {
- UnUseTmpHeap(3,p);
- erts_exit(ERTS_ERROR_EXIT,"Could not insert ets metadata in safe_fixtable.");
- }
- UnUseTmpHeap(3,p);
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ p->flags |= F_USING_DB;
+
+ fixed_tabs_insert(p, fix);
}
/* SMP note: May re-lock table
@@ -3500,28 +3697,26 @@ static void fix_table_locked(Process* p, DbTable* tb)
static void unfix_table_locked(Process* p, DbTable* tb,
db_lock_kind_t* kind_p)
{
- DbFixation** pp;
+ DbFixation* fix;
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
- for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
- if ((*pp)->pid == p->common.id) {
- DbFixation* fix = *pp;
- erts_smp_refc_dec(&tb->common.ref,0);
- --(fix->counter);
- ASSERT(fix->counter >= 0);
- if (fix->counter > 0) {
- break;
- }
- *pp = fix->next;
+ fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
+
+ if (fix) {
+ erts_smp_refc_dec(&tb->common.fix_count,0);
+ --(fix->counter);
+ ASSERT(fix->counter >= 0);
+ if (fix->counter == 0) {
+ fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
#endif
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- p->common.id, make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
+ fixed_tabs_delete(p, fix);
+
+ erts_refc_dec(&fix->tabs.btid->refc, 1);
+
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, (void *) fix, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
@@ -3548,29 +3743,87 @@ unlocked:
}
}
-/* Assume that tb is WRITE locked */
-static void free_fixations_locked(DbTable *tb)
+struct free_fixations_ctx
{
- DbFixation *fix;
- DbFixation *next_fix;
+ Process* p;
+ DbTable* tb;
+ SWord cnt;
+};
+
+static void free_fixations_op(DbFixation* fix, void* vctx)
+{
+ struct free_fixations_ctx* ctx = (struct free_fixations_ctx*) vctx;
+ erts_aint_t diff;
+#ifdef DEBUG
+ DbTable* dbg_tb = btid2tab(fix->tabs.btid);
+#endif
- fix = tb->common.fixations;
- while (fix != NULL) {
- erts_aint_t diff = -((erts_aint_t) fix->counter);
- erts_smp_refc_add(&tb->common.ref,diff,0);
- next_fix = fix->next;
- db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_fixed_tab,
- fix->pid,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
- erts_db_free(ERTS_ALC_T_DB_FIXATION,
- tb, (void *) fix, sizeof(DbFixation));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ ASSERT(!dbg_tb || dbg_tb == ctx->tb);
+ ASSERT(fix->counter > 0);
+ ASSERT(ctx->tb->common.status & DB_DELETE);
- fix = next_fix;
+ diff = -((erts_aint_t) fix->counter);
+ erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0);
+
+#ifdef ERTS_SMP
+ if (fix->procs.p != ctx->p) { /* Fixated by other process */
+ fix->counter = 0;
+
+ /* Fake memory stats for table */
+ ASSERT(sizeof(DbFixation) == ERTS_ALC_DBG_BLK_SZ(fix));
+ ERTS_DB_ALC_MEM_UPDATE_(ctx->tb, sizeof(DbFixation), 0);
+
+ erts_schedule_ets_free_fixation(fix->procs.p->common.id, fix);
+ /*
+ * Either the sys task is scheduled and erts_db_execute_free_fixation()
+ * will remove 'fix', or the process exits, drops the sys task, and
+ * proc_cleanup_fixed_table() removes 'fix'.
+ */
+ }
+ else
+#endif
+ {
+ fixed_tabs_delete(fix->procs.p, fix);
+
+ if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) {
+ erts_bin_free(fix->tabs.btid);
+ }
+
+ erts_db_free(ERTS_ALC_T_DB_FIXATION,
+ ctx->tb, (void *) fix, sizeof(DbFixation));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
}
- tb->common.fixations = NULL;
+ ctx->cnt++;
+}
+
+#ifdef ERTS_SMP
+int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
+{
+ ASSERT(fix->counter == 0);
+ fixed_tabs_delete(p, fix);
+
+ if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) {
+ erts_bin_free(fix->tabs.btid);
+ }
+ erts_free(ERTS_ALC_T_DB_FIXATION, fix);
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
+ return 1;
+}
+#endif
+
+static SWord free_fixations_locked(Process* p, DbTable *tb)
+{
+ struct free_fixations_ctx ctx;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+
+ ctx.p = p;
+ ctx.tb = tb;
+ ctx.cnt = 0;
+ fixing_procs_rbt_foreach_destroy(&tb->common.fixing_procs,
+ free_fixations_op, &ctx);
+ tb->common.fixing_procs = NULL;
+ return ctx.cnt;
}
static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
@@ -3634,55 +3887,40 @@ static void free_heir_data(DbTable* tb)
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
{
- Process *p = BIF_P;
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
Eterm cont = BIF_ARG_1;
- int trap;
Eterm* ptr = big_val(cont);
DbTable *tb = *((DbTable **) (UWord) (ptr + 1));
ASSERT(*ptr == make_pos_bignum_header(1));
- db_lock(tb, LCK_WRITE);
- trap = free_table_cont(p, tb, 0, 1);
- db_unlock(tb, LCK_WRITE);
-
- if (trap) {
- BIF_TRAP1(&ets_delete_continue_exp, p, cont);
+ if (free_table_continue(BIF_P, tb, reds) < 0) {
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP1(&ets_delete_continue_exp, BIF_P, cont);
}
else {
+ BUMP_REDS(BIF_P, (initial_reds - reds));
BIF_RET(am_true);
}
}
/*
- * free_table_cont() returns 0 when done and !0 when more work is needed.
+ * free_table_continue() returns reductions left
+ * done if >= 0
+ * yield if < 0
*/
-static int free_table_cont(Process *p,
- DbTable *tb,
- int first,
- int clean_meta_tab)
+static SWord free_table_continue(Process *p, DbTable *tb, SWord reds)
{
- Eterm result;
- erts_smp_rwmtx_t *mmtl;
-
-#ifdef HARDDEBUG
- if (!first) {
- erts_fprintf(stderr,"ets: free_table_cont %T (continue)\r\n",
- tb->common.id);
- }
-#endif
-
- result = tb->common.meth->db_free_table_continue(tb);
+ reds = tb->common.meth->db_free_table_continue(tb, reds);
- if (result == 0) {
+ if (reds < 0) {
#ifdef HARDDEBUG
erts_fprintf(stderr,"ets: free_table_cont %T (continue begin)\r\n",
tb->common.id);
#endif
/* More work to be done. Let other processes work and call us again. */
- BUMP_ALL_REDS(p);
- return !0;
}
else {
#ifdef HARDDEBUG
@@ -3690,27 +3928,28 @@ static int free_table_cont(Process *p,
tb->common.id);
#endif
/* Completely done - we will not get called again. */
- mmtl = get_meta_main_tab_lock(tb->common.slot);
-#ifdef ERTS_SMP
- if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
- erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
- erts_smp_rwmtx_rwlock(mmtl);
- erts_smp_rwmtx_rwlock(&tb->common.rwlock);
- }
-#endif
- free_slot(tb->common.slot);
- erts_smp_rwmtx_rwunlock(mmtl);
-
- if (clean_meta_tab) {
- db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
- db_erase_bag_exact2(meta_pid_to_tab,tb->common.owner,
- make_small(tb->common.slot));
- db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
- }
- schedule_free_dbtable(tb);
- BUMP_REDS(p, 100);
- return 0;
+ delete_owned_table(p, tb);
+ table_dec_refc(tb, 0);
}
+ return reds;
+}
+
+struct fixing_procs_info_ctx
+{
+ Process* p;
+ Eterm list;
+};
+
+static void fixing_procs_info_op(DbFixation* fix, void* vctx)
+{
+ struct fixing_procs_info_ctx* ctx = (struct fixing_procs_info_ctx*) vctx;
+ Eterm* hp;
+ Eterm tpl;
+
+ hp = HAllocX(ctx->p, 5, 100);
+ tpl = TUPLE2(hp, fix->procs.p->common.id, make_small(fix->counter));
+ hp += 3;
+ ctx->list = CONS(hp, tpl, ctx->list);
}
static Eterm table_info(Process* p, DbTable* tb, Eterm What)
@@ -3759,7 +3998,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else if (What == am_node) {
ret = erts_this_dist_entry->sysname;
} else if (What == am_named_table) {
- ret = is_atom(tb->common.id) ? am_true : am_false;
+ ret = is_table_named(tb) ? am_true : am_false;
} else if (What == am_compressed) {
ret = tb->common.compress ? am_true : am_false;
}
@@ -3784,9 +4023,9 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
- Eterm tpl, lst;
- DbFixation *fix;
+ Eterm time;
Sint64 mtime;
+ struct fixing_procs_info_ctx ctx;
need = 3;
if (use_monotonic) {
@@ -3799,19 +4038,15 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
mtime = 0;
need += 4;
}
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- need += 5;
- }
+ ctx.p = p;
+ ctx.list = NIL;
+ fixing_procs_rbt_foreach(tb->common.fixing_procs,
+ fixing_procs_info_op,
+ &ctx);
+
hp = HAlloc(p, need);
- lst = NIL;
- for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
- tpl = TUPLE2(hp,fix->pid,make_small(fix->counter));
- hp += 3;
- lst = CONS(hp,tpl,lst);
- hp += 2;
- }
if (use_monotonic)
- tpl = (IS_SSMALL(mtime)
+ time = (IS_SSMALL(mtime)
? make_small(mtime)
: erts_sint64_to_big(mtime, &hp));
else {
@@ -3819,10 +4054,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
erts_make_timestamp_value(&ms, &s, &us,
tb->common.time.monotonic,
tb->common.time.offset);
- tpl = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
+ time = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
hp += 4;
}
- ret = TUPLE2(hp, tpl, lst);
+ ret = TUPLE2(hp, time, ctx.list);
} else {
ret = am_false;
}
@@ -3867,7 +4102,19 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
{
- erts_print(to, to_arg, "Table: %T\n", tb->common.id);
+ Eterm tid;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
+
+ if (is_table_named(tb)) {
+ tid = tb->common.the_name;
+ } else {
+ ErlOffHeap oh;
+ ERTS_INIT_OFF_HEAP(&oh);
+ write_magic_ref_thing(heap, &oh, (ErtsMagicBinary *) tb->common.btid);
+ tid = make_internal_ref(heap);
+ }
+
+ erts_print(to, to_arg, "Table: %T\n", tid);
erts_print(to, to_arg, "Name: %T\n", tb->common.the_name);
tb->common.meth->db_print(to, to_arg, show, tb);
@@ -3885,21 +4132,30 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb)
erts_print(to, to_arg, "Read Concurrency: %T\n", table_info(NULL, tb, am_read_concurrency));
}
+typedef struct {
+ fmtfn_t to;
+ void *to_arg;
+ int show;
+} ErtsPrintDbInfo;
+
+static void
+db_info_print(DbTable *tb, void *vpdbip)
+{
+ ErtsPrintDbInfo *pdbip = (ErtsPrintDbInfo *) vpdbip;
+ erts_print(pdbip->to, pdbip->to_arg, "=ets:%T\n", tb->common.owner);
+ erts_print(pdbip->to, pdbip->to_arg, "Slot: %bpu\n", (Uint) tb);
+ print_table(pdbip->to, pdbip->to_arg, pdbip->show, tb);
+}
+
void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler */
{
- int i;
- for (i=0; i < db_max_tabs; i++)
- if (IS_SLOT_ALIVE(i)) {
- erts_print(to, to_arg, "=ets:%T\n", meta_main_tab[i].u.tb->common.owner);
- erts_print(to, to_arg, "Slot: %d\n", i);
- print_table(to, to_arg, show, meta_main_tab[i].u.tb);
- }
-#ifdef DEBUG
- erts_print(to, to_arg, "=internal_ets: Process to table index\n");
- print_table(to, to_arg, show, meta_pid_to_tab);
- erts_print(to, to_arg, "=internal_ets: Process to fixation index\n");
- print_table(to, to_arg, show, meta_pid_to_fixed_tab);
-#endif
+ ErtsPrintDbInfo pdbi;
+
+ pdbi.to = to;
+ pdbi.to_arg = to_arg;
+ pdbi.show = show;
+
+ erts_db_foreach_table(db_info_print, &pdbi);
}
Uint
@@ -3914,15 +4170,22 @@ erts_get_ets_misc_mem_size(void)
void
erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg)
{
- int i, j;
- j = 0;
- for(i = 0; (i < db_max_tabs && j < meta_main_tab_cnt); i++) {
- if (IS_SLOT_ALIVE(i)) {
- j++;
- (*func)(meta_main_tab[i].u.tb, arg);
- }
+ int ix;
+
+ ASSERT(erts_smp_thr_progress_is_blocking());
+
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+ DbTable *first = esdp->ets_tables.clist;
+ if (first) {
+ DbTable *tb = first;
+ do {
+ if (is_table_alive(tb))
+ (*func)(tb, arg);
+ tb = tb->common.all.next;
+ } while (tb != first);
+ }
}
- ASSERT(j == meta_main_tab_cnt);
}
/* SMP Note: May only be used when system is locked */
@@ -3972,53 +4235,3 @@ erts_ets_colliding_names(Process* p, Eterm name, Uint cnt)
return list;
}
-/*
- * For testing only
- * Retreive meta table size state
- */
-Eterm erts_ets_get_meta_state(Process* p)
-{
- Eterm* hp = HAlloc(p, 3);
- return TUPLE2(hp,
- erts_ets_hash_get_memstate(p, &meta_pid_to_tab->hash),
- erts_ets_hash_get_memstate(p, &meta_pid_to_fixed_tab->hash));
-}
-/*
- * For testing only
- * Restore a previously retrieved meta table size state.
- * We do this to suppress failed memory checks
- * caused by the hysteresis of meta tables grow/shrink limits.
- */
-Eterm erts_ets_restore_meta_state(Process* p, Eterm meta_state)
-{
- Eterm* tv;
- Eterm* hp;
- if (!is_tuple_arity(meta_state, 2))
- return am_badarg;
-
- tv = tuple_val(meta_state);
- hp = HAlloc(p, 3);
- return TUPLE2(hp,
- erts_ets_hash_restore_memstate(&meta_pid_to_tab->hash, tv[1]),
- erts_ets_hash_restore_memstate(&meta_pid_to_fixed_tab->hash, tv[2]));
-}
-
-#ifdef HARDDEBUG /* Here comes some debug functions */
-
-void db_check_tables(void)
-{
-#ifdef ERTS_SMP
- return;
-#else
- int i;
-
- for (i = 0; i < db_max_tabs; i++) {
- if (IS_SLOT_ALIVE(i)) {
- DbTable* tb = meta_main_tab[i].t;
- tb->common.meth->db_check_table(tb);
- }
- }
-#endif
-}
-
-#endif /* HARDDEBUG */
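The deletion path above replaces the fixed DELETE_RECORD_LIMIT work chunks with a signed reduction budget: free_table_continue() and the db_free_table_continue_* callbacks consume from the budget and a negative return tells the caller to trap and resume later. A minimal stand-alone sketch of that convention follows; the names (Reds, free_some_items) are illustrative only, not ERTS identifiers.

#include <stdio.h>

typedef long Reds;

/* Free items until either the work is done or the budget is spent.
 * Return value >= 0 means completely done; < 0 means caller must yield. */
static Reds free_some_items(int *items_left, Reds reds)
{
    while (*items_left > 0) {
        --*items_left;              /* "free" one item */
        if (--reds < 0)
            return reds;            /* budget exhausted: not done yet */
    }
    return reds;
}

int main(void)
{
    int items = 10000;
    Reds budget = 2000;
    while (free_some_items(&items, budget) < 0)
        puts("yield");              /* ets_delete_trap() traps here via BIF_TRAP1 */
    puts("done");
    return 0;
}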
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 852440ff31..cbf4b9e007 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -24,8 +24,37 @@
*
*/
-#ifndef __DB_H__
-#define __DB_H__
+#ifndef ERTS_DB_SCHED_SPEC_TYPES__
+#define ERTS_DB_SCHED_SPEC_TYPES__
+
+union db_table;
+typedef union db_table DbTable;
+
+typedef struct ErtsEtsAllReq_ ErtsEtsAllReq;
+
+typedef struct {
+ ErtsEtsAllReq *next;
+ ErtsEtsAllReq *prev;
+} ErtsEtsAllReqList;
+
+typedef struct {
+ ErtsEtsAllReq *ongoing;
+ ErlHeapFragment *hfrag;
+ DbTable *tab;
+ ErtsEtsAllReq *queue;
+} ErtsEtsAllYieldData;
+
+typedef struct {
+ Uint count;
+ DbTable *clist;
+} ErtsEtsTables;
+
+#endif /* ERTS_DB_SCHED_SPEC_TYPES__ */
+
+#ifndef ERTS_ONLY_SCHED_SPEC_ETS_DATA
+
+#ifndef ERL_DB_H__
+#define ERL_DB_H__
#include "sys.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
@@ -46,6 +75,12 @@ typedef struct {
ErtsThrPrgrLaterOp data;
} DbTableRelease;
+struct ErtsSchedulerData_;
+int erts_handle_yielded_ets_all_request(struct ErtsSchedulerData_ *esdp,
+ ErtsEtsAllYieldData *eadp);
+
+void erts_ets_sched_spec_data_init(struct ErtsSchedulerData_ *esdp);
+
/*
* So, the structure for a database table, NB this is only
* interesting in db.c.
@@ -74,6 +109,7 @@ typedef enum {
void init_db(ErtsDbSpinCount);
int erts_db_process_exiting(Process *, ErtsProcLocks);
+int erts_db_execute_free_fixation(Process*, DbFixation*);
void db_info(fmtfn_t, void *, int);
void erts_db_foreach_table(void (*)(DbTable *, void *), void *);
void erts_db_foreach_offheap(DbTable *,
@@ -90,12 +126,9 @@ extern Export ets_select_continue_exp;
extern erts_smp_atomic_t erts_ets_misc_mem_size;
Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt);
-Eterm erts_ets_get_meta_state(Process* p);
-Eterm erts_ets_restore_meta_state(Process* p, Eterm target_state);
-
Uint erts_db_get_max_tabs(void);
-#endif
+#endif /* ERL_DB_H__ */
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
#define ERTS_HAVE_DB_INTERNAL__
@@ -271,3 +304,4 @@ erts_db_free_nt(ErtsAlcType_t type, void *ptr, Uint size)
#endif /* #if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__) */
+#endif /* !ERTS_ONLY_SCHED_SPEC_ETS_DATA */
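The header is now split so that process/scheduler code can pull in only the scheduler-specific ETS types. A consumer does this the way the erl_process.h hunk further down does, by defining the guard before the include:

/* Pull in only the ERTS_DB_SCHED_SPEC_TYPES__ block of erl_db.h
   (mirrors the erl_process.h change in this same commit). */
#define ERTS_ONLY_SCHED_SPEC_ETS_DATA
#include "erl_db.h"
#undef ERTS_ONLY_SCHED_SPEC_ETS_DATA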
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index bb29d56aa7..18405342da 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -393,20 +393,20 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_hash(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse, Eterm *ret);
-static int db_select_hash(Process *p, DbTable *tbl,
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret);
-static int db_select_count_hash(Process *p, DbTable *tbl,
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
-static int db_select_delete_hash(Process *p, DbTable *tbl,
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
static int db_select_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_count_continue_hash(Process *p, DbTable *tbl,
+static int db_select_count_continue_hash(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
static int db_select_delete_continue_hash(Process *p, DbTable *tbl,
@@ -418,7 +418,7 @@ static void db_print_hash(fmtfn_t to,
DbTable *tbl);
static int db_free_table_hash(DbTable *tbl);
-static int db_free_table_continue_hash(DbTable *tbl);
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds);
static void db_foreach_offheap_hash(DbTable *,
@@ -529,11 +529,6 @@ DbTableMethod db_hash =
db_free_table_continue_hash,
db_print_hash,
db_foreach_offheap_hash,
-#ifdef HARDDEBUG
- db_check_table_hash,
-#else
- NULL,
-#endif
db_lookup_dbterm_hash,
db_finalize_dbterm_hash
};
@@ -581,9 +576,10 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel)
** Table interface routines ie what's called by the bif's
*/
-void db_unfix_table_hash(DbTableHash *tb)
+SWord db_unfix_table_hash(DbTableHash *tb)
{
FixedDeletion* fixdel;
+ SWord work = 0;
ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
|| (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock)
@@ -604,7 +600,7 @@ restart:
if (!IS_FIXED(tb)) {
goto restart; /* unfixed again! */
}
- return;
+ return work;
}
if (ix < NACTIVE(tb)) {
bp = &BUCKET(tb, ix);
@@ -614,6 +610,7 @@ restart:
if (b->hvalue == INVALID_HASH) {
*bp = b->next;
free_term(tb, b);
+ work++;
b = *bp;
} else {
bp = &b->next;
@@ -629,9 +626,12 @@ restart:
(void *) fx,
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ work++;
}
/* ToDo: Maybe try grow/shrink the table as well */
+
+ return work;
}
int db_create_hash(Process *p, DbTable *tbl)
@@ -846,7 +846,6 @@ Lnew:
grow(tb, nitems);
}
}
- CHECK_TABLES();
return DB_ERROR_NONE;
Ldone:
@@ -871,7 +870,6 @@ get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
}
}
copy = build_term_list(p, b1, b2, sz, tb);
- CHECK_TABLES();
if (bend) {
*bend = b2;
}
@@ -903,70 +901,6 @@ done:
RUNLOCK_HASH(lck);
return DB_ERROR_NONE;
}
-
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret)
-{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm* b1;
- int num = 0;
- int retval;
- erts_smp_rwmtx_t* lck;
-
- ASSERT(!IS_FIXED(tbl)); /* no support for fixed tables here */
-
- hval = MAKE_HASH(key);
- lck = RLOCK_HASH(tb, hval);
- ix = hash_to_ix(tb, hval);
- b1 = BUCKET(tb, ix);
-
- while(b1 != 0) {
- if (has_live_key(tb,b1,key,hval)) {
- if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
- HashDbTerm* b;
- HashDbTerm* b2 = b1->next;
-
- while(b2 != NULL && has_live_key(tb,b2,key,hval)) {
- if (ndex > arityval(b2->dbterm.tpl[0])) {
- retval = DB_ERROR_BADITEM;
- goto done;
- }
- b2 = b2->next;
- }
-
- b = b1;
- while(b != b2) {
- if (num < *num_ret) {
- ret[num++] = b->dbterm.tpl[ndex];
- } else {
- retval = DB_ERROR_NONE;
- goto done;
- }
- b = b->next;
- }
- *num_ret = num;
- }
- else {
- ASSERT(*num_ret > 0);
- ret[0] = b1->dbterm.tpl[ndex];
- *num_ret = 1;
- }
- retval = DB_ERROR_NONE;
- goto done;
- }
- b1 = b1->next;
- }
- retval = DB_ERROR_BADKEY;
-done:
- RUNLOCK_HASH(lck);
- return retval;
-}
-
static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret)
{
@@ -1059,54 +993,6 @@ done:
}
/*
- * Very internal interface, removes elements of arity two from
- * BAG. Used for the PID meta table
- */
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value)
-{
- DbTableHash *tb = &tbl->hash;
- HashValue hval;
- int ix;
- HashDbTerm** bp;
- HashDbTerm* b;
- erts_smp_rwmtx_t* lck;
- int found = 0;
-
- hval = MAKE_HASH(key);
- lck = WLOCK_HASH(tb,hval);
- ix = hash_to_ix(tb, hval);
- bp = &BUCKET(tb, ix);
- b = *bp;
-
- ASSERT(!IS_FIXED(tb));
- ASSERT((tb->common.status & DB_BAG));
- ASSERT(!tb->common.compress);
-
- while(b != 0) {
- if (has_live_key(tb,b,key,hval)) {
- found = 1;
- if ((arityval(b->dbterm.tpl[0]) == 2) &&
- EQ(value, b->dbterm.tpl[2])) {
- *bp = b->next;
- free_term(tb, b);
- erts_smp_atomic_dec_nob(&tb->common.nitems);
- b = *bp;
- break;
- }
- } else if (found) {
- break;
- }
- bp = &b->next;
- b = b->next;
- }
- WUNLOCK_HASH(lck);
- if (found) {
- try_shrink(tb);
- }
- return DB_ERROR_NONE;
-}
-
-/*
** NB, this is for the db_erase/2 bif.
*/
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
@@ -1401,14 +1287,14 @@ trap:
}
-static int db_select_hash(Process *p, DbTable *tbl,
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse,
Eterm *ret)
{
- return db_select_chunk_hash(p, tbl, pattern, 0, reverse, ret);
+ return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret);
}
-static int db_select_chunk_hash(Process *p, DbTable *tbl,
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse, /* not used */
Eterm *ret)
@@ -1556,7 +1442,7 @@ done:
mpb = erts_db_make_match_prog_ref(p,(mpi.mp),&hp);
if (mpi.all_objects)
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
- continuation = TUPLE6(hp, tb->common.id,make_small(slot_ix),
+ continuation = TUPLE6(hp, tid, make_small(slot_ix),
make_small(chunk_size),
mpb, rest,
make_small(rest_size));
@@ -1580,7 +1466,7 @@ trap:
(mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
hp = HAlloc(p,7+ERTS_MAGIC_REF_THING_SIZE);
mpb = erts_db_make_match_prog_ref(p,(mpi.mp),&hp);
- continuation = TUPLE6(hp, tb->common.id, make_small(slot_ix),
+ continuation = TUPLE6(hp, tid, make_small(slot_ix),
make_small(chunk_size),
mpb, match_list,
make_small(got));
@@ -1594,7 +1480,8 @@ trap:
}
static int db_select_count_hash(Process *p,
- DbTable *tbl,
+ DbTable *tbl,
+ Eterm tid,
Eterm pattern,
Eterm *ret)
{
@@ -1700,7 +1587,7 @@ trap:
hp += BIG_UINT_HEAP_SIZE;
}
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ continuation = TUPLE4(hp, tid, make_small(slot_ix),
mpb,
egot);
mpi.mp = NULL; /*otherwise the return macro will destroy it */
@@ -1713,6 +1600,7 @@ trap:
static int db_select_delete_hash(Process *p,
DbTable *tbl,
+ Eterm tid,
Eterm pattern,
Eterm *ret)
{
@@ -1845,7 +1733,7 @@ trap:
hp += BIG_UINT_HEAP_SIZE;
}
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ continuation = TUPLE4(hp, tid, make_small(slot_ix),
mpb,
egot);
mpi.mp = NULL; /*otherwise the return macro will destroy it */
@@ -1959,7 +1847,7 @@ trap:
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ continuation = TUPLE4(hp, tptr[1], make_small(slot_ix),
tptr[3],
egot);
RET_TO_BIF(bif_trap1(&ets_select_delete_continue_exp, p,
@@ -2050,7 +1938,7 @@ trap:
egot = uint_to_big(got, hp);
hp += BIG_UINT_HEAP_SIZE;
}
- continuation = TUPLE4(hp, tb->common.id, make_small(slot_ix),
+ continuation = TUPLE4(hp, tptr[1], make_small(slot_ix),
tptr[3],
egot);
RET_TO_BIF(bif_trap1(&ets_select_count_continue_exp, p,
@@ -2197,19 +2085,17 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
/* release all memory occupied by a single table */
static int db_free_table_hash(DbTable *tbl)
{
- while (!db_free_table_continue_hash(tbl))
+ while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0)
;
return 0;
}
-static int db_free_table_continue_hash(DbTable *tbl)
+static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
{
DbTableHash *tb = &tbl->hash;
- int done;
FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel);
- ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb));
+ ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE));
- done = 0;
while (fixdel != NULL) {
FixedDeletion *fx = fixdel;
@@ -2219,22 +2105,21 @@ static int db_free_table_continue_hash(DbTable *tbl)
(void *) fx,
sizeof(FixedDeletion));
ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
- if (++done >= 2*DELETE_RECORD_LIMIT) {
+ if (--reds < 0) {
erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel);
- return 0; /* Not done */
+ return reds; /* Not done */
}
}
erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL);
- done /= 2;
while(tb->nslots != 0) {
- done += 1 + EXT_SEGSZ/64 + free_seg(tb, 1);
+ reds -= EXT_SEGSZ/64 + free_seg(tb, 1);
/*
* If we have done enough work, get out here.
*/
- if (done >= DELETE_RECORD_LIMIT) {
- return 0; /* Not done */
+ if (reds < 0) {
+ return reds; /* Not done */
}
}
#ifdef ERTS_SMP
@@ -2249,7 +2134,7 @@ static int db_free_table_continue_hash(DbTable *tbl)
}
#endif
ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
- return 1; /* Done */
+ return reds; /* Done */
}
@@ -3007,63 +2892,4 @@ Eterm erts_ets_hash_sizeof_ext_segtab(void)
{
return make_small(((SIZEOF_EXT_SEGTAB(0)-1) / sizeof(UWord)) + 1);
}
-/* For testing only */
-Eterm erts_ets_hash_get_memstate(Process* p, DbTableHash* tb)
-{
- Eterm seg_cnt;
- while (!begin_resizing(tb))
- /*spinn*/;
-
- seg_cnt = make_small(SLOT_IX_TO_SEG_IX(tb->nslots));
- done_resizing(tb);
- return seg_cnt;
-}
-/* For testing only */
-Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate)
-{
- int seg_cnt, target;
-
- if (!is_small(memstate))
- return make_small(__LINE__);
-
- target = signed_val(memstate);
- if (target < 1)
- return make_small(__LINE__);
- while (1) {
- while (!begin_resizing(tb))
- /*spin*/;
- seg_cnt = SLOT_IX_TO_SEG_IX(tb->nslots);
- done_resizing(tb);
-
- if (target == seg_cnt)
- return am_ok;
- if (IS_FIXED(tb))
- return make_small(__LINE__);
- if (target < seg_cnt)
- shrink(tb, 0);
- else
- grow(tb, INT_MAX);
- }
-}
-#ifdef HARDDEBUG
-
-void db_check_table_hash(DbTable *tbl)
-{
- DbTableHash *tb = &tbl->hash;
- HashDbTerm* list;
- int j;
-
- for (j = 0; j < NACTIVE(tb); j++) {
- if ((list = BUCKET(tb,j)) != 0) {
- while (list != 0) {
- if (!is_tuple(make_tuple(list->dbterm.tpl))) {
- erts_exit(ERTS_ERROR_EXIT, "Bad term in slot %d of ets table", j);
- }
- list = list->next;
- }
- }
- }
-}
-
-#endif
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 6d25c73549..c340c72311 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -75,7 +75,7 @@ typedef struct db_table_hash {
** table types. The process is always an [in out] parameter.
*/
void db_initialize_hash(void);
-void db_unfix_table_hash(DbTableHash *tb /* [in out] */);
+SWord db_unfix_table_hash(DbTableHash *tb);
Uint db_kept_items_hash(DbTableHash *tb);
/* Interface for meta pid table */
@@ -88,14 +88,6 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret);
-int db_get_element_array(DbTable *tbl,
- Eterm key,
- int ndex,
- Eterm *ret,
- int *num_ret);
-
-int db_erase_bag_exact2(DbTable *tbl, Eterm key, Eterm value);
-
/* not yet in method table */
int db_mark_all_deleted_hash(DbTable *tbl);
@@ -110,7 +102,5 @@ typedef struct {
void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
Eterm erts_ets_hash_sizeof_ext_segtab(void);
-Eterm erts_ets_hash_get_memstate(Process*, DbTableHash* tb);
-Eterm erts_ets_hash_restore_memstate(DbTableHash* tb, Eterm memstate);
#endif /* _DB_HASH_H */
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index c4ecd2ba37..14a7451267 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -180,7 +180,6 @@ static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
static TreeDbTerm *traverse_until(TreeDbTerm *t, int *current, int to);
static void check_slot_pos(DbTableTree *tb);
static void check_saved_stack(DbTableTree *tb);
-static int check_table_tree(DbTableTree* tb, TreeDbTerm *t);
#define TREE_DEBUG
#endif
@@ -283,7 +282,7 @@ struct select_delete_context {
static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key);
static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
Eterm object);
-static int do_free_tree_cont(DbTableTree *tb, int num_left);
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds);
static void free_term(DbTableTree *tb, TreeDbTerm* p);
static int balance_left(TreeDbTerm **this);
static int balance_right(TreeDbTerm **this);
@@ -369,18 +368,18 @@ static int db_erase_tree(DbTable *tbl, Eterm key, Eterm *ret);
static int db_erase_object_tree(DbTable *tbl, Eterm object,Eterm *ret);
static int db_slot_tree(Process *p, DbTable *tbl,
Eterm slot_term, Eterm *ret);
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reversed, Eterm *ret);
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reversed, Eterm *ret);
static int db_select_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
static int db_select_count_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret);
static int db_select_delete_continue_tree(Process *p, DbTable *tbl,
Eterm continuation, Eterm *ret);
@@ -389,7 +388,7 @@ static void db_print_tree(fmtfn_t to, void *to_arg,
int show, DbTable *tbl);
static int db_free_table_tree(DbTable *tbl);
-static int db_free_table_continue_tree(DbTable *tbl);
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord);
static void db_foreach_offheap_tree(DbTable *,
void (*)(ErlOffHeap *, void *),
@@ -442,11 +441,6 @@ DbTableMethod db_tree =
db_free_table_continue_tree,
db_print_tree,
db_foreach_offheap_tree,
-#ifdef HARDDEBUG
- db_check_table_tree,
-#else
- NULL,
-#endif
db_lookup_dbterm_tree,
db_finalize_dbterm_tree
@@ -1058,7 +1052,7 @@ static int db_select_continue_tree(Process *p,
}
-static int db_select_tree(Process *p, DbTable *tbl,
+static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, int reverse, Eterm *ret)
{
/* Strategy: Traverse backwards to build resulting list from tail to head */
@@ -1151,7 +1145,7 @@ static int db_select_tree(Process *p, DbTable *tbl,
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(0), /* Chunk size of zero means not chunked to the
@@ -1263,7 +1257,7 @@ static int db_select_count_continue_tree(Process *p,
}
-static int db_select_count_tree(Process *p, DbTable *tbl,
+static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1349,7 +1343,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1363,7 +1357,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl,
}
-static int db_select_chunk_tree(Process *p, DbTable *tbl,
+static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Sint chunk_size,
int reverse,
Eterm *ret)
@@ -1474,7 +1468,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program,
needn't be copied */
@@ -1499,7 +1493,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl,
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
make_small(chunk_size),
@@ -1605,7 +1599,7 @@ static int db_select_delete_continue_tree(Process *p,
#undef RET_TO_BIF
}
-static int db_select_delete_tree(Process *p, DbTable *tbl,
+static int db_select_delete_tree(Process *p, DbTable *tbl, Eterm tid,
Eterm pattern, Eterm *ret)
{
DbTableTree *tb = &tbl->tree;
@@ -1691,7 +1685,7 @@ static int db_select_delete_tree(Process *p, DbTable *tbl,
continuation = TUPLE5
(hp,
- tb->common.id,
+ tid,
key,
sc.end_condition, /* From the match program, needn't be copied */
mpb,
@@ -1757,23 +1751,22 @@ static void db_print_tree(fmtfn_t to, void *to_arg,
/* release all memory occupied by a single table */
static int db_free_table_tree(DbTable *tbl)
{
- while (!db_free_table_continue_tree(tbl))
+ while (db_free_table_continue_tree(tbl, ERTS_SWORD_MAX) < 0)
;
return 1;
}
-static int db_free_table_continue_tree(DbTable *tbl)
+static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
{
DbTableTree *tb = &tbl->tree;
- int result;
if (!tb->deletion) {
tb->static_stack.pos = 0;
tb->deletion = 1;
PUSH_NODE(&tb->static_stack, tb->root);
}
- result = do_free_tree_cont(tb, DELETE_RECORD_LIMIT);
- if (result) { /* Completely done. */
+ reds = do_free_tree_continue(tb, reds);
+ if (reds >= 0) { /* Completely done. */
erts_db_free(ERTS_ALC_T_DB_STK,
(DbTable *) tb,
(void *) tb->static_stack.array,
@@ -1781,7 +1774,7 @@ static int db_free_table_continue_tree(DbTable *tbl)
ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size)
== sizeof(DbTable));
}
- return result;
+ return reds;
}
static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
@@ -2064,7 +2057,7 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
return DB_ERROR_NONE;
}
-static int do_free_tree_cont(DbTableTree *tb, int num_left)
+static SWord do_free_tree_continue(DbTableTree *tb, SWord reds)
{
TreeDbTerm *root;
TreeDbTerm *p;
@@ -2083,15 +2076,14 @@ static int do_free_tree_cont(DbTableTree *tb, int num_left)
root = p;
} else {
free_term(tb, root);
- if (--num_left > 0) {
- break;
- } else {
- return 0; /* Done enough for now */
- }
+ if (--reds < 0) {
+ return reds; /* Done enough for now */
+ }
+ break;
}
}
}
- return 1;
+ return reds;
}
/*
@@ -3130,6 +3122,9 @@ static void do_dump_tree2(DbTableTree* tb, int to, void *to_arg, int show,
#ifdef HARDDEBUG
+/*
+ * Not called, but kept as it might come into use.
+ */
void db_check_table_tree(DbTable *tbl)
{
DbTableTree *tb = &tbl->tree;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 471fefe3cb..72298842d7 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -75,9 +75,6 @@ typedef struct db_term {
*/
} DbTerm;
-union db_table;
-typedef union db_table DbTable;
-
#define DB_MUST_RESIZE 1
#define DB_NEW_OBJECT 2
#define DB_INC_TRY_GROW 4
@@ -138,30 +135,34 @@ typedef struct db_table_method
Eterm slot,
Eterm* ret);
int (*db_select_chunk)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Sint chunk_size,
int reverse,
Eterm* ret);
int (*db_select)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
int reverse,
Eterm* ret);
int (*db_select_delete)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_delete_continue)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
Eterm continuation,
Eterm* ret);
int (*db_select_count)(Process* p,
- DbTable* tb, /* [in out] */
+ DbTable* tb, /* [in out] */
+ Eterm tid,
Eterm pattern,
Eterm* ret);
int (*db_select_count_continue)(Process* p,
@@ -174,7 +175,7 @@ typedef struct db_table_method
DbTable* db /* [in out] */ );
int (*db_free_table)(DbTable* db /* [in out] */ );
- int (*db_free_table_continue)(DbTable* db); /* [in out] */
+ SWord (*db_free_table_continue)(DbTable* db, SWord reds);
void (*db_print)(fmtfn_t to,
void* to_arg,
@@ -184,7 +185,6 @@ typedef struct db_table_method
void (*db_foreach_offheap)(DbTable* db, /* [in out] */
void (*func)(ErlOffHeap *, void *),
void *arg);
- void (*db_check_table)(DbTable* tb);
/* Lookup a dbterm for updating. Return false if not found. */
int (*db_lookup_dbterm)(Process *, DbTable *, Eterm key, Eterm obj,
@@ -198,11 +198,27 @@ typedef struct db_table_method
} DbTableMethod;
typedef struct db_fixation {
- Eterm pid;
+ /* Node in fixed_tabs list */
+ struct {
+ struct db_fixation *next, *prev;
+ Binary* btid;
+ } tabs;
+
+ /* Node in fixing_procs tree */
+ struct {
+ struct db_fixation *left, *right, *parent;
+ int is_red;
+ Process* p;
+ } procs;
+
Uint counter;
- struct db_fixation *next;
} DbFixation;
+typedef struct {
+ DbTable *next;
+ DbTable *prev;
+} DbTableList;
+
/*
* This structure contains data for all different types of database
* tables. Note that these fields must match the same fields
@@ -212,10 +228,13 @@ typedef struct db_fixation {
*/
typedef struct db_table_common {
- erts_smp_refc_t ref; /* fixation counter */
+ erts_smp_refc_t refc; /* reference count of table struct */
+ erts_smp_refc_t fix_count;/* fixation counter */
+ DbTableList all;
+ DbTableList owned;
#ifdef ERTS_SMP
erts_smp_rwmtx_t rwlock; /* rw lock on table */
- erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
+ erts_smp_mtx_t fixlock; /* Protects fixing_procs and time */
int is_thread_safe; /* No fine locking inside table needed */
Uint32 type; /* table type, *read only* after creation */
#endif
@@ -224,7 +243,7 @@ typedef struct db_table_common {
UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */
Uint64 heir_started_interval; /* To further identify the heir */
Eterm the_name; /* an atom */
- Eterm id; /* atom | integer */
+ Binary *btid;
DbTableMethod* meth; /* table methods */
erts_smp_atomic_t nitems; /* Total number of items in table */
erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
@@ -232,36 +251,35 @@ typedef struct db_table_common {
ErtsMonotonicTime monotonic;
ErtsMonotonicTime offset;
} time;
- DbFixation* fixations; /* List of processes who have done safe_fixtable,
+ DbFixation* fixing_procs; /* Tree of processes who have done safe_fixtable,
"local" fixations not included. */
/* All 32-bit fields */
Uint32 status; /* bit masks defined below */
- int slot; /* slot index in meta_main_tab */
int keypos; /* defaults to 1 */
int compress;
} DbTableCommon;
/* These are status bit patterns */
-#define DB_NORMAL (1 << 0)
-#define DB_PRIVATE (1 << 1)
-#define DB_PROTECTED (1 << 2)
-#define DB_PUBLIC (1 << 3)
-#define DB_BAG (1 << 4)
-#define DB_SET (1 << 5)
-/*#define DB_LHASH (1 << 6)*/
-#define DB_FINE_LOCKED (1 << 7) /* fine grained locking enabled */
-#define DB_DUPLICATE_BAG (1 << 8)
-#define DB_ORDERED_SET (1 << 9)
-#define DB_DELETE (1 << 10) /* table is being deleted */
-#define DB_FREQ_READ (1 << 11)
-
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET|DB_FINE_LOCKED|DB_FREQ_READ)
+#define DB_PRIVATE (1 << 0)
+#define DB_PROTECTED (1 << 1)
+#define DB_PUBLIC (1 << 2)
+#define DB_DELETE (1 << 3) /* table is being deleted */
+#define DB_SET (1 << 4)
+#define DB_BAG (1 << 5)
+#define DB_DUPLICATE_BAG (1 << 6)
+#define DB_ORDERED_SET (1 << 7)
+#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */
+#define DB_FREQ_READ (1 << 9) /* read_concurrency */
+#define DB_NAMED_TABLE (1 << 10)
+
+#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET\
+ |DB_FINE_LOCKED|DB_FREQ_READ|DB_NAMED_TABLE)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
#define IS_TREE_TABLE(Status) (!!((Status) & \
DB_ORDERED_SET))
-#define NFIXED(T) (erts_smp_refc_read(&(T)->common.ref,0))
+#define NFIXED(T) (erts_smp_refc_read(&(T)->common.fix_count,0))
#define IS_FIXED(T) (NFIXED(T) != 0)
/*
@@ -506,14 +524,5 @@ erts_db_get_match_prog_binary(Eterm term)
#define Binary2MatchProg(BP) \
(ASSERT(IsMatchProgBinary((BP))), \
((MatchProg *) ERTS_MAGIC_BIN_DATA((BP))))
-/*
-** Debugging
-*/
-#ifdef HARDDEBUG
-void db_check_tables(void); /* in db.c */
-#define CHECK_TABLES() db_check_tables()
-#else
-#define CHECK_TABLES()
-#endif
#endif /* _DB_UTIL_H */
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index b2c307e826..61477af316 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2307,6 +2307,7 @@ erl_start(int argc, char **argv)
#endif
set_main_stack_size();
erts_sched_init_time_sup(esdp);
+ erts_ets_sched_spec_data_init(esdp);
process_main(esdp->x_reg_array, esdp->f_reg_array);
}
#endif
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 6ff9aea5ab..da73469516 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -100,14 +100,12 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_links", "address" },
{ "code_write_permission", NULL },
{ "purge_state", NULL },
+ { "meta_name_tab", "address" },
+ { "db_tab", "address" },
{ "proc_status", "pid" },
{ "proc_trace", "pid" },
{ "ports_snapshot", NULL },
- { "meta_name_tab", "address" },
- { "meta_main_tab_slot", "address" },
- { "db_tab", "address" },
{ "db_tab_fix", "address" },
- { "meta_main_tab_main", NULL },
{ "db_hash_slot", "address" },
{ "node_table", NULL },
{ "dist_table", NULL },
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index e2072fe30f..89b627aaf5 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -850,14 +850,15 @@ static Eterm AM_timer;
static Eterm AM_delayed_delete_timer;
static void setup_reference_table(void);
-static Eterm reference_table_term(Uint **hpp, Uint *szp);
+static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp);
static void delete_reference_table(void);
-#if BIG_UINT_HEAP_SIZE > 3 /* 2-tuple */
-#define ID_HEAP_SIZE BIG_UINT_HEAP_SIZE
-#else
-#define ID_HEAP_SIZE 3 /* 2-tuple */
-#endif
+#undef ERTS_MAX__
+#define ERTS_MAX__(A, B) ((A) > (B) ? (A) : (B))
+
+#define ID_HEAP_SIZE \
+ ERTS_MAX__(ERTS_MAGIC_REF_THING_SIZE, \
+ ERTS_MAX__(BIG_UINT_HEAP_SIZE, 3))
typedef struct node_referrer_ {
struct node_referrer_ *next;
@@ -870,6 +871,7 @@ typedef struct node_referrer_ {
int system_ref;
Eterm id;
Uint id_heap[ID_HEAP_SIZE];
+ ErlOffHeap off_heap;
} NodeReferrer;
typedef struct {
@@ -942,7 +944,7 @@ erts_get_node_and_dist_references(struct process *proc)
/* Get term size */
size = 0;
- (void) reference_table_term(NULL, &size);
+ (void) reference_table_term(NULL, NULL, &size);
hp = HAlloc(proc, size);
#ifdef DEBUG
@@ -951,7 +953,7 @@ erts_get_node_and_dist_references(struct process *proc)
#endif
/* Write term */
- res = reference_table_term(&hp, NULL);
+ res = reference_table_term(&hp, &proc->off_heap, NULL);
ASSERT(endp == hp);
@@ -1048,13 +1050,14 @@ insert_node_referrer(ReferredNode *referred_node, int type, Eterm id)
nrp = (NodeReferrer *) erts_alloc(ERTS_ALC_T_NC_TMP,
sizeof(NodeReferrer));
nrp->next = referred_node->referrers;
+ ERTS_INIT_OFF_HEAP(&nrp->off_heap);
referred_node->referrers = nrp;
if(IS_CONST(id))
nrp->id = id;
else {
Uint *hp = &nrp->id_heap[0];
- ASSERT(is_big(id) || is_tuple(id));
- nrp->id = copy_struct(id, size_object(id), &hp, NULL);
+ ASSERT(is_big(id) || is_tuple(id) || is_internal_magic_ref(id));
+ nrp->id = copy_struct(id, size_object(id), &hp, &nrp->off_heap);
}
nrp->heap_ref = 0;
nrp->link_ref = 0;
@@ -1211,10 +1214,20 @@ insert_links2(ErtsLink *lnk, Eterm id)
static void
insert_ets_table(DbTable *tab, void *unused)
{
+ ErlOffHeap off_heap;
+ Eterm heap[ERTS_MAGIC_REF_THING_SIZE];
struct insert_offheap2_arg a;
a.type = ETS_REF;
- a.id = tab->common.id;
+ if (tab->common.status & DB_NAMED_TABLE)
+ a.id = tab->common.the_name;
+ else {
+ Eterm *hp = &heap[0];
+ ERTS_INIT_OFF_HEAP(&off_heap);
+ a.id = erts_mk_magic_ref(&hp, &off_heap, tab->common.btid);
+ }
erts_db_foreach_offheap(tab, insert_offheap2, (void *) &a);
+ if (is_not_atom(a.id))
+ erts_cleanup_offheap(&off_heap);
}
static void
@@ -1518,7 +1531,7 @@ setup_reference_table(void)
*/
static Eterm
-reference_table_term(Uint **hpp, Uint *szp)
+reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
{
#undef MK_2TUP
#undef MK_3TUP
@@ -1573,12 +1586,11 @@ reference_table_term(Uint **hpp, Uint *szp)
nrid = nrp->id;
if (!IS_CONST(nrp->id)) {
-
Uint nrid_sz = size_object(nrp->id);
if (szp)
*szp += nrid_sz;
if (hpp)
- nrid = copy_struct(nrp->id, nrid_sz, hpp, NULL);
+ nrid = copy_struct(nrp->id, nrid_sz, hpp, ohp);
}
if (is_internal_pid(nrid) || nrid == am_error_logger) {
@@ -1713,6 +1725,7 @@ delete_reference_table(void)
NodeReferrer *tnrp;
nrp = referred_nodes[i].referrers;
while(nrp) {
+ erts_cleanup_offheap(&nrp->off_heap);
tnrp = nrp;
nrp = nrp->next;
erts_free(ERTS_ALC_T_NC_TMP, (void *) tnrp);
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 52d9f9ddf7..88fae30845 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -449,7 +449,8 @@ typedef enum {
ERTS_PSTT_CPC, /* Check Process Code */
ERTS_PSTT_CLA, /* Copy Literal Area */
ERTS_PSTT_COHMQ, /* Change off heap message queue */
- ERTS_PSTT_FTMQ /* Flush trace msg queue */
+ ERTS_PSTT_FTMQ, /* Flush trace msg queue */
+ ERTS_PSTT_ETS_FREE_FIXATION
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -602,6 +603,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
#endif
valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+ valid |= ERTS_SSI_AUX_WORK_YIELD;
if (~valid & value)
erts_exit(ERTS_ABORT_EXIT,
@@ -690,6 +692,8 @@ erts_pre_init_process(void)
= "SET_TMO";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
= "MSEG_CACHE_CHECK";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_YIELD_IX]
+ = "YIELD";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX]
= "REAP_PORTS";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX]
@@ -726,6 +730,16 @@ erts_pre_init_process(void)
= ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
= ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].get_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_OWNED_TABLES].set_locks
+ = ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].get_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].set_locks
+ = ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS;
#endif
}
@@ -2557,6 +2571,48 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
return aux_work & ~ERTS_SSI_AUX_WORK_REAP_PORTS;
}
+void
+erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp)
+{
+ ASSERT(esdp == erts_get_scheduler_data());
+ /* Always called by the scheduler itself... */
+ set_aux_work_flags_wakeup_nob(esdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_yield(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ int yield = 0;
+ /*
+ * Yield operations are always requested by the scheduler itself.
+ *
+ * The following handlers should *not* set the ERTS_SSI_AUX_WORK_YIELD
+ * flag in order to indicate more work. They should instead return
+ * information so this "main handler" can manipulate the flag...
+ *
+ * The following handlers should be able to handle being called
+ * even though no work is to be done...
+ */
+
+ /* Various yielding operations... */
+
+ yield |= erts_handle_yielded_ets_all_request(awdp->esdp,
+ &awdp->yield.ets_all);
+
+ /*
+ * Other yielding operations...
+ *
+ */
+
+ if (!yield) {
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_YIELD);
+ return aux_work & ~ERTS_SSI_AUX_WORK_YIELD;
+ }
+
+ return aux_work;
+}
+
+
#if HAVE_ERTS_MSEG
static ERTS_INLINE erts_aint32_t
@@ -2697,6 +2753,9 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
handle_mseg_cache_check);
#endif
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_YIELD,
+ handle_yield);
+
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
@@ -8727,6 +8786,8 @@ sched_thread_func(void *vesdp)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
#endif
+ erts_ets_sched_spec_data_init(esdp);
+
process_main(esdp->x_reg_array, esdp->f_reg_array);
/* No schedulers should *ever* terminate */
@@ -11148,6 +11209,12 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
st_res = am_true;
break;
#endif
+#ifdef ERTS_SMP
+ case ERTS_PSTT_ETS_FREE_FIXATION:
+ reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]);
+ st_res = am_true;
+ break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
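The new ERTS_PSTT_ETS_FREE_FIXATION case charges the deferred cleanup against the process's reduction budget: the value returned by erts_db_execute_free_fixation() is treated as a reduction cost and subtracted from reds, so heavy ETS cleanup consumes scheduling time like any other work. A generic sketch of this style of accounting (names and numbers are illustrative):

#include <stdio.h>

#define REDS_PER_SLICE 200         /* illustrative budget per scheduling slice */

/* Pretend cleanup job: returns the reduction cost of the work it performed. */
static int demo_free_fixation(int *items_left)
{
    int done = *items_left > 100 ? 100 : *items_left;
    *items_left -= done;
    return done;                   /* cost is proportional to items freed */
}

int main(void)
{
    int items_left = 450;
    int slices = 0;

    while (items_left > 0) {
        int reds = REDS_PER_SLICE;
        while (items_left > 0 && reds > 0)
            reds -= demo_free_fixation(&items_left);
        slices++;                  /* budget exhausted (or work done): yield */
    }
    printf("cleanup took %d slices\n", slices);
    return 0;
}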
@@ -11199,7 +11266,8 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
case ERTS_PSTT_GC_MAJOR:
case ERTS_PSTT_GC_MINOR:
case ERTS_PSTT_CPC:
- case ERTS_PSTT_COHMQ:
+ case ERTS_PSTT_COHMQ:
+ case ERTS_PSTT_ETS_FREE_FIXATION:
st_res = am_false;
break;
case ERTS_PSTT_CLA:
@@ -11553,13 +11621,12 @@ erts_internal_request_system_task_4(BIF_ALIST_4)
}
static void
-erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
+erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg)
{
Process *rp = erts_proc_lookup(pid);
if (rp) {
ErtsProcSysTask *st;
erts_aint32_t state, fail_state;
- int i;
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(0));
@@ -11568,8 +11635,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
st->reply_tag = NIL;
st->req_id = NIL;
st->req_id_sz = 0;
- for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++)
- st->arg[i] = NIL;
+ st->arg[0] = (Eterm)arg;
ERTS_INIT_OFF_HEAP(&st->off_heap);
state = erts_smp_atomic32_read_nob(&rp->state);
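erts_schedule_generic_sys_task() now carries one opaque argument, stored directly in the Eterm-typed arg slot via a cast and cast back when the task executes. This works because an Eterm is a word-sized unsigned integer, so a pointer survives the round trip; the stand-alone illustration below uses uintptr_t as a stand-in for that storage type (an assumption about layout, not ERTS code):

#include <assert.h>
#include <stdint.h>

typedef uintptr_t word_t;          /* stand-in for a word-sized term slot */

struct fixation { int dummy; };

int main(void)
{
    struct fixation fix = {42};

    /* Producer side: stash the pointer in the word-sized slot. */
    word_t arg_slot = (word_t)(void *)&fix;

    /* Consumer side (when the task executes): cast it back. */
    struct fixation *p = (struct fixation *)(void *)arg_slot;

    assert(p == &fix && p->dummy == 42);
    return 0;
}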
@@ -11585,7 +11651,13 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
void
erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
{
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ);
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ, NULL);
+}
+
+void
+erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix);
}
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -11633,7 +11705,7 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
dhndl = erts_thr_progress_unmanaged_delay();
#endif
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ);
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL);
#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8bf372dad5..baf830615d 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -63,6 +63,9 @@ typedef struct process Process;
#define ERTS_ONLY_INCLUDE_TRACE_FLAGS
#include "erl_trace.h"
#undef ERTS_ONLY_INCLUDE_TRACE_FLAGS
+#define ERTS_ONLY_SCHED_SPEC_ETS_DATA
+#include "erl_db.h"
+#undef ERTS_ONLY_SCHED_SPEC_ETS_DATA
#ifdef HIPE
#include "hipe_process.h"
@@ -312,6 +315,7 @@ typedef enum {
ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX,
ERTS_SSI_AUX_WORK_SET_TMO_IX,
ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX,
+ ERTS_SSI_AUX_WORK_YIELD_IX,
ERTS_SSI_AUX_WORK_REAP_PORTS_IX,
ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX, /* SHOULD be last flag index */
@@ -348,6 +352,8 @@ typedef enum {
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX)
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX)
+#define ERTS_SSI_AUX_WORK_YIELD \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_YIELD_IX)
#define ERTS_SSI_AUX_WORK_REAP_PORTS \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_REAP_PORTS_IX)
#define ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED \
@@ -613,6 +619,10 @@ typedef struct {
} delayed_wakeup;
#endif
struct {
+ ErtsEtsAllYieldData ets_all;
+ /* Other yielding operations... */
+ } yield;
+ struct {
struct {
erts_aint32_t flags;
void (*callback)(void *);
@@ -621,6 +631,10 @@ typedef struct {
} debug;
} ErtsAuxWorkData;
+#define ERTS_SCHED_AUX_YIELD_DATA(ESDP, NAME) \
+ (&(ESDP)->aux_work_data.yield.NAME)
+void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp);
+
#ifdef ERTS_DIRTY_SCHEDULERS
typedef enum {
ERTS_DIRTY_CPU_SCHEDULER,
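ERTS_SCHED_AUX_YIELD_DATA(ESDP, NAME) resolves to the named member of the scheduler's aux_work_data.yield struct, giving each yielding subsystem its own per-scheduler slot without extra allocation. A generic mock of that by-name access pattern (the real member here is ets_all, as declared above; everything else is illustrative):

#include <stdio.h>

typedef struct { int cont; } demo_ets_all_yield;

typedef struct {
    struct {
        struct { demo_ets_all_yield ets_all; /* other subsystems... */ } yield;
    } aux_work_data;
} demo_scheduler_data;

#define DEMO_SCHED_AUX_YIELD_DATA(ESDP, NAME) \
    (&(ESDP)->aux_work_data.yield.NAME)

int main(void)
{
    demo_scheduler_data sched = {{{{0}}}};
    demo_ets_all_yield *ys = DEMO_SCHED_AUX_YIELD_DATA(&sched, ets_all);
    ys->cont = 1;                /* remember there is a request in progress */
    printf("in progress: %d\n", sched.aux_work_data.yield.ets_all.cont);
    return 0;
}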
@@ -688,7 +702,7 @@ struct ErtsSchedulerData_ {
ErtsSchedWallTime sched_wall_time;
ErtsGCInfo gc_info;
ErtsPortTaskHandle nosuspend_port_task_handle;
-
+ ErtsEtsTables ets_tables;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_alloc_verify_func_t verify_unused_temp_alloc;
Allctr_t *verify_unused_temp_alloc_data;
@@ -822,14 +836,16 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_CALL_TIME_BP 3
#define ERTS_PSD_DELAYED_GC_TASK_QS 4
#define ERTS_PSD_NIF_TRAP_EXPORT 5
-#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6
+#define ERTS_PSD_ETS_OWNED_TABLES 6
+#define ERTS_PSD_ETS_FIXED_TABLES 7
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 8
-#define ERTS_PSD_SIZE 7
+#define ERTS_PSD_SIZE 9
#if !defined(HIPE)
# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
# undef ERTS_PSD_SIZE
-# define ERTS_PSD_SIZE 6
+# define ERTS_PSD_SIZE 8
#endif
typedef struct {
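The two new PSD slots hold per-process bookkeeping for the ETS tables a process owns and the tables it has fixed, and the erts_psd_required_locks[] entries registered in erl_process.c declare which process locks guard each slot: STATUS for owned tables, MAIN for fixed tables. A generic sketch of consulting such a lock-requirement table (types and helpers are illustrative, not the ERTS internals):

#include <assert.h>

typedef unsigned proc_locks_t;
#define LOCK_MAIN   (1u << 0)
#define LOCK_STATUS (1u << 2)

enum { PSD_ETS_OWNED_TABLES, PSD_ETS_FIXED_TABLES, PSD_SIZE };

typedef struct { proc_locks_t get_locks, set_locks; } psd_required_locks_t;

static psd_required_locks_t psd_required_locks[PSD_SIZE];

typedef struct { void *data[PSD_SIZE]; proc_locks_t locks_held; } demo_process;

/* Read a slot, asserting the caller holds the locks the table demands. */
static void *psd_get(demo_process *p, int ix)
{
    assert((p->locks_held & psd_required_locks[ix].get_locks)
           == psd_required_locks[ix].get_locks);
    return p->data[ix];
}

int main(void)
{
    demo_process p = {{0}, LOCK_STATUS};
    psd_required_locks[PSD_ETS_OWNED_TABLES].get_locks = LOCK_STATUS;
    psd_required_locks[PSD_ETS_OWNED_TABLES].set_locks = LOCK_STATUS;
    psd_required_locks[PSD_ETS_FIXED_TABLES].get_locks = LOCK_MAIN;
    psd_required_locks[PSD_ETS_FIXED_TABLES].set_locks = LOCK_MAIN;

    (void)psd_get(&p, PSD_ETS_OWNED_TABLES);   /* ok: status lock held */
    return 0;
}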
@@ -857,6 +873,12 @@ typedef struct {
#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_ETS_OWNED_TABLES_GET_LOCKS ERTS_PROC_LOCK_STATUS
+#define ERTS_PSD_ETS_OWNED_TABLES_SET_LOCKS ERTS_PROC_LOCK_STATUS
+
+#define ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
typedef struct {
ErtsProcLocks get_locks;
ErtsProcLocks set_locks;
@@ -1782,6 +1804,8 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *),
ErtsThrPrgrLaterOp *,
UWord);
void erts_schedule_complete_off_heap_message_queue_change(Eterm pid);
+struct db_fixation;
+void erts_schedule_ets_free_fixation(Eterm pid, struct db_fixation*);
void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc);
int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks);
diff --git a/erts/emulator/beam/erl_rbtree.h b/erts/emulator/beam/erl_rbtree.h
index 5fefaea978..6a42853957 100644
--- a/erts/emulator/beam/erl_rbtree.h
+++ b/erts/emulator/beam/erl_rbtree.h
@@ -105,7 +105,10 @@
* <ERTS_RBT_PREFIX>_rbt_yield_state_t.
*
* The yield state should be statically initialized by
- * ERTS_RBT_YIELD_STAT_INITER.
+ * ERTS_RBT_YIELD_STAT_INITER
+ *
+ * or dynamically initialized with
+ * ERTS_RBT_YIELD_STAT_INIT(<ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate)
*
*
* The following API functions are implemented if corresponding
@@ -178,8 +181,8 @@
* Operate by calling the operator 'op' on each element.
* Order is undefined.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
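The updated comments (here and in the corresponding foreach variants further down) document the convention the yielding traversals now follow: return true (non-zero) when stopping after 'ylimit' elements, keeping the position in the yield state, and false (zero) once the whole tree has been processed. A stand-alone sketch of driving such a traversal; only the return-value convention mirrors the header, the data structure is just an array:

#include <stdio.h>

/* Illustrative yielding traversal: processes at most 'ylimit' elements per
 * call, keeps its position in 'ystate', returns non-zero ("true") when
 * yielding and zero ("false") when everything has been processed. */
typedef struct { int ix; } demo_yield_state;
#define DEMO_YIELD_STAT_INITER {0}

static int demo_foreach_yielding(const int *elems, int nelems,
                                 void (*op)(int), demo_yield_state *ys,
                                 int ylimit)
{
    while (ys->ix < nelems) {
        if (ylimit-- <= 0)
            return 1;              /* yield: more elements remain */
        op(elems[ys->ix++]);
    }
    return 0;                      /* done: whole structure processed */
}

static void print_elem(int v) { printf("%d\n", v); }

int main(void)
{
    const int elems[] = {1, 2, 3, 4, 5, 6, 7};
    demo_yield_state ys = DEMO_YIELD_STAT_INITER;

    /* Caller loop: keep resuming with the same yield state until done. */
    while (demo_foreach_yielding(elems, 7, print_elem, &ys, 3))
        ;   /* in ERTS this is where the scheduler would do other work */
    return 0;
}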
@@ -195,8 +198,8 @@
* Order is undefined. Each element should be destroyed
* by 'op'.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed.
*
* 'arg' is passed as argument to 'op'.
@@ -228,8 +231,8 @@
* Operate by calling the operator 'op' on each element from
* smallest towards larger elements.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -244,8 +247,8 @@
* Operate by calling the operator 'op' on each element from
* largest towards smaller elements.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -296,8 +299,8 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -318,8 +321,8 @@
* Note that elements are often destroyed in another order
* than the order that the elements are operated on.
*
- * Yield when 'ylimit' elements has been processed. Zero is
- * returned when yielding, and a non-zero value is returned when
+ * Yield when 'ylimit' elements have been processed. True is
+ * returned when yielding, and false is returned when
* the whole tree has been processed. The tree should not be
* modified until all of it has been processed.
*
@@ -422,6 +425,13 @@
#ifndef ERTS_RBT_YIELD_STAT_INITER
# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0}
#endif
+#ifndef ERTS_RBT_YIELD_STAT_INIT
+# define ERTS_RBT_YIELD_STAT_INIT(YS) \
+ do { \
+ (YS)->x = NULL; \
+ (YS)->up = 0; \
+ } while (0)
+#endif
#define ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y) \
X ## Y
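ERTS_RBT_YIELD_STAT_INITER covers aggregate initialization at definition time, while the new ERTS_RBT_YIELD_STAT_INIT(YS) (re)initializes a yield state that already exists, for instance one embedded in per-scheduler data. A small generic example of the pair in use; the {x, up} layout matches the default macros above:

#include <stddef.h>

typedef struct { void *x; int up; } demo_rbt_yield_state_t;

#define DEMO_RBT_YIELD_STAT_INITER  {NULL, 0}
#define DEMO_RBT_YIELD_STAT_INIT(YS)   \
    do {                               \
        (YS)->x = NULL;                \
        (YS)->up = 0;                  \
    } while (0)

/* Static/aggregate initialization: usable at definition time only. */
static demo_rbt_yield_state_t global_ystate = DEMO_RBT_YIELD_STAT_INITER;

/* Dynamic initialization: usable on an already-allocated state, e.g. when
 * a per-scheduler structure is set up at runtime. */
static void reset_yield_state(demo_rbt_yield_state_t *ys)
{
    DEMO_RBT_YIELD_STAT_INIT(ys);
}

int main(void)
{
    reset_yield_state(&global_ystate);
    return 0;
}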
@@ -476,12 +486,12 @@ typedef struct {
#if defined(ERTS_RBT_HARD_DEBUG) \
&& (defined(ERTS_RBT_WANT_DELETE) \
|| defined(ERTS_RBT_NEED_INSERT__))
-static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root);
+static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *node);
# define ERTS_RBT_NEED_HDBG_CHECK_TREE__
-# define ERTS_RBT_HDBG_CHECK_TREE__(R) \
- ERTS_RBT_FUNC__(hdbg_check_tree)((R))
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) \
+ ERTS_RBT_FUNC__(hdbg_check_tree)((R),(N))
#else
-# define ERTS_RBT_HDBG_CHECK_TREE__(R) ((void) 1)
+# define ERTS_RBT_HDBG_CHECK_TREE__(R,N) ((void) 1)
#endif
#ifdef ERTS_RBT_NEED_ROTATE__
@@ -634,7 +644,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
ERTS_RBT_T null_x; /* null_x is used to get the fixup started when we
splice out a node without children. */
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
ERTS_RBT_INIT_EMPTY_TNODE(&null_x);
@@ -852,7 +862,7 @@ ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
}
}
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
}
@@ -982,7 +992,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
{
ERTS_RBT_KEY_T kn = ERTS_RBT_GET_KEY(n);
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
ERTS_RBT_INIT_EMPTY_TNODE(n);
@@ -1004,7 +1014,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
if (lookup && ERTS_RBT_IS_EQ(kn, kx)) {
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, NULL);
return x;
}
@@ -1038,7 +1048,7 @@ ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
ERTS_RBT_FUNC__(insert_fixup__)(root, n);
}
- ERTS_RBT_HDBG_CHECK_TREE__(*root);
+ ERTS_RBT_HDBG_CHECK_TREE__(*root, n);
return NULL;
}
@@ -1364,7 +1374,7 @@ ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
ystate->x = NULL;
ystate->up = 0;
}
- return 1; /* Done */
+ return 0; /* Done */
}
x = p;
}
@@ -1579,15 +1589,17 @@ ERTS_RBT_FUNC__(debug_print)(FILE *filep, ERTS_RBT_T *x, int indent,
#ifdef ERTS_RBT_NEED_HDBG_CHECK_TREE__
static void
-ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
+ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root, ERTS_RBT_T *n)
{
int black_depth = -1, no_black = 0;
ERTS_RBT_T *c, *p, *x = root;
ERTS_RBT_KEY_T kx;
ERTS_RBT_KEY_T kc;
- if (!x)
+ if (!x) {
+ ERTS_RBT_ASSERT(!n);
return;
+ }
ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x));
@@ -1597,6 +1609,9 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
while (1) {
+ if (x == n)
+ n = NULL;
+
if (ERTS_RBT_IS_BLACK(x))
no_black++;
else {
@@ -1668,6 +1683,7 @@ ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
if (!p) {
ERTS_RBT_ASSERT(root == x);
ERTS_RBT_ASSERT(no_black == 0);
+ ERTS_RBT_ASSERT(!n);
return; /* Done */
}
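The extra node argument turns the debug check into a membership assertion: delete() passes the node it is about to remove (it must be found during the walk), insert() passes the node it just linked in, the walk clears 'n' when it encounters that node, and the function asserts 'n' ended up NULL. A stand-alone illustration of that clear-and-assert idiom on a plain binary tree:

#include <assert.h>
#include <stddef.h>

struct node { struct node *left, *right; };

/* Walk every node; when the probed node is seen, clear the probe. */
static void visit_all(struct node *x, struct node **probe)
{
    if (!x)
        return;
    if (x == *probe)
        *probe = NULL;
    visit_all(x->left, probe);
    visit_all(x->right, probe);
}

static void assert_contains(struct node *root, struct node *n)
{
    struct node *probe = n;
    visit_all(root, &probe);
    assert(probe == NULL);   /* n was found (or was NULL to begin with) */
}

int main(void)
{
    struct node leaf = {NULL, NULL}, root = {&leaf, NULL};
    assert_contains(&root, &leaf);   /* e.g. checked before deleting leaf */
    return 0;
}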
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 149d30d299..5d6f36b222 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 2b0c9ff2af..888d2beee0 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -129,7 +129,7 @@
-export([list_to_atom/1, list_to_binary/1]).
-export([list_to_bitstring/1, list_to_existing_atom/1, list_to_float/1]).
-export([list_to_integer/1, list_to_integer/2]).
--export([list_to_pid/1, list_to_tuple/1, loaded/0]).
+-export([list_to_pid/1, list_to_ref/1, list_to_tuple/1, loaded/0]).
-export([localtime/0, make_ref/0]).
-export([map_size/1, match_spec_test/3, md5/1, md5_final/1]).
-export([md5_init/0, md5_update/2, module_loaded/1, monitor/2]).
@@ -1159,6 +1159,12 @@ list_to_integer(_String,_Base) ->
String :: string().
list_to_pid(_String) ->
erlang:nif_error(undefined).
+
+%% list_to_ref/1
+-spec erlang:list_to_ref(String) -> reference() when
+ String :: string().
+list_to_ref(_String) ->
+ erlang:nif_error(undefined).
%% list_to_tuple/1
-spec list_to_tuple(List) -> tuple() when