Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names            1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c      11
-rw-r--r--  erts/emulator/beam/dist.c                2
-rw-r--r--  erts/emulator/beam/erl_alloc.c           4
-rw-r--r--  erts/emulator/beam/erl_alloc.types       2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c       29
-rw-r--r--  erts/emulator/beam/erl_binary.h         18
-rw-r--r--  erts/emulator/beam/erl_bits.c            8
-rw-r--r--  erts/emulator/beam/erl_gc.c             32
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c      413
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h       11
-rw-r--r--  erts/emulator/beam/erl_init.c           39
-rw-r--r--  erts/emulator/beam/erl_map.c           443
-rw-r--r--  erts/emulator/beam/erl_nif.c            26
-rw-r--r--  erts/emulator/beam/erl_node_tables.c   214
-rw-r--r--  erts/emulator/beam/erl_node_tables.h    16
-rw-r--r--  erts/emulator/beam/erl_process.c       242
-rw-r--r--  erts/emulator/beam/erl_process.h       102
-rw-r--r--  erts/emulator/beam/erl_term.c            4
-rw-r--r--  erts/emulator/beam/erl_term.h            4
-rw-r--r--  erts/emulator/beam/erl_time.h            8
-rw-r--r--  erts/emulator/beam/global.h             98
-rw-r--r--  erts/emulator/beam/time.c               38
23 files changed, 1348 insertions, 417 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 5ec1409adf..74b42c647e 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -350,6 +350,7 @@ atom message
atom message_binary
atom message_queue_len
atom messages
+atom merge_trap
atom meta
atom meta_match_spec
atom micro_seconds
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 500a98195b..c769428266 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -33,6 +33,7 @@
#include "beam_catches.h"
#include "erl_binary.h"
#include "erl_nif.h"
+#include "erl_bits.h"
#include "erl_thr_progress.h"
static void set_default_trace_pattern(Eterm module);
@@ -937,7 +938,15 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
break;
case TAG_PRIMARY_HEADER:
if (!header_is_transparent(val)) {
- Eterm* new_p = p + thing_arityval(val);
+ Eterm* new_p;
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (in_area(EXPAND_POINTER(mb->orig), mod_start, mod_size)) {
+ return 1;
+ }
+ }
+ new_p = p + thing_arityval(val);
ASSERT(start <= new_p && new_p < end);
p = new_p;
}
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 142fcb3c00..cfdede793c 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -2522,7 +2522,7 @@ info_dist_entry(int to, void *arg, DistEntry *dep, int visible, int connected)
erts_print(to, arg, "Name: %T", dep->sysname);
#ifdef DEBUG
- erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 1));
+ erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 0));
#endif
erts_print(to, arg, "\n");
if (!connected && is_nil(dep->cid)) {
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index dcae5509ec..d11f24220a 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -581,8 +581,10 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
= erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_ABIF_TIMER)]
= erts_timer_type_size(ERTS_ALC_T_ABIF_TIMER);
+#endif
#ifdef HARD_DEBUG
hdbg_init();
@@ -2343,10 +2345,12 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_BIF_TIMER);
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
add_fix_values(&size.processes,
&size.processes_used,
fi,
ERTS_ALC_T_ABIF_TIMER);
+#endif
}
if (want.atom || want.atom_used) {
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 57c506458c..2721e13250 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -167,7 +167,7 @@ type TIMER_SERVICE LONG_LIVED SYSTEM timer_service
type LL_PTIMER FIXED_SIZE PROCESSES ll_ptimer
type HL_PTIMER FIXED_SIZE PROCESSES hl_ptimer
type BIF_TIMER FIXED_SIZE PROCESSES bif_timer
-type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer
+# type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer
type TIMER_REQUEST SHORT_LIVED PROCESSES timer_request
type BTM_YIELD_STATE SHORT_LIVED PROCESSES btm_yield_state
type REG_TABLE STANDARD SYSTEM reg_tab
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index f74aea80a7..23fc4f915e 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -2687,6 +2687,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
res = erts_bld_uint(&hp, NULL, erts_dist_buf_busy_limit);
BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("delayed_node_table_gc", BIF_ARG_1)) {
+ Uint hsz = 0;
+ Uint dntgc = erts_delayed_node_table_gc();
+ if (dntgc == ERTS_NODE_TAB_DELAY_GC_INFINITY)
+ BIF_RET(am_infinity);
+ (void) erts_bld_uint(NULL, &hsz, dntgc);
+ hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
+ res = erts_bld_uint(&hp, NULL, dntgc);
+ BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("ethread_info", BIF_ARG_1)) {
BIF_RET(erts_get_ethread_info(BIF_P));
}
@@ -4050,7 +4059,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
- if (erts_debug_wait_deallocations(BIF_P)) {
+ int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
+ if (erts_debug_wait_completed(BIF_P, flag)) {
+ ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
+ }
+ }
+ if (ERTS_IS_ATOM_STR("timer_cancellations", BIF_ARG_2)) {
+ int flag = ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS;
+ if (erts_debug_wait_completed(BIF_P, flag)) {
ERTS_BIF_YIELD_RETURN(BIF_P, am_ok);
}
}
@@ -4062,6 +4078,17 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
int res = erts_debug_set_unique_monotonic_integer_state(BIF_ARG_2);
BIF_RET(res ? am_true : am_false);
}
+ else if (ERTS_IS_ATOM_STR("node_tab_delayed_delete", BIF_ARG_1)) {
+ /* node_container_SUITE */
+ Sint64 msecs;
+ if (term_to_Sint64(BIF_ARG_2, &msecs)) {
+ /* Negative value restore original value... */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_debug_test_node_tab_delayed_delete(msecs);
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ BIF_RET(am_ok);
+ }
+ }
}
BIF_ERROR(BIF_P, BADARG);
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 8d264d166e..6b96787d40 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -194,6 +194,9 @@ ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
+ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size,
+ void (*destructor)(Binary *),
+ int unaligned);
ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size,
void (*destructor)(Binary *));
@@ -332,21 +335,30 @@ erts_bin_free(Binary *bp)
}
ERTS_GLB_INLINE Binary *
-erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
+erts_create_magic_binary_x(Uint size, void (*destructor)(Binary *),
+ int unaligned)
{
- Uint bsize = ERTS_MAGIC_BIN_SIZE(size);
+ Uint bsize = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_SIZE(size)
+ : ERTS_MAGIC_BIN_SIZE(size);
Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize);
ASSERT(bsize > size);
if (!bptr)
erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
bptr->flags = BIN_FLAG_MAGIC;
- bptr->orig_size = ERTS_MAGIC_BIN_ORIG_SIZE(size);
+ bptr->orig_size = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(size)
+ : ERTS_MAGIC_BIN_ORIG_SIZE(size);
erts_refc_init(&bptr->refc, 0);
ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor;
return bptr;
}
+ERTS_GLB_INLINE Binary *
+erts_create_magic_binary(Uint size, void (*destructor)(Binary *))
+{
+ return erts_create_magic_binary_x(size, destructor, 0);
+}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* !__ERL_BINARY_H */
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index b8ae93fa58..2e29bf8895 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -107,6 +107,14 @@ erts_bits_destroy_state(ERL_BITS_PROTO_0)
void
erts_init_bits(void)
{
+ ERTS_CT_ASSERT(offsetof(Binary,orig_bytes) % 8 == 0);
+ ERTS_CT_ASSERT(offsetof(ErtsMagicBinary,u.aligned.data) % 8 == 0);
+ ERTS_CT_ASSERT(ERTS_MAGIC_BIN_BYTES_TO_ALIGN ==
+ (offsetof(ErtsMagicBinary,u.aligned.data)
+ - offsetof(ErtsMagicBinary,u.unaligned.data)));
+ ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes)
+ == offsetof(Binary,orig_bytes));
+
erts_smp_atomic_init_nob(&bits_bufs_size, 0);
#if defined(ERTS_SMP)
/* erl_process.c calls erts_bits_init_state() on all state instances */
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 1785fc27be..99481e261f 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -677,7 +677,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
Uint area_size;
Eterm* old_htop;
Uint n;
- struct erl_off_heap_header** prev;
+ struct erl_off_heap_header** prev = NULL;
if (p->flags & F_DISABLE_GC)
return;
@@ -786,10 +786,10 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
*/
if (oh) {
- prev = &MSO(p).first;
- while (*prev) {
- prev = &(*prev)->next;
- }
+ prev = &MSO(p).first;
+ while (*prev) {
+ prev = &(*prev)->next;
+ }
}
/*
@@ -818,6 +818,10 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
oh = oh->next;
}
+ if (prev) {
+ *prev = NULL;
+ }
+
/*
* We no longer need this temporary area.
*/
@@ -1223,7 +1227,8 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
Uint new_sz;
Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
- size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
+ size_before = fragments + (HEAP_TOP(p) - HEAP_START(p))
+ + (OLD_HTOP(p) - OLD_HEAP(p));
/*
* Do a fullsweep GC. First figure out the size of the heap
@@ -1870,6 +1875,21 @@ sweep_one_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop, char* src, Uint sr
if (!header_is_thing(gval)) {
heap_ptr++;
} else {
+ if (header_is_bin_matchstate(gval)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) heap_ptr;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ Eterm* origptr;
+ origptr = &(mb->orig);
+ ptr = boxed_val(*origptr);
+ val = *ptr;
+ if (IS_MOVED_BOXED(val)) {
+ *origptr = val;
+ mb->base = binary_bytes(*origptr);
+ } else if (in_area(ptr, src, src_size)) {
+ MOVE_BOXED(ptr,val,htop,origptr);
+ mb->base = binary_bytes(*origptr);
+ }
+ }
heap_ptr += (thing_arityval(gval)+1);
}
break;
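The new branch keeps match states consistent across a sweep: when the matched binary has been moved, the stale reference is replaced and the derived base pointer is recomputed from it. A toy model of that fix-up, with plain pointers standing in for tagged terms and forwarding words (all names hypothetical):

    #include <stdio.h>

    typedef struct { const char *bytes; } binary_t;

    typedef struct {
        binary_t   *orig;       /* the binary being matched (mb->orig) */
        const char *base;       /* derived pointer into it (mb->base)  */
        binary_t   *forwarded;  /* stand-in for an IS_MOVED_BOXED word */
    } match_state;

    static void fixup_match_state(match_state *ms)
    {
        if (ms->forwarded) {            /* the sweep already moved the binary */
            ms->orig = ms->forwarded;   /* follow the forwarding pointer      */
            ms->base = ms->orig->bytes; /* re-derive, as mb->base is refreshed */
        }
    }

    int main(void)
    {
        binary_t old_bin = { "old copy" }, new_bin = { "moved copy" };
        match_state ms = { &old_bin, old_bin.bytes, &new_bin };
        fixup_match_state(&ms);
        printf("base now points into: %s\n", ms.base);
        return 0;
    }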
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 51cd843935..8eacb921fe 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -81,6 +81,13 @@ static void hdbg_chk_srv(ErtsHLTimerService *srv);
#error "ERTS_REF_NUMBERS changed. Update me..."
#endif
+typedef enum {
+ ERTS_TMR_BIF,
+ ERTS_TMR_PROC,
+ ERTS_TMR_PORT,
+ ERTS_TMR_CALLBACK
+} ErtsTmrType;
+
#define ERTS_BIF_TIMER_SHORT_TIME 5000
#ifdef ERTS_SMP
@@ -93,11 +100,14 @@ static void hdbg_chk_srv(ErtsHLTimerService *srv);
/* Bit 0 to 9 contains scheduler id (see mask below) */
#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 10)
#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 11)
-#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 12)
-#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13)
-#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14)
-#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15)
-#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16)
+#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 12)
+#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 13)
+#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 14)
+#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 15)
+#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 16)
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 17)
+#endif
#define ERTS_TMR_ROFLG_SID_MASK \
(ERTS_TMR_ROFLG_HLT - (Uint32) 1)
@@ -141,6 +151,7 @@ typedef struct {
Uint32 roflgs;
erts_smp_atomic32_t refc;
union {
+ void *arg;
erts_atomic_t next;
} u;
} ErtsTmrHead;
@@ -156,6 +167,7 @@ struct ErtsHLTimer_ {
Process *proc;
Port *port;
Eterm name;
+ void (*callback)(void *);
} receiver;
#ifdef ERTS_HLT_HARD_DEBUG
@@ -172,19 +184,28 @@ struct ErtsHLTimer_ {
Eterm message;
ErlHeapFragment *bp;
} btm;
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
struct {
Eterm accessor;
ErtsHLTimerTree tree;
} abtm;
+#endif
};
#define ERTS_HL_PTIMER_SIZE offsetof(ErtsHLTimer, btm)
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
#define ERTS_BIF_TIMER_SIZE offsetof(ErtsHLTimer, abtm)
#define ERTS_ABIF_TIMER_SIZE sizeof(ErtsHLTimer)
+#else
+#define ERTS_BIF_TIMER_SIZE sizeof(ErtsHLTimer)
+#endif
typedef struct {
ErtsTmrHead head; /* NEED to be first! */
- void *p;
+ union {
+ void *p;
+ void (*callback)(void *);
+ } u;
ErtsTWheelTimer tw_tmr;
} ErtsTWTimer;
@@ -340,8 +361,8 @@ refn_is_lt(Uint32 *x, Uint32 *y)
#define ERTS_RBT_WANT_SMALLEST
#define ERTS_RBT_WANT_LOOKUP_INSERT
#define ERTS_RBT_WANT_REPLACE
+#define ERTS_RBT_WANT_FOREACH
#ifdef ERTS_HLT_HARD_DEBUG
-# define ERTS_RBT_WANT_FOREACH
# define ERTS_RBT_WANT_LOOKUP
#endif
#define ERTS_RBT_UNDEF
@@ -452,8 +473,6 @@ same_time_list_foreach_destroy_yielding(ErtsHLTimer **root,
}
}
-#ifdef ERTS_HLT_HARD_DEBUG
-
static ERTS_INLINE void
same_time_list_foreach(ErtsHLTimer *root,
void (*op)(ErtsHLTimer *, void *),
@@ -468,6 +487,8 @@ same_time_list_foreach(ErtsHLTimer *root,
}
}
+#ifdef ERTS_HLT_HARD_DEBUG
+
static ERTS_INLINE ErtsHLTimer *
same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
{
@@ -584,6 +605,8 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#include "erl_rbtree.h"
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+
#define ERTS_RBT_PREFIX abtm
#define ERTS_RBT_T ErtsHLTimer
#define ERTS_RBT_KEY_T Uint32 *
@@ -634,6 +657,8 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#include "erl_rbtree.h"
+#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
+
#ifdef ERTS_SMP
static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
#endif
@@ -673,7 +698,9 @@ erts_timer_type_size(ErtsAlcType_t type)
case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer);
case ERTS_ALC_T_HL_PTIMER: return ERTS_HL_PTIMER_SIZE;
case ERTS_ALC_T_BIF_TIMER: return ERTS_BIF_TIMER_SIZE;
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
case ERTS_ALC_T_ABIF_TIMER: return ERTS_ABIF_TIMER_SIZE;
+#endif
default: ERTS_INTERNAL_ERROR("Unknown type");
}
return 0;
@@ -747,9 +774,9 @@ schedule_tw_timer_destroy(ErtsTWTimer *tmr)
* dropped at once...
*/
if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC)
- erts_proc_dec_refc((Process *) tmr->p);
- else
- erts_port_dec_refc((Port *) tmr->p);
+ erts_proc_dec_refc((Process *) tmr->u.p);
+ else if (tmr->head.roflgs & ERTS_TMR_ROFLG_PORT)
+ erts_port_dec_refc((Port *) tmr->u.p);
erts_schedule_thr_prgr_later_cleanup_op(
scheduled_tw_timer_destroy,
@@ -771,7 +798,7 @@ static void
tw_proc_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Process *proc = (Process *) twtp->p;
+ Process *proc = (Process *) twtp->u.p;
if (proc_timeout_common(proc, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
@@ -781,7 +808,7 @@ static void
tw_port_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Port *port = (Port *) twtp->p;
+ Port *port = (Port *) twtp->u.p;
if (port_timeout_common(port, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
@@ -801,13 +828,26 @@ cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr)
erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->tw_tmr);
}
+static void
+tw_callback_timeout(void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ void (*callback)(void *) = twtp->u.callback;
+ void *arg = twtp->head.u.arg;
+ tw_timer_dec_refc(twtp);
+ (*callback)(arg);
+}
+
static ErtsTWTimer *
create_tw_timer(ErtsSchedulerData *esdp,
- void *p, int is_proc,
+ ErtsTmrType type, void *p,
+ void (*callback)(void *), void *arg,
ErtsMonotonicTime timeout_pos)
{
ErtsTWTimer *tmr;
void (*timeout_func)(void *);
+ void (*cancel_func)(void *);
+ erts_aint32_t refc;
tmr = tw_timer_alloc();
erts_twheel_init_timer(&tmr->tw_tmr);
@@ -815,24 +855,48 @@ create_tw_timer(ErtsSchedulerData *esdp,
tmr->head.roflgs = (Uint32) esdp->no;
ERTS_HLT_ASSERT((tmr->head.roflgs
& ~ERTS_TMR_ROFLG_SID_MASK) == 0);
- tmr->p = p;
- if (is_proc) {
+
+ switch (type) {
+
+ case ERTS_TMR_PROC:
+ tmr->u.p = p;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
timeout_func = tw_proc_timeout;
+ cancel_func = tw_ptimer_cancel;
erts_proc_inc_refc((Process *) p);
- }
- else {
+ refc = 2;
+ break;
+
+ case ERTS_TMR_PORT:
+ tmr->u.p = p;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT;
timeout_func = tw_port_timeout;
+ cancel_func = tw_ptimer_cancel;
erts_port_inc_refc((Port *) p);
+ refc = 2;
+ break;
+
+ case ERTS_TMR_CALLBACK:
+ tmr->head.u.arg = arg;
+ tmr->u.callback = callback;
+
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_CALLBACK;
+ timeout_func = tw_callback_timeout;
+ cancel_func = NULL;
+ refc = 1;
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unsupported timer type");
+ return NULL;
}
- erts_smp_atomic32_init_nob(&tmr->head.refc, 2);
+ erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
erts_twheel_set_timer(esdp->timer_wheel,
&tmr->tw_tmr,
timeout_func,
- tw_ptimer_cancel,
+ cancel_func,
tmr,
timeout_pos);
@@ -852,8 +916,10 @@ hl_timer_destroy(ErtsHLTimer *tmr)
else {
if (roflgs & ERTS_TMR_ROFLG_PRE_ALC)
bif_timer_pre_free(tmr);
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
else if (roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
erts_free(ERTS_ALC_T_ABIF_TIMER, tmr);
+#endif
else
erts_free(ERTS_ALC_T_BIF_TIMER, tmr);
}
@@ -948,6 +1014,8 @@ check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
#endif
}
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+
static void
hlt_delete_abtm(ErtsHLTimer *tmr)
{
@@ -971,18 +1039,20 @@ hlt_delete_abtm(ErtsHLTimer *tmr)
}
}
+#endif
+
static ErtsHLTimer *
create_hl_timer(ErtsSchedulerData *esdp,
ErtsMonotonicTime timeout_pos,
- int short_time, int is_bif_tmr,
+ int short_time, ErtsTmrType type,
void *rcvrp, Eterm rcvr, Eterm acsr,
- Eterm msg, Uint32 *refn)
+ Eterm msg, Uint32 *refn,
+ void (*callback)(void *), void *arg)
{
ErtsHLTimerService *srv = esdp->timer_service;
ErtsHLTimer *tmr, *st_tmr;
erts_aint32_t refc;
Uint32 roflgs;
- int is_abif_tmr = is_bif_tmr && is_value(acsr) && acsr != rcvr;
check_canceled_queue(esdp, srv);
@@ -990,43 +1060,69 @@ create_hl_timer(ErtsSchedulerData *esdp,
roflgs = ((Uint32) esdp->no) | ERTS_TMR_ROFLG_HLT;
- if (!is_bif_tmr)
+ if (type != ERTS_TMR_BIF) {
+
tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER,
ERTS_HL_PTIMER_SIZE);
- else if (short_time) {
- tmr = bif_timer_pre_alloc();
- if (!tmr)
- goto alloc_bif_timer;
- roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
- }
- else {
- alloc_bif_timer:
- if (is_abif_tmr)
- tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER,
- ERTS_ABIF_TIMER_SIZE);
- else
- tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER,
- ERTS_BIF_TIMER_SIZE);
- }
+ tmr->timeout = timeout_pos;
- tmr->timeout = timeout_pos;
+ switch (type) {
+
+ case ERTS_TMR_PROC:
+ ERTS_HLT_ASSERT(is_internal_pid(rcvr));
- if (!is_bif_tmr) {
- if (is_internal_pid(rcvr)) {
erts_proc_inc_refc((Process *) rcvrp);
tmr->receiver.proc = (Process *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PROC;
- }
- else {
- erts_port_inc_refc((Port *) rcvrp);
+ refc = 2;
+ break;
+
+ case ERTS_TMR_PORT:
ERTS_HLT_ASSERT(is_internal_port(rcvr));
+ erts_port_inc_refc((Port *) rcvrp);
tmr->receiver.port = (Port *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PORT;
+ refc = 2;
+ break;
+
+ case ERTS_TMR_CALLBACK:
+ roflgs |= ERTS_TMR_ROFLG_CALLBACK;
+ tmr->receiver.callback = callback;
+ tmr->head.u.arg = arg;
+ refc = 1;
+ break;
+
+ default:
+ ERTS_INTERNAL_ERROR("Unsupported timer type");
+ return NULL;
}
- refc = 2;
+
}
- else {
+ else { /* ERTS_TMR_BIF */
Uint hsz;
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+ int is_abif_tmr = is_value(acsr) && acsr != rcvr;
+#endif
+
+ if (short_time) {
+ tmr = bif_timer_pre_alloc();
+ if (!tmr)
+ goto alloc_bif_timer;
+ roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
+ }
+ else {
+ alloc_bif_timer:
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+ if (is_abif_tmr)
+ tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER,
+ ERTS_ABIF_TIMER_SIZE);
+ else
+#endif
+ tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ ERTS_BIF_TIMER_SIZE);
+ }
+
+ tmr->timeout = timeout_pos;
roflgs |= ERTS_TMR_ROFLG_BIF_TMR;
if (is_internal_pid(rcvr)) {
@@ -1057,6 +1153,8 @@ create_hl_timer(ErtsSchedulerData *esdp,
tmr->btm.refn[2] = refn[2];
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
if (is_abif_tmr) {
Process *aproc;
roflgs |= ERTS_TMR_ROFLG_ABIF_TMR;
@@ -1071,6 +1169,9 @@ create_hl_timer(ErtsSchedulerData *esdp,
erts_smp_proc_unlock(aproc, ERTS_PROC_LOCK_BTM);
}
}
+#endif
+
+ btm_rbt_insert(&srv->btm_tree, tmr);
}
tmr->head.roflgs = roflgs;
@@ -1098,9 +1199,6 @@ create_hl_timer(ErtsSchedulerData *esdp,
if (st_tmr)
same_time_list_insert(&st_tmr->time.tree.same_time, tmr);
- if (is_bif_tmr)
- btm_rbt_insert(&srv->btm_tree, tmr);
-
#ifdef ERTS_HLT_HARD_DEBUG
tmr->pending_timeout = 0;
#endif
@@ -1120,8 +1218,10 @@ hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
Uint32 is_reg_name = (roflgs & ERTS_TMR_ROFLG_REG_NAME);
ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
hlt_delete_abtm(tmr);
+#endif
if (is_reg_name) {
Eterm pid;
@@ -1203,10 +1303,13 @@ static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv)
hlt_bif_timer_timeout(tmr, roflgs);
else if (roflgs & ERTS_TMR_ROFLG_PROC)
hlt_proc_timeout(tmr);
- else {
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PORT);
+ else if (roflgs & ERTS_TMR_ROFLG_PORT)
hlt_port_timeout(tmr);
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
+ (*tmr->receiver.callback)(tmr->head.u.arg);
}
+
}
tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
@@ -1300,8 +1403,10 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
hlt_delete_abtm(tmr);
+#endif
}
if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) {
@@ -1670,8 +1775,9 @@ setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;
- tmr = create_hl_timer(esdp, timeout_pos, short_time, 1, NULL,
- rcvr, acsr, tmo_msg, internal_ref_numbers(ref));
+ tmr = create_hl_timer(esdp, timeout_pos, short_time,
+ ERTS_TMR_BIF, NULL, rcvr, acsr, tmo_msg,
+ internal_ref_numbers(ref), NULL, NULL);
UnUseTmpHeap(4, c_p);
@@ -1943,8 +2049,10 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp,
*/
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
if (!tmr)
tmr = abtm_rbt_lookup(c_p->accessor_bif_timers, trefn);
+#endif
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
if (!tmr)
return 0;
@@ -2184,11 +2292,13 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
if (!abs || !bool_arg(tp[2], abs))
return 0;
break;
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
case am_accessor:
if (!accessor || is_not_internal_pid(tp[2]))
return 0;
*accessor = tp[2];
break;
+#endif
default:
return 0;
}
@@ -2250,7 +2360,9 @@ typedef struct {
ErtsBifTimers *bif_timers;
union {
proc_btm_rbt_yield_state_t proc_btm_yield_state;
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
abtm_rbt_yield_state_t abtm_yield_state;
+#endif
} u;
} ErtsBifTimerYieldState;
@@ -2291,6 +2403,8 @@ int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
return res;
}
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+
static void
detach_bif_timer(ErtsHLTimer *tmr, void *vesdp)
{
@@ -2335,6 +2449,8 @@ int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp
return res;
}
+#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
+
static ERTS_INLINE int
parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
ErtsMonotonicTime *conv_arg, int abs,
@@ -2498,6 +2614,80 @@ BIF_RETTYPE read_timer_2(BIF_ALIST_2)
return ret;
}
+static void
+start_callback_timer(ErtsSchedulerData *esdp,
+ int twt,
+ ErtsMonotonicTime timeout_pos,
+ void (*callback)(void *),
+ void *arg)
+
+{
+ if (twt)
+ create_tw_timer(esdp, ERTS_TMR_CALLBACK, NULL,
+ callback, arg, timeout_pos);
+ else
+ create_hl_timer(esdp, timeout_pos, 0,
+ ERTS_TMR_CALLBACK, NULL,
+ NIL, THE_NON_VALUE, NIL,
+ NULL, callback, arg);
+}
+
+typedef struct {
+ int twt;
+ ErtsMonotonicTime timeout_pos;
+ void (*callback)(void *);
+ void *arg;
+} ErtsStartCallbackTimerRequest;
+
+static void
+scheduled_start_callback_timer(void *vsctr)
+{
+ ErtsStartCallbackTimerRequest *sctr
+ = (ErtsStartCallbackTimerRequest *) vsctr;
+
+ start_callback_timer(erts_get_scheduler_data(),
+ sctr->twt,
+ sctr->timeout_pos,
+ sctr->callback,
+ sctr->arg);
+
+ erts_free(ERTS_ALC_T_TIMER_REQUEST, vsctr);
+}
+
+void
+erts_start_timer_callback(ErtsMonotonicTime tmo,
+ void (*callback)(void *),
+ void *arg)
+{
+ ErtsSchedulerData *esdp;
+ ErtsMonotonicTime timeout_pos;
+ int twt;
+
+ esdp = erts_get_scheduler_data();
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
+ tmo);
+ twt = tmo < ERTS_TIMER_WHEEL_MSEC;
+
+ if (esdp)
+ start_callback_timer(esdp,
+ twt,
+ timeout_pos,
+ callback,
+ arg);
+ else {
+ ErtsStartCallbackTimerRequest *sctr;
+ sctr = erts_alloc(ERTS_ALC_T_TIMER_REQUEST,
+ sizeof(ErtsStartCallbackTimerRequest));
+ sctr->twt = twt;
+ sctr->timeout_pos = timeout_pos;
+ sctr->callback = callback;
+ sctr->arg = arg;
+ erts_schedule_misc_aux_work(1,
+ scheduled_start_callback_timer,
+ (void *) sctr);
+ }
+}
+
/*
* Process and Port timer functionality.
*
@@ -2521,11 +2711,13 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
c_p->flags &= ~F_TIMO;
if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, (void *) c_p, 1, timeout_pos);
+ tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PROC, (void *) c_p,
+ NULL, NULL, timeout_pos);
else
- tmr = (void *) create_hl_timer(esdp, timeout_pos,
- short_time, 0, (void *) c_p,
- c_p->common.id, NIL, NIL, NULL);
+ tmr = (void *) create_hl_timer(esdp, timeout_pos, short_time,
+ ERTS_TMR_PROC, (void *) c_p,
+ c_p->common.id, THE_NON_VALUE,
+ NIL, NULL, NULL, NULL);
erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
}
}
@@ -2608,13 +2800,12 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, (void *) c_prt, 0,
- timeout_pos);
+ tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PORT, (void *) c_prt,
+ NULL, NULL, timeout_pos);
else
- tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, 0,
- (void *) c_prt,
- c_prt->common.id, NIL, NIL,
- NULL);
+ tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
+ (void *) c_prt, c_prt->common.id,
+ THE_NON_VALUE, NIL, NULL, NULL, NULL);
erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
}
@@ -2759,6 +2950,98 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm,
}
}
+typedef struct {
+ void (*tclbk)(void *);
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *);
+ void *arg;
+} ErtsDebugForeachCallbackTimer;
+
+static void
+debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct)
+{
+ ErtsDebugForeachCallbackTimer *dfct
+ = (ErtsDebugForeachCallbackTimer *) vdfct;
+
+ if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
+ && (tmr->receiver.callback && dfct->tclbk))
+ (*dfct->func)(dfct->arg,
+ tmr->timeout,
+ tmr->head.u.arg);
+}
+
+static void
+debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct)
+{
+ ErtsDebugForeachCallbackTimer *dfct
+ = (ErtsDebugForeachCallbackTimer *) vdfct;
+
+ if (tmr->time.tree.same_time)
+ same_time_list_foreach(tmr->time.tree.same_time,
+ debug_callback_timer_foreach_list,
+ vdfct);
+
+ if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
+ && (tmr->receiver.callback && dfct->tclbk))
+ (*dfct->func)(dfct->arg,
+ tmr->timeout,
+ tmr->head.u.arg);
+}
+
+static void
+debug_tw_callback_timer(void *vdfct,
+ ErtsMonotonicTime timeout_pos,
+ void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ ErtsDebugForeachCallbackTimer *dfct
+ = (ErtsDebugForeachCallbackTimer *) vdfct;
+
+ if (twtp->u.callback == dfct->tclbk)
+ (*dfct->func)(dfct->arg,
+ timeout_pos,
+ twtp->head.u.arg);
+}
+
+void
+erts_debug_callback_timer_foreach(void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg)
+{
+ int six;
+ ErtsDebugForeachCallbackTimer dfct;
+
+ dfct.tclbk = tclbk;
+ dfct.func = func;
+ dfct.arg = arg;
+
+ if (!erts_smp_thr_progress_is_blocking())
+ ERTS_INTERNAL_ERROR("Not blocking thread progress");
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsHLTimerService *srv =
+ erts_aligned_scheduler_data[six].esd.timer_service;
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+
+ erts_twheel_debug_foreach(twheel,
+ tw_callback_timeout,
+ debug_tw_callback_timer,
+ (void *) &dfct);
+
+ if (srv->yield.root)
+ debug_callback_timer_foreach(srv->yield.root,
+ (void *) &dfct);
+
+ time_rbt_foreach(srv->btm_tree,
+ debug_callback_timer_foreach,
+ (void *) &dfct);
+ }
+}
+
#ifdef ERTS_HLT_HARD_DEBUG
typedef struct {
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
index 30889a71da..24c57fc873 100644
--- a/erts/emulator/beam/erl_hl_timer.h
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -59,7 +59,9 @@ int erts_cancel_bif_timers(Process *, ErtsBifTimers *, void **);
int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **);
ErtsHLTimerService *erts_create_timer_service(void);
void erts_hl_timer_init(void);
-
+void erts_start_timer_callback(ErtsMonotonicTime,
+ void (*)(void *),
+ void *);
#ifdef ERTS_SMP
void
erts_handle_canceled_timers(void *vesdp,
@@ -76,5 +78,10 @@ void erts_debug_bif_timer_foreach(void (*func)(Eterm,
ErlHeapFragment *,
void *),
void *arg);
-
+void
+erts_debug_callback_timer_foreach(void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg);
#endif /* ERL_HL_TIMER_H__ */
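erts_start_timer_callback() is the new public entry point declared above; as the erl_hl_timer.c hunk shows, it either creates the timer directly when called from a scheduler thread or boxes the arguments into a request that is handed off as aux work. A self-contained sketch of that dispatch pattern (names, and the synchronous "firing", are illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        long timeout_ms;
        void (*callback)(void *);
        void *arg;
    } timer_request;

    static void create_timer(long timeout_ms, void (*cb)(void *), void *arg)
    {
        printf("timer armed for %ld ms\n", timeout_ms);
        cb(arg);                   /* the real timer fires later, from a wheel */
    }

    static void deferred_create(void *vreq)   /* models the aux-work callback */
    {
        timer_request *req = vreq;
        create_timer(req->timeout_ms, req->callback, req->arg);
        free(req);
    }

    static void start_timer_callback(int have_scheduler, long timeout_ms,
                                     void (*cb)(void *), void *arg)
    {
        if (have_scheduler) {
            create_timer(timeout_ms, cb, arg);
        } else {
            timer_request *req = malloc(sizeof *req);
            req->timeout_ms = timeout_ms;
            req->callback = cb;
            req->arg = arg;
            deferred_create(req);  /* stands in for erts_schedule_misc_aux_work */
        }
    }

    static void on_timeout(void *arg) { printf("timeout: %s\n", (char *) arg); }

    int main(void)
    {
        start_timer_callback(1, 1000, on_timeout, "direct");
        start_timer_callback(0, 1000, on_timeout, "deferred");
        return 0;
    }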
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 33417833a9..0febe8fb3d 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -141,7 +141,8 @@ static void erl_init(int ncpu,
int port_tab_sz_ignore_files,
int legacy_port_tab,
int time_correction,
- ErtsTimeWarpMode time_warp_mode);
+ ErtsTimeWarpMode time_warp_mode,
+ int node_tab_delete_delay);
static erts_atomic_t exiting;
@@ -314,7 +315,8 @@ erts_short_init(void)
0,
0,
time_correction,
- time_warp_mode);
+ time_warp_mode,
+ ERTS_NODE_TAB_DELAY_GC_DEFAULT);
erts_initialized = 1;
}
@@ -326,7 +328,8 @@ erl_init(int ncpu,
int port_tab_sz_ignore_files,
int legacy_port_tab,
int time_correction,
- ErtsTimeWarpMode time_warp_mode)
+ ErtsTimeWarpMode time_warp_mode,
+ int node_tab_delete_delay)
{
init_benchmarking();
@@ -367,7 +370,7 @@ erl_init(int ncpu,
erts_init_binary(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
- erts_init_node_tables();
+ erts_init_node_tables(node_tab_delete_delay);
init_dist();
erl_drv_thr_init();
erts_init_async();
@@ -379,6 +382,7 @@ erl_init(int ncpu,
erts_init_bif_re();
erts_init_unicode(); /* after RE to get access to PCRE unicode */
erts_init_external();
+ erts_init_map();
erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2);
erts_late_init_process();
#if HAVE_ERTS_MSEG
@@ -629,6 +633,9 @@ void erts_usage(void)
erts_fprintf(stderr, " see error_logger documentation for details\n");
erts_fprintf(stderr, "-zdbbl size set the distribution buffer busy limit in kilobytes\n");
erts_fprintf(stderr, " valid range is [1-%d]\n", INT_MAX/1024);
+ erts_fprintf(stderr, "-zdntgc time set delayed node table gc in seconds\n");
+ erts_fprintf(stderr, " valid values are infinity or integers in the range [0-%d]\n",
+ ERTS_NODE_TAB_DELAY_GC_MAX);
erts_fprintf(stderr, "\n");
erts_fprintf(stderr, "Note that if the emulator is started with erlexec (typically\n");
erts_fprintf(stderr, "from the erl script), these flags should be specified with +.\n");
@@ -1219,6 +1226,7 @@ erl_start(int argc, char **argv)
int legacy_port_tab = 0;
int time_correction;
ErtsTimeWarpMode time_warp_mode;
+ int node_tab_delete_delay = ERTS_NODE_TAB_DELAY_GC_DEFAULT;
set_default_time_adj(&time_correction,
&time_warp_mode);
@@ -2005,9 +2013,9 @@ erl_start(int argc, char **argv)
case 'z': {
char *sub_param = argv[i]+2;
- int new_limit;
if (has_prefix("dbbl", sub_param)) {
+ int new_limit;
arg = get_arg(sub_param+4, argv[i+1], &i);
new_limit = atoi(arg);
if (new_limit < 1 || INT_MAX/1024 < new_limit) {
@@ -2016,6 +2024,24 @@ erl_start(int argc, char **argv)
} else {
erts_dist_buf_busy_limit = new_limit*1024;
}
+ }
+ else if (has_prefix("dntgc", sub_param)) {
+ long secs;
+
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ if (sys_strcmp(arg, "infinity") == 0)
+ secs = ERTS_NODE_TAB_DELAY_GC_INFINITY;
+ else {
+ char *endptr;
+ errno = 0;
+ secs = strtol(arg, &endptr, 10);
+ if (errno != 0 || *arg == '\0' || *endptr != '\0'
+ || secs < 0 || ERTS_NODE_TAB_DELAY_GC_MAX < secs) {
+ erts_fprintf(stderr, "Invalid delayed node table gc: %s\n", arg);
+ erts_usage();
+ }
+ }
+ node_tab_delete_delay = (int) secs;
} else {
erts_fprintf(stderr, "bad -z option %s\n", argv[i]);
erts_usage();
@@ -2089,7 +2115,8 @@ erl_start(int argc, char **argv)
port_tab_sz_ignore_files,
legacy_port_tab,
time_correction,
- time_warp_mode);
+ time_warp_mode,
+ node_tab_delete_delay);
load_preloaded();
erts_end_staging_code_ix();
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index a1bd39dbc8..3e78731d20 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -32,6 +32,7 @@
#include "erl_process.h"
#include "error.h"
#include "bif.h"
+#include "erl_binary.h"
#include "erl_map.h"
@@ -79,8 +80,13 @@ typedef struct {
static Eterm flatmap_merge(Process *p, Eterm nodeA, Eterm nodeB);
-static Eterm map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args);
-static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB);
+static BIF_RETTYPE map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args);
+struct HashmapMergeContext_;
+static BIF_RETTYPE hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB, int swap_args,
+ struct HashmapMergeContext_*);
+static Export hashmap_merge_trap_export;
+static BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1);
+static Uint hashmap_subtree_size(Eterm node);
static Eterm hashmap_to_list(Process *p, Eterm map);
static Eterm hashmap_keys(Process *p, Eterm map);
static Eterm hashmap_values(Process *p, Eterm map);
@@ -95,6 +101,15 @@ static Eterm hashmap_bld_tuple_uint(Uint **hpp, Uint *szp, Uint n, Uint nums[]);
static int hxnodecmp(hxnode_t* a, hxnode_t* b);
static int hxnodecmpkey(hxnode_t* a, hxnode_t* b);
+
+void erts_init_map(void) {
+ erts_init_trap_export(&hashmap_merge_trap_export,
+ am_maps, am_merge_trap, 1,
+ &maps_merge_trap_1);
+ return;
+}
+
+
/* erlang:map_size/1
* the corresponding instruction is implemented in:
* beam/erl_bif_guard.c
@@ -942,15 +957,15 @@ BIF_RETTYPE maps_merge_2(BIF_ALIST_2) {
BIF_RET(flatmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2));
} else if (is_hashmap(BIF_ARG_2)) {
/* Will always become a tree */
- BIF_RET(map_merge_mixed(BIF_P, BIF_ARG_1, BIF_ARG_2, 0));
+ return map_merge_mixed(BIF_P, BIF_ARG_1, BIF_ARG_2, 0);
}
BIF_P->fvalue = BIF_ARG_2;
} else if (is_hashmap(BIF_ARG_1)) {
if (is_hashmap(BIF_ARG_2)) {
- BIF_RET(hashmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2));
+ return hashmap_merge(BIF_P, BIF_ARG_1, BIF_ARG_2, 0, NULL);
} else if (is_flatmap(BIF_ARG_2)) {
/* Will always become a tree */
- BIF_RET(map_merge_mixed(BIF_P, BIF_ARG_2, BIF_ARG_1, 1));
+ return map_merge_mixed(BIF_P, BIF_ARG_2, BIF_ARG_1, 1);
}
BIF_P->fvalue = BIF_ARG_2;
} else {
@@ -1113,30 +1128,64 @@ static Eterm map_merge_mixed(Process *p, Eterm flat, Eterm tree, int swap_args)
erts_free(ERTS_ALC_T_TMP, (void *) hxns);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(p);
- return swap_args ? hashmap_merge(p, tree, res) : hashmap_merge(p, res, tree);
+ return hashmap_merge(p, res, tree, swap_args, NULL);
+}
+
+#define PSTACK_TYPE struct HashmapMergePStackType
+struct HashmapMergePStackType {
+ Eterm nodeA, nodeB;
+ Eterm *srcA, *srcB;
+ Uint32 abm, bbm, rbm; /* node bitmaps */
+ int mix; /* &1: there are unique A stuff in node
+ * &2: there are unique B stuff in node */
+ int ix;
+ Eterm array[16]; /* temp node construction area */
+};
+
+typedef struct HashmapMergeContext_ {
+ Uint size; /* total key-value counter */
+ unsigned int lvl;
+ Eterm trap_bin;
+ ErtsPStack pstack;
+#ifdef DEBUG
+ Eterm dbg_map_A, dbg_map_B;
+#endif
+} HashmapMergeContext;
+
+static void hashmap_merge_ctx_destructor(Binary* ctx_bin)
+{
+ HashmapMergeContext* ctx = (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin);
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
+
+ PSTACK_DESTROY_SAVED(&ctx->pstack);
+}
+
+BIF_RETTYPE maps_merge_trap_1(BIF_ALIST_1) {
+ Binary* ctx_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val;
+
+ ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == hashmap_merge_ctx_destructor);
+
+ return hashmap_merge(BIF_P, NIL, NIL, 0,
+ (HashmapMergeContext*) ERTS_MAGIC_BIN_DATA(ctx_bin));
}
#define HALLOC_EXTRA 200
+#define MAP_MERGE_LOOP_FACTOR 8
-static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB) {
+static BIF_RETTYPE hashmap_merge(Process *p, Eterm map_A, Eterm map_B,
+ int swap_args, HashmapMergeContext* ctx) {
#define PSTACK_TYPE struct HashmapMergePStackType
- struct HashmapMergePStackType {
- Eterm *srcA, *srcB;
- Uint32 abm, bbm, rbm; /* node bitmaps */
- int keepA;
- int ix;
- Eterm array[16];
- };
PSTACK_DECLARE(s, 4);
- struct HashmapMergePStackType* sp = PSTACK_PUSH(s);
- Eterm *hp, *nhp;
+ HashmapMergeContext local_ctx;
+ struct HashmapMergePStackType* sp;
+ Uint32 hx;
+ Eterm res = THE_NON_VALUE;
Eterm hdrA, hdrB;
- Uint32 ahx, bhx;
- Uint size; /* total key-value counter */
- int keepA = 0;
- unsigned int lvl = 0;
+ Eterm *hp, *nhp;
+ Eterm trap_ret;
+ Sint initial_reds = (Sint) (ERTS_BIF_REDS_LEFT(p) * MAP_MERGE_LOOP_FACTOR);
+ Sint reds = initial_reds;
DeclareTmpHeap(th,2,p);
- Eterm res = THE_NON_VALUE;
UseTmpHeap(2,p);
/*
@@ -1144,152 +1193,139 @@ static Eterm hashmap_merge(Process *p, Eterm nodeA, Eterm nodeB) {
* and merge each pair of nodes.
*/
- {
- hashmap_head_t* a = (hashmap_head_t*) hashmap_val(nodeA);
- hashmap_head_t* b = (hashmap_head_t*) hashmap_val(nodeB);
- size = a->size + b->size;
+ PSTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK);
+
+ if (ctx == NULL) { /* first call */
+ hashmap_head_t* a = (hashmap_head_t*) hashmap_val(map_A);
+ hashmap_head_t* b = (hashmap_head_t*) hashmap_val(map_B);
+
+ sp = PSTACK_PUSH(s);
+ sp->srcA = swap_args ? &map_B : &map_A;
+ sp->srcB = swap_args ? &map_A : &map_B;
+ sp->mix = 0;
+ local_ctx.size = a->size + b->size;
+ local_ctx.lvl = 0;
+ #ifdef DEBUG
+ local_ctx.dbg_map_A = map_A;
+ local_ctx.dbg_map_B = map_B;
+ local_ctx.trap_bin = THE_NON_VALUE;
+ #endif
+ ctx = &local_ctx;
+ }
+ else {
+ PSTACK_RESTORE(s, &ctx->pstack);
+ sp = PSTACK_TOP(s);
+ goto resume_from_trap;
}
recurse:
- if (primary_tag(nodeA) == TAG_PRIMARY_BOXED &&
- primary_tag(nodeB) == TAG_PRIMARY_LIST) {
- /* Avoid implementing this combination by switching places */
- Eterm tmp = nodeA;
- nodeA = nodeB;
- nodeB = tmp;
- keepA = !keepA;
- }
-
- switch (primary_tag(nodeA)) {
- case TAG_PRIMARY_LIST: {
- sp->srcA = list_val(nodeA);
- switch (primary_tag(nodeB)) {
- case TAG_PRIMARY_LIST: { /* LEAF + LEAF */
- sp->srcB = list_val(nodeB);
-
- if (EQ(CAR(sp->srcA), CAR(sp->srcB))) {
- --size;
- res = keepA ? nodeA : nodeB;
- } else {
- ahx = hashmap_restore_hash(th, lvl, CAR(sp->srcA));
- bhx = hashmap_restore_hash(th, lvl, CAR(sp->srcB));
- sp->abm = 1 << hashmap_index(ahx);
- sp->bbm = 1 << hashmap_index(bhx);
+ sp->nodeA = *sp->srcA;
+ sp->nodeB = *sp->srcB;
- sp->srcA = &nodeA;
- sp->srcB = &nodeB;
- }
- break;
- }
- case TAG_PRIMARY_BOXED: { /* LEAF + NODE */
- sp->srcB = boxed_val(nodeB);
- ASSERT(is_header(*sp->srcB));
- hdrB = *sp->srcB++;
-
- ahx = hashmap_restore_hash(th, lvl, CAR(sp->srcA));
- sp->abm = 1 << hashmap_index(ahx);
- sp->srcA = &nodeA;
- switch(hdrB & _HEADER_MAP_SUBTAG_MASK) {
- case HAMT_SUBTAG_HEAD_ARRAY:
- sp->srcB++;
- sp->bbm = 0xffff;
- break;
+ if (sp->nodeA == sp->nodeB) {
+ res = sp->nodeA;
+ ctx->size -= is_list(sp->nodeB) ? 1 : hashmap_subtree_size(sp->nodeB);
+ }
+ else {
+ if (is_list(sp->nodeA)) { /* A is LEAF */
+ Eterm keyA = CAR(list_val(sp->nodeA));
+
+ if (is_list(sp->nodeB)) { /* LEAF + LEAF */
+ Eterm keyB = CAR(list_val(sp->nodeB));
+
+ if (EQ(keyA, keyB)) {
+ --ctx->size;
+ res = sp->nodeB;
+ sp->mix = 2; /* We assume values differ.
+ + Don't spend time comparing big values.
+ - Might waste some heap space for internal
+ nodes that could otherwise be reused. */
+ goto merge_nodes;
+ }
+ }
+ hx = hashmap_restore_hash(th, ctx->lvl, keyA);
+ sp->abm = 1 << hashmap_index(hx);
+ /* keep srcA pointing at the leaf */
+ }
+ else { /* A is NODE */
+ sp->srcA = boxed_val(sp->nodeA);
+ hdrA = *sp->srcA++;
+ ASSERT(is_header(hdrA));
+ switch (hdrA & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: {
+ sp->srcA++;
+ sp->abm = 0xffff;
+ break;
+ }
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcA++;
+ case HAMT_SUBTAG_NODE_BITMAP: {
+ sp->abm = MAP_HEADER_VAL(hdrA);
+ break;
+ }
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "bad header %ld\r\n", hdrA);
+ }
+ }
- case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
- case HAMT_SUBTAG_NODE_BITMAP:
- sp->bbm = MAP_HEADER_VAL(hdrB);
- break;
+ if (is_list(sp->nodeB)) { /* B is LEAF */
+ Eterm keyB = CAR(list_val(sp->nodeB));
- default:
- erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK);
- break;
- }
- break;
- }
- default:
- erl_exit(1, "bad primary tag %ld\r\n", nodeB);
- }
- break;
- }
- case TAG_PRIMARY_BOXED: { /* NODE + NODE */
- sp->srcA = boxed_val(nodeA);
- hdrA = *sp->srcA++;
- ASSERT(is_header(hdrA));
- switch (hdrA & _HEADER_MAP_SUBTAG_MASK) {
- case HAMT_SUBTAG_HEAD_ARRAY: {
- sp->srcA++;
- ASSERT(primary_tag(nodeB) == TAG_PRIMARY_BOXED);
- sp->abm = 0xffff;
- sp->srcB = boxed_val(nodeB);
- hdrB = *sp->srcB++;
- ASSERT(is_header(hdrB));
- switch (hdrB & _HEADER_MAP_SUBTAG_MASK) {
- case HAMT_SUBTAG_HEAD_ARRAY:
- sp->srcB++;
- sp->bbm = 0xffff;
- break;
- case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
- case HAMT_SUBTAG_NODE_BITMAP:
- sp->bbm = MAP_HEADER_VAL(hdrB);
- break;
- default:
- erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK);
- }
- break;
- }
- case HAMT_SUBTAG_HEAD_BITMAP: sp->srcA++;
- case HAMT_SUBTAG_NODE_BITMAP: {
- ASSERT(primary_tag(nodeB) == TAG_PRIMARY_BOXED);
- sp->abm = MAP_HEADER_VAL(hdrA);
- sp->srcB = boxed_val(nodeB);
- hdrB = *sp->srcB++;
- ASSERT(is_header(hdrB));
- switch (hdrB & _HEADER_MAP_SUBTAG_MASK) {
- case HAMT_SUBTAG_HEAD_ARRAY:
+ hx = hashmap_restore_hash(th, ctx->lvl, keyB);
+ sp->bbm = 1 << hashmap_index(hx);
+ /* keep srcB pointing at the leaf */
+ }
+ else { /* B is NODE */
+ sp->srcB = boxed_val(sp->nodeB);
+ hdrB = *sp->srcB++;
+ ASSERT(is_header(hdrB));
+ switch (hdrB & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_ARRAY: {
sp->srcB++;
- sp->bbm = 0xffff;
- break;
- case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
- case HAMT_SUBTAG_NODE_BITMAP:
- sp->bbm = MAP_HEADER_VAL(hdrB);
- break;
-
- default:
- erl_exit(1, "bad header tag %ld\r\n", *sp->srcB & _HEADER_MAP_SUBTAG_MASK);
- }
- break;
- }
- default:
- erl_exit(1, "bad primary tag %ld\r\n", nodeA);
- }
- break;
- }
- default:
- erl_exit(1, "bad primary tag %ld\r\n", nodeA);
+ sp->bbm = 0xffff;
+ break;
+ }
+ case HAMT_SUBTAG_HEAD_BITMAP: sp->srcB++;
+ case HAMT_SUBTAG_NODE_BITMAP: {
+ sp->bbm = MAP_HEADER_VAL(hdrB);
+ break;
+ }
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "bad header %ld\r\n", hdrB);
+ }
+ }
}
+merge_nodes:
+
for (;;) {
if (is_value(res)) { /* We have a complete (sub-)tree or leaf */
- if (lvl == 0)
+ int child_mix;
+ if (ctx->lvl == 0)
break;
/* Pop from stack and continue build parent node */
- lvl--;
+ ctx->lvl--;
+ child_mix = sp->mix;
sp = PSTACK_POP(s);
sp->array[sp->ix++] = res;
+ sp->mix |= child_mix;
res = THE_NON_VALUE;
if (sp->rbm) {
sp->srcA++;
sp->srcB++;
- keepA = sp->keepA;
}
} else { /* Start build a node */
sp->ix = 0;
sp->rbm = sp->abm | sp->bbm;
- ASSERT(!(sp->rbm == 0 && lvl > 0));
+ ASSERT(!(sp->rbm == 0 && ctx->lvl > 0));
}
+ if (--reds <= 0) {
+ goto trap;
+ }
+resume_from_trap:
+
while (sp->rbm) {
Uint32 next = sp->rbm & (sp->rbm-1);
Uint32 bit = sp->rbm ^ next;
@@ -1297,43 +1333,123 @@ recurse:
if (sp->abm & bit) {
if (sp->bbm & bit) {
/* Bit clash. Push and resolve by recursive merge */
- if (sp->rbm) {
- sp->keepA = keepA;
- }
- nodeA = *sp->srcA;
- nodeB = *sp->srcB;
- lvl++;
+ Eterm* srcA = sp->srcA;
+ Eterm* srcB = sp->srcB;
+ ctx->lvl++;
sp = PSTACK_PUSH(s);
+ sp->srcA = srcA;
+ sp->srcB = srcB;
+ sp->mix = 0;
goto recurse;
} else {
sp->array[sp->ix++] = *sp->srcA++;
+ sp->mix |= 1;
}
} else {
ASSERT(sp->bbm & bit);
sp->array[sp->ix++] = *sp->srcB++;
+ sp->mix |= 2;
}
}
- ASSERT(sp->ix == hashmap_bitcount(sp->abm | sp->bbm));
- if (lvl == 0) {
- nhp = HAllocX(p, HAMT_HEAD_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
- hp = nhp;
- *hp++ = (sp->ix == 16 ? MAP_HEADER_HAMT_HEAD_ARRAY
- : MAP_HEADER_HAMT_HEAD_BITMAP(sp->abm | sp->bbm));
- *hp++ = size;
- } else {
- nhp = HAllocX(p, HAMT_NODE_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
- hp = nhp;
- *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(sp->abm | sp->bbm);
- }
- memcpy(hp, sp->array, sp->ix * sizeof(Eterm));
- res = make_boxed(nhp);
+ switch (sp->mix) {
+ case 0: /* Nodes A and B contain the *EXACT* same sub-trees
+ => fall through and reuse nodeA */
+
+ case 1: /* Only unique A stuff => reuse nodeA */
+ res = sp->nodeA;
+ break;
+
+ case 2: /* Only unique B stuff => reuse nodeB */
+ res = sp->nodeB;
+ break;
+
+ case 3: /* We have a mix => must build new node */
+ ASSERT(sp->ix == hashmap_bitcount(sp->abm | sp->bbm));
+ if (ctx->lvl == 0) {
+ nhp = HAllocX(p, HAMT_HEAD_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
+ hp = nhp;
+ *hp++ = (sp->ix == 16 ? MAP_HEADER_HAMT_HEAD_ARRAY
+ : MAP_HEADER_HAMT_HEAD_BITMAP(sp->abm | sp->bbm));
+ *hp++ = ctx->size;
+ } else {
+ nhp = HAllocX(p, HAMT_NODE_BITMAP_SZ(sp->ix), HALLOC_EXTRA);
+ hp = nhp;
+ *hp++ = MAP_HEADER_HAMT_NODE_BITMAP(sp->abm | sp->bbm);
+ }
+ sys_memcpy(hp, sp->array, sp->ix * sizeof(Eterm));
+ res = make_boxed(nhp);
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT, "strange mix %d\r\n", sp->mix);
+ }
+ }
+
+ /* Done */
+
+#ifdef DEBUG
+ {
+ Eterm *head = hashmap_val(res);
+ Uint size = head[1];
+ Uint real_size = hashmap_subtree_size(res);
+ ASSERT(size == real_size);
+ }
+#endif
+
+ if (ctx != &local_ctx) {
+ ASSERT(ctx->trap_bin != THE_NON_VALUE);
+ ASSERT(p->flags & F_DISABLE_GC);
+ erts_set_gc_state(p, 1);
+ }
+ else {
+ ASSERT(ctx->trap_bin == THE_NON_VALUE);
+ ASSERT(!(p->flags & F_DISABLE_GC));
}
PSTACK_DESTROY(s);
UnUseTmpHeap(2,p);
+ BUMP_REDS(p, (initial_reds - reds) / MAP_MERGE_LOOP_FACTOR);
return res;
+
+trap: /* Yield */
+
+ if (ctx == &local_ctx) {
+ Binary* ctx_b = erts_create_magic_binary(sizeof(HashmapMergeContext),
+ hashmap_merge_ctx_destructor);
+ ctx = ERTS_MAGIC_BIN_DATA(ctx_b);
+ sys_memcpy(ctx, &local_ctx, sizeof(HashmapMergeContext));
+ hp = HAlloc(p, PROC_BIN_SIZE);
+ ASSERT(ctx->trap_bin == THE_NON_VALUE);
+ ctx->trap_bin = erts_mk_magic_binary_term(&hp, &MSO(p), ctx_b);
+
+ erts_set_gc_state(p, 0);
+ }
+ else {
+ ASSERT(ctx->trap_bin != THE_NON_VALUE);
+ ASSERT(p->flags & F_DISABLE_GC);
+ }
+
+ PSTACK_SAVE(s, &ctx->pstack);
+
+ BUMP_ALL_REDS(p);
+ ERTS_BIF_PREP_TRAP1(trap_ret, &hashmap_merge_trap_export,
+ p, ctx->trap_bin);
+ UnUseTmpHeap(2,p);
+ return trap_ret;
}
+static Uint hashmap_subtree_size(Eterm node) {
+ DECLARE_WSTACK(stack);
+ Uint size = 0;
+
+ hashmap_iterator_init(&stack, node, 0);
+ while (hashmap_iterator_next(&stack)) {
+ size++;
+ }
+ DESTROY_WSTACK(stack);
+ return size;
+}
+
+
static int hash_cmp(Uint32 ha, Uint32 hb)
{
int i;
@@ -1756,10 +1872,11 @@ void hashmap_iterator_init(ErtsWStack* s, Eterm node, int reverse) {
sz = 16;
break;
case HAMT_SUBTAG_HEAD_BITMAP:
- sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
+ case HAMT_SUBTAG_NODE_BITMAP:
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
break;
default:
- erl_exit(1, "bad header");
+ erl_exit(ERTS_ABORT_EXIT, "bad header");
}
WSTACK_PUSH3((*s), (UWord)THE_NON_VALUE, /* end marker */
@@ -1796,7 +1913,7 @@ Eterm* hashmap_iterator_next(ErtsWStack* s) {
ASSERT(sz < 17);
break;
default:
- erl_exit(1, "bad header");
+ erl_exit(ERTS_ABORT_EXIT, "bad header");
}
idx++;
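The merge now runs on a reduction budget: when the budget is exhausted, the partial state is stowed in a magic-binary context (plus a saved PSTACK) and the BIF traps to maps:merge_trap/1 to resume later. A minimal standalone model of that yield-and-resume pattern follows; the "work", the context fields and the budget are illustrative, not the real merge:

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        int  next_index;    /* where to resume */
        long accumulator;   /* partial result built so far */
    } merge_context;

    /* Returns 1 when done, 0 when it ran out of budget and yielded. */
    static int merge_step(const int *items, int nitems, long reds,
                          merge_context *ctx)
    {
        int i = ctx->next_index;
        while (i < nitems) {
            if (reds-- <= 0) {          /* out of reductions: trap */
                ctx->next_index = i;
                return 0;
            }
            ctx->accumulator += items[i++];
        }
        ctx->next_index = i;
        return 1;
    }

    int main(void)
    {
        int items[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
        merge_context ctx;
        memset(&ctx, 0, sizeof ctx);
        while (!merge_step(items, 10, 3, &ctx))  /* each call is one trap round */
            printf("yielded at index %d\n", ctx.next_index);
        printf("result: %ld\n", ctx.accumulator);
        return 0;
    }

As in the real code, the first call starts with a fresh context and every subsequent "trap" call resumes from the saved position until the whole structure has been processed.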
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 45fc949b81..27f6c6f00d 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1208,7 +1208,11 @@ typedef struct enif_resource_t
struct enif_resource_type_t* type;
#ifdef DEBUG
erts_refc_t nif_refc;
+# ifdef ARCH_32
+ byte align__[4];
+# endif
#endif
+
char data[1];
}ErlNifResource;
@@ -1384,7 +1388,7 @@ static void rollback_opened_resource_types(void)
static void nif_resource_dtor(Binary* bin)
{
- ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(bin);
+ ErlNifResource* resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ErlNifResourceType* type = resource->type;
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
@@ -1405,8 +1409,10 @@ static void nif_resource_dtor(Binary* bin)
void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
{
- Binary* bin = erts_create_magic_binary(SIZEOF_ErlNifResource(size), &nif_resource_dtor);
- ErlNifResource* resource = ERTS_MAGIC_BIN_DATA(bin);
+ Binary* bin = erts_create_magic_binary_x(SIZEOF_ErlNifResource(size),
+ &nif_resource_dtor,
+ 1); /* unaligned */
+ ErlNifResource* resource = ERTS_MAGIC_BIN_UNALIGNED_DATA(bin);
ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
resource->type = type;
@@ -1421,7 +1427,7 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t size)
void enif_release_resource(void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
#ifdef DEBUG
@@ -1435,7 +1441,7 @@ void enif_release_resource(void* obj)
void enif_keep_resource(void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
#ifdef DEBUG
@@ -1447,7 +1453,7 @@ void enif_keep_resource(void* obj)
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
}
@@ -1476,7 +1482,7 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
return 0; / * Or should we allow "resource binaries" as handles? * /
}*/
mbin = pb->val;
- resource = (ErlNifResource*) ERTS_MAGIC_BIN_DATA(mbin);
+ resource = (ErlNifResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
if (ERTS_MAGIC_BIN_DESTRUCTOR(mbin) != &nif_resource_dtor
|| resource->type != type) {
return 0;
@@ -1488,8 +1494,8 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
size_t enif_sizeof_resource(void* obj)
{
ErlNifResource* resource = DATA_TO_RESOURCE(obj);
- Binary* bin = &ERTS_MAGIC_BIN_FROM_DATA(resource)->binary;
- return ERTS_MAGIC_BIN_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
+ Binary* bin = &ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource)->binary;
+ return ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
}
@@ -2712,6 +2718,8 @@ erts_unload_nif(struct erl_module_nif* lib)
void erl_nif_init()
{
+ ERTS_CT_ASSERT((offsetof(ErlNifResource,data) % 8) == ERTS_MAGIC_BIN_BYTES_TO_ALIGN);
+
resource_type_list.next = &resource_type_list;
resource_type_list.prev = &resource_type_list;
resource_type_list.dtor = NULL;
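The new compile-time assertion above ties the ErlNifResource layout to the unaligned magic-binary layout so that user payloads stay 8-byte aligned. A sketch of the same kind of static layout check, assuming a made-up resource struct rather than the real one:

    #include <stddef.h>

    struct model_resource {
        void *type;          /* stand-in bookkeeping fields ...        */
        void *extra;
        char  data[1];       /* ... followed by the user payload       */
    };

    /* C11 static assertion; the emulator uses its own ERTS_CT_ASSERT macro
     * and checks against ERTS_MAGIC_BIN_BYTES_TO_ALIGN instead. */
    _Static_assert(offsetof(struct model_resource, data) % 8 == 0,
                   "resource payload must stay 8-byte aligned");

    int main(void) { return 0; }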
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 6d827c6bda..0950d7e7ef 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -51,6 +51,9 @@ static Uint dist_entries;
static int references_atoms_need_init = 1;
+static ErtsMonotonicTime orig_node_tab_delete_delay;
+static ErtsMonotonicTime node_tab_delete_delay;
+
/* -- The distribution table ---------------------------------------------- */
#ifdef DEBUG
@@ -290,21 +293,46 @@ DistEntry *erts_find_dist_entry(Eterm sysname)
return res;
}
-void erts_delete_dist_entry(DistEntry *dep)
+static void try_delete_dist_entry(void *vdep)
+{
+ DistEntry *dep = (DistEntry *) vdep;
+ erts_aint_t refc;
+
+ erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ /*
+ * Another thread might have looked up this dist entry after
+ * we decided to delete it (refc became zero). If so, the other
+ * thread incremented refc twice. Once for the new reference
+ * and once for this thread.
+ *
+ * If refc reach -1, noone has used the entry since we
+ * set up the timer. Delete the entry.
+ *
+ * If refc reach 0, the entry is currently not in use
+ * but has been used since we set up the timer. Set up a
+ * new timer.
+ *
+ * If refc > 0, the entry is in use. Keep the entry.
+ */
+ refc = erts_refc_dectest(&dep->refc, -1);
+ if (refc == -1)
+ (void) hash_erase(&erts_dist_table, (void *) dep);
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+
+ if (refc == 0)
+ erts_schedule_delete_dist_entry(dep);
+}
+
+void erts_schedule_delete_dist_entry(DistEntry *dep)
{
ASSERT(dep != erts_this_dist_entry);
- if(dep != erts_this_dist_entry) {
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
- /*
- * Another thread might have looked up this dist entry after
- * we decided to delete it (refc became zero). If so, the other
- * thread incremented refc twice. Once for the new reference
- * and once for this thread. Therefore, delete dist entry if
- * refc is 0 or -1 after a decrement.
- */
- if (erts_refc_dectest(&dep->refc, -1) <= 0)
- (void) hash_erase(&erts_dist_table, (void *) dep);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ if (dep != erts_this_dist_entry) {
+ if (node_tab_delete_delay == 0)
+ try_delete_dist_entry((void *) dep);
+ else if (node_tab_delete_delay > 0)
+ erts_start_timer_callback(node_tab_delete_delay,
+ try_delete_dist_entry,
+ (void *) dep);
}
}
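The comment above encodes a three-way decision on the reference count after the timer drops the reference it held for itself. A toy rendition of that decision (only erts_refc_dectest()'s decrement-and-return behaviour is taken from the real code; everything else is illustrative):

    #include <stdio.h>

    static const char *delayed_delete_decision(long refc_after_dec)
    {
        if (refc_after_dec == -1)
            return "unused since the timer was set up: erase from the table";
        if (refc_after_dec == 0)
            return "used meanwhile, free again now: schedule a new delete timer";
        return "still referenced: keep the entry";
    }

    int main(void)
    {
        long samples[] = { -1, 0, 3 };
        int i;
        for (i = 0; i < 3; i++)
            printf("refc=%ld -> %s\n", samples[i],
                   delayed_delete_decision(samples[i]));
        return 0;
    }

The same decision is repeated below for ErlNode entries in try_delete_node().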
@@ -556,14 +584,14 @@ erts_node_table_size(void)
#endif
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
#ifdef DEBUG
hash_get_info(&hi, &erts_node_table);
ASSERT(node_entries == hi.objs);
#endif
res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode);
if (lock)
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
return res;
}
@@ -572,10 +600,10 @@ erts_node_table_info(int to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
hash_info(to, to_arg, &erts_node_table);
if (lock)
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
}
@@ -609,21 +637,46 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint creation)
return res;
}
-void erts_delete_node(ErlNode *enp)
+static void try_delete_node(void *venp)
+{
+ ErlNode *enp = (ErlNode *) venp;
+ erts_aint_t refc;
+
+ erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ /*
+ * Another thread might have looked up this node after we
+ * decided to delete it (refc became zero). If so, the other
+ * thread incremented refc twice. Once for the new reference
+ * and once for this thread.
+ *
+ * If refc reach -1, noone has used the entry since we
+ * set up the timer. Delete the entry.
+ *
+ * If refc reach 0, the entry is currently not in use
+ * but has been used since we set up the timer. Set up a
+ * new timer.
+ *
+ * If refc > 0, the entry is in use. Keep the entry.
+ */
+ refc = erts_refc_dectest(&enp->refc, -1);
+ if (refc == -1)
+ (void) hash_erase(&erts_node_table, (void *) enp);
+ erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+
+ if (refc == 0)
+ erts_schedule_delete_node(enp);
+}
+
+void erts_schedule_delete_node(ErlNode *enp)
{
ASSERT(enp != erts_this_node);
- if(enp != erts_this_node) {
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
- /*
- * Another thread might have looked up this node after we
- * decided to delete it (refc became zero). If so, the other
- * thread incremented refc twice. Once for the new reference
- * and once for this thread. Therefore, delete node if refc
- * is 0 or -1 after a decrement.
- */
- if (erts_refc_dectest(&enp->refc, -1) <= 0)
- (void) hash_erase(&erts_node_table, (void *) enp);
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ if (enp != erts_this_node) {
+ if (node_tab_delete_delay == 0)
+ try_delete_node((void *) enp);
+ else if (node_tab_delete_delay > 0)
+ erts_start_timer_callback(node_tab_delete_delay,
+ try_delete_node,
+ (void *) enp);
}
}
@@ -651,7 +704,7 @@ static void print_node(void *venp, void *vpndp)
erts_print(pndp->to, pndp->to_arg, " %d", enp->creation);
#ifdef DEBUG
erts_print(pndp->to, pndp->to_arg, " (refc=%ld)",
- erts_refc_read(&enp->refc, 1));
+ erts_refc_read(&enp->refc, 0));
#endif
pndp->no_sysname++;
}
@@ -674,13 +727,13 @@ void erts_print_node_info(int to,
pnd.no_total = 0;
if (lock)
- erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+ erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
hash_foreach(&erts_node_table, print_node, (void *) &pnd);
if (pnd.no_sysname != 0) {
erts_print(to, to_arg, "\n");
}
if (lock)
- erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+ erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
if(no_sysname)
*no_sysname = pnd.no_sysname;
@@ -714,11 +767,28 @@ erts_set_this_node(Eterm sysname, Uint creation)
}
-void erts_init_node_tables(void)
+Uint
+erts_delayed_node_table_gc(void)
+{
+ if (node_tab_delete_delay < 0)
+ return (Uint) ERTS_NODE_TAB_DELAY_GC_INFINITY;
+ if (node_tab_delete_delay == 0)
+ return (Uint) 0;
+ return (Uint) ((node_tab_delete_delay-1)/1000 + 1);
+}
+
+void erts_init_node_tables(int dd_sec)
{
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
HashFunctions f;
+ if (dd_sec == ERTS_NODE_TAB_DELAY_GC_INFINITY)
+ node_tab_delete_delay = (ErtsMonotonicTime) -1;
+ else
+ node_tab_delete_delay = ((ErtsMonotonicTime) dd_sec)*1000;
+
+ orig_node_tab_delete_delay = node_tab_delete_delay;
+
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
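
erts_init_node_tables() stores the delay in milliseconds, with -1 as the "never delete" sentinel, while erts_delayed_node_table_gc() reports it back in whole seconds, rounded up. Here is a standalone sketch of that round trip; DELAY_GC_* and the helper names are assumptions that merely mirror the ERTS_NODE_TAB_DELAY_GC_* macros added to erl_node_tables.h later in this diff.

    #include <stdio.h>

    /* Illustrative constants mirroring the ERTS_NODE_TAB_DELAY_GC_* macros. */
    #define DELAY_GC_MAX      (100*1000*1000)
    #define DELAY_GC_INFINITY (DELAY_GC_MAX + 1)

    /* Seconds -> milliseconds, as in erts_init_node_tables(). */
    static long long delay_ms_from_sec(int dd_sec)
    {
        if (dd_sec == DELAY_GC_INFINITY)
            return -1;                          /* "never delete" sentinel */
        return (long long) dd_sec * 1000;
    }

    /* Milliseconds -> whole seconds rounded up, as in erts_delayed_node_table_gc(). */
    static unsigned long long delay_sec_from_ms(long long delay_ms)
    {
        if (delay_ms < 0)
            return DELAY_GC_INFINITY;
        if (delay_ms == 0)
            return 0;
        return (unsigned long long) ((delay_ms - 1) / 1000 + 1);
    }

    int main(void)
    {
        int secs[] = { 0, 1, 60, DELAY_GC_INFINITY };
        for (int i = 0; i < 4; i++) {
            long long ms = delay_ms_from_sec(secs[i]);
            printf("%d s -> %lld ms -> %llu s\n", secs[i], ms, delay_sec_from_ms(ms));
        }
        return 0;
    }
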
@@ -847,6 +917,7 @@ static Eterm AM_dist_references;
static Eterm AM_node_references;
static Eterm AM_system;
static Eterm AM_timer;
+static Eterm AM_delayed_delete_timer;
static void setup_reference_table(void);
static Eterm reference_table_term(Uint **hpp, Uint *szp);
@@ -881,8 +952,10 @@ typedef struct dist_referrer_ {
int heap_ref;
int node_ref;
int ctrl_ref;
+ int system_ref;
Eterm id;
Uint creation;
+ Uint id_heap[ID_HEAP_SIZE];
} DistReferrer;
typedef struct {
@@ -931,6 +1004,7 @@ erts_get_node_and_dist_references(struct process *proc)
INIT_AM(node_references);
INIT_AM(timer);
INIT_AM(system);
+ INIT_AM(delayed_delete_timer);
references_atoms_need_init = 0;
}
@@ -988,17 +1062,25 @@ insert_dist_referrer(ReferredDist *referred_dist,
sizeof(DistReferrer));
drp->next = referred_dist->referrers;
referred_dist->referrers = drp;
- drp->id = id;
+ if(IS_CONST(id))
+ drp->id = id;
+ else {
+ Uint *hp = &drp->id_heap[0];
+ ASSERT(is_tuple(id));
+ drp->id = copy_struct(id, size_object(id), &hp, NULL);
+ }
drp->creation = creation;
drp->heap_ref = 0;
drp->node_ref = 0;
drp->ctrl_ref = 0;
+ drp->system_ref = 0;
}
switch (type) {
case NODE_REF: drp->node_ref++; break;
case CTRL_REF: drp->ctrl_ref++; break;
case HEAP_REF: drp->heap_ref++; break;
+ case SYSTEM_REF: drp->system_ref++; break;
default: ASSERT(0);
}
}
@@ -1261,6 +1343,33 @@ insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
#endif
static void
+insert_delayed_delete_node(void *state,
+ ErtsMonotonicTime timeout_pos,
+ void *vnp)
+{
+ DeclareTmpHeapNoproc(heap,3);
+ UseTmpHeapNoproc(3);
+ insert_node((ErlNode *) vnp,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer));
+ UnUseTmpHeapNoproc(3);
+}
+
+static void
+insert_delayed_delete_dist_entry(void *state,
+ ErtsMonotonicTime timeout_pos,
+ void *vdep)
+{
+ DeclareTmpHeapNoproc(heap,3);
+ UseTmpHeapNoproc(3);
+ insert_dist_entry((DistEntry *) vdep,
+ SYSTEM_REF,
+ TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer),
+ 0);
+ UnUseTmpHeapNoproc(3);
+}
+
+static void
setup_reference_table(void)
{
ErlHeapFragment *hfp;
@@ -1288,6 +1397,13 @@ setup_reference_table(void)
/* Go through the whole system, and build a table of all references
to ErlNode and DistEntry structures */
+ erts_debug_callback_timer_foreach(try_delete_node,
+ insert_delayed_delete_node,
+ NULL);
+ erts_debug_callback_timer_foreach(try_delete_dist_entry,
+ insert_delayed_delete_dist_entry,
+ NULL);
+
UseTmpHeapNoproc(3);
insert_node(erts_this_node,
SYSTEM_REF,
@@ -1601,7 +1717,7 @@ reference_table_term(Uint **hpp, Uint *szp)
tup = MK_2TUP(referred_nodes[i].node->sysname,
MK_UINT(referred_nodes[i].node->creation));
- tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 1)), nril);
+ tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 0)), nril);
nl = MK_CONS(tup, nl);
}
@@ -1624,6 +1740,10 @@ reference_table_term(Uint **hpp, Uint *szp)
tup = MK_2TUP(AM_heap, MK_UINT(drp->heap_ref));
drl = MK_CONS(tup, drl);
}
+ if(drp->system_ref) {
+ tup = MK_2TUP(AM_system, MK_UINT(drp->system_ref));
+ drl = MK_CONS(tup, drl);
+ }
if (is_internal_pid(drp->id)) {
ASSERT(!drp->node_ref);
@@ -1633,6 +1753,14 @@ reference_table_term(Uint **hpp, Uint *szp)
ASSERT(drp->ctrl_ref && !drp->node_ref);
tup = MK_2TUP(AM_port, drp->id);
}
+ else if (is_tuple(drp->id)) {
+ Eterm *t;
+ ASSERT(drp->system_ref && !drp->node_ref
+ && !drp->ctrl_ref && !drp->heap_ref);
+ t = tuple_val(drp->id);
+ ASSERT(2 == arityval(t[0]));
+ tup = MK_2TUP(t[1], t[2]);
+ }
else {
ASSERT(!drp->ctrl_ref && drp->node_ref);
ASSERT(is_atom(drp->id));
@@ -1650,7 +1778,7 @@ reference_table_term(Uint **hpp, Uint *szp)
/* DistList = [{Dist, Refc, ReferenceIdList}] */
tup = MK_3TUP(referred_dists[i].dist->sysname,
- MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 1)),
+ MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 0)),
dril);
dl = MK_CONS(tup, dl);
}
@@ -1705,3 +1833,15 @@ delete_reference_table(void)
}
}
+void
+erts_debug_test_node_tab_delayed_delete(Sint64 millisecs)
+{
+ erts_smp_thr_progress_block();
+
+ if (millisecs < 0)
+ node_tab_delete_delay = orig_node_tab_delete_delay;
+ else
+ node_tab_delete_delay = millisecs;
+
+ erts_smp_thr_progress_unblock();
+}
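
A standalone model of the pattern used by erts_debug_test_node_tab_delayed_delete(): stop concurrent activity, swap the global delay, resume. A pthread rwlock write lock stands in for erts_smp_thr_progress_block()/unblock(); all names and values are illustrative.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t world = PTHREAD_RWLOCK_INITIALIZER;
    static long long delete_delay_ms = 60000;             /* illustrative default */
    static const long long orig_delete_delay_ms = 60000;

    static void debug_set_delayed_delete(long long ms)
    {
        pthread_rwlock_wrlock(&world);  /* stands in for erts_smp_thr_progress_block()   */
        delete_delay_ms = (ms < 0) ? orig_delete_delay_ms : ms;
        pthread_rwlock_unlock(&world);  /* stands in for erts_smp_thr_progress_unblock() */
    }

    int main(void)
    {
        debug_set_delayed_delete(0);    /* test mode: delete immediately                 */
        printf("delay now %lld ms\n", delete_delay_ms);
        debug_set_delayed_delete(-1);   /* negative argument restores the original value */
        printf("delay now %lld ms\n", delete_delay_ms);
        return 0;
    }
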
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index af60071ea5..54c5cd1d11 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -46,6 +46,10 @@
#define ERTS_PORT_TASK_ONLY_BASIC_TYPES__
#include "erl_port_task.h"
#undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
+
+#define ERTS_NODE_TAB_DELAY_GC_DEFAULT (60)
+#define ERTS_NODE_TAB_DELAY_GC_MAX (100*1000*1000)
+#define ERTS_NODE_TAB_DELAY_GC_INFINITY (ERTS_NODE_TAB_DELAY_GC_MAX+1)
#define ERST_INTERNAL_CHANNEL_NO 0
@@ -166,20 +170,21 @@ extern DistEntry *erts_this_dist_entry;
extern ErlNode *erts_this_node;
extern char *erts_this_node_sysname; /* must match erl_node_tables.c */
+Uint erts_delayed_node_table_gc(void);
DistEntry *erts_channel_no_to_dist_entry(Uint);
DistEntry *erts_sysname_to_connected_dist_entry(Eterm);
DistEntry *erts_find_or_insert_dist_entry(Eterm);
DistEntry *erts_find_dist_entry(Eterm);
-void erts_delete_dist_entry(DistEntry *);
+void erts_schedule_delete_dist_entry(DistEntry *);
Uint erts_dist_table_size(void);
void erts_dist_table_info(int, void *);
void erts_set_dist_entry_not_connected(DistEntry *);
void erts_set_dist_entry_connected(DistEntry *, Eterm, Uint);
ErlNode *erts_find_or_insert_node(Eterm, Uint);
-void erts_delete_node(ErlNode *);
+void erts_schedule_delete_node(ErlNode *);
void erts_set_this_node(Eterm, Uint);
Uint erts_node_table_size(void);
-void erts_init_node_tables(void);
+void erts_init_node_tables(int);
void erts_node_table_info(int, void *);
void erts_print_node_info(int, void *, Eterm, int*, int*);
Eterm erts_get_node_and_dist_references(struct process *);
@@ -204,7 +209,7 @@ erts_deref_dist_entry(DistEntry *dep)
{
ASSERT(dep);
if (erts_refc_dectest(&dep->refc, 0) == 0)
- erts_delete_dist_entry(dep);
+ erts_schedule_delete_dist_entry(dep);
}
ERTS_GLB_INLINE void
@@ -212,7 +217,7 @@ erts_deref_node_entry(ErlNode *np)
{
ASSERT(np);
if (erts_refc_dectest(&np->refc, 0) == 0)
- erts_delete_node(np);
+ erts_schedule_delete_node(np);
}
ERTS_GLB_INLINE void
@@ -253,5 +258,6 @@ erts_smp_de_links_unlock(DistEntry *dep)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+void erts_debug_test_node_tab_delayed_delete(Sint64 millisecs);
#endif
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 4940ffc4a0..43e84a1be1 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -165,6 +165,9 @@ Uint erts_no_dirty_cpu_schedulers;
Uint erts_no_dirty_io_schedulers;
#endif
+static char *erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_NO_FLAGS] = {0};
+int erts_aux_work_no_flags = ERTS_SSI_AUX_WORK_NO_FLAGS;
+
#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_VERY_LAZY (4*1024*1024)
#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_LAZY (512*1024)
#define ERTS_THR_PRGR_LATER_CLEANUP_OP_THRESHOLD_MEDIUM (64*1024)
@@ -508,6 +511,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#ifdef ERTS_SSI_AUX_WORK_REAP_PORTS
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
#endif
+ valid |= ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
if (~valid & value)
erl_exit(ERTS_ABORT_EXIT,
@@ -566,6 +570,41 @@ erts_pre_init_process(void)
erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
#endif
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX]
+ = "DELAYED_AW_WAKEUP";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_IX]
+ = "DD";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX]
+ = "DD_THR_PRGR";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX]
+ = "FIX_ALLOC_DEALLOC";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX]
+ = "FIX_ALLOC_LOWER_LIM";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX]
+ = "THR_PRGR_LATER_OP";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX]
+ = "CNCLD_TMRS";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX]
+ = "CNCLD_TMRS_THR_PRGR";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_IX]
+ = "ASYNC_READY";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX]
+ = "ASYNC_READY_CLEAN";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX]
+ = "MISC_THR_PRGR";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX]
+ = "MISC";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX]
+ = "CHECK_CHILDREN";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX]
+ = "SET_TMO";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
+ = "MSEG_CACHE_CHECK";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_REAP_PORTS_IX]
+ = "REAP_PORTS";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX]
+ = "DEBUG_WAIT_COMPLETED";
+
#ifdef ERTS_ENABLE_LOCK_CHECK
{
int ix;
@@ -1193,11 +1232,11 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
ERTS_DBG_CHK_SSI_AUX_WORK(ssi);
old_flgs = erts_atomic32_read_nob(&ssi->aux_work);
- if ((old_flgs & flgs) == 0) {
+ if ((old_flgs & flgs) != flgs) {
old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
- if ((old_flgs & flgs) == 0) {
+ if ((old_flgs & flgs) != flgs) {
#ifdef ERTS_SMP
erts_sched_poke(ssi);
#else
@@ -1217,7 +1256,7 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs);
- if ((old_flgs & flgs) == 0) {
+ if ((old_flgs & flgs) != flgs) {
#ifdef ERTS_SMP
erts_sched_poke(ssi);
#else
@@ -1715,11 +1754,6 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
awdp->dd.thr_prgr = wakeup;
haw_thr_prgr_soft_wakeup(awdp, wakeup);
}
- else if (awdp->dd.completed_callback) {
- awdp->dd.completed_callback(awdp->dd.completed_arg);
- awdp->dd.completed_callback = NULL;
- awdp->dd.completed_arg = NULL;
- }
return aux_work & ~ERTS_SSI_AUX_WORK_DD;
}
@@ -1761,11 +1795,6 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
}
else {
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR);
- if (awdp->dd.completed_callback) {
- awdp->dd.completed_callback(awdp->dd.completed_arg);
- awdp->dd.completed_callback = NULL;
- awdp->dd.completed_arg = NULL;
- }
}
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
@@ -1955,78 +1984,142 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *),
#endif
}
-#ifdef ERTS_SMP
+static ERTS_INLINE erts_aint32_t
+handle_debug_wait_completed(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ erts_aint32_t saved_aux_work, flags;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+
+ flags = awdp->debug.wait_completed.flags;
+
+ if (aux_work & flags)
+ return aux_work;
+
+ saved_aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
-static erts_atomic32_t completed_dealloc_count;
+ if (saved_aux_work & flags)
+ return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+
+ awdp->debug.wait_completed.callback(awdp->debug.wait_completed.arg);
+
+ awdp->debug.wait_completed.flags = 0;
+ awdp->debug.wait_completed.callback = NULL;
+ awdp->debug.wait_completed.arg = NULL;
+
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED);
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+}
+
+static erts_atomic32_t debug_wait_completed_count;
+static int debug_wait_completed_flags;
static void
-completed_dealloc(void *vproc)
+thr_debug_wait_completed(void *vproc)
{
- if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) {
+ if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == 0) {
erts_resume((Process *) vproc, (ErtsProcLocks) 0);
erts_proc_dec_refc((Process *) vproc);
}
}
static void
-setup_completed_dealloc(void *vproc)
+setup_thr_debug_wait_completed(void *vproc)
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
- ErtsAuxWorkData *awdp = (esdp
- ? &esdp->aux_work_data
- : aux_thread_aux_work_data);
- erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
- set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DD);
- awdp->dd.completed_callback = completed_dealloc;
- awdp->dd.completed_arg = vproc;
+ ErtsAuxWorkData *awdp;
+ erts_aint32_t wait_flags, aux_work_flags;
+#ifdef ERTS_SMP
+ awdp = esdp ? &esdp->aux_work_data : aux_thread_aux_work_data;
+#else
+ awdp = &esdp->aux_work_data;
+#endif
+
+ wait_flags = 0;
+ aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
+
+ if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS) {
+ erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
+ wait_flags |= (ERTS_SSI_AUX_WORK_DD
+ | ERTS_SSI_AUX_WORK_DD_THR_PRGR
+ | ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP);
+#ifdef ERTS_SMP
+ aux_work_flags |= ERTS_SSI_AUX_WORK_DD;
+#endif
+ }
+
+ if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) {
+ wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS
+ | ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR
+ | ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP);
+#ifdef ERTS_SMP
+ if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp))
+ aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+#endif
+ }
+
+ set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags);
+
+ awdp->debug.wait_completed.flags = wait_flags;
+ awdp->debug.wait_completed.callback = thr_debug_wait_completed;
+ awdp->debug.wait_completed.arg = vproc;
}
static void
-prep_setup_completed_dealloc(void *vproc)
+prep_setup_thr_debug_wait_completed(void *vproc)
{
- erts_aint32_t count = (erts_aint32_t) (erts_no_schedulers+1);
- if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == count) {
+ erts_aint32_t count = (erts_aint32_t) erts_no_schedulers;
+#ifdef ERTS_SMP
+ count += 1; /* aux thread */
+#endif
+ if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) {
/* scheduler threads */
erts_schedule_multi_misc_aux_work(0,
erts_no_schedulers,
- setup_completed_dealloc,
+ setup_thr_debug_wait_completed,
vproc);
+#ifdef ERTS_SMP
/* aux_thread */
erts_schedule_misc_aux_work(0,
- setup_completed_dealloc,
+ setup_thr_debug_wait_completed,
vproc);
+#endif
}
}
-#endif /* ERTS_SMP */
int
-erts_debug_wait_deallocations(Process *c_p)
+erts_debug_wait_completed(Process *c_p, int flags)
{
-#ifndef ERTS_SMP
- erts_alloc_fix_alloc_shrink(1, 0);
- return 1;
-#else
/* Only one process at a time can do this */
- erts_aint32_t count = (erts_aint32_t) (2*(erts_no_schedulers+1));
- if (0 == erts_atomic32_cmpxchg_mb(&completed_dealloc_count,
+ erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers);
+#ifdef ERTS_SMP
+ count += 2; /* aux thread */
+#endif
+ if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count,
count,
0)) {
+ debug_wait_completed_flags = flags;
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
erts_proc_inc_refc(c_p);
/* scheduler threads */
erts_schedule_multi_misc_aux_work(0,
erts_no_schedulers,
- prep_setup_completed_dealloc,
+ prep_setup_thr_debug_wait_completed,
(void *) c_p);
+#ifdef ERTS_SMP
/* aux_thread */
erts_schedule_misc_aux_work(0,
- prep_setup_completed_dealloc,
+ prep_setup_thr_debug_wait_completed,
(void *) c_p);
+#endif
return 1;
}
return 0;
-#endif
}
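
The counting in erts_debug_wait_completed() is compact but subtle: the counter starts at twice the number of participating threads, the thread whose first-phase decrement leaves exactly one count per thread kicks off the second phase, and the final decrement resumes the suspended process. Below is a minimal pthread/C11-atomics sketch of that two-phase countdown; N, setup_scheduled and the messages are illustrative, not ERTS names.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define N 4                       /* number of participating threads (illustrative) */

    static atomic_int count;          /* starts at 2*N */
    static atomic_int setup_scheduled;

    static void *worker(void *arg)
    {
        (void) arg;

        /* Phase one: cf. prep_setup_thr_debug_wait_completed(). */
        if (atomic_fetch_sub(&count, 1) - 1 == N)
            atomic_store(&setup_scheduled, 1);  /* "schedule setup on all threads" */

        /* Wait until the last phase-one thread has triggered phase two. */
        while (!atomic_load(&setup_scheduled))
            ;

        /* Phase two: cf. thr_debug_wait_completed() once the aux work is done. */
        if (atomic_fetch_sub(&count, 1) - 1 == 0)
            printf("all threads done: resume the suspended process\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t tid[N];
        atomic_init(&count, 2 * N);
        atomic_init(&setup_scheduled, 0);
        for (int i = 0; i < N; i++)
            pthread_create(&tid[i], NULL, worker, NULL);
        for (int i = 0; i < N; i++)
            pthread_join(tid[i], NULL);
        return 0;
    }
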
@@ -2227,6 +2320,14 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
+ /*
+ * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED *needs* to be
+ * the last flag checked!
+ */
+
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED,
+ handle_debug_wait_completed);
+
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
#ifdef ERTS_SMP
@@ -5354,8 +5455,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST;
awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
- awdp->dd.completed_callback = NULL;
- awdp->dd.completed_arg = NULL;
awdp->cncld_tmrs.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->later_op.thr_prgr = ERTS_THR_PRGR_VAL_FIRST;
awdp->later_op.size = 0;
@@ -5386,6 +5485,9 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->delayed_wakeup.sched2jix[i] = -1;
}
#endif
+ awdp->debug.wait_completed.flags = 0;
+ awdp->debug.wait_completed.callback = NULL;
+ awdp->debug.wait_completed.arg = NULL;
}
static void
@@ -5694,11 +5796,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
init_swtreq_alloc();
#endif
+ erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */
+ debug_wait_completed_flags = 0;
#ifdef ERTS_SMP
- erts_atomic32_init_nob(&completed_dealloc_count, 0); /* debug only */
-
aux_thread_aux_work_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
sizeof(ErtsAuxWorkData));
@@ -10855,7 +10957,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->msg_inq.len = 0;
#endif
p->bif_timers = NULL;
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
p->accessor_bif_timers = NULL;
+#endif
p->mbuf = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
@@ -11035,7 +11139,9 @@ void erts_init_empty_process(Process *p)
p->msg.save = &p->msg.first;
p->msg.len = 0;
p->bif_timers = NULL;
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
p->accessor_bif_timers = NULL;
+#endif
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
@@ -11130,7 +11236,9 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
ASSERT(p->bif_timers == NULL);
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
ASSERT(p->accessor_bif_timers == NULL);
+#endif
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -12086,6 +12194,7 @@ erts_continue_exit_process(Process *p)
p->bif_timers = NULL;
}
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
if (p->accessor_bif_timers) {
if (erts_detach_accessor_bif_timers(p,
p->accessor_bif_timers,
@@ -12096,6 +12205,7 @@ erts_continue_exit_process(Process *p)
ASSERT(erts_proc_read_refc(p) > 0);
p->accessor_bif_timers = NULL;
}
+#endif
#ifdef ERTS_SMP
if (p->flags & F_HAVE_BLCKD_MSCHED) {
@@ -12458,45 +12568,13 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work);
erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: ");
- for (i = 0; i < ERTS_SSI_AUX_WORK_MAX && flg; i++) {
+ for (i = 0; i < ERTS_SSI_AUX_WORK_NO_FLAGS && flg; i++) {
erts_aint32_t chk = (1 << i);
if (flg & chk) {
- switch (chk) {
- case ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP:
- erts_print(to, to_arg, "DELAYED_AW_WAKEUP"); break;
- case ERTS_SSI_AUX_WORK_DD:
- erts_print(to, to_arg, "DELAYED_DEALLOC"); break;
- case ERTS_SSI_AUX_WORK_DD_THR_PRGR:
- erts_print(to, to_arg, "DELAYED_DEALLOC_THR_PRGR"); break;
- case ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC:
- erts_print(to, to_arg, "FIX_ALLOC_DEALLOC"); break;
- case ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM:
- erts_print(to, to_arg, "FIX_ALLOC_LOWER_LIM"); break;
- case ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP:
- erts_print(to, to_arg, "THR_PRGR_LATER_OP"); break;
- case ERTS_SSI_AUX_WORK_CNCLD_TMRS:
- erts_print(to, to_arg, "CANCELED_TIMERS"); break;
- case ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR:
- erts_print(to, to_arg, "CANCELED_TIMERS_THR_PRGR"); break;
- case ERTS_SSI_AUX_WORK_ASYNC_READY:
- erts_print(to, to_arg, "ASYNC_READY"); break;
- case ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN:
- erts_print(to, to_arg, "ASYNC_READY_CLEAN"); break;
- case ERTS_SSI_AUX_WORK_MISC_THR_PRGR:
- erts_print(to, to_arg, "MISC_THR_PRGR"); break;
- case ERTS_SSI_AUX_WORK_MISC:
- erts_print(to, to_arg, "MISC"); break;
- case ERTS_SSI_AUX_WORK_CHECK_CHILDREN:
- erts_print(to, to_arg, "CHECK_CHILDREN"); break;
- case ERTS_SSI_AUX_WORK_SET_TMO:
- erts_print(to, to_arg, "SET_TMO"); break;
- case ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK:
- erts_print(to, to_arg, "MSEG_CACHE_CHECK"); break;
- case ERTS_SSI_AUX_WORK_REAP_PORTS:
- erts_print(to, to_arg, "REAP_PORTS"); break;
- default:
- erts_print(to, to_arg, "UNKNOWN(%d)", flg); break;
- }
+ if (erts_aux_work_flag_descr[i])
+ erts_print(to, to_arg, "%s", erts_aux_work_flag_descr[i]);
+ else
+ erts_print(to, to_arg, "1<<%d", i);
if (flg > chk)
erts_print(to, to_arg, " | ");
flg -= chk;
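
The switch statement removed above becomes a table lookup keyed by bit index. A standalone sketch of that decoding follows, with a numeric fallback for bits that have no description; the table contents here are examples only.

    #include <stdio.h>
    #include <stdint.h>

    #define NO_FLAGS 4

    static const char *flag_descr[NO_FLAGS] = {
        "DELAYED_AW_WAKEUP", "DD", NULL /* deliberately unnamed */, "MISC"
    };

    static void print_flags(int32_t flg)
    {
        for (int i = 0; i < NO_FLAGS && flg; i++) {
            int32_t chk = ((int32_t) 1) << i;
            if (flg & chk) {
                if (flag_descr[i])
                    printf("%s", flag_descr[i]);
                else
                    printf("1<<%d", i);      /* fallback for unnamed bits */
                if (flg > chk)
                    printf(" | ");
                flg -= chk;
            }
        }
        printf("\n");
    }

    int main(void)
    {
        print_flags((1 << 0) | (1 << 2) | (1 << 3));
        /* prints: DELAYED_AW_WAKEUP | 1<<2 | MISC */
        return 0;
    }
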
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index b1c30e7652..1da1239609 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -268,28 +268,73 @@ typedef enum {
| ERTS_SSI_FLG_SUSPENDED)
/*
- * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
- * eachother. Most frequent - lowest bit number.
+ * Keep the ERTS_SSI_AUX_WORK flags ordered by expected frequency
+ * relative to each other: most frequent at the lowest index.
+ *
+ * ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX *needs* to be
+ * the highest index...
+ *
+ * Remember to update the flag descriptions in erts_pre_init_process()
+ * when adding new flags...
*/
-#define ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP (((erts_aint32_t) 1) << 0)
-#define ERTS_SSI_AUX_WORK_DD (((erts_aint32_t) 1) << 1)
-#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 2)
-#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 3)
-#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4)
-#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_AUX_WORK_CNCLD_TMRS (((erts_aint32_t) 1) << 6)
-#define ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR (((erts_aint32_t) 1) << 7)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 8)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 9)
-#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 10)
-#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 11)
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 12)
-#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 13)
-#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 14)
-#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 15)
-
-#define ERTS_SSI_AUX_WORK_MAX 16
+typedef enum {
+ ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX,
+ ERTS_SSI_AUX_WORK_DD_IX,
+ ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX,
+ ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX,
+ ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX,
+ ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX,
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX,
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX,
+ ERTS_SSI_AUX_WORK_ASYNC_READY_IX,
+ ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX,
+ ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX,
+ ERTS_SSI_AUX_WORK_MISC_IX,
+ ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX,
+ ERTS_SSI_AUX_WORK_SET_TMO_IX,
+ ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX,
+ ERTS_SSI_AUX_WORK_REAP_PORTS_IX,
+ ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX, /* SHOULD be last flag index */
+
+ ERTS_SSI_AUX_WORK_NO_FLAGS /* Not a flag index... */
+} ErtsSsiAuxWorkFlagIndex;
+
+#define ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX)
+#define ERTS_SSI_AUX_WORK_DD \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DD_IX)
+#define ERTS_SSI_AUX_WORK_DD_THR_PRGR \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DD_THR_PRGR_IX)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC_IX)
+#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM_IX)
+#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP_IX)
+#define ERTS_SSI_AUX_WORK_CNCLD_TMRS \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CNCLD_TMRS_IX)
+#define ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR_IX)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_ASYNC_READY_IX)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX)
+#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX)
+#define ERTS_SSI_AUX_WORK_MISC \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_IX)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX)
+#define ERTS_SSI_AUX_WORK_SET_TMO \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX)
+#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX)
+#define ERTS_SSI_AUX_WORK_REAP_PORTS \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_REAP_PORTS_IX)
+#define ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED_IX)
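
Deriving the bit masks from an index enum makes the ordering rules checkable. Here is a small illustrative sketch (not ERTS code) of the same pattern, with compile-time assertions that would catch a flag added after the debug-wait-completed index or an overflow of the 32-bit flag word.

    #include <stdint.h>

    typedef enum {
        AUX_WORK_DD_IX,
        AUX_WORK_MISC_IX,
        AUX_WORK_DEBUG_WAIT_COMPLETED_IX,   /* must stay the highest index */
        AUX_WORK_NO_FLAGS                   /* not a flag index */
    } aux_work_ix;

    #define AUX_WORK_FLAG(IX) (((int32_t) 1) << (IX))

    /* Catch violations of the ordering rule at compile time. */
    _Static_assert(AUX_WORK_DEBUG_WAIT_COMPLETED_IX == AUX_WORK_NO_FLAGS - 1,
                   "debug-wait-completed must keep the highest flag index");
    _Static_assert(AUX_WORK_NO_FLAGS <= 32,
                   "flags must fit in a 32-bit flag word");

    int main(void) { return AUX_WORK_FLAG(AUX_WORK_DD_IX) == 1 ? 0 : 1; }
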
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -515,8 +560,6 @@ typedef struct {
#ifdef ERTS_SMP
struct {
ErtsThrPrgrVal thr_prgr;
- void (*completed_callback)(void *);
- void (*completed_arg)(void *);
} dd;
struct {
ErtsThrPrgrVal thr_prgr;
@@ -545,6 +588,13 @@ typedef struct {
ErtsDelayedAuxWorkWakeupJob *job;
} delayed_wakeup;
#endif
+ struct {
+ struct {
+ erts_aint32_t flags;
+ void (*callback)(void *);
+ void *arg;
+ } wait_completed;
+ } debug;
} ErtsAuxWorkData;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -925,7 +975,9 @@ struct process {
ErlMessageQueue msg; /* Message queue */
ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
+#ifdef ERTS_BTM_ACCESSOR_SUPPORT
ErtsBifTimers *accessor_bif_timers; /* Accessor bif timers */
+#endif
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -1692,7 +1744,11 @@ Eterm erts_get_reader_groups_map(Process *c_p);
Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
Uint erts_debug_nbalance(void);
-int erts_debug_wait_deallocations(Process *c_p);
+
+#define ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS (1 << 0)
+#define ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS (1 << 1)
+
+int erts_debug_wait_completed(Process *c_p, int flags);
Uint erts_process_memory(Process *c_p);
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index bc04d7b78e..565528193e 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -128,10 +128,10 @@ FUNTY checked_##FUN(ARGTY x, const char *file, unsigned line) \
return _unchecked_##FUN(x); \
}
-ET_DEFINE_CHECKED(Eterm,make_boxed,Eterm*,_is_taggable_pointer);
+ET_DEFINE_CHECKED(Eterm,make_boxed,const Eterm*,_is_taggable_pointer);
ET_DEFINE_CHECKED(int,is_boxed,Eterm,!is_header);
ET_DEFINE_CHECKED(Eterm*,boxed_val,Wterm,_boxed_precond);
-ET_DEFINE_CHECKED(Eterm,make_list,Eterm*,_is_taggable_pointer);
+ET_DEFINE_CHECKED(Eterm,make_list,const Eterm*,_is_taggable_pointer);
ET_DEFINE_CHECKED(int,is_not_list,Eterm,!is_header);
ET_DEFINE_CHECKED(Eterm*,list_val,Wterm,_list_precond);
ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small);
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 602aab46dc..7b15b34da1 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -198,7 +198,7 @@ struct erl_node_; /* Declared in erl_node_tables.h */
#endif
#define _is_aligned(x) (((Uint)(x) & 0x3) == 0)
#define _unchecked_make_boxed(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_BOXED)
-_ET_DECLARE_CHECKED(Eterm,make_boxed,Eterm*)
+_ET_DECLARE_CHECKED(Eterm,make_boxed,const Eterm*)
#define make_boxed(x) _ET_APPLY(make_boxed,(x))
#if 1
#define _is_not_boxed(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_BOXED))
@@ -214,7 +214,7 @@ _ET_DECLARE_CHECKED(Eterm*,boxed_val,Wterm)
/* cons cell ("list") access methods */
#define _unchecked_make_list(x) ((Uint) COMPRESS_POINTER(x) + TAG_PRIMARY_LIST)
-_ET_DECLARE_CHECKED(Eterm,make_list,Eterm*)
+_ET_DECLARE_CHECKED(Eterm,make_list,const Eterm*)
#define make_list(x) _ET_APPLY(make_list,(x))
#if 1
#define _unchecked_is_not_list(x) ((x) & (_TAG_PRIMARY_MASK-TAG_PRIMARY_LIST))
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index 4560cd23af..e64b86496a 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -453,4 +453,12 @@ ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+void
+erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
+ void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg);
+
#endif /* timer wheel api */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 340c7033ab..14d42599a1 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -230,9 +230,23 @@ typedef struct {
ERTS_INTERNAL_BINARY_FIELDS
SWord orig_size;
void (*destructor)(Binary *);
- char magic_bin_data[1];
+ union {
+ struct {
+ ERTS_BINARY_STRUCT_ALIGNMENT
+ char data[1];
+ } aligned;
+ struct {
+ char data[1];
+ } unaligned;
+ } u;
} ErtsMagicBinary;
+#ifdef ARCH_32
+#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 4
+#else
+#define ERTS_MAGIC_BIN_BYTES_TO_ALIGN 0
+#endif
+
typedef union {
Binary binary;
ErtsMagicBinary magic_binary;
@@ -252,15 +266,30 @@ typedef union {
#define ERTS_MAGIC_BIN_DESTRUCTOR(BP) \
((ErtsBinary *) (BP))->magic_binary.destructor
#define ERTS_MAGIC_BIN_DATA(BP) \
- ((void *) ((ErtsBinary *) (BP))->magic_binary.magic_bin_data)
-#define ERTS_MAGIC_BIN_DATA_SIZE(BP) \
- ((BP)->orig_size - sizeof(void (*)(Binary *)))
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.aligned.data)
+#define ERTS_MAGIC_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.aligned.data) - offsetof(Binary,orig_bytes))
#define ERTS_MAGIC_BIN_ORIG_SIZE(Sz) \
- (sizeof(void (*)(Binary *)) + (Sz))
+ (ERTS_MAGIC_DATA_OFFSET + (Sz))
#define ERTS_MAGIC_BIN_SIZE(Sz) \
- (offsetof(ErtsMagicBinary,magic_bin_data) + (Sz))
-#define ERTS_MAGIC_BIN_FROM_DATA(DATA) \
- ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,magic_bin_data)))
+ (offsetof(ErtsMagicBinary,u.aligned.data) + (Sz))
+
+/* On 32-bit architectures these macro variants save memory
+ by not forcing 8-byte alignment for the magic payload.
+*/
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA(BP) \
+ ((void *) ((ErtsBinary *) (BP))->magic_binary.u.unaligned.data)
+#define ERTS_MAGIC_UNALIGNED_DATA_OFFSET \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) - offsetof(Binary,orig_bytes))
+#define ERTS_MAGIC_BIN_UNALIGNED_DATA_SIZE(BP) \
+ ((BP)->orig_size - ERTS_MAGIC_UNALIGNED_DATA_OFFSET)
+#define ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(Sz) \
+ (ERTS_MAGIC_UNALIGNED_DATA_OFFSET + (Sz))
+#define ERTS_MAGIC_BIN_UNALIGNED_SIZE(Sz) \
+ (offsetof(ErtsMagicBinary,u.unaligned.data) + (Sz))
+#define ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(DATA) \
+ ((ErtsBinary*)((char*)(DATA) - offsetof(ErtsMagicBinary,u.unaligned.data)))
+
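
The aligned/unaligned split only pays off on 32-bit builds, where the alignment member costs extra bytes in front of the payload (ERTS_MAGIC_BIN_BYTES_TO_ALIGN is 4 there). A standalone sketch of how the offsets work out; magic_bin_model and the STRUCT_ALIGNMENT stand-in are assumptions for the demo, not the real layout in global.h/erl_binary.h.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* A 4-byte pad on 32-bit plays the role of ERTS_BINARY_STRUCT_ALIGNMENT. */
    #if UINTPTR_MAX == 0xffffffffu
    #  define STRUCT_ALIGNMENT uint32_t align__;
    #  define BYTES_TO_ALIGN 4
    #else
    #  define STRUCT_ALIGNMENT
    #  define BYTES_TO_ALIGN 0
    #endif

    typedef struct {
        intptr_t orig_size;
        void (*destructor)(void *);
        union {
            struct { STRUCT_ALIGNMENT char data[1]; } aligned;
            struct { char data[1]; } unaligned;
        } u;
    } magic_bin_model;

    int main(void)
    {
        size_t aligned_off   = offsetof(magic_bin_model, u.aligned.data);
        size_t unaligned_off = offsetof(magic_bin_model, u.unaligned.data);
        /* On a 32-bit build the aligned payload starts later; a magic binary
           that does not need 8-byte aligned data can use the unaligned macros
           and save those bytes. */
        printf("aligned off %zu, unaligned off %zu, diff %zu (expected %d)\n",
               aligned_off, unaligned_off, aligned_off - unaligned_off,
               BYTES_TO_ALIGN);
        return 0;
    }
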
#define Binary2ErlDrvBinary(B) (&((ErtsBinary *) (B))->driver.binary)
#define ErlDrvBinary2Binary(D) ((Binary *) \
@@ -748,6 +777,15 @@ ErtsPStack s = { (byte*)PSTK_DEF_STACK(s), /* pstart */ \
ERTS_ALC_T_ESTACK /* alloc_type */ \
}
+#define PSTACK_CHANGE_ALLOCATOR(s,t) \
+do { \
+ if (s.pstart != (byte*)PSTK_DEF_STACK(s)) { \
+ erl_exit(1, "Internal error - trying to change allocator " \
+ "type of active pstack\n"); \
+ } \
+ s.alloc_type = (t); \
+ } while (0)
+
#define PSTACK_DESTROY(s) \
do { \
if (s.pstart != (byte*)PSTK_DEF_STACK(s)) { \
@@ -757,6 +795,8 @@ do { \
#define PSTACK_IS_EMPTY(s) (s.psp < s.pstart)
+#define PSTACK_COUNT(s) (((PSTACK_TYPE*)s.psp + 1) - (PSTACK_TYPE*)s.pstart)
+
#define PSTACK_TOP(s) (ASSERT(!PSTACK_IS_EMPTY(s)), (PSTACK_TYPE*)(s.psp))
#define PSTACK_PUSH(s) \
@@ -767,6 +807,45 @@ do { \
#define PSTACK_POP(s) ((PSTACK_TYPE*) (s.psp -= sizeof(PSTACK_TYPE)))
+/*
+ * Do not PSTACK_DESTROY the stack after this; 'dst' may still
+ * point into its buffer.
+ */
+#define PSTACK_SAVE(s,dst)\
+do {\
+ if (s.pstart == (byte*)PSTK_DEF_STACK(s)) {\
+ UWord _pbytes = PSTACK_COUNT(s) * sizeof(PSTACK_TYPE);\
+ (dst)->pstart = erts_alloc(s.alloc_type,\
+ sizeof(PSTK_DEF_STACK(s)));\
+ sys_memcpy((dst)->pstart, s.pstart, _pbytes);\
+ (dst)->psp = (dst)->pstart + _pbytes - sizeof(PSTACK_TYPE);\
+ (dst)->pend = (dst)->pstart + sizeof(PSTK_DEF_STACK(s));\
+ (dst)->alloc_type = s.alloc_type;\
+ } else\
+ *(dst) = s;\
+ } while (0)
+
+/*
+ * Use only on an empty stack; nothing but the allocator may be
+ * changed before this. The src stack's pstart is reset to NULL.
+ */
+#define PSTACK_RESTORE(s, src) \
+do { \
+ ASSERT(s.pstart == (byte*)PSTK_DEF_STACK(s)); \
+ s = *(src); /* struct copy */ \
+ (src)->pstart = NULL; \
+ ASSERT(s.psp >= (s.pstart - sizeof(PSTACK_TYPE))); \
+ ASSERT(s.psp < s.pend); \
+} while (0)
+
+#define PSTACK_DESTROY_SAVED(pstack)\
+do {\
+ if ((pstack)->pstart) {\
+ erts_free((pstack)->alloc_type, (pstack)->pstart);\
+ (pstack)->pstart = NULL;\
+ }\
+} while(0)
+
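
PSTACK_SAVE and PSTACK_RESTORE exist so a pstack that still lives in its stack-allocated default buffer can survive leaving the current C frame (for example across a BIF trap): the default buffer is copied to the heap on save and adopted by a fresh, empty stack on restore. A standalone model of that hand-over follows; mini_pstack and its helpers are illustrative and omit the allocator type the real macros carry.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        int  *start, *sp, *end;   /* sp points one past the top element */
        int   def_buf[4];         /* plays the role of PSTK_DEF_STACK(s) */
    } mini_pstack;

    static void mini_init(mini_pstack *s)
    {
        s->start = s->def_buf;
        s->sp = s->start;                       /* empty */
        s->end = s->def_buf + 4;
    }

    static void mini_push(mini_pstack *s, int v) { *s->sp++ = v; }

    /* Like PSTACK_SAVE: copy the default buffer to the heap if still in use. */
    static void mini_save(mini_pstack *s, mini_pstack *dst)
    {
        if (s->start == s->def_buf) {
            size_t n = (size_t)(s->sp - s->start);
            dst->start = malloc(4 * sizeof(int));   /* same size as def_buf */
            memcpy(dst->start, s->start, n * sizeof(int));
            dst->sp = dst->start + n;
            dst->end = dst->start + 4;
        } else {
            *dst = *s;              /* already on the heap: hand it over */
        }
    }

    /* Like PSTACK_RESTORE: an empty stack adopts the saved buffer. */
    static void mini_restore(mini_pstack *s, mini_pstack *src)
    {
        s->start = src->start;
        s->sp = src->sp;
        s->end = src->end;
        src->start = NULL;          /* the saved copy no longer owns the buffer */
    }

    int main(void)
    {
        mini_pstack s, saved = { 0 }, s2;
        mini_init(&s);
        mini_push(&s, 17);
        mini_save(&s, &saved);      /* before leaving the C frame (a BIF trap)  */
        mini_init(&s2);             /* later invocation starts with a new stack */
        mini_restore(&s2, &saved);
        printf("top after restore: %d\n", s2.sp[-1]);
        if (s2.start != s2.def_buf)
            free(s2.start);
        return 0;
    }
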
/* binary.c */
@@ -1055,6 +1134,9 @@ Sint erts_binary_set_loop_limit(Sint limit);
/* external.c */
void erts_init_external(void);
+/* erl_map.c */
+void erts_init_map(void);
+
/* erl_unicode.c */
void erts_init_unicode(void);
Sint erts_unicode_set_loop_limit(Sint limit);
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 8bffdedb2b..fe7c5826f5 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -529,6 +529,9 @@ erts_create_timer_wheel(ErtsSchedulerData *esdp)
tiw->next_timeout_time = mtime + ERTS_MONOTONIC_DAY;
tiw->sentinel.next = &tiw->sentinel;
tiw->sentinel.prev = &tiw->sentinel;
+ tiw->sentinel.u.func.timeout = NULL;
+ tiw->sentinel.u.func.cancel = NULL;
+ tiw->sentinel.u.func.arg = NULL;
return tiw;
}
@@ -624,6 +627,41 @@ erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
}
}
+void
+erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
+ void (*tclbk)(void *),
+ void (*func)(void *,
+ ErtsMonotonicTime,
+ void *),
+ void *arg)
+{
+ ErtsTWheelTimer *tmr;
+ int ix;
+
+ tmr = tiw->sentinel.next;
+ while (tmr != &tiw->sentinel) {
+ if (tmr->u.func.timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ tmr = tmr->next;
+ }
+
+ for (tmr = tiw->at_once.head; tmr; tmr = tmr->next) {
+ if (tmr->u.func.timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ }
+
+ for (ix = 0; ix < ERTS_TIW_SIZE; ix++) {
+ tmr = tiw->w[ix];
+ if (tmr) {
+ do {
+ if (tmr->u.func.timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ tmr = tmr->next;
+ } while (tmr != tiw->w[ix]);
+ }
+ }
+}
+
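
erts_twheel_debug_foreach() above visits every place a timer can live and invokes the reporting callback only for timers whose timeout callback matches. A standalone sketch of the sentinel-list part of that traversal; the tmr type and the callbacks are illustrative, not ERTS definitions.

    #include <stdio.h>

    typedef struct tmr {
        struct tmr *next, *prev;
        void (*timeout)(void *);
        long timeout_pos;
    } tmr;

    static void cb_a(void *arg) { (void) arg; }
    static void cb_b(void *arg) { (void) arg; }

    static void report(long pos) { printf("pending at %ld\n", pos); }

    /* Walk the sentinel-headed list, filtering on the timeout callback. */
    static void debug_foreach(tmr *sentinel, void (*tclbk)(void *))
    {
        for (tmr *t = sentinel->next; t != sentinel; t = t->next)
            if (t->timeout == tclbk)
                report(t->timeout_pos);
    }

    int main(void)
    {
        tmr sentinel, t1 = { 0 }, t2 = { 0 };
        t1.timeout = cb_a; t1.timeout_pos = 100;
        t2.timeout = cb_b; t2.timeout_pos = 200;

        /* sentinel <-> t1 <-> t2 <-> sentinel */
        sentinel.next = &t1; t1.prev = &sentinel;
        t1.next = &t2;       t2.prev = &t1;
        t2.next = &sentinel; sentinel.prev = &t2;
        sentinel.timeout = NULL;    /* cf. tiw->sentinel.u.func.timeout = NULL */

        debug_foreach(&sentinel, cb_a);   /* reports only t1 */
        return 0;
    }
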
#ifdef ERTS_TW_DEBUG
void erts_p_slpq(void)
{