Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/beam_bif_load.c     |    4
-rw-r--r--  erts/emulator/beam/beam_bp.c           |   26
-rw-r--r--  erts/emulator/beam/beam_emu.c          |    8
-rw-r--r--  erts/emulator/beam/beam_load.c         |   33
-rw-r--r--  erts/emulator/beam/bif.c               |   73
-rw-r--r--  erts/emulator/beam/bif.tab             |    5
-rw-r--r--  erts/emulator/beam/binary.c            |    2
-rw-r--r--  erts/emulator/beam/break.c             |    4
-rw-r--r--  erts/emulator/beam/code_ix.h           |    8
-rw-r--r--  erts/emulator/beam/copy.c              |   16
-rw-r--r--  erts/emulator/beam/dist.c              |    4
-rw-r--r--  erts/emulator/beam/erl_alloc.c         |   10
-rw-r--r--  erts/emulator/beam/erl_alloc.types     |    1
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c    |    1
-rw-r--r--  erts/emulator/beam/erl_bif_info.c      |    6
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c     |    4
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c    |    2
-rw-r--r--  erts/emulator/beam/erl_bif_unique.h    |   10
-rw-r--r--  erts/emulator/beam/erl_binary.h        |   54
-rw-r--r--  erts/emulator/beam/erl_bits.c          |    7
-rw-r--r--  erts/emulator/beam/erl_db.c            |   23
-rw-r--r--  erts/emulator/beam/erl_db_hash.c       |    4
-rw-r--r--  erts/emulator/beam/erl_db_tree.c       |   12
-rw-r--r--  erts/emulator/beam/erl_db_util.c       |    7
-rw-r--r--  erts/emulator/beam/erl_db_util.h       |    6
-rw-r--r--  erts/emulator/beam/erl_gc.c            |   13
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c      | 1839
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h      |    4
-rw-r--r--  erts/emulator/beam/erl_init.c          |   73
-rw-r--r--  erts/emulator/beam/erl_message.c       |    7
-rw-r--r--  erts/emulator/beam/erl_nif.c           |   33
-rw-r--r--  erts/emulator/beam/erl_process.c       |   65
-rw-r--r--  erts/emulator/beam/erl_process.h       |   10
-rw-r--r--  erts/emulator/beam/erl_process_dump.c  |    6
-rw-r--r--  erts/emulator/beam/erl_time.h          |   80
-rw-r--r--  erts/emulator/beam/erl_time_sup.c      |    3
-rw-r--r--  erts/emulator/beam/erl_trace.c         |    9
-rw-r--r--  erts/emulator/beam/export.c            |    2
-rw-r--r--  erts/emulator/beam/external.c          |   27
-rw-r--r--  erts/emulator/beam/global.h            |    6
-rw-r--r--  erts/emulator/beam/io.c                |   17
-rw-r--r--  erts/emulator/beam/time.c              | 1525
-rw-r--r--  erts/emulator/beam/utils.c             |    2
43 files changed, 2676 insertions(+), 1375 deletions(-)
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 4ba8c2a669..769fe89219 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -233,7 +233,7 @@ prepare_loading_2(BIF_ALIST_2)
}
hp = HAlloc(BIF_P, ERTS_MAGIC_REF_THING_SIZE);
res = erts_mk_magic_ref(&hp, &MSO(BIF_P), magic);
- erts_refc_dec(&magic->refc, 1);
+ erts_refc_dec(&magic->intern.refc, 1);
BIF_RET(res);
}
@@ -435,7 +435,7 @@ finish_loading_1(BIF_ALIST_1)
Eterm mod;
Eterm retval;
- erts_refc_inc(&p[i].code->refc, 1);
+ erts_refc_inc(&p[i].code->intern.refc, 1);
retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
ASSERT(retval == NIL || retval == am_on_load);
if (retval == am_on_load) {
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index b32c74ce7a..1efe7536d6 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -327,7 +327,7 @@ erts_consolidate_bif_bp_data(void)
static void
consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) ci->native;
+ GenericBp* g = ci->u.gen_bp;
GenericBpData* src;
GenericBpData* dst;
Uint flags;
@@ -376,7 +376,7 @@ consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local)
ASSERT(*erts_codeinfo_to_code(ci) !=
(BeamInstr) BeamOp(op_i_generic_breakpoint));
}
- ci->native = 0;
+ ci->u.gen_bp = NULL;
Free(g);
return;
}
@@ -427,7 +427,7 @@ erts_install_breakpoints(BpFunctions* f)
for (i = 0; i < n; i++) {
ErtsCodeInfo* ci = f->matching[i].ci;
BeamInstr *pc = erts_codeinfo_to_code(ci);
- GenericBp* g = (GenericBp *) ci->native;
+ GenericBp* g = ci->u.gen_bp;
if (*pc != br && g) {
Module* modp = f->matching[i].mod;
@@ -468,7 +468,7 @@ uninstall_breakpoint(ErtsCodeInfo *ci)
{
BeamInstr *pc = erts_codeinfo_to_code(ci);
if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
- GenericBp* g = (GenericBp *) ci->native;
+ GenericBp* g = ci->u.gen_bp;
if (g->data[erts_active_bp_ix()].flags == 0) {
/*
* The following write is not protected by any lock. We
@@ -549,7 +549,7 @@ erts_clear_trace_break(BpFunctions* f)
void
erts_clear_call_trace_bif(ErtsCodeInfo *ci, int local)
{
- GenericBp* g = (GenericBp *) ci->native;
+ GenericBp* g = ci->u.gen_bp;
if (g) {
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
@@ -624,7 +624,7 @@ erts_clear_module_break(Module *modp) {
continue;
uninstall_breakpoint(ci);
consolidate_bp_data(modp, ci, 1);
- ASSERT(ci->native == 0);
+ ASSERT(ci->u.gen_bp == NULL);
}
return n;
}
@@ -638,7 +638,7 @@ erts_clear_export_break(Module* modp, ErtsCodeInfo *ci)
erts_commit_staged_bp();
*erts_codeinfo_to_code(ci) = (BeamInstr) 0;
consolidate_bp_data(modp, ci, 0);
- ASSERT(ci->native == 0);
+ ASSERT(ci->u.gen_bp == NULL);
}
BeamInstr
@@ -651,7 +651,7 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg)
ASSERT(info->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- g = (GenericBp *) info->native;
+ g = info->u.gen_bp;
bp = &g->data[ix];
bp_flags = bp->flags;
ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
@@ -754,7 +754,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- g = (GenericBp *) ep->info.native;
+ g = ep->info.u.gen_bp;
if (g) {
bp = &g->data[erts_active_bp_ix()];
bp_flags = bp->flags;
@@ -1511,7 +1511,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
ErtsBpIndex ix = erts_staging_bp_ix();
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- g = (GenericBp *) ci->native;
+ g = ci->u.gen_bp;
if (g == 0) {
int i;
if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
@@ -1523,7 +1523,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
for (i = 0; i < ERTS_NUM_BP_IX; i++) {
g->data[i].flags = 0;
}
- ci->native = (BeamInstr) g;
+ ci->u.gen_bp = g;
}
bp = &g->data[ix];
@@ -1633,7 +1633,7 @@ clear_function_break(ErtsCodeInfo *ci, Uint break_flags)
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- if ((g = (GenericBp *) ci->native) == 0) {
+ if ((g = ci->u.gen_bp) == NULL) {
return 1;
}
@@ -1728,7 +1728,7 @@ get_time_break(ErtsCodeInfo *ci)
static GenericBpData*
check_break(ErtsCodeInfo *ci, Uint break_flags)
{
- GenericBp* g = (GenericBp *) ci->native;
+ GenericBp* g = ci->u.gen_bp;
ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if (erts_is_native_break(ci)) {
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 6010c17c17..3d36094e07 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -3887,7 +3887,6 @@ do { \
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(num_bytes);
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -3982,7 +3981,6 @@ do { \
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(BsOp1);
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -4950,14 +4948,14 @@ do { \
*/
ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) ci->native;
+ c_p->hipe.u.ncallee = ci->u.ncallee;
++hipe_trap_count;
HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (ci->mfa.arity << 8));
}
OpCase(hipe_trap_call_closure): {
ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
- c_p->hipe.u.ncallee = (void(*)(void)) ci->native;
+ c_p->hipe.u.ncallee = ci->u.ncallee;
++hipe_trap_count;
HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (ci->mfa.arity << 8));
}
@@ -5029,7 +5027,7 @@ do { \
* ... remainder of original BEAM code
*/
ErtsCodeInfo *ci = erts_code_to_codeinfo(I);
- struct hipe_call_count *hcc = (struct hipe_call_count*)ci->native;
+ struct hipe_call_count *hcc = ci->u.hcc;
ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI));
ASSERT(hcc != NULL);
ASSERT(VALID_INSTR(hcc->opcode));
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 6eea963016..7b79e6303b 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -913,7 +913,7 @@ erts_alloc_loader_state(void)
magic = erts_create_magic_binary(sizeof(LoaderState),
loader_state_dtor);
- erts_refc_inc(&magic->refc, 1);
+ erts_refc_inc(&magic->intern.refc, 1);
stp = ERTS_MAGIC_BIN_DATA(magic);
stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
@@ -996,9 +996,7 @@ static void
free_loader_state(Binary* magic)
{
loader_state_dtor(magic);
- if (erts_refc_dectest(&magic->refc, 0) == 0) {
- erts_bin_free(magic);
- }
+ erts_bin_release(magic);
}
static ErlHeapFragment* new_literal_fragment(Uint size)
@@ -5672,9 +5670,7 @@ erts_release_literal_area(ErtsLiteralArea* literal_area)
Binary* bptr;
ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG);
bptr = ((ProcBin*)oh)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
- }
+ erts_bin_release(bptr);
oh = oh->next;
}
erts_free(ERTS_ALC_T_LITERAL, literal_area);
@@ -5708,12 +5704,13 @@ erts_is_module_native(BeamCodeHeader* code_hdr)
static Eterm
native_addresses(Process* p, BeamCodeHeader* code_hdr)
{
+ Eterm result = NIL;
+#ifdef HIPE
int i;
Eterm* hp;
Uint num_functions;
Uint need;
Eterm* hp_end;
- Eterm result = NIL;
num_functions = code_hdr->num_functions;
need = (6+BIG_UINT_HEAP_SIZE)*num_functions;
@@ -5725,16 +5722,17 @@ native_addresses(Process* p, BeamCodeHeader* code_hdr)
ASSERT(is_atom(ci->mfa.function)
|| is_nil(ci->mfa.function)); /* [] if BIF stub */
- if (ci->native != 0) {
+ if (ci->u.ncallee != NULL) {
Eterm addr;
ASSERT(is_atom(ci->mfa.function));
- addr = erts_bld_uint(&hp, NULL, ci->native);
+ addr = erts_bld_uint(&hp, NULL, (Uint)ci->u.ncallee);
tuple = erts_bld_tuple(&hp, NULL, 3, ci->mfa.function,
make_small(ci->mfa.arity), addr);
result = erts_bld_cons(&hp, NULL, tuple, result);
}
}
HRelease(p, hp_end, hp);
+#endif
return result;
}
@@ -6033,7 +6031,7 @@ make_stub(ErtsCodeInfo* info, Eterm mod, Eterm func, Uint arity, Uint native, Be
DBG_TRACE_MFA(mod,func,arity,"make beam stub at %p", erts_codeinfo_to_code(info));
ASSERT(WORDS_PER_FUNCTION == 6);
info->op = (BeamInstr) BeamOp(op_i_func_info_IaaI);
- info->native = native;
+ info->u.ncallee = (void (*)(void)) native;
info->mfa.module = mod;
info->mfa.function = func;
info->mfa.arity = arity;
@@ -6104,7 +6102,7 @@ stub_final_touch(LoaderState* stp, ErtsCodeInfo* ci)
Lambda* lp;
if (is_bif(ci->mfa.module, ci->mfa.function, ci->mfa.arity)) {
- ci->native = 0;
+ ci->u.ncallee = NULL;
ci->mfa.module = 0;
ci->mfa.function = 0;
ci->mfa.arity = 0;
@@ -6267,16 +6265,7 @@ patch_funentries(Eterm Patchlist)
fe = erts_get_fun_entry(Mod, uniq, index);
fe->native_address = (Uint *)native_address;
- /* Deliberate MEMORY LEAK of native fun entries!!!
- *
- * Uncomment line below when hipe code upgrade and purging works correctly.
- * Today we may get cases when old (leaked) native code of a purged module
- * gets called and tries to create instances of a deleted fun entry.
- *
- * Reproduced on a debug emulator with stdlib_test/qlc_SUITE:join_merge
- *
- * erts_smp_refc_dec(&fe->refc, 1);
- */
+ erts_smp_refc_dec(&fe->refc, 1);
if (!patch(Addresses, (Uint) fe))
return 0;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 214de3652f..84c9f9d7c4 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3109,7 +3109,7 @@ BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
* On error returns: {error,not_a_list}, or {error, no_integer}
*/
-BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_integer_1(BIF_ALIST_1)
{
Eterm res;
Eterm tail;
@@ -3295,7 +3295,7 @@ BIF_RETTYPE float_to_binary_2(BIF_ALIST_2)
#define LOAD_E(xi,xim,xl,xlm) ((xi)=(xim), (xl)=(xlm))
#define STRING_TO_FLOAT_BUF_INC_SZ (128)
-BIF_RETTYPE string_to_float_1(BIF_ALIST_1)
+BIF_RETTYPE string_list_to_float_1(BIF_ALIST_1)
{
Eterm orig = BIF_ARG_1;
Eterm list = orig;
@@ -4254,6 +4254,75 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE list_to_port_1(BIF_ALIST_1)
+{
+ /*
+ * A valid port identifier is on the format
+ * "#Port<N.P>" where N is node and P is
+ * the port id. Both N and P are of type Uint32.
+ */
+ Uint32 n, p;
+ char* cp;
+ int i;
+ DistEntry *dep = NULL;
+ char buf[6 /* #Port< */
+ + (2)*(10 + 1) /* N.P> */
+ + 1 /* \0 */];
+
+ /* walk down the list and create a C string */
+ if ((i = intlist_to_buf(BIF_ARG_1, buf, sizeof(buf)-1)) < 0)
+ goto bad;
+
+ buf[i] = '\0'; /* null terminal */
+
+ cp = &buf[0];
+ if (strncmp("#Port<", cp, 6) != 0)
+ goto bad;
+
+ cp += 6; /* strlen("#Port<") */
+
+ if (sscanf(cp, "%u.%u>", &n, &p) < 2)
+ goto bad;
+
+ if (p > ERTS_MAX_PORT_NUMBER)
+ goto bad;
+
+ dep = erts_channel_no_to_dist_entry(n);
+
+ if (!dep)
+ goto bad;
+
+ if(dep == erts_this_dist_entry) {
+ erts_deref_dist_entry(dep);
+ BIF_RET(make_internal_port(p));
+ }
+ else {
+ ExternalThing *etp;
+ ErlNode *enp;
+
+ if (is_nil(dep->cid))
+ goto bad;
+
+ enp = erts_find_or_insert_node(dep->sysname, dep->creation);
+ ASSERT(enp != erts_this_node);
+
+ etp = (ExternalThing *) HAlloc(BIF_P, EXTERNAL_THING_HEAD_SIZE + 1);
+ etp->header = make_external_port_header(1);
+ etp->next = MSO(BIF_P).first;
+ etp->node = enp;
+ etp->data.ui[0] = p;
+
+ MSO(BIF_P).first = (struct erl_off_heap_header*) etp;
+ erts_deref_dist_entry(dep);
+ BIF_RET(make_external_port(etp));
+ }
+
+ bad:
+ if (dep)
+ erts_deref_dist_entry(dep);
+ BIF_ERROR(BIF_P, BADARG);
+}
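The format accepted above mirrors how port identifiers print, e.g. "#Port<0.42>": both the node number N and the port number P are read as unsigned integers. A minimal standalone sketch of that parse (hypothetical parse_port_string helper, plain unsigned ints, no DistEntry or allocator handling):

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: returns 0 on success and fills in node/port numbers. */
static int parse_port_string(const char *s, unsigned int *node, unsigned int *port)
{
    if (strncmp(s, "#Port<", 6) != 0)
        return -1;
    return sscanf(s + 6, "%u.%u>", node, port) == 2 ? 0 : -1;
}

/* Example: parse_port_string("#Port<0.42>", &n, &p) yields n == 0, p == 42. */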
+
BIF_RETTYPE list_to_ref_1(BIF_ALIST_1)
{
/*
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 66e5dc2988..3f6d82d65c 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -88,6 +88,7 @@ bif erlang:list_to_binary/1
bif erlang:list_to_float/1
bif erlang:list_to_integer/1
bif erlang:list_to_pid/1
+bif erlang:list_to_port/1
bif erlang:list_to_ref/1
bif erlang:list_to_tuple/1
bif erlang:loaded/0
@@ -460,8 +461,8 @@ bif error_logger:warning_map/0
bif erlang:get_module_info/1
bif erlang:get_module_info/2
ubif erlang:is_boolean/1
-bif string:to_integer/1
-bif string:to_float/1
+bif string:list_to_integer/1
+bif string:list_to_float/1
bif erlang:make_fun/3
bif erlang:iolist_size/1
bif erlang:iolist_to_binary/1
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 4dd8316dad..0df6bbb289 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -84,7 +84,6 @@ new_binary(Process *p, byte *buf, Uint len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
@@ -121,7 +120,6 @@ Eterm erts_new_mso_binary(Process *p, byte *buf, Uint len)
* Allocate the binary struct itself.
*/
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
if (buf != NULL) {
sys_memcpy(bptr->orig_bytes, buf, len);
}
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 0b40d70cb7..e23fdb83d9 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -590,7 +590,7 @@ do_break(void)
#endif
#ifdef DEBUG
case 't':
- erts_p_slpq();
+ /* erts_p_slpq(); */
return;
case 'b':
bin_check();
@@ -660,7 +660,7 @@ bin_check(void)
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
bp->val,
bp->val->orig_size,
- erts_refc_read(&bp->val->refc, 1));
+ erts_refc_read(&bp->val->intern.refc, 1));
}
}
if (printed) {
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
index 1b451bf921..e802ad5dd7 100644
--- a/erts/emulator/beam/code_ix.h
+++ b/erts/emulator/beam/code_ix.h
@@ -80,7 +80,13 @@ typedef struct ErtsCodeMFA_ {
in ops.tab to reflect the new func_info size */
typedef struct ErtsCodeInfo_ {
BeamInstr op; /* OpCode(i_func_info) */
- BeamInstr native; /* Used by hipe and trace to store extra data */
+ union {
+ struct generic_bp* gen_bp; /* Trace breakpoint */
+#ifdef HIPE
+ void (*ncallee)(void);
+ struct hipe_call_count* hcc;
+#endif
+ }u;
ErtsCodeMFA mfa;
} ErtsCodeInfo;
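The union above replaces the old catch-all BeamInstr native word, so readers of the struct now name the member they mean instead of casting an integer, as the beam_bp.c and beam_emu.c hunks earlier in this diff do. A minimal sketch of that access pattern, using stand-in types rather than the real ERTS headers:

/* Sketch only: mock types mirroring the union layout above. */
struct generic_bp;                        /* trace breakpoint payload */

typedef struct {
    unsigned long op;                     /* OpCode(i_func_info) */
    union {
        struct generic_bp *gen_bp;        /* trace breakpoint */
        void (*ncallee)(void);            /* HiPE native entry */
    } u;
} MockCodeInfo;

static struct generic_bp *get_breakpoint(MockCodeInfo *ci)
{
    return ci->u.gen_bp;                  /* was: (GenericBp *) ci->native */
}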
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 264ba89e8b..62c3f9520d 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -761,7 +761,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
}
*argp = make_binary(hbot);
pb = (ProcBin*) hbot;
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
pb->next = off_heap->first;
pb->flags = 0;
off_heap->first = (struct erl_off_heap_header*) pb;
@@ -809,7 +809,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
to->thing_word = HEADER_PROC_BIN;
to->size = real_size;
to->val = from->val;
- erts_refc_inc(&to->val->refc, 2);
+ erts_refc_inc(&to->val->intern.refc, 2);
to->bytes = from->bytes + offset;
to->next = off_heap->first;
to->flags = 0;
@@ -901,7 +901,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint
case REF_SUBTAG:
if (is_magic_ref_thing(objp)) {
ErtsMRefThing *mreft = (ErtsMRefThing *) objp;
- erts_refc_inc(&mreft->mb->refc, 2);
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
goto L_off_heap_node_container_common;
}
/* Fall through... */
@@ -1585,7 +1585,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
while (sz-- > 0) {
*hp++ = *ptr++;
}
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
pb->next = off_heap->first;
pb->flags = 0;
off_heap->first = (struct erl_off_heap_header*) pb;
@@ -1644,7 +1644,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
to->thing_word = HEADER_PROC_BIN;
to->size = real_size;
to->val = from->val;
- erts_refc_inc(&to->val->refc, 2);
+ erts_refc_inc(&to->val->intern.refc, 2);
to->bytes = from->bytes + offset;
to->next = off_heap->first;
to->flags = 0;
@@ -1678,7 +1678,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info,
case REF_SUBTAG:
if (is_magic_ref_thing(ptr)) {
ErtsMRefThing *mreft = (ErtsMRefThing *) ptr;
- erts_refc_inc(&mreft->mb->refc, 2);
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
goto off_heap_node_container_common;
}
/* Fall through... */
@@ -1847,7 +1847,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
case REFC_BINARY_SUBTAG:
{
ProcBin* pb = (ProcBin *) (tp-1);
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm));
}
goto off_heap_common;
@@ -1882,7 +1882,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
ErtsRefThing *rtp = (ErtsRefThing *) (tp - 1);
if (is_magic_ref_thing(rtp)) {
ErtsMRefThing *mreft = (ErtsMRefThing *) rtp;
- erts_refc_inc(&mreft->mb->refc, 2);
+ erts_refc_inc(&mreft->mb->intern.refc, 2);
goto off_heap_common;
}
/* Fall through... */
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index d1c2da9074..a1581908e5 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -629,7 +629,6 @@ alloc_dist_obuf(Uint size)
ErtsDistOutputBuf *obuf;
Uint obuf_size = sizeof(ErtsDistOutputBuf)+sizeof(byte)*(size-1);
Binary *bin = erts_bin_drv_alloc(obuf_size);
- erts_refc_init(&bin->refc, 1);
obuf = (ErtsDistOutputBuf *) &bin->orig_bytes[0];
#ifdef DEBUG
obuf->dbg_pattern = ERTS_DIST_OUTPUT_BUF_DBG_PATTERN;
@@ -643,8 +642,7 @@ free_dist_obuf(ErtsDistOutputBuf *obuf)
{
Binary *bin = ErtsDistOutputBuf2Binary(obuf);
ASSERT(obuf->dbg_pattern == ERTS_DIST_OUTPUT_BUF_DBG_PATTERN);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
static ERTS_INLINE Sint
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index a374593c5d..71957b2259 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -678,10 +678,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
= erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_ABIF_TIMER)]
- = erts_timer_type_size(ERTS_ALC_T_ABIF_TIMER);
-#endif
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_EXP_TRACE)]
= sizeof(NifExportTrace);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MREF_NSCHED_ENT)]
@@ -2440,12 +2436,6 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_BIF_TIMER);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- add_fix_values(&size.processes,
- &size.processes_used,
- fi,
- ERTS_ALC_T_ABIF_TIMER);
-#endif
add_fix_values(&size.processes,
&size.processes_used,
fi,
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 43f43f9034..f296a98125 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -168,7 +168,6 @@ type TIMER_SERVICE LONG_LIVED SYSTEM timer_service
type LL_PTIMER FIXED_SIZE PROCESSES ll_ptimer
type HL_PTIMER FIXED_SIZE PROCESSES hl_ptimer
type BIF_TIMER FIXED_SIZE PROCESSES bif_timer
-# type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer
type TIMER_REQUEST SHORT_LIVED PROCESSES timer_request
type BTM_YIELD_STATE SHORT_LIVED PROCESSES btm_yield_state
type REG_TABLE STANDARD SYSTEM reg_tab
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 62a752d854..f79b5b6843 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -2669,7 +2669,6 @@ static BIF_RETTYPE do_binary_copy(Process *p, Eterm bin, Eterm en)
}
cbs->result = erts_bin_nrml_alloc(target_size); /* Always offheap
if trapping */
- erts_refc_init(&(cbs->result->refc), 1);
t = (byte *) cbs->result->orig_bytes; /* No offset or anything */
pos = 0;
i = 0;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 3a8687dc59..3a70c6036b 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -177,7 +177,7 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_refc_read(&pb->val->refc, 1);
+ Uint refc = (Uint) erts_refc_read(&pb->val->intern.refc, 1);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -203,7 +203,7 @@ bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_refc_read(&mrtp->mb->refc, 1);
+ Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -3984,7 +3984,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(am_false);
}
bin = erts_magic_ref2bin(tp[2]);
- refc = erts_refc_read(&bin->refc, 1);
+ refc = erts_refc_read(&bin->intern.refc, 1);
bin_addr = (UWord) bin;
sz = 4;
erts_bld_uword(NULL, &sz, bin_addr);
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index f471390501..023bfca797 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1737,7 +1737,7 @@ setup_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- GenericBp* g = (GenericBp *) ep->info.native;
+ GenericBp* g = ep->info.u.gen_bp;
if (g) {
if (ExportIsBuiltIn(ep)) {
ASSERT(ep->beam[1]);
@@ -1755,7 +1755,7 @@ reset_bif_trace(void)
for (i = 0; i < BIF_SIZE; ++i) {
Export *ep = bif_export[i];
- GenericBp* g = (GenericBp *) ep->info.native;
+ GenericBp* g = ep->info.u.gen_bp;
if (g && g->data[active].flags == 0) {
if (ExportIsBuiltIn(ep)) {
ASSERT(ep->beam[1]);
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index 3fd4c87094..7f438daec0 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -197,7 +197,7 @@ erts_magic_ref_lookup_bin__(Uint32 refn[ERTS_REF_NUMBERS])
else {
erts_aint_t refc;
mb = tep->mb;
- refc = erts_refc_inc_unless(&mb->refc, 0, 0);
+ refc = erts_refc_inc_unless(&mb->intern.refc, 0, 0);
if (refc == 0)
mb = NULL;
}
diff --git a/erts/emulator/beam/erl_bif_unique.h b/erts/emulator/beam/erl_bif_unique.h
index 27c2a15a5e..0f3f794878 100644
--- a/erts/emulator/beam/erl_bif_unique.h
+++ b/erts/emulator/beam/erl_bif_unique.h
@@ -178,10 +178,10 @@ ERTS_GLB_INLINE Eterm
erts_mk_magic_ref(Eterm **hpp, ErlOffHeap *ohp, Binary *bp)
{
Eterm *hp = *hpp;
- ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
write_magic_ref_thing(hp, ohp, (ErtsMagicBinary *) bp);
*hpp += ERTS_MAGIC_REF_THING_SIZE;
- erts_refc_inc(&bp->refc, 1);
+ erts_refc_inc(&bp->intern.refc, 1);
OH_OVERHEAD(ohp, bp->orig_size / sizeof(Eterm));
return make_internal_ref(hp);
}
@@ -297,14 +297,14 @@ erts_iref_storage_save(ErtsIRefStorage *iref, Eterm ref)
ASSERT(is_magic_ref_thing(hp));
iref->is_magic = 1;
iref->u.mb = mrtp->mb;
- erts_refc_inc(&mrtp->mb->refc, 1);
+ erts_refc_inc(&mrtp->mb->intern.refc, 1);
}
}
ERTS_GLB_INLINE void
erts_iref_storage_clean(ErtsIRefStorage *iref)
{
- if (iref->is_magic && erts_refc_dectest(&iref->u.mb->refc, 0) == 0)
+ if (iref->is_magic && erts_refc_dectest(&iref->u.mb->intern.refc, 0) == 0)
erts_ref_bin_free(iref->u.mb);
#ifdef DEBUG
memset((void *) iref, 0xf, sizeof(ErtsIRefStorage));
@@ -337,7 +337,7 @@ erts_iref_storage_make_ref(ErtsIRefStorage *iref,
* refc increment of the cleaned storage...
*/
if (!clean_storage)
- erts_refc_inc(&iref->u.mb->refc, 1);
+ erts_refc_inc(&iref->u.mb->intern.refc, 1);
}
#ifdef DEBUG
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 6ff71ec6d1..de7dbf4e20 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -42,14 +42,16 @@
#define ERTS_BINARY_STRUCT_ALIGNMENT
#endif
-/* Add fields in ERTS_INTERNAL_BINARY_FIELDS, otherwise the drivers crash */
-#define ERTS_INTERNAL_BINARY_FIELDS \
- UWord flags; \
- erts_refc_t refc; \
+/* Add fields in binary_internals, otherwise the drivers crash */
+struct binary_internals {
+ UWord flags;
+ erts_refc_t refc;
ERTS_BINARY_STRUCT_ALIGNMENT
+};
+
typedef struct binary {
- ERTS_INTERNAL_BINARY_FIELDS
+ struct binary_internals intern;
SWord orig_size;
char orig_bytes[1]; /* to be continued */
} Binary;
@@ -63,7 +65,7 @@ typedef struct binary {
typedef struct magic_binary ErtsMagicBinary;
struct magic_binary {
- ERTS_INTERNAL_BINARY_FIELDS
+ struct binary_internals intern;
SWord orig_size;
int (*destructor)(Binary *);
Uint32 refn[ERTS_REF_NUMBERS];
@@ -87,7 +89,7 @@ typedef union {
Binary binary;
ErtsMagicBinary magic_binary;
struct {
- ERTS_INTERNAL_BINARY_FIELDS
+ struct binary_internals intern;
ErlDrvBinary binary;
} driver;
} ErtsBinary;
@@ -316,6 +318,7 @@ ERTS_GLB_INLINE Binary *erts_bin_nrml_alloc(Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc_fnf(Binary *bp, Uint size);
ERTS_GLB_INLINE Binary *erts_bin_realloc(Binary *bp, Uint size);
ERTS_GLB_INLINE void erts_bin_free(Binary *bp);
+ERTS_GLB_INLINE void erts_bin_release(Binary *bp);
ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size,
int (*destructor)(Binary *),
ErtsAlcType_t alloc_type,
@@ -374,7 +377,8 @@ erts_bin_drv_alloc_fnf(Uint size)
ERTS_CHK_BIN_ALIGNMENT(res);
if (res) {
res->orig_size = size;
- res->flags = BIN_FLAG_DRV;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
}
return res;
}
@@ -392,7 +396,8 @@ erts_bin_drv_alloc(Uint size)
res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
res->orig_size = size;
- res->flags = BIN_FLAG_DRV;
+ res->intern.flags = BIN_FLAG_DRV;
+ erts_refc_init(&res->intern.refc, 1);
return res;
}
@@ -410,7 +415,8 @@ erts_bin_nrml_alloc(Uint size)
res = erts_alloc(ERTS_ALC_T_BINARY, bsize);
ERTS_CHK_BIN_ALIGNMENT(res);
res->orig_size = size;
- res->flags = 0;
+ res->intern.flags = 0;
+ erts_refc_init(&res->intern.refc, 1);
return res;
}
@@ -419,9 +425,9 @@ erts_bin_realloc_fnf(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
: ERTS_ALC_T_BINARY;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
if (bsize < size) /* overflow */
return NULL;
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
@@ -436,9 +442,9 @@ erts_bin_realloc(Binary *bp, Uint size)
{
Binary *nbp;
Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD;
- ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
+ ErtsAlcType_t type = (bp->intern.flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY
: ERTS_ALC_T_BINARY;
- ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0);
+ ASSERT((bp->intern.flags & BIN_FLAG_MAGIC) == 0);
if (bsize < size) /* overflow */
erts_realloc_enomem(type, bp, size);
nbp = erts_realloc_fnf(type, (void *) bp, bsize);
@@ -452,7 +458,7 @@ erts_bin_realloc(Binary *bp, Uint size)
ERTS_GLB_INLINE void
erts_bin_free(Binary *bp)
{
- if (bp->flags & BIN_FLAG_MAGIC) {
+ if (bp->intern.flags & BIN_FLAG_MAGIC) {
if (!ERTS_MAGIC_BIN_DESTRUCTOR(bp)(bp)) {
/* Destructor took control of the deallocation */
return;
@@ -460,12 +466,20 @@ erts_bin_free(Binary *bp)
erts_magic_ref_remove_bin(ERTS_MAGIC_BIN_REFN(bp));
erts_free(ERTS_MAGIC_BIN_ATYPE(bp), (void *) bp);
}
- else if (bp->flags & BIN_FLAG_DRV)
+ else if (bp->intern.flags & BIN_FLAG_DRV)
erts_free(ERTS_ALC_T_DRV_BINARY, (void *) bp);
else
erts_free(ERTS_ALC_T_BINARY, (void *) bp);
}
+ERTS_GLB_INLINE void
+erts_bin_release(Binary *bp)
+{
+ if (erts_refc_dectest(&bp->intern.refc, 0) == 0) {
+ erts_bin_free(bp);
+ }
+}
+
ERTS_GLB_INLINE Binary *
erts_create_magic_binary_x(Uint size, int (*destructor)(Binary *),
ErtsAlcType_t alloc_type,
@@ -478,10 +492,10 @@ erts_create_magic_binary_x(Uint size, int (*destructor)(Binary *),
if (!bptr)
erts_alloc_n_enomem(ERTS_ALC_T2N(alloc_type), bsize);
ERTS_CHK_BIN_ALIGNMENT(bptr);
- bptr->flags = BIN_FLAG_MAGIC;
+ bptr->intern.flags = BIN_FLAG_MAGIC;
bptr->orig_size = unaligned ? ERTS_MAGIC_BIN_UNALIGNED_ORIG_SIZE(size)
: ERTS_MAGIC_BIN_ORIG_SIZE(size);
- erts_refc_init(&bptr->refc, 0);
+ erts_refc_init(&bptr->intern.refc, 0);
ERTS_MAGIC_BIN_DESTRUCTOR(bptr) = destructor;
ERTS_MAGIC_BIN_ATYPE(bptr) = alloc_type;
erts_make_magic_ref_in_array(ERTS_MAGIC_BIN_REFN(bptr));
@@ -509,7 +523,7 @@ ERTS_GLB_INLINE erts_smp_atomic_t *
erts_smp_binary_to_magic_indirection(Binary *bp)
{
ErtsMagicIndirectionWord *mip;
- ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
return &mip->smp_atomic_word;
@@ -519,7 +533,7 @@ ERTS_GLB_INLINE erts_atomic_t *
erts_binary_to_magic_indirection(Binary *bp)
{
ErtsMagicIndirectionWord *mip;
- ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION);
mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp);
return &mip->atomic_word;
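The new erts_bin_release() above folds the recurring decrement-and-test-then-free idiom into one inline helper, which is why the remaining hunks in this diff shrink three-line call sites to a single call. A self-contained sketch of the same idea with a mock counter (assumes C11 atomics, not the ERTS refc API):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_long refc; char data[]; } MockBin;

/* Drop one reference; free the block when the last reference goes away. */
static void mock_bin_release(MockBin *bp)
{
    if (atomic_fetch_sub(&bp->refc, 1) == 1)   /* old value 1 => now 0 */
        free(bp);
}

The real helper instead relies on erts_refc_dectest() and erts_bin_free(), so the magic/driver flag dispatch shown earlier in erts_bin_free() still applies.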
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 885e955332..df3f6ad557 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1404,7 +1404,6 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- erts_refc_init(&bptr->refc, 1);
erts_current_bin = (byte *) bptr->orig_bytes;
/*
@@ -1518,14 +1517,11 @@ erts_bs_private_append(Process* p, Eterm bin, Eterm build_size_term, Uint unit)
* binary and copy the contents of the old binary into it.
*/
Binary* bptr = erts_bin_nrml_alloc(new_size);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, binp->orig_bytes, binp->orig_size);
pb->flags |= PB_IS_WRITABLE | PB_ACTIVE_WRITER;
pb->val = bptr;
pb->bytes = (byte *) bptr->orig_bytes;
- if (erts_refc_dectest(&binp->refc, 0) == 0) {
- erts_bin_free(binp);
- }
+ erts_bin_release(binp);
}
}
erts_current_bin = pb->bytes;
@@ -1565,7 +1561,6 @@ erts_bs_init_writable(Process* p, Eterm sz)
* Allocate the binary data struct itself.
*/
bptr = erts_bin_nrml_alloc(bin_size);
- erts_refc_init(&bptr->refc, 1);
/*
* Now allocate the ProcBin on the heap.
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 378328856d..98c689f13f 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -217,7 +217,7 @@ make_btid(DbTable *tb)
* and table is refered once by being alive...
*/
erts_smp_refc_init(&tb->common.refc, 2);
- erts_refc_inc(&btid->refc, 1);
+ erts_refc_inc(&btid->intern.refc, 1);
}
static ERTS_INLINE DbTable* btid2tab(Binary* btid)
@@ -402,8 +402,8 @@ free_dbtable(void *vtb)
#endif
ASSERT(is_immed(tb->common.heir_data));
- if (tb->common.btid && erts_refc_dectest(&tb->common.btid->refc, 0) == 0)
- erts_bin_free(tb->common.btid);
+ if (tb->common.btid)
+ erts_bin_release(tb->common.btid);
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
}
@@ -3616,9 +3616,7 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
ASSERT(fix->counter == 0);
}
- if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) {
- erts_bin_free(fix->tabs.btid);
- }
+ erts_bin_release(fix->tabs.btid);
erts_free(ERTS_ALC_T_DB_FIXATION, fix);
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
++work;
@@ -3783,7 +3781,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
tb, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
fix->tabs.btid = tb->common.btid;
- erts_refc_inc(&fix->tabs.btid->refc, 2);
+ erts_refc_inc(&fix->tabs.btid->intern.refc, 2);
fix->procs.p = p;
fix->counter = 1;
fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
@@ -3819,7 +3817,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
#endif
fixed_tabs_delete(p, fix);
- erts_refc_dec(&fix->tabs.btid->refc, 1);
+ erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, (void *) fix, sizeof(DbFixation));
@@ -3889,9 +3887,7 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
{
fixed_tabs_delete(fix->procs.p, fix);
- if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) {
- erts_bin_free(fix->tabs.btid);
- }
+ erts_bin_release(fix->tabs.btid);
erts_db_free(ERTS_ALC_T_DB_FIXATION,
ctx->tb, (void *) fix, sizeof(DbFixation));
@@ -3906,9 +3902,8 @@ int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
ASSERT(fix->counter == 0);
fixed_tabs_delete(p, fix);
- if (erts_refc_dectest(&fix->tabs.btid->refc, 0) == 0) {
- erts_bin_free(fix->tabs.btid);
- }
+ erts_bin_release(fix->tabs.btid);
+
erts_free(ERTS_ALC_T_DB_FIXATION, fix);
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
return 1;
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 7ab27df00c..80c4824eeb 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -1258,7 +1258,7 @@ static int match_traverse(Process* p, DbTableHash* tb,
}
if (mpi.all_objects) {
- mpi.mp->flags |= BIN_FLAG_ALL_OBJECTS;
+ mpi.mp->intern.flags |= BIN_FLAG_ALL_OBJECTS;
}
/*
@@ -1383,7 +1383,7 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
void* context_ptr, /* For callbacks */
Eterm* ret)
{
- int all_objects = (*mpp)->flags & BIN_FLAG_ALL_OBJECTS;
+ int all_objects = (*mpp)->intern.flags & BIN_FLAG_ALL_OBJECTS;
HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
* the 'next' pointer in the previous term
*/
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index ab8da6ccf6..f6918b8ec4 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -995,7 +995,7 @@ static int db_select_continue_tree(Process *p,
sc.lastobj = NULL;
sc.max = 1000;
sc.keypos = tb->common.keypos;
- sc.all_objects = mp->flags & BIN_FLAG_ALL_OBJECTS;
+ sc.all_objects = mp->intern.flags & BIN_FLAG_ALL_OBJECTS;
sc.chunk_size = chunk_size;
reverse = unsigned_val(tptr[7]);
sc.got = signed_val(tptr[8]);
@@ -1187,7 +1187,7 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb= erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
@@ -1385,7 +1385,7 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
}
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
@@ -1510,7 +1510,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
@@ -1536,7 +1536,7 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
@@ -1932,7 +1932,7 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
}
key = copy_struct(key, sz, &hp, &MSO(p));
if (mpi.all_objects)
- (mpi.mp)->flags |= BIN_FLAG_ALL_OBJECTS;
+ (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 03cc11bdc4..24b22eafb8 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -3265,9 +3265,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
}
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
ASSERT(u.pb != &tmp);
@@ -3277,8 +3275,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
break;
case REF_SUBTAG:
ASSERT(is_magic_ref_thing(u.hdr));
- if (erts_refc_dectest(&u.mref->mb->refc, 0) == 0)
- erts_bin_free((Binary *)u.mref->mb);
+ erts_bin_release((Binary *)u.mref->mb);
break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 9be77fcefa..ed7b9c8618 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -504,7 +504,7 @@ ERTS_GLB_INLINE Binary *
erts_db_get_match_prog_binary_unchecked(Eterm term)
{
Binary *bp = erts_magic_ref2bin(term);
- ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
ASSERT((ERTS_MAGIC_BIN_DESTRUCTOR(bp) == erts_db_match_prog_destructor));
return bp;
}
@@ -516,7 +516,7 @@ erts_db_get_match_prog_binary(Eterm term)
if (!is_internal_magic_ref(term))
return NULL;
bp = erts_magic_ref2bin(term);
- ASSERT(bp->flags & BIN_FLAG_MAGIC);
+ ASSERT(bp->intern.flags & BIN_FLAG_MAGIC);
if (ERTS_MAGIC_BIN_DESTRUCTOR(bp) != erts_db_match_prog_destructor)
return NULL;
return bp;
@@ -528,7 +528,7 @@ erts_db_get_match_prog_binary(Eterm term)
** Convenience when compiling into Binary structures
*/
#define IsMatchProgBinary(BP) \
- (((BP)->flags & BIN_FLAG_MAGIC) \
+ (((BP)->intern.flags & BIN_FLAG_MAGIC) \
&& ERTS_MAGIC_BIN_DESTRUCTOR((BP)) == erts_db_match_prog_destructor)
#define Binary2MatchProg(BP) \
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index d51d4fff45..a991c2c164 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1252,7 +1252,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* link it into the MSO list for the process.
*/
- erts_refc_inc(&bptr->refc, 1);
+ erts_refc_inc(&bptr->intern.refc, 1);
*prev = ptr;
prev = &ptr->next;
}
@@ -2889,9 +2889,7 @@ sweep_off_heap(Process *p, int fullsweep)
case REFC_BINARY_SUBTAG:
{
Binary* bptr = ((ProcBin*)ptr)->val;
- if (erts_refc_dectest(&bptr->refc, 0) == 0) {
- erts_bin_free(bptr);
- }
+ erts_bin_release(bptr);
break;
}
case FUN_SUBTAG:
@@ -2907,8 +2905,7 @@ sweep_off_heap(Process *p, int fullsweep)
ErtsMagicBinary *bptr;
ASSERT(is_magic_ref_thing(ptr));
bptr = ((ErtsMRefThing *) ptr)->mb;
- if (erts_refc_dectest(&bptr->refc, 0) == 0)
- erts_bin_free((Binary *) bptr);
+ erts_bin_release((Binary *) bptr);
break;
}
default:
@@ -3609,7 +3606,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
erts_aint_t refc;
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- refc = erts_refc_read(&u.pb->val->refc, 1);
+ refc = erts_refc_read(&u.pb->val->intern.refc, 1);
break;
case FUN_SUBTAG:
refc = erts_smp_refc_read(&u.fun->fe->refc, 1);
@@ -3621,7 +3618,7 @@ erts_check_off_heap2(Process *p, Eterm *htop)
break;
case REF_SUBTAG:
ASSERT(is_magic_ref_thing(u.hdr));
- refc = erts_refc_read(&u.mref->mb->refc, 1);
+ refc = erts_refc_read(&u.mref->mb->intern.refc, 1);
break;
default:
ASSERT(!"erts_check_off_heap2: Invalid thing_word");
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 26be8c7edf..13d6136672 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -29,6 +29,8 @@
# include "config.h"
#endif
+/* #define ERTS_MAGIC_REF_BIF_TIMERS */
+
#include "sys.h"
#include "global.h"
#include "bif.h"
@@ -36,6 +38,9 @@
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
#include "erl_hl_timer.h"
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#include "erl_binary.h"
+#endif
#define ERTS_TMR_CHECK_CANCEL_ON_CREATE 0
@@ -106,9 +111,6 @@ typedef enum {
#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 14)
#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 15)
#define ERTS_TMR_ROFLG_CALLBACK (((Uint32) 1) << 16)
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 17)
-#endif
#define ERTS_TMR_ROFLG_SID_MASK \
(ERTS_TMR_ROFLG_HLT - (Uint32) 1)
@@ -127,6 +129,13 @@ typedef struct ErtsHLTimer_ ErtsHLTimer;
#define ERTS_HLT_PFIELD_NOT_IN_TABLE (~((UWord) 0))
+typedef struct ErtsBifTimer_ ErtsBifTimer;
+
+typedef struct {
+ ErtsBifTimer *next;
+ ErtsBifTimer *prev;
+} ErtsBifTimerList;
+
typedef struct {
UWord parent; /* parent pointer and flags... */
union {
@@ -144,9 +153,9 @@ typedef struct {
typedef struct {
UWord parent; /* parent pointer and flags... */
- ErtsHLTimer *right;
- ErtsHLTimer *left;
-} ErtsHLTimerTree;
+ ErtsBifTimer *right;
+ ErtsBifTimer *left;
+} ErtsBifTimerTree;
typedef struct {
Uint32 roflgs;
@@ -155,67 +164,75 @@ typedef struct {
void *arg;
erts_atomic_t next;
} u;
+ union {
+ Process *proc;
+ Port *port;
+ Eterm name;
+ void (*callback)(void *);
+ } receiver;
} ErtsTmrHead;
struct ErtsHLTimer_ {
ErtsTmrHead head; /* NEED to be first! */
+ ErtsMonotonicTime timeout;
union {
ErtsThrPrgrLaterOp cleanup;
ErtsHLTimerTimeTree tree;
} time;
- ErtsMonotonicTime timeout;
- union {
- Process *proc;
- Port *port;
- Eterm name;
- void (*callback)(void *);
- } receiver;
#ifdef ERTS_HLT_HARD_DEBUG
int pending_timeout;
#endif
-
- erts_smp_atomic32_t state;
-
- /* BIF timer only fields follow... */
- struct {
- Uint32 refn[ERTS_REF_NUMBERS];
- ErtsHLTimerTree proc_tree;
- ErtsHLTimerTree tree;
- Eterm message;
- ErlHeapFragment *bp;
- } btm;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- struct {
- Eterm accessor;
- ErtsHLTimerTree tree;
- } abtm;
-#endif
};
-#define ERTS_HL_PTIMER_SIZE offsetof(ErtsHLTimer, btm)
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-#define ERTS_BIF_TIMER_SIZE offsetof(ErtsHLTimer, abtm)
-#define ERTS_ABIF_TIMER_SIZE sizeof(ErtsHLTimer)
-#else
-#define ERTS_BIF_TIMER_SIZE sizeof(ErtsHLTimer)
-#endif
-
typedef struct {
ErtsTmrHead head; /* NEED to be first! */
union {
- void *p;
- void (*callback)(void *);
+ ErtsTWheelTimer tw_tmr;
+ ErtsThrPrgrLaterOp cleanup;
} u;
- ErtsTWheelTimer tw_tmr;
} ErtsTWTimer;
+struct ErtsBifTimer_ {
+ union {
+ ErtsTmrHead head;
+ ErtsHLTimer hlt;
+ ErtsTWTimer twt;
+ } type;
+ struct {
+ erts_smp_atomic32_t state;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin;
+ ErtsHLTimerList proc_list;
+#else
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsBifTimerTree proc_tree;
+ ErtsBifTimerTree tree;
+#endif
+ Eterm message;
+ ErlHeapFragment *bp;
+ } btm;
+};
+
typedef union {
ErtsTmrHead head;
ErtsHLTimer hlt;
ErtsTWTimer twt;
+ ErtsBifTimer btm;
} ErtsTimer;
+typedef ErtsTimer *(*ErtsCreateTimerFunc)(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg);
+
#ifdef SMALL_MEMORY
#define BIF_TIMER_PREALC_SZ 10
#define PTIMER_PREALC_SZ 10
@@ -225,7 +242,7 @@ typedef union {
#endif
ERTS_SCHED_PREF_PALLOC_IMPL(bif_timer_pre,
- ErtsHLTimer,
+ ErtsBifTimer,
BIF_TIMER_PREALC_SZ)
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(tw_timer,
@@ -296,12 +313,16 @@ struct ErtsHLTimerService_ {
ErtsHLTCncldTmrQ canceled_queue;
#endif
ErtsHLTimer *time_tree;
- ErtsHLTimer *btm_tree;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsBifTimer *btm_tree;
+#endif
ErtsHLTimer *next_timeout;
ErtsYieldingTimeoutState yield;
ErtsTWheelTimer service_timer;
};
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
static ERTS_INLINE int
refn_is_lt(Uint32 *x, Uint32 *y)
{
@@ -317,6 +338,14 @@ refn_is_lt(Uint32 *x, Uint32 *y)
return x[0] < y[0];
}
+static ERTS_INLINE int
+refn_is_eq(Uint32 *x, Uint32 *y)
+{
+ return (x[0] == y[0]) & (x[1] == y[1]) & (x[2] == y[2]);
+}
+
+#endif
+
#define ERTS_RBT_PREFIX time
#define ERTS_RBT_T ErtsHLTimer
#define ERTS_RBT_KEY_T ErtsMonotonicTime
@@ -506,8 +535,16 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#endif /* ERTS_HLT_HARD_DEBUG */
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.mbin->refn)
+#else
+#define ERTS_BTM_HLT2REFN(T) ((T)->btm.refn)
+#endif
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+
#define ERTS_RBT_PREFIX btm
-#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_T ErtsBifTimer
#define ERTS_RBT_KEY_T Uint32 *
#define ERTS_RBT_FLAGS_T UWord
#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
@@ -533,7 +570,7 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
(T)->btm.tree.parent |= (F); \
} while (0)
#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+ ((ErtsBifTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
#define ERTS_RBT_SET_PARENT(T, P) \
do { \
ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
@@ -544,20 +581,94 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.tree.right = (R))
#define ERTS_RBT_GET_LEFT(T) ((T)->btm.tree.left)
#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
+#endif
#define ERTS_RBT_WANT_FOREACH
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static ERTS_INLINE void
+proc_btm_list_insert(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (!y) {
+ x->btm.proc_list.next = x;
+ x->btm.proc_list.prev = x;
+ *list = x;
+ }
+ else {
+ ERTS_HLT_ASSERT(y->btm.proc_list.prev->btm.proc_list.next == y);
+ x->btm.proc_list.next = y;
+ x->btm.proc_list.prev = y->btm.proc_list.prev;
+ y->btm.proc_list.prev->btm.proc_list.next = x;
+ y->btm.proc_list.prev = x;
+ }
+}
+
+static ERTS_INLINE void
+proc_btm_list_delete(ErtsBifTimer **list, ErtsBifTimer *x)
+{
+ ErtsBifTimer *y = *list;
+ if (y == x && x->btm.proc_list.next == x) {
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev == x);
+ *list = NULL;
+ }
+ else {
+ if (y == x)
+ *list = x->btm.proc_list.next;
+ ERTS_HLT_ASSERT(x->btm.proc_list.prev->btm.proc_list.next == x);
+ ERTS_HLT_ASSERT(x->btm.proc_list.next->btm.proc_list.prev == x);
+ x->btm.proc_list.prev->btm.proc_list.next = x->btm.proc_list.next;
+ x->btm.proc_list.next->btm.proc_list.prev = x->btm.proc_list.prev;
+ }
+ x->btm.proc_list.next = NULL;
+}
+
+static ERTS_INLINE int
+proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list,
+ void (*destroy)(ErtsBifTimer *, void *),
+ void *arg,
+ int limit)
+{
+ int i;
+ ErtsBifTimer *first, *last;
+
+ first = *list;
+ if (!first)
+ return 0;
+
+ last = first->btm.proc_list.prev;
+ for (i = 0; i < limit; i++) {
+ ErtsBifTimer *x = last;
+ last = last->btm.proc_list.prev;
+ (*destroy)(x, arg);
+ x->btm.proc_list.next = NULL;
+ if (x == first) {
+ *list = NULL;
+ return 0;
+ }
+ }
+
+ last->btm.proc_list.next = first;
+ first->btm.proc_list.prev = last;
+ return 1;
+}
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
#define ERTS_RBT_PREFIX proc_btm
-#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_T ErtsBifTimer
#define ERTS_RBT_KEY_T Uint32 *
#define ERTS_RBT_FLAGS_T UWord
#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
@@ -583,7 +694,7 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
(T)->btm.proc_tree.parent |= (F); \
} while (0)
#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
+ ((ErtsBifTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
#define ERTS_RBT_SET_PARENT(T, P) \
do { \
ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
@@ -594,71 +705,20 @@ same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.proc_tree.right = (R))
#define ERTS_RBT_GET_LEFT(T) ((T)->btm.proc_tree.left)
#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.proc_tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_GET_KEY(T) ERTS_BTM_HLT2REFN((T))
#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
-#define ERTS_RBT_WANT_DELETE
-#define ERTS_RBT_WANT_INSERT
-#define ERTS_RBT_WANT_LOOKUP
-#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
-#define ERTS_RBT_UNDEF
-
-#include "erl_rbtree.h"
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-
-#define ERTS_RBT_PREFIX abtm
-#define ERTS_RBT_T ErtsHLTimer
-#define ERTS_RBT_KEY_T Uint32 *
-#define ERTS_RBT_FLAGS_T UWord
-#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
- do { \
- (T)->abtm.tree.parent = (UWord) NULL; \
- (T)->abtm.tree.right = NULL; \
- (T)->abtm.tree.left = NULL; \
- } while (0)
-#define ERTS_RBT_IS_RED(T) \
- ((int) ((T)->abtm.tree.parent & ERTS_HLT_PFLG_RED))
-#define ERTS_RBT_SET_RED(T) \
- ((T)->abtm.tree.parent |= ERTS_HLT_PFLG_RED)
-#define ERTS_RBT_IS_BLACK(T) \
- (!ERTS_RBT_IS_RED((T)))
-#define ERTS_RBT_SET_BLACK(T) \
- ((T)->abtm.tree.parent &= ~ERTS_HLT_PFLG_RED)
-#define ERTS_RBT_GET_FLAGS(T) \
- ((T)->abtm.tree.parent & ERTS_HLT_PFLGS_MASK)
-#define ERTS_RBT_SET_FLAGS(T, F) \
- do { \
- ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
- (T)->abtm.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
- (T)->abtm.tree.parent |= (F); \
- } while (0)
-#define ERTS_RBT_GET_PARENT(T) \
- ((ErtsHLTimer *) ((T)->abtm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
-#define ERTS_RBT_SET_PARENT(T, P) \
- do { \
- ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
- (T)->abtm.tree.parent &= ERTS_HLT_PFLGS_MASK; \
- (T)->abtm.tree.parent |= (UWord) (P); \
- } while (0)
-#define ERTS_RBT_GET_RIGHT(T) ((T)->abtm.tree.right)
-#define ERTS_RBT_SET_RIGHT(T, R) ((T)->abtm.tree.right = (R))
-#define ERTS_RBT_GET_LEFT(T) ((T)->abtm.tree.left)
-#define ERTS_RBT_SET_LEFT(T, L) ((T)->abtm.tree.left = (L))
-#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
-#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
-#define ERTS_RBT_IS_EQ(KX, KY) \
- (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_IS_EQ(KX, KY) refn_is_eq((KX), (KY))
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_WANT_INSERT
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
#define ERTS_RBT_WANT_LOOKUP
+#endif
#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"
-#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
#ifdef ERTS_SMP
static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
@@ -680,7 +740,9 @@ erts_create_timer_service(void)
srv = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_SERVICE,
sizeof(ErtsHLTimerService));
srv->time_tree = NULL;
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
srv->btm_tree = NULL;
+#endif
srv->next_timeout = NULL;
srv->yield = init_yield;
erts_twheel_init_timer(&srv->service_timer);
@@ -697,11 +759,8 @@ erts_timer_type_size(ErtsAlcType_t type)
{
switch (type) {
case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer);
- case ERTS_ALC_T_HL_PTIMER: return ERTS_HL_PTIMER_SIZE;
- case ERTS_ALC_T_BIF_TIMER: return ERTS_BIF_TIMER_SIZE;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- case ERTS_ALC_T_ABIF_TIMER: return ERTS_ABIF_TIMER_SIZE;
-#endif
+ case ERTS_ALC_T_HL_PTIMER: return sizeof(ErtsHLTimer);
+ case ERTS_ALC_T_BIF_TIMER: return sizeof(ErtsBifTimer);
default: ERTS_INTERNAL_ERROR("Unknown type");
}
return 0;
@@ -760,6 +819,111 @@ port_timeout_common(Port *port, void *tmr)
return 0;
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static erts_smp_atomic_t *
+mbin_to_btmref__(ErtsMagicBinary *mbin)
+{
+ return erts_smp_binary_to_magic_indirection((Binary *) mbin);
+}
+
+static ERTS_INLINE void
+magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr)
+{
+ erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin);
+ erts_smp_atomic_init_nob(aptr, (erts_aint_t) tmr);
+}
+
+static ERTS_INLINE ErtsBifTimer *
+magic_binary_to_btm(ErtsMagicBinary *mbin)
+{
+ erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin);
+ ErtsBifTimer *tmr = (ErtsBifTimer *) erts_smp_atomic_read_nob(aptr);
+ ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin);
+ return tmr;
+}
+
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE erts_aint_t
+init_btm_specifics(ErtsSchedulerData *esdp,
+ ErtsBifTimer *tmr, Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin
+#else
+ Uint32 *refn
+#endif
+ )
+{
+ Uint hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
+ int refc;
+ if (!hsz) {
+ tmr->btm.message = msg;
+ tmr->btm.bp = NULL;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(hsz);
+ Eterm *hp = bp->mem;
+ tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
+ tmr->btm.bp = bp;
+ }
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ refc = 1;
+ tmr->btm.mbin = mbin;
+ erts_refc_inc(&mbin->refc, 1);
+ magic_binary_init(mbin, tmr);
+ tmr->btm.proc_list.next = NULL;
+#else
+ refc = 0;
+ tmr->btm.refn[0] = refn[0];
+ tmr->btm.refn[1] = refn[1];
+ tmr->btm.refn[2] = refn[2];
+
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ btm_rbt_insert(&esdp->timer_service->btm_tree, tmr);
+#endif
+
+ erts_smp_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
+ return refc; /* refc from magic binary... */
+}
+
+static void tw_bif_timer_timeout(void *vbtmp);
+
+static ERTS_INLINE void
+timer_destroy(ErtsTimer *tmr, int twt, int btm)
+{
+ if (!btm) {
+ if (twt)
+ tw_timer_free(&tmr->twt);
+ else
+ erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
+ }
+ else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *bp = (Binary *) tmr->btm.btm.mbin;
+ if (erts_refc_dectest(&bp->refc, 0) == 0)
+ erts_bin_free(bp);
+#endif
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_PRE_ALC)
+ bif_timer_pre_free(&tmr->btm);
+ else
+ erts_free(ERTS_ALC_T_BIF_TIMER, &tmr->btm);
+ }
+}
+
+static ERTS_INLINE void
+timer_pre_dec_refc(ErtsTimer *tmr)
+{
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t refc;
+ refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc);
+ ERTS_HLT_ASSERT(refc > 0);
+#else
+ erts_smp_atomic32_dec_nob(&tmr->head.refc);
+#endif
+}
+
/*
* Basic timer wheel timer stuff
*/
@@ -767,26 +931,39 @@ port_timeout_common(Port *port, void *tmr)
static void
scheduled_tw_timer_destroy(void *vtmr)
{
- tw_timer_free((ErtsTWTimer *) vtmr);
+ ErtsTimer * tmr = (ErtsTimer *) vtmr;
+ int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+ timer_destroy((ErtsTimer *) vtmr, 1, btm);
}
static void
schedule_tw_timer_destroy(ErtsTWTimer *tmr)
{
+ Uint size;
/*
* Reference to process/port can be
* dropped at once...
*/
if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC)
- erts_proc_dec_refc((Process *) tmr->u.p);
+ erts_proc_dec_refc(tmr->head.receiver.proc);
else if (tmr->head.roflgs & ERTS_TMR_ROFLG_PORT)
- erts_port_dec_refc((Port *) tmr->u.p);
+ erts_port_dec_refc(tmr->head.receiver.port);
+
+ if (!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
+ }
erts_schedule_thr_prgr_later_cleanup_op(
scheduled_tw_timer_destroy,
(void *) tmr,
- &tmr->tw_tmr.u.cleanup,
- sizeof(ErtsTWTimer));
+ &tmr->u.cleanup,
+ size);
}
static ERTS_INLINE void
@@ -802,7 +979,7 @@ static void
tw_proc_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Process *proc = (Process *) twtp->u.p;
+ Process *proc = twtp->head.receiver.proc;
if (proc_timeout_common(proc, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
@@ -812,84 +989,126 @@ static void
tw_port_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- Port *port = (Port *) twtp->u.p;
+ Port *port = twtp->head.receiver.port;
if (port_timeout_common(port, vtwtp))
tw_timer_dec_refc(twtp);
tw_timer_dec_refc(twtp);
}
static void
-tw_ptimer_cancel(void *vtwtp)
-{
- tw_timer_dec_refc((ErtsTWTimer *) vtwtp);
-}
-
-static void
cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr)
{
ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
== (Uint32) esdp->no);
- erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->tw_tmr);
+ erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->u.tw_tmr);
+ tw_timer_dec_refc(tmr);
}
static void
tw_callback_timeout(void *vtwtp)
{
ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
- void (*callback)(void *) = twtp->u.callback;
+ void (*callback)(void *) = twtp->head.receiver.callback;
void *arg = twtp->head.u.arg;
tw_timer_dec_refc(twtp);
(*callback)(arg);
}
-static ErtsTWTimer *
-create_tw_timer(ErtsSchedulerData *esdp,
- ErtsTmrType type, void *p,
- void (*callback)(void *), void *arg,
- ErtsMonotonicTime timeout_pos)
+static ErtsTimer *
+create_tw_timer(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, ErtsTmrType type,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
+ void (*callback)(void *), void *arg)
{
ErtsTWTimer *tmr;
void (*timeout_func)(void *);
- void (*cancel_func)(void *);
erts_aint32_t refc;
- tmr = tw_timer_alloc();
- erts_twheel_init_timer(&tmr->tw_tmr);
-
- tmr->head.roflgs = (Uint32) esdp->no;
- ERTS_HLT_ASSERT((tmr->head.roflgs
- & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
+ if (type != ERTS_TMR_BIF) {
+ tmr = tw_timer_alloc();
+ tmr->head.roflgs = 0;
+ }
+ else {
+ if (short_time) {
+ tmr = (ErtsTWTimer *) bif_timer_pre_alloc();
+ if (!tmr)
+ goto alloc_bif_timer;
+ tmr->head.roflgs = (ERTS_TMR_ROFLG_BIF_TMR
+ | ERTS_TMR_ROFLG_PRE_ALC);
+ }
+ else {
+ alloc_bif_timer:
+ tmr = (ErtsTWTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ tmr->head.roflgs = ERTS_TMR_ROFLG_BIF_TMR;
+ }
+ }
+
+ erts_twheel_init_timer(&tmr->u.tw_tmr);
+ tmr->head.roflgs |= (Uint32) esdp->no;
+ ERTS_HLT_ASSERT((((Uint32) esdp->no)
+ & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
switch (type) {
case ERTS_TMR_PROC:
- tmr->u.p = p;
+ tmr->head.receiver.proc = (Process *) rcvrp;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
timeout_func = tw_proc_timeout;
- cancel_func = tw_ptimer_cancel;
- erts_proc_inc_refc((Process *) p);
+ erts_proc_inc_refc((Process *) rcvrp);
refc = 2;
break;
case ERTS_TMR_PORT:
- tmr->u.p = p;
+ tmr->head.receiver.port = (Port *) rcvrp;
tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT;
timeout_func = tw_port_timeout;
- cancel_func = tw_ptimer_cancel;
- erts_port_inc_refc((Port *) p);
+ erts_port_inc_refc((Port *) rcvrp);
refc = 2;
break;
case ERTS_TMR_CALLBACK:
tmr->head.u.arg = arg;
- tmr->u.callback = callback;
+ tmr->head.receiver.callback = callback;
tmr->head.roflgs |= ERTS_TMR_ROFLG_CALLBACK;
timeout_func = tw_callback_timeout;
- cancel_func = NULL;
refc = 1;
break;
+ case ERTS_TMR_BIF:
+
+ timeout_func = tw_bif_timer_timeout;
+ if (is_internal_pid(rcvr)) {
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
+ tmr->head.receiver.proc = (Process *) rcvrp;
+ refc = 2;
+ }
+ else {
+ ERTS_HLT_ASSERT(is_atom(rcvr));
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_REG_NAME;
+ tmr->head.receiver.name = (Eterm) rcvr;
+ refc = 1;
+ }
+
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
+#endif
+ );
+ break;
+
default:
ERTS_INTERNAL_ERROR("Unsupported timer type");
return NULL;
@@ -898,41 +1117,24 @@ create_tw_timer(ErtsSchedulerData *esdp,
erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
erts_twheel_set_timer(esdp->timer_wheel,
- &tmr->tw_tmr,
+ &tmr->u.tw_tmr,
timeout_func,
- cancel_func,
tmr,
timeout_pos);
- return tmr;
+ return (ErtsTimer *) tmr;
}
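When create_tw_timer() builds a BIF timer for a short timeout it first tries a preallocated slot via bif_timer_pre_alloc() and falls through to the ordinary allocator at the alloc_bif_timer label when the pool is empty. A compilable sketch of that try-pool-then-heap fallback, with invented pool sizes and names:

    #include <stdio.h>
    #include <stdlib.h>

    #define POOL_SLOTS 2                    /* made-up pool size for illustration */

    static char pool[POOL_SLOTS][64];
    static int  pool_busy[POOL_SLOTS];

    static void *pool_alloc(void)
    {
        for (int i = 0; i < POOL_SLOTS; i++)
            if (!pool_busy[i]) { pool_busy[i] = 1; return pool[i]; }
        return NULL;                        /* pool exhausted */
    }

    static void *timer_alloc(int short_time, size_t size)
    {
        void *p = short_time ? pool_alloc() : NULL;
        if (!p)
            p = malloc(size);               /* the alloc_bif_timer fallback path */
        return p;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            printf("timer %d at %p\n", i, timer_alloc(1, 64));
        return 0;
    }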
/*
* Basic high level timer stuff
*/
-static ERTS_INLINE void
-hl_timer_destroy(ErtsHLTimer *tmr)
-{
- Uint32 roflgs = tmr->head.roflgs;
- if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
- else {
- if (roflgs & ERTS_TMR_ROFLG_PRE_ALC)
- bif_timer_pre_free(tmr);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- else if (roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- erts_free(ERTS_ALC_T_ABIF_TIMER, tmr);
-#endif
- else
- erts_free(ERTS_ALC_T_BIF_TIMER, tmr);
- }
-}
-
static void
scheduled_hl_timer_destroy(void *vtmr)
{
- hl_timer_destroy((ErtsHLTimer *) vtmr);
+ ErtsTimer * tmr = (ErtsTimer *) vtmr;
+ int btm = !!(tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+ timer_destroy((ErtsTimer *) vtmr, 0, btm);
}
static void
@@ -948,25 +1150,25 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
ERTS_HLT_ASSERT(erts_smp_atomic32_read_nob(&tmr->head.refc) == 0);
if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
- ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
+ ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name));
}
else if (roflgs & ERTS_TMR_ROFLG_PROC) {
- ERTS_HLT_ASSERT(tmr->receiver.proc);
- erts_proc_dec_refc(tmr->receiver.proc);
+ ERTS_HLT_ASSERT(tmr->head.receiver.proc);
+ erts_proc_dec_refc(tmr->head.receiver.proc);
}
else if (roflgs & ERTS_TMR_ROFLG_PORT) {
- ERTS_HLT_ASSERT(tmr->receiver.port);
- erts_port_dec_refc(tmr->receiver.port);
+ ERTS_HLT_ASSERT(tmr->head.receiver.port);
+ erts_port_dec_refc(tmr->head.receiver.port);
}
if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
- size = ERTS_HL_PTIMER_SIZE;
- else {
- /*
- * Message buffer can be dropped at
- * once...
- */
size = sizeof(ErtsHLTimer);
+ else {
+ /* Message buffer already dropped... */
+ size = sizeof(ErtsBifTimer);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ size += sizeof(ErtsMagicIndirectionWord);
+#endif
}
erts_schedule_thr_prgr_later_cleanup_op(
@@ -975,18 +1177,6 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
}
static ERTS_INLINE void
-hl_timer_pre_dec_refc(ErtsHLTimer *tmr)
-{
-#ifdef ERTS_HLT_DEBUG
- erts_aint_t refc;
- refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc);
- ERTS_HLT_ASSERT(refc > 0);
-#else
- erts_smp_atomic32_dec_nob(&tmr->head.refc);
-#endif
-}
-
-static ERTS_INLINE void
hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
{
if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
@@ -1018,39 +1208,135 @@ check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
#endif
}
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
-static void
-hlt_delete_abtm(ErtsHLTimer *tmr)
+static int
+bif_timer_ref_destructor(Binary *unused)
{
- Process *proc;
+ return 1;
+}
- ERTS_HLT_ASSERT(tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR);
+static ERTS_INLINE void
+btm_clear_magic_binary(ErtsBifTimer *tmr)
+{
+ erts_smp_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin);
+ Uint32 roflgs = tmr->type.head.roflgs;
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t tval = erts_smp_atomic_xchg_nob(aptr,
+ (erts_aint_t) NULL);
+ ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr);
+#else
+ erts_smp_atomic_set_nob(aptr, (erts_aint_t) NULL);
+#endif
+ if (roflgs & ERTS_TMR_ROFLG_HLT)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
+}
- proc = erts_proc_lookup(tmr->abtm.accessor);
+#endif /* ERTS_MAGIC_REF_BIF_TIMERS */
- if (proc) {
- int deref = 0;
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
- if (tmr->abtm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- abtm_rbt_delete(&proc->accessor_bif_timers, tmr);
- deref = 1;
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
- if (deref)
- hl_timer_pre_dec_refc(tmr);
+static ERTS_INLINE void
+bif_timer_timeout(ErtsHLTimerService *srv,
+ ErtsBifTimer *tmr,
+ Uint32 roflgs)
+{
+ erts_aint32_t state;
+
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs);
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
+ ERTS_TMR_STATE_TIMED_OUT,
+ ERTS_TMR_STATE_ACTIVE);
+
+ ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
+ || state == ERTS_TMR_STATE_ACTIVE);
+
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+ Process *proc;
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
+ if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
+ Eterm term;
+ term = tmr->type.head.receiver.name;
+ ERTS_HLT_ASSERT(is_atom(term));
+ term = erts_whereis_name_to_id(NULL, term);
+ proc = erts_proc_lookup(term);
+ }
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(proc);
+ }
+ if (proc) {
+ if (!ERTS_PROC_IS_EXITING(proc)) {
+ int dec_refc = 0;
+ ErtsMessage *mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = tmr->btm.bp;
+ tmr->btm.bp = NULL;
+ erts_queue_message(proc, 0, mp, tmr->btm.message,
+ am_clock_service);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ dec_refc = 1;
+ }
+#else
+ if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ proc_btm_rbt_delete(&proc->bif_timers, tmr);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ dec_refc = 1;
+ }
+#endif
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ if (dec_refc)
+ timer_pre_dec_refc((ErtsTimer *) tmr);
+ }
+ }
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
}
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&srv->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+
+
}
+static void
+tw_bif_timer_timeout(void *vbtmp)
+{
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsHLTimerService *srv = NULL;
+#else
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsHLTimerService *srv = esdp->timer_service;
#endif
+ ErtsBifTimer *btmp = (ErtsBifTimer *) vbtmp;
+ bif_timer_timeout(srv, btmp, btmp->type.head.roflgs);
+ tw_timer_dec_refc(&btmp->type.twt);
+}
-static ErtsHLTimer *
+static ErtsTimer *
create_hl_timer(ErtsSchedulerData *esdp,
ErtsMonotonicTime timeout_pos,
int short_time, ErtsTmrType type,
- void *rcvrp, Eterm rcvr, Eterm acsr,
- Eterm msg, Uint32 *refn,
+ void *rcvrp, Eterm rcvr,
+ Eterm msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsMagicBinary *mbin,
+#else
+ Uint32 *refn,
+#endif
void (*callback)(void *), void *arg)
{
ErtsHLTimerService *srv = esdp->timer_service;
@@ -1069,7 +1355,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
if (type != ERTS_TMR_BIF) {
tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER,
- ERTS_HL_PTIMER_SIZE);
+ sizeof(ErtsHLTimer));
tmr->timeout = timeout_pos;
switch (type) {
@@ -1078,7 +1364,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
ERTS_HLT_ASSERT(is_internal_pid(rcvr));
erts_proc_inc_refc((Process *) rcvrp);
- tmr->receiver.proc = (Process *) rcvrp;
+ tmr->head.receiver.proc = (Process *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PROC;
refc = 2;
break;
@@ -1086,14 +1372,14 @@ create_hl_timer(ErtsSchedulerData *esdp,
case ERTS_TMR_PORT:
ERTS_HLT_ASSERT(is_internal_port(rcvr));
erts_port_inc_refc((Port *) rcvrp);
- tmr->receiver.port = (Port *) rcvrp;
+ tmr->head.receiver.port = (Port *) rcvrp;
roflgs |= ERTS_TMR_ROFLG_PORT;
refc = 2;
break;
case ERTS_TMR_CALLBACK:
roflgs |= ERTS_TMR_ROFLG_CALLBACK;
- tmr->receiver.callback = callback;
+ tmr->head.receiver.callback = callback;
tmr->head.u.arg = arg;
refc = 1;
break;
@@ -1105,84 +1391,47 @@ create_hl_timer(ErtsSchedulerData *esdp,
}
else { /* ERTS_TMR_BIF */
- Uint hsz;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- int is_abif_tmr = is_value(acsr) && acsr != rcvr;
-#endif
if (short_time) {
- tmr = bif_timer_pre_alloc();
+ tmr = (ErtsHLTimer *) bif_timer_pre_alloc();
if (!tmr)
goto alloc_bif_timer;
roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
}
else {
alloc_bif_timer:
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (is_abif_tmr)
- tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER,
- ERTS_ABIF_TIMER_SIZE);
- else
-#endif
- tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER,
- ERTS_BIF_TIMER_SIZE);
- }
+ tmr = (ErtsHLTimer *) erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ sizeof(ErtsBifTimer));
+ }
tmr->timeout = timeout_pos;
roflgs |= ERTS_TMR_ROFLG_BIF_TMR;
if (is_internal_pid(rcvr)) {
roflgs |= ERTS_TMR_ROFLG_PROC;
- tmr->receiver.proc = (Process *) rcvrp;
+ tmr->head.receiver.proc = (Process *) rcvrp;
refc = 2;
}
else {
ERTS_HLT_ASSERT(is_atom(rcvr));
roflgs |= ERTS_TMR_ROFLG_REG_NAME;
- tmr->receiver.name = rcvr;
+ tmr->head.receiver.name = rcvr;
refc = 1;
}
- hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
- if (!hsz) {
- tmr->btm.message = msg;
- tmr->btm.bp = NULL;
- }
- else {
- ErlHeapFragment *bp = new_message_buffer(hsz);
- Eterm *hp = bp->mem;
- tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
- tmr->btm.bp = bp;
- }
- tmr->btm.refn[0] = refn[0];
- tmr->btm.refn[1] = refn[1];
- tmr->btm.refn[2] = refn[2];
-
- tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (is_abif_tmr) {
- Process *aproc;
- roflgs |= ERTS_TMR_ROFLG_ABIF_TMR;
- tmr->abtm.accessor = acsr;
- aproc = erts_proc_lookup(acsr);
- if (!aproc)
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- else {
- refc++;
- erts_smp_proc_lock(aproc, ERTS_PROC_LOCK_BTM);
- abtm_rbt_insert(&aproc->accessor_bif_timers, tmr);
- erts_smp_proc_unlock(aproc, ERTS_PROC_LOCK_BTM);
- }
- }
+ refc += init_btm_specifics(esdp,
+ (ErtsBifTimer *) tmr,
+ msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin
+#else
+ refn
#endif
-
- btm_rbt_insert(&srv->btm_tree, tmr);
+ );
}
tmr->head.roflgs = roflgs;
erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
- erts_smp_atomic32_init_nob(&tmr->state, ERTS_TMR_STATE_ACTIVE);
if (!srv->next_timeout
|| tmr->timeout < srv->next_timeout->timeout) {
@@ -1192,7 +1441,6 @@ create_hl_timer(ErtsSchedulerData *esdp,
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
(void *) esdp,
tmr->timeout);
srv->next_timeout = tmr;
@@ -1209,79 +1457,20 @@ create_hl_timer(ErtsSchedulerData *esdp,
ERTS_HLT_HDBG_CHK_SRV(srv);
- return tmr;
-}
-
-static ERTS_INLINE void
-hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
-{
- ErtsProcLocks proc_locks = ERTS_PROC_LOCKS_MSG_SEND;
- Process *proc;
- int queued_message = 0;
- int dec_refc = 0;
- Uint32 is_reg_name = (roflgs & ERTS_TMR_ROFLG_REG_NAME);
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- hlt_delete_abtm(tmr);
-#endif
-
- if (is_reg_name) {
- Eterm pid;
- ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
- pid = erts_whereis_name_to_id(NULL, tmr->receiver.name);
- proc = erts_proc_lookup(pid);
- }
- else {
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
- ERTS_HLT_ASSERT(tmr->receiver.proc);
-
- proc = tmr->receiver.proc;
- proc_locks |= ERTS_PROC_LOCK_BTM;
- }
- if (proc) {
- erts_smp_proc_lock(proc, proc_locks);
- /*
- * If process is exiting, let it clean up
- * the btm tree by itself (it may be in
- * the middle of tree destruction).
- */
- if (!ERTS_PROC_IS_EXITING(proc)) {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = tmr->btm.bp;
- erts_queue_message(proc, proc_locks, mp,
- tmr->btm.message, am_clock_service);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND);
- queued_message = 1;
- proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND;
- tmr->btm.bp = NULL;
- if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- proc_btm_rbt_delete(&proc->bif_timers, tmr);
- tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- dec_refc = 1;
- }
- }
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
- if (dec_refc)
- hl_timer_pre_dec_refc(tmr);
- }
- if (!queued_message && tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
+ return (ErtsTimer *) tmr;
}
static ERTS_INLINE void
hlt_proc_timeout(ErtsHLTimer *tmr)
{
- if (proc_timeout_common(tmr->receiver.proc, (void *) tmr))
+ if (proc_timeout_common(tmr->head.receiver.proc, (void *) tmr))
hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
static ERTS_INLINE void
hlt_port_timeout(ErtsHLTimer *tmr)
{
- if (port_timeout_common(tmr->receiver.port, (void *) tmr))
+ if (port_timeout_common(tmr->head.receiver.port, (void *) tmr))
hl_timer_dec_refc(tmr, tmr->head.roflgs);
}
@@ -1289,41 +1478,24 @@ static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv)
{
ErtsHLTimerService *srv = (ErtsHLTimerService *) vsrv;
Uint32 roflgs;
- erts_aint32_t state;
ERTS_HLT_HDBG_CHK_SRV(srv);
roflgs = tmr->head.roflgs;
ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_HLT);
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
- ERTS_TMR_STATE_TIMED_OUT,
- ERTS_TMR_STATE_ACTIVE);
-
- ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
- || state == ERTS_TMR_STATE_ACTIVE);
-
- if (state == ERTS_TMR_STATE_ACTIVE) {
-
- if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- hlt_bif_timer_timeout(tmr, roflgs);
- else if (roflgs & ERTS_TMR_ROFLG_PROC)
- hlt_proc_timeout(tmr);
- else if (roflgs & ERTS_TMR_ROFLG_PORT)
- hlt_port_timeout(tmr);
- else {
- ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
- (*tmr->receiver.callback)(tmr->head.u.arg);
- }
-
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ bif_timer_timeout(srv, (ErtsBifTimer *) tmr, roflgs);
+ else if (roflgs & ERTS_TMR_ROFLG_PROC)
+ hlt_proc_timeout(tmr);
+ else if (roflgs & ERTS_TMR_ROFLG_PORT)
+ hlt_port_timeout(tmr);
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_CALLBACK);
+ (*tmr->head.receiver.callback)(tmr->head.u.arg);
}
tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- if ((roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- && tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- btm_rbt_delete(&srv->btm_tree, tmr);
- tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
ERTS_HLT_HDBG_CHK_SRV(srv);
@@ -1390,7 +1562,6 @@ hlt_service_timeout(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
vesdp,
tmr->timeout);
}
@@ -1402,19 +1573,6 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
ERTS_HLT_HDBG_CHK_SRV(srv);
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
-
- if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
- btm_rbt_delete(&srv->btm_tree, tmr);
- tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- }
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
- hlt_delete_abtm(tmr);
-#endif
- }
-
if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) {
/* Already removed... */
ERTS_HLT_HDBG_CHK_SRV(srv);
@@ -1460,7 +1618,6 @@ hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
erts_twheel_set_timer(esdp->timer_wheel,
&srv->service_timer,
hlt_service_timeout,
- NULL,
(void *) esdp,
smlst->timeout);
}
@@ -1485,6 +1642,17 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
== (Uint32) esdp->no);
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
+ ErtsBifTimer *btm = (ErtsBifTimer *) tmr;
+ if (btm->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, btm);
+ btm->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+ }
+#endif
+
if (roflgs & ERTS_TMR_ROFLG_HLT) {
hlt_delete_timer(esdp, &tmr->hlt);
hl_timer_dec_refc(&tmr->hlt, roflgs);
@@ -1750,57 +1918,86 @@ continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
* BIF timer specific
*/
+
Uint erts_bif_timer_memory_size(void)
{
return (Uint) 0;
}
static BIF_RETTYPE
-setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
- int short_time, Eterm rcvr, Eterm acsr,
- Eterm msg, int wrap)
+setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
+ int short_time, Eterm rcvr, Eterm msg, int wrap)
{
BIF_RETTYPE ret;
Eterm ref, tmo_msg, *hp;
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
ErtsSchedulerData *esdp;
- DeclareTmpHeap(tmp_hp, 4, c_p);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ Binary *mbin;
+#endif
+ Eterm tmp_hp[4];
+ ErtsCreateTimerFunc create_timer;
if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
goto badarg;
esdp = erts_proc_sched_data(c_p);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ mbin = erts_create_magic_indirection(bif_timer_ref_destructor);
+ hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+ ref = erts_mk_magic_ref(&hp, &c_p->off_heap, mbin);
+ ASSERT(erts_get_ref_numbers_thr_id(((ErtsMagicBinary *)mbin)->refn)
+ == (Uint32) esdp->no);
+#else
hp = HAlloc(c_p, ERTS_REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
-
- ASSERT(erts_get_ref_numbers_thr_id(
- internal_ref_numbers(ref)) == (Uint32) esdp->no);
-
- UseTmpHeap(4, c_p);
+ ASSERT(erts_get_ref_numbers_thr_id(internal_ordinary_ref_numbers(ref))
+ == (Uint32) esdp->no);
+#endif
tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;
- tmr = create_hl_timer(esdp, timeout_pos, short_time,
- ERTS_TMR_BIF, NULL, rcvr, acsr, tmo_msg,
- internal_ref_numbers(ref), NULL, NULL);
-
- UnUseTmpHeap(4, c_p);
+ create_timer = twheel ? create_tw_timer : create_hl_timer;
+ tmr = (ErtsBifTimer *) create_timer(esdp, timeout_pos,
+ short_time, ERTS_TMR_BIF,
+ NULL, rcvr, tmo_msg,
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ (ErtsMagicBinary *) mbin,
+#else
+ internal_ordinary_ref_numbers(ref),
+#endif
+ NULL, NULL);
if (is_internal_pid(rcvr)) {
Process *proc = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
rcvr, ERTS_PROC_LOCK_BTM,
ERTS_P2P_FLG_INC_REFC);
if (!proc) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#else
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
- hlt_delete_timer(esdp, tmr);
- hl_timer_destroy(tmr);
+ if (twheel)
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ else
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ timer_destroy((ErtsTimer *) tmr, twheel, 1);
}
else {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ proc_btm_list_insert(&proc->bif_timers, tmr);
+#else
proc_btm_rbt_insert(&proc->bif_timers, tmr);
+#endif
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
- tmr->receiver.proc = proc;
+ tmr->type.head.receiver.proc = proc;
}
}
@@ -1814,27 +2011,33 @@ badarg:
}
static int
-cancel_bif_timer(ErtsHLTimer *tmr)
+cancel_bif_timer(ErtsBifTimer *tmr)
{
erts_aint_t state;
Uint32 roflgs;
int res;
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
ERTS_TMR_STATE_CANCELED,
ERTS_TMR_STATE_ACTIVE);
if (state != ERTS_TMR_STATE_ACTIVE)
return 0;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+
if (tmr->btm.bp)
free_message_buffer(tmr->btm.bp);
res = -1;
- roflgs = tmr->head.roflgs;
+ roflgs = tmr->type.head.roflgs;
if (roflgs & ERTS_TMR_ROFLG_PROC) {
- Process *proc = tmr->receiver.proc;
- ERTS_HLT_ASSERT(!(tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
+ Process *proc;
+
+ proc = tmr->type.head.receiver.proc;
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
/*
@@ -1842,29 +2045,238 @@ cancel_bif_timer(ErtsHLTimer *tmr)
* the btm tree by itself (it may be in
* the middle of tree destruction).
*/
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!ERTS_PROC_IS_EXITING(proc) && tmr->btm.proc_list.next) {
+ proc_btm_list_delete(&proc->bif_timers, tmr);
+ res = 1;
+ }
+#else
if (!ERTS_PROC_IS_EXITING(proc)
&& tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
proc_btm_rbt_delete(&proc->bif_timers, tmr);
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
res = 1;
}
+#endif
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
}
return res;
}
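cancel_bif_timer() only counts as a successful cancel if it wins the ACTIVE -> CANCELED transition on tmr->btm.state; bif_timer_timeout() races it with ACTIVE -> TIMED_OUT on the same word, so exactly one side gets to free the message buffer and detach the timer. A self-contained illustration of that race using C11 atomics (placeholder names, not the ERTS API):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { TMR_ACTIVE, TMR_CANCELED, TMR_TIMED_OUT };

    /* Returns nonzero only for the caller that wins the transition away from
     * ACTIVE, mirroring the erts_smp_atomic32_cmpxchg_acqb() calls. */
    static int try_transition(_Atomic int *state, int to)
    {
        int expected = TMR_ACTIVE;
        return atomic_compare_exchange_strong(state, &expected, to);
    }

    int main(void)
    {
        _Atomic int state = TMR_ACTIVE;
        printf("cancel wins:  %d\n", try_transition(&state, TMR_CANCELED));
        printf("timeout wins: %d\n", try_transition(&state, TMR_TIMED_OUT)); /* loses */
        return 0;
    }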
+static ERTS_INLINE Sint64
+access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel)
+{
+ int cncl_res;
+ Sint64 time_left;
+ ErtsMonotonicTime timeout;
+ int is_hlt;
+
+ if (!tmr)
+ return -1;
+
+ is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ timeout = (is_hlt
+ ? tmr->type.hlt.timeout
+ : erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr));
+
+ if (!cancel) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->btm.state);
+ if (state == ERTS_TMR_STATE_ACTIVE)
+ return get_time_left(esdp, timeout);
+ return -1;
+ }
+
+ cncl_res = cancel_bif_timer(tmr);
+ if (!cncl_res)
+ return -1;
+
+ time_left = get_time_left(esdp, timeout);
+
+ if (sid != (Uint32) esdp->no) {
+ if (cncl_res > 0)
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ }
+ else {
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+#endif
+ if (is_hlt) {
+ if (cncl_res > 0)
+ hl_timer_dec_refc(&tmr->type.hlt, tmr->type.hlt.head.roflgs);
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ }
+ else {
+ if (cncl_res > 0)
+ tw_timer_dec_refc(&tmr->type.twt);
+ cancel_tw_timer(esdp, &tmr->type.twt);
+ }
+ }
+
+ return time_left;
+}
+
+static ERTS_INLINE Eterm
+return_info(Process *c_p, Sint64 time_left)
+{
+ Uint hsz;
+ Eterm *hp;
+
+ if (time_left < 0)
+ return am_false;
+
+ if (time_left <= (Sint64) MAX_SMALL)
+ return make_small((Sint) time_left);
+
+ hsz = ERTS_SINT64_HEAP_SIZE(time_left);
+ hp = HAlloc(c_p, hsz);
+ return erts_sint64_to_big(time_left, &hp);
+}
+
+static ERTS_INLINE Eterm
+send_async_info(Process *proc, ErtsProcLocks initial_locks,
+ Eterm tref, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm tag, res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ if (cancel)
+ tag = am_cancel_timer;
+ else
+ tag = am_read_timer;
+
+ ref = STORE_NC(&hp, ohp, tref);
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE3(hp, tag, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_smp_proc_unlock(proc, locks);
+
+ return am_ok;
+}
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static BIF_RETTYPE
+access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ BIF_RETTYPE ret;
+ Eterm res;
+ Sint64 time_left;
+
+ if (!is_internal_magic_ref(tref)) {
+ if (is_not_ref(tref)) {
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+ }
+ time_left = -1;
+ }
+ else {
+ ErtsMagicBinary *mbin;
+ mbin = (ErtsMagicBinary *) erts_magic_ref2bin(tref);
+ if (mbin->destructor != bif_timer_ref_destructor)
+ time_left = -1;
+ else {
+ ErtsBifTimer *tmr;
+ Uint32 sid;
+ tmr = magic_binary_to_btm(mbin);
+ sid = erts_get_ref_numbers_thr_id(internal_magic_ref_numbers(tref));
+ ASSERT(1 <= sid && sid <= erts_no_schedulers);
+ time_left = access_btm(tmr, sid, erts_proc_sched_data(c_p), cancel);
+ }
+ }
+
+ if (!info)
+ res = am_ok;
+ else if (!async)
+ res = return_info(c_p, time_left);
+ else
+ res = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
+
+ ERTS_BIF_PREP_RET(ret, res);
+
+ return ret;
+}
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+static ERTS_INLINE Eterm
+send_sync_info(Process *proc, ErtsProcLocks initial_locks,
+ Uint32 *refn, int cancel, Sint64 time_left)
+{
+ ErtsProcLocks locks = initial_locks;
+ ErtsMessage *mp;
+ Eterm res, msg, ref;
+ Uint hsz;
+ Eterm *hp;
+ ErlOffHeap *ohp;
+
+ hsz = 3 + ERTS_REF_THING_SIZE;
+
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ mp = erts_alloc_message_heap(proc, &locks, hsz, &hp, &ohp);
+
+ write_ref_thing(hp, refn[0], refn[1], refn[2]);
+ ref = make_internal_ref(hp);
+ hp += ERTS_REF_THING_SIZE;
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE2(hp, ref, res);
+
+ erts_queue_message(proc, locks, mp, msg, am_clock_service);
+
+ locks &= ~initial_locks;
+ if (locks)
+ erts_smp_proc_unlock(proc, locks);
+
+ return am_ok;
+}
+
static ERTS_INLINE Eterm
access_sched_local_btm(Process *c_p, Eterm pid,
- Eterm tref, Uint32 *trefn,
- Uint32 *rrefn,
- int async, int cancel,
- int return_res,
- int info)
+ Eterm tref, Uint32 *trefn,
+ Uint32 *rrefn,
+ int async, int cancel,
+ int return_res,
+ int info)
{
ErtsSchedulerData *esdp;
ErtsHLTimerService *srv;
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
Sint64 time_left;
Process *proc;
ErtsProcLocks proc_locks;
@@ -1884,111 +2296,40 @@ access_sched_local_btm(Process *c_p, Eterm pid,
srv = esdp->timer_service;
tmr = btm_rbt_lookup(srv->btm_tree, trefn);
- if (tmr) {
- if (!cancel) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
- if (state == ERTS_TMR_STATE_ACTIVE)
- time_left = get_time_left(esdp, tmr->timeout);
- }
- else {
- int cncl_res = cancel_bif_timer(tmr);
- if (cncl_res) {
-
- time_left = get_time_left(esdp, tmr->timeout);
- if (cncl_res > 0)
- hl_timer_dec_refc(tmr, tmr->head.roflgs);
-
- hlt_delete_timer(esdp, tmr);
- }
- }
- }
+ time_left = access_btm(tmr, (Uint32) esdp->no, esdp, cancel);
if (!info)
- return am_ok;
-
- if (return_res) {
- ERTS_HLT_ASSERT(c_p);
- if (time_left < 0)
- return am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- return make_small((Sint) time_left);
- else {
- Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
- Eterm *hp = HAlloc(c_p, hsz);
- return erts_sint64_to_big(time_left, &hp);
- }
- }
+ return am_ok;
if (c_p) {
- proc = c_p;
- proc_locks = ERTS_PROC_LOCK_MAIN;
+ proc = c_p;
+ proc_locks = ERTS_PROC_LOCK_MAIN;
}
else {
- proc = erts_proc_lookup(pid);
- proc_locks = 0;
+ proc = erts_proc_lookup(pid);
+ proc_locks = 0;
}
- if (proc) {
- Uint hsz;
- ErtsMessage *mp;
- Eterm *hp, msg, ref, result;
- ErlOffHeap *ohp;
- Uint32 *refn;
-#ifdef ERTS_HLT_DEBUG
- Eterm *hp_end;
-#endif
-
- hsz = ERTS_REF_THING_SIZE;
- if (async) {
- refn = trefn; /* timer ref */
- hsz += 4; /* 3-tuple */
- }
- else {
- refn = rrefn; /* request ref */
- hsz += 3; /* 2-tuple */
- }
-
- ERTS_HLT_ASSERT(refn);
-
- if (time_left > (Sint64) MAX_SMALL)
- hsz += ERTS_SINT64_HEAP_SIZE(time_left);
-
- mp = erts_alloc_message_heap(proc, &proc_locks,
- hsz, &hp, &ohp);
-
-#ifdef ERTS_HLT_DEBUG
- hp_end = hp + hsz;
-#endif
-
- if (time_left < 0)
- result = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- result = make_small((Sint) time_left);
- else
- result = erts_sint64_to_big(time_left, &hp);
-
- write_ref_thing(hp,
- refn[0],
- refn[1],
- refn[2]);
- ref = make_internal_ref(hp);
- hp += ERTS_REF_THING_SIZE;
-
- msg = (async
- ? TUPLE3(hp, (cancel
- ? am_cancel_timer
- : am_read_timer), ref, result)
- : TUPLE2(hp, ref, result));
-
- ERTS_HLT_ASSERT(hp + (async ? 4 : 3) == hp_end);
-
- erts_queue_message(proc, proc_locks, mp, msg, am_clock_service);
-
- if (c_p)
- proc_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
+ if (!async) {
+ if (c_p)
+ return return_info(c_p, time_left);
+
+ if (proc)
+ return send_sync_info(proc, proc_locks,
+ rrefn, cancel, time_left);
+ }
+ else if (proc) {
+ Eterm ref;
+ Eterm heap[ERTS_REF_THING_SIZE];
+ if (is_value(tref))
+ ref = tref;
+ else {
+ write_ref_thing(&heap[0], trefn[0], trefn[1], trefn[2]);
+ ref = make_internal_ref(&heap[0]);
+ }
+ return send_async_info(proc, proc_locks,
+ ref, cancel, time_left);
}
return am_ok;
@@ -2021,108 +2362,64 @@ bif_timer_access_request(void *vreq)
static int
try_access_sched_remote_btm(ErtsSchedulerData *esdp,
Process *c_p, Uint32 sid,
- Uint32 *trefn,
+ Eterm tref, Uint32 *trefn,
int async, int cancel,
int info, Eterm *resp)
{
- ErtsHLTimer *tmr;
+ ErtsBifTimer *tmr;
Sint64 time_left;
ERTS_HLT_ASSERT(c_p);
/*
* Check if the timer is aimed at current
- * process of if this process is an accessor
- * of the timer...
+ * process...
*/
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (!tmr)
- tmr = abtm_rbt_lookup(c_p->accessor_bif_timers, trefn);
-#endif
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
if (!tmr)
return 0;
- if (!cancel) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
- if (state == ERTS_TMR_STATE_ACTIVE)
- time_left = get_time_left(esdp, tmr->timeout);
- else
- time_left = -1;
- }
- else {
- int cncl_res = cancel_bif_timer(tmr);
- if (!cncl_res)
- time_left = -1;
- else {
- time_left = get_time_left(esdp, tmr->timeout);
- if (cncl_res > 0)
- queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
- }
- }
+ time_left = access_btm(tmr, sid, esdp, cancel);
- if (!info) {
+ if (!info)
*resp = am_ok;
- return 1;
- }
-
- if (!async) {
- if (time_left < 0)
- *resp = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- *resp = make_small((Sint) time_left);
- else {
- Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
- Eterm *hp = HAlloc(c_p, hsz);
- *resp = erts_sint64_to_big(time_left, &hp);
- }
- }
- else {
- ErtsMessage *mp;
- Eterm tag, res, msg, tref;
- Uint hsz;
- Eterm *hp;
- ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
- ErlOffHeap *ohp;
-
- hsz = 4 + ERTS_REF_THING_SIZE;
- if (time_left > (Sint64) MAX_SMALL)
- hsz += ERTS_SINT64_HEAP_SIZE(time_left);
-
- mp = erts_alloc_message_heap(c_p, &proc_locks,
- hsz, &hp, &ohp);
- if (cancel)
- tag = am_cancel_timer;
- else
- tag = am_read_timer;
-
- write_ref_thing(hp,
- trefn[0],
- trefn[1],
- trefn[2]);
- tref = make_internal_ref(hp);
- hp += ERTS_REF_THING_SIZE;
-
- if (time_left < 0)
- res = am_false;
- else if (time_left <= (Sint64) MAX_SMALL)
- res = make_small((Sint) time_left);
- else
- res = erts_sint64_to_big(time_left, &hp);
-
- msg = TUPLE3(hp, tag, tref, res);
+ else if (!async)
+ *resp = return_info(c_p, time_left);
+ else
+ *resp = send_async_info(c_p, ERTS_PROC_LOCK_MAIN,
+ tref, cancel, time_left);
- erts_queue_message(c_p, proc_locks, mp, msg, am_clock_service);
+ return 1;
+}
- proc_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (proc_locks)
- erts_smp_proc_unlock(c_p, proc_locks);
+static Eterm
+no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ ErtsMessage *mp;
+ Uint hsz;
+ Eterm *hp, msg, ref, tag;
+ ErlOffHeap *ohp;
+ ErtsProcLocks locks;
- *resp = am_ok;
- }
- return 1;
+ if (!async)
+ return am_false;
+ if (!info)
+ return am_ok;
+
+ hsz = 4;
+ hsz += NC_HEAP_SIZE(tref);
+ locks = ERTS_PROC_LOCK_MAIN;
+ mp = erts_alloc_message_heap(c_p, &locks, hsz, &hp, &ohp);
+ ref = STORE_NC(&hp, ohp, tref);
+ tag = cancel ? am_cancel_timer : am_read_timer;
+ msg = TUPLE3(hp, tag, ref, am_false);
+ erts_queue_message(c_p, locks, mp, msg, am_clock_service);
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (locks)
+ erts_smp_proc_unlock(c_p, locks);
+ return am_ok;
}
static BIF_RETTYPE
@@ -2156,7 +2453,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
ERTS_BIF_PREP_RET(ret, res);
}
else if (try_access_sched_remote_btm(esdp, c_p,
- sid, trefn,
+ sid, tref, trefn,
async, cancel,
info, &res)) {
ERTS_BIF_PREP_RET(ret, res);
@@ -2235,11 +2532,11 @@ badarg:
return ret;
no_timer:
- ERTS_BIF_PREP_RET(ret, am_false);
- return ret;
-
+ return no_timer_result(c_p, tref, cancel, async, info);
}
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
static ERTS_INLINE int
bool_arg(Eterm val, int *argp)
{
@@ -2251,8 +2548,8 @@ bool_arg(Eterm val, int *argp)
}
static ERTS_INLINE int
-parse_bif_timer_options(Eterm option_list, int *async, int *info,
- int *abs, Eterm *accessor)
+parse_bif_timer_options(Eterm option_list, int *async,
+ int *info, int *abs)
{
Eterm list = option_list;
@@ -2262,8 +2559,6 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
*info = 1;
if (abs)
*abs = 0;
- if (accessor)
- *accessor = THE_NON_VALUE;
while (is_list(list)) {
Eterm *consp, *tp, opt;
@@ -2290,13 +2585,6 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
if (!abs || !bool_arg(tp[2], abs))
return 0;
break;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- case am_accessor:
- if (!accessor || is_not_internal_pid(tp[2]))
- return 0;
- *accessor = tp[2];
- break;
-#endif
default:
return 0;
}
@@ -2310,42 +2598,57 @@ parse_bif_timer_options(Eterm option_list, int *async, int *info,
}
static void
-exit_cancel_bif_timer(ErtsHLTimer *tmr, void *vesdp)
+exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
{
ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
Uint32 sid, roflgs;
erts_aint_t state;
+ int is_hlt;
- state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
ERTS_TMR_STATE_CANCELED,
ERTS_TMR_STATE_ACTIVE);
- roflgs = tmr->head.roflgs;
+ roflgs = tmr->type.head.roflgs;
sid = roflgs & ERTS_TMR_ROFLG_SID_MASK;
+ is_hlt = !!(roflgs & ERTS_TMR_ROFLG_HLT);
- ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(tmr->btm.refn));
+ ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(ERTS_BTM_HLT2REFN(tmr)));
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(tmr->btm.proc_list.next);
+#else
ERTS_HLT_ASSERT(tmr->btm.proc_tree.parent
!= ERTS_HLT_PFIELD_NOT_IN_TABLE);
-
tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+#endif
- if (sid == (Uint32) esdp->no) {
- if (state == ERTS_TMR_STATE_ACTIVE) {
- if (tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
- hlt_delete_timer(esdp, tmr);
- }
- hl_timer_dec_refc(tmr, roflgs);
- }
- else {
- if (state == ERTS_TMR_STATE_ACTIVE) {
- if (tmr->btm.bp)
- free_message_buffer(tmr->btm.bp);
- queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ btm_clear_magic_binary(tmr);
+#endif
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+
+ if (sid != (Uint32) esdp->no) {
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ return;
+ }
+
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&esdp->timer_service->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
}
- else
- hl_timer_dec_refc(tmr, roflgs);
+#endif
+ if (is_hlt)
+ hlt_delete_timer(esdp, &tmr->type.hlt);
+ else
+ cancel_tw_timer(esdp, &tmr->type.twt);
}
+ if (is_hlt)
+ hl_timer_dec_refc(&tmr->type.hlt, roflgs);
+ else
+ tw_timer_dec_refc(&tmr->type.twt);
}
#ifdef ERTS_HLT_DEBUG
@@ -2354,20 +2657,29 @@ exit_cancel_bif_timer(ErtsHLTimer *tmr, void *vesdp)
# define ERTS_BTM_MAX_DESTROY_LIMIT 50
#endif
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
typedef struct {
ErtsBifTimers *bif_timers;
union {
proc_btm_rbt_yield_state_t proc_btm_yield_state;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- abtm_rbt_yield_state_t abtm_yield_state;
-#endif
} u;
} ErtsBifTimerYieldState;
+#endif
-int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
+int erts_cancel_bif_timers(Process *p, ErtsBifTimers **btm, void **vyspp)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(p);
- ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
+
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+ return proc_btm_list_foreach_destroy_yielding(btm,
+ exit_cancel_bif_timer,
+ (void *) esdp,
+ ERTS_BTM_MAX_DESTROY_LIMIT);
+
+#else /* !ERTS_MAGIC_REF_BIF_TIMERS */
+
+ ErtsBifTimerYieldState ys = {*btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2399,63 +2711,18 @@ int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
}
return res;
-}
-
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
-
-static void
-detach_bif_timer(ErtsHLTimer *tmr, void *vesdp)
-{
- tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
- hl_timer_dec_refc(tmr, tmr->head.roflgs);
-}
-
-int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
-{
- ErtsSchedulerData *esdp = erts_proc_sched_data(p);
- ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
- ErtsBifTimerYieldState *ysp;
- int res;
-
- ysp = (ErtsBifTimerYieldState *) *vyspp;
- if (!ysp)
- ysp = &ys;
-
- res = abtm_rbt_foreach_destroy_yielding(&ysp->bif_timers,
- detach_bif_timer,
- (void *) esdp,
- &ysp->u.abtm_yield_state,
- ERTS_BTM_MAX_DESTROY_LIMIT);
-
- if (res == 0) {
- if (ysp != &ys)
- erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
- *vyspp = NULL;
- }
- else {
-
- if (ysp == &ys) {
- ysp = erts_alloc(ERTS_ALC_T_BTM_YIELD_STATE,
- sizeof(ErtsBifTimerYieldState));
- sys_memcpy((void *) ysp, (void *) &ys,
- sizeof(ErtsBifTimerYieldState));
- }
-
- *vyspp = (void *) ysp;
- }
- return res;
+#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
}
-#endif /* ERTS_BTM_ACCESSOR_SUPPORT */
-
static ERTS_INLINE int
parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
ErtsMonotonicTime *conv_arg, int abs,
- ErtsMonotonicTime *tposp, int *stimep)
+ ErtsMonotonicTime *tposp, int *stimep,
+ ErtsMonotonicTime *msp)
{
- ErtsMonotonicTime t;
-
+ ErtsMonotonicTime t, now;
+
if (!term_to_Sint64(arg, &t)) {
ERTS_HLT_ASSERT(!is_small(arg));
if (!is_big(arg))
@@ -2470,22 +2737,30 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
if (conv_arg)
*conv_arg = t;
+ now = erts_get_monotonic_time(esdp);
+
if (abs) {
t += -1*ERTS_MONOTONIC_OFFSET_MSEC; /* external to internal */
if (t < ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN))
return 1;
if (t > ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_END))
return 1;
+ if (msp)
+ *msp = t - ERTS_MONOTONIC_TO_MSEC(now);
+
*stimep = (t - ERTS_MONOTONIC_TO_MSEC(esdp->last_monotonic_time)
< ERTS_BIF_TIMER_SHORT_TIME);
*tposp = ERTS_MSEC_TO_CLKTCKS(t);
}
else {
- ErtsMonotonicTime now, ticks;
+ ErtsMonotonicTime ticks;
if (t < 0)
return -1;
+ if (msp)
+ *msp = t;
+
ticks = ERTS_MSEC_TO_CLKTCKS(t);
if (ERTS_CLKTCK_RESOLUTION > 1000 && ticks < 0)
@@ -2493,7 +2768,6 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
ERTS_HLT_ASSERT(ticks >= 0);
- now = erts_get_monotonic_time(esdp);
ticks += ERTS_MONOTONIC_TO_CLKTCKS(now-1);
ticks += 1;
@@ -2516,66 +2790,68 @@ parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
BIF_RETTYPE send_after_3(BIF_ALIST_3)
{
- ErtsMonotonicTime timeout_pos;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, tres;
- tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- 0, &timeout_pos, &short_time);
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1,
+ NULL, 0, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, 0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
}
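send_after_3 and the other setup BIFs below now choose the timer implementation from the requested delay: parse_timeout_pos() additionally reports the timeout in milliseconds, and anything below ERTS_TIMER_WHEEL_MSEC is placed directly on the scheduler's timer wheel while longer timeouts become high-level timers. A compilable sketch of the dispatch-by-function-pointer choice (stand-in types and constant, not the real ErtsCreateTimerFunc signature):

    #include <stdio.h>

    typedef void *(*create_timer_fn)(long timeout_ms);

    static void *create_wheel_timer(long ms) { printf("wheel timer, %ld ms\n", ms); return NULL; }
    static void *create_high_timer(long ms)  { printf("high-level timer, %ld ms\n", ms); return NULL; }

    #define TIMER_WHEEL_MSEC 65536L   /* placeholder for ERTS_TIMER_WHEEL_MSEC */

    int main(void)
    {
        long tmo = 5000;
        create_timer_fn create =
            (tmo < TIMER_WHEEL_MSEC) ? create_wheel_timer : create_high_timer;
        (void) create(tmo);
        return 0;
    }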
BIF_RETTYPE send_after_4(BIF_ALIST_4)
{
- ErtsMonotonicTime timeout_pos;
- Eterm accessor;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, abs, tres;
- if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
BIF_ERROR(BIF_P, BADARG);
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- abs, &timeout_pos, &short_time);
+ abs, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, accessor, BIF_ARG_3, 0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, 0);
}
BIF_RETTYPE start_timer_3(BIF_ALIST_3)
{
- ErtsMonotonicTime timeout_pos;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, tres;
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- 0, &timeout_pos, &short_time);
+ 0, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, !0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
}
BIF_RETTYPE start_timer_4(BIF_ALIST_4)
{
- ErtsMonotonicTime timeout_pos;
- Eterm accessor;
+ ErtsMonotonicTime timeout_pos, tmo;
int short_time, abs, tres;
- if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs))
BIF_ERROR(BIF_P, BADARG);
tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
- abs, &timeout_pos, &short_time);
+ abs, &timeout_pos, &short_time, &tmo);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
- return setup_bif_timer(BIF_P, timeout_pos, short_time,
- BIF_ARG_2, accessor, BIF_ARG_3, !0);
+ return setup_bif_timer(BIF_P, tmo < ERTS_TIMER_WHEEL_MSEC,
+ timeout_pos, short_time, BIF_ARG_2,
+ BIF_ARG_3, !0);
}
BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
@@ -2588,7 +2864,7 @@ BIF_RETTYPE cancel_timer_2(BIF_ALIST_2)
BIF_RETTYPE ret;
int async, info;
- if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL, NULL))
+ if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL))
return access_bif_timer(BIF_P, BIF_ARG_1, 1, async, info);
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
@@ -2605,7 +2881,7 @@ BIF_RETTYPE read_timer_2(BIF_ALIST_2)
BIF_RETTYPE ret;
int async;
- if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL, NULL))
+ if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL))
return access_bif_timer(BIF_P, BIF_ARG_1, 0, async, 1);
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
@@ -2620,14 +2896,13 @@ start_callback_timer(ErtsSchedulerData *esdp,
void *arg)
{
- if (twt)
- create_tw_timer(esdp, ERTS_TMR_CALLBACK, NULL,
- callback, arg, timeout_pos);
- else
- create_hl_timer(esdp, timeout_pos, 0,
- ERTS_TMR_CALLBACK, NULL,
- NIL, THE_NON_VALUE, NIL,
- NULL, callback, arg);
+ ErtsCreateTimerFunc create_timer = (twt
+ ? create_tw_timer
+ : create_hl_timer);
+ (void) create_timer(esdp, timeout_pos, 0,
+ ERTS_TMR_CALLBACK, NULL,
+ NIL, THE_NON_VALUE, NULL,
+ callback, arg);
}
typedef struct {
@@ -2704,18 +2979,18 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
if (tmo == 0)
c_p->flags |= F_TIMO;
else {
+ ErtsCreateTimerFunc create_timer;
c_p->flags |= F_INSLPQUEUE;
c_p->flags &= ~F_TIMO;
- if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PROC, (void *) c_p,
- NULL, NULL, timeout_pos);
- else
- tmr = (void *) create_hl_timer(esdp, timeout_pos, short_time,
- ERTS_TMR_PROC, (void *) c_p,
- c_p->common.id, THE_NON_VALUE,
- NIL, NULL, NULL, NULL);
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, short_time,
+ ERTS_TMR_PROC, (void *) c_p,
+ c_p->common.id, THE_NON_VALUE,
+ NULL, NULL, NULL);
erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
}
}
@@ -2731,7 +3006,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo)
== ERTS_PTMR_NONE);
tres = parse_timeout_pos(esdp, etmo, &tmo, 0,
- &timeout_pos, &short_time);
+ &timeout_pos, &short_time, NULL);
if (tres != 0)
return tres;
@@ -2789,6 +3064,7 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
void *tmr;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsMonotonicTime timeout_pos;
+ ErtsCreateTimerFunc create_timer;
if (erts_smp_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
erts_cancel_port_timer(c_prt);
@@ -2797,13 +3073,12 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
- if (tmo < ERTS_TIMER_WHEEL_MSEC)
- tmr = (void *) create_tw_timer(esdp, ERTS_TMR_PORT, (void *) c_prt,
- NULL, NULL, timeout_pos);
- else
- tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
- (void *) c_prt, c_prt->common.id,
- THE_NON_VALUE, NIL, NULL, NULL, NULL);
+ create_timer = (tmo < ERTS_TIMER_WHEEL_MSEC
+ ? create_tw_timer
+ : create_hl_timer);
+ tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
+ (void *) c_prt, c_prt->common.id,
+ THE_NON_VALUE, NULL, NULL, NULL);
erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
}
@@ -2842,7 +3117,7 @@ erts_read_port_timer(Port *c_prt)
if (tmr->head.roflgs & ERTS_TMR_ROFLG_HLT)
timeout_pos = tmr->hlt.timeout;
else
- timeout_pos = tmr->twt.tw_tmr.timeout_pos;
+ timeout_pos = erts_tweel_read_timeout(&tmr->twt.u.tw_tmr);
return get_time_left(NULL, timeout_pos);
}
@@ -2857,20 +3132,35 @@ typedef struct {
} ErtsBTMPrint;
static void
-btm_print(ErtsHLTimer *tmr, void *vbtmp)
+btm_print(ErtsBifTimer *tmr, void *vbtmp, ErtsMonotonicTime tpos, int is_hlt)
{
ErtsBTMPrint *btmp = (ErtsBTMPrint *) vbtmp;
ErtsMonotonicTime left;
Eterm receiver;
- if (tmr->timeout <= btmp->now)
- left = 0;
- else
- left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
- receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
- ? tmr->receiver.name
- : tmr->receiver.proc->common.id);
+ if (is_hlt) {
+ ERTS_HLT_ASSERT(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ if (tmr->type.hlt.timeout <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tmr->type.hlt.timeout - btmp->now);
+ }
+ else {
+ ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT));
+ if (tpos <= btmp->now)
+ left = 0;
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tpos - btmp->now);
+ }
+
+ receiver = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
erts_print(btmp->to, btmp->to_arg,
"=timer:%T\n"
@@ -2881,6 +3171,36 @@ btm_print(ErtsHLTimer *tmr, void *vbtmp)
(Sint64) left);
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_btm_print(ErtsHLTimer *tmr, void *vbtmp)
+{
+ btm_print((ErtsBifTimer *) tmr, vbtmp, 0, 1);
+}
+
+static void
+twt_btm_print(void *vbtmp, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ btm_print((ErtsBifTimer *) vtwtp, vbtmp, tpos, 0);
+}
+
+#else
+
+static void
+btm_tree_print(ErtsBifTimer *tmr, void *vbtmp)
+{
+ int is_hlt = !!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_HLT);
+ ErtsMonotonicTime tpos;
+ if (is_hlt)
+ tpos = 0;
+ else
+ tpos = erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr);
+ btm_print(tmr, vbtmp, tpos, is_hlt);
+}
+
+#endif
+
void
erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
{
@@ -2898,7 +3218,15 @@ erts_print_bif_timer_info(fmtfn_t to, void *to_arg)
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
- btm_rbt_foreach(srv->btm_tree, btm_print, (void *) &btmp);
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_btm_print, (void *) &btmp);
+ time_rbt_foreach(srv->time_tree, hlt_btm_print, (void *) &btmp);
+#else
+ btm_rbt_foreach(srv->btm_tree, btm_tree_print, (void *) &btmp);
+#endif
}
}
@@ -2911,19 +3239,37 @@ typedef struct {
} ErtsBTMForeachDebug;
static void
-debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
{
- if (erts_smp_atomic32_read_nob(&tmr->state) == ERTS_TMR_STATE_ACTIVE) {
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ return;
+#endif
+ if (erts_smp_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
- (*btmfd->func)(((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
- ? tmr->receiver.name
- : tmr->receiver.proc->common.id),
- tmr->btm.message,
- tmr->btm.bp,
- btmfd->arg);
+ Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->type.head.receiver.name
+ : tmr->type.head.receiver.proc->common.id);
+ (*btmfd->func)(id, tmr->btm.message, tmr->btm.bp, btmfd->arg);
}
}
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+
+static void
+hlt_debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+{
+ debug_btm_foreach((ErtsBifTimer *) tmr, vbtmfd);
+}
+
+static void
+twt_debug_btm_foreach(void *vbtmfd, ErtsMonotonicTime tpos, void *vtwtp)
+{
+ debug_btm_foreach((ErtsBifTimer *) vtwtp, vbtmfd);
+}
+
+#endif
+
void
erts_debug_bif_timer_foreach(void (*func)(Eterm,
Eterm,
@@ -2943,9 +3289,20 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm,
for (six = 0; six < erts_no_schedulers; six++) {
ErtsHLTimerService *srv =
erts_aligned_scheduler_data[six].esd.timer_service;
+#ifdef ERTS_MAGIC_REF_BIF_TIMERS
+ ErtsTimerWheel *twheel =
+ erts_aligned_scheduler_data[six].esd.timer_wheel;
+ erts_twheel_debug_foreach(twheel, tw_bif_timer_timeout,
+ twt_debug_btm_foreach,
+ (void *) &btmfd);
+ time_rbt_foreach(srv->time_tree,
+ hlt_debug_btm_foreach,
+ (void *) &btmfd);
+#else
btm_rbt_foreach(srv->btm_tree,
debug_btm_foreach,
(void *) &btmfd);
+#endif
}
}
@@ -2964,7 +3321,7 @@ debug_callback_timer_foreach_list(ErtsHLTimer *tmr, void *vdfct)
= (ErtsDebugForeachCallbackTimer *) vdfct;
if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
- && (tmr->receiver.callback == dfct->tclbk))
+ && (tmr->head.receiver.callback == dfct->tclbk))
(*dfct->func)(dfct->arg,
tmr->timeout,
tmr->head.u.arg);
@@ -2982,7 +3339,7 @@ debug_callback_timer_foreach(ErtsHLTimer *tmr, void *vdfct)
vdfct);
if ((tmr->head.roflgs & ERTS_TMR_ROFLG_CALLBACK)
- && (tmr->receiver.callback == dfct->tclbk))
+ && (tmr->head.receiver.callback == dfct->tclbk))
(*dfct->func)(dfct->arg,
tmr->timeout,
tmr->head.u.arg);
@@ -2997,7 +3354,7 @@ debug_tw_callback_timer(void *vdfct,
ErtsDebugForeachCallbackTimer *dfct
= (ErtsDebugForeachCallbackTimer *) vdfct;
- if (twtp->u.callback == dfct->tclbk)
+ if (twtp->head.receiver.callback == dfct->tclbk)
(*dfct->func)(dfct->arg,
timeout_pos,
twtp->head.u.arg);
@@ -3068,7 +3425,9 @@ st_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
}
ERTS_HLT_ASSERT(tmr->time.tree.u.l.next->time.tree.u.l.prev == tmr);
ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev->time.tree.u.l.next == tmr);
- ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
}
static void
@@ -3097,8 +3456,10 @@ tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
& ~ERTS_HLT_PFLGS_MASK);
ERTS_HLT_ASSERT(tmr == prnt);
}
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)
- ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, ERTS_BTM_HLT2REFN(tmr)) == tmr);
+#endif
if (tmr->time.tree.same_time) {
ErtsHdbgHLT st_hdbg;
st_hdbg.srv = hdbg->srv;
@@ -3164,6 +3525,7 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
time_rbt_foreach(srv->time_tree, tt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
+#ifndef ERTS_MAGIC_REF_BIF_TIMERS
if (srv->btm_tree) {
ErtsHdbgHLT hdbg;
hdbg.srv = srv;
@@ -3172,6 +3534,7 @@ hdbg_chk_srv(ErtsHLTimerService *srv)
btm_rbt_foreach(srv->btm_tree, bt_hdbg_func, (void *) &hdbg);
ERTS_HLT_ASSERT(hdbg.found_root);
}
+#endif
}
#endif /* ERTS_HLT_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
index 9cdcd581a0..f70fcdd1c0 100644
--- a/erts/emulator/beam/erl_hl_timer.h
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -21,7 +21,7 @@
#ifndef ERL_HL_TIMER_H__
#define ERL_HL_TIMER_H__
-typedef struct ErtsHLTimer_ ErtsBifTimers;
+typedef struct ErtsBifTimer_ ErtsBifTimers;
typedef struct ErtsHLTimerService_ ErtsHLTimerService;
#include "sys.h"
@@ -56,7 +56,7 @@ void erts_cancel_proc_timer(Process *);
void erts_set_port_timer(Port *, Sint64);
void erts_cancel_port_timer(Port *);
Sint64 erts_read_port_timer(Port *);
-int erts_cancel_bif_timers(Process *, ErtsBifTimers *, void **);
+int erts_cancel_bif_timers(Process *, ErtsBifTimers **, void **);
int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **);
ErtsHLTimerService *erts_create_timer_service(void);
void erts_hl_timer_init(void);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 541bfec532..eaaf5c911a 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -61,8 +61,9 @@
#define ERTS_DEFAULT_NO_ASYNC_THREADS 10
-#define ERTS_DEFAULT_SCHED_STACK_SIZE 256
-#define ERTS_MIN_SCHED_STACK_SIZE 20
+#define ERTS_DEFAULT_SCHED_STACK_SIZE 128
+#define ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE 40
+#define ERTS_DEFAULT_DIO_SCHED_STACK_SIZE 40
/*
* The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands
@@ -641,9 +642,22 @@ void erts_usage(void)
erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n");
erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n");
erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n");
- erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ erts_fprintf(stderr, " valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_SCHED_STACK_SIZE);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_fprintf(stderr, "-sssdcpu size suggested stack size in kilo words for dirty CPU scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
+ ERTS_SCHED_THREAD_MIN_STACK_SIZE,
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE);
+ erts_fprintf(stderr, "-sssdio size suggested stack size in kilo words for dirty IO scheduler\n");
+ erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
- ERTS_SCHED_THREAD_MAX_STACK_SIZE);
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE,
+ ERTS_DEFAULT_DIO_SCHED_STACK_SIZE);
+#endif
erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
erts_fprintf(stderr, " schedulers online (n2), maximum for both\n");
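The two new switches give dirty CPU and dirty IO scheduler threads their own suggested stack sizes, with noticeably smaller defaults (40 kilo words) than the normal scheduler default (128 kilo words). As exposed through erl they take the usual '+' prefix, so a command line using them could look like the following (values in kilo words; an illustrative example, not taken from this diff):

    erl +sss 128 +sssdcpu 40 +sssdio 40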
@@ -1327,6 +1341,10 @@ erl_start(int argc, char **argv)
* a lot of stack.
*/
erts_sched_thread_suggested_stack_size = ERTS_DEFAULT_SCHED_STACK_SIZE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE;
+ erts_dio_sched_thread_suggested_stack_size = ERTS_DEFAULT_DIO_SCHED_STACK_SIZE;
+#endif
#ifdef DEBUG
verbose = DEBUG_DEFAULT;
@@ -1945,6 +1963,42 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM,
("scheduler wakeup threshold: %s\n", arg));
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (has_prefix("ssdcpu", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty CPU scheduler threads */
+ arg = get_arg(sub_param+6, argv[i+1], &i);
+ erts_dcpu_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dcpu_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dcpu_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty CPU scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty CPU scheduler thread stack size %d kilo words\n",
+ erts_dcpu_sched_thread_suggested_stack_size));
+ }
+ else if (has_prefix("ssdio", sub_param)) {
+ /* suggested stack size (Kilo Words) for dirty IO scheduler threads */
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ erts_dio_sched_thread_suggested_stack_size = atoi(arg);
+
+ if ((erts_dio_sched_thread_suggested_stack_size
+ < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ || (erts_dio_sched_thread_suggested_stack_size >
+ ERTS_SCHED_THREAD_MAX_STACK_SIZE)) {
+ erts_fprintf(stderr, "bad stack size for dirty IO scheduler threads %s\n",
+ arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM,
+ ("suggested dirty IO scheduler thread stack size %d kilo words\n",
+ erts_dio_sched_thread_suggested_stack_size));
+ }
+#endif
else if (has_prefix("ss", sub_param)) {
/* suggested stack size (Kilo Words) for scheduler threads */
arg = get_arg(sub_param+2, argv[i+1], &i);
@@ -2259,8 +2313,14 @@ erl_start(int argc, char **argv)
boot_argc = argc - i; /* Number of arguments to init */
boot_argv = &argv[i];
- if (erts_sched_thread_suggested_stack_size < ERTS_MIN_SCHED_STACK_SIZE)
- erts_sched_thread_suggested_stack_size = ERTS_MIN_SCHED_STACK_SIZE;
+ if (erts_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (erts_dcpu_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dcpu_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+ if (erts_dio_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
+ erts_dio_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
+#endif
erl_init(ncpu,
proc_tab_sz,
@@ -2334,6 +2394,7 @@ erl_start(int argc, char **argv)
set_main_stack_size();
erts_sched_init_time_sup(esdp);
erts_ets_sched_spec_data_init(esdp);
+ erts_aux_work_timeout_late_init(esdp);
process_main(esdp->x_reg_array, esdp->f_reg_array);
}
#endif
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index f181c1e3cb..17982a2d14 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -167,9 +167,7 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
for (u.hdr = offheap->first; u.hdr; u.hdr = u.hdr->next) {
switch (thing_subtag(u.hdr->thing_word)) {
case REFC_BINARY_SUBTAG:
- if (erts_refc_dectest(&u.pb->val->refc, 0) == 0) {
- erts_bin_free(u.pb->val);
- }
+ erts_bin_release(u.pb->val);
break;
case FUN_SUBTAG:
if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) {
@@ -178,8 +176,7 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
break;
case REF_SUBTAG:
ASSERT(is_magic_ref_thing(u.hdr));
- if (erts_refc_dectest(&u.mref->mb->refc, 0) == 0)
- erts_bin_free((Binary *)u.mref->mb);
+ erts_bin_release((Binary *)u.mref->mb);
break;
default:
ASSERT(is_external_header(u.hdr->thing_word));
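The repeated "decrement-and-test, then erts_bin_free()" sequences are folded into erts_bin_release() throughout this commit. Its definition is not part of this hunk; a minimal sketch, assuming it simply wraps the old pattern around the relocated reference count:

    ERTS_GLB_INLINE void
    erts_bin_release(Binary *bp)
    {
        /* drop one reference; free the binary when the last one is gone */
        if (erts_refc_dectest(&bp->intern.refc, 0) == 0)
            erts_bin_free(bp);
    }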
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index f86b9739fa..872b58d1ef 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1074,7 +1074,6 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
if (refbin == NULL) {
return 0; /* The NIF must take action */
}
- erts_refc_init(&refbin->refc, 1);
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
@@ -1113,9 +1112,7 @@ void enif_release_binary(ErlNifBinary* bin)
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
ASSERT(bin->bin_term == THE_NON_VALUE);
- if (erts_refc_dectest(&refbin->refc, 0) == 0) {
- erts_bin_free(refbin);
- }
+ erts_bin_release(refbin);
}
#ifdef DEBUG
bin->data = NULL;
@@ -1279,7 +1276,7 @@ Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
bin_term = make_binary(pb);
- if (erts_refc_read(&bptr->refc, 1) == 1) {
+ if (erts_refc_read(&bptr->intern.refc, 1) == 1) {
/* Total ownership transfer */
bin->ref_bin = NULL;
bin->bin_term = bin_term;
@@ -2249,7 +2246,7 @@ static int nif_resource_dtor(Binary* bin)
ASSERT(type->down);
erts_smp_mtx_lock(&rm->lock);
- ASSERT(erts_refc_read(&bin->refc, 0) == 0);
+ ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
if (rm->root) {
ASSERT(!rm->is_dying);
destroy_all_monitors(rm->root, resource);
@@ -2323,13 +2320,13 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
erts_smp_mtx_unlock(&rmp->lock);
if (free_me) {
- ASSERT(erts_refc_read(&bin->binary.refc, 0) == 0);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0);
erts_bin_free(&bin->binary);
}
return;
}
ASSERT(!rmp->is_dying);
- if (erts_refc_inc_unless(&bin->binary.refc, 0, 0) == 0) {
+ if (erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0) == 0) {
/*
* Racing resource destruction.
* To avoid a more complex refc-dance with destructing thread
@@ -2348,9 +2345,7 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
post_nif_noproc(&msg_env);
- if (erts_refc_dectest(&bin->binary.refc, 0) == 0) {
- erts_bin_free(&bin->binary);
- }
+ erts_bin_release(&bin->binary);
}
erts_destroy_monitor(rmon);
}
@@ -2379,7 +2374,7 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
ASSERT(type->owner && type->next && type->prev); /* not allowed in load/upgrade */
resource->type = type;
- erts_refc_inc(&bin->refc, 1);
+ erts_refc_inc(&bin->intern.refc, 1);
#ifdef DEBUG
erts_refc_init(&resource->nif_refc, 1);
#endif
@@ -2408,9 +2403,7 @@ void enif_release_resource(void* obj)
#ifdef DEBUG
erts_refc_dec(&resource->nif_refc, 0);
#endif
- if (erts_refc_dectest(&bin->binary.refc, 0) == 0) {
- erts_bin_free(&bin->binary);
- }
+ erts_bin_release(&bin->binary);
}
void enif_keep_resource(void* obj)
@@ -2423,7 +2416,7 @@ void enif_keep_resource(void* obj)
#ifdef DEBUG
erts_refc_inc(&resource->nif_refc, 1);
#endif
- erts_refc_inc(&bin->binary.refc, 2);
+ erts_refc_inc(&bin->binary.intern.refc, 2);
}
Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
@@ -2460,7 +2453,7 @@ ERL_NIF_TERM enif_make_resource_binary(ErlNifEnv* env, void* obj,
pb->flags = 0;
OH_OVERHEAD(ohp, size / sizeof(Eterm));
- erts_refc_inc(&bin->binary.refc, 1);
+ erts_refc_inc(&bin->binary.intern.refc, 1);
return make_binary(hp);
}
@@ -2485,7 +2478,7 @@ int enif_get_resource(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifResourceType* typ
}
*/
mbin = ((ProcBin *) hp)->val;
- if (!(mbin->flags & BIN_FLAG_MAGIC))
+ if (!(mbin->intern.flags & BIN_FLAG_MAGIC))
return 0;
}
resource = (ErtsResource*) ERTS_MAGIC_BIN_UNALIGNED_DATA(mbin);
@@ -3658,11 +3651,11 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ci = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
code_ptr = erts_codeinfo_to_code(ci);
- if (ci->native == 0) {
+ if (ci->u.gen_bp == NULL) {
code_ptr[0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- GenericBp* g = (GenericBp *) ci->native;
+ GenericBp* g = ci->u.gen_bp;
ASSERT(code_ptr[0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
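Note that the explicit erts_refc_init(&bin->refc, 1) calls after binary allocation disappear in this commit, and external.c below gains ASSERT(erts_refc_read(&result_bin->intern.refc, 1)) checks right after allocation. Together this suggests erts_bin_nrml_alloc()/erts_bin_drv_alloc_fnf() now return binaries with the reference count already set to one. A short usage sketch under that assumption (size and data are caller-supplied placeholders):

    Binary *bp = erts_bin_nrml_alloc(size);  /* assumed: refc == 1 on return */
    sys_memcpy(bp->orig_bytes, data, size);  /* fill in the payload          */
    /* ... attach to a ProcBin or, if not used after all, drop the ref: */
    erts_bin_release(bp);                    /* frees when refc hits zero    */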
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 894e0ee582..9947e33f47 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -194,7 +194,10 @@ static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_O
ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
int erts_sched_thread_suggested_stack_size = -1;
-
+#ifdef ERTS_DIRTY_SCHEDULERS
+int erts_dcpu_sched_thread_suggested_stack_size = -1;
+int erts_dio_sched_thread_suggested_stack_size = -1;
+#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
@@ -562,7 +565,6 @@ static int stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
-static void aux_work_timeout_late_init(void);
static void setup_aux_work_timer(ErtsSchedulerData *esdp);
static int execute_sys_tasks(Process *c_p,
@@ -2792,6 +2794,9 @@ typedef struct {
int initialized;
erts_atomic32_t refc;
+#ifdef DEBUG
+ erts_atomic32_t used;
+#endif
erts_atomic32_t type[1];
} ErtsAuxWorkTmo;
@@ -2801,6 +2806,13 @@ static ERTS_INLINE void
start_aux_work_timer(ErtsSchedulerData *esdp)
{
ErtsMonotonicTime tmo = erts_get_monotonic_time(esdp);
+#ifdef DEBUG
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used,
+ (erts_aint32_t) esdp->no);
+ ASSERT(esdp->type == ERTS_SCHED_NORMAL);
+ ASSERT(!no);
+#endif
+
tmo = ERTS_MONOTONIC_TO_CLKTCKS(tmo-1);
tmo += ERTS_MSEC_TO_CLKTCKS(1000) + 1;
erts_twheel_init_timer(&aux_work_tmo->timer.data);
@@ -2808,7 +2820,6 @@ start_aux_work_timer(ErtsSchedulerData *esdp)
erts_twheel_set_timer(esdp->timer_wheel,
&aux_work_tmo->timer.data,
aux_work_timeout,
- NULL,
(void *) esdp,
tmo);
}
@@ -2837,16 +2848,19 @@ aux_work_timeout_early_init(int no_schedulers)
aux_work_tmo = (ErtsAuxWorkTmo *) p;
aux_work_tmo->initialized = 0;
erts_atomic32_init_nob(&aux_work_tmo->refc, 0);
+#ifdef DEBUG
+ erts_atomic32_init_nob(&aux_work_tmo->used, 0);
+#endif
for (i = 0; i <= no_schedulers; i++)
erts_atomic32_init_nob(&aux_work_tmo->type[i], 0);
}
void
-aux_work_timeout_late_init(void)
+erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp)
{
aux_work_tmo->initialized = 1;
- if (erts_atomic32_read_nob(&aux_work_tmo->refc))
- start_aux_work_timer(erts_get_scheduler_data());
+ if (erts_atomic32_read_acqb(&aux_work_tmo->refc))
+ start_aux_work_timer(esdp);
}
static void
@@ -2854,6 +2868,13 @@ aux_work_timeout(void *vesdp)
{
erts_aint32_t refc;
int i;
+#ifdef DEBUG
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Uint no = (Uint) erts_atomic32_xchg_mb(&aux_work_tmo->used, 0);
+ ASSERT(no == esdp->no);
+ ASSERT(esdp == (ErtsSchedulerData *) vesdp);
+#endif
+
#ifdef ERTS_SMP
i = 0;
#else
@@ -6073,6 +6094,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
runq->scheduler = esdp;
esdp->run_queue = runq;
esdp->no = (Uint) num;
+ esdp->type = ERTS_SCHED_NORMAL;
#endif
esdp->ssi = ssi;
@@ -6467,8 +6489,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
/* init port tasks */
erts_port_task_init();
- aux_work_timeout_late_init();
-
#ifndef ERTS_SMP
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
erts_scheduler_data->verify_unused_temp_alloc
@@ -8754,6 +8774,9 @@ sched_thread_func(void *vesdp)
erts_sched_init_time_sup(esdp);
+ if (no == 1)
+ erts_aux_work_timeout_late_init(esdp);
+
(void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue,
ERTS_RUNQ_FLG_EXEC);
@@ -8979,6 +9002,7 @@ erts_start_schedulers(void)
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dcpu_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d\n", ix);
@@ -8986,6 +9010,7 @@ erts_start_schedulers(void)
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
erts_snprintf(opts.name, 16, "%d_dirty_io_scheduler", ix + 1);
+ opts.suggested_stack_size = erts_dio_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d\n", ix);
@@ -12429,9 +12454,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->msg_inq.len = 0;
#endif
p->bif_timers = NULL;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- p->accessor_bif_timers = NULL;
-#endif
p->mbuf = NULL;
p->msg_frag = NULL;
p->mbuf_sz = 0;
@@ -12630,9 +12652,6 @@ void erts_init_empty_process(Process *p)
p->msg.save = &p->msg.first;
p->msg.len = 0;
p->bif_timers = NULL;
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- p->accessor_bif_timers = NULL;
-#endif
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
@@ -12733,9 +12752,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
ASSERT(p->bif_timers == NULL);
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- ASSERT(p->accessor_bif_timers == NULL);
-#endif
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -13801,7 +13817,7 @@ erts_continue_exit_process(Process *p)
ASSERT(erts_proc_read_refc(p) > 0);
if (p->bif_timers) {
- if (erts_cancel_bif_timers(p, p->bif_timers, &p->u.terminate)) {
+ if (erts_cancel_bif_timers(p, &p->bif_timers, &p->u.terminate)) {
ASSERT(erts_proc_read_refc(p) > 0);
goto yield;
}
@@ -13809,19 +13825,6 @@ erts_continue_exit_process(Process *p)
p->bif_timers = NULL;
}
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- if (p->accessor_bif_timers) {
- if (erts_detach_accessor_bif_timers(p,
- p->accessor_bif_timers,
- &p->u.terminate)) {
- ASSERT(erts_proc_read_refc(p) > 0);
- goto yield;
- }
- ASSERT(erts_proc_read_refc(p) > 0);
- p->accessor_bif_timers = NULL;
- }
-#endif
-
#ifdef ERTS_SMP
if (p->flags & F_SCHDLR_ONLN_WAITQ)
abort_sched_onln_chng_waitq(p);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 883d9f2a4c..5b35dc3c78 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -117,7 +117,11 @@ extern Uint erts_no_dirty_io_schedulers;
#endif
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
-#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 4 /* Kilo words */
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern int erts_dcpu_sched_thread_suggested_stack_size;
+extern int erts_dio_sched_thread_suggested_stack_size;
+#endif
+#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
#ifdef ERTS_SMP
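The minimum suggested scheduler stack size is raised here from 4 to 20 kilo words, with the defaults set in erl_init.c above at 128 kilo words for normal schedulers and 40 kilo words for dirty CPU/IO schedulers. Assuming 8-byte words on a 64-bit system and a kilo word of 1024 words, that corresponds to roughly 1 MB, 320 KB and 160 KB (minimum) of stack per thread, respectively.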
@@ -1026,9 +1030,6 @@ struct process {
ErlMessageQueue msg; /* Message queue */
ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
-#ifdef ERTS_BTM_ACCESSOR_SUPPORT
- ErtsBifTimers *accessor_bif_timers; /* Accessor bif timers */
-#endif
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -1830,6 +1831,7 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
void (*func)(void *),
void *arg);
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
+void erts_aux_work_timeout_late_init(ErtsSchedulerData *esdp);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int, int, int);
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 77c7c5e73c..fbf14df92b 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -447,8 +447,8 @@ heap_dump(fmtfn_t to, void *to_arg, Eterm x)
ProcBin* pb = (ProcBin *) binary_val(x);
Binary* val = pb->val;
- if (erts_atomic_xchg_nob(&val->refc, 0) != 0) {
- val->flags = (UWord) all_binaries;
+ if (erts_atomic_xchg_nob(&val->intern.refc, 0) != 0) {
+ val->intern.flags = (UWord) all_binaries;
all_binaries = val;
}
erts_print(to, to_arg,
@@ -529,7 +529,7 @@ dump_binaries(fmtfn_t to, void *to_arg, Binary* current)
erts_print(to, to_arg, "%02X", bytes[i]);
}
erts_putc(to, to_arg, '\n');
- current = (Binary *) current->flags;
+ current = (Binary *) current->intern.flags;
}
}
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index a1c4220633..46d6da6448 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -21,19 +21,52 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
-/* timer wheel size NEED to be a power of 2 */
-#ifdef SMALL_MEMORY
-#define ERTS_TIW_SIZE (1 << 13)
-#else
-#define ERTS_TIW_SIZE (1 << 16)
+#if 0
+# define ERTS_TW_DEBUG
+#endif
+#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
+# define ERTS_TW_DEBUG
#endif
-#if defined(DEBUG) || 0
+#if defined(ERTS_TW_DEBUG)
#define ERTS_TIME_ASSERT(B) ERTS_ASSERT(B)
#else
#define ERTS_TIME_ASSERT(B) ((void) 1)
#endif
+#ifdef ERTS_TW_DEBUG
+/*
+ * Soon wheel will handle about 1 second
+ * Later wheel will handle about 8 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 10
+# define ERTS_TW_LATER_WHEEL_BITS 10
+#else
+# ifdef SMALL_MEMORY
+/*
+ * Soon wheel will handle about 4 seconds
+ * Later wheel will handle about 2 hours and 19 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 12
+# define ERTS_TW_LATER_WHEEL_BITS 12
+# else
+/*
+ * Soon wheel will handle about 16 seconds
+ * Later wheel will handle about 37 hours and 16 minutes
+ */
+# define ERTS_TW_SOON_WHEEL_BITS 14
+# define ERTS_TW_LATER_WHEEL_BITS 14
+# endif
+#endif
+
+/*
+ * Number of slots in each timer wheel...
+ *
+ * These *need* to be a power of 2
+ */
+#define ERTS_TW_SOON_WHEEL_SIZE (1 << ERTS_TW_SOON_WHEEL_BITS)
+#define ERTS_TW_LATER_WHEEL_SIZE (1 << ERTS_TW_LATER_WHEEL_BITS)
+
typedef enum {
ERTS_NO_TIME_WARP_MODE,
ERTS_SINGLE_TIME_WARP_MODE,
@@ -103,7 +136,10 @@ Eterm erts_system_time_source(struct process*c_p);
#define ERTS_CLKTCK_RESOLUTION (erts_time_sup__.r.o.clktck_resolution)
#endif
-#define ERTS_TIMER_WHEEL_MSEC (ERTS_TIW_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_TW_SOON_WHEEL_MSEC (ERTS_TW_SOON_WHEEL_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+#define ERTS_TW_LATER_WHEEL_MSEC (ERTS_TW_LATER_WHEEL_SIZE*ERTS_TW_SOON_WHEEL_MSEC/2)
+
+#define ERTS_TIMER_WHEEL_MSEC ERTS_TW_LATER_WHEEL_MSEC
struct erts_time_sup_read_only__ {
ErtsMonotonicTime monotonic_time_unit;
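With the default configuration (14-bit wheels) and the 1 millisecond clock tick described in time.c below, these macros work out to: ERTS_TW_SOON_WHEEL_MSEC = 2^14 ms ≈ 16.4 seconds, a later wheel slot of 2^13 ms ≈ 8.2 seconds, and ERTS_TW_LATER_WHEEL_MSEC = 2^14 × 2^13 ms ≈ 1.34 × 10^8 ms ≈ 37.3 hours, matching the per-wheel spans quoted in the comments above.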
@@ -412,34 +448,25 @@ erts_time_unit_conversion(Uint64 value,
void erts_sched_init_time_sup(ErtsSchedulerData *esdp);
-#define ERTS_TWHEEL_SLOT_AT_ONCE -1
-#define ERTS_TWHEEL_SLOT_INACTIVE -2
+#define ERTS_TW_SLOT_INACTIVE (-2)
/*
** Timer entry:
*/
typedef struct erl_timer {
- struct erl_timer* next; /* next entry tiw slot or chain */
- struct erl_timer* prev; /* prev entry tiw slot or chain */
- union {
- struct {
- void (*timeout)(void*); /* called when timeout */
- void (*cancel)(void*); /* called when cancel (may be NULL) */
- void* arg; /* argument to timeout/cancel procs */
- } func;
- ErtsThrPrgrLaterOp cleanup;
- } u;
ErtsMonotonicTime timeout_pos; /* Timeout in absolute clock ticks */
+ struct erl_timer* next; /* next entry tiw slot or chain */
+ struct erl_timer* prev; /* prev entry tiw slot or chain */
+ void (*timeout)(void*); /* called when timeout */
+ void* arg; /* argument to timeout/cancel procs */
int slot;
} ErtsTWheelTimer;
typedef void (*ErlTimeoutProc)(void*);
-typedef void (*ErlCancelProc)(void*);
void erts_twheel_set_timer(ErtsTimerWheel *tiw,
ErtsTWheelTimer *p, ErlTimeoutProc timeout,
- ErlCancelProc cancel, void *arg,
- ErtsMonotonicTime timeout_pos);
+ void *arg, ErtsMonotonicTime timeout_pos);
void erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p);
ErtsTimerWheel *erts_create_timer_wheel(ErtsSchedulerData *esdp);
@@ -447,12 +474,13 @@ ErtsMonotonicTime erts_check_next_timeout_time(ErtsSchedulerData *);
ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p);
ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef);
+ERTS_GLB_INLINE ErtsMonotonicTime erts_tweel_read_timeout(ErtsTWheelTimer *twt);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p)
{
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ p->slot = ERTS_TW_SLOT_INACTIVE;
}
ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_tmo_ref)
@@ -460,6 +488,12 @@ ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_
return *((ErtsMonotonicTime *) nxt_tmo_ref);
}
+ERTS_GLB_INLINE ErtsMonotonicTime
+erts_tweel_read_timeout(ErtsTWheelTimer *twt)
+{
+ return twt->timeout_pos;
+}
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
void
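With the cancel callback removed and the timeout callback plus its argument stored directly in ErtsTWheelTimer, arming a wheel timer follows the pattern of the call sites updated later in this diff. A sketch; my_timeout, my_arg and timeout_pos are placeholders, not identifiers from the patch:

    static void my_timeout(void *arg);          /* hypothetical timeout callback */

    erts_twheel_init_timer(&tmr);               /* marks the timer inactive      */
    erts_twheel_set_timer(esdp->timer_wheel,
                          &tmr,                 /* timer to arm                  */
                          my_timeout,           /* called on timeout             */
                          my_arg,               /* passed to the callback        */
                          timeout_pos);         /* absolute time in clock ticks  */
    /* ... */
    erts_twheel_cancel_timer(esdp->timer_wheel, &tmr);  /* no cancel callback now */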
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index cf9d3adc86..c69fec3c80 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -678,7 +678,6 @@ check_time_correction(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_time_correction,
- NULL,
(void *) esdp,
timeout_pos);
}
@@ -729,7 +728,6 @@ check_time_offset(void *vesdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_time_offset,
- NULL,
vesdp,
timeout_pos);
}
@@ -836,7 +834,6 @@ late_init_time_correction(ErtsSchedulerData *esdp)
erts_twheel_set_timer(esdp->timer_wheel,
&time_sup.inf.c.parmon.timer,
check_func,
- NULL,
(quick_init_drift_adj
? NULL
: esdp),
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 04f3160d42..870f1f142d 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1875,7 +1875,6 @@ trace_port_tmp_binary(char *bin, Sint sz, Binary **bptrp, Eterm **hp)
} else {
ProcBin* pb = (ProcBin *)*hp;
Binary *bptr = erts_bin_nrml_alloc(sz);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, bin, sz);
pb->thing_word = HEADER_PROC_BIN;
pb->size = sz;
@@ -2000,8 +1999,8 @@ trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
TRACE_FUN_T_RECEIVE,
am_receive, data, THE_NON_VALUE, am_true);
- if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
- erts_bin_free(bptr);
+ if (bptr)
+ erts_bin_release(bptr);
if (orig_hp)
erts_free(ERTS_ALC_T_TMP, orig_hp);
@@ -2051,8 +2050,8 @@ void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
am_send, msg, to, am_true);
- if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
- erts_bin_free(bptr);
+ if (bptr)
+ erts_bin_release(bptr);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 33ed6d7ec1..ecfef28c57 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -132,7 +132,7 @@ export_alloc(struct export_entry* tmpl_e)
erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
obj = &blob->exp;
obj->info.op = 0;
- obj->info.native = 0;
+ obj->info.u.gen_bp = NULL;
obj->info.mfa.module = tmpl->info.mfa.module;
obj->info.mfa.function = tmpl->info.mfa.function;
obj->info.mfa.arity = tmpl->info.mfa.arity;
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 285ae4ac78..f5a5da981c 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1821,7 +1821,7 @@ static int ttb_context_destructor(Binary *context_bin)
case TTBEncode:
DESTROY_SAVED_WSTACK(&context->s.ec.wstack);
if (context->s.ec.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.ec.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.ec.result_bin->intern.refc),1));
erts_bin_free(context->s.ec.result_bin);
context->s.ec.result_bin = NULL;
}
@@ -1830,13 +1830,13 @@ static int ttb_context_destructor(Binary *context_bin)
erl_zlib_deflate_finish(&(context->s.cc.stream));
if (context->s.cc.destination_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.destination_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.destination_bin->intern.refc),1));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
}
if (context->s.cc.result_bin != NULL) { /* Set to NULL if ever made alive! */
- ASSERT(erts_refc_read(&(context->s.cc.result_bin->refc),0) == 0);
+ ASSERT(erts_refc_read(&(context->s.cc.result_bin->intern.refc),1));
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
}
@@ -1920,7 +1920,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
result_bin = erts_bin_nrml_alloc(size);
- erts_refc_init(&result_bin->refc, 0);
result_bin->orig_bytes[0] = VERSION_MAGIC;
/* Next state immediately, no need to export context */
context->state = TTBEncode;
@@ -1960,8 +1959,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -1980,7 +1978,6 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
context->s.cc.result_bin = result_bin;
result_bin = erts_bin_nrml_alloc(real_size);
- erts_refc_init(&result_bin->refc, 0);
result_bin->orig_bytes[0] = VERSION_MAGIC;
context->s.cc.destination_bin = result_bin;
@@ -2028,15 +2025,15 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->next = MSO(p).first;
MSO(p).first = (struct erl_off_heap_header*)pb;
pb->val = result_bin;
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
erts_bin_free(context->s.cc.result_bin);
context->s.cc.result_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -2055,13 +2052,13 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
pb->bytes = (byte*) result_bin->orig_bytes;
pb->flags = 0;
OH_OVERHEAD(&(MSO(p)), pb->size / sizeof(Eterm));
- erts_refc_inc(&result_bin->refc, 1);
+ ASSERT(erts_refc_read(&result_bin->intern.refc, 1));
erl_zlib_deflate_finish(&(context->s.cc.stream));
erts_bin_free(context->s.cc.destination_bin);
context->s.cc.destination_bin = NULL;
context->alive = 0;
BUMP_REDS(p, (this_time * CONTEXT_REDS) / TERM_TO_BINARY_COMPRESS_CHUNK);
- if (context_b && erts_refc_read(&context_b->refc,0) == 0) {
+ if (context_b && erts_refc_read(&context_b->intern.refc,0) == 0) {
erts_bin_free(context_b);
}
return make_binary(pb);
@@ -2749,7 +2746,7 @@ enc_term_int(TTBEncodeContext* ctx, ErtsAtomCacheMap *acmp, Eterm obj, byte* ep,
erts_emasculate_writable_binary(pb);
bytes += (pb->val->orig_bytes - before_realloc);
}
- erts_refc_inc(&pb->val->refc, 2);
+ erts_refc_inc(&pb->val->intern.refc, 2);
sys_memcpy(&tmp, pb, sizeof(ProcBin));
tmp.next = *off_heap;
@@ -3509,7 +3506,6 @@ dec_term_atom_common:
} else {
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
hp += PROC_BIN_SIZE;
pb->thing_word = HEADER_PROC_BIN;
@@ -3562,7 +3558,6 @@ dec_term_atom_common:
Binary* dbin = erts_bin_nrml_alloc(n);
ProcBin* pb;
- erts_refc_init(&dbin->refc, 1);
pb = (ProcBin *) hp;
pb->thing_word = HEADER_PROC_BIN;
pb->size = n;
@@ -3865,7 +3860,7 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
pb->next = factory->off_heap->first;
factory->off_heap->first = (struct erl_off_heap_header*)pb;
@@ -3883,7 +3878,7 @@ dec_term_atom_common:
sys_memcpy(pb, ep, sizeof(ProcBin));
ep += sizeof(ProcBin);
- erts_refc_inc(&pb->val->refc, 1);
+ erts_refc_inc(&pb->val->intern.refc, 1);
hp += PROC_BIN_SIZE;
pb->next = factory->off_heap->first;
factory->off_heap->first = (struct erl_off_heap_header*)pb;
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 139394680a..bb4d442240 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1425,14 +1425,14 @@ Eterm erts_msacc_request(Process *c_p, int action, Eterm *threads);
#define MatchSetRef(MPSP) \
do { \
if ((MPSP) != NULL) { \
- erts_refc_inc(&(MPSP)->refc, 1); \
+ erts_refc_inc(&(MPSP)->intern.refc, 1); \
} \
} while (0)
#define MatchSetUnref(MPSP) \
do { \
- if (((MPSP) != NULL) && erts_refc_dectest(&(MPSP)->refc, 0) <= 0) { \
- erts_bin_free(MPSP); \
+ if (((MPSP) != NULL)) { \
+ erts_bin_release(MPSP); \
} \
} while(0)
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index ddff862607..2f3117223f 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -3791,7 +3791,6 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
Binary* bptr;
bptr = erts_bin_nrml_alloc(len);
- erts_refc_init(&bptr->refc, 1);
sys_memcpy(bptr->orig_bytes, buf, len);
pb = (ProcBin *) hp;
@@ -4558,8 +4557,7 @@ static void
cleanup_scheduled_control(Binary *binp, char *bufp)
{
if (binp) {
- if (erts_refc_dectest(&binp->refc, 0) == 0)
- erts_bin_free(binp);
+ erts_bin_release(binp);
}
else {
if (bufp)
@@ -4903,7 +4901,7 @@ erts_port_control(Process* c_p,
ASSERT(bufp <= bufp + size);
ASSERT(binp->orig_bytes <= bufp
&& bufp + size <= binp->orig_bytes + binp->orig_size);
- erts_refc_inc(&binp->refc, 1);
+ erts_refc_inc(&binp->intern.refc, 1);
}
}
@@ -6400,7 +6398,6 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
ProcBin* pbp;
Binary* bp = erts_bin_nrml_alloc(size);
ASSERT(bufp);
- erts_refc_init(&bp->refc, 1);
sys_memcpy((void *) bp->orig_bytes, (void *) bufp, size);
pbp = (ProcBin *) erts_produce_heap(&factory,
PROC_BIN_SIZE, HEAP_EXTRA);
@@ -6910,21 +6907,21 @@ ErlDrvSInt
driver_binary_get_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_read(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_read(&bp->intern.refc, 1);
}
ErlDrvSInt
driver_binary_inc_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_inctest(&bp->refc, 2);
+ return (ErlDrvSInt) erts_refc_inctest(&bp->intern.refc, 2);
}
ErlDrvSInt
driver_binary_dec_refc(ErlDrvBinary *dbp)
{
Binary* bp = ErlDrvBinary2Binary(dbp);
- return (ErlDrvSInt) erts_refc_dectest(&bp->refc, 1);
+ return (ErlDrvSInt) erts_refc_dectest(&bp->intern.refc, 1);
}
@@ -6940,7 +6937,6 @@ driver_alloc_binary(ErlDrvSizeT size)
bin = erts_bin_drv_alloc_fnf((Uint) size);
if (!bin)
return NULL; /* The driver write must take action */
- erts_refc_init(&bin->refc, 1);
return Binary2ErlDrvBinary(bin);
}
@@ -6970,8 +6966,7 @@ void driver_free_binary(ErlDrvBinary* dbin)
return;
bin = ErlDrvBinary2Binary(dbin);
- if (erts_refc_dectest(&bin->refc, 0) == 0)
- erts_bin_free(bin);
+ erts_bin_release(bin);
}
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 6f15082130..a7e5a64b22 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -17,57 +17,157 @@
*
* %CopyrightEnd%
*/
-
+
/*
- * TIMING WHEEL
+ * TIMER WHEEL
+ *
+ *
+ * The time scale used for timers is Erlang monotonic time. The
+ * time unit used is ERTS specific clock ticks. A clock tick is
+ * currently defined as 1 millisecond. That is, the resolution of
+ * timers triggered by the runtime system is 1 millisecond.
*
- * Timeouts kept in an wheel. A timeout is measured relative to the
- * current slot (tiw_pos) in the wheel, and inserted at slot
- * (tiw_pos + timeout) % TIW_SIZE. Each timeout also has a count
- * equal to timeout/TIW_SIZE, which is needed since the time axis
- * is wrapped arount the wheel.
+ * When a timer is set, it is determined at what Erlang monotonic
+ * time, in clock ticks, it should be triggered.
*
- * Several slots may be processed in one operation. If the number of
- * slots is greater that the wheel size, the wheel is only traversed
- * once,
+ * The 'pos' field of the wheel corresponds to the current time of
+ * the wheel. That is, it corresponds to Erlang monotonic time in
+ * clock tick time unit. The 'pos' field of the wheel is
+ * monotonically increased when erts_bump_timers() is called. All
+ * timers in the wheel that have a time less than or equal to
+ * 'pos' are triggered by the bump operation. The bump operation
+ * may however be spread over multiple calls to erts_bump_timers()
+ * if there are lots of timers to trigger.
*
- * The following example shows a time axis where there is one timeout
- * at each "tick", and where 1, 2, 3 ... wheel slots are released in
- * one operation. The notation "<x" means "release all items with
- * counts less than x".
+ * Each scheduler thread maintains its own timer wheel. The timer
+ * wheel of a scheduler, however, actually consists of two wheels:
+ * a soon wheel and a later wheel.
+ *
+ *
+ * -- The Soon Wheel --
+ *
+ * The soon wheel contains timers that should be triggered soon.
+ * That is, they are soon to be triggered. Each slot in the soon
+ * wheel is 1 clock tick wide. The number of slots in the soon
+ * wheel is currently 2¹⁴. That is, it contains timers in the
+ * range ('pos', 'pos' + 2¹⁴] which corresponds to a bit more
+ * than 16 seconds.
+ *
+ * When the bump operation is started, 'pos' is moved forward to a
+ * position that corresponds to current Erlang monotonic time. Then
+ * all timers that are in the range (old 'pos', new 'pos'] are
+ * triggered. During a bump operation, the soon wheel may contain
+ * timers in the two, possibly overlapping, ranges (old 'pos',
+ * old 'pos' + 2¹⁴], and (new 'pos', new 'pos' + 2¹⁴]. This may
+ * occur even if the bump operation doesn't yield, due to timeout
+ * callbacks inserting new timers.
+ *
+ *
+ * -- The Later Wheel --
+ *
+ * The later wheel contains timers that are further away from 'pos'
+ * than the width of the soon timer wheel. That is, currently
+ * timers further away from 'pos' than 2¹⁴ clock ticks. The width
+ * of each slot in the later wheel is half the width of the soon
+ * wheel. That is, each slot is currently 2¹³ clock ticks wide
+ * which corresponds to about 8 seconds. If three timers of the
+ * times 'pos' + 17000, 'pos' + 18000, and 'pos' + 19000 are
+ * inserted, they will all end up in the same slot in the later
+ * wheel.
+ *
+ * The number of slots in the later wheel is currently the same as
+ * in the soon wheel, i.e. 2¹⁴. That is, one revolution of the later
+ * wheel currently corresponds to 2¹⁴×2¹³ clock ticks which is
+ * almost 37 ½ hour. Timers even further away than that are put in
+ * the later slot identified by their time modulo the size of the later
+ * wheel. Such timers are however very uncommon. Most timers used
+ * by the runtime system will utilize the high level timer API.
+ * The high level timer implementation will not insert timers
+ * further away than one revolution into the later wheel. It will
+ * instead keep such timers in a tree of very long timers. The
+ * high level timer implementation utilizes one timer wheel timer
+ * for the management of this tree of timers. This timer is set to
+ * the closest timeout in the tree. This timer may however be
+ * further away than one revolution in the later wheel.
+ *
+ * The 'later.pos' field identifies next position in the later wheel.
+ * 'later.pos' is always increased by the width of a later wheel slot.
+ * That is, currently 2¹³ clock ticks. When 'pos' is moved (during
+ * a bump operation) closer to 'later.pos' than the width of a later
+ * wheel slot, i.e. currently when 'pos' + 2¹³ ≥ 'later.pos', we
+ * inspect the slot identified by 'later.pos' and then move 'later.pos'
+ * forward. When inspecting the later slot we move all timers in the
+ * slot that are in the soon wheel range from the later wheel to
+ * the soon wheel. Timers one or more revolutions of the later wheel
+ * away are kept in the slot.
+ *
+ * During normal operation, timers originally located in the later
+ * wheel will currently be moved into the soon wheel about 8 to
+ * 16 seconds before they should be triggered. During extremely
+ * heavy load, the scheduler might however be heavily delayed, so
+ * the code must be prepared for situations where the time for
+ * triggering the timer has passed when we inspect the later wheel
+ * slot, and then trigger the timer immediately. We must also be
+ * prepared to inspect multiple later wheel slots at once due to the
+ * delay.
+ *
+ *
+ * -- Slot Management --
+ *
+ * All timers of a slot are placed in a circular double linked
+ * list. This makes insertion and removal of a timer O(1).
+ *
+ * While bumping timers in a slot, we move the circular list
+ * away from the slot, and refer to it from the 'sentinel'
+ * field. The list will stay there until we are done with it
+ * even if the bump operation should yield. The cancel operation
+ * can remove the timer from this position as well as from the
+ * slot position by just removing it from the circular double
+ * linked list that it is in.
+ *
+ * -- At Once Slot --
+ *
+ * If a timer is set with a time earlier than or equal to 'pos',
+ * it is not inserted into the wheel. It is instead inserted
+ * into a circular double linked list referred to by the "at
+ * once" slot. When the bump operation is performed these timers
+ * will be triggered at once. The circular list of the slot will
+ * be moved to the 'sentinel' field while bumping these timers,
+ * just as when bumping an ordinary wheel slot. A yielding bump
+ * operation and cancellation of timers are handled the same way
+ * as if the timer were in a wheel slot.
+ *
+ * -- Searching for Next Timeout --
+ *
+ * In order to limit the amount of work needed to find the next
+ * timeout, we keep track of the total number of timers in the
+ * wheels, the number of timers in the later wheel, the number
+ * of timers in the soon wheel, and the number of timers in
+ * each range of slots. Each slot range currently contains 512
+ * slots.
+ *
+ * When the next timeout is less than the soon wheel width away,
+ * we determine the exact timeout. Due to the timer counts of
+ * slot ranges, we currently need to search at most 1024 slots
+ * in the soon wheel. In addition to this, we inspect slot range
+ * counts and two slots in the later wheel, since moving timers
+ * from the later wheel to the soon wheel may have to be triggered
+ * earlier than any timeout in the soon wheel. We also keep track
+ * of the latest known minimum timeout position in each wheel,
+ * which makes it possible to avoid scanning from the current
+ * position each time.
+ *
+ * When the next timeout is further away than the soon wheel width,
+ * we settle for the earliest possible timeout in the first
+ * non-empty slot range. The further away the next timeout is, the
+ * more likely it is that it changes before we actually get there,
+ * e.g. because another timer is set to an earlier time and/or the
+ * timer is cancelled. There is therefore no point in determining
+ * the next timeout exactly in this case. If the state does not
+ * change, we will wake up a bit early, recalculate the next
+ * timeout, and eventually be so close to it that we can determine
+ * it exactly.
*
- * Size of wheel: 4
- *
- * --|----|----|----|----|----|----|----|----|----|----|----|----|----
- * 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0 2.1 2.2 2.3 3.0
- *
- * 1 [ )
- * <1 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0 2.1 2.2 2.3 2.0
- *
- * 2 [ )
- * <1 <1 0.2 0.3 0.0 0.1 1.2 1.3 1.0 1.1 2.2 2.3 2.0
- *
- * 3 [ )
- * <1 <1 <1 0.3 0.0 0.1 0.2 1.3 1.0 1.1 1.2 2.3 2.0
- *
- * 4 [ )
- * <1 <1 <1 <1 0.0 0.1 0.2 0.3 1.0 1.1 1.2 1.3 2.0
- *
- * 5 [ )
- * <2 <1 <1 <1. 0.1 0.2 0.3 0.0 1.1 1.2 1.3 1.0
- *
- * 6 [ )
- * <2 <2 <1 <1. 0.2 0.3 0.0 0.1 1.2 1.3 1.0
- *
- * 7 [ )
- * <2 <2 <2 <1. 0.3 0.0 0.1 0.2 1.3 1.0
- *
- * 8 [ )
- * <2 <2 <2 <2. 0.0 0.1 0.2 0.3 1.0
- *
- * 9 [ )
- * <3 <2 <2 <2. 0.1 0.2 0.3 0.0
- *
*/
#ifdef HAVE_CONFIG_H
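Expressed as code, the wheel selection described above comes down to comparing the timeout position with the wheel's current position. A rough sketch, consistent with the soon_slot()/later_slot() helpers and the assertions in insert_timer_into_slot() further down (not a verbatim excerpt of the insertion path):

    /* tpos: absolute timeout in clock ticks, tiw->pos: current wheel time */
    if (tpos <= tiw->pos)
        slot = ERTS_TW_SLOT_AT_ONCE;                   /* trigger on next bump      */
    else if (tpos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE)
        slot = soon_slot(tpos);                        /* 1 clock tick per slot     */
    else
        slot = later_slot(tpos);                       /* 2^13 clock ticks per slot */
    insert_timer_into_slot(tiw, slot, p);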
@@ -80,8 +180,11 @@
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
-#define ERTS_MONOTONIC_DAY ERTS_SEC_TO_MONOTONIC(60*60*24)
-#define ERTS_CLKTCKS_DAY ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY)
+#define ERTS_MAX_CLKTCKS \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_TIME_MAX)
+
+#define ERTS_CLKTCKS_WEEK \
+ ERTS_MONOTONIC_TO_CLKTCKS(ERTS_SEC_TO_MONOTONIC(7*60*60*24))
#ifdef ERTS_ENABLE_LOCK_CHECK
#define ASSERT_NO_LOCKED_LOCKS erts_lc_check_exact(NULL, 0)
@@ -90,6 +193,10 @@
#endif
#if 0
+# define ERTS_TW_HARD_DEBUG
+#endif
+
+#if defined(ERTS_TW_HARD_DEBUG) && !defined(ERTS_TW_DEBUG)
# define ERTS_TW_DEBUG
#endif
#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
@@ -97,16 +204,62 @@
#endif
#undef ERTS_TW_ASSERT
-#if defined(ERTS_TW_DEBUG)
+#if defined(ERTS_TW_DEBUG)
# define ERTS_TW_ASSERT(E) ERTS_ASSERT(E)
#else
# define ERTS_TW_ASSERT(E) ((void) 1)
#endif
#ifdef ERTS_TW_DEBUG
-# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 5
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 500
#else
-# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 100
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 10000
+#endif
+#define ERTS_TW_COST_SLOT 1
+#define ERTS_TW_COST_SLOT_MOVE 5
+#define ERTS_TW_COST_TIMEOUT 100
+
+/*
+ * Every slot in the soon wheel is a clock tick (as defined
+ * by ERTS) wide. A clock tick is currently 1 milli second.
+ */
+
+#define ERTS_TW_SOON_WHEEL_FIRST_SLOT 0
+#define ERTS_TW_SOON_WHEEL_END_SLOT \
+ (ERTS_TW_SOON_WHEEL_FIRST_SLOT + ERTS_TW_SOON_WHEEL_SIZE)
+
+#define ERTS_TW_SOON_WHEEL_MASK (ERTS_TW_SOON_WHEEL_SIZE-1)
+
+/*
+ * Every slot in the later wheel is as wide as half the size
+ * of the soon wheel.
+ */
+
+#define ERTS_TW_LATER_WHEEL_SHIFT (ERTS_TW_SOON_WHEEL_BITS - 1)
+#define ERTS_TW_LATER_WHEEL_SLOT_SIZE \
+ ((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT))
+#define ERTS_TW_LATER_WHEEL_POS_MASK \
+ (~((ErtsMonotonicTime) (1 << ERTS_TW_LATER_WHEEL_SHIFT)-1))
+
+#define ERTS_TW_LATER_WHEEL_FIRST_SLOT ERTS_TW_SOON_WHEEL_SIZE
+#define ERTS_TW_LATER_WHEEL_END_SLOT \
+ (ERTS_TW_LATER_WHEEL_FIRST_SLOT + ERTS_TW_LATER_WHEEL_SIZE)
+
+#define ERTS_TW_LATER_WHEEL_MASK (ERTS_TW_LATER_WHEEL_SIZE-1)
+
+#define ERTS_TW_SCNT_BITS 9
+#define ERTS_TW_SCNT_SHIFT
+#define ERTS_TW_SCNT_SIZE \
+ ((ERTS_TW_SOON_WHEEL_SIZE + ERTS_TW_LATER_WHEEL_SIZE) \
+ >> ERTS_TW_SCNT_BITS)
+
+#ifdef __GNUC__
+#if ERTS_TW_SOON_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger soon timer wheel
+#endif
+#if ERTS_TW_LATER_WHEEL_BITS < ERTS_TW_SCNT_BITS
+# warning Consider larger later timer wheel
+#endif
#endif
/* Actual interval time chosen by sys_init_time() */
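With ERTS_TW_SCNT_BITS at 9, each slot-count bucket covers 2^9 = 512 consecutive slots, and ERTS_TW_SCNT_SIZE becomes (2^14 + 2^14) >> 9 = 64 buckets for the soon and later wheels combined. These per-range counters are what let find_next_timeout() below skip whole empty ranges instead of scanning slot by slot.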
@@ -119,95 +272,360 @@ static int tiw_itime; /* Constant after init */
# define TIW_ITIME tiw_itime
#endif
+const int etp_tw_soon_wheel_size = ERTS_TW_SOON_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_soon_wheel_mask = ERTS_TW_SOON_WHEEL_MASK;
+const int etp_tw_soon_wheel_first_slot = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+
+const int etp_tw_later_wheel_size = ERTS_TW_LATER_WHEEL_SIZE;
+const ErtsMonotonicTime etp_tw_later_wheel_slot_size = ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+const int etp_tw_later_wheel_shift = ERTS_TW_LATER_WHEEL_SHIFT;
+const ErtsMonotonicTime etp_tw_later_wheel_mask = ERTS_TW_LATER_WHEEL_MASK;
+const ErtsMonotonicTime etp_tw_later_wheel_pos_mask = ERTS_TW_LATER_WHEEL_POS_MASK;
+const int etp_tw_later_wheel_first_slot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
struct ErtsTimerWheel_ {
- ErtsTWheelTimer *w[ERTS_TIW_SIZE];
+ ErtsTWheelTimer *slots[1 /* At Once Slot */
+ + ERTS_TW_SOON_WHEEL_SIZE /* Soon Wheel Slots */
+ + ERTS_TW_LATER_WHEEL_SIZE]; /* Later Wheel Slots */
+ ErtsTWheelTimer **w;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ Sint bump_scnt[ERTS_TW_SCNT_SIZE];
ErtsMonotonicTime pos;
Uint nto;
struct {
- ErtsTWheelTimer *head;
- ErtsTWheelTimer *tail;
Uint nto;
} at_once;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ Uint nto;
+ } soon;
+ struct {
+ ErtsMonotonicTime min_tpos;
+ int min_tpos_slot;
+ ErtsMonotonicTime pos;
+ Uint nto;
+ } later;
int yield_slot;
int yield_slots_left;
- int yield_start_pos;
ErtsTWheelTimer sentinel;
int true_next_timeout_time;
+ ErtsMonotonicTime next_timeout_pos;
ErtsMonotonicTime next_timeout_time;
};
-static ERTS_INLINE ErtsMonotonicTime
-find_next_timeout(ErtsSchedulerData *esdp,
- ErtsTimerWheel *tiw,
- int search_all,
- ErtsMonotonicTime curr_time, /* When !search_all */
- ErtsMonotonicTime max_search_time) /* When !search_all */
+#define ERTS_TW_SLOT_AT_ONCE (-1)
+
+#define ERTS_TW_BUMP_LATER_WHEEL(TIW) \
+ ((TIW)->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE >= (TIW)->later.pos)
+
+static int bump_later_wheel(ErtsTimerWheel *tiw, int *yield_count_p);
+
+#ifdef ERTS_TW_DEBUG
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_soon_slots((TIW), (TO_POS))
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS) \
+ dbg_verify_empty_later_slots((TIW), (TO_POS))
+void dbg_verify_empty_soon_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+void dbg_verify_empty_later_slots(ErtsTimerWheel *, ErtsMonotonicTime);
+#else
+#define ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(TIW, TO_POS)
+#define ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(TIW, TO_POS)
+#endif
+
+static ERTS_INLINE int
+scnt_get_ix(int slot)
{
- int start_ix, tiw_pos_ix;
- ErtsTWheelTimer *p;
+ return slot >> ERTS_TW_SCNT_BITS;
+}
+
+static ERTS_INLINE void
+scnt_inc(Sint *scnt, int slot)
+{
+ scnt[slot >> ERTS_TW_SCNT_BITS]++;
+}
+
+#ifdef ERTS_TW_HARD_DEBUG
+
+static ERTS_INLINE void
+scnt_ix_inc(Sint *scnt, int six)
+{
+ scnt[six]++;
+}
+
+#endif
+
+static ERTS_INLINE void
+scnt_dec(Sint *scnt, int slot)
+{
+ scnt[slot >> ERTS_TW_SCNT_BITS]--;
+ ERTS_TW_ASSERT(scnt[slot >> ERTS_TW_SCNT_BITS] >= 0);
+}
+
+static ERTS_INLINE void
+scnt_ix_dec(Sint *scnt, int six)
+{
+ scnt[six]--;
+ ERTS_TW_ASSERT(scnt[six] >= 0);
+}
+
+static ERTS_INLINE void
+scnt_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt, int first_slot,
+ int end_slot, ErtsMonotonicTime slot_sz)
+{
+ int slot = *slotp;
+ int left = *leftp;
+ int ix;
+
+ ERTS_TW_ASSERT(*leftp >= 0);
+
+ left--;
+ slot++;
+ if (slot == end_slot)
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+
+ while (!scnt[ix] && left > 0) {
+ int diff, old_slot = slot;
+ ix++;
+ slot = (ix << ERTS_TW_SCNT_BITS);
+ diff = slot - old_slot;
+ if (left < diff) {
+ slot = old_slot + left;
+ diff = left;
+ }
+ if (slot < end_slot)
+ left -= diff;
+ else {
+ left -= end_slot - old_slot;
+ slot = first_slot;
+ ix = slot >> ERTS_TW_SCNT_BITS;
+ }
+ }
+
+ ERTS_TW_ASSERT(left >= -1);
+
+ if (posp)
+ *posp += slot_sz * ((ErtsMonotonicTime) (*leftp - left));
+ if (sixp)
+ *sixp = slot >> ERTS_TW_SCNT_BITS;
+ *leftp = left;
+ *slotp = slot;
+}
+
+
+static ERTS_INLINE void
+scnt_soon_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_SOON_WHEEL_FIRST_SLOT,
+ ERTS_TW_SOON_WHEEL_END_SLOT, 1);
+}
+
+static ERTS_INLINE void
+scnt_later_wheel_next(int *slotp, int *leftp, ErtsMonotonicTime *posp,
+ int *sixp, Sint *scnt)
+{
+ scnt_wheel_next(slotp, leftp, posp, sixp, scnt,
+ ERTS_TW_LATER_WHEEL_FIRST_SLOT,
+ ERTS_TW_LATER_WHEEL_END_SLOT,
+ ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+}
+
+
+static ERTS_INLINE int
+soon_slot(ErtsMonotonicTime soon_pos)
+{
+ ErtsMonotonicTime slot = soon_pos;
+ slot &= ERTS_TW_SOON_WHEEL_MASK;
+
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
+static ERTS_INLINE int
+later_slot(ErtsMonotonicTime later_pos)
+{
+ ErtsMonotonicTime slot = later_pos;
+ slot >>= ERTS_TW_LATER_WHEEL_SHIFT;
+ slot &= ERTS_TW_LATER_WHEEL_MASK;
+ slot += ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+
+ ERTS_TW_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT <= slot);
+ ERTS_TW_ASSERT(slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ return (int) slot;
+}
+
+#ifdef ERTS_TW_HARD_DEBUG
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS) \
+ hrd_dbg_check_wheels((TIW), (CHK_MIN_TPOS))
+static void hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos);
+#else
+#define ERTS_HARD_DBG_CHK_WHEELS(TIW, CHK_MIN_TPOS)
+#endif
+
+static ErtsMonotonicTime
+find_next_timeout(ErtsSchedulerData *esdp, ErtsTimerWheel *tiw)
+{
+ int slot, slots;
int true_min_timeout = 0;
- ErtsMonotonicTime min_timeout, min_timeout_pos, slot_timeout_pos;
+ ErtsMonotonicTime min_timeout_pos;
+
+ ERTS_TW_ASSERT(tiw->pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE < tiw->later.pos
+ && tiw->later.pos <= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ ERTS_TW_ASSERT(tiw->yield_slot == ERTS_TW_SLOT_INACTIVE);
if (tiw->nto == 0) { /* no timeouts in wheel */
- if (!search_all)
- min_timeout_pos = tiw->pos;
- else {
- curr_time = erts_get_monotonic_time(esdp);
- tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
- }
- min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
- goto found_next;
+ ErtsMonotonicTime curr_time = erts_get_monotonic_time(esdp);
+ tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->later.pos = min_timeout_pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ min_timeout_pos += ERTS_CLKTCKS_WEEK;
+ goto done;
}
- slot_timeout_pos = min_timeout_pos = tiw->pos;
- if (search_all)
- min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
- else
- min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time + max_search_time);
+ ERTS_TW_ASSERT(tiw->soon.nto || tiw->later.nto);
- start_ix = tiw_pos_ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
+ if (!tiw->soon.nto) {
+ ErtsMonotonicTime tpos, min_tpos;
- do {
- if (++slot_timeout_pos >= min_timeout_pos)
- break;
-
- p = tiw->w[tiw_pos_ix];
-
- if (p) {
- ErtsTWheelTimer *end = p;
-
- do {
- ErtsMonotonicTime timeout_pos;
- timeout_pos = p->timeout_pos;
- if (min_timeout_pos > timeout_pos) {
- true_min_timeout = 1;
- min_timeout_pos = timeout_pos;
- if (min_timeout_pos <= slot_timeout_pos)
- goto found_next;
- }
- p = p->next;
- } while (p != end);
- }
+ /* Search later wheel... */
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ if (min_tpos <= tiw->later.pos) {
+ tpos = tiw->later.pos;
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = min_tpos - tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp >= ERTS_TW_LATER_WHEEL_SIZE) {
+ /* Timeout more than one revolution ahead... */
+
+ /* Pre-timeout for move from later to soon wheel... */
+ min_timeout_pos = min_tpos - ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ goto done;
+ }
+ tpos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, min_tpos);
+ slots = ERTS_TW_LATER_WHEEL_SIZE - ((int) tmp);
+ }
+
+ slot = later_slot(tpos);
+
+ /*
+ * We never search for an exact timeout in the
+ * later wheel, but instead settle for the first
+ * scnt range used.
+ */
+ if (tiw->w[slot])
+ true_min_timeout = 1;
+ else
+ scnt_later_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ ERTS_TW_ASSERT(slot == later_slot(tpos));
+
+ /* Pre-timeout for move from later to soon wheel... */
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ min_timeout_pos = tpos;
+ }
+ else {
+ ErtsMonotonicTime tpos;
+ /* Search soon wheel... */
+
+ min_timeout_pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+
+ /*
+ * Besides inspecting the soon wheel we
+ * may also have to inspect two slots in the
+ * later wheel which potentially can trigger
+ * timeouts before timeouts in soon wheel...
+ */
+ if (tiw->later.min_tpos > (tiw->later.pos
+ + 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE)) {
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(
+ tiw, 2*ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ }
+ else {
+ int fslot;
+ tpos = tiw->later.pos;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ fslot = later_slot(tiw->later.pos);
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ else {
+ tpos += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos < min_timeout_pos) {
+ fslot++;
+ if (fslot == ERTS_TW_LATER_WHEEL_END_SLOT)
+ fslot = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ if (tiw->w[fslot])
+ min_timeout_pos = tpos;
+ }
+ }
+ }
+
+ if (tiw->soon.min_tpos <= tiw->pos) {
+ tpos = tiw->pos;
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+ else {
+ ErtsMonotonicTime tmp;
+ /* Don't inspect slots we know are empty... */
+ tmp = tiw->soon.min_tpos - tiw->pos;
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_SIZE > tmp);
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, tiw->soon.min_tpos);
+ slots = ERTS_TW_SOON_WHEEL_SIZE - ((int) tmp);
+ tpos = tiw->soon.min_tpos;
+ }
+
+ slot = soon_slot(tpos);
+
+ /* find next non-empty slot */
+ while (tpos < min_timeout_pos) {
+ if (tiw->w[slot]) {
+ ERTS_TW_ASSERT(tiw->w[slot]->timeout_pos == tpos);
+ min_timeout_pos = tpos;
+ break;
+ }
+ scnt_soon_wheel_next(&slot, &slots, &tpos, NULL, tiw->scnt);
+ }
- tiw_pos_ix++;
- if (tiw_pos_ix == ERTS_TIW_SIZE)
- tiw_pos_ix = 0;
- } while (start_ix != tiw_pos_ix);
+ tiw->soon.min_tpos = min_timeout_pos;
+ true_min_timeout = 1;
+ }
+
+done: {
+ ErtsMonotonicTime min_timeout;
-found_next:
+ min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
+ tiw->next_timeout_pos = min_timeout_pos;
+ tiw->next_timeout_time = min_timeout;
+ tiw->true_next_timeout_time = true_min_timeout;
- min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
- tiw->next_timeout_time = min_timeout;
- tiw->true_next_timeout_time = true_min_timeout;
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 1);
- return min_timeout;
+ return min_timeout;
+ }
}
static ERTS_INLINE void
insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
{
- ERTS_TW_ASSERT(slot >= 0);
- ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
p->slot = slot;
if (!tiw->w[slot]) {
tiw->w[slot] = p;
@@ -223,55 +641,89 @@ insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
prev->next = p;
next->prev = p;
}
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ tiw->at_once.nto++;
+ else {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->soon.nto++;
+ if (tiw->soon.min_tpos > tpos)
+ tiw->soon.min_tpos = tpos;
+ }
+ else {
+ ERTS_TW_ASSERT(p->timeout_pos >= tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ tiw->later.nto++;
+ if (tiw->later.min_tpos > tpos) {
+ tiw->later.min_tpos = tpos;
+ tiw->later.min_tpos_slot = slot;
+ }
+ }
+ scnt_inc(tiw->scnt, slot);
+ }
}
static ERTS_INLINE void
remove_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
int slot = p->slot;
- ERTS_TW_ASSERT(slot != ERTS_TWHEEL_SLOT_INACTIVE);
-
- if (slot >= 0) {
- /*
- * Timer in wheel or in circular
- * list of timers currently beeing
- * triggered (referred by sentinel).
- */
- ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
-
- if (p->next == p) {
- ERTS_TW_ASSERT(tiw->w[slot] == p);
- tiw->w[slot] = NULL;
- }
- else {
- if (tiw->w[slot] == p)
- tiw->w[slot] = p->next;
- p->prev->next = p->next;
- p->next->prev = p->prev;
- }
+ int empty_slot;
+ ERTS_TW_ASSERT(slot != ERTS_TW_SLOT_INACTIVE);
+
+ /*
+     * The timer is in a circular list referred to either
+     * by the "at once" slot, by a slot in the soon wheel,
+     * by a slot in the later wheel, or by the sentinel
+     * (timers currently being triggered).
+ */
+ ERTS_TW_ASSERT(ERTS_TW_SLOT_AT_ONCE <= slot
+ && slot < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+ if (p->next == p) {
+        /* Cannot be referred to by sentinel, i.e. must be referred to by slot... */
+ ERTS_TW_ASSERT(tiw->w[slot] == p);
+ tiw->w[slot] = NULL;
+ empty_slot = 1;
}
else {
- /* Timer in "at once" queue... */
- ERTS_TW_ASSERT(slot == ERTS_TWHEEL_SLOT_AT_ONCE);
- if (p->prev)
- p->prev->next = p->next;
- else {
- ERTS_TW_ASSERT(tiw->at_once.head == p);
- tiw->at_once.head = p->next;
- }
- if (p->next)
- p->next->prev = p->prev;
- else {
- ERTS_TW_ASSERT(tiw->at_once.tail == p);
- tiw->at_once.tail = p->prev;
- }
+ if (tiw->w[slot] == p)
+ tiw->w[slot] = p->next;
+ p->prev->next = p->next;
+ p->next->prev = p->prev;
+ empty_slot = 0;
+ }
+ if (slot == ERTS_TW_SLOT_AT_ONCE) {
ERTS_TW_ASSERT(tiw->at_once.nto > 0);
tiw->at_once.nto--;
}
-
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
-
- tiw->nto--;
+ else {
+ scnt_dec(tiw->scnt, slot);
+ if (slot < ERTS_TW_SOON_WHEEL_END_SLOT) {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && p->timeout_pos == tiw->next_timeout_pos) {
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ }
+ else {
+ if (empty_slot
+ && tiw->true_next_timeout_time
+ && tiw->later.min_tpos_slot == slot) {
+ ErtsMonotonicTime tpos = tiw->later.min_tpos;
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tpos == tiw->next_timeout_pos)
+ tiw->true_next_timeout_time = 0;
+ }
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ }
+ }
+ p->slot = ERTS_TW_SLOT_INACTIVE;
}
ErtsMonotonicTime
@@ -280,58 +732,26 @@ erts_check_next_timeout_time(ErtsSchedulerData *esdp)
ErtsTimerWheel *tiw = esdp->timer_wheel;
ErtsMonotonicTime time;
ERTS_MSACC_DECLARE_CACHE_X();
+ ERTS_TW_ASSERT(tiw->next_timeout_time
+ == ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos));
if (tiw->true_next_timeout_time)
- return tiw->next_timeout_time;
+ return tiw->next_timeout_time; /* known timeout... */
+ if (tiw->next_timeout_pos > tiw->pos + ERTS_TW_SOON_WHEEL_SIZE)
+        return tiw->next_timeout_time; /* sufficiently far away... */
ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(ERTS_MSACC_STATE_TIMERS);
- time = find_next_timeout(esdp, tiw, 1, 0, 0);
+ time = find_next_timeout(esdp, tiw);
ERTS_MSACC_POP_STATE_M_X();
return time;
}
-#ifndef ERTS_TW_DEBUG
-#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) ((void) 0)
-#else
-#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) debug_check_safe_to_skip_to((TIW), (TO))
-static void
-debug_check_safe_to_skip_to(ErtsTimerWheel *tiw, ErtsMonotonicTime skip_to_pos)
-{
- int slots, ix;
- ErtsTWheelTimer *tmr;
- ErtsMonotonicTime tmp;
-
- ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
- tmp = skip_to_pos - tiw->pos;
- ERTS_TW_ASSERT(tmp >= 0);
- if (tmp < (ErtsMonotonicTime) ERTS_TIW_SIZE)
- slots = (int) tmp;
- else
- slots = ERTS_TIW_SIZE;
-
- while (slots > 0) {
- tmr = tiw->w[ix];
- if (tmr) {
- ErtsTWheelTimer *end = tmr;
- do {
- ERTS_TW_ASSERT(tmr->timeout_pos > skip_to_pos);
- tmr = tmr->next;
- } while (tmr != end);
- }
- ix++;
- if (ix == ERTS_TIW_SIZE)
- ix = 0;
- slots--;
- }
-}
-#endif
-
static ERTS_INLINE void
timeout_timer(ErtsTWheelTimer *p)
{
ErlTimeoutProc timeout;
void *arg;
- p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
- timeout = p->u.func.timeout;
- arg = p->u.func.arg;
+ p->slot = ERTS_TW_SLOT_INACTIVE;
+ timeout = p->timeout;
+ arg = p->arg;
(*timeout)(arg);
ASSERT_NO_LOCKED_LOCKS;
}
@@ -339,73 +759,108 @@ timeout_timer(ErtsTWheelTimer *p)
void
erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
- int tiw_pos_ix, slots, yielded_slot_restarted, yield_count;
- ErtsMonotonicTime bump_to, tmp_slots, old_pos;
+ int slot, restarted, yield_count, slots, scnt_ix;
+ ErtsMonotonicTime bump_to;
+ Sint *scnt, *bump_scnt;
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
yield_count = ERTS_TWHEEL_BUMP_YIELD_LIMIT;
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
/*
* In order to be fair we always continue with work
* where we left off when restarting after a yield.
*/
- if (tiw->yield_slot >= 0) {
- yielded_slot_restarted = 1;
- tiw_pos_ix = tiw->yield_slot;
- slots = tiw->yield_slots_left;
+ slot = tiw->yield_slot;
+ restarted = slot != ERTS_TW_SLOT_INACTIVE;
+ if (restarted) {
bump_to = tiw->pos;
- old_pos = tiw->yield_start_pos;
- goto restart_yielded_slot;
+ if (slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT)
+ goto restart_yielded_later_slot;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ if (slot == ERTS_TW_SLOT_AT_ONCE)
+ goto restart_yielded_at_once_slot;
+ scnt_ix = scnt_get_ix(slot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_SOON_WHEEL_SIZE);
+ goto restart_yielded_soon_slot;
}
do {
- yielded_slot_restarted = 0;
-
+ restarted = 0;
bump_to = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ tiw->true_next_timeout_time = 1;
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
while (1) {
ErtsTWheelTimer *p;
- old_pos = tiw->pos;
-
if (tiw->nto == 0) {
empty_wheel:
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, bump_to);
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, bump_to);
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, bump_to);
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+ tiw->next_timeout_pos = bump_to + ERTS_CLKTCKS_WEEK;
+                tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
tiw->pos = bump_to;
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ tiw->later.pos = bump_to + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
ERTS_MSACC_POP_STATE_M_X();
return;
}
- p = tiw->at_once.head;
- while (p) {
- if (--yield_count <= 0) {
- ERTS_TW_ASSERT(tiw->nto > 0);
- ERTS_TW_ASSERT(tiw->at_once.nto > 0);
- tiw->yield_slot = ERTS_TWHEEL_SLOT_AT_ONCE;
- tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
- ERTS_MSACC_POP_STATE_M_X();
- return;
- }
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+
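+        /*
+         * Link the whole "at once" list onto the sentinel so that it
+         * can be processed one timer at a time and the bump operation
+         * can yield in the middle of it.
+         */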
+ if (p) {
+
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[ERTS_TW_SLOT_AT_ONCE] = NULL;
+
+ while (1) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->nto--;
+ tiw->at_once.nto--;
+
+ timeout_timer(p);
+
+ yield_count -= ERTS_TW_COST_TIMEOUT;
- ERTS_TW_ASSERT(tiw->nto > 0);
- ERTS_TW_ASSERT(tiw->at_once.nto > 0);
- tiw->nto--;
- tiw->at_once.nto--;
- tiw->at_once.head = p->next;
- if (p->next)
- p->next->prev = NULL;
- else
- tiw->at_once.tail = NULL;
+ restart_yielded_at_once_slot:
- timeout_timer(p);
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (yield_count <= 0) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->yield_slot = ERTS_TW_SLOT_AT_ONCE;
+ ERTS_MSACC_POP_STATE_M_X();
+ return; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
- p = tiw->at_once.head;
}
if (tiw->pos >= bump_to) {
@@ -416,39 +871,66 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
if (tiw->nto == 0)
goto empty_wheel;
- if (tiw->true_next_timeout_time) {
- ErtsMonotonicTime skip_until_pos;
+ /*
+             * Save the slot counts in a bump-operation-local
+             * array.
+             *
+             * The number of timers to trigger (or move)
+             * can only decrease from now until we have
+             * completed this bump operation (even if we
+             * yield in the middle of it).
+             *
+             * The number of timers in the wheels may,
+             * however, increase due to timers being set
+             * by timeout callbacks.
+ */
+ sys_memcpy((void *) bump_scnt, (void *) scnt,
+ sizeof(Sint) * ERTS_TW_SCNT_SIZE);
+
+ if (tiw->soon.min_tpos > tiw->pos) {
+ ErtsMonotonicTime skip_until_pos = tiw->soon.min_tpos;
+
/*
* No need inspecting slots where we know no timeouts
* to trigger should reside.
*/
- skip_until_pos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
if (skip_until_pos > bump_to)
skip_until_pos = bump_to;
skip_until_pos--;
if (skip_until_pos > tiw->pos) {
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, skip_until_pos);
-
+ ERTS_TW_DBG_VERIFY_EMPTY_SOON_SLOTS(tiw, skip_until_pos);
tiw->pos = skip_until_pos;
}
}
- tiw_pos_ix = (int) ((tiw->pos+1) & (ERTS_TIW_SIZE-1));
- tmp_slots = (bump_to - tiw->pos);
- if (tmp_slots < (ErtsMonotonicTime) ERTS_TIW_SIZE)
- slots = (int) tmp_slots;
- else
- slots = ERTS_TIW_SIZE;
+ {
+                ErtsMonotonicTime tmp_slots = bump_to - tiw->pos;
+ if (tmp_slots < ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+ }
+ slot = soon_slot(tiw->pos+1);
tiw->pos = bump_to;
+ tiw->next_timeout_pos = bump_to;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(bump_to);
+
+ scnt_ix = scnt_get_ix(slot);
+
+ /* Timeout timers in soon wheel */
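+            /* Every inspected slot, moved timer and fired timeout
+             * consumes part of the yield budget (the ERTS_TW_COST_*
+             * constants); when it runs out, the bump operation yields. */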
while (slots > 0) {
- p = tiw->w[tiw_pos_ix];
+ yield_count -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[slot];
if (p) {
+                /* timeout callbacks need tiw->pos to be up to date */
if (p->next == p) {
ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
@@ -459,22 +941,28 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
tiw->sentinel.next->prev = &tiw->sentinel;
tiw->sentinel.prev->next = &tiw->sentinel;
}
- tiw->w[tiw_pos_ix] = NULL;
+ tiw->w[slot] = NULL;
while (1) {
- if (p->timeout_pos > bump_to) {
- /* Very unusual case... */
- ++yield_count;
- insert_timer_into_slot(tiw, tiw_pos_ix, p);
- }
- else {
- /* Normal case... */
- timeout_timer(p);
- tiw->nto--;
- }
-
- restart_yielded_slot:
+ ERTS_TW_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT <= p->slot
+ && p->slot < ERTS_TW_SOON_WHEEL_END_SLOT);
+ if (--tiw->soon.nto == 0)
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ scnt_ix_dec(scnt, scnt_ix);
+ if (p->timeout_pos <= bump_to) {
+ timeout_timer(p);
+ tiw->nto--;
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ yield_count -= ERTS_TW_COST_TIMEOUT;
+ }
+ else {
+ /* uncommon case */
+ insert_timer_into_slot(tiw, slot, p);
+ yield_count -= ERTS_TW_COST_SLOT_MOVE;
+ }
+
+ restart_yielded_soon_slot:
p = tiw->sentinel.next;
if (p == &tiw->sentinel) {
@@ -482,12 +970,9 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
break;
}
- if (--yield_count <= 0) {
- tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
- tiw->yield_slot = tiw_pos_ix;
+ if (yield_count <= 0) {
+ tiw->yield_slot = slot;
tiw->yield_slots_left = slots;
- tiw->yield_start_pos = old_pos;
ERTS_MSACC_POP_STATE_M_X();
return; /* Yield! */
}
@@ -496,24 +981,166 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
p->next->prev = &tiw->sentinel;
}
}
- tiw_pos_ix++;
- if (tiw_pos_ix == ERTS_TIW_SIZE)
- tiw_pos_ix = 0;
- slots--;
+
+ scnt_soon_wheel_next(&slot, &slots, NULL, &scnt_ix, bump_scnt);
}
+
+ if (ERTS_TW_BUMP_LATER_WHEEL(tiw)) {
+ restart_yielded_later_slot:
+ if (bump_later_wheel(tiw, &yield_count))
+ return; /* Yield! */
+ }
}
- } while (yielded_slot_restarted);
+ } while (restarted);
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+ ERTS_TW_ASSERT(tiw->next_timeout_pos == bump_to);
- /* Search at most two seconds ahead... */
- (void) find_next_timeout(NULL, tiw, 0, curr_time, ERTS_SEC_TO_MONOTONIC(2));
+ (void) find_next_timeout(NULL, tiw);
ERTS_MSACC_POP_STATE_M_X();
}
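+
+/*
+ * bump_later_wheel() advances the later wheel up to the soon wheel
+ * horizon. Timers in slots that have come within reach of the soon
+ * wheel are moved into the soon wheel, or fired directly if already
+ * due, while timers that are still too far away are re-inserted into
+ * their later wheel slot. Returns non-zero if the bump operation had
+ * to yield and needs to be restarted.
+ */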
+static int
+bump_later_wheel(ErtsTimerWheel *tiw, int *ycount_p)
+{
+ ErtsMonotonicTime cpos = tiw->pos;
+ ErtsMonotonicTime later_pos = tiw->later.pos;
+ int ycount = *ycount_p;
+ int slots, fslot, scnt_ix;
+ Sint *scnt, *bump_scnt;
+
+ scnt = &tiw->scnt[0];
+ bump_scnt = &tiw->bump_scnt[0];
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ if (tiw->yield_slot >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ fslot = tiw->yield_slot;
+ scnt_ix = scnt_get_ix(fslot);
+ slots = tiw->yield_slots_left;
+ ASSERT(0 <= slots && slots <= ERTS_TW_LATER_WHEEL_SIZE);
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
+ goto restart_yielded_slot;
+ }
+ else {
+ ErtsMonotonicTime end_later_pos, tmp_slots, min_tpos;
+
+ min_tpos = tiw->later.min_tpos & ERTS_TW_LATER_WHEEL_POS_MASK;
+ end_later_pos = cpos + ERTS_TW_SOON_WHEEL_SIZE;
+ end_later_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+
+ /* Skip known empty slots... */
+ if (min_tpos > later_pos) {
+ if (min_tpos > end_later_pos) {
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, end_later_pos);
+ tiw->later.pos = end_later_pos;
+ goto done;
+ }
+ later_pos = min_tpos;
+ ERTS_TW_DBG_VERIFY_EMPTY_LATER_SLOTS(tiw, later_pos);
+ }
+
+ tmp_slots = end_later_pos;
+ tmp_slots -= later_pos;
+ tmp_slots /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmp_slots < ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+
+ fslot = later_slot(later_pos);
+ scnt_ix = scnt_get_ix(fslot);
+
+ tiw->later.pos = end_later_pos;
+ }
+
+ while (slots > 0) {
+ ErtsTWheelTimer *p;
+
+ ycount -= ERTS_TW_COST_SLOT;
+
+ p = tiw->w[fslot];
+
+ if (p) {
+
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[fslot] = NULL;
+
+ while (1) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+
+ ERTS_TW_ASSERT(p->slot == fslot);
+
+ if (--tiw->later.nto == 0) {
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ }
+ scnt_ix_dec(scnt, scnt_ix);
+
+ if (tpos >= tiw->later.pos + ERTS_TW_LATER_WHEEL_SLOT_SIZE) {
+ /* keep in later slot; very uncommon... */
+ insert_timer_into_slot(tiw, fslot, p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ scnt_ix_dec(bump_scnt, scnt_ix);
+ ERTS_TW_ASSERT(tpos < cpos + ERTS_TW_SOON_WHEEL_SIZE);
+ if (tpos > cpos) {
+ /* move into soon wheel */
+ insert_timer_into_slot(tiw, soon_slot(tpos), p);
+ ycount -= ERTS_TW_COST_SLOT_MOVE;
+ }
+ else {
+ /* trigger at once */
+ timeout_timer(p);
+ tiw->nto--;
+ ycount -= ERTS_TW_COST_TIMEOUT;
+ }
+ }
+
+ restart_yielded_slot:
+
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (ycount < 0) {
+ tiw->yield_slot = fslot;
+ tiw->yield_slots_left = slots;
+ *ycount_p = 0;
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+ return 1; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
+ }
+
+ scnt_later_wheel_next(&fslot, &slots, NULL, &scnt_ix, bump_scnt);
+ }
+
+done:
+
+ ERTS_HARD_DBG_CHK_WHEELS(tiw, 0);
+
+ *ycount_p = ycount;
+
+ return 0;
+}
+
Uint
erts_timer_wheel_memory_size(void)
{
@@ -526,25 +1153,51 @@ erts_create_timer_wheel(ErtsSchedulerData *esdp)
ErtsMonotonicTime mtime;
int i;
ErtsTimerWheel *tiw;
+
+ /* Some compile time sanity checks... */
+
+ /* Slots... */
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE == -1);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_INACTIVE < ERTS_TW_SLOT_AT_ONCE);
+ ERTS_CT_ASSERT(ERTS_TW_SLOT_AT_ONCE + 1 == ERTS_TW_SOON_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_FIRST_SLOT < ERTS_TW_SOON_WHEEL_END_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_END_SLOT == ERTS_TW_LATER_WHEEL_FIRST_SLOT);
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_FIRST_SLOT < ERTS_TW_LATER_WHEEL_END_SLOT);
+
+    /* Both wheel sizes should be powers of 2 */
+ ERTS_CT_ASSERT(ERTS_TW_SOON_WHEEL_SIZE
+ && !(ERTS_TW_SOON_WHEEL_SIZE & (ERTS_TW_SOON_WHEEL_SIZE-1)));
+ ERTS_CT_ASSERT(ERTS_TW_LATER_WHEEL_SIZE
+ && !(ERTS_TW_LATER_WHEEL_SIZE & (ERTS_TW_LATER_WHEEL_SIZE-1)));
+
tiw = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_WHEEL,
sizeof(ErtsTimerWheel));
- for(i = 0; i < ERTS_TIW_SIZE; i++)
+ tiw->w = &tiw->slots[1];
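+    /* w[] is offset one element into slots[] so that the "at once"
+     * slot (ERTS_TW_SLOT_AT_ONCE == -1) can be addressed as w[-1]. */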
+ for(i = ERTS_TW_SLOT_AT_ONCE; i < ERTS_TW_LATER_WHEEL_END_SLOT; i++)
tiw->w[i] = NULL;
+ for (i = 0; i < ERTS_TW_SCNT_SIZE; i++)
+ tiw->scnt[i] = 0;
+
mtime = erts_get_monotonic_time(esdp);
tiw->pos = ERTS_MONOTONIC_TO_CLKTCKS(mtime);
tiw->nto = 0;
- tiw->at_once.head = NULL;
- tiw->at_once.tail = NULL;
tiw->at_once.nto = 0;
- tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ tiw->soon.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->soon.nto = 0;
+ tiw->later.min_tpos = ERTS_MAX_CLKTCKS;
+ tiw->later.min_tpos_slot = ERTS_TW_LATER_WHEEL_END_SLOT;
+ tiw->later.pos = tiw->pos + ERTS_TW_SOON_WHEEL_SIZE;
+ tiw->later.pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tiw->later.nto = 0;
+ tiw->yield_slot = ERTS_TW_SLOT_INACTIVE;
tiw->true_next_timeout_time = 0;
- tiw->next_timeout_time = mtime + ERTS_MONOTONIC_DAY;
+ tiw->next_timeout_pos = tiw->pos + ERTS_CLKTCKS_WEEK;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->next_timeout_pos);
tiw->sentinel.next = &tiw->sentinel;
tiw->sentinel.prev = &tiw->sentinel;
- tiw->sentinel.u.func.timeout = NULL;
- tiw->sentinel.u.func.cancel = NULL;
- tiw->sentinel.u.func.arg = NULL;
+ tiw->sentinel.timeout = NULL;
+ tiw->sentinel.arg = NULL;
return tiw;
}
@@ -577,53 +1230,56 @@ erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode)
void
erts_twheel_set_timer(ErtsTimerWheel *tiw,
ErtsTWheelTimer *p, ErlTimeoutProc timeout,
- ErlCancelProc cancel, void *arg,
- ErtsMonotonicTime timeout_pos)
+ void *arg, ErtsMonotonicTime timeout_pos)
{
- ErtsMonotonicTime timeout_time;
+ int slot;
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
- p->u.func.timeout = timeout;
- p->u.func.cancel = cancel;
- p->u.func.arg = arg;
+ p->timeout = timeout;
+ p->arg = arg;
+
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_INACTIVE);
- ERTS_TW_ASSERT(p->slot == ERTS_TWHEEL_SLOT_INACTIVE);
+ tiw->nto++;
+ /* calculate slot */
if (timeout_pos <= tiw->pos) {
- tiw->nto++;
- tiw->at_once.nto++;
- p->next = NULL;
- p->prev = tiw->at_once.tail;
- if (tiw->at_once.tail) {
- ERTS_TW_ASSERT(tiw->at_once.head);
- tiw->at_once.tail->next = p;
- }
- else {
- ERTS_TW_ASSERT(!tiw->at_once.head);
- tiw->at_once.head = p;
- }
- tiw->at_once.tail = p;
- p->timeout_pos = tiw->pos;
- p->slot = ERTS_TWHEEL_SLOT_AT_ONCE;
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->pos);
+ /* at once */
+ p->timeout_pos = timeout_pos = tiw->pos;
+ slot = ERTS_TW_SLOT_AT_ONCE;
+ }
+ else if (timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE) {
+ /* soon wheel */
+ p->timeout_pos = timeout_pos;
+ slot = soon_slot(timeout_pos);
+ if (tiw->soon.min_tpos > timeout_pos)
+ tiw->soon.min_tpos = timeout_pos;
}
else {
- int slot;
-
- /* calculate slot */
- slot = (int) (timeout_pos & (ERTS_TIW_SIZE-1));
-
- insert_timer_into_slot(tiw, slot, p);
-
- tiw->nto++;
-
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
- p->timeout_pos = timeout_pos;
+ /* later wheel */
+ p->timeout_pos = timeout_pos;
+ slot = later_slot(timeout_pos);
+
+ /*
+     * The next timeout position recorded due to
+     * this timer should be in good time before its
+     * actual timeout (one later wheel slot size
+     * earlier), so that the timer can be moved from
+     * the later wheel into the soon wheel before it
+     * expires.
+ */
+ timeout_pos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ timeout_pos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
}
- if (timeout_time < tiw->next_timeout_time) {
+ insert_timer_into_slot(tiw, slot, p);
+
+ if (timeout_pos <= tiw->next_timeout_pos) {
tiw->true_next_timeout_time = 1;
- tiw->next_timeout_time = timeout_time;
+ if (timeout_pos < tiw->next_timeout_pos) {
+ tiw->next_timeout_pos = timeout_pos;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
+ }
}
ERTS_MSACC_POP_STATE_M_X();
}
@@ -631,15 +1287,10 @@ erts_twheel_set_timer(ErtsTimerWheel *tiw,
void
erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
- if (p->slot != ERTS_TWHEEL_SLOT_INACTIVE) {
- ErlCancelProc cancel;
- void *arg;
+ if (p->slot != ERTS_TW_SLOT_INACTIVE) {
ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
remove_timer(tiw, p);
- cancel = p->u.func.cancel;
- arg = p->u.func.arg;
- if (cancel)
- (*cancel)(arg);
+ tiw->nto--;
ERTS_MSACC_POP_STATE_M_X();
}
}
@@ -657,22 +1308,17 @@ erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
tmr = tiw->sentinel.next;
while (tmr != &tiw->sentinel) {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
tmr = tmr->next;
}
- for (tmr = tiw->at_once.head; tmr; tmr = tmr->next) {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
- }
-
- for (ix = 0; ix < ERTS_TIW_SIZE; ix++) {
+ for (ix = ERTS_TW_SLOT_AT_ONCE; ix < ERTS_TW_LATER_WHEEL_END_SLOT; ix++) {
tmr = tiw->w[ix];
if (tmr) {
do {
- if (tmr->u.func.timeout == tclbk)
- (*func)(arg, tmr->timeout_pos, tmr->u.func.arg);
+ if (tmr->timeout == tclbk)
+ (*func)(arg, tmr->timeout_pos, tmr->arg);
tmr = tmr->next;
} while (tmr != tiw->w[ix]);
}
@@ -680,35 +1326,206 @@ erts_twheel_debug_foreach(ErtsTimerWheel *tiw,
}
#ifdef ERTS_TW_DEBUG
-void erts_p_slpq(void)
+
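+/*
+ * Assert that all soon wheel slots from the current position up to
+ * (but not including) to_pos are empty.
+ */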
+void
+dbg_verify_empty_soon_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
{
- erts_printf("Not yet implemented...\n");
-#if 0
- ErtsMonotonicTime current_time = erts_get_monotonic_time(NULL);
- int i;
- ErtsTWheelTimer* p;
-
- /* print the whole wheel, starting at the current position */
- erts_printf("\ncurrent time = %bps tiw_pos = %d tiw_nto %d\n",
- current_time, tiw->pos, tiw->nto);
- i = tiw->pos;
- if (tiw->w[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw->w[i]; p != NULL; p = p->next) {
- erts_printf(" (timeout time %bps, slot %d)\n",
- ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos),
- p->slot);
- }
+ int ix;
+ ErtsMonotonicTime tmp;
+
+ ix = soon_slot(tiw->pos);
+ tmp = to_pos;
+ if (tmp > tiw->pos) {
+ int slots;
+ tmp -= tiw->pos;
+ ERTS_TW_ASSERT(tmp > 0);
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_SOON_WHEEL_SIZE)
+ slots = (int) tmp;
+ else
+ slots = ERTS_TW_SOON_WHEEL_SIZE;
+
+ while (slots > 0) {
+ ERTS_TW_ASSERT(!tiw->w[ix]);
+ ix++;
+ if (ix == ERTS_TW_SOON_WHEEL_END_SLOT)
+ ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
+}
+
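+/*
+ * Assert that no timer due within the current rotation of the later
+ * wheel remains in the slots between the current later position and
+ * to_pos; any timer found there must belong to a later rotation.
+ */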
+void
+dbg_verify_empty_later_slots(ErtsTimerWheel *tiw, ErtsMonotonicTime to_pos)
+{
+ int ix;
+ ErtsMonotonicTime tmp;
+
+ ix = later_slot(tiw->later.pos);
+ tmp = to_pos;
+ tmp &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ if (tmp > tiw->later.pos) {
+ ErtsMonotonicTime pos_min;
+ int slots;
+ tmp -= tiw->later.pos;
+ tmp /= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(tmp > 0);
+
+ pos_min = tiw->later.pos;
+
+ if (tmp < (ErtsMonotonicTime) ERTS_TW_LATER_WHEEL_SIZE)
+ slots = (int) tmp;
+ else {
+ pos_min += ((tmp / ERTS_TW_LATER_WHEEL_SIZE)
+ * ERTS_TW_LATER_WHEEL_SLOT_SIZE);
+ slots = ERTS_TW_LATER_WHEEL_SIZE;
+ }
+
+ while (slots > 0) {
+ ErtsTWheelTimer *tmr = tiw->w[ix];
+ pos_min += ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ if (tmr) {
+ ErtsTWheelTimer *end = tmr;
+ do {
+ ERTS_TW_ASSERT(tmr->timeout_pos >= pos_min);
+ tmr = tmr->next;
+ } while (tmr != end);
+ }
+ ix++;
+ if (ix == ERTS_TW_LATER_WHEEL_END_SLOT)
+ ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ slots--;
+ }
+ }
+}
+
+#endif /* ERTS_TW_DEBUG */
+
+#ifdef ERTS_TW_HARD_DEBUG
+
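+/*
+ * Recount all timers by walking every slot, as well as the sentinel
+ * list of a yielded bump operation, and verify that the cached
+ * "at once", soon, later, and slot-group counters all match.
+ */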
+static void
+hrd_dbg_check_wheels(ErtsTimerWheel *tiw, int check_min_tpos)
+{
+ int ix, six, soon_tmo, later_tmo, at_once_tmo,
+ scnt_slot, scnt_slots, scnt_six;
+ ErtsMonotonicTime min_tpos;
+ Sint scnt[ERTS_TW_SCNT_SIZE];
+ ErtsTWheelTimer *p;
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ scnt[six] = 0;
+
+ min_tpos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
+
+ at_once_tmo = 0;
+ p = tiw->w[ERTS_TW_SLOT_AT_ONCE];
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ at_once_tmo++;
+ ERTS_TW_ASSERT(p->slot == ERTS_TW_SLOT_AT_ONCE);
+ ERTS_TW_ASSERT(p->timeout_pos <= tiw->pos);
+ ERTS_TW_ASSERT(!check_min_tpos || tiw->pos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
}
- for(i = ((i+1) & (ERTS_TIW_SIZE-1)); i != (tiw->pos & (ERTS_TIW_SIZE-1)); i = ((i+1) & (ERTS_TIW_SIZE-1))) {
- if (tiw->w[i] != NULL) {
- erts_printf("%d:\n", i);
- for(p = tiw->w[i]; p != NULL; p = p->next) {
- erts_printf(" (timeout time %bps, slot %d)\n",
- ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos), p->slot);
- }
- }
+
+ soon_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_SOON_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_SOON_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ soon_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(ix == soon_slot(tpos));
+ ERTS_TW_ASSERT(p->timeout_pos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_soon_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
}
-#endif
+
+ later_tmo = 0;
+ scnt_slot = ERTS_TW_SOON_WHEEL_END_SLOT-1;
+ scnt_slots = ERTS_TW_SOON_WHEEL_SIZE;
+ scnt_six = 0;
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ for (ix = ERTS_TW_LATER_WHEEL_FIRST_SLOT;
+ ix < ERTS_TW_LATER_WHEEL_END_SLOT;
+ ix++) {
+ p = tiw->w[ix];
+ six = scnt_get_ix(ix);
+ ERTS_TW_ASSERT(!p || six == scnt_six);
+ if (p) {
+ ErtsTWheelTimer *first = p;
+ do {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ later_tmo++;
+ scnt_ix_inc(scnt, six);
+ ERTS_TW_ASSERT(p->slot == ix);
+ ERTS_TW_ASSERT(later_slot(tpos) == ix);
+ tpos &= ERTS_TW_LATER_WHEEL_POS_MASK;
+ tpos -= ERTS_TW_LATER_WHEEL_SLOT_SIZE;
+ ERTS_TW_ASSERT(!check_min_tpos || tpos >= min_tpos);
+ ERTS_TW_ASSERT(p->next->prev == p);
+ p = p->next;
+ } while (p != first);
+ }
+ if (ix == scnt_slot)
+ scnt_later_wheel_next(&scnt_slot, &scnt_slots,
+ NULL, &scnt_six, tiw->scnt);
+ }
+
+ if (tiw->yield_slot != ERTS_TW_SLOT_INACTIVE) {
+ p = tiw->sentinel.next;
+ ix = tiw->yield_slot;
+ while (p != &tiw->sentinel) {
+ ErtsMonotonicTime tpos = p->timeout_pos;
+ ERTS_TW_ASSERT(ix == p->slot);
+ if (ix == ERTS_TW_SLOT_AT_ONCE)
+ at_once_tmo++;
+ else {
+ scnt_inc(scnt, ix);
+ if (ix >= ERTS_TW_LATER_WHEEL_FIRST_SLOT) {
+ later_tmo++;
+ ERTS_TW_ASSERT(ix == later_slot(tpos));
+ }
+ else {
+ soon_tmo++;
+ ERTS_TW_ASSERT(ix == (tpos & ERTS_TW_SOON_WHEEL_MASK));
+ ERTS_TW_ASSERT(tpos < tiw->pos + ERTS_TW_SOON_WHEEL_SIZE);
+ }
+            }
+            p = p->next;
+        }
+ }
+
+
+ ERTS_TW_ASSERT(tiw->at_once.nto == at_once_tmo);
+ ERTS_TW_ASSERT(tiw->soon.nto == soon_tmo);
+ ERTS_TW_ASSERT(tiw->later.nto == later_tmo);
+ ERTS_TW_ASSERT(tiw->nto == soon_tmo + later_tmo + at_once_tmo);
+
+ for (six = 0; six < ERTS_TW_SCNT_SIZE; six++)
+ ERTS_TW_ASSERT(scnt[six] == tiw->scnt[six]);
}
-#endif /* ERTS_TW_DEBUG */
+
+#endif /* ERTS_TW_HARD_DEBUG */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 8f3f48f38f..9263798a28 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3594,7 +3594,7 @@ store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns)
ErtsMRefThing *mreft = (ErtsMRefThing *) from_hp;
ErtsMagicBinary *mb = mreft->mb;
ASSERT(is_magic_ref_thing(from_hp));
- erts_refc_inc(&mb->refc, 2);
+ erts_refc_inc(&mb->intern.refc, 2);
OH_OVERHEAD(oh, mb->orig_size / sizeof(Eterm));
}