path: root/erts/emulator/beam
author     Lars Thorsen <[email protected]>    2015-05-20 10:12:46 +0200
committer  Lars Thorsen <[email protected]>    2015-05-20 10:12:46 +0200
commit     7b81ed532898c735ad8901723df373e5f0246127 (patch)
tree       5f69565b5016fb08d3bd62ecaf53fa12d4cb7c51 /erts/emulator/beam
parent     fd63f2b7108e2607e43d709e6d3885e4a7406ec8 (diff)
parent     5bd9afcd35d1a5c46dce8e75c650841a3061f45b (diff)
Merge branch 'master' of git-server:otp
Conflicts: lib/orber/src/orber.app.src
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names | 3
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 33
-rw-r--r--  erts/emulator/beam/beam_emu.c | 87
-rw-r--r--  erts/emulator/beam/beam_load.c | 79
-rw-r--r--  erts/emulator/beam/beam_load.h | 2
-rw-r--r--  erts/emulator/beam/bif.c | 25
-rw-r--r--  erts/emulator/beam/bif.h | 1
-rw-r--r--  erts/emulator/beam/bif.tab | 14
-rw-r--r--  erts/emulator/beam/break.c | 12
-rw-r--r--  erts/emulator/beam/code_ix.c | 4
-rw-r--r--  erts/emulator/beam/copy.c | 6
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 31
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 14
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 8
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 849
-rw-r--r--  erts/emulator/beam/erl_bif_timer.h | 37
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 4
-rw-r--r--  erts/emulator/beam/erl_db.c | 8
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 2
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 38
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 1
-rw-r--r--  erts/emulator/beam/erl_gc.c | 73
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 2894
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h | 80
-rw-r--r--  erts/emulator/beam/erl_init.c | 26
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 1
-rw-r--r--  erts/emulator/beam/erl_map.c | 35
-rw-r--r--  erts/emulator/beam/erl_message.c | 2
-rw-r--r--  erts/emulator/beam/erl_message.h | 14
-rw-r--r--  erts/emulator/beam/erl_nif.c | 4
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 2
-rw-r--r--  erts/emulator/beam/erl_port.h | 28
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 19
-rw-r--r--  erts/emulator/beam/erl_process.c | 517
-rw-r--r--  erts/emulator/beam/erl_process.h | 91
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 144
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 92
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 19
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 102
-rw-r--r--  erts/emulator/beam/erl_rbtree.h | 1740
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 5
-rw-r--r--  erts/emulator/beam/erl_time.h | 252
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 740
-rw-r--r--  erts/emulator/beam/global.h | 6
-rw-r--r--  erts/emulator/beam/io.c | 114
-rw-r--r--  erts/emulator/beam/register.c | 9
-rw-r--r--  erts/emulator/beam/sys.h | 16
-rw-r--r--  erts/emulator/beam/time.c | 741
-rw-r--r--  erts/emulator/beam/utils.c | 338
49 files changed, 6794 insertions, 2568 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 8fdcbb4058..5ec1409adf 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -68,6 +68,7 @@ atom aborted
atom abs_path
atom absoluteURI
atom ac
+atom accessor
atom active
atom all
atom all_but_first
@@ -94,12 +95,14 @@ atom args
atom arg0
atom arity
atom asn1
+atom async
atom asynchronous
atom atom
atom atom_used
atom attributes
atom await_port_send_result
atom await_proc_exit
+atom await_result
atom await_sched_wall_time_modifications
atom awaiting_load
atom awaiting_unload
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index df1983a83d..500a98195b 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -39,12 +39,9 @@ static void set_default_trace_pattern(Eterm module);
static Eterm check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp);
static void delete_code(Module* modp);
static void decrement_refc(BeamInstr* code);
-static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
-
-
BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
{
Module* modp;
@@ -59,8 +56,8 @@ BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
return am_undefined;
}
erts_rlock_old_code(code_ix);
- res = ((modp->curr.code && is_native(modp->curr.code)) ||
- (modp->old.code != 0 && is_native(modp->old.code))) ?
+ res = (erts_is_module_native(modp->curr.code) ||
+ erts_is_module_native(modp->old.code)) ?
am_true : am_false;
erts_runlock_old_code(code_ix);
return res;
@@ -371,7 +368,7 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
ASSERT(commiter_state.stager == NULL);
commiter_state.stager = c_p;
erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &commiter_state.lop);
- erts_smp_proc_inc_refc(c_p);
+ erts_proc_inc_refc(c_p);
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
/*
* smp_code_ix_commiter() will do the rest "later"
@@ -398,7 +395,7 @@ static void smp_code_ix_commiter(void* null)
erts_resume(p, ERTS_PROC_LOCK_STATUS);
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_dec_refc(p);
+ erts_proc_dec_refc(p);
}
#endif /* ERTS_SMP */
@@ -1106,25 +1103,3 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
}
return NIL;
}
-
-static int
-is_native(BeamInstr* code)
-{
- Uint i, num_functions = code[MI_NUM_FUNCTIONS];
-
- /* Check NativeAdress of first real function in module
- */
- for (i=0; i<num_functions; i++) {
- BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
- Eterm name = (Eterm) func_info[3];
-
- if (is_atom(name)) {
- return func_info[1] != 0;
- }
- else ASSERT(is_nil(name)); /* ignore BIF stubs */
- }
- /* Not a single non-BIF function? */
- return 0;
-}
-
-
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 21faf8fbf2..a21622f424 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -2119,44 +2119,32 @@ void process_main(void)
}
GetArg1(1, timeout_value);
if (timeout_value != make_small(0)) {
-#if !defined(ARCH_64) || HALFWORD_HEAP
- Uint time_val;
-#endif
- if (is_small(timeout_value) && signed_val(timeout_value) > 0 &&
-#if defined(ARCH_64) && !HALFWORD_HEAP
- ((unsigned_val(timeout_value) >> 32) == 0)
-#else
- 1
-#endif
- ) {
- /*
- * The timer routiner will set c_p->i to the value in
- * c_p->def_arg_reg[0]. Note that it is safe to use this
- * location because there are no living x registers in
- * a receive statement.
- * Note that for the halfword emulator, the two first elements
- * of the array are used.
- */
- BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
- *pi = I+3;
- set_timer(c_p, unsigned_val(timeout_value));
- } else if (timeout_value == am_infinity) {
+ if (timeout_value == am_infinity)
c_p->flags |= F_TIMO;
-#if !defined(ARCH_64) || HALFWORD_HEAP
- } else if (term_to_Uint(timeout_value, &time_val)) {
- BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
- *pi = I+3;
- set_timer(c_p, time_val);
-#endif
- } else { /* Wrong time */
- OpCase(i_wait_error_locked): {
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- /* Fall through */
+ else {
+ int tres = erts_set_proc_timer_term(c_p, timeout_value);
+ if (tres == 0) {
+ /*
+ * The timer routiner will set c_p->i to the value in
+ * c_p->def_arg_reg[0]. Note that it is safe to use this
+ * location because there are no living x registers in
+ * a receive statement.
+ * Note that for the halfword emulator, the two first elements
+ * of the array are used.
+ */
+ BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
+ *pi = I+3;
}
+ else { /* Wrong time */
+ OpCase(i_wait_error_locked): {
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ /* Fall through */
+ }
OpCase(i_wait_error): {
- c_p->freason = EXC_TIMEOUT_VALUE;
- goto find_func_info;
+ c_p->freason = EXC_TIMEOUT_VALUE;
+ goto find_func_info;
+ }
}
}
@@ -2205,7 +2193,7 @@ void process_main(void)
if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
BeamInstr** p = (BeamInstr **) c_p->def_arg_reg;
*p = I+3;
- set_timer(c_p, Arg(1));
+ erts_set_proc_timer_uword(c_p, Arg(1));
}
goto wait2;
}
@@ -5589,18 +5577,35 @@ next_catch(Process* c_p, Eterm *reg) {
static void
terminate_proc(Process* c_p, Eterm Value)
{
+ Eterm *hp;
+ Eterm Args = NIL;
+
/* Add a stacktrace if this is an error. */
if (GET_EXC_CLASS(c_p->freason) == EXTAG_ERROR) {
Value = add_stacktrace(c_p, Value, c_p->ftrace);
}
/* EXF_LOG is a primary exception flag */
if (c_p->freason & EXF_LOG) {
+ int alive = erts_is_alive;
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Error in process %T ", c_p->common.id);
- if (erts_is_alive)
- erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
- erts_dsprintf(dsbufp,"with exit value: %0.*T\n", display_items, Value);
- erts_send_error_to_logger(c_p->group_leader, dsbufp);
+
+ /* Build the format message */
+ erts_dsprintf(dsbufp, "Error in process ~p ");
+ if (alive)
+ erts_dsprintf(dsbufp, "on node ~p ");
+ erts_dsprintf(dsbufp, "with exit value:~n~p~n");
+
+ /* Build the args in reverse order */
+ hp = HAlloc(c_p, 2);
+ Args = CONS(hp, Value, Args);
+ if (alive) {
+ hp = HAlloc(c_p, 2);
+ Args = CONS(hp, erts_this_node->sysname, Args);
+ }
+ hp = HAlloc(c_p, 2);
+ Args = CONS(hp, c_p->common.id, Args);
+
+ erts_send_error_term_to_logger(c_p->group_leader, dsbufp, Args);
}
/*
* If we use a shared heap, the process will be garbage-collected.
@@ -5946,7 +5951,7 @@ build_stacktrace(Process* c_p, Eterm exc) {
* (e.g. spawn_link(erlang, abs, [1])).
*/
if (fi.current == NULL) {
- erts_set_current_function(&fi, c_p->initial);
+ erts_set_current_function(&fi, c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
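
The terminate_proc() hunk above switches the error logger call from a pre-formatted string to a format string plus an argument list built on the caller's heap. A minimal sketch of that consing pattern, using only the heap primitives already present in the hunk (HAlloc, CONS, NIL); it is not part of the commit itself:

    /* Cons from the tail toward the head so the list ends up as
     * [Pid, Value], matching the "~p ... ~p" placeholders in order.
     * Each cons cell costs two heap words. */
    Eterm *hp;
    Eterm args = NIL;

    hp   = HAlloc(c_p, 2);
    args = CONS(hp, Value, args);           /* [Value]      */
    hp   = HAlloc(c_p, 2);
    args = CONS(hp, c_p->common.id, args);  /* [Pid, Value] */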
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 282aa71109..0d40201934 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -530,6 +530,7 @@ static Eterm functions_in_module(Process* p, Eterm mod);
static Eterm attributes_for_module(Process* p, Eterm mod);
static Eterm compilation_info_for_module(Process* p, Eterm mod);
static Eterm md5_of_module(Process* p, Eterm mod);
+static Eterm has_native(Process* p, Eterm mod);
static Eterm native_addresses(Process* p, Eterm mod);
int patch_funentries(Eterm Patchlist);
int patch(Eterm Addresses, Uint fe);
@@ -5408,6 +5409,9 @@ erts_module_info_0(Process* p, Eterm module)
list = CONS(hp, tup, list)
BUILD_INFO(am_md5);
+#ifdef HIPE
+ BUILD_INFO(am_native);
+#endif
BUILD_INFO(am_compile);
BUILD_INFO(am_attributes);
BUILD_INFO(am_exports);
@@ -5433,6 +5437,8 @@ erts_module_info_1(Process* p, Eterm module, Eterm what)
return compilation_info_for_module(p, module);
} else if (what == am_native_addresses) {
return native_addresses(p, module);
+ } else if (what == am_native) {
+ return has_native(p, module);
}
return THE_NON_VALUE;
}
@@ -5493,6 +5499,53 @@ functions_in_module(Process* p, /* Process whose heap to use. */
}
/*
+ * Returns 'true' if mod has any native compiled functions, otherwise 'false'
+ */
+
+static Eterm
+has_native(Process* p, Eterm mod)
+{
+ Eterm result = am_false;
+#ifdef HIPE
+ Module* modp;
+
+ if (is_not_atom(mod)) {
+ return THE_NON_VALUE;
+ }
+
+ modp = erts_get_module(mod, erts_active_code_ix());
+ if (modp == NULL) {
+ return THE_NON_VALUE;
+ }
+
+ if (erts_is_module_native(modp->curr.code)) {
+ result = am_true;
+ }
+#endif
+ return result;
+}
+
+int
+erts_is_module_native(BeamInstr* code)
+{
+ Uint i, num_functions;
+
+ /* Check NativeAdress of first real function in module */
+ if (code != NULL) {
+ num_functions = code[MI_NUM_FUNCTIONS];
+ for (i=0; i<num_functions; i++) {
+ BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
+ Eterm name = (Eterm) func_info[3];
+ if (is_atom(name)) {
+ return func_info[1] != 0;
+ }
+ else ASSERT(is_nil(name)); /* ignore BIF stubs */
+ }
+ }
+ return 0;
+}
+
+/*
* Builds a list of all functions including native addresses.
* [{Name,Arity,NativeAddress},...]
*
@@ -5695,7 +5748,11 @@ md5_of_module(Process* p, /* Process whose heap to use. */
return THE_NON_VALUE;
}
code = modp->curr.code;
- res = new_binary(p, (byte *) code[MI_MD5_PTR], MD5_SIZE);
+ if (code[MI_MD5_PTR] != 0) {
+ res = new_binary(p, (byte *) code[MI_MD5_PTR], MD5_SIZE);
+ } else {
+ res = am_undefined;
+ }
return res;
}
@@ -6168,6 +6225,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
LoaderState* stp;
BeamInstr Funcs;
BeamInstr Patchlist;
+ Eterm MD5Bin;
Eterm* tp;
BeamInstr* code = NULL;
BeamInstr* ptrs;
@@ -6196,12 +6254,15 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
goto error;
}
tp = tuple_val(Info);
- if (tp[0] != make_arityval(2)) {
+ if (tp[0] != make_arityval(3)) {
goto error;
}
Funcs = tp[1];
- Patchlist = tp[2];
-
+ Patchlist = tp[2];
+ MD5Bin = tp[3];
+ if (is_not_binary(MD5Bin) || (binary_size(MD5Bin) != MD5_SIZE)) {
+ goto error;
+ }
if ((n = erts_list_length(Funcs)) < 0) {
goto error;
}
@@ -6251,6 +6312,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
code_size = ((WORDS_PER_FUNCTION+1)*n + MI_FUNCTIONS + 2) * sizeof(BeamInstr);
code_size += stp->chunks[ATTR_CHUNK].size;
code_size += stp->chunks[COMPILE_CHUNK].size;
+ code_size += MD5_SIZE;
code = erts_alloc_fnf(ERTS_ALC_T_CODE, code_size);
if (!code) {
goto error;
@@ -6357,6 +6419,15 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (info == NULL) {
goto error;
}
+ {
+ byte *tmp = NULL;
+ byte *md5 = NULL;
+ if ((md5 = erts_get_aligned_binary_bytes(MD5Bin, &tmp)) != NULL) {
+ sys_memcpy(info, md5, MD5_SIZE);
+ code[MI_MD5_PTR] = (BeamInstr) info;
+ }
+ erts_free_aligned_binary_bytes(tmp);
+ }
/*
* Insert the module in the module table.
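
erts_is_module_native() above generalizes the is_native() helper that the beam_bif_load.c hunk deletes, and it now tolerates a NULL code pointer for modules with no current or old code. A minimal caller-side sketch, reusing the module lookup already shown in has_native() above; not part of the commit:

    Module *modp = erts_get_module(mod, erts_active_code_ix());
    int native = (modp != NULL) && erts_is_module_native(modp->curr.code);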
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 0e3ca0bdb0..46b0c60ab0 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -23,10 +23,10 @@
#include "beam_opcodes.h"
#include "erl_process.h"
+int erts_is_module_native(BeamInstr* code);
Eterm beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks,
Eterm module);
-
typedef struct gen_op_entry {
char* name;
int arity;
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 4f2958c664..2b782f4484 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -44,6 +44,7 @@
#include "erl_bits.h"
#include "erl_bif_unique.h"
+Export *erts_await_result;
static Export* flush_monitor_messages_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
static Export* await_proc_exit_trap = NULL;
@@ -831,26 +832,6 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
return ret;
}
-BIF_RETTYPE erts_internal_monitor_process_2(BIF_ALIST_2)
-{
- if (is_not_internal_pid(BIF_ARG_1)) {
- if (is_external_pid(BIF_ARG_1)
- && (external_pid_dist_entry(BIF_ARG_1)
- == erts_this_dist_entry)) {
- BIF_RET(am_false);
- }
- goto badarg;
- }
-
- if (is_not_internal_ref(BIF_ARG_2))
- goto badarg;
-
- BIF_RET(local_pid_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, 1));
-
-badarg:
- BIF_ERROR(BIF_P, BADARG);
-}
-
/**********************************************************************/
/* this is a combination of the spawn and link BIFs */
@@ -4904,6 +4885,10 @@ void erts_init_bif(void)
#endif
, &bif_return_trap);
+ erts_await_result = erts_export_put(am_erts_internal,
+ am_await_result,
+ 1);
+
erts_init_trap_export(&dsend_continue_trap_export,
am_erts_internal, am_dsend_continue_trap, 1,
dsend_continue_trap_1);
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index d461c3f479..b877711544 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -20,6 +20,7 @@
#ifndef __BIF_H__
#define __BIF_H__
+extern Export *erts_await_result;
extern Export* erts_format_cpu_topology_trap;
extern Export *erts_convert_time_unit_trap;
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 471f687101..eadba3eaff 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -171,11 +171,6 @@ bif erts_internal:map_hashmap_children/1
bif erts_internal:time_unit/0
-bif erts_internal:get_bif_timer_servers/0
-bif erts_internal:create_bif_timer/0
-bif erts_internal:access_bif_timer/1
-
-bif erts_internal:monitor_process/2
bif erts_internal:is_system_process/1
# inet_db support
@@ -220,6 +215,15 @@ bif math:sqrt/1
bif math:atan2/2
bif math:pow/2
+bif erlang:start_timer/3
+bif erlang:start_timer/4
+bif erlang:send_after/3
+bif erlang:send_after/4
+bif erlang:cancel_timer/1
+bif erlang:cancel_timer/2
+bif erlang:read_timer/1
+bif erlang:read_timer/2
+
bif erlang:make_tuple/2
bif erlang:append_element/2
bif erlang:make_tuple/3
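
Each bif.tab row added above makes the build emit a C entry point for the BIF; by the usual <name>_<arity> convention (the same one visible in the old_send_after_3() code removed later in this diff) the new timer BIFs are implemented in the new erl_hl_timer.c with prototypes along these lines. The exact names are assumed here, not shown in this excerpt:

    BIF_RETTYPE start_timer_3(BIF_ALIST_3);
    BIF_RETTYPE start_timer_4(BIF_ALIST_4);
    BIF_RETTYPE send_after_3(BIF_ALIST_3);
    BIF_RETTYPE send_after_4(BIF_ALIST_4);
    BIF_RETTYPE cancel_timer_1(BIF_ALIST_1);
    BIF_RETTYPE cancel_timer_2(BIF_ALIST_2);
    BIF_RETTYPE read_timer_1(BIF_ALIST_1);
    BIF_RETTYPE read_timer_2(BIF_ALIST_2);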
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index e2fa572546..02e65cb9c6 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -36,7 +36,7 @@
#include "atom.h"
#include "beam_load.h"
#include "erl_instrument.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_thr_progress.h"
/* Forward declarations -- should really appear somewhere else */
@@ -108,7 +108,7 @@ process_killer(void)
case 'k': {
ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
erts_aint32_t state;
- erts_smp_proc_inc_refc(rp);
+ erts_proc_inc_refc(rp);
erts_smp_proc_lock(rp, rp_locks);
state = erts_smp_atomic32_read_acqb(&rp->state);
if (state & (ERTS_PSFLG_FREE
@@ -131,7 +131,7 @@ process_killer(void)
0);
}
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
case 'n': br = 1; break;
case 'r': return;
@@ -227,9 +227,9 @@ print_process_info(int to, void *to_arg, Process *p)
* Display the initial function name
*/
erts_print(to, to_arg, "Spawned as: %T:%T/%bpu\n",
- p->initial[INITIAL_MOD],
- p->initial[INITIAL_FUN],
- p->initial[INITIAL_ARI]);
+ p->u.initial[INITIAL_MOD],
+ p->u.initial[INITIAL_FUN],
+ p->u.initial[INITIAL_ARI]);
if (p->current != NULL) {
if (running) {
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index 4344558348..d925709bd0 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -130,7 +130,7 @@ int erts_try_seize_code_write_permission(Process* c_p)
ASSERT(code_writing_process != c_p);
qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
qitem->p = c_p;
- erts_smp_proc_inc_refc(c_p);
+ erts_proc_inc_refc(c_p);
qitem->next = code_write_queue;
code_write_queue = qitem;
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
@@ -151,7 +151,7 @@ void erts_release_code_write_permission(void)
}
erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
code_write_queue = qitem->next;
- erts_smp_proc_dec_refc(qitem->p);
+ erts_proc_dec_refc(qitem->p);
erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
}
code_writing_process = NULL;
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 4d12dae787..850606dd86 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -608,11 +608,6 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
erts_refc_inc(&funp->fe->refc, 2);
}
goto off_heap_common;
-
- case MAP_SUBTAG:
- *hp++ = *tp++;
- sz--;
- break;
case EXTERNAL_PID_SUBTAG:
case EXTERNAL_PORT_SUBTAG:
case EXTERNAL_REF_SUBTAG:
@@ -648,7 +643,6 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap)
}
}
*hpp = hp;
-
return res;
}
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index e396395dde..dcae5509ec 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -39,7 +39,7 @@
#include "erl_instrument.h"
#include "erl_mseg.h"
#include "erl_monitors.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_cpu_topology.h"
#include "erl_thr_queue.h"
#if defined(ERTS_ALC_T_DRV_SEL_D_STATE) || defined(ERTS_ALC_T_DRV_EV_D_STATE)
@@ -575,6 +575,15 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
= sizeof(ErtsThrQElement_t);
#endif
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
+ = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
+ = erts_timer_type_size(ERTS_ALC_T_HL_PTIMER);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_BIF_TIMER)]
+ = erts_timer_type_size(ERTS_ALC_T_BIF_TIMER);
+ fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_ABIF_TIMER)]
+ = erts_timer_type_size(ERTS_ALC_T_ABIF_TIMER);
+
#ifdef HARD_DEBUG
hdbg_init();
#endif
@@ -2322,6 +2331,22 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
&size.processes_used,
fi,
ERTS_ALC_T_MSG_REF);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_LL_PTIMER);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_HL_PTIMER);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_BIF_TIMER);
+ add_fix_values(&size.processes,
+ &size.processes_used,
+ fi,
+ ERTS_ALC_T_ABIF_TIMER);
}
if (want.atom || want.atom_used) {
@@ -3186,7 +3211,7 @@ reply_alloc_info(void *vair)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0)
aireq_free(air);
@@ -3260,7 +3285,7 @@ erts_request_alloc_info(struct process *c_p,
erts_smp_atomic32_init_nob(&air->refc,
(erts_aint32_t) erts_no_schedulers);
- erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+ erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 074f864dee..57c506458c 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -163,9 +163,13 @@ type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
type PREPARED_CODE SHORT_LIVED CODE prepared_code
-type BIF_TIMER_TABLE LONG_LIVED SYSTEM bif_timer_table
-type SL_BIF_TIMER SHORT_LIVED PROCESSES bif_timer_sl
-type LL_BIF_TIMER STANDARD PROCESSES bif_timer_ll
+type TIMER_SERVICE LONG_LIVED SYSTEM timer_service
+type LL_PTIMER FIXED_SIZE PROCESSES ll_ptimer
+type HL_PTIMER FIXED_SIZE PROCESSES hl_ptimer
+type BIF_TIMER FIXED_SIZE PROCESSES bif_timer
+type ABIF_TIMER FIXED_SIZE PROCESSES accessor_bif_timer
+type TIMER_REQUEST SHORT_LIVED PROCESSES timer_request
+type BTM_YIELD_STATE SHORT_LIVED PROCESSES btm_yield_state
type REG_TABLE STANDARD SYSTEM reg_tab
type FUN_TABLE STANDARD CODE fun_tab
type DIST_TABLE STANDARD SYSTEM dist_tab
@@ -324,8 +328,6 @@ type ACTIVE_PROCS STANDARD PROCESSES active_procs
+endif
+if smp
-type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl
-type LL_PTIMER STANDARD SYSTEM ptimer_ll
type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue
type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception
type LL_MPATHS LONG_LIVED SYSTEM ll_migration_paths
@@ -365,7 +367,6 @@ type AINFO_REQ STANDARD_LOW SYSTEM alloc_info_request
type SCHED_WTIME_REQ STANDARD_LOW SYSTEM sched_wall_time_request
type GC_INFO_REQ STANDARD_LOW SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD_LOW SYSTEM port_data_heap
-type BIF_TIMER_DATA LONG_LIVED_LOW SYSTEM bif_timer_data
+else # "fullword"
@@ -386,7 +387,6 @@ type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
-type BIF_TIMER_DATA LONG_LIVED SYSTEM bif_timer_data
+endif
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index fa7de23f00..f74aea80a7 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1054,9 +1054,9 @@ process_info_aux(Process *BIF_P,
case am_initial_call:
hp = HAlloc(BIF_P, 3+4);
res = TUPLE3(hp,
- rp->initial[INITIAL_MOD],
- rp->initial[INITIAL_FUN],
- make_small(rp->initial[INITIAL_ARI]));
+ rp->u.initial[INITIAL_MOD],
+ rp->u.initial[INITIAL_FUN],
+ make_small(rp->u.initial[INITIAL_ARI]));
hp += 4;
break;
@@ -2129,6 +2129,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_has_time_correction() ? am_true : am_false);
} else if (ERTS_IS_ATOM_STR("start_time", BIF_ARG_1)) {
BIF_RET(erts_get_monotonic_start_time(BIF_P));
+ } else if (ERTS_IS_ATOM_STR("end_time", BIF_ARG_1)) {
+ BIF_RET(erts_get_monotonic_end_time(BIF_P));
} else if (ERTS_IS_ATOM_STR("time_warp_mode", BIF_ARG_1)) {
switch (erts_time_warp_mode()) {
case ERTS_NO_TIME_WARP_MODE: {
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
deleted file mode 100644
index ac4a5644ac..0000000000
--- a/erts/emulator/beam/erl_bif_timer.c
+++ /dev/null
@@ -1,849 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2012. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-
-#include "erl_bif_timer.h"
-#include "global.h"
-#include "bif.h"
-#include "error.h"
-#include "big.h"
-#include "erl_thr_progress.h"
-#include "erl_bif_unique.h"
-
-/****************************************************************************
-** BIF Timer support
-****************************************************************************/
-
-#define BTM_FLG_SL_TIMER (((Uint32) 1) << 0)
-#define BTM_FLG_CANCELED (((Uint32) 1) << 1)
-#define BTM_FLG_HEAD (((Uint32) 1) << 2)
-#define BTM_FLG_BYNAME (((Uint32) 1) << 3)
-#define BTM_FLG_WRAP (((Uint32) 1) << 4)
-
-struct ErtsBifTimer_ {
- struct {
- union {
- ErtsBifTimer **head;
- ErtsBifTimer *prev;
- } u;
- ErtsBifTimer *next;
- } tab;
- union {
- Eterm name;
- struct {
- ErtsBifTimer *prev;
- ErtsBifTimer *next;
- Process *ess;
- } proc;
- } receiver;
- ErlTimer tm;
- ErlHeapFragment* bp;
- Uint32 flags;
- Eterm message;
- Uint32 ref_numbers[ERTS_REF_NUMBERS];
-};
-
-#ifdef SMALL_MEMORY
-#define TIMER_HASH_VEC_SZ 3331
-#define BTM_PREALC_SZ 10
-#else
-#define TIMER_HASH_VEC_SZ 10007
-#define BTM_PREALC_SZ 100
-#endif
-static ErtsBifTimer **bif_timer_tab;
-static Uint no_bif_timers;
-
-
-static erts_smp_rwmtx_t bif_timer_lock;
-
-#define erts_smp_safe_btm_rwlock(P, L) \
- safe_btm_lock((P), (L), 1)
-#define erts_smp_safe_btm_rlock(P, L) \
- safe_btm_lock((P), (L), 0)
-#define erts_smp_btm_rwlock() \
- erts_smp_rwmtx_rwlock(&bif_timer_lock)
-#define erts_smp_btm_tryrwlock() \
- erts_smp_rwmtx_tryrwlock(&bif_timer_lock)
-#define erts_smp_btm_rwunlock() \
- erts_smp_rwmtx_rwunlock(&bif_timer_lock)
-#define erts_smp_btm_rlock() \
- erts_smp_rwmtx_rlock(&bif_timer_lock)
-#define erts_smp_btm_tryrlock() \
- erts_smp_rwmtx_tryrlock(&bif_timer_lock)
-#define erts_smp_btm_runlock() \
- erts_smp_rwmtx_runlock(&bif_timer_lock)
-#define erts_smp_btm_lock_init() \
- erts_smp_rwmtx_init(&bif_timer_lock, "bif_timers")
-
-
-static ERTS_INLINE int
-safe_btm_lock(Process *c_p, ErtsProcLocks c_p_locks, int rw_lock)
-{
- ASSERT(c_p && c_p_locks);
-#ifdef ERTS_SMP
- if ((rw_lock ? erts_smp_btm_tryrwlock() : erts_smp_btm_tryrlock()) != EBUSY)
- return 0;
- erts_smp_proc_unlock(c_p, c_p_locks);
- if (rw_lock)
- erts_smp_btm_rwlock();
- else
- erts_smp_btm_rlock();
- erts_smp_proc_lock(c_p, c_p_locks);
- if (ERTS_PROC_IS_EXITING(c_p)) {
- if (rw_lock)
- erts_smp_btm_rwunlock();
- else
- erts_smp_btm_runlock();
- return 1;
- }
-#endif
- return 0;
-}
-
-ERTS_SCHED_PREF_PALLOC_IMPL(btm_pre, ErtsBifTimer, BTM_PREALC_SZ)
-
-static ERTS_INLINE int
-get_index(Uint32 *ref_numbers, Uint32 len)
-{
- Uint32 hash;
- /* len can potentially be larger than ERTS_REF_NUMBERS
- if it has visited another node... */
- if (len > ERTS_REF_NUMBERS)
- len = ERTS_REF_NUMBERS;
-
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- switch (len) {
- case 3: if (!ref_numbers[2]) len = 2;
- case 2: if (!ref_numbers[1]) len = 1;
- default: break;
- }
-
- ASSERT(1 <= len && len <= ERTS_REF_NUMBERS);
-
- hash = block_hash((byte *) ref_numbers, len * sizeof(Uint32), 0x08d12e65);
- return (int) (hash % ((Uint32) TIMER_HASH_VEC_SZ));
-}
-
-static Eterm
-create_ref(Uint *hp, Uint32 *ref_numbers, Uint32 len)
-{
- Uint32 *datap;
- int i;
-
-
- if (len > ERTS_MAX_REF_NUMBERS) {
- /* Such large refs should no be able to appear in the emulator */
- erl_exit(1, "%s:%d: Internal error\n", __FILE__, __LINE__);
- }
-
-#if defined(ARCH_64) && !HALFWORD_HEAP
- hp[0] = make_ref_thing_header(len/2 + 1);
- datap = (Uint32 *) &hp[1];
- *(datap++) = len;
-#else
- hp[0] = make_ref_thing_header(len);
- datap = (Uint32 *) &hp[1];
-#endif
-
- for (i = 0; i < len; i++)
- datap[i] = ref_numbers[i];
-
- return make_internal_ref(hp);
-}
-
-static int
-eq_non_standard_ref_numbers(Uint32 *rn1, Uint32 len1, Uint32 *rn2, Uint32 len2)
-{
-#if defined(ARCH_64) && !HALFWORD_HEAP
-#define MAX_REF_HEAP_SZ (1+(ERTS_MAX_REF_NUMBERS/2+1))
-#else
-#define MAX_REF_HEAP_SZ (1+ERTS_MAX_REF_NUMBERS)
-#endif
- DeclareTmpHeapNoproc(r1_hp,(MAX_REF_HEAP_SZ * 2));
- Eterm *r2_hp = r1_hp +MAX_REF_HEAP_SZ;
-
- return eq(create_ref(r1_hp, rn1, len1), create_ref(r2_hp, rn2, len2));
-#undef MAX_REF_HEAP_SZ
-}
-
-static ERTS_INLINE int
-eq_ref_numbers(Uint32 *rn1, Uint32 len1, Uint32 *rn2, Uint32 len2)
-{
- int res;
- if (len1 != ERTS_REF_NUMBERS || len2 != ERTS_REF_NUMBERS) {
- /* Can potentially happen, but will never... */
- return eq_non_standard_ref_numbers(rn1, len1, rn2, len2);
- }
-
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- res = rn1[0] == rn2[0] && rn1[1] == rn2[1] && rn1[2] == rn2[2];
-
- ASSERT(res
- ? eq_non_standard_ref_numbers(rn1, len1, rn2, len2)
- : !eq_non_standard_ref_numbers(rn1, len1, rn2, len2));
-
- return res;
-}
-
-static ERTS_INLINE ErtsBifTimer *
-tab_find(Eterm ref)
-{
- Uint32 *ref_numbers = internal_ref_numbers(ref);
- Uint32 ref_numbers_len = internal_ref_no_of_numbers(ref);
- int ix = get_index(ref_numbers, ref_numbers_len);
- ErtsBifTimer* btm;
-
- for (btm = bif_timer_tab[ix]; btm; btm = btm->tab.next)
- if (eq_ref_numbers(ref_numbers, ref_numbers_len,
- btm->ref_numbers, ERTS_REF_NUMBERS))
- return btm;
- return NULL;
-}
-
-static ERTS_INLINE void
-tab_remove(ErtsBifTimer* btm)
-{
- if (btm->flags & BTM_FLG_HEAD) {
- *btm->tab.u.head = btm->tab.next;
- if (btm->tab.next) {
- btm->tab.next->flags |= BTM_FLG_HEAD;
- btm->tab.next->tab.u.head = btm->tab.u.head;
- }
- }
- else {
- btm->tab.u.prev->tab.next = btm->tab.next;
- if (btm->tab.next)
- btm->tab.next->tab.u.prev = btm->tab.u.prev;
- }
- btm->flags |= BTM_FLG_CANCELED;
- ASSERT(no_bif_timers > 0);
- no_bif_timers--;
-}
-
-static ERTS_INLINE void
-tab_insert(ErtsBifTimer* btm)
-{
- int ix = get_index(btm->ref_numbers, ERTS_REF_NUMBERS);
- ErtsBifTimer* btm_list = bif_timer_tab[ix];
-
- if (btm_list) {
- btm_list->flags &= ~BTM_FLG_HEAD;
- btm_list->tab.u.prev = btm;
- }
-
- btm->flags |= BTM_FLG_HEAD;
- btm->tab.u.head = &bif_timer_tab[ix];
- btm->tab.next = btm_list;
- bif_timer_tab[ix] = btm;
- no_bif_timers++;
-}
-
-static ERTS_INLINE void
-link_proc(Process *p, ErtsBifTimer* btm)
-{
- btm->receiver.proc.ess = p;
- btm->receiver.proc.prev = NULL;
- btm->receiver.proc.next = p->u.bif_timers;
- if (p->u.bif_timers)
- p->u.bif_timers->receiver.proc.prev = btm;
- p->u.bif_timers = btm;
-}
-
-static ERTS_INLINE void
-unlink_proc(ErtsBifTimer* btm)
-{
- if (btm->receiver.proc.prev)
- btm->receiver.proc.prev->receiver.proc.next = btm->receiver.proc.next;
- else
- btm->receiver.proc.ess->u.bif_timers = btm->receiver.proc.next;
- if (btm->receiver.proc.next)
- btm->receiver.proc.next->receiver.proc.prev = btm->receiver.proc.prev;
-}
-
-static void
-bif_timer_cleanup(ErtsBifTimer* btm)
-{
- ASSERT(btm);
-
- if (btm->bp)
- free_message_buffer(btm->bp);
-
- if (!btm_pre_free(btm)) {
- if (btm->flags & BTM_FLG_SL_TIMER)
- erts_free(ERTS_ALC_T_SL_BIF_TIMER, (void *) btm);
- else
- erts_free(ERTS_ALC_T_LL_BIF_TIMER, (void *) btm);
- }
-}
-
-static void
-bif_timer_timeout(ErtsBifTimer* btm)
-{
- ASSERT(btm);
-
-
- erts_smp_btm_rwlock();
-
- if (btm->flags & BTM_FLG_CANCELED) {
- /*
- * A concurrent cancel is ongoing. Do not send the timeout message,
- * but cleanup here since the cancel call-back won't be called.
- */
-#ifndef ERTS_SMP
- ASSERT(0);
-#endif
- }
- else {
- ErtsProcLocks rp_locks = 0;
- Process* rp;
-
- tab_remove(btm);
-
- ASSERT(!erts_get_current_process());
-
- if (btm->flags & BTM_FLG_BYNAME)
- rp = erts_whereis_process(NULL, 0, btm->receiver.name, 0, 0);
- else {
- rp = btm->receiver.proc.ess;
- unlink_proc(btm);
- }
-
- if (rp) {
- Eterm message;
- ErlHeapFragment *bp;
-
- bp = btm->bp;
- btm->bp = NULL; /* Prevent cleanup of message buffer... */
-
- if (!(btm->flags & BTM_FLG_WRAP))
- message = btm->message;
- else {
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- Eterm ref;
- Uint *hp;
- Uint wrap_size = REF_THING_SIZE + 4;
- message = btm->message;
-
- if (!bp) {
- ErlOffHeap *ohp;
- ASSERT(is_immed(message));
- hp = erts_alloc_message_heap(wrap_size,
- &bp,
- &ohp,
- rp,
- &rp_locks);
- } else {
- Eterm old_size = bp->used_size;
- bp = erts_resize_message_buffer(bp, old_size + wrap_size,
- &message, 1);
- hp = &bp->mem[0] + old_size;
- }
-
- write_ref_thing(hp,
- btm->ref_numbers[0],
- btm->ref_numbers[1],
- btm->ref_numbers[2]);
- ref = make_internal_ref(hp);
- hp += REF_THING_SIZE;
- message = TUPLE3(hp, am_timeout, ref, message);
- }
-
- erts_queue_message(rp, &rp_locks, bp, message, NIL);
- erts_smp_proc_unlock(rp, rp_locks);
- }
- }
-
- erts_smp_btm_rwunlock();
-
- bif_timer_cleanup(btm);
-}
-
-static Eterm
-setup_bif_timer(Uint32 xflags,
- Process *c_p,
- Eterm time,
- Eterm receiver,
- Eterm message)
-{
- Process *rp;
- ErtsBifTimer* btm;
- Uint timeout;
- Eterm ref;
- Uint32 *ref_numbers;
-
- if (!term_to_Uint(time, &timeout))
- return THE_NON_VALUE;
-#if defined(ARCH_64) && !HALFWORD_HEAP
- if ((timeout >> 32) != 0)
- return THE_NON_VALUE;
-#endif
- if (is_not_internal_pid(receiver) && is_not_atom(receiver))
- return THE_NON_VALUE;
-
- ref = erts_make_ref(c_p);
-
- if (is_atom(receiver))
- rp = NULL;
- else {
- rp = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
- receiver, ERTS_PROC_LOCK_MSGQ);
- if (!rp)
- return ref;
- }
-
- if (timeout < ERTS_ALC_MIN_LONG_LIVED_TIME) {
- if (timeout < 1000) {
- btm = btm_pre_alloc();
- if (!btm)
- goto sl_timer_alloc;
- btm->flags = 0;
- }
- else {
- sl_timer_alloc:
- btm = (ErtsBifTimer *) erts_alloc(ERTS_ALC_T_SL_BIF_TIMER,
- sizeof(ErtsBifTimer));
- btm->flags = BTM_FLG_SL_TIMER;
- }
- }
- else {
- btm = (ErtsBifTimer *) erts_alloc(ERTS_ALC_T_LL_BIF_TIMER,
- sizeof(ErtsBifTimer));
- btm->flags = 0;
- }
-
- if (rp) {
- link_proc(rp, btm);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
- }
- else {
- ASSERT(is_atom(receiver));
- btm->receiver.name = receiver;
- btm->flags |= BTM_FLG_BYNAME;
- }
-
- btm->flags |= xflags;
-
- ref_numbers = internal_ref_numbers(ref);
- ASSERT(internal_ref_no_of_numbers(ref) == 3);
-#if ERTS_REF_NUMBERS != 3
-#error "ERTS_REF_NUMBERS changed. Update me..."
-#endif
- btm->ref_numbers[0] = ref_numbers[0];
- btm->ref_numbers[1] = ref_numbers[1];
- btm->ref_numbers[2] = ref_numbers[2];
-
- ASSERT(eq_ref_numbers(btm->ref_numbers, ERTS_REF_NUMBERS,
- ref_numbers, ERTS_REF_NUMBERS));
-
- if (is_immed(message)) {
- btm->bp = NULL;
- btm->message = message;
- }
- else {
- ErlHeapFragment* bp;
- Eterm* hp;
- Uint size;
-
- size = size_object(message);
- btm->bp = bp = new_message_buffer(size);
- hp = bp->mem;
- btm->message = copy_struct(message, size, &hp, &bp->off_heap);
- }
-
- tab_insert(btm);
- ASSERT(btm == tab_find(ref));
- erts_init_timer(&btm->tm);
- erts_set_timer(&btm->tm,
- (ErlTimeoutProc) bif_timer_timeout,
- (ErlCancelProc) bif_timer_cleanup,
- (void *) btm,
- timeout);
- return ref;
-}
-
-BIF_RETTYPE old_send_after_3(BIF_ALIST_3);
-/* send_after(Time, Pid, Message) -> Ref */
-BIF_RETTYPE old_send_after_3(BIF_ALIST_3)
-{
- Eterm res;
-
- if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- res = setup_bif_timer(0, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
-
- erts_smp_btm_rwunlock();
-
- if (is_non_value(res)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- else {
- ASSERT(is_internal_ref(res));
- BIF_RET(res);
- }
-}
-
-BIF_RETTYPE old_start_timer_3(BIF_ALIST_3);
-/* start_timer(Time, Pid, Message) -> Ref */
-BIF_RETTYPE old_start_timer_3(BIF_ALIST_3)
-{
- Eterm res;
-
- if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- res = setup_bif_timer(BTM_FLG_WRAP, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
-
- erts_smp_btm_rwunlock();
-
- if (is_non_value(res)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- else {
- ASSERT(is_internal_ref(res));
- BIF_RET(res);
- }
-}
-
-BIF_RETTYPE old_cancel_timer_1(BIF_ALIST_1);
-/* cancel_timer(Ref) -> false | RemainingTime */
-BIF_RETTYPE old_cancel_timer_1(BIF_ALIST_1)
-{
- Eterm res;
- ErtsBifTimer *btm;
-
- if (is_not_internal_ref(BIF_ARG_1)) {
- if (is_ref(BIF_ARG_1)) {
- BIF_RET(am_false);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (erts_smp_safe_btm_rwlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- btm = tab_find(BIF_ARG_1);
- if (!btm || btm->flags & BTM_FLG_CANCELED) {
- erts_smp_btm_rwunlock();
- res = am_false;
- }
- else {
- Uint left = erts_time_left(&btm->tm);
- if (!(btm->flags & BTM_FLG_BYNAME)) {
- erts_smp_proc_lock(btm->receiver.proc.ess, ERTS_PROC_LOCK_MSGQ);
- unlink_proc(btm);
- erts_smp_proc_unlock(btm->receiver.proc.ess, ERTS_PROC_LOCK_MSGQ);
- }
- tab_remove(btm);
- ASSERT(!tab_find(BIF_ARG_1));
- erts_cancel_timer(&btm->tm);
- erts_smp_btm_rwunlock();
- res = erts_make_integer(left, BIF_P);
- }
-
- BIF_RET(res);
-}
-
-BIF_RETTYPE old_read_timer_1(BIF_ALIST_1);
-/* read_timer(Ref) -> false | RemainingTime */
-BIF_RETTYPE old_read_timer_1(BIF_ALIST_1)
-{
- Eterm res;
- ErtsBifTimer *btm;
-
- if (is_not_internal_ref(BIF_ARG_1)) {
- if (is_ref(BIF_ARG_1)) {
- BIF_RET(am_false);
- }
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (erts_smp_safe_btm_rlock(BIF_P, ERTS_PROC_LOCK_MAIN))
- ERTS_BIF_EXITED(BIF_P);
-
- btm = tab_find(BIF_ARG_1);
- if (!btm || btm->flags & BTM_FLG_CANCELED) {
- res = am_false;
- }
- else {
- Uint left = erts_time_left(&btm->tm);
- res = erts_make_integer(left, BIF_P);
- }
-
- erts_smp_btm_runlock();
-
- BIF_RET(res);
-}
-
-void
-erts_print_bif_timer_info(int to, void *to_arg)
-{
- int i;
- int lock = !ERTS_IS_CRASH_DUMPING;
-
- if (lock)
- erts_smp_btm_rlock();
-
- for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
- ErtsBifTimer *btm;
- for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
- Eterm receiver = (btm->flags & BTM_FLG_BYNAME
- ? btm->receiver.name
- : btm->receiver.proc.ess->common.id);
- erts_print(to, to_arg, "=timer:%T\n", receiver);
- erts_print(to, to_arg, "Message: %T\n", btm->message);
- erts_print(to, to_arg, "Time left: %u\n",
- erts_time_left(&btm->tm));
- }
- }
-
- if (lock)
- erts_smp_btm_runlock();
-}
-
-
-void
-erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
-{
- ErtsBifTimer *btm;
-
- if (erts_smp_btm_tryrwlock() == EBUSY) {
- erts_smp_proc_unlock(p, plocks);
- erts_smp_btm_rwlock();
- erts_smp_proc_lock(p, plocks);
- }
-
- btm = p->u.bif_timers;
- while (btm) {
- ErtsBifTimer *tmp_btm;
- ASSERT(!(btm->flags & BTM_FLG_CANCELED));
- tab_remove(btm);
- tmp_btm = btm;
- btm = btm->receiver.proc.next;
- erts_cancel_timer(&tmp_btm->tm);
- }
-
- p->u.bif_timers = NULL;
-
- erts_smp_btm_rwunlock();
-}
-
-static void erts_old_bif_timer_init(void)
-{
- int i;
- no_bif_timers = 0;
- init_btm_pre_alloc();
- erts_smp_btm_lock_init();
- bif_timer_tab = erts_alloc(ERTS_ALC_T_BIF_TIMER_TABLE,
- sizeof(ErtsBifTimer *)*TIMER_HASH_VEC_SZ);
- for (i = 0; i < TIMER_HASH_VEC_SZ; ++i)
- bif_timer_tab[i] = NULL;
-}
-
-Uint
-erts_bif_timer_memory_size(void)
-{
- Uint res;
- int lock = !ERTS_IS_CRASH_DUMPING;
-
- if (lock)
- erts_smp_btm_rlock();
-
- res = (sizeof(ErtsBifTimer *)*TIMER_HASH_VEC_SZ
- + no_bif_timers*sizeof(ErtsBifTimer));
-
- if (lock)
- erts_smp_btm_runlock();
-
- return res;
-}
-
-
-void
-erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
- void *arg)
-{
- int i;
-
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-
- for (i = 0; i < TIMER_HASH_VEC_SZ; i++) {
- ErtsBifTimer *btm;
- for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
- (*func)((btm->flags & BTM_FLG_BYNAME
- ? btm->receiver.name
- : btm->receiver.proc.ess->common.id),
- btm->message,
- btm->bp,
- arg);
- }
- }
-}
-
-typedef struct {
- Uint ref_heap[REF_THING_SIZE];
- Eterm pid[1];
-} ErtsBifTimerServers;
-
-static ErtsBifTimerServers *bif_timer_servers;
-
-void erts_bif_timer_init(void)
-{
- erts_old_bif_timer_init();
-}
-
-void
-erts_bif_timer_start_servers(Eterm parent)
-{
- Process *parent_proc;
- Eterm *hp, btr_ref, arg_list_end;
- ErlSpawnOpts so;
- int i;
-
- bif_timer_servers = erts_alloc(ERTS_ALC_T_BIF_TIMER_DATA,
- (sizeof(ErtsBifTimerServers)
- + (sizeof(Eterm)*(erts_no_schedulers-1))));
-
- so.flags = SPO_USE_ARGS|SPO_SYSTEM_PROC|SPO_PREFER_SCHED|SPO_OFF_HEAP_MSGS;
- so.min_heap_size = H_MIN_SIZE;
- so.min_vheap_size = BIN_VH_MIN_SIZE;
- so.priority = PRIORITY_MAX;
- so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
-
- /*
- * Parent is "init" and schedulers have not yet been started, so it
- * *should* be alive and well...
- */
- ASSERT(is_internal_pid(parent));
- parent_proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
- internal_pid_index(parent));
- ASSERT(parent_proc);
- ASSERT(parent_proc->common.id == parent);
- ASSERT(!ERTS_PROC_IS_EXITING(parent_proc));
-
- erts_smp_proc_lock(parent_proc, ERTS_PROC_LOCK_MAIN);
-
- hp = HAlloc(parent_proc, 2*erts_no_schedulers + 2 + REF_THING_SIZE);
-
- btr_ref = erts_make_ref_in_buffer(hp);
- hp += REF_THING_SIZE;
-
- arg_list_end = CONS(hp, btr_ref, NIL);
- hp += 2;
-
- for (i = 0; i < erts_no_schedulers; i++) {
- int sched = i+1;
- Eterm arg_list = CONS(hp, make_small(i+1), arg_list_end);
- hp += 2;
-
- so.scheduler = sched; /* Preferred scheduler */
-
- bif_timer_servers->pid[i] = erl_create_process(parent_proc,
- am_erts_internal,
- am_bif_timer_server,
- arg_list,
- &so);
- }
-
- erts_smp_proc_unlock(parent_proc, ERTS_PROC_LOCK_MAIN);
-
- hp = internal_ref_val(btr_ref);
- for (i = 0; i < REF_THING_SIZE; i++)
- bif_timer_servers->ref_heap[i] = hp[i];
-}
-
-BIF_RETTYPE
-erts_internal_get_bif_timer_servers_0(BIF_ALIST_0)
-{
- int i;
- Eterm *hp, res = NIL;
-
- hp = HAlloc(BIF_P, erts_no_schedulers*2);
- for (i = erts_no_schedulers-1; i >= 0; i--) {
- res = CONS(hp, bif_timer_servers->pid[i], res);
- hp += 2;
- }
- BIF_RET(res);
-}
-
-BIF_RETTYPE
-erts_internal_access_bif_timer_1(BIF_ALIST_1)
-{
- int ix;
- Uint32 *rdp;
- Eterm ref, pid, *hp, res;
-
- if (is_not_internal_ref(BIF_ARG_1)) {
- if (is_not_ref(BIF_ARG_1))
- BIF_ERROR(BIF_P, BADARG);
- BIF_RET(am_undefined);
- }
-
- rdp = internal_ref_numbers(BIF_ARG_1);
- ix = (int) erts_get_ref_numbers_thr_id(rdp);
- if (ix < 1 || erts_no_schedulers < ix)
- BIF_RET(am_undefined);
-
- pid = bif_timer_servers->pid[ix-1];
- ASSERT(is_internal_pid(pid));
-
- hp = HAlloc(BIF_P, 3 /* 2-tuple */ + REF_THING_SIZE);
- for (ix = 0; ix < REF_THING_SIZE; ix++)
- hp[ix] = bif_timer_servers->ref_heap[ix];
- ref = make_internal_ref(&hp[0]);
- hp += REF_THING_SIZE;
-
- res = TUPLE2(hp, ref, pid);
- BIF_RET(res);
-}
-
-BIF_RETTYPE
-erts_internal_create_bif_timer_0(BIF_ALIST_0)
-{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(BIF_P);
- Eterm *hp, btr_ref, t_ref, pid, res;
- int ix;
-
- hp = HAlloc(BIF_P, 4 /* 3-tuple */ + 2*REF_THING_SIZE);
- for (ix = 0; ix < REF_THING_SIZE; ix++)
- hp[ix] = bif_timer_servers->ref_heap[ix];
- btr_ref = make_internal_ref(&hp[0]);
- hp += REF_THING_SIZE;
-
- t_ref = erts_sched_make_ref_in_buffer(esdp, hp);
- hp += REF_THING_SIZE;
-
- ASSERT(erts_get_ref_numbers_thr_id(internal_ref_numbers(t_ref))
- == (Uint32) esdp->no);
-
- pid = bif_timer_servers->pid[((int) esdp->no) - 1];
-
- res = TUPLE3(hp, btr_ref, pid, t_ref);
-
- BIF_RET(res);
-}
diff --git a/erts/emulator/beam/erl_bif_timer.h b/erts/emulator/beam/erl_bif_timer.h
deleted file mode 100644
index c2f5dfd3c3..0000000000
--- a/erts/emulator/beam/erl_bif_timer.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2005-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-
-#ifndef ERL_BIF_TIMER_H__
-#define ERL_BIF_TIMER_H__
-
-typedef struct ErtsBifTimer_ ErtsBifTimer;
-
-#include "sys.h"
-#include "erl_process.h"
-#include "erl_message.h"
-
-Uint erts_bif_timer_memory_size(void);
-void erts_print_bif_timer_info(int to, void *to_arg);
-void erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks);
-void erts_bif_timer_init(void);
-void erts_bif_timer_foreach(void (*func)(Eterm,Eterm,ErlHeapFragment *,void *),
- void *arg);
-void erts_bif_timer_start_servers(Eterm);
-#endif
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index ac57205c47..13e0160648 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -359,7 +359,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
ASSERT(finish_bp.stager == NULL);
finish_bp.stager = p;
erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
- erts_smp_proc_inc_refc(p);
+ erts_proc_inc_refc(p);
erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(p, make_small(matches));
}
@@ -393,7 +393,7 @@ static void smp_bp_finisher(void* null)
erts_resume(p, ERTS_PROC_LOCK_STATUS);
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_dec_refc(p);
+ erts_proc_dec_refc(p);
}
}
#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index fff892ae54..2e2cb98354 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -620,7 +620,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
erts_fprintf(stderr,
"ets:safe_fixtable(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
#endif
kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
@@ -1247,7 +1247,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
erts_fprintf(stderr,
"ets:rename(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
#endif
@@ -1563,7 +1563,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
erts_fprintf(stderr,
"ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
erts_fprintf(stderr, "ets: new: meta_pid_to_fixed_tab common.memory_size = %ld\n",
@@ -1696,7 +1696,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
erts_fprintf(stderr,
"ets:delete(%T); Process: %T, initial: %T:%T/%bpu\n",
BIF_ARG_1, BIF_P->common.id,
- BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
+ BIF_P->u.initial[0], BIF_P->u.initial[1], BIF_P->u.initial[2]);
#endif
CHECK_TABLES();
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index d90af46659..c7bccc78c3 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -2716,7 +2716,7 @@ static int key_given(DbTableTree *tb, Eterm pattern, TreeDbTerm **ret,
*ret = this;
return 1;
} else if (partly_bound != NULL && key != am_Underscore &&
- db_is_variable(key) < 0)
+ db_is_variable(key) < 0 && !db_has_map(key))
*partly_bound = key;
return 0;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 0fb1c397c9..c6c3c55a7e 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -2039,7 +2039,7 @@ restart:
break;
case matchKey:
t = (Eterm) *pc++;
- tp = erts_maps_get_rel(t, make_flatmap_rel(ep, base), base);
+ tp = erts_maps_get_rel(t, make_boxed_rel(ep, base), base);
if (!tp) {
FAIL();
}
@@ -3347,6 +3347,37 @@ int db_is_variable(Eterm obj)
return N;
}
+/* check if node is (or contains) a map
+ * return 1 if node contains a map
+ * return 0 otherwise
+ */
+
+int db_has_map(Eterm node) {
+ DECLARE_ESTACK(s);
+
+ ESTACK_PUSH(s,node);
+ while (!ESTACK_ISEMPTY(s)) {
+ node = ESTACK_POP(s);
+ if (is_list(node)) {
+ while (is_list(node)) {
+ ESTACK_PUSH(s,CAR(list_val(node)));
+ node = CDR(list_val(node));
+ }
+ ESTACK_PUSH(s,node); /* Non wellformed list or [] */
+ } else if (is_tuple(node)) {
+ Eterm *tuple = tuple_val(node);
+ int arity = arityval(*tuple);
+ while(arity--) {
+ ESTACK_PUSH(s,*(++tuple));
+ }
+ } else if is_map(node) {
+ DESTROY_ESTACK(s);
+ return 1;
+ }
+ }
+ DESTROY_ESTACK(s);
+ return 0;
+}
/* check if obj is (or contains) a variable */
/* return 1 if obj contains a variable or underscore */
@@ -3380,6 +3411,11 @@ int db_has_variable(Eterm node) {
while (size--) {
ESTACK_PUSH(s, *(values++));
}
+ } else if (is_map(node)) { /* other map-nodes or map-heads */
+ Eterm *ptr = hashmap_val(node);
+ int i = hashmap_bitcount(MAP_HEADER_VAL(*ptr));
+ ptr += MAP_HEADER_ARITY(*ptr);
+ while(i--) { ESTACK_PUSH(s, *++ptr); }
}
break;
case TAG_PRIMARY_IMMED1:
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index ca206c7f58..b2d5a306cb 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -342,6 +342,7 @@ void* db_store_term(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj);
void* db_store_term_comp(DbTableCommon *tb, DbTerm* old, Uint offset, Eterm obj);
Eterm db_copy_element_from_ets(DbTableCommon* tb, Process* p, DbTerm* obj,
Uint pos, Eterm** hpp, Uint extra);
+int db_has_map(Eterm obj);
int db_has_variable(Eterm obj);
int db_is_variable(Eterm obj);
void db_do_update_element(DbUpdateHandle* handle,
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 0b18d2b9e8..1785fc27be 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -97,10 +97,10 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static Uint combined_message_size(Process* p, int off_heap_msgs);
+static Uint combined_message_size(Process* p);
static void remove_message_buffers(Process* p);
-static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs);
-static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs);
+static int major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
+static int minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl);
static void do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj);
static Eterm* sweep_rootset(Rootset *rootset, Eterm* htop, char* src, Uint src_size);
static Eterm* sweep_one_area(Eterm* n_hp, Eterm* n_htop, char* src, Uint src_size);
@@ -403,9 +403,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
Uint reclaimed_now = 0;
int done = 0;
- int off_heap_msgs;
- Uint ms1, s1, us1;
- erts_aint32_t state;
+ ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */
ErtsSchedulerData *esdp;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
@@ -422,11 +420,9 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
trace_gc(p, am_gc_start);
}
- state = erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
- off_heap_msgs = state & ERTS_PSFLG_OFF_HEAP_MSGS;
- if (erts_system_monitor_long_gc != 0) {
- get_now(&ms1, &s1, &us1);
- }
+ (void) erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+ if (erts_system_monitor_long_gc != 0)
+ start_time = erts_get_monotonic_time(esdp);
ERTS_CHK_OFFHEAP(p);
@@ -449,11 +445,11 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
while (!done) {
if ((FLAGS(p) & F_NEED_FULLSWEEP) != 0) {
DTRACE2(gc_major_start, pidbuf, need);
- done = major_collection(p, need, objv, nobj, &reclaimed_now, off_heap_msgs);
+ done = major_collection(p, need, objv, nobj, &reclaimed_now);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
} else {
DTRACE2(gc_minor_start, pidbuf, need);
- done = minor_collection(p, need, objv, nobj, &reclaimed_now, off_heap_msgs);
+ done = minor_collection(p, need, objv, nobj, &reclaimed_now);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
}
}
@@ -474,16 +470,14 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
}
if (erts_system_monitor_long_gc != 0) {
- Uint ms2, s2, us2;
- Sint t;
+ ErtsMonotonicTime end_time;
+ Uint gc_time;
if (erts_test_long_gc_sleep)
while (0 != erts_milli_sleep(erts_test_long_gc_sleep));
- get_now(&ms2, &s2, &us2);
- t = ms2 - ms1;
- t = t*1000000 + s2 - s1;
- t = t*1000 + ((Sint) (us2 - us1))/1000;
- if (t > 0 && (Uint)t > erts_system_monitor_long_gc) {
- monitor_long_gc(p, t);
+ end_time = erts_get_monotonic_time(esdp);
+ gc_time = (Uint) ERTS_MONOTONIC_TO_MSEC(end_time - start_time);
+ if (gc_time && gc_time > erts_system_monitor_long_gc) {
+ monitor_long_gc(p, gc_time);
}
}
if (erts_system_monitor_large_heap != 0) {
@@ -836,7 +830,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
}
static int
-minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs)
+minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
{
Uint mature = HIGH_WATER(p) - HEAP_START(p);
@@ -875,22 +869,20 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int of
Uint size_after;
Uint need_after;
Uint stack_size = STACK_SZ_ON_HEAP(p);
- Uint fragments = MBUF_SIZE(p) + combined_message_size(p, off_heap_msgs);
+ Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
Uint size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
Uint new_sz = next_heap_size(p, HEAP_SIZE(p) + fragments, 0);
do_minor(p, new_sz, objv, nobj);
- if (!off_heap_msgs) {
- /*
- * Copy newly received message onto the end of the new heap.
- */
- ErtsGcQuickSanityCheck(p);
- for (msgp = p->msg.first; msgp; msgp = msgp->next) {
- if (msgp->data.attached) {
- erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
- ErtsGcQuickSanityCheck(p);
- }
+ /*
+ * Copy newly received message onto the end of the new heap.
+ */
+ ErtsGcQuickSanityCheck(p);
+ for (msgp = p->msg.first; msgp; msgp = msgp->next) {
+ if (msgp->data.attached) {
+ erts_move_msg_attached_data_to_heap(&p->htop, &p->off_heap, msgp);
+ ErtsGcQuickSanityCheck(p);
}
}
ErtsGcQuickSanityCheck(p);
@@ -1216,7 +1208,7 @@ do_minor(Process *p, Uint new_sz, Eterm* objv, int nobj)
*/
static int
-major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int off_heap_msgs)
+major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
{
Rootset rootset;
Roots* roots;
@@ -1229,7 +1221,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int of
Uint oh_size = (char *) OLD_HTOP(p) - oh;
Uint n;
Uint new_sz;
- Uint fragments = MBUF_SIZE(p) + combined_message_size(p, off_heap_msgs);
+ Uint fragments = MBUF_SIZE(p) + combined_message_size(p);
size_before = fragments + (HEAP_TOP(p) - HEAP_START(p));
@@ -1439,7 +1431,7 @@ major_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl, int of
ErtsGcQuickSanityCheck(p);
- if (!off_heap_msgs) {
+ {
ErlMessage *msgp;
/*
* Copy newly received message onto the end of the new heap.
@@ -1509,14 +1501,11 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int
* mbuf list.
*/
static Uint
-combined_message_size(Process* p, int off_heap_msgs)
+combined_message_size(Process* p)
{
Uint sz;
ErlMessage *msgp;
- if (off_heap_msgs)
- return 0;
-
for (sz = 0, msgp = p->msg.first; msgp; msgp = msgp->next) {
if (msgp->data.attached)
sz += erts_msg_attached_data_size(msgp);
@@ -2665,7 +2654,7 @@ reply_gc_info(void *vgcirp)
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0)
gcireq_free(vgcirp);
@@ -2689,7 +2678,7 @@ erts_gc_info_request(Process *c_p)
erts_smp_atomic32_init_nob(&gcirp->refc,
(erts_aint32_t) erts_no_schedulers);
- erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+ erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
new file mode 100644
index 0000000000..51cd843935
--- /dev/null
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -0,0 +1,2894 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2015. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: High level timers implementing BIF timers
+ * as well as process and port timers.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+#include "bif.h"
+#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+#include "erl_hl_timer.h"
+
+#define ERTS_TMR_CHECK_CANCEL_ON_CREATE 0
+
+#if 0
+# define ERTS_HLT_HARD_DEBUG
+#endif
+#if 0
+# define ERTS_HLT_DEBUG
+#endif
+
+#if defined(ERTS_HLT_HARD_DEBUG) || defined(DEBUG)
+# if defined(ERTS_HLT_HARD_DEBUG)
+# undef ERTS_RBT_HARD_DEBUG
+# define ERTS_RBT_HARD_DEBUG 1
+# endif
+# ifndef ERTS_HLT_DEBUG
+# define ERTS_HLT_DEBUG 1
+# endif
+#endif
+
+#undef ERTS_HLT_ASSERT
+#if defined(ERTS_HLT_DEBUG)
+# define ERTS_HLT_ASSERT(E) ERTS_ASSERT(E)
+# undef ERTS_RBT_DEBUG
+# define ERTS_RBT_DEBUG
+#else
+# define ERTS_HLT_ASSERT(E) ((void) 1)
+#endif
+
+#if defined(ERTS_HLT_HARD_DEBUG) && defined(__GNUC__)
+#warning "* * * * * * * * * * * * * * * * * *"
+#warning "* ERTS_HLT_HARD_DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * * * * * *"
+#endif
+
+#ifdef ERTS_HLT_HARD_DEBUG
+# define ERTS_HLT_HDBG_CHK_SRV(SRV) hdbg_chk_srv((SRV))
+static void hdbg_chk_srv(ErtsHLTimerService *srv);
+#else
+# define ERTS_HLT_HDBG_CHK_SRV(SRV) ((void) 1)
+#endif
+
+#if ERTS_REF_NUMBERS != 3
+#error "ERTS_REF_NUMBERS changed. Update me..."
+#endif
+
+#define ERTS_BIF_TIMER_SHORT_TIME 5000
+
+#ifdef ERTS_SMP
+# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore \
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore)
+#else
+# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore
+#endif
+
+/* Bits 0 to 9 contain the scheduler id (see mask below) */
+#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 10)
+#define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 11)
+#define ERTS_TMR_ROFLG_ABIF_TMR (((Uint32) 1) << 12)
+#define ERTS_TMR_ROFLG_PRE_ALC (((Uint32) 1) << 13)
+#define ERTS_TMR_ROFLG_REG_NAME (((Uint32) 1) << 14)
+#define ERTS_TMR_ROFLG_PROC (((Uint32) 1) << 15)
+#define ERTS_TMR_ROFLG_PORT (((Uint32) 1) << 16)
+
+#define ERTS_TMR_ROFLG_SID_MASK \
+ (ERTS_TMR_ROFLG_HLT - (Uint32) 1)
+
+#define ERTS_TMR_STATE_ACTIVE ((erts_aint32_t) 0)
+#define ERTS_TMR_STATE_CANCELED ((erts_aint32_t) 1)
+#define ERTS_TMR_STATE_TIMED_OUT ((erts_aint32_t) 2)
+
+typedef struct ErtsHLTimer_ ErtsHLTimer;
+
+#define ERTS_HLT_PFLG_RED (((UWord) 1) << 0)
+#define ERTS_HLT_PFLG_SAME_TIME (((UWord) 1) << 1)
+
+#define ERTS_HLT_PFLGS_MASK \
+ (ERTS_HLT_PFLG_RED|ERTS_HLT_PFLG_SAME_TIME)
+
+#define ERTS_HLT_PFIELD_NOT_IN_TABLE (~((UWord) 0))
+
+typedef struct {
+ UWord parent; /* parent pointer and flags... */
+ union {
+ struct {
+ ErtsHLTimer *right;
+ ErtsHLTimer *left;
+ } t;
+ struct {
+ ErtsHLTimer *prev;
+ ErtsHLTimer *next;
+ } l;
+ } u;
+ ErtsHLTimer *same_time;
+} ErtsHLTimerTimeTree;
+
+typedef struct {
+ UWord parent; /* parent pointer and flags... */
+ ErtsHLTimer *right;
+ ErtsHLTimer *left;
+} ErtsHLTimerTree;
+
+typedef struct {
+ Uint32 roflgs;
+ erts_smp_atomic32_t refc;
+ union {
+ erts_atomic_t next;
+ } u;
+} ErtsTmrHead;
+
+struct ErtsHLTimer_ {
+ ErtsTmrHead head; /* NEEDS to be first! */
+ union {
+ ErtsThrPrgrLaterOp cleanup;
+ ErtsHLTimerTimeTree tree;
+ } time;
+ ErtsMonotonicTime timeout;
+ union {
+ Process *proc;
+ Port *port;
+ Eterm name;
+ } receiver;
+
+#ifdef ERTS_HLT_HARD_DEBUG
+ int pending_timeout;
+#endif
+
+ erts_smp_atomic32_t state;
+
+ /* BIF timer only fields follow... */
+ struct {
+ Uint32 refn[ERTS_REF_NUMBERS];
+ ErtsHLTimerTree proc_tree;
+ ErtsHLTimerTree tree;
+ Eterm message;
+ ErlHeapFragment *bp;
+ } btm;
+ struct {
+ Eterm accessor;
+ ErtsHLTimerTree tree;
+ } abtm;
+};
+
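+/*
+ * Plain process/port timers only use the fields up to and including
+ * 'state'; the btm part is only used by BIF timers and the abtm part
+ * only by BIF timers with a separate accessor. The sizes below
+ * truncate the structure accordingly.
+ */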
+#define ERTS_HL_PTIMER_SIZE offsetof(ErtsHLTimer, btm)
+#define ERTS_BIF_TIMER_SIZE offsetof(ErtsHLTimer, abtm)
+#define ERTS_ABIF_TIMER_SIZE sizeof(ErtsHLTimer)
+
+typedef struct {
+ ErtsTmrHead head; /* NEEDS to be first! */
+ void *p;
+ ErtsTWheelTimer tw_tmr;
+} ErtsTWTimer;
+
+typedef union {
+ ErtsTmrHead head;
+ ErtsHLTimer hlt;
+ ErtsTWTimer twt;
+} ErtsTimer;
+
+#ifdef SMALL_MEMORY
+#define BIF_TIMER_PREALC_SZ 10
+#define PTIMER_PREALC_SZ 10
+#else
+#define BIF_TIMER_PREALC_SZ 100
+#define PTIMER_PREALC_SZ 100
+#endif
+
+ERTS_SCHED_PREF_PALLOC_IMPL(bif_timer_pre,
+ ErtsHLTimer,
+ BIF_TIMER_PREALC_SZ)
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(tw_timer,
+ ErtsTWTimer,
+ PTIMER_PREALC_SZ,
+ ERTS_ALC_T_LL_PTIMER)
+
+#ifdef ERTS_HLT_DEBUG
+#define ERTS_TMR_TIMEOUT_YIELD_LIMIT 5
+#else
+#define ERTS_TMR_TIMEOUT_YIELD_LIMIT 100
+#endif
+#define ERTS_TMR_CANCELED_TIMER_LIMIT 100
+#define ERTS_TMR_CANCELED_TIMER_SMALL_LIMIT 5
+
+#define ERTS_TMR_TIMEOUT_YIELD_STATE_T same_time_list_yield_state_t
+#define ERTS_TMR_YIELDING_TIMEOUT_STATE_INITER {NULL, {0}}
+typedef struct {
+ int dummy;
+} ERTS_TMR_TIMEOUT_YIELD_STATE_T;
+
+typedef struct {
+ ErtsTmrHead marker;
+ erts_atomic_t last;
+} ErtsHLTCncldTmrQTail;
+
+#ifdef ERTS_SMP
+
+typedef struct {
+ /*
+ * This structure needs to be cache line aligned for best
+ * performance.
+ */
+ union {
+ /*
+ * Modified by threads returning canceled
+ * timers to this timer service.
+ */
+ ErtsHLTCncldTmrQTail data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+ sizeof(ErtsHLTCncldTmrQTail))];
+ } tail;
+ /*
+ * Everything below this point is *only* accessed by the
+ * thread managing this timer service.
+ */
+ struct {
+ ErtsTimer *first;
+ ErtsTimer *unref_end;
+ struct {
+ ErtsThrPrgrVal thr_progress;
+ int thr_progress_reached;
+ ErtsTimer *unref_end;
+ } next;
+ int used_marker;
+ } head;
+} ErtsHLTCncldTmrQ;
+
+#endif /* ERTS_SMP */
+
+typedef struct {
+ ErtsHLTimer *root;
+ ERTS_TMR_TIMEOUT_YIELD_STATE_T state;
+} ErtsYieldingTimeoutState;
+
+struct ErtsHLTimerService_ {
+#ifdef ERTS_SMP
+ ErtsHLTCncldTmrQ canceled_queue;
+#endif
+ ErtsHLTimer *time_tree;
+ ErtsHLTimer *btm_tree;
+ ErtsHLTimer *next_timeout;
+ ErtsYieldingTimeoutState yield;
+ ErtsTWheelTimer service_timer;
+};
+
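+/*
+ * Compare two timer references by their reference numbers; word 2 is
+ * treated as the most significant word. This ordering is used as the
+ * key comparison in the reference keyed trees instantiated below.
+ */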
+static ERTS_INLINE int
+refn_is_lt(Uint32 *x, Uint32 *y)
+{
+ /* !0 if x < y */
+ if (x[2] < y[2])
+ return 1;
+ if (x[2] != y[2])
+ return 0;
+ if (x[1] < y[1])
+ return 1;
+ if (x[1] != y[1])
+ return 0;
+ return x[0] < y[0];
+}
+
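+/*
+ * Red-black tree of high level timers keyed on timeout position.
+ * The macros below parameterize the generic tree implementation in
+ * erl_rbtree.h, which generates the time_rbt_* operations used by
+ * the timer service (btm_rbt_*, proc_btm_rbt_* and abtm_rbt_* are
+ * generated the same way further down).
+ */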
+#define ERTS_RBT_PREFIX time
+#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_KEY_T ErtsMonotonicTime
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->time.tree.parent = (UWord) NULL; \
+ (T)->time.tree.u.t.right = NULL; \
+ (T)->time.tree.u.t.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ ((int) ((T)->time.tree.parent & ERTS_HLT_PFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->time.tree.parent |= ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->time.tree.parent &= ~ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->time.tree.parent & ERTS_HLT_PFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->time.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
+ (T)->time.tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ErtsHLTimer *) ((T)->time.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->time.tree.parent &= ERTS_HLT_PFLGS_MASK; \
+ (T)->time.tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->time.tree.u.t.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->time.tree.u.t.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->time.tree.u.t.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->time.tree.u.t.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->timeout)
+#define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_SMALLEST
+#define ERTS_RBT_WANT_LOOKUP_INSERT
+#define ERTS_RBT_WANT_REPLACE
+#ifdef ERTS_HLT_HARD_DEBUG
+# define ERTS_RBT_WANT_FOREACH
+# define ERTS_RBT_WANT_LOOKUP
+#endif
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+/* Use a circular list for timers at the same time */
+
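+/*
+ * Timers that hit the same timeout position are linked into a
+ * circular, doubly linked list hanging off the tree node's
+ * 'same_time' field. The element referred to by the root pointer
+ * stores a tagged pointer back to that root pointer in its parent
+ * field; all elements in the list have ERTS_HLT_PFLG_SAME_TIME set.
+ */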
+static ERTS_INLINE void
+same_time_list_insert(ErtsHLTimer **root, ErtsHLTimer *tmr)
+{
+ ErtsHLTimer *first = *root;
+ if (!first) {
+ ERTS_HLT_ASSERT((((UWord) root) & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ tmr->time.tree.u.l.next = tmr;
+ tmr->time.tree.u.l.prev = tmr;
+ *root = tmr;
+ }
+ else {
+ tmr->time.tree.parent = ERTS_HLT_PFLG_SAME_TIME;
+ tmr->time.tree.u.l.next = first;
+ tmr->time.tree.u.l.prev = first->time.tree.u.l.prev;
+ first->time.tree.u.l.prev = tmr;
+ tmr->time.tree.u.l.prev->time.tree.u.l.next = tmr;
+ }
+}
+
+static ERTS_INLINE void
+same_time_list_delete(ErtsHLTimer *tmr)
+{
+ ErtsHLTimer **root, *next;
+
+ root = (ErtsHLTimer **) (tmr->time.tree.parent & ~ERTS_HLT_PFLG_SAME_TIME);
+ next = tmr->time.tree.u.l.next;
+
+ ERTS_HLT_ASSERT((tmr->time.tree.parent
+ == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME))
+ || (tmr->time.tree.parent
+ == ERTS_HLT_PFLG_SAME_TIME));
+
+ if (next == tmr) {
+ ERTS_HLT_ASSERT(root && *root == tmr);
+ ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev == tmr);
+ *root = NULL;
+ }
+ else {
+ if (root) {
+ ERTS_HLT_ASSERT(*root == tmr);
+ *root = next;
+ next->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ }
+ tmr->time.tree.u.l.next->time.tree.u.l.prev = tmr->time.tree.u.l.prev;
+ tmr->time.tree.u.l.prev->time.tree.u.l.next = next;
+ }
+}
+
+static ERTS_INLINE void
+same_time_list_new_root(ErtsHLTimer **root)
+{
+ ErtsHLTimer *tmr = *root;
+ if (tmr) {
+ ERTS_HLT_ASSERT(root);
+ tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ }
+}
+
+static ERTS_INLINE int
+same_time_list_foreach_destroy_yielding(ErtsHLTimer **root,
+ void (*op)(ErtsHLTimer *, void *),
+ void *arg,
+ ERTS_TMR_TIMEOUT_YIELD_STATE_T *ys,
+ Sint ylimit)
+{
+ Sint ycnt = ylimit;
+ ErtsHLTimer *end, *tmr = *root;
+ if (!tmr)
+ return 0;
+
+ ERTS_HLT_ASSERT(tmr->time.tree.parent
+ == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME));
+
+ end = tmr->time.tree.u.l.prev;
+ end->time.tree.u.l.next = NULL;
+
+ while (1) {
+ ErtsHLTimer *op_tmr = tmr;
+
+ ERTS_HLT_ASSERT((tmr->time.tree.parent
+ == (((UWord) root) | ERTS_HLT_PFLG_SAME_TIME))
+ || (tmr->time.tree.parent
+ == ERTS_HLT_PFLG_SAME_TIME));
+
+ tmr = tmr->time.tree.u.l.next;
+ (*op)(op_tmr, arg);
+ if (!tmr) {
+ *root = NULL;
+ return 0;
+ }
+ if (--ycnt <= 0) {
+ /* Make new circle of timers left to process... */
+ *root = tmr;
+ end->time.tree.u.l.next = tmr;
+ tmr->time.tree.u.l.prev = end;
+ tmr->time.tree.parent = ((UWord) root) | ERTS_HLT_PFLG_SAME_TIME;
+ return 1;
+ }
+ }
+}
+
+#ifdef ERTS_HLT_HARD_DEBUG
+
+static ERTS_INLINE void
+same_time_list_foreach(ErtsHLTimer *root,
+ void (*op)(ErtsHLTimer *, void *),
+ void *arg)
+{
+ if (root) {
+ ErtsHLTimer *tmr = root;
+ do {
+ (*op)(tmr, arg);
+ tmr = tmr->time.tree.u.l.next;
+ } while (root != tmr);
+ }
+}
+
+static ERTS_INLINE ErtsHLTimer *
+same_time_list_lookup(ErtsHLTimer *root, ErtsHLTimer *x)
+{
+ if (root) {
+ ErtsHLTimer *tmr = root;
+ do {
+ if (tmr == x)
+ return tmr;
+ tmr = tmr->time.tree.u.l.next;
+ } while (root != tmr);
+ }
+ return NULL;
+}
+
+#endif /* ERTS_HLT_HARD_DEBUG */
+
+#define ERTS_RBT_PREFIX btm
+#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_KEY_T Uint32 *
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->btm.tree.parent = (UWord) NULL; \
+ (T)->btm.tree.right = NULL; \
+ (T)->btm.tree.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ ((int) ((T)->btm.tree.parent & ERTS_HLT_PFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->btm.tree.parent |= ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->btm.tree.parent &= ~ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->btm.tree.parent & ERTS_HLT_PFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ErtsHLTimer *) ((T)->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.tree.parent &= ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->btm.tree.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.tree.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->btm.tree.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.tree.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) \
+ (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_FOREACH
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+#define ERTS_RBT_PREFIX proc_btm
+#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_KEY_T Uint32 *
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->btm.proc_tree.parent = (UWord) NULL; \
+ (T)->btm.proc_tree.right = NULL; \
+ (T)->btm.proc_tree.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ ((int) ((T)->btm.proc_tree.parent & ERTS_HLT_PFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->btm.proc_tree.parent |= ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->btm.proc_tree.parent &= ~ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->btm.proc_tree.parent & ERTS_HLT_PFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.proc_tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.proc_tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ErtsHLTimer *) ((T)->btm.proc_tree.parent & ~ERTS_HLT_PFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->btm.proc_tree.parent &= ERTS_HLT_PFLGS_MASK; \
+ (T)->btm.proc_tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->btm.proc_tree.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->btm.proc_tree.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->btm.proc_tree.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->btm.proc_tree.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) \
+ (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+#define ERTS_RBT_PREFIX abtm
+#define ERTS_RBT_T ErtsHLTimer
+#define ERTS_RBT_KEY_T Uint32 *
+#define ERTS_RBT_FLAGS_T UWord
+#define ERTS_RBT_INIT_EMPTY_TNODE(T) \
+ do { \
+ (T)->abtm.tree.parent = (UWord) NULL; \
+ (T)->abtm.tree.right = NULL; \
+ (T)->abtm.tree.left = NULL; \
+ } while (0)
+#define ERTS_RBT_IS_RED(T) \
+ ((int) ((T)->abtm.tree.parent & ERTS_HLT_PFLG_RED))
+#define ERTS_RBT_SET_RED(T) \
+ ((T)->abtm.tree.parent |= ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_IS_BLACK(T) \
+ (!ERTS_RBT_IS_RED((T)))
+#define ERTS_RBT_SET_BLACK(T) \
+ ((T)->abtm.tree.parent &= ~ERTS_HLT_PFLG_RED)
+#define ERTS_RBT_GET_FLAGS(T) \
+ ((T)->abtm.tree.parent & ERTS_HLT_PFLGS_MASK)
+#define ERTS_RBT_SET_FLAGS(T, F) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (F)) & ~ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->abtm.tree.parent &= ~ERTS_HLT_PFLGS_MASK; \
+ (T)->abtm.tree.parent |= (F); \
+ } while (0)
+#define ERTS_RBT_GET_PARENT(T) \
+ ((ErtsHLTimer *) ((T)->abtm.tree.parent & ~ERTS_HLT_PFLGS_MASK))
+#define ERTS_RBT_SET_PARENT(T, P) \
+ do { \
+ ERTS_HLT_ASSERT((((UWord) (P)) & ERTS_HLT_PFLGS_MASK) == 0); \
+ (T)->abtm.tree.parent &= ERTS_HLT_PFLGS_MASK; \
+ (T)->abtm.tree.parent |= (UWord) (P); \
+ } while (0)
+#define ERTS_RBT_GET_RIGHT(T) ((T)->abtm.tree.right)
+#define ERTS_RBT_SET_RIGHT(T, R) ((T)->abtm.tree.right = (R))
+#define ERTS_RBT_GET_LEFT(T) ((T)->abtm.tree.left)
+#define ERTS_RBT_SET_LEFT(T, L) ((T)->abtm.tree.left = (L))
+#define ERTS_RBT_GET_KEY(T) ((T)->btm.refn)
+#define ERTS_RBT_IS_LT(KX, KY) refn_is_lt((KX), (KY))
+#define ERTS_RBT_IS_EQ(KX, KY) \
+ (((KX)[0] == (KY)[0]) & ((KX)[1] == (KY)[1]) & ((KX)[2] == (KY)[2]))
+#define ERTS_RBT_WANT_DELETE
+#define ERTS_RBT_WANT_INSERT
+#define ERTS_RBT_WANT_LOOKUP
+#define ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+#define ERTS_RBT_UNDEF
+
+#include "erl_rbtree.h"
+
+#ifdef ERTS_SMP
+static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
+#endif
+
+void
+erts_hl_timer_init(void)
+{
+ init_tw_timer_alloc();
+ init_bif_timer_pre_alloc();
+}
+
+ErtsHLTimerService *
+erts_create_timer_service(void)
+{
+ ErtsYieldingTimeoutState init_yield = ERTS_TMR_YIELDING_TIMEOUT_STATE_INITER;
+ ErtsHLTimerService *srv;
+
+ srv = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_SERVICE,
+ sizeof(ErtsHLTimerService));
+ srv->time_tree = NULL;
+ srv->btm_tree = NULL;
+ srv->next_timeout = NULL;
+ srv->yield = init_yield;
+ erts_twheel_init_timer(&srv->service_timer);
+
+#ifdef ERTS_SMP
+ init_canceled_queue(&srv->canceled_queue);
+#endif
+
+ return srv;
+}
+
+size_t
+erts_timer_type_size(ErtsAlcType_t type)
+{
+ switch (type) {
+ case ERTS_ALC_T_LL_PTIMER: return sizeof(ErtsTWTimer);
+ case ERTS_ALC_T_HL_PTIMER: return ERTS_HL_PTIMER_SIZE;
+ case ERTS_ALC_T_BIF_TIMER: return ERTS_BIF_TIMER_SIZE;
+ case ERTS_ALC_T_ABIF_TIMER: return ERTS_ABIF_TIMER_SIZE;
+ default: ERTS_INTERNAL_ERROR("Unknown type");
+ }
+ return 0;
+}
+
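+/*
+ * Convert a timeout in milliseconds relative to 'now' into an
+ * absolute timeout position in clock tick units. The result is
+ * rounded upwards so that a timer never fires before the
+ * requested time has passed.
+ */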
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout_pos(ErtsMonotonicTime now, ErtsMonotonicTime msec)
+{
+ ErtsMonotonicTime timeout_pos;
+ if (msec <= 0)
+ return ERTS_MONOTONIC_TO_CLKTCKS(now);
+ timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(now-1);
+ timeout_pos += ERTS_MSEC_TO_CLKTCKS(msec) + 1;
+ return timeout_pos;
+}
+
+static ERTS_INLINE Sint64
+get_time_left(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos)
+{
+ ErtsMonotonicTime now = erts_get_monotonic_time(esdp);
+
+ now = ERTS_MONOTONIC_TO_CLKTCKS(now-1)+1;
+ if (timeout_pos <= now)
+ return (Sint64) 0;
+ return (Sint64) ERTS_CLKTCKS_TO_MSEC(timeout_pos - now);
+}
+
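+/*
+ * Mark a process timer as timed out. The cmpxchg only succeeds if
+ * the process' timer field still refers to this timer, i.e. if the
+ * timer has not been canceled or replaced. On success the process
+ * is scheduled (unless already active or exiting) and nonzero is
+ * returned so that the caller can drop the timer reference.
+ */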
+static ERTS_INLINE int
+proc_timeout_common(Process *proc, void *tmr)
+{
+ if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&proc->common.timer,
+ ERTS_PTMR_TIMEDOUT,
+ (erts_aint_t) tmr)) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
+ if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING)))
+ erts_schedule_process(proc, state, 0);
+ return 1;
+ }
+ return 0;
+}
+
+static ERTS_INLINE int
+port_timeout_common(Port *port, void *tmr)
+{
+ if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&port->common.timer,
+ ERTS_PTMR_TIMEDOUT,
+ (erts_aint_t) tmr)) {
+ erts_port_task_schedule(port->common.id,
+ &port->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Basic timer wheel timer stuff
+ */
+
+static void
+scheduled_tw_timer_destroy(void *vtmr)
+{
+ tw_timer_free((ErtsTWTimer *) vtmr);
+}
+
+static void
+schedule_tw_timer_destroy(ErtsTWTimer *tmr)
+{
+ /*
+ * Reference to process/port can be
+ * dropped at once...
+ */
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_PROC)
+ erts_proc_dec_refc((Process *) tmr->p);
+ else
+ erts_port_dec_refc((Port *) tmr->p);
+
+ erts_schedule_thr_prgr_later_cleanup_op(
+ scheduled_tw_timer_destroy,
+ (void *) tmr,
+ &tmr->tw_tmr.u.cleanup,
+ sizeof(ErtsTWTimer));
+}
+
+static ERTS_INLINE void
+tw_timer_dec_refc(ErtsTWTimer *tmr)
+{
+ if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+ ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore;
+ schedule_tw_timer_destroy(tmr);
+ }
+}
+
+static void
+tw_proc_timeout(void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ Process *proc = (Process *) twtp->p;
+ if (proc_timeout_common(proc, vtwtp))
+ tw_timer_dec_refc(twtp);
+ tw_timer_dec_refc(twtp);
+}
+
+static void
+tw_port_timeout(void *vtwtp)
+{
+ ErtsTWTimer *twtp = (ErtsTWTimer *) vtwtp;
+ Port *port = (Port *) twtp->p;
+ if (port_timeout_common(port, vtwtp))
+ tw_timer_dec_refc(twtp);
+ tw_timer_dec_refc(twtp);
+}
+
+static void
+tw_ptimer_cancel(void *vtwtp)
+{
+ tw_timer_dec_refc((ErtsTWTimer *) vtwtp);
+}
+
+static void
+cancel_tw_timer(ErtsSchedulerData *esdp, ErtsTWTimer *tmr)
+{
+ ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
+ == (Uint32) esdp->no);
+ erts_twheel_cancel_timer(esdp->timer_wheel, &tmr->tw_tmr);
+}
+
+static ErtsTWTimer *
+create_tw_timer(ErtsSchedulerData *esdp,
+ void *p, int is_proc,
+ ErtsMonotonicTime timeout_pos)
+{
+ ErtsTWTimer *tmr;
+ void (*timeout_func)(void *);
+
+ tmr = tw_timer_alloc();
+ erts_twheel_init_timer(&tmr->tw_tmr);
+
+ tmr->head.roflgs = (Uint32) esdp->no;
+ ERTS_HLT_ASSERT((tmr->head.roflgs
+ & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
+ tmr->p = p;
+ if (is_proc) {
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PROC;
+ timeout_func = tw_proc_timeout;
+ erts_proc_inc_refc((Process *) p);
+ }
+ else {
+ tmr->head.roflgs |= ERTS_TMR_ROFLG_PORT;
+ timeout_func = tw_port_timeout;
+ erts_port_inc_refc((Port *) p);
+ }
+
+ erts_smp_atomic32_init_nob(&tmr->head.refc, 2);
+
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &tmr->tw_tmr,
+ timeout_func,
+ tw_ptimer_cancel,
+ tmr,
+ timeout_pos);
+
+ return tmr;
+}
+
+/*
+ * Basic high level timer stuff
+ */
+
+static ERTS_INLINE void
+hl_timer_destroy(ErtsHLTimer *tmr)
+{
+ Uint32 roflgs = tmr->head.roflgs;
+ if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ erts_free(ERTS_ALC_T_HL_PTIMER, tmr);
+ else {
+ if (roflgs & ERTS_TMR_ROFLG_PRE_ALC)
+ bif_timer_pre_free(tmr);
+ else if (roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
+ erts_free(ERTS_ALC_T_ABIF_TIMER, tmr);
+ else
+ erts_free(ERTS_ALC_T_BIF_TIMER, tmr);
+ }
+}
+
+static void
+scheduled_hl_timer_destroy(void *vtmr)
+{
+ hl_timer_destroy((ErtsHLTimer *) vtmr);
+}
+
+static void
+schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
+{
+ UWord size;
+
+ /*
+ * Reference to process/port can be dropped
+ * at once...
+ */
+
+ ERTS_HLT_ASSERT(erts_smp_atomic32_read_nob(&tmr->head.refc) == 0);
+
+ if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
+ ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
+ }
+ else if (roflgs & ERTS_TMR_ROFLG_PROC) {
+ ERTS_HLT_ASSERT(tmr->receiver.proc);
+ erts_proc_dec_refc(tmr->receiver.proc);
+ }
+ else if (roflgs & ERTS_TMR_ROFLG_PORT) {
+ ERTS_HLT_ASSERT(tmr->receiver.port);
+ erts_port_dec_refc(tmr->receiver.port);
+ }
+
+ if (!(roflgs & ERTS_TMR_ROFLG_BIF_TMR))
+ size = ERTS_HL_PTIMER_SIZE;
+ else {
+ /*
+ * Message buffer can be dropped at
+ * once...
+ */
+ size = sizeof(ErtsHLTimer);
+ }
+
+ erts_schedule_thr_prgr_later_cleanup_op(
+ scheduled_hl_timer_destroy, tmr,
+ &tmr->time.cleanup, size);
+}
+
+static ERTS_INLINE void
+hl_timer_pre_dec_refc(ErtsHLTimer *tmr)
+{
+#ifdef ERTS_HLT_DEBUG
+ erts_aint_t refc;
+ refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc);
+ ERTS_HLT_ASSERT(refc > 0);
+#else
+ erts_smp_atomic32_dec_nob(&tmr->head.refc);
+#endif
+}
+
+static ERTS_INLINE void
+hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
+{
+ if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+ ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore;
+ schedule_hl_timer_destroy(tmr, roflgs);
+ }
+}
+
+static void hlt_service_timeout(void *vesdp);
+#ifdef ERTS_SMP
+static void handle_canceled_queue(ErtsSchedulerData *esdp,
+ ErtsHLTCncldTmrQ *cq,
+ int use_limit,
+ int ops_limit,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work);
+#endif
+
+static ERTS_INLINE void
+check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
+{
+#if defined(ERTS_SMP) && ERTS_TMR_CHECK_CANCEL_ON_CREATE
+ ErtsHLTCncldTmrQ *cq = &srv->canceled_queue;
+ if (cq->head.first != cq->head.unref_end)
+ handle_canceled_queue(esdp, cq, 1,
+ ERTS_TMR_CANCELED_TIMER_SMALL_LIMIT,
+ NULL, NULL, NULL);
+#endif
+}
+
+static void
+hlt_delete_abtm(ErtsHLTimer *tmr)
+{
+ Process *proc;
+
+ ERTS_HLT_ASSERT(tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR);
+
+ proc = erts_proc_lookup(tmr->abtm.accessor);
+
+ if (proc) {
+ int deref = 0;
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ if (tmr->abtm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ abtm_rbt_delete(&proc->accessor_bif_timers, tmr);
+ deref = 1;
+ tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ if (deref)
+ hl_timer_pre_dec_refc(tmr);
+ }
+}
+
+static ErtsHLTimer *
+create_hl_timer(ErtsSchedulerData *esdp,
+ ErtsMonotonicTime timeout_pos,
+ int short_time, int is_bif_tmr,
+ void *rcvrp, Eterm rcvr, Eterm acsr,
+ Eterm msg, Uint32 *refn)
+{
+ ErtsHLTimerService *srv = esdp->timer_service;
+ ErtsHLTimer *tmr, *st_tmr;
+ erts_aint32_t refc;
+ Uint32 roflgs;
+ int is_abif_tmr = is_bif_tmr && is_value(acsr) && acsr != rcvr;
+
+ check_canceled_queue(esdp, srv);
+
+ ERTS_HLT_ASSERT((esdp->no & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
+
+ roflgs = ((Uint32) esdp->no) | ERTS_TMR_ROFLG_HLT;
+
+ if (!is_bif_tmr)
+ tmr = erts_alloc(ERTS_ALC_T_HL_PTIMER,
+ ERTS_HL_PTIMER_SIZE);
+ else if (short_time) {
+ tmr = bif_timer_pre_alloc();
+ if (!tmr)
+ goto alloc_bif_timer;
+ roflgs |= ERTS_TMR_ROFLG_PRE_ALC;
+ }
+ else {
+ alloc_bif_timer:
+ if (is_abif_tmr)
+ tmr = erts_alloc(ERTS_ALC_T_ABIF_TIMER,
+ ERTS_ABIF_TIMER_SIZE);
+ else
+ tmr = erts_alloc(ERTS_ALC_T_BIF_TIMER,
+ ERTS_BIF_TIMER_SIZE);
+ }
+
+ tmr->timeout = timeout_pos;
+
+ if (!is_bif_tmr) {
+ if (is_internal_pid(rcvr)) {
+ erts_proc_inc_refc((Process *) rcvrp);
+ tmr->receiver.proc = (Process *) rcvrp;
+ roflgs |= ERTS_TMR_ROFLG_PROC;
+ }
+ else {
+ erts_port_inc_refc((Port *) rcvrp);
+ ERTS_HLT_ASSERT(is_internal_port(rcvr));
+ tmr->receiver.port = (Port *) rcvrp;
+ roflgs |= ERTS_TMR_ROFLG_PORT;
+ }
+ refc = 2;
+ }
+ else {
+ Uint hsz;
+
+ roflgs |= ERTS_TMR_ROFLG_BIF_TMR;
+ if (is_internal_pid(rcvr)) {
+ roflgs |= ERTS_TMR_ROFLG_PROC;
+ tmr->receiver.proc = (Process *) rcvrp;
+ refc = 2;
+ }
+ else {
+ ERTS_HLT_ASSERT(is_atom(rcvr));
+ roflgs |= ERTS_TMR_ROFLG_REG_NAME;
+ tmr->receiver.name = rcvr;
+ refc = 1;
+ }
+
+ hsz = is_immed(msg) ? ((Uint) 0) : size_object(msg);
+ if (!hsz) {
+ tmr->btm.message = msg;
+ tmr->btm.bp = NULL;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(hsz);
+ Eterm *hp = bp->mem;
+ tmr->btm.message = copy_struct(msg, hsz, &hp, &bp->off_heap);
+ tmr->btm.bp = bp;
+ }
+ tmr->btm.refn[0] = refn[0];
+ tmr->btm.refn[1] = refn[1];
+ tmr->btm.refn[2] = refn[2];
+
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ if (is_abif_tmr) {
+ Process *aproc;
+ roflgs |= ERTS_TMR_ROFLG_ABIF_TMR;
+ tmr->abtm.accessor = acsr;
+ aproc = erts_proc_lookup(acsr);
+ if (!aproc)
+ tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ else {
+ refc++;
+ erts_smp_proc_lock(aproc, ERTS_PROC_LOCK_BTM);
+ abtm_rbt_insert(&aproc->accessor_bif_timers, tmr);
+ erts_smp_proc_unlock(aproc, ERTS_PROC_LOCK_BTM);
+ }
+ }
+ }
+
+ tmr->head.roflgs = roflgs;
+ erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
+ erts_smp_atomic32_init_nob(&tmr->state, ERTS_TMR_STATE_ACTIVE);
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ if (!srv->next_timeout
+ || tmr->timeout < srv->next_timeout->timeout) {
+ if (srv->next_timeout)
+ erts_twheel_cancel_timer(esdp->timer_wheel,
+ &srv->service_timer);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &srv->service_timer,
+ hlt_service_timeout,
+ NULL,
+ (void *) esdp,
+ tmr->timeout);
+ srv->next_timeout = tmr;
+ }
+
+ st_tmr = time_rbt_lookup_insert(&srv->time_tree, tmr);
+ tmr->time.tree.same_time = st_tmr;
+ if (st_tmr)
+ same_time_list_insert(&st_tmr->time.tree.same_time, tmr);
+
+ if (is_bif_tmr)
+ btm_rbt_insert(&srv->btm_tree, tmr);
+
+#ifdef ERTS_HLT_HARD_DEBUG
+ tmr->pending_timeout = 0;
+#endif
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ return tmr;
+}
+
+static ERTS_INLINE void
+hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
+{
+ ErtsProcLocks proc_locks = ERTS_PROC_LOCKS_MSG_SEND;
+ Process *proc;
+ int queued_message = 0;
+ int dec_refc = 0;
+ Uint32 is_reg_name = (roflgs & ERTS_TMR_ROFLG_REG_NAME);
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);
+
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
+ hlt_delete_abtm(tmr);
+
+ if (is_reg_name) {
+ Eterm pid;
+ ERTS_HLT_ASSERT(is_atom(tmr->receiver.name));
+ pid = erts_whereis_name_to_id(NULL, tmr->receiver.name);
+ proc = erts_proc_lookup(pid);
+ }
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PROC);
+ ERTS_HLT_ASSERT(tmr->receiver.proc);
+
+ proc = tmr->receiver.proc;
+ proc_locks |= ERTS_PROC_LOCK_BTM;
+ }
+ if (proc) {
+ erts_smp_proc_lock(proc, proc_locks);
+ /*
+ * If the process is exiting, let it clean up
+ * the btm tree by itself (it may be in
+ * the middle of tree destruction).
+ */
+ if (!ERTS_PROC_IS_EXITING(proc)) {
+ erts_queue_message(proc, &proc_locks, tmr->btm.bp,
+ tmr->btm.message, NIL);
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND);
+ queued_message = 1;
+ proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND;
+ tmr->btm.bp = NULL;
+ if (tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ proc_btm_rbt_delete(&proc->bif_timers, tmr);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ dec_refc = 1;
+ }
+ }
+ if (proc_locks)
+ erts_smp_proc_unlock(proc, proc_locks);
+ if (dec_refc)
+ hl_timer_pre_dec_refc(tmr);
+ }
+ if (!queued_message && tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+}
+
+static ERTS_INLINE void
+hlt_proc_timeout(ErtsHLTimer *tmr)
+{
+ if (proc_timeout_common(tmr->receiver.proc, (void *) tmr))
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+}
+
+static ERTS_INLINE void
+hlt_port_timeout(ErtsHLTimer *tmr)
+{
+ if (port_timeout_common(tmr->receiver.port, (void *) tmr))
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+}
+
+static void hlt_timeout(ErtsHLTimer *tmr, void *vsrv)
+{
+ ErtsHLTimerService *srv = (ErtsHLTimerService *) vsrv;
+ Uint32 roflgs;
+ erts_aint32_t state;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ roflgs = tmr->head.roflgs;
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_HLT);
+
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ ERTS_TMR_STATE_TIMED_OUT,
+ ERTS_TMR_STATE_ACTIVE);
+
+ ERTS_HLT_ASSERT(state == ERTS_TMR_STATE_CANCELED
+ || state == ERTS_TMR_STATE_ACTIVE);
+
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+
+ if (roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ hlt_bif_timer_timeout(tmr, roflgs);
+ else if (roflgs & ERTS_TMR_ROFLG_PROC)
+ hlt_proc_timeout(tmr);
+ else {
+ ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_PORT);
+ hlt_port_timeout(tmr);
+ }
+ }
+
+ tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ if ((roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ && tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&srv->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ hl_timer_dec_refc(tmr, roflgs);
+}
+
+#ifdef ERTS_HLT_HARD_DEBUG
+static void
+set_pending_timeout(ErtsHLTimer *tmr, void *unused)
+{
+ tmr->pending_timeout = -1;
+}
+#endif
+
+static void
+hlt_service_timeout(void *vesdp)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ ErtsHLTimerService *srv = esdp->timer_service;
+ ErtsHLTimer *tmr = srv->next_timeout;
+ int yield;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+
+ ERTS_HLT_ASSERT(!srv->yield.root || srv->yield.root == tmr);
+ ERTS_HLT_ASSERT(tmr);
+ ERTS_HLT_ASSERT(tmr->timeout <= erts_get_monotonic_time(esdp));
+
+ if (!srv->yield.root) {
+ ERTS_HLT_ASSERT(tmr->time.tree.parent
+ != ERTS_HLT_PFIELD_NOT_IN_TABLE);
+ time_rbt_delete(&srv->time_tree, tmr);
+ tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+#ifdef ERTS_HLT_HARD_DEBUG
+ tmr->pending_timeout = 1;
+ if (tmr->time.tree.same_time)
+ same_time_list_foreach(tmr->time.tree.same_time, set_pending_timeout, NULL);
+#endif
+ }
+
+ if (!tmr->time.tree.same_time && !srv->yield.root)
+ yield = 0;
+ else {
+ yield = same_time_list_foreach_destroy_yielding(
+ &tmr->time.tree.same_time, hlt_timeout, (void *) srv,
+ &srv->yield.state, ERTS_TMR_TIMEOUT_YIELD_LIMIT);
+ }
+
+ if (yield)
+ srv->yield.root = tmr;
+ else {
+ srv->yield.root = NULL;
+ hlt_timeout(tmr, (void *) srv);
+
+ tmr = time_rbt_smallest(srv->time_tree);
+ srv->next_timeout = tmr;
+ }
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ if (tmr)
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &srv->service_timer,
+ hlt_service_timeout,
+ NULL,
+ vesdp,
+ tmr->timeout);
+}
+
+static void
+hlt_delete_timer(ErtsSchedulerData *esdp, ErtsHLTimer *tmr)
+{
+ ErtsHLTimerService *srv = esdp->timer_service;
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR) {
+
+ if (tmr->btm.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ btm_rbt_delete(&srv->btm_tree, tmr);
+ tmr->btm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ }
+
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_ABIF_TMR)
+ hlt_delete_abtm(tmr);
+ }
+
+ if (tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ /* Already removed... */
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+ return;
+ }
+
+ if (tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) {
+ same_time_list_delete(tmr);
+ }
+ else if (tmr->time.tree.same_time) {
+ ErtsHLTimer *st_container;
+
+ ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ st_container = tmr->time.tree.same_time->time.tree.u.l.prev;
+
+ ERTS_HLT_ASSERT(st_container);
+ ERTS_HLT_ASSERT(st_container->time.tree.parent
+ & ERTS_HLT_PFLG_SAME_TIME);
+ ERTS_HLT_ASSERT(tmr->timeout == st_container->timeout);
+
+ same_time_list_delete(st_container);
+ st_container->time.tree.same_time = tmr->time.tree.same_time;
+ same_time_list_new_root(&st_container->time.tree.same_time);
+
+ time_rbt_replace(&srv->time_tree, tmr, st_container);
+ ERTS_HLT_ASSERT((st_container->time.tree.parent
+ & ERTS_HLT_PFLG_SAME_TIME) == 0);
+
+ if (srv->next_timeout == tmr)
+ srv->next_timeout = st_container;
+ }
+ else {
+ ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ time_rbt_delete(&srv->time_tree, tmr);
+ if (tmr == srv->next_timeout) {
+ ErtsHLTimer *smlst;
+ erts_twheel_cancel_timer(esdp->timer_wheel,
+ &srv->service_timer);
+ smlst = time_rbt_smallest(srv->time_tree);
+ srv->next_timeout = smlst;
+ if (smlst) {
+ ERTS_HLT_ASSERT(smlst->timeout > tmr->timeout);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &srv->service_timer,
+ hlt_service_timeout,
+ NULL,
+ (void *) esdp,
+ smlst->timeout);
+ }
+ }
+ }
+ tmr->time.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+}
+
+/*
+ * Pass canceled timers back to originating scheduler
+ */
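+/*
+ * A timer may be canceled from a scheduler other than the one that
+ * owns it. Since the timer trees are only operated on by the owning
+ * scheduler, such timers are enqueued on the owning scheduler's
+ * lock free 'canceled_queue' and are cleaned up locally when that
+ * scheduler handles the queue.
+ */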
+
+static ERTS_INLINE void
+cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
+ ErtsTimer *tmr)
+{
+ Uint32 roflgs = tmr->head.roflgs;
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+ ERTS_HLT_ASSERT((tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK)
+ == (Uint32) esdp->no);
+ if (roflgs & ERTS_TMR_ROFLG_HLT) {
+ hlt_delete_timer(esdp, &tmr->hlt);
+ hl_timer_dec_refc(&tmr->hlt, roflgs);
+ }
+ else {
+ cancel_tw_timer(esdp, &tmr->twt);
+ tw_timer_dec_refc(&tmr->twt);
+ }
+}
+
+#ifdef ERTS_SMP
+
+static void
+init_canceled_queue(ErtsHLTCncldTmrQ *cq)
+{
+ erts_atomic_init_nob(&cq->tail.data.marker.u.next, ERTS_AINT_NULL);
+ erts_atomic_init_nob(&cq->tail.data.last,
+ (erts_aint_t) &cq->tail.data.marker);
+ cq->head.first = (ErtsTimer *) &cq->tail.data.marker;
+ cq->head.unref_end = (ErtsTimer *) &cq->tail.data.marker;
+ cq->head.next.thr_progress = erts_thr_progress_current();
+ cq->head.next.thr_progress_reached = 1;
+ cq->head.next.unref_end = (ErtsTimer *) &cq->tail.data.marker;
+ cq->head.used_marker = 1;
+}
+
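+/*
+ * Lock free enqueue at the tail of the canceled timer queue.
+ * Returns nonzero when this thread moved the 'last' pointer itself,
+ * in which case the caller notifies the scheduler owning the queue.
+ */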
+static ERTS_INLINE int
+cq_enqueue(ErtsHLTCncldTmrQ *cq, ErtsTimer *tmr, int cinit)
+{
+ erts_aint_t itmp;
+ ErtsTimer *enq, *this = tmr;
+
+ erts_atomic_init_nob(&this->head.u.next, ERTS_AINT_NULL);
+ /* Enqueue at end of list... */
+
+ enq = (ErtsTimer *) erts_atomic_read_nob(&cq->tail.data.last);
+ itmp = erts_atomic_cmpxchg_relb(&enq->head.u.next,
+ (erts_aint_t) this,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ /* We are required to move last pointer */
+#ifdef DEBUG
+ ASSERT(ERTS_AINT_NULL == erts_atomic_read_nob(&this->head.u.next));
+ ASSERT(((erts_aint_t) enq)
+ == erts_atomic_xchg_relb(&cq->tail.data.last,
+ (erts_aint_t) this));
+#else
+ erts_atomic_set_relb(&cq->tail.data.last, (erts_aint_t) this);
+#endif
+ return 1;
+ }
+ else {
+ /*
+ * We *need* to insert the element somewhere between the
+ * last element we read earlier and the actual last element.
+ */
+ int i = cinit;
+
+ while (1) {
+ erts_aint_t itmp2;
+ erts_atomic_set_nob(&this->head.u.next, itmp);
+ itmp2 = erts_atomic_cmpxchg_relb(&enq->head.u.next,
+ (erts_aint_t) this,
+ itmp);
+ if (itmp == itmp2)
+ return 0; /* inserted this */
+ if ((i & 1) == 0)
+ itmp = itmp2;
+ else {
+ enq = (ErtsTimer *) itmp2;
+ itmp = erts_atomic_read_acqb(&enq->head.u.next);
+ ASSERT(itmp != ERTS_AINT_NULL);
+ }
+ i++;
+ }
+ }
+}
+
+static ERTS_INLINE erts_aint_t
+check_insert_marker(ErtsHLTCncldTmrQ *cq, erts_aint_t ilast)
+{
+ if (!cq->head.used_marker
+ && cq->head.unref_end == (ErtsTimer *) ilast) {
+ erts_aint_t itmp;
+ ErtsTimer *last = (ErtsTimer *) ilast;
+
+ erts_atomic_init_nob(&cq->tail.data.marker.u.next, ERTS_AINT_NULL);
+ itmp = erts_atomic_cmpxchg_relb(&last->head.u.next,
+ (erts_aint_t) &cq->tail.data.marker,
+ ERTS_AINT_NULL);
+ if (itmp == ERTS_AINT_NULL) {
+ ilast = (erts_aint_t) &cq->tail.data.marker;
+ cq->head.used_marker = !0;
+ erts_atomic_set_relb(&cq->tail.data.last, ilast);
+ }
+ }
+ return ilast;
+}
+
+static ERTS_INLINE ErtsTimer *
+cq_dequeue(ErtsHLTCncldTmrQ *cq)
+{
+ ErtsTimer *tmr;
+
+ if (cq->head.first == cq->head.unref_end)
+ return NULL;
+
+ tmr = cq->head.first;
+ if (tmr == (ErtsTimer *) &cq->tail.data.marker) {
+ ASSERT(cq->head.used_marker);
+ cq->head.used_marker = 0;
+ tmr = (ErtsTimer *) erts_atomic_read_nob(&tmr->head.u.next);
+ if (tmr == cq->head.unref_end) {
+ cq->head.first = tmr;
+ return NULL;
+ }
+ }
+
+ cq->head.first = (ErtsTimer *) erts_atomic_read_nob(&tmr->head.u.next);
+
+ ASSERT(cq->head.first);
+
+ return tmr;
+}
+
+static int
+cq_check_incoming(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq)
+{
+ erts_aint_t ilast = erts_atomic_read_nob(&cq->tail.data.last);
+ if (((ErtsTimer *) ilast) == (ErtsTimer *) &cq->tail.data.marker
+ && cq->head.first == (ErtsTimer *) &cq->tail.data.marker) {
+ /* Nothing more to do... */
+ return 0;
+ }
+
+ if (cq->head.next.thr_progress_reached
+ || erts_thr_progress_has_reached(cq->head.next.thr_progress)) {
+ cq->head.next.thr_progress_reached = 1;
+ /* Move unreferenced end pointer forward... */
+
+ ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore;
+
+ cq->head.unref_end = cq->head.next.unref_end;
+
+ ilast = check_insert_marker(cq, ilast);
+
+ if (cq->head.unref_end != (ErtsTimer *) ilast) {
+ cq->head.next.unref_end = (ErtsTimer *) ilast;
+ cq->head.next.thr_progress = erts_thr_progress_later(esdp);
+ cq->head.next.thr_progress_reached = 0;
+ }
+ }
+ return 1;
+}
+
+static ERTS_INLINE void
+store_earliest_thr_prgr(ErtsThrPrgrVal *prev_val, ErtsHLTCncldTmrQ *cq)
+{
+ if (!cq->head.next.thr_progress_reached
+ && (*prev_val == ERTS_THR_PRGR_INVALID
+ || erts_thr_progress_cmp(cq->head.next.thr_progress,
+ *prev_val) < 0)) {
+ *prev_val = cq->head.next.thr_progress;
+ }
+}
+
+static void
+handle_canceled_queue(ErtsSchedulerData *esdp,
+ ErtsHLTCncldTmrQ *cq,
+ int use_limit,
+ int ops_limit,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work)
+{
+ int need_thr_prgr = 0;
+ int need_mr_wrk = 0;
+ int have_checked_incoming = 0;
+ int ops = 0;
+
+ ERTS_HLT_ASSERT(cq == &esdp->timer_service->canceled_queue);
+
+ while (1) {
+ ErtsTimer *tmr = cq_dequeue(cq);
+
+ if (tmr)
+ cleanup_sched_local_canceled_timer(esdp, tmr);
+ else {
+ if (have_checked_incoming)
+ break;
+ need_thr_prgr = cq_check_incoming(esdp, cq);
+ if (need_thr_progress) {
+ *need_thr_progress |= need_thr_prgr;
+ if (need_thr_prgr)
+ store_earliest_thr_prgr(thr_prgr_p, cq);
+ }
+ have_checked_incoming = 1;
+ continue;
+ }
+
+ if (use_limit && ++ops >= ops_limit) {
+ if (cq->head.first != cq->head.unref_end) {
+ need_mr_wrk = 1;
+ if (need_more_work)
+ *need_more_work |= 1;
+ }
+ break;
+ }
+ }
+
+ if (need_thr_progress && !(need_thr_prgr | need_mr_wrk)) {
+ need_thr_prgr = cq_check_incoming(esdp, cq);
+ *need_thr_progress |= need_thr_prgr;
+ if (need_thr_prgr)
+ store_earliest_thr_prgr(thr_prgr_p, cq);
+ }
+}
+
+void
+erts_handle_canceled_timers(void *vesdp,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+
+ handle_canceled_queue(esdp, &esdp->timer_service->canceled_queue,
+ 1, ERTS_TMR_CANCELED_TIMER_LIMIT,
+ need_thr_progress, thr_prgr_p,
+ need_more_work);
+}
+
+#endif /* ERTS_SMP */
+
+static void
+queue_canceled_timer(ErtsSchedulerData *esdp, int rsched_id, ErtsTimer *tmr)
+{
+#ifdef ERTS_SMP
+ ErtsHLTCncldTmrQ *cq;
+ cq = &ERTS_SCHEDULER_IX(rsched_id-1)->timer_service->canceled_queue;
+ if (cq_enqueue(cq, tmr, rsched_id - (int) esdp->no))
+ erts_notify_canceled_timer(esdp, rsched_id);
+#else
+ ERTS_INTERNAL_ERROR("Unexpected enqueue of canceled timer");
+#endif
+}
+
+static void
+continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
+{
+#ifdef ERTS_SMP
+ Uint32 sid = (tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK);
+
+ if (esdp->no != sid)
+ queue_canceled_timer(esdp, sid, tmr);
+ else
+#endif
+ cleanup_sched_local_canceled_timer(esdp, tmr);
+}
+
+/*
+ * BIF timer specific
+ */
+
+Uint erts_bif_timer_memory_size(void)
+{
+ return (Uint) 0;
+}
+
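+/*
+ * Set up a BIF timer on the calling scheduler's timer service. The
+ * returned reference is created in the scheduler's reference buffer,
+ * so its thread id part identifies the scheduler owning the timer;
+ * access_bif_timer() below uses this to route cancel/read requests
+ * to the correct scheduler.
+ */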
+static BIF_RETTYPE
+setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
+ int short_time, Eterm rcvr, Eterm acsr,
+ Eterm msg, int wrap)
+{
+ BIF_RETTYPE ret;
+ Eterm ref, tmo_msg, *hp;
+ ErtsHLTimer *tmr;
+ ErtsSchedulerData *esdp;
+ DeclareTmpHeap(tmp_hp, 4, c_p);
+
+ if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
+ goto badarg;
+
+ esdp = ERTS_PROC_GET_SCHDATA(c_p);
+
+ hp = HAlloc(c_p, REF_THING_SIZE);
+ ref = erts_sched_make_ref_in_buffer(esdp, hp);
+
+ ASSERT(erts_get_ref_numbers_thr_id(
+ internal_ref_numbers(ref)) == (Uint32) esdp->no);
+
+ UseTmpHeap(4, c_p);
+
+ tmo_msg = wrap ? TUPLE3(tmp_hp, am_timeout, ref, msg) : msg;
+
+ tmr = create_hl_timer(esdp, timeout_pos, short_time, 1, NULL,
+ rcvr, acsr, tmo_msg, internal_ref_numbers(ref));
+
+ UnUseTmpHeap(4, c_p);
+
+ if (is_internal_pid(rcvr)) {
+ Process *proc = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ rcvr, ERTS_PROC_LOCK_BTM,
+ ERTS_P2P_FLG_INC_REFC);
+ if (!proc) {
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+ hlt_delete_timer(esdp, tmr);
+ hl_timer_destroy(tmr);
+ }
+ else {
+ proc_btm_rbt_insert(&proc->bif_timers, tmr);
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ tmr->receiver.proc = proc;
+ }
+ }
+
+ ERTS_BIF_PREP_RET(ret, ref);
+ return ret;
+
+badarg:
+
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+}
+
+static int
+cancel_bif_timer(ErtsHLTimer *tmr)
+{
+ erts_aint_t state;
+ Uint32 roflgs;
+ int res;
+
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ ERTS_TMR_STATE_CANCELED,
+ ERTS_TMR_STATE_ACTIVE);
+ if (state != ERTS_TMR_STATE_ACTIVE)
+ return 0;
+
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+
+ res = -1;
+
+ roflgs = tmr->head.roflgs;
+ if (roflgs & ERTS_TMR_ROFLG_PROC) {
+ Process *proc = tmr->receiver.proc;
+ ERTS_HLT_ASSERT(!(tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME));
+
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+ /*
+ * If the process is exiting, let it clean up
+ * the btm tree by itself (it may be in
+ * the middle of tree destruction).
+ */
+ if (!ERTS_PROC_IS_EXITING(proc)
+ && tmr->btm.proc_tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE) {
+ proc_btm_rbt_delete(&proc->bif_timers, tmr);
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ res = 1;
+ }
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+ }
+
+ return res;
+}
+
+static ERTS_INLINE Eterm
+access_sched_local_btm(Process *c_p, Eterm pid,
+ Eterm tref, Uint32 *trefn,
+ Uint32 *rrefn,
+ int async, int cancel,
+ int return_res,
+ int info)
+{
+ ErtsSchedulerData *esdp;
+ ErtsHLTimerService *srv;
+ ErtsHLTimer *tmr;
+ Sint64 time_left;
+ Process *proc;
+ ErtsProcLocks proc_locks;
+
+ time_left = -1;
+
+ if (!c_p)
+ esdp = erts_get_scheduler_data();
+ else {
+ esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
+ }
+
+ ERTS_HLT_ASSERT(erts_get_ref_numbers_thr_id(trefn)
+ == (Uint32) esdp->no);
+
+ srv = esdp->timer_service;
+
+ tmr = btm_rbt_lookup(srv->btm_tree, trefn);
+ if (tmr) {
+ if (!cancel) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
+ if (state == ERTS_TMR_STATE_ACTIVE)
+ time_left = get_time_left(esdp, tmr->timeout);
+ }
+ else {
+ int cncl_res = cancel_bif_timer(tmr);
+ if (cncl_res) {
+
+ time_left = get_time_left(esdp, tmr->timeout);
+
+ if (cncl_res > 0)
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+
+ hlt_delete_timer(esdp, tmr);
+ }
+ }
+ }
+
+ if (!info)
+ return am_ok;
+
+ if (return_res) {
+ ERTS_HLT_ASSERT(c_p);
+ if (time_left < 0)
+ return am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ return make_small((Sint) time_left);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
+ Eterm *hp = HAlloc(c_p, hsz);
+ return erts_sint64_to_big(time_left, &hp);
+ }
+ }
+
+ if (c_p) {
+ proc = c_p;
+ proc_locks = ERTS_PROC_LOCK_MAIN;
+ }
+ else {
+ proc = erts_proc_lookup(pid);
+ proc_locks = 0;
+ }
+
+ if (proc) {
+ Uint hsz;
+ ErlOffHeap *ohp;
+ ErlHeapFragment* bp;
+ Eterm *hp, msg, ref, result;
+#ifdef ERTS_HLT_DEBUG
+ Eterm *hp_end;
+#endif
+
+ hsz = 3; /* 2-tuple */
+ if (!async)
+ hsz += REF_THING_SIZE;
+ else {
+ if (is_non_value(tref) || proc != c_p)
+ hsz += REF_THING_SIZE;
+ hsz += 1; /* upgrade to 3-tuple */
+ }
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ if (proc == c_p) {
+ bp = NULL;
+ ohp = NULL;
+ hp = HAlloc(c_p, hsz);
+ }
+ else {
+ hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ proc,
+ &proc_locks);
+ }
+
+#ifdef ERTS_HLT_DEBUG
+ hp_end = hp + hsz;
+#endif
+
+ if (time_left < 0)
+ result = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ result = make_small((Sint) time_left);
+ else
+ result = erts_sint64_to_big(time_left, &hp);
+
+ if (!async) {
+ write_ref_thing(hp,
+ rrefn[0],
+ rrefn[1],
+ rrefn[2]);
+ ref = make_internal_ref(hp);
+ hp += REF_THING_SIZE;
+ msg = TUPLE2(hp, ref, result);
+
+ ERTS_HLT_ASSERT(hp + 3 == hp_end);
+ }
+ else {
+ Eterm tag = cancel ? am_cancel_timer : am_read_timer;
+ if (is_value(tref) && proc == c_p)
+ ref = tref;
+ else {
+ write_ref_thing(hp,
+ trefn[0],
+ trefn[1],
+ trefn[2]);
+ ref = make_internal_ref(hp);
+ hp += REF_THING_SIZE;
+ }
+ msg = TUPLE3(hp, tag, ref, result);
+
+ ERTS_HLT_ASSERT(hp + 4 == hp_end);
+
+ }
+ erts_queue_message(proc, &proc_locks, bp, msg, NIL);
+
+ if (c_p)
+ proc_locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (proc_locks)
+ erts_smp_proc_unlock(proc, proc_locks);
+ }
+
+ return am_ok;
+}
+
+#define ERTS_BTM_REQ_FLG_ASYNC (((Uint32) 1) << 0)
+#define ERTS_BTM_REQ_FLG_CANCEL (((Uint32) 1) << 1)
+#define ERTS_BTM_REQ_FLG_INFO (((Uint32) 1) << 2)
+
+typedef struct {
+ Eterm pid;
+ Uint32 trefn[ERTS_REF_NUMBERS];
+ Uint32 rrefn[ERTS_REF_NUMBERS];
+ Uint32 flags;
+} ErtsBifTimerRequest;
+
+static void
+bif_timer_access_request(void *vreq)
+{
+ ErtsBifTimerRequest *req = (ErtsBifTimerRequest *) vreq;
+ int async = (int) (req->flags & ERTS_BTM_REQ_FLG_ASYNC);
+ int cancel = (int) (req->flags & ERTS_BTM_REQ_FLG_CANCEL);
+ int info = (int) (req->flags & ERTS_BTM_REQ_FLG_INFO);
+ (void) access_sched_local_btm(NULL, req->pid, THE_NON_VALUE,
+ req->trefn, req->rrefn, async,
+ cancel, 0, info);
+ erts_free(ERTS_ALC_T_TIMER_REQUEST, vreq);
+}
+
+static int
+try_access_sched_remote_btm(ErtsSchedulerData *esdp,
+ Process *c_p, Uint32 sid,
+ Eterm tref, Uint32 *trefn,
+ int async, int cancel,
+ int info, Eterm *resp)
+{
+ ErtsHLTimer *tmr;
+ Sint64 time_left;
+
+ ERTS_HLT_ASSERT(c_p);
+
+ /*
+ * Check if the timer is aimed at the current
+ * process or if this process is an accessor
+ * of the timer...
+ */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
+ tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
+ if (!tmr)
+ tmr = abtm_rbt_lookup(c_p->accessor_bif_timers, trefn);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
+ if (!tmr)
+ return 0;
+
+ if (!cancel) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->state);
+ if (state == ERTS_TMR_STATE_ACTIVE)
+ time_left = get_time_left(esdp, tmr->timeout);
+ else
+ time_left = -1;
+ }
+ else {
+ int cncl_res = cancel_bif_timer(tmr);
+ if (!cncl_res)
+ time_left = -1;
+ else {
+ time_left = get_time_left(esdp, tmr->timeout);
+ if (cncl_res > 0)
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ }
+ }
+
+ if (!info) {
+ *resp = am_ok;
+ return 1;
+ }
+
+ if (!async) {
+ if (time_left < 0)
+ *resp = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ *resp = make_small((Sint) time_left);
+ else {
+ Uint hsz = ERTS_SINT64_HEAP_SIZE(time_left);
+ Eterm *hp = HAlloc(c_p, hsz);
+ *resp = erts_sint64_to_big(time_left, &hp);
+ }
+ }
+ else {
+ Eterm tag, res, msg;
+ Uint hsz;
+ Eterm *hp;
+ ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
+
+ hsz = 4;
+ if (time_left > (Sint64) MAX_SMALL)
+ hsz += ERTS_SINT64_HEAP_SIZE(time_left);
+
+ hp = HAlloc(c_p, hsz);
+ if (cancel)
+ tag = am_cancel_timer;
+ else
+ tag = am_read_timer;
+
+ if (time_left < 0)
+ res = am_false;
+ else if (time_left <= (Sint64) MAX_SMALL)
+ res = make_small((Sint) time_left);
+ else
+ res = erts_sint64_to_big(time_left, &hp);
+
+ msg = TUPLE3(hp, tag, tref, res);
+
+ erts_queue_message(c_p, &proc_locks, NULL, msg, NIL);
+
+ proc_locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (proc_locks)
+ erts_smp_proc_unlock(c_p, proc_locks);
+
+ *resp = am_ok;
+ }
+ return 1;
+}
+
+static BIF_RETTYPE
+access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
+{
+ BIF_RETTYPE ret;
+ ErtsSchedulerData *esdp;
+ Uint32 sid;
+ Uint32 *trefn;
+ Eterm res;
+
+ if (is_not_internal_ref(tref)) {
+ if (is_not_ref(tref))
+ goto badarg;
+ else
+ goto no_timer;
+ }
+
+ esdp = ERTS_PROC_GET_SCHDATA(c_p);
+
+ trefn = internal_ref_numbers(tref);
+ sid = erts_get_ref_numbers_thr_id(trefn);
+ if (sid < 1 || erts_no_schedulers < sid)
+ goto no_timer;
+
+ if (sid == (Uint32) esdp->no) {
+ res = access_sched_local_btm(c_p, c_p->common.id,
+ tref, trefn, NULL,
+ async, cancel, !async,
+ info);
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else if (try_access_sched_remote_btm(esdp, c_p, sid,
+ tref, trefn,
+ async, cancel,
+ info, &res)) {
+ ERTS_BIF_PREP_RET(ret, res);
+ }
+ else {
+ /*
+ * Schedule access for execution on
+ * remote scheduler...
+ */
+ ErtsBifTimerRequest *req = erts_alloc(ERTS_ALC_T_TIMER_REQUEST,
+ sizeof(ErtsBifTimerRequest));
+
+ req->flags = 0;
+ if (cancel)
+ req->flags |= ERTS_BTM_REQ_FLG_CANCEL;
+ if (async)
+ req->flags |= ERTS_BTM_REQ_FLG_ASYNC;
+ if (info)
+ req->flags |= ERTS_BTM_REQ_FLG_INFO;
+
+ req->pid = c_p->common.id;
+
+ req->trefn[0] = trefn[0];
+ req->trefn[1] = trefn[1];
+ req->trefn[2] = trefn[2];
+
+ if (async)
+ ERTS_BIF_PREP_RET(ret, am_ok);
+ else {
+ Eterm *hp, rref;
+ Uint32 *rrefn;
+
+ hp = HAlloc(c_p, REF_THING_SIZE);
+ rref = erts_sched_make_ref_in_buffer(esdp, hp);
+ rrefn = internal_ref_numbers(rref);
+
+ req->rrefn[0] = rrefn[0];
+ req->rrefn[1] = rrefn[1];
+ req->rrefn[2] = rrefn[2];
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ if (ERTS_PROC_PENDING_EXIT(c_p))
+ ERTS_VBUMP_ALL_REDS(c_p);
+ else {
+ /*
+ * The caller needs to wait for a message containing
+ * the ref that we just created. No such message
+ * can exist in the caller's message queue at this time.
+ * We therefore move the save pointer of the
+ * caller's message queue to the end of the queue.
+ *
+ * NOTE: It is of vital importance that the caller
+ * immediately does a receive unconditionally
+ * waiting for the message with the reference;
+ * otherwise, the next receive will *not* work
+ * as expected!
+ */
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ c_p->msg.save = c_p->msg.last;
+ }
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, rref);
+ }
+
+ erts_schedule_misc_aux_work(sid,
+ bif_timer_access_request,
+ (void *) req);
+ }
+
+ return ret;
+
+badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ return ret;
+
+no_timer:
+ ERTS_BIF_PREP_RET(ret, am_false);
+ return ret;
+
+}
+
+static ERTS_INLINE int
+bool_arg(Eterm val, int *argp)
+{
+ switch (val) {
+ case am_true: *argp = 1; return 1;
+ case am_false: *argp = 0; return 1;
+ default: return 0;
+ }
+}
+
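+/*
+ * Parse the option list accepted by the timer BIFs:
+ * {async, Bool}, {info, Bool}, {abs, Bool} and {accessor, Pid}.
+ * Defaults are async=false, info=true, abs=false and no accessor.
+ * Returns 0 if the option list is invalid.
+ */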
+static ERTS_INLINE int
+parse_bif_timer_options(Eterm option_list, int *async, int *info,
+ int *abs, Eterm *accessor)
+{
+ Eterm list = option_list;
+
+ if (async)
+ *async = 0;
+ if (info)
+ *info = 1;
+ if (abs)
+ *abs = 0;
+ if (accessor)
+ *accessor = THE_NON_VALUE;
+
+ while (is_list(list)) {
+ Eterm *consp, *tp, opt;
+
+ consp = list_val(list);
+ opt = CAR(consp);
+ if (is_not_tuple(opt))
+ return 0;
+
+ tp = tuple_val(opt);
+ if (arityval(tp[0]) != 2)
+ return 0;
+
+ switch (tp[1]) {
+ case am_async:
+ if (!async || !bool_arg(tp[2], async))
+ return 0;
+ break;
+ case am_info:
+ if (!info || !bool_arg(tp[2], info))
+ return 0;
+ break;
+ case am_abs:
+ if (!abs || !bool_arg(tp[2], abs))
+ return 0;
+ break;
+ case am_accessor:
+ if (!accessor || is_not_internal_pid(tp[2]))
+ return 0;
+ *accessor = tp[2];
+ break;
+ default:
+ return 0;
+ }
+
+ list = CDR(consp);
+ }
+
+ if (is_not_nil(list))
+ return 0;
+ return 1;
+}
+
+static void
+exit_cancel_bif_timer(ErtsHLTimer *tmr, void *vesdp)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ Uint32 sid, roflgs;
+ erts_aint_t state;
+
+ state = erts_smp_atomic32_cmpxchg_acqb(&tmr->state,
+ ERTS_TMR_STATE_CANCELED,
+ ERTS_TMR_STATE_ACTIVE);
+
+ roflgs = tmr->head.roflgs;
+ sid = roflgs & ERTS_TMR_ROFLG_SID_MASK;
+
+ ERTS_HLT_ASSERT(sid == erts_get_ref_numbers_thr_id(tmr->btm.refn));
+ ERTS_HLT_ASSERT(tmr->btm.proc_tree.parent
+ != ERTS_HLT_PFIELD_NOT_IN_TABLE);
+
+ tmr->btm.proc_tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+
+ if (sid == (Uint32) esdp->no) {
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+ hlt_delete_timer(esdp, tmr);
+ }
+ hl_timer_dec_refc(tmr, roflgs);
+ }
+ else {
+ if (state == ERTS_TMR_STATE_ACTIVE) {
+ if (tmr->btm.bp)
+ free_message_buffer(tmr->btm.bp);
+ queue_canceled_timer(esdp, sid, (ErtsTimer *) tmr);
+ }
+ else
+ hl_timer_dec_refc(tmr, roflgs);
+ }
+}
+
+#ifdef ERTS_HLT_DEBUG
+# define ERTS_BTM_MAX_DESTROY_LIMIT 2
+#else
+# define ERTS_BTM_MAX_DESTROY_LIMIT 50
+#endif
+
+typedef struct {
+ ErtsBifTimers *bif_timers;
+ union {
+ proc_btm_rbt_yield_state_t proc_btm_yield_state;
+ abtm_rbt_yield_state_t abtm_yield_state;
+ } u;
+} ErtsBifTimerYieldState;
+
+int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
+{
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p);
+ ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
+ ErtsBifTimerYieldState *ysp;
+ int res;
+
+ ysp = (ErtsBifTimerYieldState *) *vyspp;
+ if (!ysp)
+ ysp = &ys;
+
+ res = proc_btm_rbt_foreach_destroy_yielding(&ysp->bif_timers,
+ exit_cancel_bif_timer,
+ (void *) esdp,
+ &ysp->u.proc_btm_yield_state,
+ ERTS_BTM_MAX_DESTROY_LIMIT);
+
+ if (res == 0) {
+ if (ysp != &ys)
+ erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
+ *vyspp = NULL;
+ }
+ else {
+
+ if (ysp == &ys) {
+ ysp = erts_alloc(ERTS_ALC_T_BTM_YIELD_STATE,
+ sizeof(ErtsBifTimerYieldState));
+ sys_memcpy((void *) ysp, (void *) &ys,
+ sizeof(ErtsBifTimerYieldState));
+ }
+
+ *vyspp = (void *) ysp;
+ }
+
+ return res;
+}
+
+static void
+detach_bif_timer(ErtsHLTimer *tmr, void *vesdp)
+{
+ tmr->abtm.tree.parent = ERTS_HLT_PFIELD_NOT_IN_TABLE;
+ hl_timer_dec_refc(tmr, tmr->head.roflgs);
+}
+
+int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
+{
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p);
+ ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
+ ErtsBifTimerYieldState *ysp;
+ int res;
+
+ ysp = (ErtsBifTimerYieldState *) *vyspp;
+ if (!ysp)
+ ysp = &ys;
+
+ res = abtm_rbt_foreach_destroy_yielding(&ysp->bif_timers,
+ detach_bif_timer,
+ (void *) esdp,
+ &ysp->u.abtm_yield_state,
+ ERTS_BTM_MAX_DESTROY_LIMIT);
+
+ if (res == 0) {
+ if (ysp != &ys)
+ erts_free(ERTS_ALC_T_BTM_YIELD_STATE, ysp);
+ *vyspp = NULL;
+ }
+ else {
+
+ if (ysp == &ys) {
+ ysp = erts_alloc(ERTS_ALC_T_BTM_YIELD_STATE,
+ sizeof(ErtsBifTimerYieldState));
+ sys_memcpy((void *) ysp, (void *) &ys,
+ sizeof(ErtsBifTimerYieldState));
+ }
+
+ *vyspp = (void *) ysp;
+ }
+
+ return res;
+}
+
+static ERTS_INLINE int
+parse_timeout_pos(ErtsSchedulerData *esdp, Eterm arg,
+ ErtsMonotonicTime *conv_arg, int abs,
+ ErtsMonotonicTime *tposp, int *stimep)
+{
+ ErtsMonotonicTime t;
+
+ if (!term_to_Sint64(arg, &t)) {
+ ERTS_HLT_ASSERT(!is_small(arg));
+ if (!is_big(arg))
+ return -1;
+
+ if (abs || !big_sign(arg))
+ return 1;
+
+ return -1;
+ }
+
+ if (conv_arg)
+ *conv_arg = t;
+
+ if (abs) {
+ t += -1*ERTS_MONOTONIC_OFFSET_MSEC; /* external to internal */
+ if (t < ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_BEGIN))
+ return 1;
+ if (t > ERTS_MONOTONIC_TO_MSEC(ERTS_MONOTONIC_END))
+ return 1;
+ *stimep = (t - ERTS_MONOTONIC_TO_MSEC(esdp->last_monotonic_time)
+ < ERTS_BIF_TIMER_SHORT_TIME);
+ *tposp = ERTS_MSEC_TO_CLKTCKS(t);
+ }
+ else {
+ ErtsMonotonicTime now, ticks;
+
+ if (t < 0)
+ return -1;
+
+ ticks = ERTS_MSEC_TO_CLKTCKS(t);
+
+ if (ERTS_CLKTCK_RESOLUTION > 1000 && ticks < 0)
+ return 1;
+
+ ERTS_HLT_ASSERT(ticks >= 0);
+
+ now = erts_get_monotonic_time(esdp);
+ ticks += ERTS_MONOTONIC_TO_CLKTCKS(now-1);
+ ticks += 1;
+
+ if (ticks < ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_BEGIN))
+ return 1;
+ if (ticks > ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_END))
+ return 1;
+
+ *stimep = (t < ERTS_BIF_TIMER_SHORT_TIME);
+ *tposp = ticks;
+ }
+
+ return 0;
+}
+
+/*
+ *
+ * The BIF timer BIFs...
+ */
+
+BIF_RETTYPE send_after_3(BIF_ALIST_3)
+{
+ ErtsMonotonicTime timeout_pos;
+ int short_time, tres;
+
+ tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ 0, &timeout_pos, &short_time);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, timeout_pos, short_time,
+ BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, 0);
+}
+
+BIF_RETTYPE send_after_4(BIF_ALIST_4)
+{
+ ErtsMonotonicTime timeout_pos;
+ Eterm accessor;
+ int short_time, abs, tres;
+
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ BIF_ERROR(BIF_P, BADARG);
+
+ tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ abs, &timeout_pos, &short_time);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, timeout_pos, short_time,
+ BIF_ARG_2, accessor, BIF_ARG_3, 0);
+}
+
+BIF_RETTYPE start_timer_3(BIF_ALIST_3)
+{
+ ErtsMonotonicTime timeout_pos;
+ int short_time, tres;
+
+ tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ 0, &timeout_pos, &short_time);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, timeout_pos, short_time,
+ BIF_ARG_2, BIF_ARG_2, BIF_ARG_3, !0);
+}
+
+BIF_RETTYPE start_timer_4(BIF_ALIST_4)
+{
+ ErtsMonotonicTime timeout_pos;
+ Eterm accessor;
+ int short_time, abs, tres;
+
+ if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
+ BIF_ERROR(BIF_P, BADARG);
+
+ tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ abs, &timeout_pos, &short_time);
+ if (tres != 0)
+ BIF_ERROR(BIF_P, BADARG);
+
+ return setup_bif_timer(BIF_P, timeout_pos, short_time,
+ BIF_ARG_2, accessor, BIF_ARG_3, !0);
+}
+
+BIF_RETTYPE cancel_timer_1(BIF_ALIST_1)
+{
+ return access_bif_timer(BIF_P, BIF_ARG_1, 1, 0, 1);
+}
+
+BIF_RETTYPE cancel_timer_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE ret;
+ int async, info;
+
+ if (parse_bif_timer_options(BIF_ARG_2, &async, &info, NULL, NULL))
+ return access_bif_timer(BIF_P, BIF_ARG_1, 1, async, info);
+
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ return ret;
+}
+
+BIF_RETTYPE read_timer_1(BIF_ALIST_1)
+{
+ return access_bif_timer(BIF_P, BIF_ARG_1, 0, 0, 1);
+}
+
+BIF_RETTYPE read_timer_2(BIF_ALIST_2)
+{
+ BIF_RETTYPE ret;
+ int async;
+
+ if (parse_bif_timer_options(BIF_ARG_2, &async, NULL, NULL, NULL))
+ return access_bif_timer(BIF_P, BIF_ARG_1, 0, async, 1);
+
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ return ret;
+}
+
+/*
+ * Process and Port timer functionality.
+ *
+ * NOTE! These are only allowed to be called by a
+ * scheduler thread that currently is
+ * executing the process or port.
+ */
+
+static ERTS_INLINE void
+set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
+ ErtsMonotonicTime timeout_pos, int short_time)
+{
+ void *tmr;
+ check_canceled_queue(esdp, esdp->timer_service);
+
+ if (tmo == 0)
+ c_p->flags |= F_TIMO;
+ else {
+
+ c_p->flags |= F_INSLPQUEUE;
+ c_p->flags &= ~F_TIMO;
+
+ if (tmo < ERTS_TIMER_WHEEL_MSEC)
+ tmr = (void *) create_tw_timer(esdp, (void *) c_p, 1, timeout_pos);
+ else
+ tmr = (void *) create_hl_timer(esdp, timeout_pos,
+ short_time, 0, (void *) c_p,
+ c_p->common.id, NIL, NIL, NULL);
+ erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
+ }
+}
+
+int
+erts_set_proc_timer_term(Process *c_p, Eterm etmo)
+{
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsMonotonicTime tmo, timeout_pos;
+ int short_time, tres;
+
+ ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
+ == ERTS_PTMR_NONE);
+
+ tres = parse_timeout_pos(esdp, etmo, &tmo, 0,
+ &timeout_pos, &short_time);
+ if (tres != 0)
+ return tres;
+
+ if ((tmo >> 32) != 0)
+ return 1;
+
+ set_proc_timer_common(c_p, esdp, tmo, timeout_pos, short_time);
+ return 0;
+}
+
+void
+erts_set_proc_timer_uword(Process *c_p, UWord tmo)
+{
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+
+ ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
+ == ERTS_PTMR_NONE);
+
+#ifndef ARCH_32
+ ERTS_HLT_ASSERT((tmo >> 32) == (UWord) 0);
+#endif
+
+ if (tmo == 0)
+ c_p->flags |= F_TIMO;
+ else {
+ ErtsMonotonicTime timeout_pos;
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
+ (ErtsMonotonicTime) tmo);
+ set_proc_timer_common(c_p, esdp, (ErtsMonotonicTime) tmo,
+ timeout_pos,
+ tmo < ERTS_BIF_TIMER_SHORT_TIME);
+ }
+}
+
+void
+erts_cancel_proc_timer(Process *c_p)
+{
+ erts_aint_t tval;
+ tval = erts_smp_atomic_xchg_acqb(&c_p->common.timer,
+ ERTS_PTMR_NONE);
+ c_p->flags &= ~(F_INSLPQUEUE|F_TIMO);
+ if (tval == ERTS_PTMR_NONE)
+ return;
+ if (tval == ERTS_PTMR_TIMEDOUT) {
+ erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
+ return;
+ }
+ continue_cancel_ptimer(ERTS_PROC_GET_SCHDATA(c_p),
+ (ErtsTimer *) tval);
+}
+
+void
+erts_set_port_timer(Port *c_prt, Sint64 tmo)
+{
+ void *tmr;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ErtsMonotonicTime timeout_pos;
+
+ if (erts_smp_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
+ erts_cancel_port_timer(c_prt);
+
+ check_canceled_queue(esdp, esdp->timer_service);
+
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp), tmo);
+
+ if (tmo < ERTS_TIMER_WHEEL_MSEC)
+ tmr = (void *) create_tw_timer(esdp, (void *) c_prt, 0,
+ timeout_pos);
+ else
+ tmr = (void *) create_hl_timer(esdp, timeout_pos, 0, 0,
+ (void *) c_prt,
+ c_prt->common.id, NIL, NIL,
+ NULL);
+ erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+}
+
+void
+erts_cancel_port_timer(Port *c_prt)
+{
+ erts_aint_t tval;
+ tval = erts_smp_atomic_xchg_acqb(&c_prt->common.timer,
+ ERTS_PTMR_NONE);
+ if (tval == ERTS_PTMR_NONE)
+ return;
+ if (tval == ERTS_PTMR_TIMEDOUT) {
+ while (!erts_port_task_is_scheduled(&c_prt->timeout_task))
+ erts_thr_yield();
+ erts_port_task_abort(&c_prt->timeout_task);
+ erts_smp_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE);
+ return;
+ }
+ continue_cancel_ptimer(erts_get_scheduler_data(),
+ (ErtsTimer *) tval);
+}
+
+Sint64
+erts_read_port_timer(Port *c_prt)
+{
+ ErtsTimer *tmr;
+ erts_aint_t itmr;
+ ErtsMonotonicTime timeout_pos;
+
+ itmr = erts_smp_atomic_read_acqb(&c_prt->common.timer);
+ if (itmr == ERTS_PTMR_NONE)
+ return (Sint64) -1;
+ if (itmr == ERTS_PTMR_TIMEDOUT)
+ return (Sint64) 0;
+ tmr = (ErtsTimer *) itmr;
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_HLT)
+ timeout_pos = tmr->hlt.timeout;
+ else
+ timeout_pos = tmr->twt.tw_tmr.timeout_pos;
+ return get_time_left(NULL, timeout_pos);
+}
+
+/*
+ * Debug stuff...
+ */
+
+typedef struct {
+ int to;
+ void *to_arg;
+ ErtsMonotonicTime now;
+} ErtsBTMPrint;
+
+static void
+btm_print(ErtsHLTimer *tmr, void *vbtmp)
+{
+ ErtsBTMPrint *btmp = (ErtsBTMPrint *) vbtmp;
+ ErtsMonotonicTime left;
+ Eterm receiver;
+
+    if (tmr->timeout <= btmp->now)
+	left = 0;
+    else
+	left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+
+ receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->receiver.name
+ : tmr->receiver.proc->common.id);
+
+ erts_print(btmp->to, btmp->to_arg,
+ "=timer:%T\n"
+ "Message: %T\n"
+ "Time left: %b64d\n",
+ receiver,
+ tmr->btm.message,
+ (Sint64) left);
+}
+
+void
+erts_print_bif_timer_info(int to, void *to_arg)
+{
+ ErtsBTMPrint btmp;
+ int six;
+
+ if (!ERTS_IS_CRASH_DUMPING)
+ ERTS_INTERNAL_ERROR("Not crash dumping");
+
+ btmp.to = to;
+ btmp.to_arg = to_arg;
+ btmp.now = erts_get_monotonic_time(NULL);
+ btmp.now = ERTS_MONOTONIC_TO_CLKTCKS(btmp.now);
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsHLTimerService *srv =
+ erts_aligned_scheduler_data[six].esd.timer_service;
+ btm_rbt_foreach(srv->btm_tree, btm_print, (void *) &btmp);
+ }
+}
+
+typedef struct {
+ void (*func)(Eterm,
+ Eterm,
+ ErlHeapFragment *,
+ void *);
+ void *arg;
+} ErtsBTMForeachDebug;
+
+static void
+debug_btm_foreach(ErtsHLTimer *tmr, void *vbtmfd)
+{
+ if (erts_smp_atomic32_read_nob(&tmr->state) == ERTS_TMR_STATE_ACTIVE) {
+ ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
+ (*btmfd->func)(((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
+ ? tmr->receiver.name
+ : tmr->receiver.proc->common.id),
+ tmr->btm.message,
+ tmr->btm.bp,
+ btmfd->arg);
+ }
+}
+
+void
+erts_debug_bif_timer_foreach(void (*func)(Eterm,
+ Eterm,
+ ErlHeapFragment *,
+ void *),
+ void *arg)
+{
+ ErtsBTMForeachDebug btmfd;
+ int six;
+
+ btmfd.func = func;
+ btmfd.arg = arg;
+
+ if (!erts_smp_thr_progress_is_blocking())
+ ERTS_INTERNAL_ERROR("Not blocking thread progress");
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsHLTimerService *srv =
+ erts_aligned_scheduler_data[six].esd.timer_service;
+ btm_rbt_foreach(srv->btm_tree,
+ debug_btm_foreach,
+ (void *) &btmfd);
+ }
+}
+
+#ifdef ERTS_HLT_HARD_DEBUG
+
+typedef struct {
+ ErtsHLTimerService *srv;
+ int found_root;
+ ErtsHLTimer **rootpp;
+} ErtsHdbgHLT;
+
+static void
+st_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
+{
+ ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
+ ErtsHLTimer **rootpp;
+ ERTS_HLT_ASSERT(tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME);
+ if (tmr->time.tree.parent == ERTS_HLT_PFLG_SAME_TIME) {
+ ERTS_HLT_ASSERT(tmr != *hdbg->rootpp);
+ }
+ else {
+ rootpp = (ErtsHLTimer **) (tmr->time.tree.parent
+ & ~ERTS_HLT_PFLG_SAME_TIME);
+ ERTS_HLT_ASSERT(rootpp == hdbg->rootpp);
+ ERTS_HLT_ASSERT(tmr == *rootpp);
+ ERTS_HLT_ASSERT(!hdbg->found_root);
+ hdbg->found_root = 1;
+ }
+ ERTS_HLT_ASSERT(tmr->time.tree.u.l.next->time.tree.u.l.prev == tmr);
+ ERTS_HLT_ASSERT(tmr->time.tree.u.l.prev->time.tree.u.l.next == tmr);
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+}
+
+static void
+tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
+{
+ ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
+ ErtsHLTimer *prnt;
+ ERTS_HLT_ASSERT((tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ prnt = (ErtsHLTimer *) (tmr->time.tree.parent & ~ERTS_HLT_PFLGS_MASK);
+ if (prnt) {
+ ERTS_HLT_ASSERT(prnt->time.tree.u.t.left == tmr
+ || prnt->time.tree.u.t.right == tmr);
+ }
+ else {
+ ERTS_HLT_ASSERT(!hdbg->found_root);
+ hdbg->found_root = 1;
+ ERTS_HLT_ASSERT(tmr == *hdbg->rootpp);
+ }
+ if (tmr->time.tree.u.t.left) {
+ prnt = (ErtsHLTimer *) (tmr->time.tree.u.t.left->time.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+ if (tmr->time.tree.u.t.right) {
+ prnt = (ErtsHLTimer *) (tmr->time.tree.u.t.right->time.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+ if (tmr->time.tree.same_time) {
+ ErtsHdbgHLT st_hdbg;
+ st_hdbg.srv = hdbg->srv;
+ st_hdbg.found_root = 0;
+ st_hdbg.rootpp = &tmr->time.tree.same_time;
+ same_time_list_foreach(tmr->time.tree.same_time, st_hdbg_func, (void *) &st_hdbg);
+ ERTS_HLT_ASSERT(st_hdbg.found_root);
+ }
+}
+
+static void
+bt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
+{
+ ErtsHdbgHLT *hdbg = (ErtsHdbgHLT *) vhdbg;
+ ErtsHLTimer *prnt;
+ ERTS_HLT_ASSERT((tmr->btm.tree.parent & ERTS_HLT_PFLG_SAME_TIME) == 0);
+ prnt = (ErtsHLTimer *) (tmr->btm.tree.parent & ~ERTS_HLT_PFLGS_MASK);
+ if (prnt) {
+ ERTS_HLT_ASSERT(prnt->btm.tree.left == tmr
+ || prnt->btm.tree.right == tmr);
+ }
+ else {
+ ERTS_HLT_ASSERT(!hdbg->found_root);
+ hdbg->found_root = 1;
+ ERTS_HLT_ASSERT(tmr == *hdbg->rootpp);
+ }
+ if (tmr->btm.tree.left) {
+ prnt = (ErtsHLTimer *) (tmr->btm.tree.left->btm.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+ if (tmr->btm.tree.right) {
+ prnt = (ErtsHLTimer *) (tmr->btm.tree.right->btm.tree.parent
+ & ~ERTS_HLT_PFLGS_MASK);
+ ERTS_HLT_ASSERT(tmr == prnt);
+ }
+ if (tmr->pending_timeout) {
+ if (tmr->pending_timeout > 0) /* container > 0 */
+ ERTS_HLT_ASSERT(tmr->time.tree.parent == ERTS_HLT_PFIELD_NOT_IN_TABLE);
+ else {
+ ERTS_HLT_ASSERT(tmr->time.tree.parent != ERTS_HLT_PFIELD_NOT_IN_TABLE);
+ ERTS_HLT_ASSERT(tmr->time.tree.parent & ERTS_HLT_PFLG_SAME_TIME);
+ }
+ }
+ else {
+ ErtsHLTimer *ttmr = time_rbt_lookup(hdbg->srv->time_tree, tmr->timeout);
+ ERTS_HLT_ASSERT(ttmr);
+ if (ttmr != tmr) {
+ ERTS_HLT_ASSERT(ttmr->time.tree.same_time);
+ ERTS_HLT_ASSERT(tmr == same_time_list_lookup(ttmr->time.tree.same_time, tmr));
+ }
+ }
+}
+
+static void
+hdbg_chk_srv(ErtsHLTimerService *srv)
+{
+ if (srv->time_tree) {
+ ErtsHdbgHLT hdbg;
+ hdbg.srv = srv;
+ hdbg.found_root = 0;
+ hdbg.rootpp = &srv->time_tree;
+ time_rbt_foreach(srv->time_tree, tt_hdbg_func, (void *) &hdbg);
+ ERTS_HLT_ASSERT(hdbg.found_root);
+ }
+ if (srv->btm_tree) {
+ ErtsHdbgHLT hdbg;
+ hdbg.srv = srv;
+ hdbg.found_root = 0;
+ hdbg.rootpp = &srv->btm_tree;
+ btm_rbt_foreach(srv->btm_tree, bt_hdbg_func, (void *) &hdbg);
+ ERTS_HLT_ASSERT(hdbg.found_root);
+ }
+}
+
+#endif /* ERTS_HLT_HARD_DEBUG */
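
exit_cancel_bif_timer() above decides who owns cleanup with a single compare-and-swap of the timer state from ACTIVE to CANCELED; whichever party wins that transition frees the pending message buffer. Below is a minimal standalone sketch of that protocol, using plain C11 atomics instead of the erts_smp_atomic32 wrappers; all names are illustrative, not the ERTS ones.

/* Sketch of the ACTIVE -> CANCELED compare-and-swap ownership protocol.
 * Assumption: plain C11 atomics stand in for erts_smp_atomic32_cmpxchg_acqb. */
#include <stdatomic.h>
#include <stdio.h>

enum { TMR_STATE_ACTIVE = 0, TMR_STATE_CANCELED = 1, TMR_STATE_TIMED_OUT = 2 };

typedef struct {
    _Atomic int state;
    void *message_buffer;          /* stand-in for tmr->btm.bp */
} sketch_timer;

/* Returns nonzero iff this caller won the ACTIVE -> CANCELED transition
 * and therefore owns freeing the message buffer. */
static int sketch_cancel(sketch_timer *tmr)
{
    int expected = TMR_STATE_ACTIVE;
    return atomic_compare_exchange_strong(&tmr->state, &expected,
                                          TMR_STATE_CANCELED);
}

int main(void)
{
    sketch_timer t = { TMR_STATE_ACTIVE, NULL };
    if (sketch_cancel(&t))
        puts("first cancel wins: free the message buffer here");
    if (!sketch_cancel(&t))
        puts("second cancel loses the race: nothing to free");
    return 0;
}
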
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
new file mode 100644
index 0000000000..30889a71da
--- /dev/null
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -0,0 +1,80 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2015. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_HL_TIMER_H__
+#define ERL_HL_TIMER_H__
+
+typedef struct ErtsHLTimer_ ErtsBifTimers;
+typedef struct ErtsHLTimerService_ ErtsHLTimerService;
+
+#include "sys.h"
+#include "erl_process.h"
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_message.h"
+#include "erl_alloc_types.h"
+
+#define ERTS_PTMR_NONE ((erts_aint_t) NULL)
+#define ERTS_PTMR_TIMEDOUT (ERTS_PTMR_NONE + ((erts_aint_t) 1))
+
+#define ERTS_PTMR_INIT(P) \
+ erts_smp_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE)
+#define ERTS_PTMR_IS_SET(P) \
+ (ERTS_PTMR_NONE != erts_smp_atomic_read_nob(&(P)->common.timer))
+#define ERTS_PTMR_IS_TIMED_OUT(P) \
+ (ERTS_PTMR_TIMEDOUT == erts_smp_atomic_read_nob(&(P)->common.timer))
+
+#define ERTS_PTMR_CLEAR(P) \
+ do { \
+ ASSERT(ERTS_PTMR_IS_TIMED_OUT((P))); \
+ erts_smp_atomic_set_nob(&(P)->common.timer, \
+ ERTS_PTMR_NONE); \
+ } while (0)
+
+size_t erts_timer_type_size(ErtsAlcType_t type);
+int erts_set_proc_timer_term(Process *, Eterm);
+void erts_set_proc_timer_uword(Process *, UWord);
+void erts_cancel_proc_timer(Process *);
+void erts_set_port_timer(Port *, Sint64);
+void erts_cancel_port_timer(Port *);
+Sint64 erts_read_port_timer(Port *);
+int erts_cancel_bif_timers(Process *, ErtsBifTimers *, void **);
+int erts_detach_accessor_bif_timers(Process *, ErtsBifTimers *, void **);
+ErtsHLTimerService *erts_create_timer_service(void);
+void erts_hl_timer_init(void);
+
+#ifdef ERTS_SMP
+void
+erts_handle_canceled_timers(void *vesdp,
+ int *need_thr_progress,
+ ErtsThrPrgrVal *thr_prgr_p,
+ int *need_more_work);
+#endif
+
+Uint erts_bif_timer_memory_size(void);
+void erts_print_bif_timer_info(int to, void *to_arg);
+
+void erts_debug_bif_timer_foreach(void (*func)(Eterm,
+ Eterm,
+ ErlHeapFragment *,
+ void *),
+ void *arg);
+
+#endif /* ERL_HL_TIMER_H__ */
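
The ERTS_PTMR_* macros above encode the per-process/port timer as one pointer-sized atomic field holding either a sentinel (NONE, TIMEDOUT) or a pointer to the live timer; cancellation is an atomic exchange back to NONE, as in erts_cancel_proc_timer() and erts_cancel_port_timer() in the .c file. A standalone sketch of the same scheme with C11 atomics and illustrative names:

/* Sketch of the NONE / TIMEDOUT / pointer sentinel scheme. Assumption:
 * a plain _Atomic intptr_t models the erts_smp_atomic timer field. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PTMR_NONE     ((intptr_t) 0)   /* models ERTS_PTMR_NONE     */
#define PTMR_TIMEDOUT (PTMR_NONE + 1)  /* models ERTS_PTMR_TIMEDOUT */

typedef struct { int dummy; } sketch_tmr;

static void sketch_cancel_ptimer(_Atomic intptr_t *field)
{
    intptr_t val = atomic_exchange(field, PTMR_NONE);
    if (val == PTMR_NONE)
        puts("no timer set");
    else if (val == PTMR_TIMEDOUT)
        puts("timer already fired; nothing left to cancel");
    else
        printf("cancelling live timer at %p\n", (void *) val);
}

int main(void)
{
    sketch_tmr t;
    _Atomic intptr_t field = PTMR_NONE;

    sketch_cancel_ptimer(&field);              /* NONE              */
    atomic_store(&field, (intptr_t) &t);
    sketch_cancel_ptimer(&field);              /* live timer        */
    atomic_store(&field, PTMR_TIMEDOUT);
    sketch_cancel_ptimer(&field);              /* already timed out */
    return 0;
}
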
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 4f12727044..988ff0e2b5 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -35,7 +35,7 @@
#include "dist.h"
#include "erl_mseg.h"
#include "erl_threads.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_instrument.h"
#include "erl_printf_term.h"
#include "erl_misc_utils.h"
@@ -46,6 +46,8 @@
#include "erl_async.h"
#include "erl_ptab.h"
#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -365,7 +367,6 @@ erl_init(int ncpu,
erts_init_binary(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
- erts_bif_timer_init();
erts_init_node_tables();
init_dist();
erl_drv_thr_init();
@@ -2095,11 +2096,8 @@ erl_start(int argc, char **argv)
erts_initialized = 1;
- {
- Eterm init = erl_first_process_otp("otp_ring0", NULL, 0,
- boot_argc, boot_argv);
- erts_bif_timer_start_servers(init);
- }
+ (void) erl_first_process_otp("otp_ring0", NULL, 0,
+ boot_argc, boot_argv);
#ifdef ERTS_SMP
erts_start_schedulers();
@@ -2107,13 +2105,17 @@ erl_start(int argc, char **argv)
erts_sys_main_thread(); /* May or may not return! */
#else
- erts_thr_set_main_status(1, 1);
+ {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ erts_thr_set_main_status(1, 1);
#if ERTS_USE_ASYNC_READY_Q
- erts_get_scheduler_data()->aux_work_data.async_ready.queue
- = erts_get_async_ready_queue(1);
+ esdp->aux_work_data.async_ready.queue
+ = erts_get_async_ready_queue(1);
#endif
- set_main_stack_size();
- process_main();
+ set_main_stack_size();
+ erts_sched_init_time_sup(esdp);
+ process_main();
+ }
#endif
}
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 261460d054..617ce84895 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -91,6 +91,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "driver_list", NULL },
{ "proc_link", "pid" },
{ "proc_msgq", "pid" },
+ { "proc_btm", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
{ "code_write_permission", NULL },
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index bb2a2bcdf9..a1bd39dbc8 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -1884,7 +1884,7 @@ erts_hashmap_get(Uint32 hx, Eterm key, Eterm node)
UseTmpHeapNoproc(2);
ASSERT(is_boxed(node));
- ptr = boxed_val(node);
+ ptr = boxed_val_rel(node, map_base);
hdr = *ptr;
ASSERT(is_header(hdr));
ASSERT(is_hashmap_header_head(hdr));
@@ -1905,8 +1905,7 @@ erts_hashmap_get(Uint32 hx, Eterm key, Eterm node)
node = ptr[ix+1];
if (is_list(node)) { /* LEAF NODE [K|V] */
- ptr = list_val(node);
-
+ ptr = list_val_rel(node,map_base);
res = eq_rel(CAR(ptr), map_base, key, NULL) ? &(CDR(ptr)) : NULL;
break;
}
@@ -1914,7 +1913,7 @@ erts_hashmap_get(Uint32 hx, Eterm key, Eterm node)
hx = hashmap_shift_hash(th,hx,lvl,key);
ASSERT(is_boxed(node));
- ptr = boxed_val(node);
+ ptr = boxed_val_rel(node, map_base);
hdr = *ptr;
ASSERT(is_header(hdr));
ASSERT(!is_hashmap_header_head(hdr));
@@ -2586,12 +2585,12 @@ BIF_RETTYPE erts_internal_map_type_1(BIF_ALIST_1) {
DECL_AM(hashmap);
DECL_AM(hashmap_node);
DECL_AM(flatmap);
- if (is_flatmap(BIF_ARG_1)) {
- BIF_RET(AM_flatmap);
- } else if (is_hashmap(BIF_ARG_1)) {
+ if (is_map(BIF_ARG_1)) {
Eterm hdr = *(boxed_val(BIF_ARG_1));
ASSERT(is_header(hdr));
switch (hdr & _HEADER_MAP_SUBTAG_MASK) {
+ case HAMT_SUBTAG_HEAD_FLATMAP:
+ BIF_RET(AM_flatmap);
case HAMT_SUBTAG_HEAD_ARRAY:
case HAMT_SUBTAG_HEAD_BITMAP:
BIF_RET(AM_hashmap);
@@ -2612,23 +2611,22 @@ BIF_RETTYPE erts_internal_map_type_1(BIF_ALIST_1) {
*/
BIF_RETTYPE erts_internal_map_hashmap_children_1(BIF_ALIST_1) {
- if (is_hashmap(BIF_ARG_1)) {
+ if (is_map(BIF_ARG_1)) {
Eterm node = BIF_ARG_1;
Eterm *ptr, hdr, *hp, res = NIL;
Uint sz = 0;
ptr = boxed_val(node);
hdr = *ptr;
-
ASSERT(is_header(hdr));
switch(hdr & _HEADER_MAP_SUBTAG_MASK) {
- case HAMT_SUBTAG_NODE_BITMAP:
- sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
- ptr += 1;
- break;
+ case HAMT_SUBTAG_HEAD_FLATMAP:
+ BIF_ERROR(BIF_P, BADARG);
case HAMT_SUBTAG_HEAD_BITMAP:
- sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
- ptr += 2;
+ ptr++;
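+	    /* fall through: NODE_BITMAP shares the bitmap handling below */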
+ case HAMT_SUBTAG_NODE_BITMAP:
+ ptr++;
+ sz = hashmap_bitcount(MAP_HEADER_VAL(hdr));
break;
case HAMT_SUBTAG_HEAD_ARRAY:
sz = 16;
@@ -2642,12 +2640,9 @@ BIF_RETTYPE erts_internal_map_hashmap_children_1(BIF_ALIST_1) {
hp = HAlloc(BIF_P, 2*sz);
while(sz--) { res = CONS(hp, *ptr++, res); hp += 2; }
BIF_RET(res);
- } else if (is_flatmap(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- } else {
- BIF_P->fvalue = BIF_ARG_1;
- BIF_ERROR(BIF_P, BADMAP);
}
+ BIF_P->fvalue = BIF_ARG_1;
+ BIF_ERROR(BIF_P, BADMAP);
}
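
In the bitmap-node branches above the node size comes from hashmap_bitcount(MAP_HEADER_VAL(hdr)), i.e. a bitmap node has one child slot per set bit. A standalone sketch of that bookkeeping; the popcount helper and the slot-index rule (popcount of the bits below the selected bit, the usual HAMT layout) are illustrative, not the ERTS macros:

/* Sketch of bitmap-node sizing and child lookup in a HAMT-style map node.
 * Assumption: portable popcount and made-up constants, not ERTS internals. */
#include <stdint.h>
#include <stdio.h>

static unsigned popcount32(uint32_t x)
{
    unsigned n = 0;
    while (x) { x &= x - 1; n++; }
    return n;
}

int main(void)
{
    uint32_t bitmap = 0x00008421;   /* children at bit positions 0, 5, 10, 15 */
    unsigned hash_bit = 10;         /* bit selected by this level's hash slice */

    printf("node has %u children\n", popcount32(bitmap));
    if (bitmap & (UINT32_C(1) << hash_bit))
        printf("child for this hash slice sits in slot %u\n",
               popcount32(bitmap & ((UINT32_C(1) << hash_bit) - 1)));
    return 0;
}
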
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 247ea10764..ccfc2e6458 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -990,7 +990,7 @@ erts_send_message(Process* sender,
#endif
);
BM_SWAP_TIMER(send,system);
- } else if (sender == receiver && !(sender->flags & F_OFF_HEAP_MSGS)) {
+ } else if (sender == receiver) {
/* Drop message if receiver has a pending exit ... */
#ifdef ERTS_SMP
ErtsProcLocks need_locks = (~(*receiver_locks)
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 8f9ea939e8..1e1dafee90 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -213,25 +213,15 @@ do { \
if ((M)->data.attached) { \
Uint need__ = erts_msg_attached_data_size((M)); \
if ((ST) - (HT) >= need__) { \
- Uint *htop__; \
- move__attached__msg__data____: \
- htop__ = (HT); \
+ Uint *htop__ = (HT); \
erts_move_msg_attached_data_to_heap(&htop__, &MSO((P)), (M));\
ASSERT(htop__ - (HT) <= need__); \
(HT) = htop__; \
} \
else { \
- int off_heap_msgs__ = (int) (P)->flags & F_OFF_HEAP_MSGS; \
- if (!off_heap_msgs__) \
- need__ = 0; \
{ SWPO ; } \
- (FC) -= erts_garbage_collect((P), need__, NULL, 0); \
+ (FC) -= erts_garbage_collect((P), 0, NULL, 0); \
{ SWPI ; } \
- if (off_heap_msgs__) { \
- ASSERT((M)->data.attached); \
- ASSERT((ST) - (HT) >= need__); \
- goto move__attached__msg__data____; \
- } \
} \
ASSERT(!(M)->data.attached); \
} \
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 25caaa4e44..426a00304e 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -336,7 +336,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
rp = (scheduler
? erts_proc_lookup(receiver)
: erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
- receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC));
+ receiver, rp_locks, ERTS_P2P_FLG_INC_REFC));
if (rp == NULL) {
ASSERT(env == NULL || receiver != c_p->common.id);
return 0;
@@ -364,7 +364,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
if (flush_me) {
cache_env(env);
}
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index c6d136f951..bcf6311079 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1469,7 +1469,7 @@ setup_reference_table(void)
erts_db_foreach_table(insert_ets_table, NULL);
/* Insert all bif timers */
- erts_bif_timer_foreach(insert_bif_timer, NULL);
+ erts_debug_bif_timer_foreach(insert_bif_timer, NULL);
/* Insert node table (references to dist) */
hash_foreach(&erts_node_table, insert_erl_node, NULL);
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index ad3f104a68..3920fae2d9 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -350,6 +350,7 @@ int erts_lc_is_port_locked(Port *);
ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt);
ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt);
ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc);
+ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt);
ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
@@ -359,37 +360,26 @@ ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt)
{
-#ifdef ERTS_SMP
- erts_ptab_inc_refc(&prt->common);
-#else
- erts_atomic32_inc_nob(&prt->refc);
-#endif
+ erts_ptab_atmc_inc_refc(&prt->common);
}
ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt)
{
-#ifdef ERTS_SMP
- int referred = erts_ptab_dec_test_refc(&prt->common);
+ int referred = erts_ptab_atmc_dec_test_refc(&prt->common);
if (!referred)
erts_port_free(prt);
-#else
- int refc = erts_atomic32_dec_read_nob(&prt->refc);
- if (refc == 0)
- erts_port_free(prt);
-#endif
}
ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc)
{
-#ifdef ERTS_SMP
- int referred = erts_ptab_add_test_refc(&prt->common, add_refc);
+ int referred = erts_ptab_atmc_add_test_refc(&prt->common, add_refc);
if (!referred)
erts_port_free(prt);
-#else
- int refc = erts_atomic32_add_read_nob(&prt->refc, add_refc);
- if (refc == 0)
- erts_port_free(prt);
-#endif
+}
+
+ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt)
+{
+ return erts_ptab_atmc_read_refc(&prt->common);
}
ERTS_GLB_INLINE int
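
erts_port_dec_refc() above now goes through the common dec-and-test pattern in erts_ptab_atmc_dec_test_refc(): the caller that drops the last reference frees the port. A minimal standalone sketch of that pattern with C11 atomics and illustrative names:

/* Sketch of dec-and-test reference counting; the last holder frees.
 * Assumption: _Atomic long stands in for the erts_ptab_atmc refc field. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    _Atomic long refc;
    /* ... other port fields ... */
} sketch_port;

static void sketch_port_dec_refc(sketch_port *prt)
{
    if (atomic_fetch_sub(&prt->refc, 1) == 1) {  /* we dropped the last reference */
        puts("freeing port");
        free(prt);
    }
}

int main(void)
{
    sketch_port *prt = malloc(sizeof *prt);
    if (!prt)
        return 1;
    atomic_init(&prt->refc, 2);
    sketch_port_dec_refc(prt);   /* still referenced */
    sketch_port_dec_refc(prt);   /* last reference: object is freed */
    return 0;
}
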
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 2aa0a27197..c701737e26 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1646,6 +1646,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_aint32_t state;
int active;
Uint64 start_time = 0;
+ ErtsSchedulerData *esdp = runq->scheduler;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
@@ -1662,7 +1663,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
*curr_port_pp = pp;
if (erts_sched_stat.enabled) {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
int migrated = old && old != esdp->no;
@@ -1718,11 +1718,16 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
switch (ptp->type) {
case ERTS_PORT_TASK_TIMEOUT:
reset_handle(ptp);
- reds = ERTS_PORT_REDS_TIMEOUT;
- if (!(state & ERTS_PORT_SFLGS_DEAD)) {
- DTRACE_DRIVER(driver_timeout, pp);
- (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
- }
+ if (!ERTS_PTMR_IS_TIMED_OUT(pp))
+ reds = 0;
+ else {
+ ERTS_PTMR_CLEAR(pp);
+ reds = ERTS_PORT_REDS_TIMEOUT;
+ if (!(state & ERTS_PORT_SFLGS_DEAD)) {
+ DTRACE_DRIVER(driver_timeout, pp);
+ (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
+ }
+ }
break;
case ERTS_PORT_TASK_INPUT:
reds = ERTS_PORT_REDS_INPUT;
@@ -1879,7 +1884,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
runq->scheduler->reductions += reds;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);
+ ERTS_PORT_REDUCTIONS_EXECUTED(esdp, runq, reds);
return res;
}
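
The ERTS_PORT_TASK_TIMEOUT change above makes a scheduled timeout task re-check the port timer when it finally runs and do nothing (zero reductions) if the timer is no longer in the timed-out state, e.g. because it was cancelled or reset in the meantime. A simplified standalone model of that guard; it collapses the check-and-clear into one compare-and-swap, whereas the real code uses ERTS_PTMR_IS_TIMED_OUT plus ERTS_PTMR_CLEAR:

/* Sketch of skipping a stale scheduled timeout task. Names and the
 * reduction value are illustrative. */
#include <stdatomic.h>
#include <stdio.h>

enum { TIMER_NONE, TIMER_TIMEDOUT };

static int run_timeout_task(_Atomic int *timer_state)
{
    int expected = TIMER_TIMEDOUT;
    /* Clear the timed-out state only if it is still set. */
    if (!atomic_compare_exchange_strong(timer_state, &expected, TIMER_NONE))
        return 0;                     /* stale task: charge no reductions */
    puts("driver timeout callback runs here");
    return 10;                        /* stand-in for ERTS_PORT_REDS_TIMEOUT */
}

int main(void)
{
    _Atomic int state = TIMER_TIMEDOUT;
    printf("reds = %d\n", run_timeout_task(&state));  /* runs the callback */
    printf("reds = %d\n", run_timeout_task(&state));  /* stale: no-op      */
    return 0;
}
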
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 00c7b163c2..af8db519d4 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -44,8 +44,10 @@
#include "dtrace-wrapper.h"
#include "erl_ptab.h"
#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
-
+#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -463,7 +465,7 @@ static int stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
static void aux_work_timeout_late_init(void);
-static void setup_aux_work_timer(void);
+static void setup_aux_work_timer(ErtsSchedulerData *esdp);
static int execute_sys_tasks(Process *c_p,
erts_aint32_t *statep,
@@ -493,6 +495,8 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_DD;
valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+ valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+ valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
#endif
#if HAVE_ERTS_MSEG
@@ -610,13 +614,11 @@ erts_pre_init_process(void)
#endif
}
-#ifdef ERTS_SMP
static void
release_process(void *vproc)
{
- erts_smp_proc_dec_refc((Process *) vproc);
+ erts_proc_dec_refc((Process *) vproc);
}
-#endif
/* initialize the scheduler */
void
@@ -632,16 +634,18 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
erts_ptab_init_table(&erts_proc,
ERTS_ALC_T_PROC_TABLE,
-#ifdef ERTS_SMP
release_process,
-#else
- NULL,
-#endif
(ErtsPTabElementCommon *) &erts_invalid_process.common,
proc_tab_size,
sizeof(Process),
"process_table",
- legacy_proc_tab);
+ legacy_proc_tab,
+#ifdef ERTS_SMP
+ 1
+#else
+ 0
+#endif
+ );
last_reductions = 0;
last_exact_reductions = 0;
@@ -1031,7 +1035,7 @@ reply_sched_wall_time(void *vswtrp)
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0)
swtreq_free(vswtrp);
@@ -1063,7 +1067,7 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable)
erts_smp_atomic32_init_nob(&swtrp->refc,
(erts_aint32_t) erts_no_schedulers);
- erts_smp_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
+ erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
@@ -1124,7 +1128,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
xplocks &= ~plocks;
if (xplocks && erts_smp_proc_trylock(p, xplocks) == EBUSY) {
if (xplocks & ERTS_PROC_LOCK_MAIN) {
- erts_smp_proc_inc_refc(p);
+ erts_proc_inc_refc(p);
erts_smp_proc_unlock(p, plocks);
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL);
refc = 1;
@@ -1140,7 +1144,7 @@ erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
if (xplocks)
erts_smp_proc_unlock(p, xplocks);
if (refc)
- erts_smp_proc_dec_refc(p);
+ erts_proc_dec_refc(p);
ASSERT(p->psd);
if (p->psd != psd)
erts_free(ERTS_ALC_T_PSD, psd);
@@ -1768,6 +1772,101 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i
}
/*
+ * Canceled timers
+ */
+
+void
+erts_notify_canceled_timer(ErtsSchedulerData *esdp, int rsid)
+{
+ ASSERT(esdp && esdp == erts_get_scheduler_data());
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
+ schedule_aux_work_wakeup(&esdp->aux_work_data,
+ rsid,
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ else
+ set_aux_work_flags_wakeup_relb(ERTS_SCHED_SLEEP_INFO_IX(rsid-1),
+ ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_canceled_timers(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsSchedulerSleepInfo *ssi = awdp->ssi;
+ int need_thr_progress = 0;
+ ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+ int more_work = 0;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ erts_handle_canceled_timers((void *) awdp->esdp,
+ &need_thr_progress,
+ &wakeup,
+ &more_work);
+ if (more_work) {
+ if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS)
+ & ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR) {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ aux_work &= ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+ }
+ return aux_work;
+ }
+
+ if (need_thr_progress) {
+ if (wakeup == ERTS_THR_PRGR_INVALID)
+ wakeup = erts_thr_progress_later(awdp->esdp);
+ awdp->cncld_tmrs.thr_prgr = wakeup;
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ haw_thr_prgr_soft_wakeup(awdp, wakeup);
+ }
+ return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS;
+}
+
+static ERTS_INLINE erts_aint32_t
+handle_canceled_timers_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsSchedulerSleepInfo *ssi;
+ int need_thr_progress;
+ int more_work;
+ ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
+ ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
+#endif
+ if (!erts_thr_progress_has_reached_this(current, awdp->cncld_tmrs.thr_prgr))
+ return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+
+ ssi = awdp->ssi;
+ need_thr_progress = 0;
+ more_work = 0;
+
+ erts_handle_canceled_timers((void *) awdp->esdp,
+ &need_thr_progress,
+ &wakeup,
+ &more_work);
+ if (more_work) {
+ set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ return ((aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR)
+ | ERTS_SSI_AUX_WORK_CNCLD_TMRS);
+ }
+
+ if (need_thr_progress) {
+ if (wakeup == ERTS_THR_PRGR_INVALID)
+ wakeup = erts_thr_progress_later(awdp->esdp);
+ awdp->cncld_tmrs.thr_prgr = wakeup;
+ haw_thr_prgr_soft_wakeup(awdp, wakeup);
+ }
+ else {
+ unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
+ }
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
+}
+
+/*
* Handle scheduled thread progress later operations.
*/
#define ERTS_MAX_THR_PRGR_LATER_OPS 50
@@ -1865,7 +1964,7 @@ completed_dealloc(void *vproc)
{
if (erts_atomic32_dec_read_mb(&completed_dealloc_count) == 0) {
erts_resume((Process *) vproc, (ErtsProcLocks) 0);
- erts_smp_proc_dec_refc((Process *) vproc);
+ erts_proc_dec_refc((Process *) vproc);
}
}
@@ -1914,7 +2013,7 @@ erts_debug_wait_deallocations(Process *c_p)
count,
0)) {
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
- erts_smp_proc_inc_refc(c_p);
+ erts_proc_inc_refc(c_p);
/* scheduler threads */
erts_schedule_multi_misc_aux_work(0,
erts_no_schedulers,
@@ -2029,7 +2128,7 @@ static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
- setup_aux_work_timer();
+ setup_aux_work_timer(awdp->esdp);
return aux_work & ~ERTS_SSI_AUX_WORK_SET_TMO;
}
@@ -2089,6 +2188,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP,
handle_thr_prgr_later_op);
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS,
+ handle_canceled_timers);
+ /* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR,
+ handle_canceled_timers_thr_prgr);
#endif
#if ERTS_USE_ASYNC_READY_Q
@@ -2138,8 +2242,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
typedef struct {
union {
- ErlTimer data;
- char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErlTimer))];
+ ErtsTWheelTimer data;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsTWheelTimer))];
} timer;
int initialized;
@@ -2149,6 +2253,22 @@ typedef struct {
static ErtsAuxWorkTmo *aux_work_tmo;
+static ERTS_INLINE void
+start_aux_work_timer(ErtsSchedulerData *esdp)
+{
+ ErtsMonotonicTime tmo = erts_get_monotonic_time(esdp);
+ tmo = ERTS_MONOTONIC_TO_CLKTCKS(tmo-1);
+ tmo += ERTS_MSEC_TO_CLKTCKS(1000) + 1;
+ erts_twheel_init_timer(&aux_work_tmo->timer.data);
+ ASSERT(esdp);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &aux_work_tmo->timer.data,
+ aux_work_timeout,
+ NULL,
+ (void *) esdp,
+ tmo);
+}
+
static void
aux_work_timeout_early_init(int no_schedulers)
{
@@ -2181,18 +2301,12 @@ void
aux_work_timeout_late_init(void)
{
aux_work_tmo->initialized = 1;
- if (erts_atomic32_read_nob(&aux_work_tmo->refc)) {
- erts_init_timer(&aux_work_tmo->timer.data);
- erts_set_timer(&aux_work_tmo->timer.data,
- aux_work_timeout,
- NULL,
- NULL,
- 1000);
- }
+ if (erts_atomic32_read_nob(&aux_work_tmo->refc))
+ start_aux_work_timer(erts_get_scheduler_data());
}
static void
-aux_work_timeout(void *unused)
+aux_work_timeout(void *vesdp)
{
erts_aint32_t refc;
int i;
@@ -2215,31 +2329,18 @@ aux_work_timeout(void *unused)
if (refc != 1
|| 1 != erts_atomic32_cmpxchg_relb(&aux_work_tmo->refc, 0, 1)) {
/* Setup next timeout... */
- erts_set_timer(&aux_work_tmo->timer.data,
- aux_work_timeout,
- NULL,
- NULL,
- 1000);
+ start_aux_work_timer((ErtsSchedulerData *) vesdp);
}
}
static void
-setup_aux_work_timer(void)
+setup_aux_work_timer(ErtsSchedulerData *esdp)
{
-#ifndef ERTS_SMP
- if (!erts_get_scheduler_data())
+ if (!esdp || !esdp->timer_wheel)
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(0),
ERTS_SSI_AUX_WORK_SET_TMO);
else
-#endif
- {
- erts_init_timer(&aux_work_tmo->timer.data);
- erts_set_timer(&aux_work_tmo->timer.data,
- aux_work_timeout,
- NULL,
- NULL,
- 1000);
- }
+ start_aux_work_timer(esdp);
}
erts_aint32_t
@@ -2270,7 +2371,7 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
if (refc == 1) {
erts_atomic32_inc_acqb(&aux_work_tmo->refc);
if (aux_work_tmo->initialized)
- setup_aux_work_timer();
+ setup_aux_work_timer(erts_get_scheduler_data());
}
}
return old;
@@ -2649,11 +2750,6 @@ aux_thread(void *unused)
erts_aint32_t aux_work;
ErtsThrPrgrCallbacks callbacks;
int thr_prgr_active = 1;
- ErtsTimerWheel *timer_wheel = erts_default_timer_wheel;
- ErtsNextTimeoutRef nxt_tmo_ref = erts_get_next_timeout_reference(timer_wheel);
-
- if (!timer_wheel)
- ERTS_INTERNAL_ERROR("Missing aux timer wheel");
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -2677,7 +2773,6 @@ aux_thread(void *unused)
sched_prep_spin_wait(ssi);
while (1) {
- ErtsMonotonicTime current_time;
erts_aint32_t flgs;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -2689,56 +2784,28 @@ aux_thread(void *unused)
erts_thr_progress_leader_update(NULL);
}
- if (aux_work) {
- current_time = erts_get_monotonic_time();
- if (current_time >= erts_next_timeout_time(nxt_tmo_ref)) {
- if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
- erts_bump_timers(timer_wheel, current_time);
- }
- }
- else {
- ErtsMonotonicTime timeout_time;
- timeout_time = erts_check_next_timeout_time(timer_wheel,
- ERTS_SEC_TO_MONOTONIC(10*60));
- current_time = erts_get_monotonic_time();
- if (current_time >= timeout_time) {
- if (!thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 1);
- }
- else {
- if (thr_prgr_active)
- erts_thr_progress_active(NULL, thr_prgr_active = 0);
- erts_thr_progress_prepare_wait(NULL);
+ if (!aux_work) {
+ if (thr_prgr_active)
+ erts_thr_progress_active(NULL, thr_prgr_active = 0);
+ erts_thr_progress_prepare_wait(NULL);
- ERTS_SCHED_FAIR_YIELD();
+ ERTS_SCHED_FAIR_YIELD();
- flgs = sched_spin_wait(ssi, 0);
+ flgs = sched_spin_wait(ssi, 0);
+ if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ ASSERT(flgs & ERTS_SSI_FLG_WAITING);
+ flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
+ int res;
+ ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- int res;
- ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- current_time = erts_get_monotonic_time();
- do {
- Sint64 timeout;
- if (current_time >= timeout_time)
- break;
- timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time
- - current_time
- - 1) + 1;
- res = erts_tse_twait(ssi->event, timeout);
- current_time = erts_get_monotonic_time();
- } while (res == EINTR);
- }
+ do {
+ res = erts_tse_wait(ssi->event);
+ } while (res == EINTR);
}
- erts_thr_progress_finalize_wait(NULL);
}
- if (current_time >= timeout_time)
- erts_bump_timers(timer_wheel, current_time);
+ erts_thr_progress_finalize_wait(NULL);
}
flgs = sched_prep_spin_wait(ssi);
@@ -2821,7 +2888,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (aux_work) {
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
@@ -2832,9 +2899,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
else {
ErtsMonotonicTime timeout_time;
- timeout_time = erts_check_next_timeout_time(esdp->timer_wheel,
- ERTS_SEC_TO_MONOTONIC(10*60));
- current_time = erts_get_monotonic_time();
+ timeout_time = erts_check_next_timeout_time(esdp);
+ current_time = erts_get_monotonic_time(esdp);
if (current_time >= timeout_time) {
if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
@@ -2860,7 +2926,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
int res;
ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
do {
Sint64 timeout;
if (current_time >= timeout_time)
@@ -2869,7 +2935,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
- current_time
- 1) + 1;
res = erts_tse_twait(ssi->event, timeout);
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
} while (res == EINTR);
}
}
@@ -2944,7 +3010,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ASSERT(!erts_port_task_have_outstanding_io_tasks());
erl_sys_schedule(1); /* Might give us something to do */
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -3062,7 +3128,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erl_sys_schedule(0);
{
- ErtsMonotonicTime current_time = erts_get_monotonic_time();
+ ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
erts_bump_timers(esdp->timer_wheel, current_time);
}
@@ -5273,6 +5339,7 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.completed_callback = NULL;
awdp->dd.completed_arg = NULL;
+ awdp->cncld_tmrs.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->later_op.thr_prgr = ERTS_THR_PRGR_VAL_FIRST;
awdp->later_op.size = 0;
awdp->later_op.first = NULL;
@@ -5338,9 +5405,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->no = (Uint) num;
#endif
- esdp->timer_wheel = erts_default_timer_wheel;
- esdp->next_tmo_ref = erts_get_next_timeout_reference(esdp->timer_wheel);
-
esdp->ssi = ssi;
esdp->current_process = NULL;
esdp->current_port = NULL;
@@ -5353,6 +5417,9 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->run_queue = runq;
esdp->run_queue->scheduler = esdp;
+ esdp->last_monotonic_time = 0;
+ esdp->check_time_reds = 0;
+
esdp->thr_id = (Uint32) num;
erts_sched_bif_unique_init(esdp);
@@ -5916,13 +5983,6 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
int check_emigration_need;
#endif
-#ifdef ERTS_SMP
- if ((p->static_flags & ERTS_STC_FLG_PREFER_SCHED)
- && p->preferred_run_queue != RUNQ_READ_RQ(&p->run_queue)) {
- RUNQ_SET_RQ(&p->run_queue, p->preferred_run_queue);
- }
-#endif
-
a = state;
while (1) {
@@ -6730,6 +6790,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
}
+ (void) erts_get_monotonic_time(esdp);
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
@@ -6863,7 +6924,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
if (aux_work) {
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
@@ -6874,9 +6935,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
}
else {
ErtsMonotonicTime timeout_time;
- timeout_time = erts_check_next_timeout_time(esdp->timer_wheel,
- ERTS_SEC_TO_MONOTONIC(60*60));
- current_time = erts_get_monotonic_time();
+ timeout_time = erts_check_next_timeout_time(esdp);
+ current_time = erts_get_monotonic_time(esdp);
if (current_time >= timeout_time) {
if (!thr_prgr_active) {
@@ -6902,7 +6962,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
| ERTS_SSI_FLG_SUSPENDED)) {
int res;
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
do {
Sint64 timeout;
if (current_time >= timeout_time)
@@ -6911,7 +6971,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
- current_time
- 1) + 1;
res = erts_tse_twait(ssi->event, timeout);
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
} while (res == EINTR);
}
}
@@ -7739,8 +7799,8 @@ sched_thread_func(void *vesdp)
ErtsSchedulerData *esdp = vesdp;
Uint no = esdp->no;
- esdp->timer_wheel = erts_create_timer_wheel((int) no);
- esdp->next_tmo_ref = erts_get_next_timeout_reference(esdp->timer_wheel);
+ erts_sched_init_time_sup(esdp);
+
#ifdef ERTS_SMP
ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = erts_tse_fetch();
callbacks.arg = (void *) esdp->ssi;
@@ -9109,7 +9169,7 @@ Process *schedule(Process *p, int calls)
schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */
proxy_p = NULL;
- ERTS_PROC_REDUCTIONS_EXECUTED(rq,
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
(int) ERTS_PSFLGS_GET_USR_PRIO(state),
reds,
actual_reds);
@@ -9126,12 +9186,9 @@ Process *schedule(Process *p, int calls)
ASSERT(esdp->free_process == p);
esdp->free_process = NULL;
#else
- state = erts_smp_atomic32_read_nob(&p->state);
- if (!(state & ERTS_PSFLG_IN_RUNQ))
- erts_free_proc(p);
+ erts_proc_dec_refc(p);
#endif
}
-
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
#endif
@@ -9139,13 +9196,13 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- {
- ErtsMonotonicTime current_time = erts_get_monotonic_time();
- if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
- erts_smp_runq_unlock(rq);
- erts_bump_timers(esdp->timer_wheel, current_time);
- erts_smp_runq_lock(rq);
- }
+ if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
+ (void) erts_get_monotonic_time(esdp);
+
+ if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) {
+ erts_smp_runq_unlock(rq);
+ erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time);
+ erts_smp_runq_lock(rq);
}
BM_STOP_TIMER(system);
@@ -9305,7 +9362,7 @@ Process *schedule(Process *p, int calls)
erts_smp_runq_unlock(rq);
erl_sys_schedule(1);
- current_time = erts_get_monotonic_time();
+ current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
erts_bump_timers(esdp->timer_wheel, current_time);
@@ -9446,13 +9503,8 @@ Process *schedule(Process *p, int calls)
| ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_ACTIVE_SYS))
== ERTS_PSFLG_SUSPENDED)) {
- if (state & ERTS_PSFLG_FREE) {
-#ifdef ERTS_SMP
- erts_smp_proc_dec_refc(p);
-#else
- erts_free_proc(p);
-#endif
- }
+ if (state & ERTS_PSFLG_FREE)
+ erts_proc_dec_refc(p);
if (proxy_p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9595,6 +9647,20 @@ Process *schedule(Process *p, int calls)
/* Never run a suspended process */
ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
+ ASSERT(erts_proc_read_refc(p) > 0);
+
+ if (ERTS_PTMR_IS_TIMED_OUT(p)) {
+ BeamInstr** pi;
+#ifdef ERTS_SMP
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+#endif
+ pi = (BeamInstr **) p->def_arg_reg;
+ p->i = *pi;
+ p->flags &= ~F_INSLPQUEUE;
+ p->flags |= F_TIMO;
+ ERTS_PTMR_CLEAR(p);
+ }
+
return p;
}
}
@@ -10503,6 +10569,8 @@ erts_free_proc(Process *p)
#ifdef ERTS_SMP
erts_proc_lock_fin(p);
#endif
+ ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
+ ASSERT(0 == erts_proc_read_refc(p));
erts_free(ERTS_ALC_T_PROC, (void *) p);
}
@@ -10555,6 +10623,8 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
return NULL;
}
+ ASSERT(erts_proc_read_refc(p) > 0);
+
ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL);
p->approx_started = erts_get_approx_time();
@@ -10604,10 +10674,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
int ix = so->scheduler-1;
ASSERT(0 <= ix && ix < erts_no_run_queues);
rq = ERTS_RUNQ_IX(ix);
- if (!(so->flags & SPO_PREFER_SCHED)) {
- /* Unsupported feature... */
- state |= ERTS_PSFLG_BOUND;
- }
+ /* Unsupported feature... */
+ state |= ERTS_PSFLG_BOUND;
}
prio = (erts_aint32_t) so->priority;
}
@@ -10615,9 +10683,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
state |= (((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_ACT_PRIO_OFFSET)
| ((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_USR_PRIO_OFFSET));
- if (so->flags & SPO_OFF_HEAP_MSGS)
- state |= ERTS_PSFLG_OFF_HEAP_MSGS;
-
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -10641,12 +10706,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
heap_need = arg_size;
p->flags = erts_default_process_flags;
- if (so->flags & SPO_OFF_HEAP_MSGS)
- p->flags |= F_OFF_HEAP_MSGS;
-#ifdef ERTS_SMP
- p->preferred_run_queue = NULL;
-#endif
p->static_flags = 0;
if (so->flags & SPO_SYSTEM_PROC)
p->static_flags |= ERTS_STC_FLG_SYSTEM_PROC;
@@ -10654,12 +10714,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->min_heap_size = so->min_heap_size;
p->min_vheap_size = so->min_vheap_size;
p->max_gen_gcs = so->max_gen_gcs;
- if (so->flags & SPO_PREFER_SCHED) {
-#ifdef ERTS_SMP
- p->preferred_run_queue = rq;
-#endif
- p->static_flags |= ERTS_STC_FLG_PREFER_SCHED;
- }
} else {
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
@@ -10668,9 +10722,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->schedule_count = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
- p->initial[INITIAL_MOD] = mod;
- p->initial[INITIAL_FUN] = func;
- p->initial[INITIAL_ARI] = (Uint) arity;
+ p->u.initial[INITIAL_MOD] = mod;
+ p->u.initial[INITIAL_FUN] = func;
+ p->u.initial[INITIAL_ARI] = (Uint) arity;
/*
* Must initialize binary lists here before copying binaries to process.
@@ -10711,7 +10765,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/* No need to initialize p->fcalls. */
- p->current = p->initial+INITIAL_MOD;
+ p->current = p->u.initial+INITIAL_MOD;
p->i = (BeamInstr *) beam_apply;
p->cp = (BeamInstr *) beam_apply+1;
@@ -10734,11 +10788,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->ftrace = NIL;
p->reds = 0;
-#ifdef ERTS_SMP
- p->common.u.alive.ptimer = NULL;
-#else
- erts_init_timer(&p->common.u.alive.tm);
-#endif
+ ERTS_PTMR_INIT(p);
p->common.u.alive.reg = NULL;
ERTS_P_LINKS(p) = NULL;
@@ -10769,7 +10819,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->msg_inq.last = &p->msg_inq.first;
p->msg_inq.len = 0;
#endif
- p->u.bif_timers = NULL;
+ p->bif_timers = NULL;
+ p->accessor_bif_timers = NULL;
p->mbuf = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
@@ -10927,11 +10978,7 @@ void erts_init_empty_process(Process *p)
p->bin_old_vheap = 0;
p->sys_task_qs = NULL;
p->bin_vheap_mature = 0;
-#ifdef ERTS_SMP
- p->common.u.alive.ptimer = NULL;
-#else
- erts_init_timer(&p->common.u.alive.tm);
-#endif
+ ERTS_PTMR_INIT(p);
p->next = NULL;
p->off_heap.first = NULL;
p->off_heap.overhead = 0;
@@ -10952,14 +10999,15 @@ void erts_init_empty_process(Process *p)
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
- p->u.bif_timers = NULL;
+ p->bif_timers = NULL;
+ p->accessor_bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
p->seq_trace_token = NIL;
- p->initial[0] = 0;
- p->initial[1] = 0;
- p->initial[2] = 0;
+ p->u.initial[0] = 0;
+ p->u.initial[1] = 0;
+ p->u.initial[2] = 0;
p->catches = 0;
p->cp = NULL;
p->i = NULL;
@@ -11007,7 +11055,6 @@ void erts_init_empty_process(Process *p)
p->pending_suspenders = NULL;
p->pending_exit.reason = THE_NON_VALUE;
p->pending_exit.bp = NULL;
- p->preferred_run_queue = NULL;
erts_proc_lock_init(p);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
@@ -11047,7 +11094,8 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->suspend_monitors == NULL);
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
- ASSERT(p->u.bif_timers == NULL);
+ ASSERT(p->bif_timers == NULL);
+ ASSERT(p->accessor_bif_timers == NULL);
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -11211,7 +11259,6 @@ set_proc_exiting(Process *p,
*/
p->freason = EXTAG_EXIT;
KILL_CATCHES(p);
- cancel_timer(p);
p->i = (BeamInstr *) beam_exit;
if (enqueue)
@@ -11901,6 +11948,7 @@ erts_do_exit_process(Process* p, Eterm reason)
{
p->arity = 0; /* No live registers */
p->fvalue = reason;
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_exit)) {
@@ -11955,20 +12003,20 @@ erts_do_exit_process(Process* p, Eterm reason)
ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS)
== F_INITIAL_TRACE_FLAGS);
- cancel_timer(p); /* Always cancel timer just in case */
-
- if (p->u.bif_timers)
- erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
+ ASSERT(erts_proc_read_refc(p) > 0);
+ if (ERTS_PTMR_IS_SET(p)) {
+ erts_cancel_proc_timer(p);
+ ASSERT(erts_proc_read_refc(p) > 0);
+ }
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
/*
- * The p->u.bif_timers of this process can *not* be used anymore;
+ * p->u.initial of this process can *not* be used anymore;
* will be overwritten by misc termination data.
*/
p->u.terminate = NULL;
-
erts_continue_exit_process(p);
}
@@ -11993,6 +12041,27 @@ erts_continue_exit_process(Process *p)
ASSERT(ERTS_PROC_IS_EXITING(p));
+ ASSERT(erts_proc_read_refc(p) > 0);
+ if (p->bif_timers) {
+ if (erts_cancel_bif_timers(p, p->bif_timers, &p->u.terminate)) {
+ ASSERT(erts_proc_read_refc(p) > 0);
+ goto yield;
+ }
+ ASSERT(erts_proc_read_refc(p) > 0);
+ p->bif_timers = NULL;
+ }
+
+ if (p->accessor_bif_timers) {
+ if (erts_detach_accessor_bif_timers(p,
+ p->accessor_bif_timers,
+ &p->u.terminate)) {
+ ASSERT(erts_proc_read_refc(p) > 0);
+ goto yield;
+ }
+ ASSERT(erts_proc_read_refc(p) > 0);
+ p->accessor_bif_timers = NULL;
+ }
+
#ifdef ERTS_SMP
if (p->flags & F_HAVE_BLCKD_MSCHED) {
ErtsSchedSuspendResult ssr;
@@ -12086,6 +12155,8 @@ erts_continue_exit_process(Process *p)
p->scheduler_data->current_process = NULL;
p->scheduler_data->free_process = p;
+#else
+ erts_proc_inc_refc(p); /* Decremented in schedule() */
#endif
/* Time of death! */
@@ -12104,29 +12175,23 @@ erts_continue_exit_process(Process *p)
{
/* Inactivate and notify free */
erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
-#ifdef ERTS_SMP
int refc_inced = 0;
-#endif
while (1) {
n = e = a;
ASSERT(a & ERTS_PSFLG_EXITING);
n |= ERTS_PSFLG_FREE;
n &= ~ERTS_PSFLG_ACTIVE;
-#ifdef ERTS_SMP
if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
- erts_smp_proc_inc_refc(p);
+ erts_proc_inc_refc(p);
refc_inced = 1;
}
-#endif
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
-#ifdef ERTS_SMP
if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
- erts_smp_proc_dec_refc(p);
-#endif
+ erts_proc_dec_refc(p);
}
dep = ((p->flags & F_DISTRIBUTION)
@@ -12217,64 +12282,6 @@ erts_continue_exit_process(Process *p)
}
-/* Callback for process timeout */
-static void
-timeout_proc(Process* p)
-{
- erts_aint32_t state;
- BeamInstr** pi = (BeamInstr **) p->def_arg_reg;
- p->i = *pi;
- p->flags |= F_TIMO;
- p->flags &= ~F_INSLPQUEUE;
-
- state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
-}
-
-
-void
-cancel_timer(Process* p)
-{
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
- p->flags &= ~(F_INSLPQUEUE|F_TIMO);
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(p->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&p->common.u.alive.tm);
-#endif
-}
-
-/*
- * Insert a process into the time queue, with a timeout 'timeout' in ms.
- */
-void
-set_timer(Process* p, Uint timeout)
-{
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
-
- /* check for special case timeout=0 DONT ADD TO time queue */
- if (timeout == 0) {
- p->flags |= F_TIMO;
- return;
- }
- p->flags |= F_INSLPQUEUE;
- p->flags &= ~F_TIMO;
-
-#ifdef ERTS_SMP
- erts_create_smp_ptimer(&p->common.u.alive.ptimer,
- p->common.id,
- (ErlTimeoutProc) timeout_proc,
- timeout);
-#else
- erts_set_timer(&p->common.u.alive.tm,
- (ErlTimeoutProc) timeout_proc,
- NULL,
- (void*) p,
- timeout);
-#endif
-}
-
/*
* Stack dump functions follow.
*/
@@ -12432,6 +12439,10 @@ erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "FIX_ALLOC_LOWER_LIM"); break;
case ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP:
erts_print(to, to_arg, "THR_PRGR_LATER_OP"); break;
+ case ERTS_SSI_AUX_WORK_CNCLD_TMRS:
+ erts_print(to, to_arg, "CANCELED_TIMERS"); break;
+ case ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR:
+ erts_print(to, to_arg, "CANCELED_TIMERS_THR_PRGR"); break;
case ERTS_SSI_AUX_WORK_ASYNC_READY:
erts_print(to, to_arg, "ASYNC_READY"); break;
case ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN:
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 743711cc3b..b1c30e7652 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -52,7 +52,7 @@ typedef struct process Process;
#include "erl_node_container_utils.h"
#include "erl_node_tables.h"
#include "erl_monitors.h"
-#include "erl_bif_timer.h"
+#include "erl_hl_timer.h"
#include "erl_time.h"
#include "erl_atom_table.h"
#include "external.h"
@@ -278,16 +278,18 @@ typedef enum {
#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4)
#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 6)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 7)
-#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 8)
-#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 9)
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 10)
-#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 11)
-#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 12)
-#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 13)
-
-#define ERTS_SSI_AUX_WORK_MAX 14
+#define ERTS_SSI_AUX_WORK_CNCLD_TMRS (((erts_aint32_t) 1) << 6)
+#define ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR (((erts_aint32_t) 1) << 7)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 8)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 9)
+#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 10)
+#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 11)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 13)
+#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 14)
+#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 15)
+
+#define ERTS_SSI_AUX_WORK_MAX 16
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -463,19 +465,21 @@ typedef union {
extern ErtsAlignedRunQueue *erts_aligned_run_queues;
-#define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \
+#define ERTS_PROC_REDUCTIONS_EXECUTED(SD, RQ, PRIO, REDS, AREDS)\
do { \
(RQ)->procs.reductions += (AREDS); \
(RQ)->procs.prio_info[(PRIO)].reds += (REDS); \
(RQ)->check_balance_reds -= (REDS); \
(RQ)->wakeup_other_reds += (AREDS); \
+ (SD)->check_time_reds += (AREDS); \
} while (0)
-#define ERTS_PORT_REDUCTIONS_EXECUTED(RQ, REDS) \
+#define ERTS_PORT_REDUCTIONS_EXECUTED(SD, RQ, REDS) \
do { \
(RQ)->ports.info.reds += (REDS); \
(RQ)->check_balance_reds -= (REDS); \
(RQ)->wakeup_other_reds += (REDS); \
+ (SD)->check_time_reds += (REDS); \
} while (0)
typedef struct {
@@ -516,6 +520,9 @@ typedef struct {
} dd;
struct {
ErtsThrPrgrVal thr_prgr;
+ } cncld_tmrs;
+ struct {
+ ErtsThrPrgrVal thr_prgr;
UWord size;
ErtsThrPrgrLaterOp *first;
ErtsThrPrgrLaterOp *last;
@@ -566,6 +573,7 @@ struct ErtsSchedulerData_ {
ErtsTimerWheel *timer_wheel;
ErtsNextTimeoutRef next_tmo_ref;
+ ErtsHLTimerService *timer_service;
#ifdef ERTS_SMP
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
@@ -592,6 +600,9 @@ struct ErtsSchedulerData_ {
ErtsAuxWorkData aux_work_data;
ErtsAtomCacheMap atom_cache_map;
+ ErtsMonotonicTime last_monotonic_time;
+ int check_time_reds;
+
Uint32 thr_id;
Uint64 unique;
Uint64 ref;
@@ -913,10 +924,8 @@ struct process {
ErlMessageQueue msg; /* Message queue */
- union {
- ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
- void *terminate;
- } u;
+ ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
+ ErtsBifTimers *accessor_bif_timers; /* Accessor bif timers */
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -927,9 +936,12 @@ struct process {
#ifdef USE_VM_PROBES
Eterm dt_utag; /* Place to store the dynamc trace user tag */
Uint dt_utag_flags; /* flag field for the dt_utag */
-#endif
- BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
+#endif
+ union {
+ void *terminate;
+ BeamInstr initial[3]; /* Initial module(0), function(1), arity(2), often used instead
of pointer to funcinfo instruction, hence the BeamInstr datatype */
+ } u;
BeamInstr* current; /* Current Erlang function, part of the funcinfo:
* module(0), function(1), arity(2)
* (module and functions are tagged atoms;
@@ -975,7 +987,6 @@ struct process {
ErtsSchedulerData *scheduler_data;
Eterm suspendee;
ErtsPendingSuspend *pending_suspenders;
- ErtsRunQueue *preferred_run_queue;
erts_smp_atomic_t run_queue;
#ifdef HIPE
struct hipe_process_state_smp hipe_smp;
@@ -1085,15 +1096,14 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15)
#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16)
#define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17)
-#define ERTS_PSFLG_OFF_HEAP_MSGS ERTS_PSFLG_BIT(18)
#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(19)
-#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(20)
-#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(21)
-#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(22)
-#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 23)
+#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(18)
+#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(19)
+#define ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q ERTS_PSFLG_BIT(20)
+#define ERTS_PSFLG_DIRTY_IO_PROC_IN_Q ERTS_PSFLG_BIT(21)
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 22)
#else
-#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 19)
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 18)
#endif
#define ERTS_PSFLGS_IN_PRQ_MASK (ERTS_PSFLG_IN_PRQ_MAX \
@@ -1112,7 +1122,6 @@ void erts_check_for_holes(Process* p);
* Static flags that do not change after process creation.
*/
#define ERTS_STC_FLG_SYSTEM_PROC (((Uint32) 1) << 0)
-#define ERTS_STC_FLG_PREFER_SCHED (((Uint32) 1) << 1)
/* The sequential tracing token is a tuple of size 5:
*
@@ -1141,9 +1150,7 @@ void erts_check_for_holes(Process* p);
#define SPO_LINK 1
#define SPO_USE_ARGS 2
#define SPO_MONITOR 4
-#define SPO_OFF_HEAP_MSGS 8
-#define SPO_SYSTEM_PROC 16
-#define SPO_PREFER_SCHED 32
+#define SPO_SYSTEM_PROC 8
/*
* The following struct contains options for a process to be spawned.
@@ -1231,7 +1238,6 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
#define F_DISABLE_GC (1 << 11) /* Disable GC */
-#define F_OFF_HEAP_MSGS (1 << 12)
/* process trace_flags */
#define F_SENSITIVE (1 << 0)
@@ -1267,8 +1273,6 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
# define F_INITIAL_TRACE_FLAGS 0
#endif
-
-
#define TRACEE_FLAGS ( F_TRACE_PROCS | F_TRACE_CALLS \
| F_TRACE_SOS | F_TRACE_SOS1| F_TRACE_RECEIVE \
| F_TRACE_SOL | F_TRACE_SOL1 | F_TRACE_SEND \
@@ -1302,12 +1306,14 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define ERTS_XSIG_FLG_IGN_KILL (((Uint32) 1) << 0)
#define ERTS_XSIG_FLG_NO_IGN_NORMAL (((Uint32) 1) << 1)
-#define CANCEL_TIMER(p) \
- do { \
- if ((p)->flags & (F_INSLPQUEUE)) \
- cancel_timer(p); \
- else \
- (p)->flags &= ~F_TIMO; \
+#define CANCEL_TIMER(P) \
+ do { \
+ if ((P)->flags & (F_INSLPQUEUE|F_TIMO)) { \
+ if ((P)->flags & F_INSLPQUEUE) \
+ erts_cancel_proc_timer((P)); \
+ else \
+ (P)->flags &= ~F_TIMO; \
+ } \
} while (0)
#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
@@ -1594,6 +1600,9 @@ Eterm erts_multi_scheduling_blockers(Process *);
void erts_start_schedulers(void);
void erts_alloc_notify_delayed_dealloc(int);
void erts_alloc_ensure_handle_delayed_dealloc_call(int);
+#ifdef ERTS_SMP
+void erts_notify_canceled_timer(ErtsSchedulerData *, int);
+#endif
void erts_smp_notify_check_children_needed(void);
#endif
#if ERTS_USE_ASYNC_READY_Q
@@ -1628,8 +1637,6 @@ void erts_schedule_misc_op(void (*)(void *), void *);
Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*);
void erts_do_exit_process(Process*, Eterm);
void erts_continue_exit_process(Process *);
-void set_timer(Process*, Uint);
-void cancel_timer(Process*);
/* Begin System profile */
Uint erts_runnable_process_count(void);
/* End System profile */
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 82cc68222d..fff267ff2a 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -103,6 +103,7 @@ static struct {
Sint16 proc_lock_main;
Sint16 proc_lock_link;
Sint16 proc_lock_msgq;
+ Sint16 proc_lock_btm;
Sint16 proc_lock_status;
} lc_id;
#endif
@@ -145,6 +146,7 @@ erts_init_proc_lock(int cpus)
lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
+ lc_id.proc_lock_btm = erts_lc_get_lock_order_id("proc_btm");
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
#endif
}
@@ -707,7 +709,7 @@ proc_safelock(int is_managed,
need_locks1 |= unlock_locks;
if (!is_managed && !have_locks1) {
refc1 = 1;
- erts_smp_proc_inc_refc(p1);
+ erts_proc_inc_refc(p1);
}
erts_smp_proc_unlock(p1, unlock_locks);
}
@@ -717,7 +719,7 @@ proc_safelock(int is_managed,
need_locks2 |= unlock_locks;
if (!is_managed && !have_locks2) {
refc2 = 1;
- erts_smp_proc_inc_refc(p2);
+ erts_proc_inc_refc(p2);
}
erts_smp_proc_unlock(p2, unlock_locks);
}
@@ -798,9 +800,9 @@ proc_safelock(int is_managed,
if (!is_managed) {
if (refc1)
- erts_smp_proc_dec_refc(p1);
+ erts_proc_dec_refc(p1);
if (refc2)
- erts_smp_proc_dec_refc(p2);
+ erts_proc_dec_refc(p2);
}
}
@@ -861,8 +863,8 @@ erts_pid2proc_opt(Process *c_p,
return NULL;
need_locks &= ~c_p_have_locks;
if (!need_locks) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(c_p);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(c_p);
return c_p;
}
}
@@ -875,8 +877,8 @@ erts_pid2proc_opt(Process *c_p,
if (proc->common.id != pid)
proc = NULL;
else if (!need_locks) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(proc);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(proc);
}
else {
int busy;
@@ -916,8 +918,8 @@ erts_pid2proc_opt(Process *c_p,
#endif
if (!busy) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(proc);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
/* all is great */
@@ -932,8 +934,8 @@ erts_pid2proc_opt(Process *c_p,
proc = ERTS_PROC_LOCK_BUSY;
else {
int managed;
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- erts_smp_proc_inc_refc(proc);
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
@@ -941,7 +943,7 @@ erts_pid2proc_opt(Process *c_p,
managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
if (!managed) {
- erts_smp_proc_inc_refc(proc);
+ erts_proc_inc_refc(proc);
erts_thr_progress_unmanaged_continue(dhndl);
dec_refc_proc = proc;
@@ -978,14 +980,14 @@ erts_pid2proc_opt(Process *c_p,
erts_smp_proc_unlock(proc, need_locks);
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ if (flags & ERTS_P2P_FLG_INC_REFC)
dec_refc_proc = proc;
proc = NULL;
}
if (dec_refc_proc)
- erts_smp_proc_dec_refc(dec_refc_proc);
+ erts_proc_dec_refc(dec_refc_proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
ERTS_LC_ASSERT(!proc
@@ -1038,6 +1040,11 @@ erts_proc_lock_init(Process *p)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
+ erts_mtx_init_x(&p->lock.btm, "proc_btm", p->common.id, do_lock_count);
+ ethr_mutex_lock(&p->lock.btm.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.btm.lc);
+#endif
erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id,
do_lock_count);
ethr_mutex_lock(&p->lock.status.mtx);
@@ -1045,7 +1052,6 @@ erts_proc_lock_init(Process *p)
erts_lc_trylock(1, &p->lock.status.lc);
#endif
#endif
- erts_atomic32_init_nob(&p->lock.refc, 1);
#ifdef ERTS_PROC_LOCK_DEBUG
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
@@ -1064,6 +1070,7 @@ erts_proc_lock_fin(Process *p)
erts_mtx_destroy(&p->lock.main);
erts_mtx_destroy(&p->lock.link);
erts_mtx_destroy(&p->lock.msgq);
+ erts_mtx_destroy(&p->lock.btm);
erts_mtx_destroy(&p->lock.status);
#endif
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
@@ -1079,17 +1086,20 @@ void erts_lcnt_proc_lock_init(Process *p) {
if (p->common.id != ERTS_INVALID_PID) {
erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id);
erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, p->common.id);
erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->common.id);
erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id);
} else {
erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK);
erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK);
+ erts_lcnt_init_lock(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK);
erts_lcnt_init_lock(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK);
erts_lcnt_init_lock(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK);
}
} else {
sys_memzero(&(p->lock.lcnt_main), sizeof(p->lock.lcnt_main));
sys_memzero(&(p->lock.lcnt_msgq), sizeof(p->lock.lcnt_msgq));
+ sys_memzero(&(p->lock.lcnt_btm), sizeof(p->lock.lcnt_btm));
sys_memzero(&(p->lock.lcnt_link), sizeof(p->lock.lcnt_link));
sys_memzero(&(p->lock.lcnt_status), sizeof(p->lock.lcnt_status));
}
@@ -1099,6 +1109,7 @@ void erts_lcnt_proc_lock_init(Process *p) {
void erts_lcnt_proc_lock_destroy(Process *p) {
erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_btm));
erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
}
@@ -1111,6 +1122,9 @@ void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (locks & ERTS_PROC_LOCK_MSGQ) {
erts_lcnt_lock(&(lock->lcnt_msgq));
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock(&(lock->lcnt_btm));
+ }
if (locks & ERTS_PROC_LOCK_LINK) {
erts_lcnt_lock(&(lock->lcnt_link));
}
@@ -1128,6 +1142,9 @@ void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks, cha
if (locks & ERTS_PROC_LOCK_MSGQ) {
erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_btm), file, line);
+ }
if (locks & ERTS_PROC_LOCK_LINK) {
erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
}
@@ -1145,6 +1162,9 @@ void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (locks & ERTS_PROC_LOCK_MSGQ) {
erts_lcnt_lock_unaquire(&(lock->lcnt_msgq));
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_lock_unaquire(&(lock->lcnt_btm));
+ }
if (locks & ERTS_PROC_LOCK_LINK) {
erts_lcnt_lock_unaquire(&(lock->lcnt_link));
}
@@ -1162,6 +1182,9 @@ void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (locks & ERTS_PROC_LOCK_MSGQ) {
erts_lcnt_unlock(&(lock->lcnt_msgq));
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_unlock(&(lock->lcnt_btm));
+ }
if (locks & ERTS_PROC_LOCK_LINK) {
erts_lcnt_unlock(&(lock->lcnt_link));
}
@@ -1178,6 +1201,9 @@ void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res
if (locks & ERTS_PROC_LOCK_MSGQ) {
erts_lcnt_trylock(&(lock->lcnt_msgq), res);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ erts_lcnt_trylock(&(lock->lcnt_btm), res);
+ }
if (locks & ERTS_PROC_LOCK_LINK) {
erts_lcnt_trylock(&(lock->lcnt_link), res);
}
@@ -1235,6 +1261,10 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
lck.id = lc_id.proc_lock_msgq;
erts_lc_lock_x(&lck,file,line);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_lock_x(&lck,file,line);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_lock_x(&lck,file,line);
@@ -1260,6 +1290,10 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
lck.id = lc_id.proc_lock_msgq;
erts_lc_trylock_x(locked, &lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_trylock_x(locked, &lck, file, line);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_trylock_x(locked, &lck, file, line);
@@ -1276,6 +1310,10 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_status;
erts_lc_unlock(&lck);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_unlock(&lck);
@@ -1303,6 +1341,10 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_status;
erts_lc_might_unlock(&lck);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_might_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_might_unlock(&lck);
@@ -1322,6 +1364,8 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
erts_lc_might_unlock(&p->lock.link.lc);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_might_unlock(&p->lock.msgq.lc);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_lc_might_unlock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_might_unlock(&p->lock.status.lc);
#endif
@@ -1347,6 +1391,10 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
lck.id = lc_id.proc_lock_msgq;
erts_lc_require_lock(&lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_require_lock(&lck, file, line);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_require_lock(&lck, file, line);
@@ -1358,6 +1406,8 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
erts_lc_require_lock(&p->lock.link.lc, file, line);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_require_lock(&p->lock.msgq.lc, file, line);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_lc_require_lock(&p->lock.btm.lc, file, line);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_require_lock(&p->lock.status.lc, file, line);
#endif
@@ -1374,6 +1424,10 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_status;
erts_lc_unrequire_lock(&lck);
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ lck.id = lc_id.proc_lock_btm;
+ erts_lc_unrequire_lock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
lck.id = lc_id.proc_lock_msgq;
erts_lc_unrequire_lock(&lck);
@@ -1393,6 +1447,8 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
erts_lc_unrequire_lock(&p->lock.link.lc);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_lc_unrequire_lock(&p->lock.msgq.lc);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_lc_unrequire_lock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_unrequire_lock(&p->lock.status.lc);
#endif
@@ -1414,6 +1470,8 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_link;
else if (locks & ERTS_PROC_LOCK_MSGQ)
lck.id = lc_id.proc_lock_msgq;
+ else if (locks & ERTS_PROC_LOCK_BTM)
+ lck.id = lc_id.proc_lock_btm;
else if (locks & ERTS_PROC_LOCK_STATUS)
lck.id = lc_id.proc_lock_status;
else
@@ -1448,7 +1506,8 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
{
int have_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
@@ -1464,18 +1523,24 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
have_locks[have_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_btm;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
have_locks[have_locks_len++].extra = p->common.id;
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[4];
+ erts_lc_lock_t have_locks[5];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
if (locks & ERTS_PROC_LOCK_LINK)
have_locks[have_locks_len++] = p->lock.link.lc;
if (locks & ERTS_PROC_LOCK_MSGQ)
have_locks[have_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ have_locks[have_locks_len++] = p->lock.btm.lc;
if (locks & ERTS_PROC_LOCK_STATUS)
have_locks[have_locks_len++] = p->lock.status.lc;
#endif
@@ -1488,11 +1553,11 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
int have_locks_len = 0;
int have_not_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
- erts_lc_lock_t have_not_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_not_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
@@ -1521,6 +1586,14 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
have_not_locks[have_not_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_btm;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_btm;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
have_locks[have_locks_len++].extra = p->common.id;
@@ -1530,8 +1603,8 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_not_locks[have_not_locks_len++].extra = p->common.id;
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[4];
- erts_lc_lock_t have_not_locks[4];
+ erts_lc_lock_t have_locks[5];
+ erts_lc_lock_t have_not_locks[5];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
@@ -1545,6 +1618,10 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len++] = p->lock.msgq.lc;
else
have_not_locks[have_not_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ have_locks[have_locks_len++] = p->lock.btm.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.btm.lc;
if (locks & ERTS_PROC_LOCK_STATUS)
have_locks[have_locks_len++] = p->lock.status.lc;
else
@@ -1558,10 +1635,10 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
ErtsProcLocks
erts_proc_lc_my_proc_locks(Process *p)
{
- int resv[4];
+ int resv[5];
ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
+ erts_lc_lock_t locks[5] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
@@ -1570,17 +1647,21 @@ erts_proc_lc_my_proc_locks(Process *p)
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_btm,
+ p->common.id,
+ ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t locks[4] = {p->lock.main.lc,
+ erts_lc_lock_t locks[5] = {p->lock.main.lc,
p->lock.link.lc,
p->lock.msgq.lc,
+ p->lock.btm.lc,
p->lock.status.lc};
#endif
- erts_lc_have_locks(resv, locks, 4);
+ erts_lc_have_locks(resv, locks, 5);
if (resv[0])
res |= ERTS_PROC_LOCK_MAIN;
if (resv[1])
@@ -1588,6 +1669,8 @@ erts_proc_lc_my_proc_locks(Process *p)
if (resv[2])
res |= ERTS_PROC_LOCK_MSGQ;
if (resv[3])
+ res |= ERTS_PROC_LOCK_BTM;
+ if (resv[4])
res |= ERTS_PROC_LOCK_STATUS;
return res;
@@ -1596,13 +1679,14 @@ erts_proc_lc_my_proc_locks(Process *p)
void
erts_proc_lc_chk_no_proc_locks(char *file, int line)
{
- int resv[4];
- int ids[4] = {lc_id.proc_lock_main,
+ int resv[5];
+ int ids[5] = {lc_id.proc_lock_main,
lc_id.proc_lock_link,
lc_id.proc_lock_msgq,
+ lc_id.proc_lock_btm,
lc_id.proc_lock_status};
- erts_lc_have_lock_ids(resv, ids, 4);
- if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3])) {
+ erts_lc_have_lock_ids(resv, ids, 5);
+ if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4])) {
erts_lc_fail("%s:%d: Thread has process locks locked when expected "
"not to have any process locks locked",
file, line);
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 052d992d3f..8957e7773b 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -65,7 +65,7 @@
#endif
-#define ERTS_PROC_LOCK_MAX_BIT 3
+#define ERTS_PROC_LOCK_MAX_BIT 4
typedef erts_aint32_t ErtsProcLocks;
@@ -81,17 +81,18 @@ typedef struct erts_proc_lock_t_ {
erts_lcnt_lock_t lcnt_main;
erts_lcnt_lock_t lcnt_link;
erts_lcnt_lock_t lcnt_msgq;
+ erts_lcnt_lock_t lcnt_btm;
erts_lcnt_lock_t lcnt_status;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
erts_mtx_t link;
erts_mtx_t msgq;
+ erts_mtx_t btm;
erts_mtx_t status;
#else
# error "no implementation"
#endif
- erts_atomic32_t refc;
#ifdef ERTS_PROC_LOCK_DEBUG
erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
#endif
@@ -120,11 +121,17 @@ typedef struct erts_proc_lock_t_ {
* Message queue lock:
* Protects the following fields in the process structure:
* * msg_inq
- * * bif_timers
*/
#define ERTS_PROC_LOCK_MSGQ (((ErtsProcLocks) 1) << 2)
/*
+ * Bif timer lock:
+ * Protects the following fields in the process structure:
+ * * bif_timers
+ */
+#define ERTS_PROC_LOCK_BTM (((ErtsProcLocks) 1) << 3)
+
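A brief aside on usage: the patch moves bif_timers out from under the message queue lock
and gives the field this dedicated lock bit. A minimal sketch of how a caller might guard
the field, assuming only the lock/unlock helpers already used elsewhere in this diff (the
surrounding caller code is hypothetical, not part of the patch):

    /* Hypothetical caller inspecting the bif timer list of process p. */
    erts_smp_proc_lock(p, ERTS_PROC_LOCK_BTM);
    if (p->bif_timers) {
        /* ... read or modify the timer structures here ... */
    }
    erts_smp_proc_unlock(p, ERTS_PROC_LOCK_BTM);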
+/*
* Status lock:
* Protects the following fields in the process structure:
* * pending_suspenders
@@ -463,6 +470,9 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_MSGQ)
if (erts_mtx_trylock(&p->lock.msgq) == EBUSY)
goto busy_msgq;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ if (erts_mtx_trylock(&p->lock.btm) == EBUSY)
+ goto busy_btm;
if (locks & ERTS_PROC_LOCK_STATUS)
if (erts_mtx_trylock(&p->lock.status) == EBUSY)
goto busy_status;
@@ -470,6 +480,9 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
return 0;
busy_status:
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_mtx_unlock(&p->lock.btm);
+busy_btm:
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_unlock(&p->lock.msgq);
busy_msgq:
@@ -549,6 +562,8 @@ erts_smp_proc_lock__(Process *p,
erts_mtx_lock(&p->lock.link);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_lock(&p->lock.msgq);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_mtx_lock(&p->lock.btm);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_lock(&p->lock.status);
@@ -638,6 +653,8 @@ erts_smp_proc_unlock__(Process *p,
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_unlock(&p->lock.status);
+ if (locks & ERTS_PROC_LOCK_BTM)
+ erts_mtx_unlock(&p->lock.btm);
if (locks & ERTS_PROC_LOCK_MSGQ)
erts_mtx_unlock(&p->lock.msgq);
if (locks & ERTS_PROC_LOCK_LINK)
@@ -752,9 +769,10 @@ ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks);
ERTS_GLB_INLINE void erts_smp_proc_unlock(Process *, ErtsProcLocks);
ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks);
-ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *);
-ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *);
-ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *, Sint32);
+ERTS_GLB_INLINE void erts_proc_inc_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_dec_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_add_refc(Process *, Sint);
+ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -814,28 +832,59 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
#endif
}
-ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p)
+ERTS_GLB_INLINE void erts_proc_inc_refc(Process *p)
{
+ ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
#ifdef ERTS_SMP
+ erts_ptab_atmc_inc_refc(&p->common);
+#else
erts_ptab_inc_refc(&p->common);
#endif
}
-ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
+ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p)
{
+ Sint referred;
+ ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
#ifdef ERTS_SMP
- int referred = erts_ptab_dec_test_refc(&p->common);
- if (!referred)
- erts_free_proc(p);
+ referred = erts_ptab_atmc_dec_test_refc(&p->common);
+#else
+ referred = erts_ptab_dec_test_refc(&p->common);
#endif
+ if (!referred) {
+ ASSERT(ERTS_PROC_IS_EXITING(p));
+ ASSERT(ERTS_AINT_NULL
+ == erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(p->common.id)));
+ erts_free_proc(p);
+ }
}
-ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 add_refc)
+ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
{
+ Sint referred;
+ ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
#ifdef ERTS_SMP
- int referred = erts_ptab_add_test_refc(&p->common, add_refc);
- if (!referred)
+ referred = erts_ptab_atmc_add_test_refc(&p->common, add_refc);
+#else
+ referred = erts_ptab_add_test_refc(&p->common, add_refc);
+#endif
+ if (!referred) {
+ ASSERT(ERTS_PROC_IS_EXITING(p));
+ ASSERT(ERTS_AINT_NULL
+ == erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(p->common.id)));
erts_free_proc(p);
+ }
+}
+
+ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *p)
+{
+ ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+#ifdef ERTS_SMP
+ return erts_ptab_atmc_read_refc(&p->common);
+#else
+ return erts_ptab_read_refc(&p->common);
#endif
}
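The four helpers above replace the old erts_smp_proc_* reference-count API and now do real
counting in both SMP and non-SMP builds. As a rough sketch of the intended pattern, assuming
a caller that hands a Process pointer to work that may run after the process has exited
(schedule_deferred_work and its completion callback are hypothetical names; only
erts_proc_inc_refc and erts_proc_dec_refc come from this patch):

    static void defer_work_on_process(Process *proc)
    {
        erts_proc_inc_refc(proc);        /* keep the struct alive */
        schedule_deferred_work(proc);    /* hypothetical asynchronous work */
    }

    static void deferred_work_done(Process *proc)
    {
        /* ... use proc; it may be exiting, but the memory is still valid ... */
        erts_proc_dec_refc(proc);        /* frees the struct on the last reference */
    }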
@@ -868,7 +917,7 @@ void erts_proc_safelock(Process *a_proc,
#define ERTS_P2P_FLG_ALLOW_OTHER_X (1 << 0)
#define ERTS_P2P_FLG_TRY_LOCK (1 << 1)
-#define ERTS_P2P_FLG_SMP_INC_REFC (1 << 2)
+#define ERTS_P2P_FLG_INC_REFC (1 << 2)
#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_invalid_process)
@@ -928,11 +977,14 @@ erts_pid2proc_opt(Process *c_p_unused,
int flags)
{
Process *proc = erts_proc_lookup_raw(pid);
- return ((!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- && proc
- && ERTS_PROC_IS_EXITING(proc))
- ? NULL
- : proc);
+ if (!proc)
+ return NULL;
+ if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ && ERTS_PROC_IS_EXITING(proc))
+ return NULL;
+ if (flags & ERTS_P2P_FLG_INC_REFC)
+ erts_proc_inc_refc(proc);
+ return proc;
}
#endif /* !ERTS_SMP */
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
index 02943ee683..c688db98d8 100644
--- a/erts/emulator/beam/erl_ptab.c
+++ b/erts/emulator/beam/erl_ptab.c
@@ -360,7 +360,8 @@ erts_ptab_init_table(ErtsPTab *ptab,
int size,
UWord element_size,
char *name,
- int legacy)
+ int legacy,
+ int atomic_refc)
{
size_t tab_sz, alloc_sz;
Uint32 bits, cl, cli, ix, ix_per_cache_line, tab_cache_lines;
@@ -415,6 +416,8 @@ erts_ptab_init_table(ErtsPTab *ptab,
ptab->r.o.invalid_data = erts_ptab_id2data(ptab, invalid_element->id);
ptab->r.o.release_element = release_element;
+ ptab->r.o.atomic_refc = atomic_refc;
+
if (legacy) {
ptab->r.o.free_id_data = NULL;
ptab->r.o.dix_cl_mask = 0;
@@ -533,9 +536,10 @@ erts_ptab_new_element(ErtsPTab *ptab,
init_ptab_el(init_arg, (Eterm) data);
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
-#endif
+ if (ptab->r.o.atomic_refc)
+ erts_atomic_init_nob(&ptab_el->refc.atmc, 1);
+ else
+ ptab_el->refc.sint = 1;
pix = erts_ptab_data2pix(ptab, (Eterm) data);
@@ -608,9 +612,10 @@ erts_ptab_new_element(ErtsPTab *ptab,
init_ptab_el(init_arg, data);
-#ifdef ERTS_SMP
- erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
-#endif
+ if (ptab->r.o.atomic_refc)
+ erts_atomic_init_nob(&ptab_el->refc.atmc, 1);
+ else
+ ptab_el->refc.sint = 1;
/* Move into slot reserved */
#ifdef DEBUG
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index 876241159b..102d41e07f 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -51,11 +51,13 @@
typedef struct {
Eterm id;
-#ifdef ERTS_SMP
- erts_atomic32_t refc;
-#endif
+ union {
+ erts_atomic_t atmc;
+ Sint sint;
+ } refc;
Eterm tracer_proc;
Uint trace_flags;
+ erts_smp_atomic_t timer;
union {
/* --- While being alive --- */
struct {
@@ -63,11 +65,6 @@ typedef struct {
struct reg_proc *reg;
ErtsLink *links;
ErtsMonitor *monitors;
-#ifdef ERTS_SMP
- ErtsSmpPTimer *ptimer;
-#else
- ErlTimer tm;
-#endif
} alive;
/* --- While being released --- */
@@ -111,6 +108,7 @@ typedef struct {
Eterm invalid_data;
void (*release_element)(void *);
UWord element_size;
+ int atomic_refc;
} ErtsPTabReadOnlyData;
typedef struct {
@@ -181,7 +179,8 @@ void erts_ptab_init_table(ErtsPTab *ptab,
int size,
UWord element_size,
char *name,
- int legacy);
+ int legacy,
+ int atomic_refc);
int erts_ptab_new_element(ErtsPTab *ptab,
ErtsPTabElementCommon *ptab_el,
void *init_arg,
@@ -206,9 +205,15 @@ ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix);
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix);
ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix);
ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el);
-ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el);
-ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
- Sint32 add_refc);
+ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc);
+ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc);
+ERTS_GLB_INLINE Sint erts_ptab_atmc_read_refc(ErtsPTabElementCommon *ptab_el);
ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab);
ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab);
ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab);
@@ -365,50 +370,65 @@ ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix)
return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]);
}
-ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
+ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el)
{
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_aint32_t refc = erts_atomic32_inc_read_nob(&ptab_el->refc);
- ERTS_SMP_LC_ASSERT(refc > 1);
+ erts_aint_t refc = erts_atomic_inc_read_nob(&ptab_el->refc.atmc);
+ ERTS_LC_ASSERT(refc > 1);
#else
- erts_atomic32_inc_nob(&ptab_el->refc);
-#endif
+ erts_atomic_inc_nob(&ptab_el->refc.atmc);
#endif
}
-ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
+ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el)
{
-#ifdef ERTS_SMP
- erts_aint32_t refc = erts_atomic32_dec_read_nob(&ptab_el->refc);
+ erts_aint_t refc = erts_atomic_dec_read_relb(&ptab_el->refc.atmc);
ERTS_SMP_LC_ASSERT(refc >= 0);
- return (int) refc;
-#else
- return 0;
+#ifdef ERTS_SMP
+ if (refc == 0)
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
#endif
+ return (Sint) refc;
}
-ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
- Sint32 add_refc)
+ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc)
{
-#ifdef ERTS_SMP
- erts_aint32_t refc;
+ erts_aint_t refc = erts_atomic_add_read_mb(&ptab_el->refc.atmc,
+ (erts_aint_t) add_refc);
+ ERTS_SMP_LC_ASSERT(refc >= 0);
+ return (Sint) refc;
+}
-#ifndef ERTS_ENABLE_LOCK_CHECK
- if (add_refc >= 0) {
- erts_atomic32_add_nob(&ptab_el->refc,
- (erts_aint32_t) add_refc);
- return 1;
- }
-#endif
+ERTS_GLB_INLINE Sint erts_ptab_atmc_read_refc(ErtsPTabElementCommon *ptab_el)
+{
+ return (Sint) erts_atomic_read_nob(&ptab_el->refc.atmc);
+}
+
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
+{
+ ptab_el->refc.sint++;
+ ASSERT(ptab_el->refc.sint > 1);
+}
- refc = erts_atomic32_add_read_nob(&ptab_el->refc,
- (erts_aint32_t) add_refc);
+ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
+{
+ Sint refc = --ptab_el->refc.sint;
ERTS_SMP_LC_ASSERT(refc >= 0);
- return (int) refc;
-#else
- return 0;
-#endif
+ return refc;
+}
+
+ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint add_refc)
+{
+ ptab_el->refc.sint += add_refc;
+ ERTS_SMP_LC_ASSERT(ptab_el->refc.sint >= 0);
+ return (Sint) ptab_el->refc.sint;
+}
+
+ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el)
+{
+ return ptab_el->refc.sint;
}
ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab)
diff --git a/erts/emulator/beam/erl_rbtree.h b/erts/emulator/beam/erl_rbtree.h
new file mode 100644
index 0000000000..ea0a8976bb
--- /dev/null
+++ b/erts/emulator/beam/erl_rbtree.h
@@ -0,0 +1,1740 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2015. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A Red-Black (binary search) Tree implementation. The search,
+ * insert, and delete operations are all O(log n) operations
+ * on a Red-Black Tree. Red-Black Trees are described in
+ * "Introduction to Algorithms", by Thomas H. Cormen, Charles
+ * E. Leiserson, and Ronald L. Rivest.
+ *
+ * Use by defining mandatory defines as well as defines for
+ * API functions wanted, and include this header.
+ *
+ * Author: Rickard Green
+ *
+ *
+ * Mandatory defines:
+ * - ERTS_RBT_PREFIX - Prefix to use on functions.
+ * - ERTS_RBT_T - Type of a tree node.
+ * - ERTS_RBT_KEY_T - Type of key for a tree node.
+ * - ERTS_RBT_FLAGS_T - Type of flags for a tree node.
+ * - ERTS_RBT_INIT_EMPTY_TNODE(T) - Initialize an empty tree node.
+ * - ERTS_RBT_IS_RED(T) - Is tree node red?
+ * - ERTS_RBT_SET_RED(T) - Set tree node red.
+ * - ERTS_RBT_IS_BLACK(T) - Is tree node black?
+ * - ERTS_RBT_SET_BLACK(T) - Set tree node black.
+ * - ERTS_RBT_GET_FLAGS(T) - Get flags of tree node (incl colors).
+ * - ERTS_RBT_SET_FLAGS(T, F) - Set flags of tree node.
+ * - ERTS_RBT_GET_PARENT(T) - Get parent node.
+ * - ERTS_RBT_SET_PARENT(T, P) - Set parent node.
+ * - ERTS_RBT_GET_RIGHT(T) - Get right child node.
+ * - ERTS_RBT_SET_RIGHT(T, R) - Set right child node.
+ * - ERTS_RBT_GET_LEFT(T) - Get left child node.
+ * - ERTS_RBT_SET_LEFT(T, L) - Set left child node.
+ * - ERTS_RBT_GET_KEY(T) - Get key of node.
+ * - ERTS_RBT_IS_LT(KX, KY) - Is key KX less than key KY?
+ * - ERTS_RBT_IS_EQ(KX, KY) - Is key KX equal to key KY?
+ *
+ * Optional defines:
+ *
+ * - ERTS_RBT_UNDEF - Undefine all user defined ERTS_RBT_*
+ * defines after use.
+ *
+ * - ERTS_RBT_NO_API_INLINE - Do not inline API functions.
+ *
+ * Attached data management:
+ * - ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE(L, OP, NP) - Called
+ * when a rotate operation has been performed. If L (an int)
+ * is non-zero, a left rotation was performed; otherwise,
+ * a right rotation was performed. OP points to the old
+ * parent node and NP points to the new parent node.
+ * - ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD(F, T) - Called when
+ * a delete operation modifies a tree node. A delete
+ * modification is either a removal or replacement of a
+ * node. F points to the parent of the tree node that was
+ * modified. T points to the next ancestor that will be
+ * modified. If T is NULL, no more removals and/or
+ * replacements will be made. One typically wants to update
+ * the attached data of each node between F and T; if T is
+ * NULL, all the way up to the root.
+ * - ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(OR, NR) - Called
+ * when the root node changes. OR points to the old
+ * root node and NR points to the new root node.
+ *
+ * Request implementation of API functions:
+ * - ERTS_RBT_WANT_DELETE
+ * - ERTS_RBT_WANT_INSERT
+ * - ERTS_RBT_WANT_LOOKUP_INSERT
+ * - ERTS_RBT_WANT_REPLACE
+ * - ERTS_RBT_WANT_LOOKUP
+ * - ERTS_RBT_WANT_SMALLEST
+ * - ERTS_RBT_WANT_LARGEST
+ * - ERTS_RBT_WANT_FOREACH
+ * - ERTS_RBT_WANT_FOREACH_DESTROY
+ * - ERTS_RBT_WANT_FOREACH_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_SMALL
+ * - ERTS_RBT_WANT_FOREACH_LARGE
+ * - ERTS_RBT_WANT_FOREACH_SMALL_DESTROY
+ * - ERTS_RBT_WANT_FOREACH_LARGE_DESTROY
+ * - ERTS_RBT_WANT_FOREACH_SMALL_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_LARGE_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING
+ * - ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING
+ * - ERTS_RBT_WANT_DEBUG_PRINT
+ *
+ * The yield state data type will equal
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t.
+ *
+ * The yield state should be statically initialized by
+ * ERTS_RBT_YIELD_STAT_INITER.
+ *
+ *
+ * The following API functions are implemented if corresponding
+ * ERTS_RBT_WANT_<OPERATION> is defined:
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_delete(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *element);
+ * Delete element from tree.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_insert(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *element);
+ * Insert element into tree.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_lookup_insert(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *element);
+ * Look up an element in the tree that compares as equal to the
+ * element passed as argument, and return the looked up element.
+ * If no element compares as equal, insert the element passed as
+ * argument into the tree, and return NULL.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_replace(
+ * ERTS_RBT_T **tree,
+ * ERTS_RBT_T *old_element,
+ * ERTS_RBT_T *new_element);
+ * Replace old_element in the tree with new_element. Both elements
+ * *should* compare as equal.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_lookup(
+ * ERTS_RBT_T *tree,
+ * ERTS_RBT_KEY_T key);
+ * Look up an element with a key that compares as equal to
+ * the key passed as argument.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_smallest(
+ * ERTS_RBT_T *tree);
+ * Look up the element with the smallest key.
+ *
+ * - ERTS_RBT_T * <ERTS_RBT_PREFIX>_rbt_largest(
+ * ERTS_RBT_T *tree);
+ * Look up the element with the largest key.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_destroy(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined. Each element should be destroyed
+ * by 'op'.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined.
+ *
+ * Yield when 'ylimit' elements have been processed. Zero is
+ * returned when yielding, and a non-zero value is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_destroy_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element.
+ * Order is undefined. Each element should be destroyed
+ * by 'op'.
+ *
+ * Yield when 'ylimit' elements have been processed. Zero is
+ * returned when yielding, and a non-zero value is returned when
+ * the whole tree has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_small(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_large(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_small_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * Yield when 'ylimit' elements have been processed. Zero is
+ * returned when yielding, and a non-zero value is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_large_yielding(
+ * ERTS_RBT_T *tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * Yield when 'ylimit' elements have been processed. Zero is
+ * returned when yielding, and a non-zero value is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_small_destroy(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often *not* destroyed in the same
+ * order as the order in which they are operated on.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_foreach_large_destroy(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often destroyed in a different order
+ * than the order in which they are operated on.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_small_destroy_yielding(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * smallest towards larger elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often destroyed in a different order
+ * than the order in which they are operated on.
+ *
+ * Yield when 'ylimit' elements have been processed. Zero is
+ * returned when yielding, and a non-zero value is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - int <ERTS_RBT_PREFIX>_rbt_foreach_large_destroy_yielding(
+ * ERTS_RBT_T **tree,
+ * void (*op)(ERTS_RBT_T *, void *),
+ * void (*destr)(ERTS_RBT_T *, void *),
+ * void *arg,
+ * <ERTS_RBT_PREFIX>_rbt_yield_state_t *ystate,
+ * Sint ylimit);
+ * Operate by calling the operator 'op' on each element from
+ * largest towards smaller elements.
+ *
+ * Destroy elements by calling the destructor 'destr'. Elements
+ * are destroyed when not needed by the tree structure anymore.
+ * Note that elements are often destroyed in a different order
+ * than the order in which they are operated on.
+ *
+ * Yield when 'ylimit' elements have been processed. Zero is
+ * returned when yielding, and a non-zero value is returned when
+ * the whole tree has been processed. The tree should not be
+ * modified until all of it has been processed.
+ *
+ * 'arg' is passed as argument to 'op' and 'destr'.
+ *
+ * - void <ERTS_RBT_PREFIX>_rbt_debug_print(
+ * FILE *filep,
+ * ERTS_RBT_T *x,
+ * int indent,
+ * void (*print_node)(ERTS_RBT_T *));
+ * Prints the tree. Note that this function is recursive.
+ * Should only be used for debugging.
+ */
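To make the contract above concrete, here is a minimal instantiation sketch. It is not part
of this patch: the node type, its field layout, and the key type are invented for
illustration; only the ERTS_RBT_* define names and the generated <prefix>_rbt_* function
names come from the documentation above.

    #if 0 /* illustrative only, never compiled */
    typedef struct my_node_ {
        struct my_node_ *parent;
        struct my_node_ *left;
        struct my_node_ *right;
        Uint flags;                     /* bit 0 holds the red/black color */
        Sint64 key;
    } my_node;

    #define ERTS_RBT_PREFIX my
    #define ERTS_RBT_T my_node
    #define ERTS_RBT_KEY_T Sint64
    #define ERTS_RBT_FLAGS_T Uint
    #define ERTS_RBT_INIT_EMPTY_TNODE(T) \
        do { (T)->parent = (T)->left = (T)->right = NULL; (T)->flags = 0; } while (0)
    #define ERTS_RBT_IS_RED(T) ((T)->flags & 1)
    #define ERTS_RBT_SET_RED(T) ((T)->flags |= 1)
    #define ERTS_RBT_IS_BLACK(T) (!ERTS_RBT_IS_RED(T))
    #define ERTS_RBT_SET_BLACK(T) ((T)->flags &= ~((Uint) 1))
    #define ERTS_RBT_GET_FLAGS(T) ((T)->flags)
    #define ERTS_RBT_SET_FLAGS(T, F) ((T)->flags = (F))
    #define ERTS_RBT_GET_PARENT(T) ((T)->parent)
    #define ERTS_RBT_SET_PARENT(T, P) ((T)->parent = (P))
    #define ERTS_RBT_GET_RIGHT(T) ((T)->right)
    #define ERTS_RBT_SET_RIGHT(T, R) ((T)->right = (R))
    #define ERTS_RBT_GET_LEFT(T) ((T)->left)
    #define ERTS_RBT_SET_LEFT(T, L) ((T)->left = (L))
    #define ERTS_RBT_GET_KEY(T) ((T)->key)
    #define ERTS_RBT_IS_LT(KX, KY) ((KX) < (KY))
    #define ERTS_RBT_IS_EQ(KX, KY) ((KX) == (KY))
    #define ERTS_RBT_WANT_INSERT
    #define ERTS_RBT_WANT_LOOKUP
    #define ERTS_RBT_WANT_DELETE
    #define ERTS_RBT_UNDEF
    #include "erl_rbtree.h"

    /* The include now provides, among others:
     *   void     my_rbt_insert(my_node **root, my_node *el);
     *   my_node *my_rbt_lookup(my_node *root, Sint64 key);
     *   void     my_rbt_delete(my_node **root, my_node *el);
     */
    #endif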
+
+
+/*
+ * Check that we have all mandatory defines
+ */
+#ifndef ERTS_RBT_PREFIX
+# error Missing definition of ERTS_RBT_PREFIX
+#endif
+#ifndef ERTS_RBT_T
+# error Missing definition of ERTS_RBT_T
+#endif
+#ifndef ERTS_RBT_KEY_T
+# error Missing definition of ERTS_RBT_KEY_T
+#endif
+#ifndef ERTS_RBT_FLAGS_T
+# error Missing definition of ERTS_RBT_FLAGS_T
+#endif
+#ifndef ERTS_RBT_INIT_EMPTY_TNODE
+# error Missing definition of ERTS_RBT_INIT_EMPTY_TNODE
+#endif
+#ifndef ERTS_RBT_IS_RED
+# error Missing definition of ERTS_RBT_IS_RED
+#endif
+#ifndef ERTS_RBT_SET_RED
+# error Missing definition of ERTS_RBT_SET_RED
+#endif
+#ifndef ERTS_RBT_IS_BLACK
+# error Missing definition of ERTS_RBT_IS_BLACK
+#endif
+#ifndef ERTS_RBT_SET_BLACK
+# error Missing definition of ERTS_RBT_SET_BLACK
+#endif
+#ifndef ERTS_RBT_GET_FLAGS
+# error Missing definition of ERTS_RBT_GET_FLAGS
+#endif
+#ifndef ERTS_RBT_SET_FLAGS
+# error Missing definition of ERTS_RBT_SET_FLAGS
+#endif
+#ifndef ERTS_RBT_GET_PARENT
+# error Missing definition of ERTS_RBT_GET_PARENT
+#endif
+#ifndef ERTS_RBT_SET_PARENT
+# error Missing definition of ERTS_RBT_SET_PARENT
+#endif
+#ifndef ERTS_RBT_GET_RIGHT
+# error Missing definition of ERTS_RBT_GET_RIGHT
+#endif
+#ifndef ERTS_RBT_GET_LEFT
+# error Missing definition of ERTS_RBT_GET_LEFT
+#endif
+#ifndef ERTS_RBT_IS_LT
+# error Missing definition of ERTS_RBT_IS_LT
+#endif
+#ifndef ERTS_RBT_GET_KEY
+# error Missing definition of ERTS_RBT_GET_KEY
+#endif
+#ifndef ERTS_RBT_IS_EQ
+# error Missing definition of ERTS_RBT_IS_EQ
+#endif
+
+#if defined(ERTS_RBT_HARD_DEBUG) || defined(DEBUG)
+# ifndef ERTS_RBT_DEBUG
+# define ERTS_RBT_DEBUG 1
+# endif
+#endif
+
+#if defined(ERTS_RBT_HARD_DEBUG) && defined(__GNUC__)
+#warning "* * * * * * * * * * * * * * * * * *"
+#warning "* ERTS_RBT_HARD_DEBUG IS ENABLED! *"
+#warning "* * * * * * * * * * * * * * * * * *"
+#endif
+
+#undef ERTS_RBT_ASSERT
+#if defined(ERTS_RBT_DEBUG)
+#define ERTS_RBT_ASSERT(E) ERTS_ASSERT(E)
+#else
+#define ERTS_RBT_ASSERT(E) ((void) 1)
+#endif
+
+#undef ERTS_RBT_API_INLINE__
+#if defined(ERTS_RBT_NO_API_INLINE) || defined(ERTS_RBT_DEBUG)
+# define ERTS_RBT_API_INLINE__
+#else
+# define ERTS_RBT_API_INLINE__ ERTS_INLINE
+#endif
+
+#ifndef ERTS_RBT_YIELD_STAT_INITER
+# define ERTS_RBT_YIELD_STAT_INITER {NULL, 0}
+#endif
+
+#define ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y) \
+ X ## Y
+#define ERTS_RBT_CONCAT_MACRO_VALUES__(X, Y) \
+ ERTS_RBT_CONCAT_MACRO_VALUES___(X, Y)
+
+#undef ERTS_RBT_YIELD_STATE_T__
+#define ERTS_RBT_YIELD_STATE_T__ \
+ ERTS_RBT_CONCAT_MACRO_VALUES__(ERTS_RBT_PREFIX, _rbt_yield_state_t)
+
+typedef struct {
+ ERTS_RBT_T *x;
+ int up;
+} ERTS_RBT_YIELD_STATE_T__;
+
+#define ERTS_RBT_FUNC__(Name) \
+ ERTS_RBT_CONCAT_MACRO_VALUES__(ERTS_RBT_PREFIX, _rbt_ ## Name)
+
+#undef ERTS_RBT_NEED_REPLACE__
+#undef ERTS_RBT_NEED_INSERT__
+#undef ERTS_RBT_NEED_ROTATE__
+#undef ERTS_RBT_NEED_FOREACH_UNORDERED__
+#undef ERTS_RBT_NEED_FOREACH_ORDERED__
+#undef ERTS_RBT_NEED_HDBG_CHECK_TREE__
+#undef ERTS_RBT_HDBG_CHECK_TREE__
+
+#if defined(ERTS_RBT_WANT_REPLACE) || defined(ERTS_RBT_WANT_DELETE)
+# define ERTS_RBT_NEED_REPLACE__
+#endif
+#if defined(ERTS_RBT_WANT_INSERT) || defined(ERTS_RBT_WANT_LOOKUP_INSERT)
+# define ERTS_RBT_NEED_INSERT__
+#endif
+#if defined(ERTS_RBT_WANT_DELETE) || defined(ERTS_RBT_NEED_INSERT__)
+# define ERTS_RBT_NEED_ROTATE__
+#endif
+#if defined(ERTS_RBT_WANT_FOREACH) \
+ || defined(ERTS_RBT_WANT_FOREACH_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_DESTROY) \
+ || defined(ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING)
+# define ERTS_RBT_NEED_FOREACH_UNORDERED__
+#endif
+#if defined(ERTS_RBT_WANT_FOREACH_SMALL) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE) \
+ || defined(ERTS_RBT_WANT_FOREACH_SMALL_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_SMALL_DESTROY) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE_DESTROY) \
+ || defined(ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING) \
+ || defined(ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING)
+# define ERTS_RBT_NEED_FOREACH_ORDERED__
+#endif
+#if defined(ERTS_RBT_HARD_DEBUG) \
+ && (defined(ERTS_RBT_WANT_DELETE) \
+ || defined(ERTS_RBT_NEED_INSERT__))
+static void ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root);
+# define ERTS_RBT_NEED_HDBG_CHECK_TREE__
+# define ERTS_RBT_HDBG_CHECK_TREE__(R) \
+ ERTS_RBT_FUNC__(hdbg_check_tree)((R))
+#else
+# define ERTS_RBT_HDBG_CHECK_TREE__(R) ((void) 1)
+#endif
+
+#ifdef ERTS_RBT_NEED_ROTATE__
+
+static ERTS_INLINE void
+ERTS_RBT_FUNC__(left_rotate__)(ERTS_RBT_T **root, ERTS_RBT_T *x)
+{
+ ERTS_RBT_T *y, *l, *p;
+
+ y = ERTS_RBT_GET_RIGHT(x);
+ l = ERTS_RBT_GET_LEFT(y);
+ ERTS_RBT_SET_RIGHT(x, l);
+
+ if (l)
+ ERTS_RBT_SET_PARENT(l, x);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ ERTS_RBT_SET_PARENT(y, p);
+
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == x);
+ *root = y;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, y);
+#endif
+ }
+ else if (x == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, y);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, y);
+ }
+ ERTS_RBT_SET_LEFT(y, x);
+ ERTS_RBT_SET_PARENT(x, y);
+
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE
+ ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE(!0, x, y);
+#endif
+
+}
+
+static ERTS_INLINE void
+ERTS_RBT_FUNC__(right_rotate__)(ERTS_RBT_T **root, ERTS_RBT_T *x)
+{
+ ERTS_RBT_T *y, *r, *p;
+
+ y = ERTS_RBT_GET_LEFT(x);
+ r = ERTS_RBT_GET_RIGHT(y);
+ ERTS_RBT_SET_LEFT(x, r);
+
+ if (r)
+ ERTS_RBT_SET_PARENT(r, x);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ ERTS_RBT_SET_PARENT(y, p);
+
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == x);
+ *root = y;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, y);
+#endif
+ }
+ else if (x == ERTS_RBT_GET_RIGHT(p))
+ ERTS_RBT_SET_RIGHT(p, y);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_LEFT(p));
+ ERTS_RBT_SET_LEFT(p, y);
+ }
+
+ ERTS_RBT_SET_RIGHT(y, x);
+ ERTS_RBT_SET_PARENT(x, y);
+
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE
+ ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE(0, x, y);
+#endif
+
+}
+
+#endif /* ERTS_RBT_NEED_ROTATE__ */
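For orientation, left_rotate__ above is the standard red-black rotation: x's right child y becomes the new subtree root, y's old left subtree l is handed over to x, and the attached-data hooks are notified when the subtree (or tree) root changes. A rough sketch using the same node names as in the code (a and c are just the untouched subtrees):

/*
 *        p                   p
 *        |                   |
 *        x      left         y
 *       / \    =====>       / \
 *      a   y               x   c
 *         / \             / \
 *        l   c           a   l
 *
 * right_rotate__ is the mirror image.
 */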
+
+#ifdef ERTS_RBT_NEED_REPLACE__
+
+/*
+ * Replace node x with node y
+ */
+static ERTS_INLINE void
+ERTS_RBT_FUNC__(replace__)(ERTS_RBT_T **root, ERTS_RBT_T *x, ERTS_RBT_T *y)
+{
+ ERTS_RBT_T *p, *r, *l;
+ ERTS_RBT_FLAGS_T f;
+
+ p = ERTS_RBT_GET_PARENT(x);
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == x);
+ *root = y;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, y);
+#endif
+ }
+ else if (x == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, y);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, y);
+ }
+ l = ERTS_RBT_GET_LEFT(x);
+ if (l) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_PARENT(l) == x);
+ ERTS_RBT_SET_PARENT(l, y);
+ }
+ r = ERTS_RBT_GET_RIGHT(x);
+ if (r) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_PARENT(r) == x);
+ ERTS_RBT_SET_PARENT(r, y);
+ }
+
+ f = ERTS_RBT_GET_FLAGS(x);
+ ERTS_RBT_SET_FLAGS(y, f);
+ ERTS_RBT_SET_PARENT(y, p);
+ ERTS_RBT_SET_RIGHT(y, r);
+ ERTS_RBT_SET_LEFT(y, l);
+}
+
+#endif /* ERTS_RBT_NEED_REPLACE__ */
+
+#ifdef ERTS_RBT_WANT_REPLACE
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(replace)(ERTS_RBT_T **root, ERTS_RBT_T *x, ERTS_RBT_T *y)
+{
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_EQ(ERTS_RBT_GET_KEY(x),
+ ERTS_RBT_GET_KEY(y)));
+
+ ERTS_RBT_FUNC__(replace__)(root, x, y);
+}
+
+#endif /* ERTS_RBT_WANT_REPLACE */
+
+#ifdef ERTS_RBT_WANT_DELETE
+
+/*
+ * Delete a node.
+ */
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(delete)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ int spliced_is_black;
+ ERTS_RBT_T *p, *x, *y, *z = n;
+ ERTS_RBT_T null_x; /* null_x is used to get the fixup started when we
+ splice out a node without children. */
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root);
+
+ ERTS_RBT_INIT_EMPTY_TNODE(&null_x);
+
+ /* Remove node from tree... */
+
+ /* Find node to splice out */
+ if (!ERTS_RBT_GET_LEFT(z) || !ERTS_RBT_GET_RIGHT(z))
+ y = z;
+ else {
+ /* Set y to z:s successor */
+ y = ERTS_RBT_GET_RIGHT(z);
+ while (1) {
+ ERTS_RBT_T *t = ERTS_RBT_GET_LEFT(y);
+ if (!t)
+ break;
+ y = t;
+ }
+ }
+ /* splice out y */
+ x = ERTS_RBT_GET_LEFT(y);
+ if (!x)
+ x = ERTS_RBT_GET_RIGHT(y);
+ spliced_is_black = ERTS_RBT_IS_BLACK(y);
+ p = ERTS_RBT_GET_PARENT(y);
+ if (x)
+ ERTS_RBT_SET_PARENT(x, p);
+ else if (spliced_is_black) {
+ x = &null_x;
+ ERTS_RBT_SET_BLACK(x);
+ ERTS_RBT_SET_PARENT(x, p);
+ ERTS_RBT_SET_LEFT(y, x);
+ }
+
+ if (!p) {
+ ERTS_RBT_ASSERT(*root == y);
+ *root = x;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(y, x);
+#endif
+ }
+ else {
+ if (y == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, x);
+ else {
+ ERTS_RBT_ASSERT(y == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, x);
+ }
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD
+ if (p != z)
+ ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD(p, y == z ? NULL : z);
+#endif
+ }
+ if (y != z) {
+ /* We spliced out the successor of z; replace z by the successor */
+ ERTS_RBT_FUNC__(replace__)(root, z, y);
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD
+ ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD(y, NULL);
+#endif
+ }
+
+ if (spliced_is_black) {
+ /* We removed a black node which makes the resulting tree
+ violate the Red-Black Tree properties. Fixup tree... */
+
+ p = ERTS_RBT_GET_PARENT(x);
+ while (ERTS_RBT_IS_BLACK(x) && p) {
+ ERTS_RBT_T *r, *l;
+
+ /*
+ * x has an "extra black" which we move up the tree
+ * until we reach the root or until we can get rid of it.
+ *
+	     * y is the sibling of x, and p is their parent
+ */
+
+ if (x == ERTS_RBT_GET_LEFT(p)) {
+ y = ERTS_RBT_GET_RIGHT(p);
+
+ ERTS_RBT_ASSERT(y);
+
+ if (ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(y));
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(y));
+
+ ERTS_RBT_SET_BLACK(y);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(p));
+
+ ERTS_RBT_SET_RED(p);
+ ERTS_RBT_FUNC__(left_rotate__)(root, p);
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_RIGHT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(y));
+
+ l = ERTS_RBT_GET_LEFT(y);
+ r = ERTS_RBT_GET_RIGHT(y);
+ if ((!l || ERTS_RBT_IS_BLACK(l))
+ && (!r || ERTS_RBT_IS_BLACK(r))) {
+ ERTS_RBT_SET_RED(y);
+ x = p;
+ p = ERTS_RBT_GET_PARENT(x);
+ }
+ else {
+ if (!r || ERTS_RBT_IS_BLACK(r)) {
+ ERTS_RBT_SET_BLACK(l);
+ ERTS_RBT_SET_RED(y);
+ ERTS_RBT_FUNC__(right_rotate__)(root, y);
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_RIGHT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+
+ if (p && ERTS_RBT_IS_RED(p)) {
+
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(y);
+ }
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(y));
+
+ ERTS_RBT_SET_BLACK(ERTS_RBT_GET_RIGHT(y));
+ ERTS_RBT_FUNC__(left_rotate__)(root, p);
+ x = *root;
+ break;
+ }
+ }
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+
+ y = ERTS_RBT_GET_LEFT(p);
+
+ ERTS_RBT_ASSERT(y);
+
+ if (ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(y));
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(y));
+
+ ERTS_RBT_SET_BLACK(y);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(p));
+ ERTS_RBT_SET_RED(p);
+ ERTS_RBT_FUNC__(right_rotate__)(root, p);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_LEFT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(y));
+
+ l = ERTS_RBT_GET_LEFT(y);
+ r = ERTS_RBT_GET_RIGHT(y);
+
+ if ((!r || ERTS_RBT_IS_BLACK(r))
+ && (!l || ERTS_RBT_IS_BLACK(l))) {
+ ERTS_RBT_SET_RED(y);
+ x = p;
+ p = ERTS_RBT_GET_PARENT(x);
+ }
+ else {
+ if (!l || ERTS_RBT_IS_BLACK(l)) {
+ ERTS_RBT_SET_BLACK(r);
+ ERTS_RBT_SET_RED(y);
+ ERTS_RBT_FUNC__(left_rotate__)(root, y);
+
+ p = ERTS_RBT_GET_PARENT(x);
+ y = ERTS_RBT_GET_LEFT(p);
+ }
+
+ ERTS_RBT_ASSERT(y);
+
+ if (p && ERTS_RBT_IS_RED(p)) {
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(y);
+ }
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(y));
+
+ ERTS_RBT_SET_BLACK(ERTS_RBT_GET_LEFT(y));
+ ERTS_RBT_FUNC__(right_rotate__)(root, p);
+ x = *root;
+ break;
+ }
+ }
+ }
+
+ ERTS_RBT_SET_BLACK(x);
+
+ x = &null_x;
+ p = ERTS_RBT_GET_PARENT(x);
+
+ if (p) {
+ if (ERTS_RBT_GET_LEFT(p) == x)
+ ERTS_RBT_SET_LEFT(p, NULL);
+ else {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(p) == x);
+ ERTS_RBT_SET_RIGHT(p, NULL);
+ }
+
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_LEFT(x));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_RIGHT(x));
+ }
+ else if (*root == x) {
+ *root = NULL;
+
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(x, NULL);
+#endif
+
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_LEFT(x));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_RIGHT(x));
+ }
+ }
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root);
+
+}
+
+#endif /* ERTS_RBT_WANT_DELETE */
+
+#ifdef ERTS_RBT_NEED_INSERT__
+
+static void
+ERTS_RBT_FUNC__(insert_fixup__)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ ERTS_RBT_T *x, *y;
+
+ x = n;
+
+ /*
+ * Rearrange the tree so that it satisfies the Red-Black Tree properties
+ */
+
+ ERTS_RBT_ASSERT(x != *root && ERTS_RBT_IS_RED(ERTS_RBT_GET_PARENT(x)));
+ do {
+ ERTS_RBT_T *p, *pp;
+
+ /*
+ * x and its parent are both red. Move the red pair up the tree
+ * until we get to the root or until we can separate them.
+ */
+
+ p = ERTS_RBT_GET_PARENT(x);
+ pp = ERTS_RBT_GET_PARENT(p);
+
+ ERTS_RBT_ASSERT(p && pp);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(pp));
+
+ if (p == ERTS_RBT_GET_LEFT(pp)) {
+ y = ERTS_RBT_GET_RIGHT(pp);
+ if (y && ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_SET_BLACK(y);
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ x = pp;
+ }
+ else {
+
+ if (x == ERTS_RBT_GET_RIGHT(p)) {
+ x = p;
+ ERTS_RBT_FUNC__(left_rotate__)(root, x);
+ p = ERTS_RBT_GET_PARENT(x);
+ pp = ERTS_RBT_GET_PARENT(p);
+
+ ERTS_RBT_ASSERT(p && pp);
+ }
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_LEFT(ERTS_RBT_GET_LEFT(pp)));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(p));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(pp));
+ ERTS_RBT_ASSERT(!y || ERTS_RBT_IS_BLACK(y));
+
+
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ ERTS_RBT_FUNC__(right_rotate__)(root, pp);
+
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(ERTS_RBT_GET_PARENT(x)) == x);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(
+ ERTS_RBT_GET_RIGHT(
+ ERTS_RBT_GET_PARENT(x))));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x)
+ || ERTS_RBT_IS_BLACK(ERTS_RBT_GET_PARENT(x)));
+ break;
+ }
+ }
+ else {
+ ERTS_RBT_ASSERT(p == ERTS_RBT_GET_RIGHT(pp));
+
+ y = ERTS_RBT_GET_LEFT(pp);
+ if (y && ERTS_RBT_IS_RED(y)) {
+ ERTS_RBT_SET_BLACK(y);
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ x = pp;
+ }
+ else {
+
+ if (x == ERTS_RBT_GET_LEFT(p)) {
+ x = p;
+ ERTS_RBT_FUNC__(right_rotate__)(root, x);
+ p = ERTS_RBT_GET_PARENT(x);
+ pp = ERTS_RBT_GET_PARENT(p);
+
+ ERTS_RBT_ASSERT(p && pp);
+ }
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(ERTS_RBT_GET_RIGHT(pp)));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(p));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_BLACK(pp));
+ ERTS_RBT_ASSERT(!y || ERTS_RBT_IS_BLACK(y));
+
+
+ ERTS_RBT_SET_BLACK(p);
+ ERTS_RBT_SET_RED(pp);
+ ERTS_RBT_FUNC__(left_rotate__)(root, pp);
+
+
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_RIGHT(ERTS_RBT_GET_PARENT(x)) == x);
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(x));
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_RED(
+ ERTS_RBT_GET_LEFT(
+ ERTS_RBT_GET_PARENT(x))));
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x)
+ || ERTS_RBT_IS_BLACK(ERTS_RBT_GET_PARENT(x)));
+ break;
+ }
+ }
+ } while (x != *root && ERTS_RBT_IS_RED(ERTS_RBT_GET_PARENT(x)));
+
+ ERTS_RBT_SET_BLACK(*root);
+
+}
+
+static ERTS_INLINE ERTS_RBT_T *
+ERTS_RBT_FUNC__(insert_aux__)(ERTS_RBT_T **root, ERTS_RBT_T *n, int lookup)
+{
+ ERTS_RBT_KEY_T kn = ERTS_RBT_GET_KEY(n);
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root);
+
+ ERTS_RBT_INIT_EMPTY_TNODE(n);
+
+ if (!*root) {
+ ERTS_RBT_SET_BLACK(n);
+ *root = n;
+#ifdef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+ ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT(NULL, n);
+#endif
+ }
+ else {
+ ERTS_RBT_T *p, *x = *root;
+
+ while (1) {
+ ERTS_RBT_KEY_T kx;
+ ERTS_RBT_T *c;
+
+ kx = ERTS_RBT_GET_KEY(x);
+
+ if (lookup && ERTS_RBT_IS_EQ(kn, kx)) {
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root);
+
+ return x;
+ }
+
+ if (ERTS_RBT_IS_LT(kn, kx)) {
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c) {
+ ERTS_RBT_SET_PARENT(n, x);
+ ERTS_RBT_SET_LEFT(x, n);
+ p = x;
+ break;
+ }
+ }
+ else {
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c) {
+ ERTS_RBT_SET_PARENT(n, x);
+ ERTS_RBT_SET_RIGHT(x, n);
+ p = x;
+ break;
+ }
+ }
+
+ x = c;
+ }
+
+ ERTS_RBT_ASSERT(p);
+
+ ERTS_RBT_SET_RED(n);
+ if (ERTS_RBT_IS_RED(p))
+ ERTS_RBT_FUNC__(insert_fixup__)(root, n);
+ }
+
+ ERTS_RBT_HDBG_CHECK_TREE__(*root);
+
+ return NULL;
+}
+
+#endif /* ERTS_RBT_NEED_INSERT__ */
+
+#ifdef ERTS_RBT_WANT_LOOKUP_INSERT
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(lookup_insert)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ return ERTS_RBT_FUNC__(insert_aux__)(root, n, !0);
+}
+
+#endif /* ERTS_RBT_WANT_LOOKUP_INSERT */
+
+#ifdef ERTS_RBT_WANT_INSERT
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(insert)(ERTS_RBT_T **root, ERTS_RBT_T *n)
+{
+ (void) ERTS_RBT_FUNC__(insert_aux__)(root, n, 0);
+}
+
+#endif /* ERTS_RBT_WANT_INSERT */
+
+#ifdef ERTS_RBT_WANT_LOOKUP
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(lookup)(ERTS_RBT_T *root, ERTS_RBT_KEY_T key)
+{
+ ERTS_RBT_T *x = root;
+
+ if (!x)
+ return NULL;
+
+ while (1) {
+ ERTS_RBT_KEY_T kx = ERTS_RBT_GET_KEY(x);
+ ERTS_RBT_T *c;
+
+ if (ERTS_RBT_IS_EQ(key, kx))
+ return x;
+
+ if (ERTS_RBT_IS_LT(key, kx)) {
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ return NULL;
+ }
+ else {
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ return NULL;
+ }
+
+ x = c;
+ }
+}
+
+#endif /* ERTS_RBT_WANT_LOOKUP */
+
+#ifdef ERTS_RBT_WANT_SMALLEST
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(smallest)(ERTS_RBT_T *root)
+{
+ ERTS_RBT_T *x = root;
+
+ if (!x)
+ return NULL;
+
+ while (1) {
+ ERTS_RBT_T *c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ return x;
+}
+
+#endif /* ERTS_RBT_WANT_SMALLEST */
+
+#ifdef ERTS_RBT_WANT_LARGEST
+
+static ERTS_RBT_API_INLINE__ ERTS_RBT_T *
+ERTS_RBT_FUNC__(largest)(ERTS_RBT_T *root)
+{
+ ERTS_RBT_T *x = root;
+
+ if (!x)
+ return NULL;
+
+ while (1) {
+ ERTS_RBT_T *c = ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ return x;
+}
+
+#endif /* ERTS_RBT_WANT_LARGEST */
+
+#ifdef ERTS_RBT_NEED_FOREACH_UNORDERED__
+
+static ERTS_INLINE int
+ERTS_RBT_FUNC__(foreach_unordered__)(ERTS_RBT_T **root,
+ int destroying,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ int yielding,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ ERTS_RBT_T *c, *p, *x;
+
+ ERTS_RBT_ASSERT(!yielding || ystate);
+
+ if (yielding && ystate->x) {
+ x = ystate->x;
+ ERTS_RBT_ASSERT(ystate->up);
+ goto restart_up;
+ }
+ else {
+ x = *root;
+ if (!x)
+ return 0;
+ if (destroying)
+ *root = NULL;
+ }
+
+ while (1) {
+
+ while (1) {
+
+ while (1) {
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ while (1) {
+#ifdef ERTS_RBT_DEBUG
+ int cdir;
+#endif
+ if (yielding && ylimit-- <= 0) {
+ ystate->x = x;
+ ystate->up = 1;
+ return 1;
+ }
+
+ restart_up:
+
+ p = ERTS_RBT_GET_PARENT(x);
+
+#ifdef ERTS_RBT_DEBUG
+ ERTS_RBT_ASSERT(!destroying || !ERTS_RBT_GET_LEFT(x));
+ ERTS_RBT_ASSERT(!destroying || !ERTS_RBT_GET_RIGHT(x));
+
+ if (p) {
+ if (x == ERTS_RBT_GET_LEFT(p)) {
+ cdir = -1;
+ if (destroying)
+ ERTS_RBT_SET_LEFT(p, NULL);
+ }
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ cdir = 1;
+ if (destroying)
+ ERTS_RBT_SET_RIGHT(p, NULL);
+ }
+ }
+#endif
+
+ (*op)(x, arg);
+
+ if (!p) {
+ if (yielding) {
+ ystate->x = NULL;
+ ystate->up = 0;
+ }
+ return 0; /* Done */
+ }
+
+ c = ERTS_RBT_GET_RIGHT(p);
+ if (c && c != x) {
+ ERTS_RBT_ASSERT(cdir < 0);
+
+ /* Go down tree of x's sibling... */
+ x = c;
+ break;
+ }
+
+ x = p;
+ }
+ }
+}
+
+#endif /* ERTS_RBT_NEED_FOREACH_UNORDERED__ */
+
+#ifdef ERTS_RBT_NEED_FOREACH_ORDERED__
+
+static ERTS_INLINE int
+ERTS_RBT_FUNC__(foreach_ordered__)(ERTS_RBT_T **root,
+ int from_small,
+ int destroying,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destroy)(ERTS_RBT_T *, void *),
+ void *arg,
+ int yielding,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ ERTS_RBT_T *c, *p, *x;
+
+ ERTS_RBT_ASSERT(!yielding || ystate);
+ ERTS_RBT_ASSERT(!destroying || destroy);
+
+ if (yielding && ystate->x) {
+ x = ystate->x;
+ if (ystate->up)
+ goto restart_up;
+ else
+ goto restart_down;
+ }
+ else {
+ x = *root;
+ if (!x)
+ return 0;
+ if (destroying)
+ *root = NULL;
+ }
+
+ while (1) {
+
+ while (1) {
+
+ while (1) {
+ c = from_small ? ERTS_RBT_GET_LEFT(x) : ERTS_RBT_GET_RIGHT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ (*op)(x, arg);
+
+ if (yielding && --ylimit <= 0) {
+ ystate->x = x;
+ ystate->up = 0;
+ return 1;
+ }
+
+ restart_down:
+
+ c = from_small ? ERTS_RBT_GET_RIGHT(x) : ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+ x = c;
+ }
+
+ while (1) {
+ p = ERTS_RBT_GET_PARENT(x);
+
+ if (p) {
+
+ c = from_small ? ERTS_RBT_GET_RIGHT(p) : ERTS_RBT_GET_LEFT(p);
+ if (!c || c != x) {
+ ERTS_RBT_ASSERT((from_small
+ ? ERTS_RBT_GET_LEFT(p)
+ : ERTS_RBT_GET_RIGHT(p)) == x);
+
+ (*op)(p, arg);
+
+ if (yielding && --ylimit <= 0) {
+ ystate->x = x;
+ ystate->up = 1;
+ return 1;
+ restart_up:
+ p = ERTS_RBT_GET_PARENT(x);
+ }
+ }
+
+ if (c && c != x) {
+ ERTS_RBT_ASSERT((from_small
+ ? ERTS_RBT_GET_LEFT(p)
+ : ERTS_RBT_GET_RIGHT(p)) == x);
+
+ /* Go down tree of x's sibling... */
+ x = c;
+ break;
+ }
+ }
+
+ if (destroying) {
+
+#ifdef ERTS_RBT_DEBUG
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_LEFT(x)
+ && !ERTS_RBT_GET_RIGHT(x));
+
+ if (p) {
+ if (x == ERTS_RBT_GET_LEFT(p))
+ ERTS_RBT_SET_LEFT(p, NULL);
+ else {
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_RIGHT(p));
+ ERTS_RBT_SET_RIGHT(p, NULL);
+ }
+ }
+#endif
+
+ (*destroy)(x, arg);
+ }
+
+ if (!p) {
+ if (yielding) {
+ ystate->x = NULL;
+ ystate->up = 0;
+ }
+		return 0; /* Done */
+ }
+ x = p;
+ }
+ }
+}
+
+#endif /* ERTS_RBT_NEED_FOREACH_ORDERED__ */
+
+#ifdef ERTS_RBT_WANT_FOREACH
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_unordered__)(&root, 0, op, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_small)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(&root, 1, 0,
+ op, NULL, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_large)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(&root, 0, 0,
+ op, NULL, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE */
+
+#ifdef ERTS_RBT_WANT_FOREACH_YIELDING
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_yielding)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+    (void) ERTS_RBT_FUNC__(foreach_unordered__)(&root, 0, op, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_small_yielding)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(&root, 1, 0,
+ op, NULL, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_large_yielding)(ERTS_RBT_T *root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(&root, 0, 0,
+ op, NULL, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_DESTROY
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_destroy)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_unordered__)(root, 1, op, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_DESTROY */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_small_destroy)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(root, 1, 1,
+ op, destr, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL_DESTROY */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY
+
+static ERTS_RBT_API_INLINE__ void
+ERTS_RBT_FUNC__(foreach_large_destroy)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg)
+{
+ (void) ERTS_RBT_FUNC__(foreach_ordered__)(root, 0, 1,
+ op, destr, arg,
+ 0, NULL, 0);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE_DESTROY */
+
+#ifdef ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_destroy_yielding)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_unordered__)(root, 1, op, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_small_destroy_yielding)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(root, 1, 1,
+ op, destr, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING */
+
+#ifdef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING
+
+static ERTS_RBT_API_INLINE__ int
+ERTS_RBT_FUNC__(foreach_large_destroy_yielding)(ERTS_RBT_T **root,
+ void (*op)(ERTS_RBT_T *, void *),
+ void (*destr)(ERTS_RBT_T *, void *),
+ void *arg,
+ ERTS_RBT_YIELD_STATE_T__ *ystate,
+ Sint ylimit)
+{
+ return ERTS_RBT_FUNC__(foreach_ordered__)(root, 0, 1,
+ op, destr, arg,
+ 1, ystate, ylimit);
+}
+
+#endif /* ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING */
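All *_yielding variants above share one calling convention: the caller owns a yield state, initializes it with ERTS_RBT_YIELD_STAT_INITER, and keeps calling with the same state until the walk reports completion; a non-zero return means the node limit was hit and the traversal should be resumed later. A minimal sketch, assuming a tree instantiated with ERTS_RBT_PREFIX mytree and a callback visit_node (both hypothetical names, not from this commit):

mytree_rbt_yield_state_t ystate = ERTS_RBT_YIELD_STAT_INITER;

/* Destroy the tree, visiting at most 100 nodes per call. */
while (mytree_rbt_foreach_destroy_yielding(&root, visit_node, NULL,
                                           &ystate, 100)) {
    /* yielded; reschedule and resume later with the same ystate */
}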
+
+#ifdef ERTS_RBT_WANT_DEBUG_PRINT
+
+static void
+ERTS_RBT_FUNC__(debug_print)(FILE *filep, ERTS_RBT_T *x, int indent,
+ void (*print_node)(ERTS_RBT_T *))
+{
+ if (x) {
+ ERTS_RBT_FUNC__(debug_print)(filep, ERTS_RBT_GET_RIGHT(x),
+ indent+2, print_node);
+ erts_fprintf(filep,
+ "%*s[%s:%p:",
+ indent, "",
+ ERTS_RBT_IS_BLACK(x) ? "Black" : "Red",
+ x);
+ (*print_node)(x);
+ erts_fprintf(filep, "]\n");
+ ERTS_RBT_FUNC__(debug_print)(filep, ERTS_RBT_GET_LEFT(x),
+ indent+2, print_node);
+ }
+}
+
+#endif /* ERTS_RBT_WANT_DEBUG_PRINT */
+
+#ifdef ERTS_RBT_NEED_HDBG_CHECK_TREE__
+
+static void
+ERTS_RBT_FUNC__(hdbg_check_tree)(ERTS_RBT_T *root)
+{
+ int black_depth = -1, no_black = 0;
+ ERTS_RBT_T *c, *p, *x = root;
+ ERTS_RBT_KEY_T kx;
+ ERTS_RBT_KEY_T kc;
+
+ if (!x)
+ return;
+
+ ERTS_RBT_ASSERT(!ERTS_RBT_GET_PARENT(x));
+
+ while (1) {
+
+ while (1) {
+
+ while (1) {
+
+ if (ERTS_RBT_IS_BLACK(x))
+ no_black++;
+ else {
+ c = ERTS_RBT_GET_RIGHT(x);
+ ERTS_RBT_ASSERT(!c || ERTS_RBT_IS_BLACK(c));
+ c = ERTS_RBT_GET_LEFT(x);
+ ERTS_RBT_ASSERT(!c || ERTS_RBT_IS_BLACK(c));
+ }
+
+ c = ERTS_RBT_GET_LEFT(x);
+ if (!c)
+ break;
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_PARENT(c));
+
+ kx = ERTS_RBT_GET_KEY(x);
+ kc = ERTS_RBT_GET_KEY(c);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kc, kx)
+ || ERTS_RBT_IS_EQ(kc, kx));
+
+ x = c;
+ }
+
+ c = ERTS_RBT_GET_RIGHT(x);
+ if (!c) {
+ if (black_depth < 0)
+ black_depth = no_black;
+ ERTS_RBT_ASSERT(black_depth == no_black);
+ break;
+ }
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_PARENT(c));
+
+ kx = ERTS_RBT_GET_KEY(x);
+ kc = ERTS_RBT_GET_KEY(c);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kx, kc)
+ || ERTS_RBT_IS_EQ(kx, kc));
+ x = c;
+ }
+
+ while (1) {
+ p = ERTS_RBT_GET_PARENT(x);
+
+ if (ERTS_RBT_IS_BLACK(x))
+ no_black--;
+
+ if (p) {
+
+ ERTS_RBT_ASSERT(x == ERTS_RBT_GET_LEFT(p)
+ || x == ERTS_RBT_GET_RIGHT(p));
+
+ c = ERTS_RBT_GET_RIGHT(p);
+ if (c && c != x) {
+ ERTS_RBT_ASSERT(ERTS_RBT_GET_LEFT(p) == x);
+
+ kx = ERTS_RBT_GET_KEY(x);
+ kc = ERTS_RBT_GET_KEY(c);
+
+ ERTS_RBT_ASSERT(ERTS_RBT_IS_LT(kx, kc)
+ || ERTS_RBT_IS_EQ(kx, kc));
+ /* Go down tree of x's sibling... */
+ x = c;
+ break;
+ }
+ }
+
+ if (!p) {
+ ERTS_RBT_ASSERT(root == x);
+ ERTS_RBT_ASSERT(no_black == 0);
+ return; /* Done */
+ }
+
+ x = p;
+ }
+ }
+}
+
+#undef ERTS_RBT_PRINT_TREE__
+
+#endif /* ERTS_RBT_NEED_HDBG_CHECK_TREE__ */
+
+#undef ERTS_RBT_ASSERT
+#undef ERTS_RBT_DEBUG
+#undef ERTS_RBT_API_INLINE__
+#undef ERTS_RBT_YIELD_STATE_T__
+#undef ERTS_RBT_NEED_REPLACE__
+#undef ERTS_RBT_NEED_INSERT__
+#undef ERTS_RBT_NEED_ROTATE__
+#undef ERTS_RBT_NEED_FOREACH_UNORDERED__
+#undef ERTS_RBT_NEED_FOREACH_ORDERED__
+#undef ERTS_RBT_NEED_HDBG_CHECK_TREE__
+#undef ERTS_RBT_HDBG_CHECK_TREE__
+
+#ifdef ERTS_RBT_UNDEF
+# undef ERTS_RBT_PREFIX
+# undef ERTS_RBT_T
+# undef ERTS_RBT_KEY_T
+# undef ERTS_RBT_FLAGS_T
+# undef ERTS_RBT_INIT_EMPTY_TNODE
+# undef ERTS_RBT_IS_RED
+# undef ERTS_RBT_SET_RED
+# undef ERTS_RBT_IS_BLACK
+# undef ERTS_RBT_SET_BLACK
+# undef ERTS_RBT_GET_FLAGS
+# undef ERTS_RBT_SET_FLAGS
+# undef ERTS_RBT_GET_PARENT
+# undef ERTS_RBT_SET_PARENT
+# undef ERTS_RBT_GET_RIGHT
+# undef ERTS_RBT_SET_RIGHT
+# undef ERTS_RBT_GET_LEFT
+# undef ERTS_RBT_SET_LEFT
+# undef ERTS_RBT_GET_KEY
+# undef ERTS_RBT_IS_LT
+# undef ERTS_RBT_IS_EQ
+# undef ERTS_RBT_UNDEF
+# undef ERTS_RBT_NO_API_INLINE
+# undef ERTS_RBT_UPDATE_ATTACHED_DATA_ROTATE
+# undef ERTS_RBT_UPDATE_ATTACHED_DATA_DMOD
+# undef ERTS_RBT_UPDATE_ATTACHED_DATA_CHGROOT
+# undef ERTS_RBT_WANT_DELETE
+# undef ERTS_RBT_WANT_INSERT
+# undef ERTS_RBT_WANT_LOOKUP_INSERT
+# undef ERTS_RBT_WANT_REPLACE
+# undef ERTS_RBT_WANT_LOOKUP
+# undef ERTS_RBT_WANT_SMALLEST
+# undef ERTS_RBT_WANT_LARGEST
+# undef ERTS_RBT_WANT_FOREACH
+# undef ERTS_RBT_WANT_FOREACH_DESTROY
+# undef ERTS_RBT_WANT_FOREACH_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_DESTROY_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_SMALL
+# undef ERTS_RBT_WANT_FOREACH_LARGE
+# undef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY
+# undef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY
+# undef ERTS_RBT_WANT_FOREACH_SMALL_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_LARGE_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_SMALL_DESTROY_YIELDING
+# undef ERTS_RBT_WANT_FOREACH_LARGE_DESTROY_YIELDING
+# undef ERTS_RBT_WANT_DEBUG_PRINT
+#endif
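The header above is a parameterized implementation: a consumer defines the configuration macros and the ERTS_RBT_WANT_* selectors before including it, and only the selected functions are generated under the chosen prefix (the ERTS_RBT_UNDEF block makes the header re-instantiable within one translation unit). A minimal, hypothetical instantiation sketch; the node type, accessors and prefix below are illustrative and not taken from this commit:

typedef struct my_node {
    struct my_node *parent, *left, *right;
    int red;                 /* color flag */
    Uint key;
} my_node;

#define ERTS_RBT_PREFIX                 mytree
#define ERTS_RBT_T                      my_node
#define ERTS_RBT_KEY_T                  Uint
#define ERTS_RBT_FLAGS_T                int
#define ERTS_RBT_INIT_EMPTY_TNODE(T)    \
    ((T)->parent = (T)->left = (T)->right = NULL, (T)->red = 0)
#define ERTS_RBT_IS_RED(T)              ((T)->red)
#define ERTS_RBT_SET_RED(T)             ((T)->red = 1)
#define ERTS_RBT_IS_BLACK(T)            (!(T)->red)
#define ERTS_RBT_SET_BLACK(T)           ((T)->red = 0)
#define ERTS_RBT_GET_FLAGS(T)           ((T)->red)
#define ERTS_RBT_SET_FLAGS(T, F)        ((T)->red = (F))
#define ERTS_RBT_GET_PARENT(T)          ((T)->parent)
#define ERTS_RBT_SET_PARENT(T, P)       ((T)->parent = (P))
#define ERTS_RBT_GET_RIGHT(T)           ((T)->right)
#define ERTS_RBT_SET_RIGHT(T, R)        ((T)->right = (R))
#define ERTS_RBT_GET_LEFT(T)            ((T)->left)
#define ERTS_RBT_SET_LEFT(T, L)         ((T)->left = (L))
#define ERTS_RBT_GET_KEY(T)             ((T)->key)
#define ERTS_RBT_IS_LT(KX, KY)          ((KX) < (KY))
#define ERTS_RBT_IS_EQ(KX, KY)          ((KX) == (KY))
#define ERTS_RBT_WANT_INSERT
#define ERTS_RBT_WANT_LOOKUP
#define ERTS_RBT_WANT_DELETE
#define ERTS_RBT_UNDEF
#include "erl_rbtree.h"

/* Generated API: mytree_rbt_insert(&root, n), mytree_rbt_lookup(root, key),
 * mytree_rbt_delete(&root, n). */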
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 4c9b00d2ee..78e0964e8b 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -1360,6 +1360,7 @@ erts_thr_progress_fatal_error_wait(SWord timeout) {
erts_aint32_t bc;
SWord time_left = timeout;
ErtsMonotonicTime timeout_time;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
/*
* Counting poll intervals may give us a too long timeout
@@ -1367,7 +1368,7 @@ erts_thr_progress_fatal_error_wait(SWord timeout) {
* this. In case we havn't got time correction this may
* however fail too...
*/
- timeout_time = erts_get_monotonic_time();
+ timeout_time = erts_get_monotonic_time(esdp);
timeout_time += ERTS_MSEC_TO_MONOTONIC((ErtsMonotonicTime) timeout);
while (1) {
@@ -1378,7 +1379,7 @@ erts_thr_progress_fatal_error_wait(SWord timeout) {
break; /* Succefully blocked all managed threads */
if (time_left <= 0)
break; /* Timeout */
- if (timeout_time <= erts_get_monotonic_time())
+ if (timeout_time <= erts_get_monotonic_time(esdp))
break; /* Timeout */
}
}
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index cb7764addc..4560cd23af 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -20,73 +20,39 @@
#ifndef ERL_TIME_H__
#define ERL_TIME_H__
+/* timer wheel size NEEDS to be a power of 2 */
+#ifdef SMALL_MEMORY
+#define ERTS_TIW_SIZE (1 << 13)
+#else
+#define ERTS_TIW_SIZE (1 << 16)
+#endif
+
#if defined(DEBUG) || 0
#define ERTS_TIME_ASSERT(B) ERTS_ASSERT(B)
#else
#define ERTS_TIME_ASSERT(B) ((void) 1)
#endif
+typedef enum {
+ ERTS_NO_TIME_WARP_MODE,
+ ERTS_SINGLE_TIME_WARP_MODE,
+ ERTS_MULTI_TIME_WARP_MODE
+} ErtsTimeWarpMode;
+
typedef struct ErtsTimerWheel_ ErtsTimerWheel;
-typedef erts_atomic64_t * ErtsNextTimeoutRef;
-extern ErtsTimerWheel *erts_default_timer_wheel;
+typedef ErtsMonotonicTime * ErtsNextTimeoutRef;
extern SysTimeval erts_first_emu_time;
-/*
-** Timer entry:
-*/
-typedef struct erl_timer {
- struct erl_timer* next; /* next entry tiw slot or chain */
- struct erl_timer* prev; /* prev entry tiw slot or chain */
- Uint slot; /* slot in timer wheel */
- erts_smp_atomic_t wheel;
- ErtsMonotonicTime timeout_pos; /* Timeout in absolute clock ticks */
- /* called when timeout */
- void (*timeout)(void*);
- /* called when cancel (may be NULL) */
- void (*cancel)(void*);
- void* arg; /* argument to timeout/cancel procs */
-} ErlTimer;
-
-typedef void (*ErlTimeoutProc)(void*);
-typedef void (*ErlCancelProc)(void*);
-
-#ifdef ERTS_SMP
-/*
- * Process and port timer
- */
-typedef union ErtsSmpPTimer_ ErtsSmpPTimer;
-union ErtsSmpPTimer_ {
- struct {
- ErlTimer tm;
- Eterm id;
- void (*timeout_func)(void*);
- ErtsSmpPTimer **timer_ref;
- Uint32 flags;
- } timer;
- ErtsSmpPTimer *next;
-};
-
-void erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
- Eterm id,
- ErlTimeoutProc timeout_func,
- Uint timeout);
-void erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer);
-#endif
void erts_monitor_time_offset(Eterm id, Eterm ref);
int erts_demonitor_time_offset(Eterm ref);
+int erts_init_time_sup(int, ErtsTimeWarpMode);
void erts_late_init_time_sup(void);
-/* timer-wheel api */
-
-ErtsTimerWheel *erts_create_timer_wheel(int);
ErtsNextTimeoutRef erts_get_next_timeout_reference(ErtsTimerWheel *);
void erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode);
-void erts_set_timer(ErlTimer*, ErlTimeoutProc, ErlCancelProc, void*, Uint);
-void erts_cancel_timer(ErlTimer*);
-Uint erts_time_left(ErlTimer *);
void erts_bump_timers(ErtsTimerWheel *, ErtsMonotonicTime);
Uint erts_timer_wheel_memory_size(void);
@@ -94,27 +60,6 @@ Uint erts_timer_wheel_memory_size(void);
void erts_p_slpq(void);
#endif
-ErtsMonotonicTime erts_check_next_timeout_time(ErtsTimerWheel *,
- ErtsMonotonicTime);
-
-ERTS_GLB_INLINE void erts_init_timer(ErlTimer *p);
-ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void erts_init_timer(ErlTimer *p)
-{
- erts_smp_atomic_init_nob(&p->wheel, (erts_aint_t) NULL);
-}
-
-ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_tmo_ref)
-{
- return (ErtsMonotonicTime) erts_atomic64_read_acqb((erts_atomic64_t *) nxt_tmo_ref);
-}
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-
/* time_sup */
#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME_CPU_TIME))
@@ -147,6 +92,7 @@ ErtsTimeOffsetState erts_time_offset_state(void);
ErtsTimeOffsetState erts_finalize_time_offset(void);
struct process;
Eterm erts_get_monotonic_start_time(struct process *c_p);
+Eterm erts_get_monotonic_end_time(struct process *c_p);
Eterm erts_monotonic_time_source(struct process*c_p);
Eterm erts_system_time_source(struct process*c_p);
@@ -156,8 +102,20 @@ Eterm erts_system_time_source(struct process*c_p);
#define ERTS_CLKTCK_RESOLUTION (erts_time_sup__.r.o.clktck_resolution)
#endif
+#define ERTS_TIMER_WHEEL_MSEC (ERTS_TIW_SIZE/(ERTS_CLKTCK_RESOLUTION/1000))
+
struct erts_time_sup_read_only__ {
ErtsMonotonicTime monotonic_time_unit;
+#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+ ErtsMonotonicTime start;
+ struct {
+ ErtsMonotonicTime native;
+ ErtsMonotonicTime nsec;
+ ErtsMonotonicTime usec;
+ ErtsMonotonicTime msec;
+ ErtsMonotonicTime sec;
+ } start_offset;
+#endif
#ifndef SYS_CLOCK_RESOLUTION
ErtsMonotonicTime clktck_resolution;
#endif
@@ -213,6 +171,16 @@ erts_time_unit_conversion(Uint64 value,
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+/*
+ * Range of monotonic time internally
+ */
+
+#define ERTS_MONOTONIC_BEGIN \
+ ERTS_MONOTONIC_TIME_UNIT
+#define ERTS_MONOTONIC_END \
+ ((ERTS_MONOTONIC_TIME_MAX / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT)
+
#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
/*
@@ -224,9 +192,6 @@ erts_time_unit_conversion(Uint64 value,
# error Compile time time unit needs to be at least 1000000
#endif
-#define ERTS_MONOTONIC_TIME_UNIT \
- ((ErtsMonotonicTime) ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT)
-
#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT == 1000*1000*1000
/* Nano-second time unit */
@@ -257,6 +222,66 @@ erts_time_unit_conversion(Uint64 value,
#error Missing implementation for monotonic time unit
#endif
+#define ERTS_MONOTONIC_TIME_UNIT \
+ ((ErtsMonotonicTime) ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT)
+
+/*
+ * NOTE! ERTS_MONOTONIC_TIME_START_EXTERNAL *needs* to be a multiple
+ * of ERTS_MONOTONIC_TIME_UNIT.
+ */
+
+#ifdef ARCH_32
+/*
+ * Want to use a big-num of arity 2 as long as possible (584 years
+ * in the nano-second time unit case).
+ */
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL \
+ (((((((ErtsMonotonicTime) 1) << 32)-1) \
+ / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT) \
+ + ERTS_MONOTONIC_TIME_UNIT)
+
+#else /* ARCH_64 */
+
+#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT <= 10*1000*1000
+
+/*
+ * Using a microsecond time unit or lower. Start at zero since
+ * time will remain an immediate for a very long time anyway
+ * (1827 years in the 10 microsecond case)...
+ */
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL ((ErtsMonotonicTime) 0)
+
+#else /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 10*1000*1000 */
+
+/*
+ * Want to use an immediate as long as possible (36 years in the
+ * nano-second time unit case).
+*/
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL \
+ ((((ErtsMonotonicTime) MIN_SMALL) \
+ / ERTS_MONOTONIC_TIME_UNIT) \
+ * ERTS_MONOTONIC_TIME_UNIT)
+
+#endif /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 10*1000*1000 */
+
+#endif /* ARCH_64 */
+
+/*
+ * Offsets from internal monotonic time to external monotonic time
+ */
+
+#define ERTS_MONOTONIC_OFFSET_NATIVE \
+ (ERTS_MONOTONIC_TIME_START_EXTERNAL - ERTS_MONOTONIC_BEGIN)
+#define ERTS_MONOTONIC_OFFSET_NSEC \
+ ERTS_MONOTONIC_TO_NSEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+#define ERTS_MONOTONIC_OFFSET_USEC \
+ ERTS_MONOTONIC_TO_USEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+#define ERTS_MONOTONIC_OFFSET_MSEC \
+ ERTS_MONOTONIC_TO_MSEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+#define ERTS_MONOTONIC_OFFSET_SEC \
+ ERTS_MONOTONIC_TO_SEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
+
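Put differently, the external Erlang monotonic time is the internal monotonic time shifted by a constant: internal time starts at ERTS_MONOTONIC_BEGIN (one time unit), and the offsets above are defined as START_EXTERNAL - BEGIN, so conversion is a single addition per unit. A tiny sketch (external_native/internal_native are illustrative names only):

/* external_native = internal_native + ERTS_MONOTONIC_OFFSET_NATIVE,
 * so internal == ERTS_MONOTONIC_BEGIN maps to
 * external == ERTS_MONOTONIC_TIME_START_EXTERNAL, as intended. */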
#define ERTS_MONOTONIC_TO_CLKTCKS__(MON) \
((MON) / (ERTS_MONOTONIC_TIME_UNIT/ERTS_CLKTCK_RESOLUTION))
#define ERTS_CLKTCKS_TO_MONOTONIC__(TCKS) \
@@ -264,8 +289,23 @@ erts_time_unit_conversion(Uint64 value,
#else /* !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+/*
+ * Initialized in erts_init_sys_time_sup()
+ */
#define ERTS_MONOTONIC_TIME_UNIT (erts_time_sup__.r.o.monotonic_time_unit)
+/*
+ * Offsets from internal monotonic time to external monotonic time
+ *
+ * Initialized in erts_init_time_sup()...
+ */
+#define ERTS_MONOTONIC_TIME_START_EXTERNAL (erts_time_sup__.r.o.start)
+#define ERTS_MONOTONIC_OFFSET_NATIVE (erts_time_sup__.r.o.start_offset.native)
+#define ERTS_MONOTONIC_OFFSET_NSEC (erts_time_sup__.r.o.start_offset.nsec)
+#define ERTS_MONOTONIC_OFFSET_USEC (erts_time_sup__.r.o.start_offset.usec)
+#define ERTS_MONOTONIC_OFFSET_MSEC (erts_time_sup__.r.o.start_offset.msec)
+#define ERTS_MONOTONIC_OFFSET_SEC (erts_time_sup__.r.o.start_offset.sec)
+
#define ERTS_CONV_FROM_MON_UNIT___(M, TO) \
((ErtsMonotonicTime) \
erts_time_unit_conversion((Uint64) (M), \
@@ -303,6 +343,10 @@ erts_time_unit_conversion(Uint64 value,
#endif /* !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
+#define ERTS_MONOTONIC_TIME_END_EXTERNAL \
+ (ERTS_MONOTONIC_TIME_START_EXTERNAL \
+ + (ERTS_MONOTONIC_END - ERTS_MONOTONIC_BEGIN))
+
#define ERTS_MSEC_TO_CLKTCKS__(MON) \
((MON) * (ERTS_CLKTCK_RESOLUTION/1000))
#define ERTS_CLKTCKS_TO_MSEC__(TCKS) \
@@ -348,3 +392,65 @@ erts_time_unit_conversion(Uint64 value,
ERTS_CLKTCKS_TO_MSEC__((X)))
#endif /* ERL_TIME_H__ */
+
+/* timer-wheel api */
+#if defined(ERTS_WANT_TIMER_WHEEL_API) && !defined(ERTS_GOT_TIMER_WHEEL_API)
+#define ERTS_GOT_TIMER_WHEEL_API
+
+#include "erl_thr_progress.h"
+#include "erl_process.h"
+
+void erts_sched_init_time_sup(ErtsSchedulerData *esdp);
+
+
+#define ERTS_TWHEEL_SLOT_AT_ONCE -1
+#define ERTS_TWHEEL_SLOT_INACTIVE -2
+
+/*
+** Timer entry:
+*/
+typedef struct erl_timer {
+ struct erl_timer* next; /* next entry tiw slot or chain */
+ struct erl_timer* prev; /* prev entry tiw slot or chain */
+ union {
+ struct {
+ void (*timeout)(void*); /* called when timeout */
+ void (*cancel)(void*); /* called when cancel (may be NULL) */
+ void* arg; /* argument to timeout/cancel procs */
+ } func;
+ ErtsThrPrgrLaterOp cleanup;
+ } u;
+ ErtsMonotonicTime timeout_pos; /* Timeout in absolute clock ticks */
+ int slot;
+} ErtsTWheelTimer;
+
+typedef void (*ErlTimeoutProc)(void*);
+typedef void (*ErlCancelProc)(void*);
+
+void erts_twheel_set_timer(ErtsTimerWheel *tiw,
+ ErtsTWheelTimer *p, ErlTimeoutProc timeout,
+ ErlCancelProc cancel, void *arg,
+ ErtsMonotonicTime timeout_pos);
+void erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p);
+ErtsTimerWheel *erts_create_timer_wheel(ErtsSchedulerData *esdp);
+
+ErtsMonotonicTime erts_check_next_timeout_time(ErtsSchedulerData *);
+
+ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p);
+ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void erts_twheel_init_timer(ErtsTWheelTimer *p)
+{
+ p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
+}
+
+ERTS_GLB_INLINE ErtsMonotonicTime erts_next_timeout_time(ErtsNextTimeoutRef nxt_tmo_ref)
+{
+ return *((ErtsMonotonicTime *) nxt_tmo_ref);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* timer wheel api */
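The timer-wheel API above replaces the old ErlTimer interface: a timer is embedded in its owner as an ErtsTWheelTimer, initialized once, and armed against a specific wheel with an absolute timeout position in clock ticks. A minimal sketch, assuming the caller has a scheduler's wheel and has already computed the timeout position (my_timeout, my_tmr, my_state and arm_example are illustrative names):

static void my_timeout(void *arg)
{
    /* called from the wheel when timeout_pos is reached */
}

static ErtsTWheelTimer my_tmr;

void arm_example(ErtsSchedulerData *esdp, void *my_state,
                 ErtsMonotonicTime timeout_pos)
{
    erts_twheel_init_timer(&my_tmr);           /* slot = INACTIVE */
    erts_twheel_set_timer(esdp->timer_wheel, &my_tmr,
                          my_timeout, NULL,     /* no cancel callback */
                          my_state, timeout_pos);
    /* erts_twheel_cancel_timer(esdp->timer_wheel, &my_tmr) would
     * disarm it again. */
}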
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 9a7466ff48..e550c999b8 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -30,6 +30,8 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
static erts_smp_mtx_t erts_timeofday_mtx;
static erts_smp_mtx_t erts_get_time_mtx;
@@ -57,82 +59,13 @@ static int time_sup_initialized = 0;
static void
schedule_send_time_offset_changed_notifications(ErtsMonotonicTime new_offset);
-/*
- * NOTE! ERTS_MONOTONIC_TIME_START *need* to be a multiple
- * of ERTS_MONOTONIC_TIME_UNIT.
- */
-
-#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
-
-#ifdef ARCH_32
-/*
- * Want to use a big-num of arity 2 as long as possible (584 years
- * in the nano-second time unit case).
- */
-#define ERTS_MONOTONIC_TIME_START \
- (((((((ErtsMonotonicTime) 1) << 32)-1) \
- / ERTS_MONOTONIC_TIME_UNIT) \
- * ERTS_MONOTONIC_TIME_UNIT) \
- + ERTS_MONOTONIC_TIME_UNIT)
-
-#else /* ARCH_64 */
-
-#if ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT <= 10*1000*1000
-
-/*
- * Using micro second time unit or lower. Start at zero since
- * time will remain an immediate for a very long time anyway
- * (1827 years in the 10 micro second case)...
- */
-#define ERTS_MONOTONIC_TIME_START ((ErtsMonotonicTime) 0)
-
-#else /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 1000*1000 */
-
-/*
- * Want to use an immediate as long as possible (36 years in the
- * nano-second time unit case).
-*/
-#define ERTS_MONOTONIC_TIME_START \
- ((((ErtsMonotonicTime) MIN_SMALL) \
- / ERTS_MONOTONIC_TIME_UNIT) \
- * ERTS_MONOTONIC_TIME_UNIT)
-
-#endif /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT > 1000*1000 */
-
-#endif /* ARCH_64 */
-
-#define ERTS_MONOTONIC_OFFSET_NATIVE \
- (ERTS_MONOTONIC_TIME_START - ERTS_MONOTONIC_TIME_UNIT)
-#define ERTS_MONOTONIC_OFFSET_NSEC \
- ERTS_MONOTONIC_TO_NSEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
-#define ERTS_MONOTONIC_OFFSET_USEC \
- ERTS_MONOTONIC_TO_USEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
-#define ERTS_MONOTONIC_OFFSET_MSEC \
- ERTS_MONOTONIC_TO_MSEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
-#define ERTS_MONOTONIC_OFFSET_SEC \
- ERTS_MONOTONIC_TO_SEC__(ERTS_MONOTONIC_OFFSET_NATIVE)
-
-#else /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
-
-/*
- * Initialized in erts_init_time_sup()...
- */
-
-#define ERTS_MONOTONIC_TIME_START (time_sup.r.o.start)
-#define ERTS_MONOTONIC_OFFSET_NATIVE (time_sup.r.o.start_offset.native)
-#define ERTS_MONOTONIC_OFFSET_NSEC (time_sup.r.o.start_offset.nsec)
-#define ERTS_MONOTONIC_OFFSET_USEC (time_sup.r.o.start_offset.usec)
-#define ERTS_MONOTONIC_OFFSET_MSEC (time_sup.r.o.start_offset.msec)
-#define ERTS_MONOTONIC_OFFSET_SEC (time_sup.r.o.start_offset.sec)
-
-#endif /* ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT */
-
struct time_sup_read_only__ {
ErtsMonotonicTime (*get_time)(void);
int correction;
ErtsTimeWarpMode warp_mode;
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
ErtsMonotonicTime moffset;
+ int os_corrected_monotonic_time;
int os_monotonic_time_disable;
char *os_monotonic_time_func;
char *os_monotonic_time_clock_id;
@@ -145,26 +78,20 @@ struct time_sup_read_only__ {
int os_system_time_locked;
Uint64 os_system_time_resolution;
Uint64 os_system_time_extended;
-#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
- ErtsMonotonicTime start;
- struct {
- ErtsMonotonicTime native;
- ErtsMonotonicTime nsec;
- ErtsMonotonicTime usec;
- ErtsMonotonicTime msec;
- ErtsMonotonicTime sec;
- } start_offset;
-#endif
struct {
ErtsMonotonicTime large_diff;
ErtsMonotonicTime small_diff;
} adj;
+ struct {
+ ErtsMonotonicTime error;
+ ErtsMonotonicTime resolution;
+ int intervals;
+ int use_avg;
+ } drift_adj;
};
typedef struct {
-#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
ErtsMonotonicTime drift; /* Correction for os monotonic drift */
-#endif
ErtsMonotonicTime error; /* Correction for error between system times */
} ErtsMonotonicCorrection;
@@ -174,7 +101,7 @@ typedef struct {
ErtsMonotonicCorrection correction;
} ErtsMonotonicCorrectionInstance;
-#define ERTS_DRIFT_INTERVALS 5
+#define ERTS_MAX_DRIFT_INTERVALS 50
typedef struct {
struct {
struct {
@@ -185,7 +112,7 @@ typedef struct {
ErtsMonotonicTime sys;
ErtsMonotonicTime mon;
} time;
- } intervals[ERTS_DRIFT_INTERVALS];
+ } intervals[ERTS_MAX_DRIFT_INTERVALS];
struct {
ErtsMonotonicTime sys;
ErtsMonotonicTime mon;
@@ -197,9 +124,7 @@ typedef struct {
typedef struct {
ErtsMonotonicCorrectionInstance prev;
ErtsMonotonicCorrectionInstance curr;
-#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
ErtsMonotonicDriftData drift;
-#endif
ErtsMonotonicTime last_check;
int short_check_interval;
} ErtsMonotonicCorrectionData;
@@ -208,15 +133,16 @@ struct time_sup_infrequently_changed__ {
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
struct {
erts_smp_rwmtx_t rwmtx;
- ErlTimer timer;
+ ErtsTWheelTimer timer;
ErtsMonotonicCorrectionData cdata;
} parmon;
ErtsMonotonicTime minit;
#endif
- int finalized_offset;
ErtsSystemTime sinit;
ErtsMonotonicTime not_corrected_moffset;
- erts_atomic64_t offset;
+ erts_smp_atomic64_t offset;
+ ErtsMonotonicTime shadow_offset;
+ erts_smp_atomic32_t preliminary_offset;
};
struct time_sup_frequently_changed__ {
@@ -254,21 +180,32 @@ erts_get_approx_time(void)
static ERTS_INLINE void
init_time_offset(ErtsMonotonicTime offset)
{
- erts_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset);
+ erts_smp_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset);
}
static ERTS_INLINE void
set_time_offset(ErtsMonotonicTime offset)
{
- erts_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset);
+ erts_smp_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset);
}
static ERTS_INLINE ErtsMonotonicTime
get_time_offset(void)
{
- return (ErtsMonotonicTime) erts_atomic64_read_acqb(&time_sup.inf.c.offset);
+ return (ErtsMonotonicTime) erts_smp_atomic64_read_acqb(&time_sup.inf.c.offset);
}
+static ERTS_INLINE void
+update_last_mtime(ErtsSchedulerData *esdp, ErtsMonotonicTime mtime)
+{
+ if (!esdp)
+ esdp = erts_get_scheduler_data();
+ if (esdp) {
+ ASSERT(mtime >= esdp->last_monotonic_time);
+ esdp->last_monotonic_time = mtime;
+ esdp->check_time_reds = 0;
+ }
+}
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
@@ -290,16 +227,38 @@ get_time_offset(void)
#define ERTS_TIME_DRIFT_MAX_ADJ_DIFF ERTS_USEC_TO_MONOTONIC(50)
#define ERTS_TIME_DRIFT_MIN_ADJ_DIFF ERTS_USEC_TO_MONOTONIC(5)
+/*
+ * Maximum drift of the OS monotonic clock expected.
+ *
+ * We use 1 milli second per second. If the monotonic
+ * clock drifts more than this we will fail to adjust for
+ * drift, and error correction will kick in instead.
+ * If it is larger than this, one could argue that the
+ * primitive is to poor to be used...
+ */
+#define ERTS_MAX_MONOTONIC_DRIFT ERTS_MSEC_TO_MONOTONIC(1)
+
+/*
+ * We assume that precision is 32 times worse than the
+ * resolution. This is a wild guess, but there is no
+ * practical way to determine the actual precision.
+ */
+#define ERTS_ASSUMED_PRECISION_DROP 32
+
+#define ERTS_MIN_MONOTONIC_DRIFT_MEASUREMENT \
+ (ERTS_SHORT_TIME_CORRECTION_CHECK - 2*ERTS_MAX_MONOTONIC_DRIFT)
+
+
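To make the bound concrete, the drift checks further down multiply this per-second limit by the length of the measurement interval, so the tolerated divergence between OS monotonic and OS system time grows with the interval. A hedged numeric sketch (the 10 ms error margin is purely illustrative; the real value comes from drift_adj.error):

/* mtime_diff  = 60 s of OS monotonic time
 * max_drift   = ERTS_MAX_MONOTONIC_DRIFT * 60           = 60 ms
 * smtime_diff = stime_diff - mtime_diff                 = 200 ms
 * 200 ms > 10 ms (drift_adj.error) + 60 ms  =>  treated as a system
 * time leap: the drift intervals are marked dirty and short check
 * intervals begin, instead of feeding the sample into drift correction.
 */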
static ERTS_INLINE ErtsMonotonicTime
calc_corrected_erl_mtime(ErtsMonotonicTime os_mtime,
ErtsMonotonicCorrectionInstance *cip,
- ErtsMonotonicTime *os_mdiff_p)
+ ErtsMonotonicTime *os_mdiff_p,
+ int os_drift_corrected)
{
ErtsMonotonicTime erl_mtime, diff = os_mtime - cip->os_mtime;
ERTS_TIME_ASSERT(diff >= 0);
-#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
- diff += (cip->correction.drift*diff)/ERTS_MONOTONIC_TIME_UNIT;
-#endif
+ if (!os_drift_corrected)
+ diff += (cip->correction.drift*diff)/ERTS_MONOTONIC_TIME_UNIT;
erl_mtime = cip->erl_mtime;
erl_mtime += diff;
erl_mtime += cip->correction.error*(diff/ERTS_TCORR_ERR_UNIT);
@@ -308,7 +267,8 @@ calc_corrected_erl_mtime(ErtsMonotonicTime os_mtime,
return erl_mtime;
}
-static ErtsMonotonicTime get_corrected_time(void)
+static ERTS_INLINE ErtsMonotonicTime
+read_corrected_time(int os_drift_corrected)
{
ErtsMonotonicTime os_mtime;
ErtsMonotonicCorrectionData cdata;
@@ -331,7 +291,18 @@ static ErtsMonotonicTime get_corrected_time(void)
cip = &cdata.prev;
}
- return calc_corrected_erl_mtime(os_mtime, cip, NULL);
+ return calc_corrected_erl_mtime(os_mtime, cip, NULL,
+ os_drift_corrected);
+}
+
+static ErtsMonotonicTime get_os_drift_corrected_time(void)
+{
+ return read_corrected_time(!0);
+}
+
+static ErtsMonotonicTime get_corrected_time(void)
+{
+ return read_corrected_time(0);
}
#ifdef ERTS_TIME_CORRECTION_PRINT
@@ -352,66 +323,53 @@ print_correction(int change,
usec_sdiff = ERTS_MONOTONIC_TO_USEC(sdiff);
if (!change)
- fprintf(stderr,
- "sdiff = %lld usec : [ec=%lld ppm, dc=%lld ppb] : "
- "tmo = %lld msec\r\n",
- (long long) usec_sdiff,
- (long long) (1000000*old_ecorr) / ERTS_TCORR_ERR_UNIT,
- (long long) (1000000000*old_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
- (long long) tmo);
+ erts_fprintf(stderr,
+ "sdiff = %b64d usec : [ec=%b64d ppm, dc=%b64d ppb] : "
+ "tmo = %bpu msec\r\n",
+ usec_sdiff,
+ (1000000*old_ecorr) / ERTS_TCORR_ERR_UNIT,
+ (1000000000*old_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
+ tmo);
else
- fprintf(stderr,
- "sdiff = %lld usec : [ec=%lld ppm, dc=%lld ppb] "
- "-> [ec=%lld ppm, dc=%lld ppb] : tmo = %lld msec\r\n",
- (long long) usec_sdiff,
- (long long) (1000000*old_ecorr) / ERTS_TCORR_ERR_UNIT,
- (long long) (1000000000*old_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
- (long long) (1000000*new_ecorr) / ERTS_TCORR_ERR_UNIT,
- (long long) (1000000000*new_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
- (long long) tmo);
-
+ erts_fprintf(stderr,
+ "sdiff = %b64d usec : [ec=%b64d ppm, dc=%b64d ppb] "
+ "-> [ec=%b64d ppm, dc=%b64d ppb] : tmo = %bpu msec\r\n",
+ usec_sdiff,
+ (1000000*old_ecorr) / ERTS_TCORR_ERR_UNIT,
+ (1000000000*old_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
+ (1000000*new_ecorr) / ERTS_TCORR_ERR_UNIT,
+ (1000000000*new_dcorr) / ERTS_MONOTONIC_TIME_UNIT,
+ tmo);
}
#endif
+static ERTS_INLINE ErtsMonotonicTime
+get_timeout_pos(ErtsMonotonicTime now, ErtsMonotonicTime tmo)
+{
+ ErtsMonotonicTime tpos;
+ tpos = ERTS_MONOTONIC_TO_CLKTCKS(now - 1);
+ tpos += ERTS_MSEC_TO_CLKTCKS(tmo);
+ tpos += 1;
+ return tpos;
+}
+
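get_timeout_pos above rounds the current monotonic time up to the next clock tick before adding the requested timeout, so a timer can never fire earlier than asked for. A worked instance (the 1 ms tick is an example resolution, not a fixed value):

/* With 1 ms clock ticks:
 *   now = 10.4 ms, tmo = 5 ms
 *   ERTS_MONOTONIC_TO_CLKTCKS(now - 1) = 10   (floor)
 *   + ERTS_MSEC_TO_CLKTCKS(5)          = 15
 *   + 1                                = 16   => fires at >= 16 ms,
 * i.e. ceil(now in ticks) + tmo in ticks.
 */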
static void
-check_time_correction(void *unused)
+check_time_correction(void *vesdp)
{
-#ifndef ERTS_TIME_CORRECTION_PRINT
-# define ERTS_PRINT_CORRECTION
-#else
-# ifdef ERTS_HAVE_CORRECTED_OS_MONOTONIC
-# define ERTS_PRINT_CORRECTION \
- print_correction(set_new_correction, \
- sdiff, \
- cip->correction.error, \
- 0, \
- new_correction.error, \
- 0, \
- timeout)
-# else
-# define ERTS_PRINT_CORRECTION \
- print_correction(set_new_correction, \
- sdiff, \
- cip->correction.error, \
- cip->correction.drift, \
- new_correction.error, \
- new_correction.drift, \
- timeout)
-# endif
-#endif
+ int init_drift_adj = !vesdp;
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
ErtsMonotonicCorrectionData cdata;
ErtsMonotonicCorrection new_correction;
ErtsMonotonicCorrectionInstance *cip;
ErtsMonotonicTime mdiff, sdiff, os_mtime, erl_mtime, os_stime,
- erl_stime, time_offset;
+ erl_stime, time_offset, timeout_pos;
Uint timeout;
- int set_new_correction, begin_short_intervals = 0;
+ int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time;
+ int set_new_correction = 0, begin_short_intervals = 0;
erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
- ASSERT(time_sup.inf.c.finalized_offset);
-
erts_os_times(&os_mtime, &os_stime);
cdata = time_sup.inf.c.parmon.cdata;
@@ -423,14 +381,23 @@ check_time_correction(void *unused)
"OS monotonic time stepped backwards\n");
cip = &cdata.curr;
- erl_mtime = calc_corrected_erl_mtime(os_mtime, cip, &mdiff);
+ erl_mtime = calc_corrected_erl_mtime(os_mtime, cip, &mdiff,
+ os_drift_corrected);
time_offset = get_time_offset();
erl_stime = erl_mtime + time_offset;
sdiff = erl_stime - os_stime;
+ if (time_sup.inf.c.shadow_offset) {
+ ERTS_TIME_ASSERT(time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE);
+ if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ sdiff += time_sup.inf.c.shadow_offset;
+ else
+ time_sup.inf.c.shadow_offset = 0;
+ }
+
new_correction = cip->correction;
-
+
if (time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE
&& (sdiff < -2*time_sup.r.o.adj.small_diff
|| 2*time_sup.r.o.adj.small_diff < sdiff)) {
@@ -440,9 +407,24 @@ check_time_correction(void *unused)
set_time_offset(time_offset);
schedule_send_time_offset_changed_notifications(time_offset);
begin_short_intervals = 1;
- if (cdata.curr.correction.error == 0)
- set_new_correction = 0;
- else {
+ if (cdata.curr.correction.error != 0) {
+ set_new_correction = 1;
+ new_correction.error = 0;
+ }
+ }
+ else if ((time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE
+ && erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ && (sdiff < -2*time_sup.r.o.adj.small_diff
+ || 2*time_sup.r.o.adj.small_diff < sdiff)) {
+ /*
+	 * System time diff exceeded limits; change shadow offset
+ * and let OS system time leap away from Erlang system
+ * time.
+ */
+ time_sup.inf.c.shadow_offset -= sdiff;
+ sdiff = 0;
+ begin_short_intervals = 1;
+ if (cdata.curr.correction.error != 0) {
set_new_correction = 1;
new_correction.error = 0;
}
@@ -462,16 +444,11 @@ check_time_correction(void *unused)
else
new_correction.error = -ERTS_TCORR_ERR_SMALL_ADJ;
}
- else {
- set_new_correction = 0;
- }
}
else if (cdata.curr.correction.error > 0) {
if (sdiff < 0) {
- if (cdata.curr.correction.error == ERTS_TCORR_ERR_LARGE_ADJ
- || -time_sup.r.o.adj.large_diff <= sdiff)
- set_new_correction = 0;
- else {
+ if (cdata.curr.correction.error != ERTS_TCORR_ERR_LARGE_ADJ
+ && sdiff < -time_sup.r.o.adj.large_diff) {
new_correction.error = ERTS_TCORR_ERR_LARGE_ADJ;
set_new_correction = 1;
}
@@ -490,14 +467,11 @@ check_time_correction(void *unused)
}
else /* if (cdata.curr.correction.error < 0) */ {
if (0 < sdiff) {
- if (cdata.curr.correction.error == -ERTS_TCORR_ERR_LARGE_ADJ
- || sdiff <= time_sup.r.o.adj.large_diff)
- set_new_correction = 0;
- else {
+ if (cdata.curr.correction.error != -ERTS_TCORR_ERR_LARGE_ADJ
+ && time_sup.r.o.adj.large_diff < sdiff) {
new_correction.error = -ERTS_TCORR_ERR_LARGE_ADJ;
set_new_correction = 1;
}
- set_new_correction = 0;
}
else if (sdiff < -time_sup.r.o.adj.small_diff) {
set_new_correction = 1;
@@ -512,8 +486,7 @@ check_time_correction(void *unused)
}
}
-#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
- {
+ if (!os_drift_corrected) {
ErtsMonotonicDriftData *ddp = &time_sup.inf.c.parmon.cdata.drift;
int ix = ddp->ix;
ErtsMonotonicTime mtime_diff, old_os_mtime;
@@ -521,25 +494,26 @@ check_time_correction(void *unused)
old_os_mtime = ddp->intervals[ix].time.mon;
mtime_diff = os_mtime - old_os_mtime;
- if (mtime_diff >= ERTS_SEC_TO_MONOTONIC(10)) {
+ if ((mtime_diff >= ERTS_MIN_MONOTONIC_DRIFT_MEASUREMENT)
+ | init_drift_adj) {
ErtsMonotonicTime drift_adj, drift_adj_diff, old_os_stime,
- stime_diff, mtime_acc, stime_acc, avg_drift_adj;
+ smtime_diff, stime_diff, mtime_acc, stime_acc,
+ avg_drift_adj, max_drift;
old_os_stime = ddp->intervals[ix].time.sys;
mtime_acc = ddp->acc.mon;
stime_acc = ddp->acc.sys;
- avg_drift_adj = (((stime_acc - mtime_acc)*ERTS_MONOTONIC_TIME_UNIT)
+ avg_drift_adj = (((stime_acc - mtime_acc)
+ * ERTS_MONOTONIC_TIME_UNIT)
/ mtime_acc);
mtime_diff = os_mtime - old_os_mtime;
stime_diff = os_stime - old_os_stime;
- drift_adj = (((stime_diff - mtime_diff)*ERTS_MONOTONIC_TIME_UNIT)
- / mtime_diff);
-
+ smtime_diff = stime_diff - mtime_diff;
ix++;
- if (ix >= ERTS_DRIFT_INTERVALS)
+ if (ix >= time_sup.r.o.drift_adj.intervals)
ix = 0;
mtime_acc -= ddp->intervals[ix].diff.mon;
mtime_acc += mtime_diff;
@@ -555,24 +529,50 @@ check_time_correction(void *unused)
ddp->acc.mon = mtime_acc;
ddp->acc.sys = stime_acc;
- drift_adj_diff = avg_drift_adj - drift_adj;
- if (drift_adj_diff < -ERTS_TIME_DRIFT_MAX_ADJ_DIFF
- || ERTS_TIME_DRIFT_MAX_ADJ_DIFF < drift_adj_diff) {
- ddp->dirty_counter = ERTS_DRIFT_INTERVALS;
+ max_drift = ERTS_MAX_MONOTONIC_DRIFT;
+ max_drift *= ERTS_MONOTONIC_TO_SEC(mtime_diff);
+
+ if (smtime_diff > time_sup.r.o.drift_adj.error + max_drift
+ || smtime_diff < -1*time_sup.r.o.drift_adj.error - max_drift) {
+ dirty_intervals:
+ /*
+ * We had a leap in system time. Mark array as
+ * dirty to ensure that dirty values are rotated
+ * out before we use it again...
+ */
+ ddp->dirty_counter = time_sup.r.o.drift_adj.intervals;
begin_short_intervals = 1;
}
- else {
- if (ddp->dirty_counter <= 0) {
- drift_adj = ((stime_acc - mtime_acc)
- *ERTS_MONOTONIC_TIME_UNIT) / mtime_acc;
+ else if (ddp->dirty_counter > 0) {
+ if (init_drift_adj) {
+ new_correction.drift = ((smtime_diff
+ * ERTS_MONOTONIC_TIME_UNIT)
+ / mtime_diff);
+ set_new_correction = 1;
}
- if (ddp->dirty_counter >= 0) {
- if (ddp->dirty_counter == 0) {
- /* Force set new drift correction... */
- set_new_correction = 1;
- }
+ ddp->dirty_counter--;
+ }
+ else {
+ if (ddp->dirty_counter == 0) {
+ /* Force set new drift correction... */
+ set_new_correction = 1;
ddp->dirty_counter--;
}
+
+ if (time_sup.r.o.drift_adj.use_avg)
+ drift_adj = (((stime_acc - mtime_acc)
+ * ERTS_MONOTONIC_TIME_UNIT)
+ / mtime_acc);
+ else
+ drift_adj = ((smtime_diff
+ * ERTS_MONOTONIC_TIME_UNIT)
+ / mtime_diff);
+
+ drift_adj_diff = avg_drift_adj - drift_adj;
+ if (drift_adj_diff < -ERTS_TIME_DRIFT_MAX_ADJ_DIFF
+ || ERTS_TIME_DRIFT_MAX_ADJ_DIFF < drift_adj_diff)
+ goto dirty_intervals;
+
drift_adj_diff = drift_adj - new_correction.drift;
if (drift_adj_diff) {
if (drift_adj_diff > ERTS_TIME_DRIFT_MAX_ADJ_DIFF)
@@ -580,7 +580,6 @@ check_time_correction(void *unused)
else if (drift_adj_diff < -ERTS_TIME_DRIFT_MAX_ADJ_DIFF)
drift_adj_diff = -ERTS_TIME_DRIFT_MAX_ADJ_DIFF;
new_correction.drift += drift_adj_diff;
-
if (drift_adj_diff < -ERTS_TIME_DRIFT_MIN_ADJ_DIFF
|| ERTS_TIME_DRIFT_MIN_ADJ_DIFF < drift_adj_diff) {
set_new_correction = 1;
@@ -589,7 +588,6 @@ check_time_correction(void *unused)
}
}
}
-#endif
begin_short_intervals |= set_new_correction;
@@ -608,25 +606,36 @@ check_time_correction(void *unused)
timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
else {
ErtsMonotonicTime ecorr = new_correction.error;
- if (sdiff < 0)
- sdiff = -1*sdiff;
+ ErtsMonotonicTime abs_sdiff;
+ abs_sdiff = (sdiff < 0) ? -1*sdiff : sdiff;
if (ecorr < 0)
ecorr = -1*ecorr;
- if (sdiff > ecorr*(ERTS_LONG_TIME_CORRECTION_CHECK/ERTS_TCORR_ERR_UNIT))
+ if (abs_sdiff > ecorr*(ERTS_LONG_TIME_CORRECTION_CHECK/ERTS_TCORR_ERR_UNIT))
timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
else {
- timeout = ERTS_MONOTONIC_TO_MSEC((ERTS_TCORR_ERR_UNIT*sdiff)/ecorr);
+ timeout = ERTS_MONOTONIC_TO_MSEC((ERTS_TCORR_ERR_UNIT*abs_sdiff)/ecorr);
if (timeout < 10)
timeout = 10;
}
}
if (timeout > ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK)
- && time_sup.inf.c.parmon.cdata.short_check_interval) {
+ && (time_sup.inf.c.parmon.cdata.short_check_interval
+ || time_sup.inf.c.parmon.cdata.drift.dirty_counter >= 0)) {
timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK);
}
- ERTS_PRINT_CORRECTION;
+ timeout_pos = get_timeout_pos(erl_mtime, timeout);
+
+#ifdef ERTS_TIME_CORRECTION_PRINT
+ print_correction(set_new_correction,
+ sdiff,
+ cip->correction.error,
+ cip->correction.drift,
+ new_correction.error,
+ new_correction.drift,
+ timeout);
+#endif
if (set_new_correction) {
erts_smp_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx);
@@ -638,16 +647,17 @@ check_time_correction(void *unused)
/*
* Current correction instance begin when
- * OS monotonic time has increased one unit.
+ * OS monotonic time has increased two units.
*/
- os_mtime++;
+ os_mtime += 2;
/*
* Erlang monotonic time corresponding to
* next OS monotonic time using previous
* correction.
*/
- erl_mtime = calc_corrected_erl_mtime(os_mtime, cip, NULL);
+ erl_mtime = calc_corrected_erl_mtime(os_mtime, cip, NULL,
+ os_drift_corrected);
/*
* Save new current correction instance.
@@ -659,23 +669,74 @@ check_time_correction(void *unused)
erts_smp_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx);
}
- erts_set_timer(&time_sup.inf.c.parmon.timer,
- check_time_correction,
- NULL,
- NULL,
- timeout);
+ if (!esdp)
+ esdp = erts_get_scheduler_data();
-#undef ERTS_PRINT_CORRECTION
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &time_sup.inf.c.parmon.timer,
+ check_time_correction,
+ NULL,
+ (void *) esdp,
+ timeout_pos);
}
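
The drift handling above clamps every adjustment between a minimum and a maximum step. Below is a minimal sketch of that clamping logic only; the limit values are made-up example numbers, not the real ERTS_TIME_DRIFT_MIN/MAX_ADJ_DIFF constants, and the surrounding correction state is omitted.

#include <stdio.h>
#include <stdint.h>

typedef int64_t MonotonicTime;

/* Assumed example limits; the real values live in erl_time_sup.c. */
#define MAX_ADJ_DIFF 100
#define MIN_ADJ_DIFF 5

/* Returns the new drift correction; *changed tells whether a new
 * correction instance would be installed (set_new_correction). */
static MonotonicTime
adjust_drift(MonotonicTime current_drift, MonotonicTime measured_drift,
             int *changed)
{
    MonotonicTime diff = measured_drift - current_drift;

    *changed = 0;
    if (diff) {
        /* Never move more than MAX_ADJ_DIFF per check... */
        if (diff > MAX_ADJ_DIFF)
            diff = MAX_ADJ_DIFF;
        else if (diff < -MAX_ADJ_DIFF)
            diff = -MAX_ADJ_DIFF;
        current_drift += diff;
        /* ...and only install a new correction instance when the
         * change is larger than MIN_ADJ_DIFF. */
        if (diff < -MIN_ADJ_DIFF || MIN_ADJ_DIFF < diff)
            *changed = 1;
    }
    return current_drift;
}

int main(void)
{
    int changed;
    MonotonicTime d = adjust_drift(0, 350, &changed);
    printf("drift=%lld changed=%d\n", (long long) d, changed); /* 100, 1 */
    d = adjust_drift(d, 103, &changed);
    printf("drift=%lld changed=%d\n", (long long) d, changed); /* 103, 0 */
    return 0;
}
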
-#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
+static ErtsMonotonicTime get_os_corrected_time(void)
+{
+ ASSERT(time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE);
+ return erts_os_monotonic_time() + time_sup.r.o.moffset;
+}
+
+static void
+check_time_offset(void *vesdp)
+{
+ ErtsSchedulerData *esdp = (ErtsSchedulerData *) vesdp;
+ ErtsMonotonicTime sdiff, os_mtime, erl_mtime, os_stime,
+ erl_stime, time_offset, timeout, timeout_pos;
+
+ ASSERT(time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE);
+
+ erts_os_times(&os_mtime, &os_stime);
+
+ erl_mtime = os_mtime + time_sup.r.o.moffset;
+ time_offset = get_time_offset();
+ erl_stime = erl_mtime + time_offset;
+
+ sdiff = erl_stime - os_stime;
+
+ if ((sdiff < -2*time_sup.r.o.adj.small_diff
+ || 2*time_sup.r.o.adj.small_diff < sdiff)) {
+	/* System time diff exceeded limits; change time offset... */
+#ifdef ERTS_TIME_CORRECTION_PRINT
+ erts_fprintf(stderr, "sdiff = %b64d nsec -> 0 nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(sdiff));
+#endif
+ time_offset -= sdiff;
+ sdiff = 0;
+ set_time_offset(time_offset);
+ schedule_send_time_offset_changed_notifications(time_offset);
+ }
+#ifdef ERTS_TIME_CORRECTION_PRINT
+ else erts_fprintf(stderr, "sdiff = %b64d nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(sdiff));
+#endif
+
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_LONG_TIME_CORRECTION_CHECK);
+ timeout_pos = get_timeout_pos(erl_mtime, timeout);
+
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &time_sup.inf.c.parmon.timer,
+ check_time_offset,
+ NULL,
+ vesdp,
+ timeout_pos);
+}
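
The new check_time_offset() only steps the time offset in multi time warp mode when Erlang system time has drifted more than twice the small adjustment limit away from OS system time. A standalone sketch of just that decision follows; ErtsMonotonicTime and the limit are replaced by plain stand-ins, and the 50 usec limit is an illustrative assumption, not the value ERTS uses.

#include <stdio.h>
#include <stdint.h>

typedef int64_t MonotonicTime;             /* stand-in for ErtsMonotonicTime */

/* Assumed example limit: 50 usec expressed in a nanosecond time unit. */
static const MonotonicTime small_diff = 50 * 1000;

/* Returns the (possibly updated) time offset; *stepped is set when the
 * offset was changed, i.e. when change notifications would be scheduled. */
static MonotonicTime
check_offset(MonotonicTime os_stime, MonotonicTime erl_mtime,
             MonotonicTime time_offset, int *stepped)
{
    MonotonicTime erl_stime = erl_mtime + time_offset;
    MonotonicTime sdiff = erl_stime - os_stime;

    *stepped = 0;
    if (sdiff < -2*small_diff || 2*small_diff < sdiff) {
        /* Erlang system time has moved too far from OS system time;
         * step the offset so that the difference becomes zero. */
        time_offset -= sdiff;
        *stepped = 1;
    }
    return time_offset;
}

int main(void)
{
    int stepped;
    MonotonicTime off = check_offset(1000000, 900000, 250000, &stepped);
    printf("offset=%lld stepped=%d\n", (long long) off, stepped);
    return 0;
}
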
static void
-init_check_time_correction(void *unused)
+init_check_time_correction(void *vesdp)
{
ErtsMonotonicDriftData *ddp;
ErtsMonotonicTime old_mtime, old_stime, mtime, stime, mtime_diff,
- stime_diff;
+ stime_diff, smtime_diff, max_drift;
int ix;
ddp = &time_sup.inf.c.parmon.cdata.drift;
@@ -687,7 +748,13 @@ init_check_time_correction(void *unused)
mtime_diff = mtime - old_mtime;
stime_diff = stime - old_stime;
- if (100*stime_diff < 80*mtime_diff || 120*mtime_diff < 100*stime_diff ) {
+ smtime_diff = stime_diff - mtime_diff;
+
+ max_drift = ERTS_MAX_MONOTONIC_DRIFT;
+ max_drift *= ERTS_MONOTONIC_TO_SEC(mtime_diff);
+
+ if (smtime_diff > time_sup.r.o.drift_adj.error + max_drift
+ || smtime_diff < -1*time_sup.r.o.drift_adj.error - max_drift) {
/* Had a system time leap... pretend no drift... */
stime_diff = mtime_diff;
}
@@ -697,29 +764,28 @@ init_check_time_correction(void *unused)
* a drift adjustment, and repeat this interval
* in all slots...
*/
- for (ix = 0; ix < ERTS_DRIFT_INTERVALS; ix++) {
+ for (ix = 0; ix < time_sup.r.o.drift_adj.intervals; ix++) {
ddp->intervals[ix].diff.mon = mtime_diff;
ddp->intervals[ix].diff.sys = stime_diff;
ddp->intervals[ix].time.mon = old_mtime;
ddp->intervals[ix].time.sys = old_stime;
}
- ddp->acc.sys = stime_diff*ERTS_DRIFT_INTERVALS;
- ddp->acc.mon = mtime_diff*ERTS_DRIFT_INTERVALS;
+ ddp->acc.sys = stime_diff*time_sup.r.o.drift_adj.intervals;
+ ddp->acc.mon = mtime_diff*time_sup.r.o.drift_adj.intervals;
ddp->ix = 0;
- ddp->dirty_counter = ERTS_DRIFT_INTERVALS;
+ ddp->dirty_counter = time_sup.r.o.drift_adj.intervals;
- check_time_correction(NULL);
+ check_time_correction(vesdp);
}
-#endif
-
static ErtsMonotonicTime
finalize_corrected_time_offset(ErtsSystemTime *stimep)
{
ErtsMonotonicTime os_mtime;
ErtsMonotonicCorrectionData cdata;
ErtsMonotonicCorrectionInstance *cip;
+ int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time;
erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx);
@@ -734,25 +800,46 @@ finalize_corrected_time_offset(ErtsSystemTime *stimep)
"OS monotonic time stepped backwards\n");
cip = &cdata.curr;
- return calc_corrected_erl_mtime(os_mtime, cip, NULL);
+ return calc_corrected_erl_mtime(os_mtime, cip, NULL,
+ os_drift_corrected);
}
static void
-late_init_time_correction(void)
+late_init_time_correction(ErtsSchedulerData *esdp)
{
- if (time_sup.inf.c.finalized_offset) {
+ int quick_init_drift_adj;
+ void (*check_func)(void *);
+ ErtsMonotonicTime timeout, timeout_pos;
- erts_init_timer(&time_sup.inf.c.parmon.timer);
- erts_set_timer(&time_sup.inf.c.parmon.timer,
-#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
- init_check_time_correction,
-#else
- check_time_correction,
-#endif
- NULL,
- NULL,
- ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK));
+ quick_init_drift_adj =
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.drift_adj.error) == 0;
+
+ if (quick_init_drift_adj)
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK/10);
+ else
+ timeout = ERTS_MONOTONIC_TO_MSEC(ERTS_SHORT_TIME_CORRECTION_CHECK);
+
+ if (!time_sup.r.o.os_corrected_monotonic_time)
+ check_func = init_check_time_correction;
+ else if (time_sup.r.o.get_time == get_os_corrected_time) {
+ quick_init_drift_adj = 0;
+ check_func = check_time_offset;
}
+ else
+ check_func = check_time_correction;
+
+ timeout_pos = get_timeout_pos(erts_get_monotonic_time(esdp),
+ timeout);
+
+ erts_twheel_init_timer(&time_sup.inf.c.parmon.timer);
+ erts_twheel_set_timer(esdp->timer_wheel,
+ &time_sup.inf.c.parmon.timer,
+ check_func,
+ NULL,
+ (quick_init_drift_adj
+ ? NULL
+ : esdp),
+ timeout_pos);
}
#endif /* ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT */
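
late_init_time_correction() now arms a per-scheduler timer-wheel timer instead of the old global timer API. get_timeout_pos() itself is not part of this diff, so the sketch below is only a guess at how a millisecond timeout relative to the current Erlang monotonic time could be rounded up to a wheel position; it assumes a nanosecond native unit and 1 ms clock ticks, and the round-up mirrors the ERTS_MONOTONIC_TO_CLKTCKS(t - 1) + 1 pattern removed from time.c later in this patch.

#include <stdio.h>
#include <stdint.h>

typedef int64_t MonotonicTime;

/* Assumptions for the sketch only: native unit is nanoseconds and one
 * timer-wheel tick ("clktck") is one millisecond. */
#define MSEC_TO_MONOTONIC(MS)   ((MonotonicTime)(MS) * 1000000)
#define MONOTONIC_TO_CLKTCKS(T) ((T) / 1000000)

/* Hypothetical counterpart of get_timeout_pos(): round the absolute
 * timeout up to the next tick so the timer never fires early. */
static MonotonicTime
timeout_pos(MonotonicTime now, MonotonicTime timeout_msec)
{
    MonotonicTime timeout_time = now + MSEC_TO_MONOTONIC(timeout_msec);
    return MONOTONIC_TO_CLKTCKS(timeout_time - 1) + 1;
}

int main(void)
{
    printf("%lld\n", (long long) timeout_pos(5000000, 100)); /* -> 105 */
    return 0;
}
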
@@ -832,6 +919,8 @@ void erts_init_sys_time_sup(void)
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
time_sup.r.o.os_monotonic_time_disable
= !sys_init_time_res.have_os_monotonic_time;
+ time_sup.r.o.os_corrected_monotonic_time =
+ sys_init_time_res.have_corrected_os_monotonic_time;
time_sup.r.o.os_monotonic_time_func
= sys_init_time_res.os_monotonic_time_info.func;
time_sup.r.o.os_monotonic_time_clock_id
@@ -856,11 +945,13 @@ void erts_init_sys_time_sup(void)
int
erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
{
- ErtsMonotonicTime resolution;
+ ErtsMonotonicTime resolution, ilength, intervals, short_isecs;
#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
ErtsMonotonicTime abs_native_offset, native_offset;
#endif
+ erts_hl_timer_init();
+
ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
@@ -870,57 +961,62 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
time_sup.r.o.warp_mode = time_warp_mode;
if (time_warp_mode == ERTS_SINGLE_TIME_WARP_MODE)
- time_sup.inf.c.finalized_offset = 0;
+ erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1);
else
- time_sup.inf.c.finalized_offset = ~0;
+ erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0);
+ time_sup.inf.c.shadow_offset = 0;
#if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT
+ /*
+     * NOTE! erts_time_sup__.r.o.start *needs* to be a multiple
+ * of ERTS_MONOTONIC_TIME_UNIT.
+ */
+
#ifdef ARCH_32
- time_sup.r.o.start = ((((ErtsMonotonicTime) 1) << 32)-1);
- time_sup.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
- time_sup.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
- time_sup.r.o.start += ERTS_MONOTONIC_TIME_UNIT;
- native_offset = time_sup.r.o.start - ERTS_MONOTONIC_TIME_UNIT;
- native_offset = native_offset;
+ erts_time_sup__.r.o.start = ((((ErtsMonotonicTime) 1) << 32)-1);
+ erts_time_sup__.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start += ERTS_MONOTONIC_TIME_UNIT;
+ native_offset = erts_time_sup__.r.o.start - ERTS_MONOTONIC_BEGIN;
+ abs_native_offset = native_offset;
#else /* ARCH_64 */
if (ERTS_MONOTONIC_TIME_UNIT <= 10*1000*1000) {
- time_sup.r.o.start = 0;
- native_offset = -ERTS_MONOTONIC_TIME_UNIT;
- abs_native_offset = ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start = 0;
+ native_offset = -ERTS_MONOTONIC_BEGIN;
+ abs_native_offset = ERTS_MONOTONIC_BEGIN;
}
else {
- time_sup.r.o.start = ((ErtsMonotonicTime) MIN_SMALL);
- time_sup.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
- time_sup.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
- native_offset = time_sup.r.o.start - ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start = ((ErtsMonotonicTime) MIN_SMALL);
+ erts_time_sup__.r.o.start /= ERTS_MONOTONIC_TIME_UNIT;
+ erts_time_sup__.r.o.start *= ERTS_MONOTONIC_TIME_UNIT;
+ native_offset = erts_time_sup__.r.o.start - ERTS_MONOTONIC_BEGIN;
abs_native_offset = -1*native_offset;
}
#endif
- time_sup.r.o.start_offset.native = (time_sup.r.o.start
- - ERTS_MONOTONIC_TIME_UNIT);
- time_sup.r.o.start_offset.nsec = (ErtsMonotonicTime)
+ erts_time_sup__.r.o.start_offset.native = native_offset;
+ erts_time_sup__.r.o.start_offset.nsec = (ErtsMonotonicTime)
erts_time_unit_conversion((Uint64) abs_native_offset,
(Uint32) ERTS_MONOTONIC_TIME_UNIT,
(Uint32) 1000*1000*1000);
- time_sup.r.o.start_offset.usec = (ErtsMonotonicTime)
+ erts_time_sup__.r.o.start_offset.usec = (ErtsMonotonicTime)
erts_time_unit_conversion((Uint64) abs_native_offset,
(Uint32) ERTS_MONOTONIC_TIME_UNIT,
(Uint32) 1000*1000);
- time_sup.r.o.start_offset.msec = (ErtsMonotonicTime)
+ erts_time_sup__.r.o.start_offset.msec = (ErtsMonotonicTime)
erts_time_unit_conversion((Uint64) abs_native_offset,
(Uint32) ERTS_MONOTONIC_TIME_UNIT,
(Uint32) 1000);
- time_sup.r.o.start_offset.sec = (ErtsMonotonicTime)
+ erts_time_sup__.r.o.start_offset.sec = (ErtsMonotonicTime)
erts_time_unit_conversion((Uint64) abs_native_offset,
(Uint32) ERTS_MONOTONIC_TIME_UNIT,
(Uint32) 1);
if (native_offset < 0) {
- time_sup.r.o.start_offset.nsec *= -1;
- time_sup.r.o.start_offset.usec *= -1;
- time_sup.r.o.start_offset.msec *= -1;
- time_sup.r.o.start_offset.sec *= -1;
+ erts_time_sup__.r.o.start_offset.nsec *= -1;
+ erts_time_sup__.r.o.start_offset.usec *= -1;
+ erts_time_sup__.r.o.start_offset.msec *= -1;
+ erts_time_sup__.r.o.start_offset.sec *= -1;
}
#endif
@@ -938,17 +1034,66 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
time_sup.r.o.adj.large_diff = ERTS_USEC_TO_MONOTONIC(500);
time_sup.r.o.adj.small_diff = time_sup.r.o.adj.large_diff/10;
+ time_sup.r.o.drift_adj.resolution = resolution;
+
+ if (time_sup.r.o.os_corrected_monotonic_time) {
+ time_sup.r.o.drift_adj.use_avg = 0;
+ time_sup.r.o.drift_adj.intervals = 0;
+ time_sup.r.o.drift_adj.error = 0;
+ time_sup.inf.c.parmon.cdata.drift.dirty_counter = -1;
+ }
+ else {
+ /*
+ * Calculate length of the interval in seconds needed
+	 * in order to get an error that is at most 1 microsecond.
+ * If this interval is longer than the short time correction
+ * check interval we use the average of all values instead
+ * of the latest value.
+ */
+ short_isecs = ERTS_MONOTONIC_TO_SEC(ERTS_SHORT_TIME_CORRECTION_CHECK);
+ ilength = ERTS_ASSUMED_PRECISION_DROP * ERTS_MONOTONIC_TIME_UNIT;
+ ilength /= (resolution * ERTS_USEC_TO_MONOTONIC(1));
+ time_sup.r.o.drift_adj.use_avg = ilength > short_isecs;
+
+ if (ilength == 0)
+ intervals = 5;
+ else {
+ intervals = ilength / short_isecs;
+ if (intervals > ERTS_MAX_DRIFT_INTERVALS)
+ intervals = ERTS_MAX_DRIFT_INTERVALS;
+ else if (intervals < 5)
+ intervals = 5;
+ }
+ time_sup.r.o.drift_adj.intervals = (int) intervals;
+
+ /*
+ * drift_adj.error equals maximum assumed error
+ * over a short time interval. We use this value also
+ * when examining a large interval. In this case the
+ * error will be smaller, but we do not want to
+ * recalculate this over and over again.
+ */
+
+ time_sup.r.o.drift_adj.error = ERTS_MONOTONIC_TIME_UNIT;
+ time_sup.r.o.drift_adj.error *= ERTS_ASSUMED_PRECISION_DROP;
+ time_sup.r.o.drift_adj.error /= resolution * short_isecs;
+ }
#ifdef ERTS_TIME_CORRECTION_PRINT
- fprintf(stderr, "start = %lld\n\r", (long long) ERTS_MONOTONIC_TIME_START);
- fprintf(stderr, "native offset = %lld\n\r", (long long) ERTS_MONOTONIC_OFFSET_NATIVE);
- fprintf(stderr, "nsec offset = %lld\n\r", (long long) ERTS_MONOTONIC_OFFSET_NSEC);
- fprintf(stderr, "usec offset = %lld\n\r", (long long) ERTS_MONOTONIC_OFFSET_USEC);
- fprintf(stderr, "msec offset = %lld\n\r", (long long) ERTS_MONOTONIC_OFFSET_MSEC);
- fprintf(stderr, "sec offset = %lld\n\r", (long long) ERTS_MONOTONIC_OFFSET_SEC);
- fprintf(stderr, "large diff = %lld usec\r\n",
- (long long) ERTS_MONOTONIC_TO_USEC(time_sup.r.o.adj.large_diff));
- fprintf(stderr, "small diff = %lld usec\r\n",
- (long long) ERTS_MONOTONIC_TO_USEC(time_sup.r.o.adj.small_diff));
+ erts_fprintf(stderr, "resolution = %b64d\n", resolution);
+ erts_fprintf(stderr, "adj large diff = %b64d usec\n",
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.adj.large_diff));
+ erts_fprintf(stderr, "adj small diff = %b64d usec\n",
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.adj.small_diff));
+ if (!time_sup.r.o.os_corrected_monotonic_time) {
+ erts_fprintf(stderr, "drift intervals = %d\n",
+ time_sup.r.o.drift_adj.intervals);
+ erts_fprintf(stderr, "drift adj error = %b64d usec\n",
+ ERTS_MONOTONIC_TO_USEC(time_sup.r.o.drift_adj.error));
+ erts_fprintf(stderr, "drift adj max diff = %b64d nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(ERTS_TIME_DRIFT_MAX_ADJ_DIFF));
+ erts_fprintf(stderr, "drift adj min diff = %b64d nsec\n",
+ ERTS_MONOTONIC_TO_NSEC(ERTS_TIME_DRIFT_MIN_ADJ_DIFF));
+ }
#endif
if (ERTS_MONOTONIC_TIME_UNIT < ERTS_CLKTCK_RESOLUTION)
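
The drift_adj setup above sizes the averaging window so that the per-interval measurement error stays around one microsecond. The block below is a self-contained rendering of the same arithmetic; ERTS_ASSUMED_PRECISION_DROP, ERTS_MAX_DRIFT_INTERVALS, the native time unit and the short check interval are all replaced by assumed example values.

#include <stdio.h>
#include <stdint.h>

typedef int64_t MonotonicTime;

/* Assumed values for the sketch only. */
#define MONOTONIC_TIME_UNIT     1000000000  /* nanosecond native unit */
#define USEC_TO_MONOTONIC(US)   ((MonotonicTime)(US) * 1000)
#define ASSUMED_PRECISION_DROP  2           /* hypothetical constant */
#define MAX_DRIFT_INTERVALS     50          /* hypothetical constant */
#define SHORT_CHECK_SECS        10          /* short correction check, sec */

int main(void)
{
    MonotonicTime resolution = 1000000;     /* e.g. a 1 MHz OS clock */
    MonotonicTime ilength, intervals, error;

    /* Interval length (seconds) needed for <= 1 usec measurement error. */
    ilength = ASSUMED_PRECISION_DROP * (MonotonicTime) MONOTONIC_TIME_UNIT;
    ilength /= resolution * USEC_TO_MONOTONIC(1);

    int use_avg = ilength > SHORT_CHECK_SECS;

    if (ilength == 0)
        intervals = 5;
    else {
        intervals = ilength / SHORT_CHECK_SECS;
        if (intervals > MAX_DRIFT_INTERVALS)
            intervals = MAX_DRIFT_INTERVALS;
        else if (intervals < 5)
            intervals = 5;
    }

    /* Maximum assumed error over one short interval, in native units. */
    error = (MonotonicTime) MONOTONIC_TIME_UNIT * ASSUMED_PRECISION_DROP
            / (resolution * SHORT_CHECK_SECS);

    printf("use_avg=%d intervals=%lld error=%lld\n",
           use_avg, (long long) intervals, (long long) error);
    return 0;
}
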
@@ -967,8 +1112,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
erts_os_times(&time_sup.inf.c.minit,
&time_sup.inf.c.sinit);
time_sup.r.o.moffset = -1*time_sup.inf.c.minit;
+ time_sup.r.o.moffset += ERTS_MONOTONIC_BEGIN;
offset = time_sup.inf.c.sinit;
- offset -= ERTS_MONOTONIC_TIME_UNIT;
+ offset -= ERTS_MONOTONIC_BEGIN;
init_time_offset(offset);
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
@@ -979,19 +1125,22 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
cdatap = &time_sup.inf.c.parmon.cdata;
-#ifndef ERTS_HAVE_CORRECTED_OS_MONOTONIC
cdatap->drift.intervals[0].time.sys = time_sup.inf.c.sinit;
cdatap->drift.intervals[0].time.mon = time_sup.inf.c.minit;
cdatap->curr.correction.drift = 0;
-#endif
cdatap->curr.correction.error = 0;
- cdatap->curr.erl_mtime = ERTS_MONOTONIC_TIME_UNIT;
+ cdatap->curr.erl_mtime = ERTS_MONOTONIC_BEGIN;
cdatap->curr.os_mtime = time_sup.inf.c.minit;
cdatap->last_check = time_sup.inf.c.minit;
cdatap->short_check_interval = ERTS_INIT_SHORT_INTERVAL_COUNTER;
cdatap->prev = cdatap->curr;
- time_sup.r.o.get_time = get_corrected_time;
+ if (!time_sup.r.o.os_corrected_monotonic_time)
+ time_sup.r.o.get_time = get_corrected_time;
+ else if (time_sup.r.o.warp_mode == ERTS_MULTI_TIME_WARP_MODE)
+ time_sup.r.o.get_time = get_os_corrected_time;
+ else
+ time_sup.r.o.get_time = get_os_drift_corrected_time;
}
else
#endif
@@ -999,7 +1148,7 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
ErtsMonotonicTime stime, offset;
time_sup.r.o.get_time = get_not_corrected_time;
stime = time_sup.inf.c.sinit = erts_os_system_time();
- offset = stime - ERTS_MONOTONIC_TIME_UNIT;
+ offset = stime - ERTS_MONOTONIC_BEGIN;
time_sup.inf.c.not_corrected_moffset = offset;
init_time_offset(offset);
time_sup.f.c.last_not_corrected_time = 0;
@@ -1019,14 +1168,24 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
void
erts_late_init_time_sup(void)
{
-#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
- /* Timer wheel must be initialized */
- if (time_sup.r.o.get_time == get_corrected_time)
- late_init_time_correction();
-#endif
erts_late_sys_init_time();
}
+void
+erts_sched_init_time_sup(ErtsSchedulerData *esdp)
+{
+ esdp->timer_wheel = erts_create_timer_wheel(esdp);
+ esdp->next_tmo_ref = erts_get_next_timeout_reference(esdp->timer_wheel);
+ esdp->timer_service = erts_create_timer_service();
+#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
+ if (esdp->no == 1) {
+	/* A timer wheel to use must have been initialized */
+ if (time_sup.r.o.get_time != get_not_corrected_time)
+ late_init_time_correction(esdp);
+ }
+#endif
+}
+
ErtsTimeWarpMode erts_time_warp_mode(void)
{
return time_sup.r.o.warp_mode;
@@ -1038,9 +1197,9 @@ ErtsTimeOffsetState erts_time_offset_state(void)
case ERTS_NO_TIME_WARP_MODE:
return ERTS_TIME_OFFSET_FINAL;
case ERTS_SINGLE_TIME_WARP_MODE:
- if (time_sup.inf.c.finalized_offset)
- return ERTS_TIME_OFFSET_FINAL;
- return ERTS_TIME_OFFSET_PRELIMINARY;
+ if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset))
+ return ERTS_TIME_OFFSET_PRELIMINARY;
+ return ERTS_TIME_OFFSET_FINAL;
case ERTS_MULTI_TIME_WARP_MODE:
return ERTS_TIME_OFFSET_VOLATILE;
default:
@@ -1073,7 +1232,7 @@ erts_finalize_time_offset(void)
erts_smp_mtx_lock(&erts_get_time_mtx);
- if (!time_sup.inf.c.finalized_offset) {
+ if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) {
ErtsMonotonicTime mtime, new_offset;
#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
@@ -1110,19 +1269,12 @@ erts_finalize_time_offset(void)
set_time_offset(new_offset);
schedule_send_time_offset_changed_notifications(new_offset);
- time_sup.inf.c.finalized_offset = ~0;
+ erts_smp_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0);
res = ERTS_TIME_OFFSET_PRELIMINARY;
}
erts_smp_mtx_unlock(&erts_get_time_mtx);
-#ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT
- if (res == ERTS_TIME_OFFSET_PRELIMINARY
- && time_sup.r.o.get_time == get_corrected_time) {
- late_init_time_correction();
- }
-#endif
-
return res;
}
default:
@@ -1176,6 +1328,7 @@ wall_clock_elapsed_time_both(UWord *ms_total, UWord *ms_diff)
erts_smp_mtx_lock(&erts_timeofday_mtx);
now = time_sup.r.o.get_time();
+ update_last_mtime(NULL, now);
elapsed = ERTS_MONOTONIC_TO_MSEC(now);
*ms_total = (UWord) elapsed;
@@ -1564,6 +1717,7 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec)
mtime = time_sup.r.o.get_time();
time_offset = get_time_offset();
+ update_last_mtime(NULL, mtime);
now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset);
erts_smp_mtx_lock(&erts_timeofday_mtx);
@@ -1588,9 +1742,11 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec)
}
ErtsMonotonicTime
-erts_get_monotonic_time(void)
+erts_get_monotonic_time(ErtsSchedulerData *esdp)
{
- return time_sup.r.o.get_time();
+ ErtsMonotonicTime mtime = time_sup.r.o.get_time();
+ update_last_mtime(esdp, mtime);
+ return mtime;
}
void
@@ -1817,7 +1973,13 @@ make_time_val(Process *c_p, ErtsMonotonicTime time_val)
Eterm
erts_get_monotonic_start_time(struct process *c_p)
{
- return make_time_val(c_p, ERTS_MONOTONIC_TIME_START);
+ return make_time_val(c_p, ERTS_MONOTONIC_TIME_START_EXTERNAL);
+}
+
+Eterm
+erts_get_monotonic_end_time(struct process *c_p)
+{
+ return make_time_val(c_p, ERTS_MONOTONIC_TIME_END_EXTERNAL);
}
static Eterm
@@ -2009,13 +2171,16 @@ time_unit_conversion(Process *c_p, Eterm term, ErtsMonotonicTime val, ErtsMonoto
BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
{
ErtsMonotonicTime mtime = time_sup.r.o.get_time();
+ update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
mtime += ERTS_MONOTONIC_OFFSET_NATIVE;
BIF_RET(make_time_val(BIF_P, mtime));
}
BIF_RETTYPE monotonic_time_1(BIF_ALIST_1)
{
- BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, time_sup.r.o.get_time(), 1));
+ ErtsMonotonicTime mtime = time_sup.r.o.get_time();
+ update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime, 1));
}
BIF_RETTYPE system_time_0(BIF_ALIST_0)
@@ -2023,6 +2188,7 @@ BIF_RETTYPE system_time_0(BIF_ALIST_0)
ErtsMonotonicTime mtime, offset;
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
+ update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
BIF_RET(make_time_val(BIF_P, mtime + offset));
}
@@ -2031,6 +2197,7 @@ BIF_RETTYPE system_time_1(BIF_ALIST_0)
ErtsMonotonicTime mtime, offset;
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
+ update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime + offset, 0));
}
@@ -2060,6 +2227,7 @@ BIF_RETTYPE timestamp_0(BIF_ALIST_0)
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
+ update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
stime = ERTS_MONOTONIC_TO_USEC(mtime + offset);
all_sec = stime / ERTS_MONOTONIC_TIME_MEGA;
mega_sec = (Uint) (stime / ERTS_MONOTONIC_TIME_TERA);
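
Several time BIFs above gain an update_last_mtime() call right after reading monotonic time. That helper is not shown in this diff; the sketch below only illustrates the presumed idea of recording the last monotonic time handed out per scheduler so that non-decreasing time can be asserted. It is not the OTP implementation, and the structure and names are hypothetical.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t MonotonicTime;

/* Hypothetical per-scheduler record of the last monotonic time handed out. */
typedef struct { MonotonicTime last_mtime; } SchedulerData;

static void
update_last_mtime(SchedulerData *sd, MonotonicTime mtime)
{
    if (sd) {
        /* Monotonic time must never decrease for one scheduler. */
        assert(sd->last_mtime <= mtime);
        sd->last_mtime = mtime;
    }
}

/* Pattern used by the time BIFs: read, record, then convert/return. */
static MonotonicTime
get_monotonic_time(SchedulerData *sd, MonotonicTime (*get_time)(void))
{
    MonotonicTime mtime = get_time();
    update_last_mtime(sd, mtime);
    return mtime;
}

static MonotonicTime fake_clock(void) { static MonotonicTime t; return t += 7; }

int main(void)
{
    SchedulerData sd = {0};
    printf("%lld\n", (long long) get_monotonic_time(&sd, fake_clock));
    printf("%lld\n", (long long) get_monotonic_time(&sd, fake_clock));
    return 0;
}
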
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 54fba9274f..340c7033ab 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1309,8 +1309,7 @@ erts_alloc_message_heap_state(Uint size,
state = erts_smp_atomic32_read_acqb(&receiver->state);
if (statep)
*statep = state;
- if (state & (ERTS_PSFLG_OFF_HEAP_MSGS
- | ERTS_PSFLG_EXITING
+ if (state & (ERTS_PSFLG_EXITING
| ERTS_PSFLG_PENDING_EXIT))
goto allocate_in_mbuf;
#endif
@@ -1331,8 +1330,7 @@ erts_alloc_message_heap_state(Uint size,
state = erts_smp_atomic32_read_nob(&receiver->state);
if (statep)
*statep = state;
- if ((state & (ERTS_PSFLG_OFF_HEAP_MSGS
- | ERTS_PSFLG_EXITING
+ if ((state & (ERTS_PSFLG_EXITING
| ERTS_PSFLG_PENDING_EXIT))
|| (receiver->flags & F_DISABLE_GC)
|| HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) {
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index dec92be40a..ccc7da265e 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -48,6 +48,7 @@
#include "dtrace-wrapper.h"
#include "erl_map.h"
#include "erl_bif_unique.h"
+#include "erl_hl_timer.h"
extern ErlDrvEntry fd_driver_entry;
#ifndef __OSE__
@@ -379,11 +380,7 @@ static Port *create_port(char *name,
prt->dist_entry = NULL;
ERTS_PORT_INIT_CONNECTED(prt, pid);
prt->common.u.alive.reg = NULL;
-#ifdef ERTS_SMP
- prt->common.u.alive.ptimer = NULL;
-#else
- sys_memset(&prt->common.u.alive.tm, 0, sizeof(ErlTimer));
-#endif
+ ERTS_PTMR_INIT(prt);
erts_port_task_handle_init(&prt->timeout_task);
prt->psd = NULL;
prt->drv_data = (SWord) 0;
@@ -463,11 +460,7 @@ erts_port_free(Port *prt)
| ERTS_PORT_SFLG_FREE));
ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG);
-#ifdef ERTS_SMP
- ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->common.refc) == 0);
-#else
- ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->refc) == 0);
-#endif
+ ERTS_LC_ASSERT(erts_atomic_read_nob(&prt->common.refc.atmc) == 0);
erts_port_task_fini_sched(&prt->sched);
@@ -736,11 +729,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
/*
* Must clean up the port.
*/
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(port->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&(port->common.u.alive.tm));
-#endif
+ erts_cancel_port_timer(port);
stopq(port);
if (port->linebuf != NULL) {
erts_free(ERTS_ALC_T_LINEBUF,
@@ -2798,7 +2787,8 @@ void erts_init_io(int port_tab_size,
port_tab_size,
			     common_element_size, /* Doesn't need to be exact */
"port_table",
- legacy_port_tab);
+ legacy_port_tab,
+ 1);
erts_smp_atomic_init_nob(&erts_bytes_out, 0);
erts_smp_atomic_init_nob(&erts_bytes_in, 0);
@@ -3065,7 +3055,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
rp = (scheduler
? erts_proc_lookup(pid)
- : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_INC_REFC));
if (rp) {
Eterm tuple;
@@ -3083,7 +3073,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
}
@@ -3127,7 +3117,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
rp = (scheduler
? erts_proc_lookup(to)
- : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp)
return;
@@ -3178,7 +3168,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
/*
@@ -3264,7 +3254,7 @@ deliver_vec_message(Port* prt, /* Port */
rp = (scheduler
? erts_proc_lookup(to)
- : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp)
return;
@@ -3344,7 +3334,7 @@ deliver_vec_message(Port* prt, /* Port */
erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined);
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
@@ -3434,11 +3424,8 @@ terminate_port(Port *prt)
send_closed_port_id = NIL;
}
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&prt->common.u.alive.tm);
-#endif
+ if (ERTS_PTMR_IS_SET(prt))
+ erts_cancel_port_timer(prt);
drv = prt->drv_ptr;
if ((drv != NULL) && (drv->stop != NULL)) {
@@ -4985,24 +4972,6 @@ erts_free_port_names(ErtsPortNames *pnp)
erts_free(ERTS_ALC_T_PORT_NAMES, pnp);
}
-static void schedule_port_timeout(Port *p)
-{
- /*
- * Scheduling of port timeouts can be done without port locking, but
- * since the task handle is stored in the port structure and the ptimer
- * structure is protected by the port lock we require the port to be
- * locked for now...
- *
- * TODO: Implement scheduling of port timeouts without locking
- * the port.
- * /Rickard
- */
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
- erts_port_task_schedule(p->common.id,
- &p->timeout_task,
- ERTS_PORT_TASK_TIMEOUT);
-}
-
ErlDrvTermData driver_mk_term_nil(void)
{
return driver_term_nil;
@@ -5031,7 +5000,7 @@ void driver_report_exit(ErlDrvPort ix, int status)
rp = (scheduler
? erts_proc_lookup(pid)
- : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp)
return;
@@ -5045,7 +5014,7 @@ void driver_report_exit(ErlDrvPort ix, int status)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
#define ERTS_B2T_STATES_DEF_STATES_SZ 5
@@ -5361,7 +5330,7 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
scheduler = erts_get_scheduler_id() != 0;
rp = (scheduler
? erts_proc_lookup(to)
- : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp) {
res = 0;
goto done;
@@ -5656,14 +5625,12 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
HRelease(rp, hp_end, hp);
}
}
-#ifdef ERTS_SMP
if (rp) {
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
- erts_smp_proc_dec_refc(rp);
+ erts_proc_dec_refc(rp);
}
-#endif
cleanup_b2t_states(&b2t);
DESTROY_ESTACK(stack);
return res;
@@ -6609,18 +6576,6 @@ int driver_pushq(ErlDrvPort ix, char* buffer, ErlDrvSizeT len)
return code;
}
-static ERTS_INLINE void
-drv_cancel_timer(Port *prt)
-{
-#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
-#else
- erts_cancel_timer(&prt->common.u.alive.tm);
-#endif
- if (erts_port_task_is_scheduled(&prt->timeout_task))
- erts_port_task_abort(&prt->timeout_task);
-}
-
int driver_set_timer(ErlDrvPort ix, unsigned long t)
{
Port* prt = erts_drvport2port(ix);
@@ -6632,19 +6587,8 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t)
if (prt->drv_ptr->timeout == NULL)
return -1;
- drv_cancel_timer(prt);
-#ifdef ERTS_SMP
- erts_create_smp_ptimer(&prt->common.u.alive.ptimer,
- prt->common.id,
- (ErlTimeoutProc) schedule_port_timeout,
- t);
-#else
- erts_set_timer(&prt->common.u.alive.tm,
- (ErlTimeoutProc) schedule_port_timeout,
- NULL,
- prt,
- t);
-#endif
+
+ erts_set_port_timer(prt, (Sint64) t);
return 0;
}
@@ -6654,28 +6598,28 @@ int driver_cancel_timer(ErlDrvPort ix)
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- drv_cancel_timer(prt);
+ erts_cancel_port_timer(prt);
return 0;
}
-
int
driver_read_timer(ErlDrvPort ix, unsigned long* t)
{
Port* prt = erts_drvport2port(ix);
+ Sint64 left;
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-#ifdef ERTS_SMP
- *t = (prt->common.u.alive.ptimer
- ? erts_time_left(&prt->common.u.alive.ptimer->timer.tm)
- : 0);
-#else
- *t = erts_time_left(&prt->common.u.alive.tm);
-#endif
+
+ left = erts_read_port_timer(prt);
+ if (left < 0)
+ left = 0;
+
+ *t = (unsigned long) left;
+
return 0;
}
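
From a driver author's point of view the port timer rework above is transparent: driver_set_timer(), driver_read_timer() and driver_cancel_timer() keep their documented signatures, only the backing implementation changes. Below is a minimal linked-in driver skeleton using them, for illustration only; it is not part of the patch and assumes erl_driver.h on the include path plus a C99 compiler for the designated initializers.

/* timer_sketch_drv.c -- illustration only, not from the patch. */
#include <stdio.h>
#include "erl_driver.h"

typedef struct { ErlDrvPort port; } state_t;

static ErlDrvData start(ErlDrvPort port, char *command)
{
    state_t *st = driver_alloc(sizeof(state_t));
    st->port = port;
    /* Arm a 100 ms port timer; after this change it is backed by the
     * per-scheduler timer service, but the driver API is unchanged. */
    driver_set_timer(port, 100);
    return (ErlDrvData) st;
}

static void stop(ErlDrvData data)
{
    state_t *st = (state_t *) data;
    driver_cancel_timer(st->port);   /* safe even if already fired */
    driver_free(st);
}

static void timeout(ErlDrvData data)
{
    state_t *st = (state_t *) data;
    unsigned long left;
    driver_read_timer(st->port, &left);   /* remaining ms, 0 when none */
    fprintf(stderr, "timer fired, %lu ms left\n", left);
}

static ErlDrvEntry timer_sketch_entry = {
    .driver_name = "timer_sketch_drv",
    .start = start,
    .stop = stop,
    .timeout = timeout,
    .extended_marker = ERL_DRV_EXTENDED_MARKER,
    .major_version = ERL_DRV_EXTENDED_MAJOR_VERSION,
    .minor_version = ERL_DRV_EXTENDED_MINOR_VERSION,
};

DRIVER_INIT(timer_sketch_drv) { return &timer_sketch_entry; }
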
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index c626cb2780..4d557b3a17 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -269,7 +269,10 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
#ifdef ERTS_SMP
ErtsProcLocks c_p_locks = c_p ? ERTS_PROC_LOCK_MAIN : 0;
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (c_p) ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+#endif
+
reg_safe_read_lock(c_p, &c_p_locks);
if (c_p && !c_p_locks)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
@@ -380,8 +383,6 @@ erts_whereis_name(Process *c_p,
erts_smp_proc_unlock(rp->p, need_locks);
*proc = NULL;
}
- if (*proc && (flags & ERTS_P2P_FLG_SMP_INC_REFC))
- erts_smp_proc_inc_refc(rp->p);
}
#else
if (rp->p
@@ -390,6 +391,8 @@ erts_whereis_name(Process *c_p,
else
*proc = NULL;
#endif
+ if (*proc && (flags & ERTS_P2P_FLG_INC_REFC))
+ erts_proc_inc_refc(*proc);
}
}
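
Both io.c and register.c above switch from the SMP-only ERTS_P2P_FLG_SMP_INC_REFC/erts_smp_proc_dec_refc pair to ERTS_P2P_FLG_INC_REFC/erts_proc_dec_refc, so the reference is taken in non-SMP builds as well. The sketch below mocks that lookup-with-reference pattern with hypothetical stand-ins; none of these helpers are the real ERTS functions.

#include <stdio.h>

/* Mock process entry; in ERTS the reference count lives in the process
 * structure and is handled by erts_proc_inc_refc()/erts_proc_dec_refc(). */
typedef struct { int refc; int alive; } proc_t;

#define P2P_FLG_INC_REFC 1

static proc_t table[1] = {{1, 1}};

static proc_t *pid2proc_opt(int pid, int flags)
{
    proc_t *p = (pid == 0 && table[0].alive) ? &table[0] : NULL;
    if (p && (flags & P2P_FLG_INC_REFC))
        p->refc++;               /* caller must drop this reference */
    return p;
}

static void proc_dec_refc(proc_t *p) { p->refc--; }

/* The io.c pattern: on a scheduler thread a plain lookup is enough
 * (the scheduler keeps the process alive); off a scheduler the caller
 * takes a reference and releases it when done. */
static void deliver(int pid, int on_scheduler)
{
    proc_t *rp = on_scheduler ? pid2proc_opt(pid, 0)
                              : pid2proc_opt(pid, P2P_FLG_INC_REFC);
    if (!rp)
        return;
    printf("deliver to pid %d (refc=%d)\n", pid, rp->refc);
    if (!on_scheduler)
        proc_dec_refc(rp);
}

int main(void)
{
    deliver(0, 1);
    deliver(0, 0);
    return 0;
}
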
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 251b39508f..cd53069872 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -657,6 +657,7 @@ erts_dsprintf_buf_t *erts_create_logger_dsbuf(void);
int erts_send_info_to_logger(Eterm, erts_dsprintf_buf_t *);
int erts_send_warning_to_logger(Eterm, erts_dsprintf_buf_t *);
int erts_send_error_to_logger(Eterm, erts_dsprintf_buf_t *);
+int erts_send_error_term_to_logger(Eterm, erts_dsprintf_buf_t *, Eterm);
int erts_send_info_to_logger_str(Eterm, char *);
int erts_send_warning_to_logger_str(Eterm, char *);
int erts_send_error_to_logger_str(Eterm, char *);
@@ -703,14 +704,9 @@ extern char *erts_default_arg0;
extern char os_type[];
-typedef enum {
- ERTS_NO_TIME_WARP_MODE,
- ERTS_SINGLE_TIME_WARP_MODE,
- ERTS_MULTI_TIME_WARP_MODE
-} ErtsTimeWarpMode;
-
typedef struct {
int have_os_monotonic_time;
+ int have_corrected_os_monotonic_time;
ErtsMonotonicTime os_monotonic_time_unit;
ErtsMonotonicTime sys_clock_resolution;
struct {
@@ -729,14 +725,13 @@ typedef struct {
} ErtsSysInitTimeResult;
#define ERTS_SYS_INIT_TIME_RESULT_INITER \
- {0, (ErtsMonotonicTime) -1, (ErtsMonotonicTime) 1}
+ {0, 0, (ErtsMonotonicTime) -1, (ErtsMonotonicTime) 1}
extern void erts_init_sys_time_sup(void);
extern void sys_init_time(ErtsSysInitTimeResult *);
extern void erts_late_sys_init_time(void);
extern void erts_deliver_time(void);
extern void erts_time_remaining(SysTimeval *);
-extern int erts_init_time_sup(int, ErtsTimeWarpMode);
extern void erts_sys_init_float(void);
extern void erts_thread_init_float(void);
extern void erts_thread_disable_fpe(void);
@@ -782,8 +777,6 @@ extern char *erts_sys_ddll_error(int code);
/*
* System interfaces for startup.
*/
-#include "erl_time.h"
-
void erts_sys_schedule_interrupt(int set);
#ifdef ERTS_SMP
void erts_sys_schedule_interrupt_timed(int, ErtsMonotonicTime);
@@ -825,7 +818,8 @@ int univ_to_local(
int local_to_univ(Sint *year, Sint *month, Sint *day,
Sint *hour, Sint *minute, Sint *second, int isdst);
void get_now(Uint*, Uint*, Uint*);
-ErtsMonotonicTime erts_get_monotonic_time(void);
+struct ErtsSchedulerData_;
+ErtsMonotonicTime erts_get_monotonic_time(struct ErtsSchedulerData_ *);
void get_sys_now(Uint*, Uint*, Uint*);
void set_break_quit(void (*)(void), void (*)(void));
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 2bdda6c8af..8bffdedb2b 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -76,6 +76,11 @@
#include "sys.h"
#include "erl_vm.h"
#include "global.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
+
+#define ERTS_MONOTONIC_DAY ERTS_SEC_TO_MONOTONIC(60*60*24)
+#define ERTS_CLKTCKS_DAY ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY)
#ifdef ERTS_ENABLE_LOCK_CHECK
#define ASSERT_NO_LOCKED_LOCKS erts_lc_check_exact(NULL, 0)
@@ -83,20 +88,24 @@
#define ASSERT_NO_LOCKED_LOCKS
#endif
-#define ERTS_MONOTONIC_DAY ERTS_SEC_TO_MONOTONIC(60*60*24)
-#define ERTS_CLKTCKS_DAY ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY)
-
+#if 0
+# define ERTS_TW_DEBUG
+#endif
+#if defined(DEBUG) && !defined(ERTS_TW_DEBUG)
+# define ERTS_TW_DEBUG
+#endif
-/* BEGIN tiw_lock protected variables
-**
-** The individual timer cells in tiw are also protected by the same mutex.
-*/
+#undef ERTS_TW_ASSERT
+#if defined(ERTS_TW_DEBUG)
+# define ERTS_TW_ASSERT(E) ERTS_ASSERT(E)
+#else
+# define ERTS_TW_ASSERT(E) ((void) 1)
+#endif
-/* timing wheel size NEED to be a power of 2 */
-#ifdef SMALL_MEMORY
-#define TIW_SIZE (1 << 13)
+#ifdef ERTS_TW_DEBUG
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 5
#else
-#define TIW_SIZE (1 << 20)
+# define ERTS_TWHEEL_BUMP_YIELD_LIMIT 100
#endif
/* Actual interval time chosen by sys_init_time() */
@@ -110,173 +119,170 @@ static int tiw_itime; /* Constant after init */
#endif
struct ErtsTimerWheel_ {
- ErlTimer *w[TIW_SIZE];
+ ErtsTWheelTimer *w[ERTS_TIW_SIZE];
ErtsMonotonicTime pos;
Uint nto;
struct {
- ErlTimer *head;
- ErlTimer **tail;
+ ErtsTWheelTimer *head;
+ ErtsTWheelTimer *tail;
Uint nto;
} at_once;
+ int yield_slot;
+ int yield_slots_left;
+ int yield_start_pos;
+ ErtsTWheelTimer sentinel;
int true_next_timeout_time;
ErtsMonotonicTime next_timeout_time;
- erts_atomic64_t next_timeout;
- erts_smp_atomic32_t is_bumping;
- erts_smp_mtx_t lock;
};
-ErtsTimerWheel *erts_default_timer_wheel; /* managed by aux thread */
-
-static ERTS_INLINE ErtsTimerWheel *
-get_timer_wheel(ErlTimer *p)
-{
- return (ErtsTimerWheel *) erts_smp_atomic_read_acqb(&p->wheel);
-}
-
-static ERTS_INLINE void
-set_timer_wheel(ErlTimer *p, ErtsTimerWheel *tiw)
-{
- erts_smp_atomic_set_relb(&p->wheel, (erts_aint_t) tiw);
-}
-
-static ERTS_INLINE void
-init_next_timeout(ErtsTimerWheel *tiw,
- ErtsMonotonicTime time)
-{
- erts_atomic64_init_nob(&tiw->next_timeout,
- (erts_aint64_t) time);
-}
-
-static ERTS_INLINE void
-set_next_timeout(ErtsTimerWheel *tiw,
- ErtsMonotonicTime time,
- int true_timeout)
-{
- tiw->true_next_timeout_time = true_timeout;
- tiw->next_timeout_time = time;
- erts_atomic64_set_relb(&tiw->next_timeout,
- (erts_aint64_t) time);
-}
-
-/* get the time (in units of TIW_ITIME) to the next timeout,
- or -1 if there are no timeouts */
-
static ERTS_INLINE ErtsMonotonicTime
-find_next_timeout(ErtsTimerWheel *tiw,
- ErtsMonotonicTime curr_time,
- ErtsMonotonicTime max_search_time)
+find_next_timeout(ErtsSchedulerData *esdp,
+ ErtsTimerWheel *tiw,
+ int search_all,
+ ErtsMonotonicTime curr_time, /* When !search_all */
+ ErtsMonotonicTime max_search_time) /* When !search_all */
{
int start_ix, tiw_pos_ix;
- ErlTimer *p;
- int true_min_timeout;
- ErtsMonotonicTime min_timeout, min_timeout_pos, slot_timeout_pos, timeout_limit;
-
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&tiw->lock));
-
- if (tiw->true_next_timeout_time)
- return tiw->next_timeout_time;
-
- /* We never set next timeout beyond timeout_limit */
- timeout_limit = curr_time + ERTS_MONOTONIC_DAY;
+ ErtsTWheelTimer *p;
+ int true_min_timeout = 0;
+ ErtsMonotonicTime min_timeout, min_timeout_pos, slot_timeout_pos;
if (tiw->nto == 0) { /* no timeouts in wheel */
- true_min_timeout = tiw->true_next_timeout_time = 0;
- min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(timeout_limit);
+ if (!search_all)
+ min_timeout_pos = tiw->pos;
+ else {
+ curr_time = erts_get_monotonic_time(esdp);
+ tiw->pos = min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ }
+ min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
goto found_next;
}
- /*
- * Don't want others entering trying to bump
- * timers while we are checking...
- */
- set_next_timeout(tiw, timeout_limit, 0);
-
- true_min_timeout = 1;
- slot_timeout_pos = tiw->pos;
- min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time + max_search_time);
+ slot_timeout_pos = min_timeout_pos = tiw->pos;
+ if (search_all)
+ min_timeout_pos += ERTS_MONOTONIC_TO_CLKTCKS(ERTS_MONOTONIC_DAY);
+ else
+ min_timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time + max_search_time);
- start_ix = tiw_pos_ix = (int) (tiw->pos & (TIW_SIZE-1));
+ start_ix = tiw_pos_ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
do {
- slot_timeout_pos++;
- if (slot_timeout_pos >= min_timeout_pos) {
- true_min_timeout = 0;
+ if (++slot_timeout_pos >= min_timeout_pos)
break;
- }
p = tiw->w[tiw_pos_ix];
- while (p) {
- ErtsMonotonicTime timeout_pos;
- ASSERT(p != p->next);
- timeout_pos = p->timeout_pos;
- if (min_timeout_pos > timeout_pos) {
- min_timeout_pos = timeout_pos;
- if (min_timeout_pos <= slot_timeout_pos)
- goto found_next;
- }
- p = p->next;
+ if (p) {
+ ErtsTWheelTimer *end = p;
+
+ do {
+ ErtsMonotonicTime timeout_pos;
+ timeout_pos = p->timeout_pos;
+ if (min_timeout_pos > timeout_pos) {
+ true_min_timeout = 1;
+ min_timeout_pos = timeout_pos;
+ if (min_timeout_pos <= slot_timeout_pos)
+ goto found_next;
+ }
+ p = p->next;
+ } while (p != end);
}
tiw_pos_ix++;
- if (tiw_pos_ix == TIW_SIZE)
+ if (tiw_pos_ix == ERTS_TIW_SIZE)
tiw_pos_ix = 0;
} while (start_ix != tiw_pos_ix);
found_next:
min_timeout = ERTS_CLKTCKS_TO_MONOTONIC(min_timeout_pos);
- if (min_timeout != tiw->next_timeout_time)
- set_next_timeout(tiw, min_timeout, true_min_timeout);
+ tiw->next_timeout_time = min_timeout;
+ tiw->true_next_timeout_time = true_min_timeout;
return min_timeout;
}
-static void
-remove_timer(ErtsTimerWheel *tiw, ErlTimer *p)
+static ERTS_INLINE void
+insert_timer_into_slot(ErtsTimerWheel *tiw, int slot, ErtsTWheelTimer *p)
{
- /* first */
- if (!p->prev) {
- tiw->w[p->slot] = p->next;
- if(p->next)
- p->next->prev = NULL;
- } else {
- p->prev->next = p->next;
+ ERTS_TW_ASSERT(slot >= 0);
+ ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
+ p->slot = slot;
+ if (!tiw->w[slot]) {
+ tiw->w[slot] = p;
+ p->next = p;
+ p->prev = p;
}
+ else {
+ ErtsTWheelTimer *next, *prev;
+ next = tiw->w[slot];
+ prev = next->prev;
+ p->next = next;
+ p->prev = prev;
+ prev->next = p;
+ next->prev = p;
+ }
+}
+
+static ERTS_INLINE void
+remove_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
+{
+ int slot = p->slot;
+ ERTS_TW_ASSERT(slot != ERTS_TWHEEL_SLOT_INACTIVE);
+
+ if (slot >= 0) {
+ /*
+ * Timer in wheel or in circular
+	 * list of timers currently being
+	 * triggered (referred to by the sentinel).
+ */
+ ERTS_TW_ASSERT(slot < ERTS_TIW_SIZE);
- /* last */
- if (!p->next) {
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->w[slot] == p);
+ tiw->w[slot] = NULL;
+ }
+ else {
+ if (tiw->w[slot] == p)
+ tiw->w[slot] = p->next;
+ p->prev->next = p->next;
+ p->next->prev = p->prev;
+ }
+ }
+ else {
+ /* Timer in "at once" queue... */
+ ERTS_TW_ASSERT(slot == ERTS_TWHEEL_SLOT_AT_ONCE);
if (p->prev)
- p->prev->next = NULL;
- } else {
- p->next->prev = p->prev;
+ p->prev->next = p->next;
+ else {
+ ERTS_TW_ASSERT(tiw->at_once.head == p);
+ tiw->at_once.head = p->next;
+ }
+ if (p->next)
+ p->next->prev = p->prev;
+ else {
+ ERTS_TW_ASSERT(tiw->at_once.tail == p);
+ tiw->at_once.tail = p->prev;
+ }
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->at_once.nto--;
}
- p->next = NULL;
- p->prev = NULL;
+ p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
- set_timer_wheel(p, NULL);
tiw->nto--;
}
ErtsMonotonicTime
-erts_check_next_timeout_time(ErtsTimerWheel *tiw,
- ErtsMonotonicTime max_search_time)
+erts_check_next_timeout_time(ErtsSchedulerData *esdp)
{
- ErtsMonotonicTime next, curr;
-
- curr = erts_get_monotonic_time();
-
- erts_smp_mtx_lock(&tiw->lock);
-
- next = find_next_timeout(tiw, curr, max_search_time);
-
- erts_smp_mtx_unlock(&tiw->lock);
-
- return next;
+ ErtsTimerWheel *tiw = esdp->timer_wheel;
+ if (tiw->true_next_timeout_time)
+ return tiw->next_timeout_time;
+ return find_next_timeout(esdp, tiw, 1, 0, 0);
}
-#ifndef DEBUG
+#ifndef ERTS_TW_DEBUG
#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) ((void) 0)
#else
#define ERTS_DBG_CHK_SAFE_TO_SKIP_TO(TIW, TO) debug_check_safe_to_skip_to((TIW), (TO))
@@ -284,192 +290,252 @@ static void
debug_check_safe_to_skip_to(ErtsTimerWheel *tiw, ErtsMonotonicTime skip_to_pos)
{
int slots, ix;
- ErlTimer *tmr;
+ ErtsTWheelTimer *tmr;
ErtsMonotonicTime tmp;
- ix = (int) (tiw->pos & (TIW_SIZE-1));
+ ix = (int) (tiw->pos & (ERTS_TIW_SIZE-1));
tmp = skip_to_pos - tiw->pos;
- ASSERT(tmp >= 0);
- if (tmp < (ErtsMonotonicTime) TIW_SIZE)
+ ERTS_TW_ASSERT(tmp >= 0);
+ if (tmp < (ErtsMonotonicTime) ERTS_TIW_SIZE)
slots = (int) tmp;
else
- slots = TIW_SIZE;
+ slots = ERTS_TIW_SIZE;
while (slots > 0) {
- tmr = tiw->w[ix];
- while (tmr) {
- ASSERT(tmr->timeout_pos > skip_to_pos);
- tmr = tmr->next;
- }
- ix++;
- if (ix == TIW_SIZE)
- ix = 0;
- slots--;
+ tmr = tiw->w[ix];
+ if (tmr) {
+ ErtsTWheelTimer *end = tmr;
+ do {
+ ERTS_TW_ASSERT(tmr->timeout_pos > skip_to_pos);
+ tmr = tmr->next;
+ } while (tmr != end);
+ }
+ ix++;
+ if (ix == ERTS_TIW_SIZE)
+ ix = 0;
+ slots--;
}
}
#endif
+static ERTS_INLINE void
+timeout_timer(ErtsTWheelTimer *p)
+{
+ ErlTimeoutProc timeout;
+ void *arg;
+ p->slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ timeout = p->u.func.timeout;
+ arg = p->u.func.arg;
+ (*timeout)(arg);
+ ASSERT_NO_LOCKED_LOCKS;
+}
+
void
erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
- int tiw_pos_ix, slots;
- ErlTimer *p, *timeout_head, **timeout_tail;
- ErtsMonotonicTime bump_to, tmp_slots;
-
- if (erts_smp_atomic32_cmpxchg_nob(&tiw->is_bumping, 1, 0) != 0)
- return; /* Another thread is currently bumping... */
+ int tiw_pos_ix, slots, yielded_slot_restarted, yield_count;
+ ErtsMonotonicTime bump_to, tmp_slots, old_pos;
- bump_to = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ yield_count = ERTS_TWHEEL_BUMP_YIELD_LIMIT;
- erts_smp_mtx_lock(&tiw->lock);
+ /*
+ * In order to be fair we always continue with work
+ * where we left off when restarting after a yield.
+ */
- if (tiw->pos >= bump_to) {
- timeout_head = NULL;
- goto done;
+ if (tiw->yield_slot >= 0) {
+ yielded_slot_restarted = 1;
+ tiw_pos_ix = tiw->yield_slot;
+ slots = tiw->yield_slots_left;
+ bump_to = tiw->pos;
+ old_pos = tiw->yield_start_pos;
+ goto restart_yielded_slot;
}
- /* Don't want others here while we are bumping... */
- set_next_timeout(tiw, curr_time + ERTS_MONOTONIC_DAY, 0);
+ do {
- if (!tiw->at_once.head) {
- timeout_head = NULL;
- timeout_tail = &timeout_head;
- }
- else {
- ASSERT(tiw->nto >= tiw->at_once.nto);
- timeout_head = tiw->at_once.head;
- timeout_tail = tiw->at_once.tail;
- tiw->nto -= tiw->at_once.nto;
- tiw->at_once.head = NULL;
- tiw->at_once.tail = &tiw->at_once.head;
- tiw->at_once.nto = 0;
- }
+ yielded_slot_restarted = 0;
- if (tiw->nto == 0) {
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, bump_to);
- tiw->pos = bump_to;
- goto done;
- }
+ bump_to = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
- if (tiw->true_next_timeout_time) {
- ErtsMonotonicTime skip_until_pos;
- /*
- * No need inspecting slots where we know no timeouts
- * to trigger should reside.
- */
+ while (1) {
+ ErtsTWheelTimer *p;
- skip_until_pos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
- if (skip_until_pos > bump_to)
- skip_until_pos = bump_to;
+ old_pos = tiw->pos;
- ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, skip_until_pos);
- ASSERT(skip_until_pos > tiw->pos);
-
- tiw->pos = skip_until_pos - 1;
- }
+ if (tiw->nto == 0) {
+ empty_wheel:
+ ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, bump_to);
+ tiw->true_next_timeout_time = 0;
+ tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+ tiw->pos = bump_to;
+ tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ return;
+ }
- tiw_pos_ix = (int) ((tiw->pos+1) & (TIW_SIZE-1));
- tmp_slots = (bump_to - tiw->pos);
- if (tmp_slots < (ErtsMonotonicTime) TIW_SIZE)
- slots = (int) tmp_slots;
- else
- slots = TIW_SIZE;
-
- while (slots > 0) {
- p = tiw->w[tiw_pos_ix];
- while (p) {
- ErlTimer *next = p->next;
- ASSERT(p != next);
- if (p->timeout_pos <= bump_to) { /* we have a timeout */
- /* Remove from list */
- remove_timer(tiw, p);
- *timeout_tail = p; /* Insert in timeout queue */
- timeout_tail = &p->next;
+ p = tiw->at_once.head;
+ while (p) {
+ if (--yield_count <= 0) {
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->yield_slot = ERTS_TWHEEL_SLOT_AT_ONCE;
+ tiw->true_next_timeout_time = 1;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
+ return;
+ }
+
+ ERTS_TW_ASSERT(tiw->nto > 0);
+ ERTS_TW_ASSERT(tiw->at_once.nto > 0);
+ tiw->nto--;
+ tiw->at_once.nto--;
+ tiw->at_once.head = p->next;
+ if (p->next)
+ p->next->prev = NULL;
+ else
+ tiw->at_once.tail = NULL;
+
+ timeout_timer(p);
+
+ p = tiw->at_once.head;
}
- p = next;
- }
- tiw_pos_ix++;
- if (tiw_pos_ix == TIW_SIZE)
- tiw_pos_ix = 0;
- slots--;
- }
- ASSERT(tmp_slots >= (ErtsMonotonicTime) TIW_SIZE
- || tiw_pos_ix == (int) ((bump_to+1) & (TIW_SIZE-1)));
+ if (tiw->pos >= bump_to)
+ break;
- tiw->pos = bump_to;
+ if (tiw->nto == 0)
+ goto empty_wheel;
- /* Search at most two seconds ahead... */
- (void) find_next_timeout(tiw, curr_time, ERTS_SEC_TO_MONOTONIC(2));
+ if (tiw->true_next_timeout_time) {
+ ErtsMonotonicTime skip_until_pos;
+ /*
+		 * No need to inspect slots where we know that no
+		 * timeouts due to trigger reside.
+ */
-done:
+ skip_until_pos = ERTS_MONOTONIC_TO_CLKTCKS(tiw->next_timeout_time);
+ if (skip_until_pos > bump_to)
+ skip_until_pos = bump_to;
- erts_smp_mtx_unlock(&tiw->lock);
-
- erts_smp_atomic32_set_nob(&tiw->is_bumping, 0);
+ skip_until_pos--;
- /* Call timedout timers callbacks */
- while (timeout_head) {
- ErlTimeoutProc timeout;
- void *arg;
- p = timeout_head;
- timeout_head = p->next;
- /* Here comes hairy use of the timer fields!
- * They are reset without having the lock.
- * It is assumed that no code but this will
- * accesses any field until the ->timeout
- * callback is called.
- */
- ASSERT(p->timeout_pos <= bump_to);
- p->next = NULL;
- p->prev = NULL;
- p->slot = 0;
- timeout = p->timeout;
- arg = p->arg;
- (*timeout)(arg);
- }
+ if (skip_until_pos > tiw->pos) {
+ ERTS_DBG_CHK_SAFE_TO_SKIP_TO(tiw, skip_until_pos);
+
+ tiw->pos = skip_until_pos;
+ }
+ }
+
+ tiw_pos_ix = (int) ((tiw->pos+1) & (ERTS_TIW_SIZE-1));
+ tmp_slots = (bump_to - tiw->pos);
+ if (tmp_slots < (ErtsMonotonicTime) ERTS_TIW_SIZE)
+ slots = (int) tmp_slots;
+ else
+ slots = ERTS_TIW_SIZE;
+
+ tiw->pos = bump_to;
+
+ while (slots > 0) {
+
+ p = tiw->w[tiw_pos_ix];
+ if (p) {
+ if (p->next == p) {
+ ERTS_TW_ASSERT(tiw->sentinel.next == &tiw->sentinel);
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ }
+ else {
+ tiw->sentinel.next = p->next;
+ tiw->sentinel.prev = p->prev;
+ tiw->sentinel.next->prev = &tiw->sentinel;
+ tiw->sentinel.prev->next = &tiw->sentinel;
+ }
+ tiw->w[tiw_pos_ix] = NULL;
+
+ while (1) {
+
+ if (p->timeout_pos > bump_to) {
+ /* Very unusual case... */
+ ++yield_count;
+ insert_timer_into_slot(tiw, tiw_pos_ix, p);
+ }
+ else {
+ /* Normal case... */
+ timeout_timer(p);
+ tiw->nto--;
+ }
+
+ restart_yielded_slot:
+
+ p = tiw->sentinel.next;
+ if (p == &tiw->sentinel) {
+ ERTS_TW_ASSERT(tiw->sentinel.prev == &tiw->sentinel);
+ break;
+ }
+
+ if (--yield_count <= 0) {
+ tiw->true_next_timeout_time = 1;
+ tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
+ tiw->yield_slot = tiw_pos_ix;
+ tiw->yield_slots_left = slots;
+ tiw->yield_start_pos = old_pos;
+ return; /* Yield! */
+ }
+
+ tiw->sentinel.next = p->next;
+ p->next->prev = &tiw->sentinel;
+ }
+ }
+ tiw_pos_ix++;
+ if (tiw_pos_ix == ERTS_TIW_SIZE)
+ tiw_pos_ix = 0;
+ slots--;
+ }
+ }
+
+ } while (yielded_slot_restarted);
+
+ tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ tiw->true_next_timeout_time = 0;
+ tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
+
+ /* Search at most two seconds ahead... */
+ (void) find_next_timeout(NULL, tiw, 0, curr_time, ERTS_SEC_TO_MONOTONIC(2));
}
Uint
erts_timer_wheel_memory_size(void)
{
-#ifdef ERTS_SMP
- return sizeof(ErtsTimerWheel)*(1 + erts_no_schedulers);
-#else
- return sizeof(ErtsTimerWheel);
-#endif
+ return sizeof(ErtsTimerWheel)*erts_no_schedulers;
}
ErtsTimerWheel *
-erts_create_timer_wheel(int no)
+erts_create_timer_wheel(ErtsSchedulerData *esdp)
{
ErtsMonotonicTime mtime;
int i;
ErtsTimerWheel *tiw;
- tiw = (ErtsTimerWheel *) erts_alloc(ERTS_ALC_T_TIMER_WHEEL,
- sizeof(ErtsTimerWheel));
- for(i = 0; i < TIW_SIZE; i++)
+ tiw = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_TIMER_WHEEL,
+ sizeof(ErtsTimerWheel));
+ for(i = 0; i < ERTS_TIW_SIZE; i++)
tiw->w[i] = NULL;
- erts_smp_atomic32_init_nob(&tiw->is_bumping, 0);
- erts_smp_mtx_init_x(&tiw->lock, "timer_wheel", make_small(no));
-
- mtime = erts_get_monotonic_time();
+ mtime = erts_get_monotonic_time(esdp);
tiw->pos = ERTS_MONOTONIC_TO_CLKTCKS(mtime);
tiw->nto = 0;
tiw->at_once.head = NULL;
- tiw->at_once.tail = &tiw->at_once.head;
+ tiw->at_once.tail = NULL;
tiw->at_once.nto = 0;
+ tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
tiw->true_next_timeout_time = 0;
tiw->next_timeout_time = mtime + ERTS_MONOTONIC_DAY;
- init_next_timeout(tiw, mtime + ERTS_MONOTONIC_DAY);
+ tiw->sentinel.next = &tiw->sentinel;
+ tiw->sentinel.prev = &tiw->sentinel;
return tiw;
}
ErtsNextTimeoutRef
erts_get_next_timeout_reference(ErtsTimerWheel *tiw)
{
- return (ErtsNextTimeoutRef) &tiw->next_timeout;
+ return (ErtsNextTimeoutRef) &tiw->next_timeout_time;
}
@@ -490,153 +556,83 @@ erts_init_time(int time_correction, ErtsTimeWarpMode time_warp_mode)
#else
tiw_itime = itime;
#endif
-
- erts_default_timer_wheel = erts_create_timer_wheel(0);
}
void
-erts_set_timer(ErlTimer *p, ErlTimeoutProc timeout,
- ErlCancelProc cancel, void *arg, Uint to)
+erts_twheel_set_timer(ErtsTimerWheel *tiw,
+ ErtsTWheelTimer *p, ErlTimeoutProc timeout,
+ ErlCancelProc cancel, void *arg,
+ ErtsMonotonicTime timeout_pos)
{
- ErtsMonotonicTime timeout_time, timeout_pos;
- ErtsMonotonicTime curr_time;
- ErtsTimerWheel *tiw;
- ErtsSchedulerData *esdp;
+ ErtsMonotonicTime timeout_time;
- curr_time = erts_get_monotonic_time();
- esdp = erts_get_scheduler_data();
- if (esdp)
- tiw = esdp->timer_wheel;
- else
- tiw = erts_default_timer_wheel;
-
- erts_smp_mtx_lock(&tiw->lock);
-
- if (get_timer_wheel(p))
- ERTS_INTERNAL_ERROR("Double set timer");
+ p->u.func.timeout = timeout;
+ p->u.func.cancel = cancel;
+ p->u.func.arg = arg;
- p->timeout = timeout;
- p->cancel = cancel;
- p->arg = arg;
+ ERTS_TW_ASSERT(p->slot == ERTS_TWHEEL_SLOT_INACTIVE);
- if (to == 0) {
- timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time);
+ if (timeout_pos <= tiw->pos) {
tiw->nto++;
tiw->at_once.nto++;
- *tiw->at_once.tail = p;
- tiw->at_once.tail = &p->next;
p->next = NULL;
- p->timeout_pos = timeout_pos;
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
+ p->prev = tiw->at_once.tail;
+ if (tiw->at_once.tail) {
+ ERTS_TW_ASSERT(tiw->at_once.head);
+ tiw->at_once.tail->next = p;
+ }
+ else {
+ ERTS_TW_ASSERT(!tiw->at_once.head);
+ tiw->at_once.head = p;
+ }
+ tiw->at_once.tail = p;
+ p->timeout_pos = tiw->pos;
+ p->slot = ERTS_TWHEEL_SLOT_AT_ONCE;
+ timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(tiw->pos);
}
else {
- int tm;
- ErtsMonotonicTime ticks;
-
- ticks = ERTS_MSEC_TO_CLKTCKS(to);
- timeout_pos = ERTS_MONOTONIC_TO_CLKTCKS(curr_time - 1) + 1 + ticks;
+ int slot;
/* calculate slot */
- tm = (int) (timeout_pos & (TIW_SIZE-1));
- p->slot = (Uint) tm;
-
- /* insert at head of list at slot */
- p->next = tiw->w[tm];
- p->prev = NULL;
- if (p->next != NULL)
- p->next->prev = p;
- tiw->w[tm] = p;
+ slot = (int) (timeout_pos & (ERTS_TIW_SIZE-1));
+
+ insert_timer_into_slot(tiw, slot, p);
tiw->nto++;
timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(timeout_pos);
p->timeout_pos = timeout_pos;
-
- ASSERT(ERTS_MSEC_TO_MONOTONIC(to) <= timeout_time - curr_time);
- ASSERT(timeout_time - curr_time
- < ERTS_MSEC_TO_MONOTONIC(to) + ERTS_CLKTCKS_TO_MONOTONIC(1));
}
- if (timeout_time < tiw->next_timeout_time)
- set_next_timeout(tiw, timeout_time, 1);
-
- set_timer_wheel(p, tiw);
-
- erts_smp_mtx_unlock(&tiw->lock);
-
-#if defined(ERTS_SMP)
- if (tiw == erts_default_timer_wheel)
- erts_interupt_aux_thread_timed(timeout_time);
-#endif
-
+ if (timeout_time < tiw->next_timeout_time) {
+ tiw->true_next_timeout_time = 1;
+ tiw->next_timeout_time = timeout_time;
+ }
}
void
-erts_cancel_timer(ErlTimer *p)
+erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
{
- ErtsTimerWheel *tiw;
- ErlCancelProc cancel;
- void *arg = NULL; /* Shut up faulty warning... */
-
- tiw = get_timer_wheel(p);
- if (!tiw)
- return;
-
- erts_smp_mtx_lock(&tiw->lock);
- if (tiw != get_timer_wheel(p))
- cancel = NULL;
- else {
+ if (p->slot != ERTS_TWHEEL_SLOT_INACTIVE) {
+ ErlCancelProc cancel;
+ void *arg;
remove_timer(tiw, p);
- p->slot = 0;
-
- cancel = p->cancel;
- arg = p->arg;
+ cancel = p->u.func.cancel;
+ arg = p->u.func.arg;
+ if (cancel)
+ (*cancel)(arg);
}
- erts_smp_mtx_unlock(&tiw->lock);
-
- if (cancel)
- (*cancel)(arg);
}
-/*
- Returns the amount of time left in ms until the timer 'p' is triggered.
- 0 is returned if 'p' isn't active.
- 0 is returned also if the timer is overdue (i.e., would have triggered
- immediately if it hadn't been cancelled).
-*/
-Uint
-erts_time_left(ErlTimer *p)
-{
- ErtsTimerWheel *tiw;
- ErtsMonotonicTime current_time, timeout_time;
-
- tiw = get_timer_wheel(p);
- if (!tiw)
- return 0;
-
- erts_smp_mtx_lock(&tiw->lock);
- if (tiw != get_timer_wheel(p))
- timeout_time = ERTS_MONOTONIC_TIME_MIN;
- else
- timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(p->timeout_pos);
- erts_smp_mtx_unlock(&tiw->lock);
-
- current_time = erts_get_monotonic_time();
- if (timeout_time <= current_time)
- return 0;
- return (Uint) ERTS_MONOTONIC_TO_MSEC(timeout_time - current_time);
-}
-
-#ifdef DEBUG
+#ifdef ERTS_TW_DEBUG
void erts_p_slpq(void)
{
- ErtsTimerWheel *tiw = erts_default_timer_wheel;
- ErtsMonotonicTime current_time = erts_get_monotonic_time();
+ erts_printf("Not yet implemented...\n");
+#if 0
+ ErtsMonotonicTime current_time = erts_get_monotonic_time(NULL);
int i;
- ErlTimer* p;
+ ErtsTWheelTimer* p;
- erts_smp_mtx_lock(&tiw->lock);
-
/* print the whole wheel, starting at the current position */
erts_printf("\ncurrent time = %bps tiw_pos = %d tiw_nto %d\n",
current_time, tiw->pos, tiw->nto);
@@ -649,7 +645,7 @@ void erts_p_slpq(void)
p->slot);
}
}
- for(i = ((i+1) & (TIW_SIZE-1)); i != (tiw->pos & (TIW_SIZE-1)); i = ((i+1) & (TIW_SIZE-1))) {
+ for(i = ((i+1) & (ERTS_TIW_SIZE-1)); i != (tiw->pos & (ERTS_TIW_SIZE-1)); i = ((i+1) & (ERTS_TIW_SIZE-1))) {
if (tiw->w[i] != NULL) {
erts_printf("%d:\n", i);
for(p = tiw->w[i]; p != NULL; p = p->next) {
@@ -658,7 +654,6 @@ void erts_p_slpq(void)
}
}
}
-
- erts_smp_mtx_unlock(&tiw->lock);
+#endif
}
-#endif /* DEBUG */
+#endif /* ERTS_TW_DEBUG */
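
The rewritten timer wheel keeps each non-empty slot as a circular doubly-linked list and, while triggering, splices the whole slot behind a sentinel node so that erts_bump_timers() can yield after ERTS_TWHEEL_BUMP_YIELD_LIMIT callbacks and later resume from sentinel.next. Below is a compilable toy version of just that list handling; the types and names are simplified stand-ins, not the ERTS structures.

#include <assert.h>
#include <stdio.h>

/* Stand-ins for ErtsTWheelTimer and one wheel slot: a non-empty slot is a
 * circular doubly-linked list, so removal touches the slot pointer only
 * when the removed node happens to be the slot's entry point. */
typedef struct node { struct node *next, *prev; int id; } node_t;

static void slot_insert(node_t **slot, node_t *p)
{
    if (!*slot) {
        *slot = p;
        p->next = p->prev = p;
    } else {
        node_t *next = *slot, *prev = next->prev;
        p->next = next; p->prev = prev;
        prev->next = p; next->prev = p;
    }
}

static void slot_remove(node_t **slot, node_t *p)
{
    if (p->next == p) {           /* only element in the slot */
        assert(*slot == p);
        *slot = NULL;
    } else {
        if (*slot == p)
            *slot = p->next;
        p->prev->next = p->next;
        p->next->prev = p->prev;
    }
    p->next = p->prev = NULL;
}

/* Triggering with a sentinel: splice the slot behind the sentinel and fire
 * timers one at a time; a real implementation could stop ("yield") at any
 * point and resume from sentinel->next without rescanning the wheel. */
static void fire_slot(node_t **slot, node_t *sentinel)
{
    node_t *p = *slot;
    if (!p)
        return;
    if (p->next == p) {
        sentinel->next = sentinel->prev = sentinel;
    } else {
        sentinel->next = p->next;
        sentinel->prev = p->prev;
        sentinel->next->prev = sentinel;
        sentinel->prev->next = sentinel;
    }
    *slot = NULL;
    for (;;) {
        printf("timeout %d\n", p->id);      /* timeout callback goes here */
        p = sentinel->next;
        if (p == sentinel)
            break;
        sentinel->next = p->next;           /* detach next timer to fire */
        p->next->prev = sentinel;
    }
}

int main(void)
{
    node_t a = {0,0,1}, b = {0,0,2}, c = {0,0,3}, sentinel;
    node_t *slot = NULL;
    slot_insert(&slot, &a);
    slot_insert(&slot, &b);
    slot_insert(&slot, &c);
    slot_remove(&slot, &b);
    fire_slot(&slot, &sentinel);   /* fires 1 then 3 */
    return 0;
}
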
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 51de3528be..965de748c9 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -50,6 +50,8 @@
#include "erl_ptab.h"
#include "erl_check_io.h"
#include "erl_bif_unique.h"
+#define ERTS_WANT_TIMER_WHEEL_API
+#include "erl_time.h"
#ifdef HIPE
# include "hipe_mode_switch.h"
#endif
@@ -2220,97 +2222,157 @@ tail_recur:
#undef MAKE_HASH_CDR_POST_OP
}
-static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
+static Eterm
+do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
+ ErlHeapFragment **bp, Process **p, Uint sz)
{
- /* error_logger !
- {notify,{info_msg,gleader,{emulator,"~s~n",[<message as list>]}}} |
- {notify,{error,gleader,{emulator,"~s~n",[<message as list>]}}} |
- {notify,{warning_msg,gleader,{emulator,"~s~n",[<message as list>}]}} */
- Eterm* hp;
- Uint sz;
Uint gl_sz;
- Eterm gl;
- Eterm list,plist,format,tuple1,tuple2,tuple3;
- ErlOffHeap *ohp;
- ErlHeapFragment *bp = NULL;
-#if !defined(ERTS_SMP)
- Process *p;
-#endif
-
- ASSERT(is_atom(tag));
-
- if (len <= 0) {
- return -1;
- }
+ gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
+ sz = sz + gl_sz;
#ifndef ERTS_SMP
#ifdef USE_THREADS
- p = NULL;
if (erts_get_scheduler_data()) /* Must be scheduler thread */
#endif
{
- p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
- if (p) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ *p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
+ if (*p) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&(*p)->state);
if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))
- p = NULL;
+ *p = NULL;
}
}
- if (!p) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
- tag, buf);
- return 0;
+ if (!*p) {
+ return NIL;
}
- /* So we have an error logger, lets build the message */
-#endif
- gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
- sz = len * 2 /* message list */+ 2 /* cons surrounding message list */
- + gl_sz +
- 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */ +
- 8 /* "~s~n" */;
-#ifndef ERTS_SMP
- if (sz <= HeapWordsLeft(p)) {
- ohp = &MSO(p);
- hp = HEAP_TOP(p);
- HEAP_TOP(p) += sz;
+    /* So we have an error logger, let's build the message */
+ if (sz <= HeapWordsLeft(*p)) {
+ *ohp = &MSO(*p);
+ *hp = HEAP_TOP(*p);
+ HEAP_TOP(*p) += sz;
} else {
#endif
- bp = new_message_buffer(sz);
- ohp = &bp->off_heap;
- hp = bp->mem;
+ *bp = new_message_buffer(sz);
+ *ohp = &(*bp)->off_heap;
+ *hp = (*bp)->mem;
#ifndef ERTS_SMP
}
#endif
- gl = (is_nil(gleader)
+
+ return (is_nil(gleader)
? am_noproc
: (IS_CONST(gleader)
? gleader
- : copy_struct(gleader,gl_sz,&hp,ohp)));
- list = buf_to_intlist(&hp, buf, len, NIL);
- plist = CONS(hp,list,NIL);
- hp += 2;
- format = buf_to_intlist(&hp, "~s~n", 4, NIL);
- tuple1 = TUPLE3(hp, am_emulator, format, plist);
- hp += 4;
- tuple2 = TUPLE3(hp, tag, gl, tuple1);
- hp += 4;
- tuple3 = TUPLE2(hp, am_notify, tuple2);
+ : copy_struct(gleader,gl_sz,hp,*ohp)));
+}
+
+static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *bp,
+ Process *p, Eterm message)
+{
#ifdef HARDDEBUG
- erts_fprintf(stderr, "%T\n", tuple3);
+ erts_fprintf(stderr, "%T\n", message);
#endif
#ifdef ERTS_SMP
{
Eterm from = erts_get_current_pid();
if (is_not_internal_pid(from))
from = NIL;
- erts_queue_error_logger_message(from, tuple3, bp);
+ erts_queue_error_logger_message(from, message, bp);
}
#else
- erts_queue_message(p, NULL /* only used for smp build */, bp, tuple3, NIL);
+ erts_queue_message(p, NULL /* only used for smp build */, bp, message, NIL);
#endif
+}
+
+/* error_logger !
+ {notify,{info_msg,gleader,{emulator,format,[args]}}} |
+ {notify,{error,gleader,{emulator,format,[args]}}} |
+   {notify,{warning_msg,gleader,{emulator,format,[args]}}} */
+static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
+{
+ Uint sz;
+ Eterm gl;
+ Eterm list,args,format,tuple1,tuple2,tuple3;
+
+ Eterm *hp = NULL;
+ ErlOffHeap *ohp = NULL;
+ ErlHeapFragment *bp = NULL;
+ Process *p = NULL;
+
+ ASSERT(is_atom(tag));
+
+ if (len <= 0) {
+ return -1;
+ }
+
+ sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
+ + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */
+ + 8 /* "~s~n" */;
+
+    /* gleader size is added to sz and the message heap is allocated in the call below */
+ gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
+
+ if(is_nil(gl)) {
+ /* buf *always* points to a null terminated string */
+ erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
+ tag, buf);
+ return 0;
+ }
+
+ list = buf_to_intlist(&hp, buf, len, NIL);
+ args = CONS(hp,list,NIL);
+ hp += 2;
+ format = buf_to_intlist(&hp, "~s~n", 4, NIL);
+ tuple1 = TUPLE3(hp, am_emulator, format, args);
+ hp += 4;
+ tuple2 = TUPLE3(hp, tag, gl, tuple1);
+ hp += 4;
+ tuple3 = TUPLE2(hp, am_notify, tuple2);
+
+ do_send_logger_message(hp, ohp, bp, p, tuple3);
+ return 0;
+}
+
+static int do_send_term_to_logger(Eterm tag, Eterm gleader,
+ char *buf, int len, Eterm args)
+{
+ Uint sz;
+ Eterm gl;
+ Uint args_sz;
+ Eterm format,tuple1,tuple2,tuple3;
+
+ Eterm *hp = NULL;
+ ErlOffHeap *ohp = NULL;
+ ErlHeapFragment *bp = NULL;
+ Process *p = NULL;
+
+ ASSERT(is_atom(tag));
+
+ args_sz = size_object(args);
+ sz = len * 2 /* format */ + args_sz
+ + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */;
+
+    /* gleader size is added to sz and the message heap is allocated in the call below */
+ gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
+
+ if(is_nil(gl)) {
+ /* buf *always* points to a null terminated string */
+ erts_fprintf(stderr, "(no error logger present) %T: \"%s\" %T\n",
+ tag, buf, args);
+ return 0;
+ }
+
+ format = buf_to_intlist(&hp, buf, len, NIL);
+ args = copy_struct(args, args_sz, &hp, ohp);
+ tuple1 = TUPLE3(hp, am_emulator, format, args);
+ hp += 4;
+ tuple2 = TUPLE3(hp, tag, gl, tuple1);
+ hp += 4;
+ tuple3 = TUPLE2(hp, am_notify, tuple2);
+
+ do_send_logger_message(hp, ohp, bp, p, tuple3);
return 0;
}
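The size computed in do_send_to_logger above follows the usual ERTS heap layout rules: a list needs one 2-word cons cell per element, and a tuple of arity N needs N+1 words (header plus elements). A sketch that spells out the same accounting, with the constants taken directly from the hunk; the group leader's size is added separately inside do_allocate_logger_message:

    /* Sketch: words for {notify,{Tag,GL,{emulator,"~s~n",[Msg]}}} when Msg
     * is a list of `len` characters; GL is accounted for elsewhere. */
    static Uint
    logger_msg_size(int len)
    {
        return (Uint) (len * 2  /* Msg: one 2-word cons cell per character  */
                       + 2      /* [Msg]: cons cell wrapping the message    */
                       + 8      /* "~s~n": 4 characters, 2 words each       */
                       + 4      /* {emulator,Format,Args}: arity 3 + header */
                       + 4      /* {Tag,GL,Inner}: arity 3 + header         */
                       + 3);    /* {notify,Outer}: arity 2 + header         */
    }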
@@ -2338,6 +2400,12 @@ send_error_to_logger(Eterm gleader, char *buf, int len)
return do_send_to_logger(am_error, gleader, buf, len);
}
+static ERTS_INLINE int
+send_error_term_to_logger(Eterm gleader, char *buf, int len, Eterm args)
+{
+ return do_send_term_to_logger(am_error, gleader, buf, len, args);
+}
+
#define LOGGER_DSBUF_INC_SZ 256
static erts_dsprintf_buf_t *
@@ -2413,6 +2481,15 @@ erts_send_error_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp)
}
int
+erts_send_error_term_to_logger(Eterm gleader, erts_dsprintf_buf_t *dsbufp, Eterm args)
+{
+ int res;
+ res = send_error_term_to_logger(gleader, dsbufp->str, dsbufp->str_len, args);
+ destroy_logger_dsbuf(dsbufp);
+ return res;
+}
+
+int
erts_send_info_to_logger_str(Eterm gleader, char *str)
{
return send_info_to_logger(gleader, str, sys_strlen(str));
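The new erts_send_error_term_to_logger entry point mirrors erts_send_error_to_logger but carries a ready-built argument term instead of folding everything into the formatted string, so the emulator can send {emulator, Format, Args} with a real term in Args. A hedged usage sketch, assuming the logger dsbuf helpers that live alongside this code (erts_create_logger_dsbuf and erts_dsprintf); the format string and the calling function are invented for illustration:

    /* Illustration only: format text into a logger dsbuf and attach a
     * separate Eterm argument list; the call consumes the buffer (it is
     * destroyed inside erts_send_error_term_to_logger, per the hunk above). */
    static void
    report_failure(Eterm gleader, Eterm args)
    {
        erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
        erts_dsprintf(dsbufp, "operation failed: ~p\n");
        erts_send_error_term_to_logger(gleader, dsbufp, args);
    }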
@@ -4441,145 +4518,6 @@ is_string(Eterm list)
return 0;
}
-#ifdef ERTS_SMP
-
-/*
- * Process and Port timers in smp case
- */
-
-ERTS_SCHED_PREF_PRE_ALLOC_IMPL(ptimer_pre, ErtsSmpPTimer, 1000)
-
-#define ERTS_PTMR_FLGS_ALLCD_SIZE \
- 2
-#define ERTS_PTMR_FLGS_ALLCD_MASK \
- ((((Uint32) 1) << ERTS_PTMR_FLGS_ALLCD_SIZE) - 1)
-
-#define ERTS_PTMR_FLGS_PREALLCD ((Uint32) 1)
-#define ERTS_PTMR_FLGS_SLALLCD ((Uint32) 2)
-#define ERTS_PTMR_FLGS_LLALLCD ((Uint32) 3)
-#define ERTS_PTMR_FLG_CANCELLED (((Uint32) 1) << (ERTS_PTMR_FLGS_ALLCD_SIZE+0))
-
-static void
-init_ptimers(void)
-{
- init_ptimer_pre_alloc();
-}
-
-static ERTS_INLINE void
-free_ptimer(ErtsSmpPTimer *ptimer)
-{
- switch (ptimer->timer.flags & ERTS_PTMR_FLGS_ALLCD_MASK) {
- case ERTS_PTMR_FLGS_PREALLCD:
- (void) ptimer_pre_free(ptimer);
- break;
- case ERTS_PTMR_FLGS_SLALLCD:
- erts_free(ERTS_ALC_T_SL_PTIMER, (void *) ptimer);
- break;
- case ERTS_PTMR_FLGS_LLALLCD:
- erts_free(ERTS_ALC_T_LL_PTIMER, (void *) ptimer);
- break;
- default:
- erl_exit(ERTS_ABORT_EXIT,
- "Internal error: Bad ptimer alloc type\n");
- break;
- }
-}
-
-/* Callback for process timeout cancelled */
-static void
-ptimer_cancelled(ErtsSmpPTimer *ptimer)
-{
- free_ptimer(ptimer);
-}
-
-/* Callback for process timeout */
-static void
-ptimer_timeout(ErtsSmpPTimer *ptimer)
-{
- if (is_internal_pid(ptimer->timer.id)) {
- Process *p;
- p = erts_pid2proc_opt(NULL,
- 0,
- ptimer->timer.id,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (p) {
- if (!ERTS_PROC_IS_EXITING(p)
- && !(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
- ASSERT(*ptimer->timer.timer_ref == ptimer);
- *ptimer->timer.timer_ref = NULL;
- (*ptimer->timer.timeout_func)(p);
- }
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- }
- }
- else {
- Port *p;
- ASSERT(is_internal_port(ptimer->timer.id));
- p = erts_id2port_sflgs(ptimer->timer.id,
- NULL,
- 0,
- ERTS_PORT_SFLGS_DEAD);
- if (p) {
- if (!(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
- ASSERT(*ptimer->timer.timer_ref == ptimer);
- *ptimer->timer.timer_ref = NULL;
- (*ptimer->timer.timeout_func)(p);
- }
- erts_port_release(p);
- }
- }
- free_ptimer(ptimer);
-}
-
-void
-erts_create_smp_ptimer(ErtsSmpPTimer **timer_ref,
- Eterm id,
- ErlTimeoutProc timeout_func,
- Uint timeout)
-{
- ErtsSmpPTimer *res = ptimer_pre_alloc();
- if (res)
- res->timer.flags = ERTS_PTMR_FLGS_PREALLCD;
- else {
- if (timeout < ERTS_ALC_MIN_LONG_LIVED_TIME) {
- res = erts_alloc(ERTS_ALC_T_SL_PTIMER, sizeof(ErtsSmpPTimer));
- res->timer.flags = ERTS_PTMR_FLGS_SLALLCD;
- }
- else {
- res = erts_alloc(ERTS_ALC_T_LL_PTIMER, sizeof(ErtsSmpPTimer));
- res->timer.flags = ERTS_PTMR_FLGS_LLALLCD;
- }
- }
- res->timer.timeout_func = timeout_func;
- res->timer.timer_ref = timer_ref;
- res->timer.id = id;
- erts_init_timer(&res->timer.tm);
-
- ASSERT(!*timer_ref);
-
- *timer_ref = res;
-
- erts_set_timer(&res->timer.tm,
- (ErlTimeoutProc) ptimer_timeout,
- (ErlCancelProc) ptimer_cancelled,
- (void*) res,
- timeout);
-}
-
-void
-erts_cancel_smp_ptimer(ErtsSmpPTimer *ptimer)
-{
- if (ptimer) {
- ASSERT(*ptimer->timer.timer_ref == ptimer);
- *ptimer->timer.timer_ref = NULL;
- ptimer->timer.flags |= ERTS_PTMR_FLG_CANCELLED;
- erts_cancel_timer(&ptimer->timer.tm);
- }
-}
-
-#endif
-
static int trim_threshold;
static int top_pad;
static int mmap_threshold;
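The block removed above implemented the SMP process and port timers directly in utils.c. Each ErtsSmpPTimer recorded how it had been allocated in the low two bits of its flags word, with a cancellation flag just above them, so free_ptimer() could return the memory to the matching allocator. A compact restatement of that flag layout, with the values taken from the deleted defines (identifiers shortened here for readability):

    /* Flag layout of the removed ErtsSmpPTimer:
     * bits 0-1 = allocation strategy, bit 2 = cancelled. */
    enum {
        PTMR_PREALLCD   = 1,      /* scheduler-preferred pre-alloc pool    */
        PTMR_SLALLCD    = 2,      /* erts_alloc(ERTS_ALC_T_SL_PTIMER, ...) */
        PTMR_LLALLCD    = 3,      /* erts_alloc(ERTS_ALC_T_LL_PTIMER, ...) */
        PTMR_ALLCD_MASK = 3,      /* mask for the two allocation bits      */
        PTMR_CANCELLED  = 1 << 2  /* timer was cancelled before it fired   */
    };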
@@ -4589,9 +4527,7 @@ Uint tot_bin_allocated;
void erts_init_utils(void)
{
-#ifdef ERTS_SMP
- init_ptimers();
-#endif
+
}
void erts_init_utils_mem(void)