Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names          |   7
-rw-r--r--  erts/emulator/beam/beam_bp.c           |   2
-rw-r--r--  erts/emulator/beam/beam_emu.c          |  77
-rw-r--r--  erts/emulator/beam/bif.c               | 253
-rw-r--r--  erts/emulator/beam/bif.tab             |   2
-rw-r--r--  erts/emulator/beam/big.c               | 433
-rw-r--r--  erts/emulator/beam/big.h               |  14
-rw-r--r--  erts/emulator/beam/erl_alloc.c         |   6
-rw-r--r--  erts/emulator/beam/erl_alloc.h         |  18
-rw-r--r--  erts/emulator/beam/erl_alloc.types     |   1
-rw-r--r--  erts/emulator/beam/erl_async.c         |  34
-rw-r--r--  erts/emulator/beam/erl_bif_info.c      |  42
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c     |  41
-rw-r--r--  erts/emulator/beam/erl_db.c            |  46
-rw-r--r--  erts/emulator/beam/erl_db_util.h       |   5
-rw-r--r--  erts/emulator/beam/erl_driver.h        |  86
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h       |  84
-rw-r--r--  erts/emulator/beam/erl_gc.c            |  60
-rw-r--r--  erts/emulator/beam/erl_gc.h            |   5
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c      |   7
-rw-r--r--  erts/emulator/beam/erl_init.c          |   3
-rw-r--r--  erts/emulator/beam/erl_lock_check.c    |   2
-rw-r--r--  erts/emulator/beam/erl_msacc.c         | 486
-rw-r--r--  erts/emulator/beam/erl_msacc.h         | 409
-rw-r--r--  erts/emulator/beam/erl_nif.c           |  20
-rw-r--r--  erts/emulator/beam/erl_nif.h           |  60
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h |   6
-rw-r--r--  erts/emulator/beam/erl_port_task.c     |   3
-rw-r--r--  erts/emulator/beam/erl_process.c       |  73
-rw-r--r--  erts/emulator/beam/erl_process.h       |  87
-rw-r--r--  erts/emulator/beam/erl_threads.h       |  34
-rw-r--r--  erts/emulator/beam/erl_time.h          |   6
-rw-r--r--  erts/emulator/beam/erl_time_sup.c      | 204
-rw-r--r--  erts/emulator/beam/erl_trace.c         | 757
-rw-r--r--  erts/emulator/beam/erl_trace.h         |  32
-rw-r--r--  erts/emulator/beam/global.h            |   2
-rw-r--r--  erts/emulator/beam/io.c                |  55
-rw-r--r--  erts/emulator/beam/ops.tab             |   9
-rw-r--r--  erts/emulator/beam/sys.h               |  15
-rw-r--r--  erts/emulator/beam/time.c              |  20
40 files changed, 2674 insertions, 832 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index ae7f405cce..6fb08ee896 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -102,6 +102,7 @@ atom asynchronous
atom atom
atom atom_used
atom attributes
+atom await_microstate_accounting_modifications
atom await_port_send_result
atom await_proc_exit
atom await_result
@@ -172,6 +173,7 @@ atom const
atom context_switches
atom control
atom copy
+atom counters
atom cpu
atom cpu_timestamp
atom cr
@@ -258,6 +260,7 @@ atom functions
atom function_clause
atom garbage_collecting
atom garbage_collection
+atom garbage_collection_info
atom gc_end
atom gc_start
atom Ge='>='
@@ -268,6 +271,7 @@ atom get_tcw
atom getenv
atom gather_gc_info_result
atom gather_io_bytes
+atom gather_microstate_accounting_result
atom gather_sched_wall_time_result
atom getting_linked
atom getting_unlinked
@@ -359,6 +363,7 @@ atom merge_trap
atom meta
atom meta_match_spec
atom micro_seconds
+atom microstate_accounting
atom milli_seconds
atom min_heap_size
atom min_bin_vheap_size
@@ -372,6 +377,7 @@ atom monitor
atom monitor_nodes
atom monitors
atom monotonic
+atom monotonic_timestamp
atom more
atom multi_scheduling
atom multiline
@@ -556,6 +562,7 @@ atom static
atom stderr_to_stdout
atom stop
atom stream
+atom strict_monotonic_timestamp
atom sunrm
atom suspend
atom suspended
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 2a23da4f25..5d471d168b 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -864,7 +864,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
Eterm* cpp;
int return_to_trace = 0;
BeamInstr w;
- BeamInstr *cp_save;
+ BeamInstr *cp_save = c_p->cp;
Uint32 flags;
Uint need = 0;
Eterm* E = c_p->stop;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 4d7b00b032..d4fb36acc3 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -113,6 +113,9 @@ do { \
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#endif
+#define GET_BIF_MODULE(p) ((Eterm) (((Export *) p)->code[0]))
+#define GET_BIF_FUNCTION(p) ((Eterm) (((Export *) p)->code[1]))
+#define GET_BIF_ARITY(p) ((Eterm) (((Export *) p)->code[2]))
#define GET_BIF_ADDRESS(p) ((BifFunction) (((Export *) p)->code[4]))
#define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm))))
@@ -1249,6 +1252,8 @@ void process_main(void)
Uint64 start_time = 0; /* Monitor long schedule */
BeamInstr* start_time_i = NULL;
+ ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
+
ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */
@@ -1302,6 +1307,8 @@ void process_main(void)
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_MSACC_UPDATE_CACHE_X();
+
if (erts_system_monitor_long_schedule != 0) {
start_time = erts_timestamp_millis();
start_time_i = c_p->i;
@@ -2741,11 +2748,21 @@ do { \
*/
OpCase(call_bif_e):
{
- Eterm (*bf)(Process*, Eterm*, BeamInstr*) = GET_BIF_ADDRESS(Arg(0));
+ Eterm (*bf)(Process*, Eterm*, BeamInstr*);
Eterm result;
BeamInstr *next;
ErlHeapFragment *live_hf_end;
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
+ if (GET_BIF_MODULE(Arg(0)) == am_ets) {
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS);
+ } else {
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF);
+ }
+ }
+
+ bf = GET_BIF_ADDRESS(Arg(0));
+
PRE_BIF_SWAPOUT(c_p);
c_p->fcalls = FCALLS - 1;
if (FCALLS <= 0) {
@@ -2768,6 +2785,12 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
+ /* We have to update the cache if we are enabled in order
+ to make sure no book keeping is done after we disabled
+ msacc. We don't always do this as it is quite expensive. */
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X())
+ ERTS_MSACC_UPDATE_CACHE_X();
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
if (is_value(result)) {
r(0) = result;
CHECK_TERM(r(0));
@@ -3452,6 +3475,8 @@ do { \
BifFunction vbf;
ErlHeapFragment *live_hf_end;
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
+
DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
c_p->current = I-3; /* current and vbf set to please handle_error */
SWAPOUT;
@@ -3476,6 +3501,8 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+
DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
goto apply_bif_or_nif_epilogue;
@@ -3490,6 +3517,13 @@ do { \
* code[3]: &&apply_bif
* code[4]: Function pointer to BIF function
*/
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
+ if ((Eterm)I[-3] == am_ets) {
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS);
+ } else {
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF);
+ }
+ }
c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */
c_p->i = I; /* In case we apply check_process_code/2. */
@@ -3516,7 +3550,12 @@ do { \
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
}
-
+ /* We have to update the cache if we are enabled in order
+ to make sure no book keeping is done after we disabled
+ msacc. We don't always do this as it is quite expensive. */
+ if (ERTS_MSACC_IS_ENABLED_CACHED_X())
+ ERTS_MSACC_UPDATE_CACHE_X();
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
DTRACE_BIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
apply_bif_or_nif_epilogue:
@@ -3885,7 +3924,7 @@ do { \
Op1 += Arg1;
store_bs_add_result:
- if (MY_IS_SSMALL((Sint) Op1)) {
+ if (Op1 <= MAX_SMALL) {
Op1 = make_small(Op1);
} else {
/*
@@ -4895,6 +4934,38 @@ do { \
}
}
+ /* This is optimised as an instruction because
+ it has to be very very fast */
+ OpCase(i_perf_counter): {
+ BeamInstr* next;
+ ErtsSysPerfCounter ts;
+ PreFetch(0, next);
+
+ ts = erts_sys_perf_counter();
+
+ if (IS_SSMALL(ts)) {
+ r(0) = make_small((Sint)ts);
+ } else {
+ TestHeap(ERTS_SINT64_HEAP_SIZE(ts),0);
+ r(0) = make_big(HTOP);
+#if defined(ARCH_32) || HALFWORD_HEAP
+ if (ts >= (((Uint64) 1) << 32)) {
+ *HTOP = make_pos_bignum_header(2);
+ BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff));
+ BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff));
+ HTOP += 3;
+ }
+ else
+#endif
+ {
+ *HTOP = make_pos_bignum_header(1);
+ BIG_DIGIT(HTOP, 0) = (Uint) ts;
+ HTOP += 2;
+ }
+ }
+ NextPF(0, next);
+ }
+
OpCase(i_debug_breakpoint): {
HEAVY_SWAPOUT;
I = call_error_handler(c_p, I-3, reg, am_breakpoint);
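The beam_emu.c hunks above all apply the same microstate-accounting bracket around BIF and NIF dispatch. The following condensed sketch (not part of the patch) shows that pattern in isolation, using only the macros visible in the hunks; the emulator-loop context is elided and do_the_call() is a hypothetical stand-in for the actual BIF/NIF invocation:

    ERTS_MSACC_DECLARE_CACHE_X();   /* cache the msacc thread-specific data */

    /* before the call: charge time to 'ets' or 'bif' (or 'nif') */
    if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
        if (mod == am_ets)
            ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS);
        else
            ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF);
    }

    result = do_the_call();         /* hypothetical stand-in for the call */

    /* after the call: refresh the cache only when accounting is enabled
       (it is expensive), then charge time to 'emulator' again */
    if (ERTS_MSACC_IS_ENABLED_CACHED_X())
        ERTS_MSACC_UPDATE_CACHE_X();
    ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);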
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index bb9165cd79..e116b10b95 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -44,6 +44,7 @@
#include "erl_ptab.h"
#include "erl_bits.h"
#include "erl_bif_unique.h"
+#include "erl_msacc.h"
Export *erts_await_result;
static Export* flush_monitor_messages_trap = NULL;
@@ -54,6 +55,9 @@ Export* erts_format_cpu_topology_trap = NULL;
static Export dsend_continue_trap_export;
Export *erts_convert_time_unit_trap = NULL;
+static Export *await_msacc_mod_trap = NULL;
+static erts_smp_atomic32_t msacc;
+
static Export *await_sched_wall_time_mod_trap;
static erts_smp_atomic32_t sched_wall_time;
@@ -2143,7 +2147,11 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
int connect = !0;
Eterm l = opts;
Sint result;
+
DeclareTypedTmpHeap(ErtsSendContext, ctx, BIF_P);
+
+ ERTS_MSACC_PUSH_STATE_M_X();
+
UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), BIF_P);
ctx->suspend = !0;
@@ -2172,7 +2180,10 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
ref = NIL;
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_SEND);
result = do_send(p, to, msg, &ref, ctx);
+ ERTS_MSACC_POP_STATE_M_X();
+
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
@@ -2288,8 +2299,8 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
Eterm ref;
Sint result;
DeclareTypedTmpHeap(ErtsSendContext, ctx, p);
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_SEND);
UseTmpHeap(sizeof(ErtsSendContext)/sizeof(Eterm), p);
-
#ifdef DEBUG
ref = NIL;
#endif
@@ -2300,7 +2311,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
ctx->dss.phase = ERTS_DSIG_SEND_PHASE_INIT;
result = do_send(p, to, msg, &ref, ctx);
-
+
+ ERTS_MSACC_POP_STATE_M_X();
+
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
if (ERTS_IS_PROC_OUT_OF_REDS(p))
@@ -2904,175 +2917,20 @@ BIF_RETTYPE integer_to_list_1(BIF_ALIST_1)
/**********************************************************************/
-/* convert a list of ascii ascii integer value to an integer */
-
-
-#define LTI_BAD_STRUCTURE 0
-#define LTI_NO_INTEGER 1
-#define LTI_SOME_INTEGER 2
-#define LTI_ALL_INTEGER 3
-
-static int do_list_to_integer(Process *p, Eterm orig_list,
- Eterm *integer, Eterm *rest)
-{
- Sint i = 0;
- Uint ui = 0;
- int skip = 0;
- int neg = 0;
- Sint n = 0;
- int m;
- int lg2;
- Eterm res;
- Eterm* hp;
- Eterm *hp_end;
- Eterm lst = orig_list;
- Eterm tail = lst;
- int error_res = LTI_BAD_STRUCTURE;
-
- if (is_nil(lst)) {
- error_res = LTI_NO_INTEGER;
- error:
- *rest = tail;
- *integer = make_small(0);
- return error_res;
- }
- if (is_not_list(lst))
- goto error;
-
- /* if first char is a '-' then it is a negative integer */
- if (CAR(list_val(lst)) == make_small('-')) {
- neg = 1;
- skip = 1;
- lst = CDR(list_val(lst));
- if (is_not_list(lst)) {
- tail = lst;
- error_res = LTI_NO_INTEGER;
- goto error;
- }
- } else if (CAR(list_val(lst)) == make_small('+')) {
- /* ignore plus */
- skip = 1;
- lst = CDR(list_val(lst));
- if (is_not_list(lst)) {
- tail = lst;
- error_res = LTI_NO_INTEGER;
- goto error;
- }
- }
-
- /* Calculate size and do type check */
-
- while(1) {
- if (is_not_small(CAR(list_val(lst)))) {
- break;
- }
- if (unsigned_val(CAR(list_val(lst))) < '0' ||
- unsigned_val(CAR(list_val(lst))) > '9') {
- break;
- }
- ui = ui * 10;
- ui = ui + unsigned_val(CAR(list_val(lst))) - '0';
- n++;
- lst = CDR(list_val(lst));
- if (is_nil(lst)) {
- break;
- }
- if (is_not_list(lst)) {
- break;
- }
- }
-
- tail = lst;
- if (!n) {
- error_res = LTI_NO_INTEGER;
- goto error;
- }
-
-
- /* If n <= 8 then we know it's a small int
- ** since 2^27 = 134217728. If n > 8 then we must
- ** construct a bignum and let that routine do the checking
- */
-
- if (n <= SMALL_DIGITS) { /* It must be small */
- if (neg) i = -(Sint)ui;
- else i = (Sint)ui;
- res = make_small(i);
- } else {
- /* Convert from log10 to log2 by multiplying with 1/log10(2)=3.3219
- which we round up to (3 + 1/3) */
- lg2 = (n+1)*3 + (n+1)/3 + 1;
- m = (lg2+D_EXP-1)/D_EXP; /* number of digits */
- m = BIG_NEED_SIZE(m); /* number of words + thing */
-
- hp = HAlloc(p, m);
- hp_end = hp + m;
-
- lst = orig_list;
- if (skip)
- lst = CDR(list_val(lst));
-
- /* load first digits (at least one digit) */
- if ((i = (n % D_DECIMAL_EXP)) == 0)
- i = D_DECIMAL_EXP;
- n -= i;
- m = 0;
- while(i--) {
- m = 10*m + (unsigned_val(CAR(list_val(lst))) - '0');
- lst = CDR(list_val(lst));
- }
- res = small_to_big(m, hp); /* load first digits */
-
- while(n) {
- i = D_DECIMAL_EXP;
- n -= D_DECIMAL_EXP;
- m = 0;
- while(i--) {
- m = 10*m + (unsigned_val(CAR(list_val(lst))) - '0');
- lst = CDR(list_val(lst));
- }
- if (is_small(res))
- res = small_to_big(signed_val(res), hp);
- res = big_times_small(res, D_DECIMAL_BASE, hp);
- if (is_small(res))
- res = small_to_big(signed_val(res), hp);
- res = big_plus_small(res, m, hp);
- }
-
- if (neg) {
- if (is_small(res))
- res = make_small(-signed_val(res));
- else {
- Uint *big = big_val(res); /* point to thing */
- *big = bignum_header_neg(*big);
- }
- }
-
- if (is_not_small(res)) {
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
+/*
+ * Converts a list of ascii base10 digits to an integer fully or partially.
+ * Returns result and the remaining tail.
+ * On error returns: {error,not_a_list}, or {error, no_integer}
+ */
- if (is_not_small(res)) {
- hp += (big_arity(res)+1);
- }
- }
- HRelease(p,hp_end,hp);
- }
- *integer = res;
- *rest = tail;
- if (tail != NIL) {
- return LTI_SOME_INTEGER;
- }
- return LTI_ALL_INTEGER;
-}
BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
{
Eterm res;
Eterm tail;
Eterm *hp;
/* must be a list */
- switch (do_list_to_integer(BIF_P,BIF_ARG_1,&res,&tail)) {
- /* HAlloc after do_list_to_integer as it
- might HAlloc itself (bignum) */
+ switch (erts_list_to_integer(BIF_P, BIF_ARG_1, 10, &res, &tail)) {
+ /* HAlloc after erts_list_to_integer as it might HAlloc itself (bignum) */
case LTI_BAD_STRUCTURE:
hp = HAlloc(BIF_P,3);
BIF_RET(TUPLE2(hp, am_error, am_not_a_list));
@@ -3087,13 +2945,14 @@ BIF_RETTYPE string_to_integer_1(BIF_ALIST_1)
BIF_RETTYPE list_to_integer_1(BIF_ALIST_1)
{
- /* Using do_list_to_integer is about twice as fast as using
+ /* Using erts_list_to_integer is about twice as fast as using
erts_chars_to_integer because we do not have to copy the
entire list */
Eterm res;
Eterm dummy;
/* must be a list */
- if (do_list_to_integer(BIF_P,BIF_ARG_1,&res,&dummy) != LTI_ALL_INTEGER) {
+ if (erts_list_to_integer(BIF_P, BIF_ARG_1, 10,
+ &res, &dummy) != LTI_ALL_INTEGER) {
BIF_ERROR(BIF_P,BADARG);
}
BIF_RET(res);
@@ -3101,14 +2960,12 @@ BIF_RETTYPE list_to_integer_1(BIF_ALIST_1)
BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
{
-
/* Bif implementation is about 50% faster than pure erlang,
and since we have erts_chars_to_integer now it is simpler
as well. This could be optmized further if we did not have to
copy the list to buf. */
int i;
- Eterm res;
- char *buf = NULL;
+ Eterm res, dummy;
int base;
i = erts_list_length(BIF_ARG_1);
@@ -3116,31 +2973,16 @@ BIF_RETTYPE list_to_integer_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
base = signed_val(BIF_ARG_2);
-
+
if (base < 2 || base > 36)
BIF_ERROR(BIF_P, BADARG);
- /* Take fast path if base it 10 */
- if (base == 10)
- return list_to_integer_1(BIF_P,&BIF_ARG_1);
-
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, i + 1);
-
- if (intlist_to_buf(BIF_ARG_1, buf, i) < 0)
- goto list_to_integer_1_error;
- buf[i] = '\0'; /* null terminal */
-
- if ((res = erts_chars_to_integer(BIF_P,buf,i,base)) == THE_NON_VALUE)
- goto list_to_integer_1_error;
-
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
+ if (erts_list_to_integer(BIF_P, BIF_ARG_1, base,
+ &res, &dummy) != LTI_ALL_INTEGER) {
+ BIF_ERROR(BIF_P,BADARG);
+ }
BIF_RET(res);
-
- list_to_integer_1_error:
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- BIF_ERROR(BIF_P, BADARG);
-
- }
+}
/**********************************************************************/
@@ -4586,6 +4428,31 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
default:
ERTS_INTERNAL_ERROR("Unknown state");
}
+#ifdef ERTS_ENABLE_MSACC
+ } else if (BIF_ARG_1 == am_microstate_accounting) {
+ Eterm threads;
+ if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ erts_aint32_t new = BIF_ARG_2 == am_true ? ERTS_MSACC_ENABLE : ERTS_MSACC_DISABLE;
+ erts_aint32_t old = erts_smp_atomic32_xchg_nob(&msacc, new);
+ Eterm ref = erts_msacc_request(BIF_P, new, &threads);
+ if (is_non_value(ref))
+ BIF_RET(old ? am_true : am_false);
+ BIF_TRAP3(await_msacc_mod_trap,
+ BIF_P,
+ ref,
+ old ? am_true : am_false,
+ threads);
+ } else if (BIF_ARG_2 == am_reset) {
+ Eterm ref = erts_msacc_request(BIF_P, ERTS_MSACC_RESET, &threads);
+ erts_aint32_t old = erts_smp_atomic32_read_nob(&msacc);
+ ASSERT(is_value(ref));
+ BIF_TRAP3(await_msacc_mod_trap,
+ BIF_P,
+ ref,
+ old ? am_true : am_false,
+ threads);
+ }
+#endif
} else if (ERTS_IS_ATOM_STR("scheduling_statistics", BIF_ARG_1)) {
int what;
if (ERTS_IS_ATOM_STR("disable", BIF_ARG_2))
@@ -4915,8 +4782,12 @@ void erts_init_bif(void)
await_port_send_result_trap
= erts_export_put(am_erts_internal, am_await_port_send_result, 3);
await_sched_wall_time_mod_trap
- = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
+ = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
+ await_msacc_mod_trap
+ = erts_export_put(am_erts_internal, am_await_microstate_accounting_modifications, 3);
+
erts_smp_atomic32_init_nob(&sched_wall_time, 0);
+ erts_smp_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED());
}
#ifdef HARDDEBUG
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 1b8ae8cef5..4efc055aaf 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -170,6 +170,7 @@ bif erts_internal:term_type/1
bif erts_internal:map_hashmap_children/1
bif erts_internal:time_unit/0
+bif erts_internal:perf_counter_unit/0
bif erts_internal:is_system_process/1
@@ -369,6 +370,7 @@ bif os:getpid/0
bif os:timestamp/0
bif os:system_time/0
bif os:system_time/1
+bif os:perf_counter/0
#
# Bifs in the erl_ddll module (the module actually does not exist)
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 02d37e24df..11838e24ef 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -48,7 +48,7 @@
_t_dst = (dst)+((sz)-1); \
_t_src = (src)+((sz)-1); \
while(_t_sz--) *_t_dst-- = *_t_src--; \
- } \
+ } \
} while(0)
/* add a and b with carry in + out */
@@ -423,6 +423,25 @@
#endif
+/* Forward declaration of lookup tables (See below in this file) used in list to
+ * integer conversions for different bases. Also used in bignum printing.
+ */
+static const byte digits_per_sint_lookup[36-1];
+static const byte digits_per_small_lookup[36-1];
+static const Sint largest_power_of_base_lookup[36-1];
+
+static ERTS_INLINE byte get_digits_per_signed_int(Uint base) {
+ return digits_per_sint_lookup[base-2];
+}
+
+static ERTS_INLINE byte get_digits_per_small(Uint base) {
+ return digits_per_small_lookup[base-2];
+}
+
+static ERTS_INLINE Sint get_largest_power_of_base(Uint base) {
+ return largest_power_of_base_lookup[base-2];
+}
+
/*
** compare two number vectors
*/
@@ -1719,8 +1738,10 @@ static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
short sign = BIG_SIGN(xp);
ErtsDigit rem;
Uint n = 0;
+ const Uint digits_per_Sint = get_digits_per_signed_int(10);
+ const Sint largest_pow_of_base = get_largest_power_of_base(10);
- if (xl == 1 && *dx < D_DECIMAL_BASE) {
+ if (xl == 1 && *dx < largest_pow_of_base) {
rem = *dx;
if (rem == 0) {
(*write_func)(arg, '0'); n++;
@@ -1738,7 +1759,7 @@ static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
MOVE_DIGITS(tmp, dx, xl);
while(1) {
- tmpl = D_div(tmp, tmpl, D_DECIMAL_BASE, tmp, &rem);
+ tmpl = D_div(tmp, tmpl, largest_pow_of_base, tmp, &rem);
if (tmpl == 1 && *tmp == 0) {
while(rem) {
(*write_func)(arg, (rem % 10)+'0'); n++;
@@ -1746,7 +1767,7 @@ static Uint write_big(Wterm x, void (*write_func)(void *, char), void *arg)
}
break;
} else {
- int i = D_DECIMAL_EXP;
+ Uint i = digits_per_Sint;
while(i--) {
(*write_func)(arg, (rem % 10)+'0'); n++;
rem /= 10;
@@ -2522,63 +2543,100 @@ int term_equals_2pow32(Eterm x)
}
}
+static ERTS_INLINE int c2int_is_invalid_char(byte ch, int base) {
+ return (ch < '0'
+ || (ch > ('0' + base - 1)
+ && !(base > 10
+ && ((ch >= 'a' && ch < ('a' + base - 10))
+ || (ch >= 'A' && ch < ('A' + base - 10))))));
+}
-#define IS_VALID_CHARACTER(CHAR,BASE) \
- (CHAR < '0' \
- || (CHAR > ('0' + BASE - 1) \
- && !(BASE > 10 \
- && ((CHAR >= 'a' && CHAR < ('a' + BASE - 10)) \
- || (CHAR >= 'A' && CHAR < ('A' + BASE - 10))))))
-#define CHARACTER_FROM_BASE(CHAR) \
- ((CHAR <= '9') ? CHAR - '0' : 10 + ((CHAR <= 'Z') ? CHAR - 'A' : CHAR - 'a'))
-#define D_BASE_EXP(BASE) (d_base_exp_lookup[BASE-2])
-#define D_BASE_BASE(BASE) (d_base_base_lookup[BASE-2])
-#define LG2_LOOKUP(BASE) (lg2_lookup[base-2])
+static ERTS_INLINE byte c2int_digit_from_base(byte ch) {
+ return ch <= '9' ? ch - '0'
+ : (10 + (ch <= 'Z' ? ch - 'A' : ch - 'a'));
+}
/*
- * for i in 2..64 do
- * lg2_lookup[i-2] = log2(i)
- * end
- * How many bits are needed to store string of size n
+ * How many bits are needed to store 1 digit of given base in binary
+ * Wo.Alpha formula: Table [log2[n], {n,2,36}]
*/
-const double lg2_lookup[] = { 1.0, 1.58496, 2, 2.32193, 2.58496, 2.80735, 3.0,
- 3.16993, 3.32193, 3.45943, 3.58496, 3.70044, 3.80735, 3.90689, 4.0,
- 4.08746, 4.16993, 4.24793, 4.32193, 4.39232, 4.45943, 4.52356, 4.58496,
- 4.64386, 4.70044, 4.75489, 4.80735, 4.85798, 4.90689, 4.9542, 5.0,
- 5.04439, 5.08746, 5.12928, 5.16993, 5.20945, 5.24793, 5.2854, 5.32193,
- 5.35755, 5.39232, 5.42626, 5.45943, 5.49185, 5.52356, 5.55459, 5.58496,
- 5.61471, 5.64386, 5.67243, 5.70044, 5.72792, 5.75489, 5.78136, 5.80735,
- 5.83289, 5.85798, 5.88264, 5.90689, 5.93074, 5.9542, 5.97728, 6.0 };
+static const double lg2_lookup[36-1] = {
+ 1.0, 1.58496, 2.0, 2.32193, 2.58496, 2.80735, 3.0, 3.16993, 3.32193,
+ 3.45943, 3.58496, 3.70044, 3.80735, 3.90689, 4.0, 4.08746, 4.16993, 4.24793,
+ 4.32193, 4.39232, 4.45943, 4.52356, 4.58496, 4.64386, 4.70044, 4.75489,
+ 4.80735, 4.85798, 4.90689, 4.9542, 5.0, 5.04439, 5.08746, 5.12928, 5.16993
+};
+static ERTS_INLINE double lookup_log2(Uint base) {
+ return lg2_lookup[base - 2];
+}
/*
- * for i in 2..64 do
- * d_base_exp_lookup[i-2] = 31 / lg2_lookup[i-2];
- * end
- * How many characters can fit in 31 bits
+ * How many digits can fit into a signed int (Sint) for given base, we take
+ * one digit away just to be on the safer side (some corner cases).
*/
-const byte d_base_exp_lookup[] = { 31, 19, 15, 13, 11, 11, 10, 9, 9, 8, 8, 8, 8,
- 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5 };
+static const byte digits_per_sint_lookup[36-1] = {
+#if (SIZEOF_VOID_P == 4)
+ /* Wo.Alpha formula: Table [Trunc[31 / log[2,n]]-1, {n, 2, 36}] */
+ 30, 18, 14, 12, 10, 10, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4
+#elif (SIZEOF_VOID_P == 8)
+ /* Wo.Alpha formula: Table [Trunc[63 / log[2,n]]-1, {n, 2, 36}] */
+ 62, 38, 30, 26, 23, 21, 20, 18, 17, 17, 16, 16, 15, 15, 14, 14, 14, 13, 13,
+ 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11
+#else
+ #error "Please produce a lookup table for the new architecture"
+#endif
+};
/*
- * for i in 2..64 do
- * d_base_base_lookup[i-2] = pow(i,d_base_exp_lookup[i-2]);
- * end
- * How much can the characters which fit in 31 bit represent
+ * How many digits can fit into Erlang Small (SMALL_BITS-1) counting sign bit
*/
-const Uint d_base_base_lookup[] = { 2147483648u, 1162261467u, 1073741824u,
- 1220703125u, 362797056u, 1977326743u, 1073741824u, 387420489u,
- 1000000000u, 214358881u, 429981696u, 815730721u, 1475789056u,
- 170859375u, 268435456u, 410338673u, 612220032u, 893871739u, 1280000000u,
- 1801088541u, 113379904u, 148035889u, 191102976u, 244140625u, 308915776u,
- 387420489u, 481890304u, 594823321u, 729000000u, 887503681u, 1073741824u,
- 1291467969u, 1544804416u, 1838265625u, 60466176u, 69343957u, 79235168u,
- 90224199u, 102400000u, 115856201u, 130691232u, 147008443u, 164916224u,
- 184528125u, 205962976u, 229345007u, 254803968u, 282475249u, 312500000u,
- 345025251u, 380204032u, 418195493u, 459165024u, 503284375u, 550731776u,
- 601692057u, 656356768u, 714924299u, 777600000u, 844596301u, 916132832u,
- 992436543u, 1073741824u };
+static const byte digits_per_small_lookup[36-1] = {
+#if (SIZEOF_VOID_P == 4)
+ /* Wo.Alpha formula: Table [Trunc[27 / log[2,n]]-1, {n, 2, 36}] */
+ 27, 17, 13, 11, 10, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+#elif (SIZEOF_VOID_P == 8)
+ /* Wo.Alpha formula: Table [Trunc[59 / log[2,n]]-1, {n, 2, 36}] */
+ 59, 37, 29, 25, 22, 21, 19, 18, 17, 17, 16, 15, 15, 15, 14, 14, 14, 13, 13,
+ 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11
+#else
+ #error "Please produce a lookup table for the new architecture"
+#endif
+};
+
+/*
+ * Largest power of base which can be represented in a signed int (Sint).
+ * Calculated by base 2..36 to the power of corresponding element from
+ * digits_per_sint_lookup.
+ */
+static const Sint largest_power_of_base_lookup[36-1] = {
+#if (SIZEOF_VOID_P == 4)
+ /* Wo.Alpha formula: Table [Pow[n, Trunc[31 / log[2,n]]-1], {n, 2, 36}] */
+ 1073741824, 387420489, 268435456, 244140625, 60466176, 282475249, 134217728,
+ 43046721, 100000000, 19487171, 35831808, 62748517, 105413504, 11390625,
+ 16777216, 24137569, 34012224, 47045881, 64000000, 85766121, 5153632,
+ 6436343,7962624, 9765625, 11881376, 14348907, 17210368, 20511149, 24300000,
+ 28629151, 33554432, 39135393, 45435424, 52521875, 1679616
+#elif (SIZEOF_VOID_P == 8)
+ /* Wo.Alpha formula: Table [Pow[n, Trunc[63 / log[2,n]]-1], {n, 2, 36}]
+ * with LL added after each element manually */
+ 4611686018427387904LL, 1350851717672992089LL, 1152921504606846976LL,
+ 1490116119384765625LL, 789730223053602816LL, 558545864083284007LL,
+ 1152921504606846976LL, 150094635296999121LL, 100000000000000000LL,
+ 505447028499293771LL, 184884258895036416LL, 665416609183179841LL,
+ 155568095557812224LL, 437893890380859375LL, 72057594037927936LL,
+ 168377826559400929LL, 374813367582081024LL, 42052983462257059LL,
+ 81920000000000000LL, 154472377739119461LL, 282810057883082752LL,
+ 21914624432020321LL, 36520347436056576LL, 59604644775390625LL,
+ 95428956661682176LL, 150094635296999121LL, 232218265089212416LL,
+ 12200509765705829LL, 17714700000000000LL, 25408476896404831LL,
+ 36028797018963968LL, 50542106513726817LL, 70188843638032384LL,
+ 96549157373046875LL, 131621703842267136LL
+#else
+ #error "Please produce a lookup table for the new architecture"
+#endif
+};
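/*
 * Illustrative arithmetic (not in the patch): for base 10 on a 64-bit
 * Sint, digits_per_sint = Trunc[63 / log2(10)] - 1 = Trunc[63 / 3.32193] - 1
 * = 18 - 1 = 17, so largest_power_of_base = 10^17 = 100000000000000000,
 * matching the entries above. On 32-bit the same formula gives
 * Trunc[31 / 3.32193] - 1 = 8 and a largest power of 10^8 = 100000000.
 */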
Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
Uint size, const int base) {
@@ -2588,8 +2646,11 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
int neg = 0;
byte b;
Eterm *hp, *hp_end;
- int m;
+ Sint m;
int lg2;
+ const Uint digits_per_small = get_digits_per_small(base);
+ const Uint digits_per_Sint = get_digits_per_signed_int(base);
+ const Sint largest_pow_of_base = get_largest_power_of_base(base);
if (size == 0)
goto bytebuf_to_integer_1_error;
@@ -2604,57 +2665,68 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
size--;
}
+ /* Trim leading zeroes */
+ if (size) {
+ while (*bytes == '0') {
+ bytes++;
+ size--;
+ if (!size) {
+ /* All zero! */
+ res = make_small(0);
+ goto bytebuf_to_integer_1_done;
+ }
+ }
+ }
+
if (size == 0)
goto bytebuf_to_integer_1_error;
- if (size < SMALL_DIGITS && base <= 10) {
- /* *
- * Take shortcut if we know that all chars are '0' < b < '9' and
- * fit in a small. This improves speed by about 10% over the generic
- * small case.
- * */
- while (size--) {
- b = *bytes++;
+ if (size < digits_per_small) {
+ if (base <= 10) {
+ /* *
+ * Take shortcut if we know that all chars are '0' < b < '9' and
+ * fit in a small. This improves speed by about 10% over the generic
+ * small case.
+ * */
+ while (size--) {
+ b = *bytes++;
- if (b < '0' || b > ('0'+base-1))
- goto bytebuf_to_integer_1_error;
+ if (b < '0' || b > ('0'+base-1))
+ goto bytebuf_to_integer_1_error;
- i = i * base + b - '0';
- }
+ i = i * base + b - '0';
+ }
- if (neg)
- i = -i;
- res = make_small(i);
- goto bytebuf_to_integer_1_done;
+ if (neg)
+ i = -i;
+ res = make_small(i);
+ goto bytebuf_to_integer_1_done;
+ }
+
+ /* Take shortcut if we know it will fit in a small.
+ * This improves speed by about 30%.
+ */
+ while (size) {
+ b = *bytes++;
+ size--;
+
+ if (c2int_is_invalid_char(b, base))
+ goto bytebuf_to_integer_1_error;
+
+ i = i * base + c2int_digit_from_base(b);
+ }
+
+ if (neg)
+ i = -i;
+ res = make_small(i);
+ goto bytebuf_to_integer_1_done;
}
/*
* Calculate the maximum number of bits which will
* be needed to represent the binary
*/
- lg2 = ((size+2)*LG2_LOOKUP(base)+1);
-
- if (lg2 < SMALL_BITS) {
- /* Take shortcut if we know it will fit in a small.
- * This improves speed by about 30%.
- */
- while (size) {
- b = *bytes++;
- size--;
-
- if (IS_VALID_CHARACTER(b,base))
- goto bytebuf_to_integer_1_error;
-
- i = i * base + CHARACTER_FROM_BASE(b);
-
- }
-
- if (neg)
- i = -i;
- res = make_small(i);
- goto bytebuf_to_integer_1_done;
-
- }
+ lg2 = ((size+2)*lookup_log2(base)+1);
/* Start calculating bignum */
m = (lg2 + D_EXP-1)/D_EXP;
@@ -2663,8 +2735,8 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
hp = HAlloc(BIF_P, m);
hp_end = hp + m;
- if ((i = (size % D_BASE_EXP(base))) == 0)
- i = D_BASE_EXP(base);
+ if ((i = (size % digits_per_Sint)) == 0)
+ i = digits_per_Sint;
n = size - i;
m = 0;
@@ -2672,34 +2744,34 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
while (i--) {
b = *bytes++;
- if (IS_VALID_CHARACTER(b,base)) {
+ if (c2int_is_invalid_char(b,base)) {
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_error;
}
- m = base * m + CHARACTER_FROM_BASE(b);
+ m = base * m + c2int_digit_from_base(b);
}
res = small_to_big(m, hp);
while (n) {
- i = D_BASE_EXP(base);
- n -= D_BASE_EXP(base);
+ i = digits_per_Sint;
+ n -= digits_per_Sint;
m = 0;
while (i--) {
b = *bytes++;
- if (IS_VALID_CHARACTER(b,base)) {
+ if (c2int_is_invalid_char(b,base)) {
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_error;
}
- m = base * m + CHARACTER_FROM_BASE(b);
+ m = base * m + c2int_digit_from_base(b);
}
if (is_small(res)) {
res = small_to_big(signed_val(res), hp);
}
- res = big_times_small(res, D_BASE_BASE(base), hp);
+ res = big_times_small(res, largest_pow_of_base, hp);
if (is_small(res)) {
res = small_to_big(signed_val(res), hp);
}
@@ -2730,5 +2802,166 @@ bytebuf_to_integer_1_error:
bytebuf_to_integer_1_done:
return res;
+}
+/* Converts list of digits with given 'base' to integer sequentially. Returns
+ * result in 'integer_out', remaining tail goes to 'tail_out' and returns result
+ * code if the list was consumed fully or partially or there was an error
+ */
+LTI_result_t erts_list_to_integer(Process *BIF_P, Eterm orig_list,
+ const Uint base,
+ Eterm *integer_out, Eterm *tail_out)
+{
+ Sint i = 0;
+ Uint ui = 0;
+ int skip = 0;
+ int neg = 0;
+ Sint n = 0;
+ Sint m;
+ int lg2;
+ Eterm res;
+ Eterm lst = orig_list;
+ Eterm tail = lst;
+ int error_res = LTI_BAD_STRUCTURE;
+ const Uint digits_per_small = get_digits_per_small(base);
+ const Uint digits_per_Sint = get_digits_per_signed_int(base);
+
+ if (is_nil(lst)) {
+ error_res = LTI_NO_INTEGER;
+ error:
+ *tail_out = tail;
+ *integer_out = make_small(0);
+ return error_res;
+ }
+ if (is_not_list(lst))
+ goto error;
+
+ /* if first char is a '-' then it is a negative integer */
+ if (CAR(list_val(lst)) == make_small('-')) {
+ neg = 1;
+ skip = 1;
+ lst = CDR(list_val(lst));
+ if (is_not_list(lst)) {
+ tail = lst;
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+ } else if (CAR(list_val(lst)) == make_small('+')) {
+ /* ignore plus */
+ skip = 1;
+ lst = CDR(list_val(lst));
+ if (is_not_list(lst)) {
+ tail = lst;
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+ }
+
+ /* Calculate size and do type check */
+
+ while(1) {
+ byte ch;
+ if (is_not_small(CAR(list_val(lst)))) {
+ break;
+ }
+ ch = unsigned_val(CAR(list_val(lst)));
+ if (c2int_is_invalid_char(ch, base)) {
+ break;
+ }
+ ui = ui * base;
+ ui = ui + c2int_digit_from_base(ch);
+ n++;
+ lst = CDR(list_val(lst));
+ if (is_nil(lst)) {
+ break;
+ }
+ if (is_not_list(lst)) {
+ break;
+ }
+ }
+
+ tail = lst;
+ if (!n) {
+ error_res = LTI_NO_INTEGER;
+ goto error;
+ }
+
+
+ /* If length fits inside Sint then we know it's a small int. Else we
+ * must construct a bignum and let that routine do the checking
+ */
+
+ if (n <= digits_per_small) { /* It must be small */
+ i = neg ? -(Sint)ui : (Sint)ui;
+ res = make_small(i);
+ } else {
+ const Sint largest_pow_of_base = get_largest_power_of_base(base);
+ Eterm *hp;
+ Eterm *hp_end;
+
+ /* Convert from log_base to log2 using lookup table */
+ lg2 = ((n+2)*lookup_log2(base)+1);
+ m = (lg2+D_EXP-1)/D_EXP; /* number of digits */
+ m = BIG_NEED_SIZE(m); /* number of words + thing */
+
+ hp = HAlloc(BIF_P, m);
+ hp_end = hp + m;
+
+ lst = orig_list;
+ if (skip)
+ lst = CDR(list_val(lst));
+
+ /* load first digits (at least one digit) */
+ if ((i = (n % digits_per_Sint)) == 0)
+ i = digits_per_Sint;
+ n -= i;
+ m = 0;
+ while(i--) {
+ m *= base;
+ m += c2int_digit_from_base(unsigned_val(CAR(list_val(lst))));
+ lst = CDR(list_val(lst));
+ }
+ res = small_to_big(m, hp); /* load first digits */
+
+ while(n) {
+ i = digits_per_Sint;
+ n -= digits_per_Sint;
+ m = 0;
+ while(i--) {
+ m *= base;
+ m += c2int_digit_from_base(unsigned_val(CAR(list_val(lst))));
+ lst = CDR(list_val(lst));
+ }
+ if (is_small(res))
+ res = small_to_big(signed_val(res), hp);
+ res = big_times_small(res, largest_pow_of_base, hp);
+ if (is_small(res))
+ res = small_to_big(signed_val(res), hp);
+ res = big_plus_small(res, m, hp);
+ }
+
+ if (neg) {
+ if (is_small(res))
+ res = make_small(-signed_val(res));
+ else {
+ Uint *big = big_val(res); /* point to thing */
+ *big = bignum_header_neg(*big);
+ }
+ }
+
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res)+1);
+ }
+ }
+ HRelease(BIF_P, hp_end, hp);
+ }
+ *integer_out = res;
+ *tail_out = tail;
+ if (tail != NIL) {
+ return LTI_SOME_INTEGER;
+ }
+ return LTI_ALL_INTEGER;
}
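A minimal caller of the new function looks like the reworked string_to_integer_1 earlier in this commit: heap allocation happens only after the call (the conversion may HAlloc a bignum itself) and the result is classified by the returned LTI_result_t. Sketch only, with error reporting trimmed:

    Eterm res, tail;

    switch (erts_list_to_integer(BIF_P, BIF_ARG_1, 10, &res, &tail)) {
    case LTI_BAD_STRUCTURE:    /* not a proper list               */
    case LTI_NO_INTEGER:       /* a list, but no leading digits   */
        BIF_ERROR(BIF_P, BADARG);
    case LTI_SOME_INTEGER:     /* digits followed by a tail       */
    case LTI_ALL_INTEGER:      /* the whole list was consumed     */
        BIF_RET(res);
    }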
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index 94f9bce10e..9c92de6b55 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -54,9 +54,6 @@ typedef Uint32 ErtsHalfDigit;
#error "can not determine machine size"
#endif
-#define D_DECIMAL_EXP 9
-#define D_DECIMAL_BASE 1000000000
-
typedef Uint dsize_t; /* Vector size type */
#define D_EXP (ERTS_SIZEOF_ETERM*8)
@@ -173,4 +170,15 @@ Eterm erts_sint64_to_big(Sint64, Eterm **);
Eterm erts_chars_to_integer(Process *, char*, Uint, const int);
+/* How list_to_integer classifies the input, was it even a string? */
+typedef enum {
+ LTI_BAD_STRUCTURE = 0,
+ LTI_NO_INTEGER = 1,
+ LTI_SOME_INTEGER = 2,
+ LTI_ALL_INTEGER = 3
+} LTI_result_t;
+
+LTI_result_t erts_list_to_integer(Process *BIF_P, Eterm orig_list,
+ const Uint base,
+ Eterm *integer_out, Eterm *tail_out);
#endif
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 5544712e8d..b53b662a59 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -103,9 +103,9 @@ static Uint install_debug_functions(void);
static int lock_all_physical_memory = 0;
-ErtsAllocatorFunctions_t erts_allctrs[ERTS_ALC_A_MAX+1];
+ErtsAllocatorFunctions_t ERTS_WRITE_UNLIKELY(erts_allctrs[ERTS_ALC_A_MAX+1]);
ErtsAllocatorInfo_t erts_allctrs_info[ERTS_ALC_A_MAX+1];
-ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
+ErtsAllocatorThrSpec_t ERTS_WRITE_UNLIKELY(erts_allctr_thr_spec[ERTS_ALC_A_MAX+1]);
#define ERTS_MIN(A, B) ((A) < (B) ? (A) : (B))
#define ERTS_MAX(A, B) ((A) > (B) ? (A) : (B))
@@ -1917,7 +1917,7 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
va_start(argp, n);
size = va_arg(argp, Uint);
va_end(argp);
- erl_exit(1,
+ erl_exit(-1,
"%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
allctr_str, op, size, t_str);
break;
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 14e80960f5..71e4713624 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -233,12 +233,14 @@ ERTS_ALC_INLINE
void *erts_alloc(ErtsAlcType_t type, Uint size)
{
void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
size);
if (!res)
erts_alloc_n_enomem(ERTS_ALC_T2N(type), size);
+ ERTS_MSACC_POP_STATE_X();
return res;
}
@@ -246,6 +248,7 @@ ERTS_ALC_INLINE
void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size)
{
void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
@@ -253,37 +256,48 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size)
size);
if (!res)
erts_realloc_n_enomem(ERTS_ALC_T2N(type), ptr, size);
+ ERTS_MSACC_POP_STATE_X();
return res;
}
ERTS_ALC_INLINE
void erts_free(ErtsAlcType_t type, void *ptr)
{
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
(*erts_allctrs[ERTS_ALC_T2A(type)].free)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
ptr);
+ ERTS_MSACC_POP_STATE_X();
}
ERTS_ALC_INLINE
void *erts_alloc_fnf(ErtsAlcType_t type, Uint size)
{
- return (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
+ void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
+ res = (*erts_allctrs[ERTS_ALC_T2A(type)].alloc)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
size);
+ ERTS_MSACC_POP_STATE_X();
+ return res;
}
ERTS_ALC_INLINE
void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
{
- return (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
+ void *res;
+ ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ALLOC);
+ res = (*erts_allctrs[ERTS_ALC_T2A(type)].realloc)(
ERTS_ALC_T2N(type),
erts_allctrs[ERTS_ALC_T2A(type)].extra,
ptr,
size);
+ ERTS_MSACC_POP_STATE_X();
+ return res;
}
ERTS_ALC_INLINE
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index e0bc71c88a..5f153ac0ab 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -361,6 +361,7 @@ type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
type GC_INFO_REQ SHORT_LIVED SYSTEM gc_info_request
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
+type MSACC DRIVER SYSTEM microstate_accounting
#
# Types used by system specific code
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index be0bc0cfec..cdeeb5281b 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -400,13 +400,19 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
#endif
if (!p) {
- if (a->async_free)
+ if (a->async_free) {
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT);
a->async_free(a->async_data);
+ ERTS_MSACC_POP_STATE();
+ }
}
else {
if (async_ready(p, a->async_data)) {
- if (a->async_free)
+ if (a->async_free) {
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT);
a->async_free(a->async_data);
+ ERTS_MSACC_POP_STATE();
+ }
}
#if ERTS_USE_ASYNC_READY_Q
erts_port_release(p);
@@ -460,6 +466,8 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
{
ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
erts_tse_t *tse = erts_tse_fetch();
+ ERTS_DECLARE_DUMMY(Uint no);
+
#ifdef ERTS_SMP
ErtsThrPrgrCallbacks callbacks;
@@ -483,10 +491,12 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
/* Inform main thread that we are done initializing... */
erts_mtx_lock(&async->init.data.mtx);
- async->init.data.no_initialized++;
+ no = async->init.data.no_initialized++;
erts_cnd_signal(&async->init.data.cnd);
erts_mtx_unlock(&async->init.data.mtx);
+ erts_msacc_init_thread("async", no, 0);
+
return tse;
}
@@ -494,6 +504,7 @@ static void *async_main(void* arg)
{
ErtsAsyncQ *aq = (ErtsAsyncQ *) arg;
erts_tse_t *tse = async_thread_init(aq);
+ ERTS_MSACC_DECLARE_CACHE();
while (1) {
ErtsThrQPrepEnQ_t *prep_enq;
@@ -501,11 +512,14 @@ static void *async_main(void* arg)
if (is_nil(a->port))
break; /* Time to die */
+ ERTS_MSACC_UPDATE_CACHE();
+
#if ERTS_ASYNC_PRINT_JOB
erts_fprintf(stderr, "<- %ld\n", a->async_id);
#endif
-
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
a->async_invoke(a->async_data);
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
async_reply(a, prep_enq);
}
@@ -628,10 +642,13 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
unsigned int qix;
#if ERTS_USE_ASYNC_READY_Q
Uint sched_id;
+ ERTS_MSACC_PUSH_STATE();
sched_id = erts_get_scheduler_id();
if (!sched_id)
sched_id = 1;
+#else
+ ERTS_MSACC_PUSH_STATE();
#endif
prt = erts_drvport2port(ix);
@@ -684,12 +701,17 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
return id;
}
#endif
-
+
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
(*a->async_invoke)(a->async_data);
+ ERTS_MSACC_POP_STATE();
if (async_ready(prt, a->async_data)) {
- if (a->async_free != NULL)
+ if (a->async_free != NULL) {
+ ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
(*a->async_free)(a->async_data);
+ ERTS_MSACC_POP_STATE();
+ }
}
erts_free(ERTS_ALC_T_ASYNC, (void *) a);
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index e69908bbef..017339e1f6 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -64,6 +64,7 @@ static Export* alloc_sizes_trap = NULL;
static Export* gather_io_bytes_trap = NULL;
static Export *gather_sched_wall_time_res_trap;
+static Export *gather_msacc_res_trap;
static Export *gather_gc_info_res_trap;
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
@@ -592,7 +593,8 @@ static Eterm pi_args[] = {
am_min_bin_vheap_size,
am_current_location,
am_current_stacktrace,
- am_message_queue_data
+ am_message_queue_data,
+ am_garbage_collection_info
};
#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
@@ -641,6 +643,7 @@ pi_arg2ix(Eterm arg)
case am_current_location: return 29;
case am_current_stacktrace: return 30;
case am_message_queue_data: return 31;
+ case am_garbage_collection_info: return 32;
default: return -1;
}
}
@@ -1398,6 +1401,32 @@ process_info_aux(Process *BIF_P,
break;
}
+ case am_garbage_collection_info: {
+ Uint sz = 0, actual_sz = 0;
+
+ if (rp == BIF_P) {
+ sz += ERTS_PROCESS_GC_INFO_MAX_SIZE;
+ } else {
+ erts_process_gc_info(rp, &sz, NULL);
+ sz += 3;
+ }
+
+ hp = HAlloc(BIF_P, sz);
+ res = erts_process_gc_info(rp, &actual_sz, &hp);
+
+ /* We may have some extra space, fill with 0 tuples */
+ if (actual_sz <= sz - 3) {
+ for (; actual_sz < sz - 3; hp++, actual_sz++)
+ hp[0] = make_arityval(0);
+ } else {
+ for (; actual_sz < sz; hp++, actual_sz++)
+ hp[0] = make_arityval(0);
+ hp = HAlloc(BIF_P, 3);
+ }
+
+ break;
+ }
+
case am_group_leader: {
int sz = NC_HEAP_SIZE(rp->group_leader);
hp = HAlloc(BIF_P, 3 + sz);
@@ -3232,6 +3261,14 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
szp = NULL;
hpp = &hp;
}
+#ifdef ERTS_ENABLE_MSACC
+ } else if (BIF_ARG_1 == am_microstate_accounting) {
+ Eterm threads;
+ res = erts_msacc_request(BIF_P, ERTS_MSACC_GATHER, &threads);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP2(gather_msacc_res_trap, BIF_P, res, threads);
+#endif
} else if (BIF_ARG_1 == am_context_switches) {
Eterm cs = erts_make_integer(erts_get_total_context_switches(), BIF_P);
hp = HAlloc(BIF_P, 3);
@@ -4368,6 +4405,9 @@ erts_bif_info_init(void)
= erts_export_put(am_erlang, am_gather_gc_info_result, 1);
gather_io_bytes_trap
= erts_export_put(am_erts_internal, am_gather_io_bytes, 2);
+ gather_msacc_res_trap
+ = erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
+
process_info_init();
os_info_init();
}
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 4d67e39e7e..71a7079b09 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -431,6 +431,9 @@ Uint
erts_trace_flag2bit(Eterm flag)
{
switch (flag) {
+ case am_timestamp: return F_NOW_TS;
+ case am_strict_monotonic_timestamp: return F_STRICT_MON_TS;
+ case am_monotonic_timestamp: return F_MON_TS;
case am_all: return TRACEE_FLAGS;
case am_send: return F_TRACE_SEND;
case am_receive: return F_TRACE_RECEIVE;
@@ -439,7 +442,6 @@ erts_trace_flag2bit(Eterm flag)
case am_set_on_first_spawn: return F_TRACE_SOS1;
case am_set_on_link: return F_TRACE_SOL;
case am_set_on_first_link: return F_TRACE_SOL1;
- case am_timestamp: return F_TIMESTAMP;
case am_running: return F_TRACE_SCHED;
case am_exiting: return F_TRACE_SCHED_EXIT;
case am_garbage_collection: return F_TRACE_GC;
@@ -592,7 +594,7 @@ Eterm trace_3(BIF_ALIST_3)
ERTS_TRACE_FLAGS(tracee_port) |= mask;
else
ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
-
+
if (!ERTS_TRACE_FLAGS(tracee_port))
ERTS_TRACER_PROC(tracee_port) = NIL;
else if (tracer != NIL)
@@ -978,7 +980,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
}
if (key == am_flags) {
- int num_flags = 19; /* MAXIMUM number of flags. */
+ int num_flags = 21; /* MAXIMUM number of flags. */
Uint needed = 3+2*num_flags;
Eterm flag_list = NIL;
Eterm* limit;
@@ -996,6 +998,9 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
#endif
hp = HAlloc(p, needed);
limit = hp+needed;
+ FLAG(F_NOW_TS, am_timestamp);
+ FLAG(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
+ FLAG(F_MON_TS, am_monotonic_timestamp);
FLAG(F_TRACE_SEND, am_send);
FLAG(F_TRACE_RECEIVE, am_receive);
FLAG(F_TRACE_SOS, am_set_on_spawn);
@@ -1007,7 +1012,6 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
FLAG(F_TRACE_SCHED, am_running);
FLAG(F_TRACE_SCHED_EXIT, am_exiting);
FLAG(F_TRACE_GC, am_garbage_collection);
- FLAG(F_TIMESTAMP, am_timestamp);
FLAG(F_TRACE_ARITY_ONLY, am_arity);
FLAG(F_TRACE_RETURN_TO, am_return_to);
FLAG(F_TRACE_SILENT, am_silent);
@@ -1798,7 +1802,11 @@ Eterm erts_seq_trace(Process *p, Eterm arg1, Eterm arg2,
} else if (arg1 == am_print) {
current_flag = SEQ_TRACE_PRINT;
} else if (arg1 == am_timestamp) {
- current_flag = SEQ_TRACE_TIMESTAMP;
+ current_flag = SEQ_TRACE_NOW_TS;
+ } else if (arg1 == am_strict_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_STRICT_MON_TS;
+ } else if (arg1 == am_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_MON_TS;
}
else
current_flag = 0;
@@ -1901,7 +1909,9 @@ BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
if (have_no_seqtrace(SEQ_TRACE_TOKEN(p))) {
if ((item == am_send) || (item == am_receive) ||
- (item == am_print) || (item == am_timestamp)) {
+ (item == am_print) || (item == am_timestamp)
+ || (item == am_monotonic_timestamp)
+ || (item == am_strict_monotonic_timestamp)) {
hp = HAlloc(p,3);
res = TUPLE2(hp, item, am_false);
BIF_RET(res);
@@ -1919,7 +1929,11 @@ BIF_RETTYPE erl_seq_trace_info(Process *p, Eterm item)
} else if (item == am_print) {
current_flag = SEQ_TRACE_PRINT;
} else if (item == am_timestamp) {
- current_flag = SEQ_TRACE_TIMESTAMP;
+ current_flag = SEQ_TRACE_NOW_TS;
+ } else if (item == am_strict_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_STRICT_MON_TS;
+ } else if (item == am_monotonic_timestamp) {
+ current_flag = SEQ_TRACE_MON_TS;
} else {
current_flag = 0;
}
@@ -2221,6 +2235,7 @@ static Eterm system_profile_get(Process *p) {
if (erts_system_profile_flags.exclusive) {
res = CONS(hp, am_exclusive, res); hp += 2;
}
+
return TUPLE2(hp, system_profile, res);
}
}
@@ -2239,6 +2254,7 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
int system_blocked = 0;
Process *profiler_p = NULL;
Port *profiler_port = NULL;
+ int ts;
if (profiler == am_undefined || list == NIL) {
prev = system_profile_get(p);
@@ -2270,7 +2286,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
goto error;
}
- for (scheduler = 0, runnable_ports = 0, runnable_procs = 0, exclusive = 0;
+ for (ts = ERTS_TRACE_FLG_NOW_TIMESTAMP, scheduler = 0,
+ runnable_ports = 0, runnable_procs = 0, exclusive = 0;
is_list(list);
list = CDR(list_val(list))) {
@@ -2283,6 +2300,12 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
exclusive = !0;
} else if (t == am_scheduler) {
scheduler = !0;
+ } else if (t == am_timestamp) {
+ ts = ERTS_TRACE_FLG_NOW_TIMESTAMP;
+ } else if (t == am_strict_monotonic_timestamp) {
+ ts = ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP;
+ } else if (t == am_monotonic_timestamp) {
+ ts = ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP;
} else goto error;
}
if (is_not_nil(list)) goto error;
@@ -2295,7 +2318,7 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
erts_system_profile_flags.runnable_ports = !!runnable_ports;
erts_system_profile_flags.runnable_procs = !!runnable_procs;
erts_system_profile_flags.exclusive = !!exclusive;
-
+ erts_system_profile_ts_type = ts;
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 3030c1c91a..55b8789e29 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1376,7 +1376,6 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
status |= DB_ORDERED_SET;
status &= ~(DB_SET | DB_BAG | DB_DUPLICATE_BAG);
}
- /*TT*/
else if (is_tuple(val)) {
Eterm *tp = tuple_val(val);
if (arityval(tp[0]) == 2) {
@@ -3460,10 +3459,10 @@ static void fix_table_locked(Process* p, DbTable* tb)
#endif
erts_refc_inc(&tb->common.ref,1);
fix = tb->common.fixations;
- if (fix == NULL) {
- get_now(&(tb->common.megasec),
- &(tb->common.sec),
- &(tb->common.microsec));
+ if (fix == NULL) {
+ tb->common.time.monotonic
+ = erts_get_monotonic_time(ERTS_PROC_GET_SCHDATA(p));
+ tb->common.time.offset = erts_get_time_offset();
}
else {
for (; fix != NULL; fix = fix->next) {
@@ -3722,6 +3721,7 @@ static int free_table_cont(Process *p,
static Eterm table_info(Process* p, DbTable* tb, Eterm What)
{
Eterm ret = THE_NON_VALUE;
+ int use_monotonic;
if (What == am_size) {
ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems));
@@ -3779,7 +3779,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_true;
else
ret = am_false;
- } else if (What == am_atom_put("safe_fixed",10)) {
+ } else if ((use_monotonic
+ = ERTS_IS_ATOM_STR("safe_fixed_monotonic_time",
+ What))
+ || ERTS_IS_ATOM_STR("safe_fixed", What)) {
#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
@@ -3788,7 +3791,19 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
Eterm *hp;
Eterm tpl, lst;
DbFixation *fix;
- need = 7;
+ Sint64 mtime;
+
+ need = 3;
+ if (use_monotonic) {
+ mtime = (Sint64) tb->common.time.monotonic;
+ mtime += ERTS_MONOTONIC_OFFSET_NATIVE;
+ if (!IS_SSMALL(mtime))
+ need += ERTS_SINT64_HEAP_SIZE(mtime);
+ }
+ else {
+ mtime = 0;
+ need += 4;
+ }
for (fix = tb->common.fixations; fix != NULL; fix = fix->next) {
need += 5;
}
@@ -3800,11 +3815,18 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
lst = CONS(hp,tpl,lst);
hp += 2;
}
- tpl = TUPLE3(hp,
- make_small(tb->common.megasec),
- make_small(tb->common.sec),
- make_small(tb->common.microsec));
- hp += 4;
+ if (use_monotonic)
+ tpl = (IS_SSMALL(mtime)
+ ? make_small(mtime)
+ : erts_sint64_to_big(mtime, &hp));
+ else {
+ Uint ms, s, us;
+ erts_make_timestamp_value(&ms, &s, &us,
+ tb->common.time.monotonic,
+ tb->common.time.offset);
+ tpl = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
+ hp += 4;
+ }
ret = TUPLE2(hp, tpl, lst);
} else {
ret = am_false;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index c0966655cd..911ed37aef 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -226,7 +226,10 @@ typedef struct db_table_common {
DbTableMethod* meth; /* table methods */
erts_smp_atomic_t nitems; /* Total number of items in table */
erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
- Uint megasec,sec,microsec; /* Last fixation time */
+ struct { /* Last fixation time */
+ ErtsMonotonicTime monotonic;
+ ErtsMonotonicTime offset;
+ } time;
DbFixation* fixations; /* List of processes who have done safe_fixtable,
"local" fixations not included. */
/* All 32-bit fields */
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 0636171ad1..0234b9b0e4 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -37,43 +37,6 @@
# endif
#endif
-#ifdef SIZEOF_CHAR
-# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
-# undef SIZEOF_CHAR
-#endif
-#ifdef SIZEOF_SHORT
-# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
-# undef SIZEOF_SHORT
-#endif
-#ifdef SIZEOF_INT
-# define SIZEOF_INT_SAVED__ SIZEOF_INT
-# undef SIZEOF_INT
-#endif
-#ifdef SIZEOF_LONG
-# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
-# undef SIZEOF_LONG
-#endif
-#ifdef SIZEOF_LONG_LONG
-# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
-# undef SIZEOF_LONG_LONG
-#endif
-#include "erl_int_sizes_config.h"
-#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
-# error SIZEOF_CHAR mismatch
-#endif
-#if defined(SIZEOF_SHORT_SAVED__) && SIZEOF_SHORT_SAVED__ != SIZEOF_SHORT
-# error SIZEOF_SHORT mismatch
-#endif
-#if defined(SIZEOF_INT_SAVED__) && SIZEOF_INT_SAVED__ != SIZEOF_INT
-# error SIZEOF_INT mismatch
-#endif
-#if defined(SIZEOF_LONG_SAVED__) && SIZEOF_LONG_SAVED__ != SIZEOF_LONG
-# error SIZEOF_LONG mismatch
-#endif
-#if defined(SIZEOF_LONG_LONG_SAVED__) && SIZEOF_LONG_LONG_SAVED__ != SIZEOF_LONG_LONG
-# error SIZEOF_LONG_LONG mismatch
-#endif
-
#include "erl_drv_nif.h"
#include <stdlib.h>
@@ -168,28 +131,12 @@ typedef struct {
/*
* Integer types
*/
-#if defined(__WIN32__) && (SIZEOF_VOID_P == 8)
-typedef unsigned __int64 ErlDrvTermData;
-typedef unsigned __int64 ErlDrvUInt;
-typedef signed __int64 ErlDrvSInt;
-#else
-typedef unsigned long ErlDrvTermData;
-typedef unsigned long ErlDrvUInt;
-typedef signed long ErlDrvSInt;
-#endif
-#if defined(__WIN32__)
-typedef unsigned __int64 ErlDrvUInt64;
-typedef __int64 ErlDrvSInt64;
-#elif SIZEOF_LONG == 8
-typedef unsigned long ErlDrvUInt64;
-typedef long ErlDrvSInt64;
-#elif SIZEOF_LONG_LONG == 8
-typedef unsigned long long ErlDrvUInt64;
-typedef long long ErlDrvSInt64;
-#else
-#error No 64-bit integer type
-#endif
+typedef ErlNapiUInt64 ErlDrvUInt64;
+typedef ErlNapiSInt64 ErlDrvSInt64;
+typedef ErlNapiUInt ErlDrvUInt;
+typedef ErlNapiSInt ErlDrvSInt;
+typedef ErlNapiUInt ErlDrvTermData;
#if defined(__WIN32__) || defined(_WIN32)
typedef ErlDrvUInt ErlDrvSizeT;
@@ -242,6 +189,17 @@ typedef struct {
unsigned long microsecs;
} ErlDrvNowData;
+typedef ErlDrvSInt64 ErlDrvTime;
+
+#define ERL_DRV_TIME_ERROR ((ErlDrvSInt64) ERTS_NAPI_TIME_ERROR__)
+
+typedef enum {
+ ERL_DRV_SEC = ERTS_NAPI_SEC__,
+ ERL_DRV_MSEC = ERTS_NAPI_MSEC__,
+ ERL_DRV_USEC = ERTS_NAPI_USEC__,
+ ERL_DRV_NSEC = ERTS_NAPI_NSEC__
+} ErlDrvTimeUnit;
+
/*
* Error codes that can be return from driver.
*/
@@ -678,8 +636,16 @@ EXTERN long driver_async(ErlDrvPort ix,
EXTERN int driver_lock_driver(ErlDrvPort ix);
/* Get the current 'now' timestamp (analogue to erlang:now()) */
-EXTERN int driver_get_now(ErlDrvNowData *now);
-
+EXTERN int driver_get_now(ErlDrvNowData *now) ERL_DRV_DEPRECATED_FUNC;
+
+/* Erlang Monotonic Time */
+EXTERN ErlDrvTime erl_drv_monotonic_time(ErlDrvTimeUnit time_unit);
+/* Time offset between Erlang Monotonic Time and Erlang System Time */
+EXTERN ErlDrvTime erl_drv_time_offset(ErlDrvTimeUnit time_unit);
+/* Time unit conversion */
+EXTERN ErlDrvTime erl_drv_convert_time_unit(ErlDrvTime val,
+ ErlDrvTimeUnit from,
+ ErlDrvTimeUnit to);
/* These were removed from the ANSI version, now they're back. */
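The three declarations above are the replacement for the now-deprecated driver_get_now(). A hypothetical driver callback using them might look like the sketch below (names t0, t1, elapsed and sys_time are illustrative, not part of the API):

    ErlDrvTime t0, t1;

    t0 = erl_drv_monotonic_time(ERL_DRV_USEC);
    /* ... perform the work being measured ... */
    t1 = erl_drv_monotonic_time(ERL_DRV_USEC);

    if (t0 != ERL_DRV_TIME_ERROR && t1 != ERL_DRV_TIME_ERROR) {
        /* elapsed monotonic time, converted from microseconds to milliseconds */
        ErlDrvTime elapsed = erl_drv_convert_time_unit(t1 - t0,
                                                       ERL_DRV_USEC,
                                                       ERL_DRV_MSEC);
        /* Erlang system time = monotonic time + current time offset */
        ErlDrvTime sys_time = t1 + erl_drv_time_offset(ERL_DRV_USEC);
        (void)elapsed; (void)sys_time;
    }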
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index e2385f63f4..f6b946ae82 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -50,6 +50,90 @@ typedef enum {
} ErlDrvDirtyJobFlags;
#endif
+#ifdef SIZEOF_CHAR
+# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
+# undef SIZEOF_CHAR
+#endif
+#ifdef SIZEOF_SHORT
+# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
+# undef SIZEOF_SHORT
+#endif
+#ifdef SIZEOF_INT
+# define SIZEOF_INT_SAVED__ SIZEOF_INT
+# undef SIZEOF_INT
+#endif
+#ifdef SIZEOF_LONG
+# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
+# undef SIZEOF_LONG
+#endif
+#ifdef SIZEOF_LONG_LONG
+# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
+# undef SIZEOF_LONG_LONG
+#endif
+#ifdef HALFWORD_HEAP_EMULATOR
+# define HALFWORD_HEAP_EMULATOR_SAVED__ HALFWORD_HEAP_EMULATOR
+# undef HALFWORD_HEAP_EMULATOR
+#endif
+#include "erl_int_sizes_config.h"
+#if defined(SIZEOF_CHAR_SAVED__) && SIZEOF_CHAR_SAVED__ != SIZEOF_CHAR
+# error SIZEOF_CHAR mismatch
+#endif
+#if defined(SIZEOF_SHORT_SAVED__) && SIZEOF_SHORT_SAVED__ != SIZEOF_SHORT
+# error SIZEOF_SHORT mismatch
+#endif
+#if defined(SIZEOF_INT_SAVED__) && SIZEOF_INT_SAVED__ != SIZEOF_INT
+# error SIZEOF_INT mismatch
+#endif
+#if defined(SIZEOF_LONG_SAVED__) && SIZEOF_LONG_SAVED__ != SIZEOF_LONG
+# error SIZEOF_LONG mismatch
+#endif
+#if defined(SIZEOF_LONG_LONG_SAVED__) && SIZEOF_LONG_LONG_SAVED__ != SIZEOF_LONG_LONG
+# error SIZEOF_LONG_LONG mismatch
+#endif
+
+#if !defined(__GNUC__) && (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
+typedef unsigned __int64 ErlNapiUInt64;
+typedef signed __int64 ErlNapiSInt64;
+#define ERL_NAPI_SINT64_MAX__ 9223372036854775807i64
+#define ERL_NAPI_SINT64_MIN__ (-ERL_NAPI_SINT64_MAX__ - 1i64)
+#elif SIZEOF_LONG == 8
+typedef unsigned long ErlNapiUInt64;
+typedef signed long ErlNapiSInt64;
+#define ERL_NAPI_SINT64_MAX__ 9223372036854775807L
+#define ERL_NAPI_SINT64_MIN__ (-ERL_NAPI_SINT64_MAX__ - 1L)
+#elif SIZEOF_LONG_LONG == 8
+typedef unsigned long long ErlNapiUInt64;
+typedef signed long long ErlNapiSInt64;
+#define ERL_NAPI_SINT64_MAX__ 9223372036854775807LL
+#define ERL_NAPI_SINT64_MIN__ (-ERL_NAPI_SINT64_MAX__ - 1LL)
+#else
+# error No 64-bit integer type
+#endif
+
+#if SIZEOF_VOID_P == 8
+typedef ErlNapiUInt64 ErlNapiUInt;
+typedef ErlNapiSInt64 ErlNapiSInt;
+#elif SIZEOF_VOID_P == 4
+# if SIZEOF_LONG == SIZEOF_VOID_P
+typedef unsigned long ErlNapiUInt;
+typedef signed long ErlNapiSInt;
+# elif SIZEOF_INT == SIZEOF_VOID_P
+typedef unsigned int ErlNapiUInt;
+typedef signed int ErlNapiSInt;
+# else
+# error No 32-bit integer type
+# endif
+#else
+# error Unsupported architecture
+#endif
+
+#define ERTS_NAPI_TIME_ERROR__ ERL_NAPI_SINT64_MIN__
+
+#define ERTS_NAPI_SEC__ 0
+#define ERTS_NAPI_MSEC__ 1
+#define ERTS_NAPI_USEC__ 2
+#define ERTS_NAPI_NSEC__ 3
+
#endif /* __ERL_DRV_NIF_H__ */
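A minimal sketch (assuming a C11 compiler, not part of the patch) of how the width guarantees established above could be checked at compile time:

    #include "erl_drv_nif.h"

    /* hypothetical sanity checks mirroring the type selection above */
    _Static_assert(sizeof(ErlNapiUInt64) == 8, "ErlNapiUInt64 must be 64 bits");
    _Static_assert(sizeof(ErlNapiSInt64) == 8, "ErlNapiSInt64 must be 64 bits");
    _Static_assert(sizeof(ErlNapiUInt) == sizeof(void *),
                   "ErlNapiUInt must be word sized");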
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 0f86b9a25c..21b03ae8bd 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -576,6 +576,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
int reds;
ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */
ErtsSchedulerData *esdp;
+ ERTS_MSACC_PUSH_STATE_M();
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
@@ -588,6 +589,8 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
else if (p->live_hf_end != ERTS_INVALID_HFRAG_PTR)
live_hf_end = p->live_hf_end;
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);
+
esdp = erts_get_scheduler_data();
if (IS_TRACED_FL(p, F_TRACE_GC)) {
@@ -624,9 +627,11 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
}
else {
do_major_collection:
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC_FULL);
DTRACE2(gc_major_start, pidbuf, need);
reds = major_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
}
reset_active_writer(p);
@@ -669,6 +674,8 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
FLAGS(p) &= ~F_FORCE_GC;
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
+ ERTS_MSACC_POP_STATE_M();
+
#ifdef CHECK_FOR_HOLES
/*
* We intentionally do not rescan the areas copied by the GC.
@@ -729,9 +736,7 @@ erts_garbage_collect_hibernate(Process* p)
*/
erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
ErtsGcQuickSanityCheck(p);
- ASSERT(p->mbuf == NULL);
ASSERT(p->stop == p->hend); /* Stack must be empty. */
- ASSERT(!p->abandoned_heap);
/*
* Do it.
@@ -2947,6 +2952,57 @@ erts_gc_info_request(Process *c_p)
return ref;
}
+Eterm
+erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp)
+{
+ ERTS_DECL_AM(bin_vheap_size);
+ ERTS_DECL_AM(bin_vheap_block_size);
+ ERTS_DECL_AM(bin_old_vheap_size);
+ ERTS_DECL_AM(bin_old_vheap_block_size);
+ Eterm tags[] = {
+ /* If you increase the number of elements here, make sure to update
+ any call sites as they may have stack allocations that depend
+ on the number of elements here. */
+ am_old_heap_block_size,
+ am_heap_block_size,
+ am_mbuf_size,
+ am_recent_size,
+ am_stack_size,
+ am_old_heap_size,
+ am_heap_size,
+ AM_bin_vheap_size,
+ AM_bin_vheap_block_size,
+ AM_bin_old_vheap_size,
+ AM_bin_old_vheap_block_size
+ };
+ UWord values[] = {
+ OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
+ HEAP_SIZE(p),
+ MBUF_SIZE(p),
+ HIGH_WATER(p) - HEAP_START(p),
+ STACK_START(p) - p->stop,
+ OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
+ HEAP_TOP(p) - HEAP_START(p),
+ MSO(p).overhead,
+ BIN_VHEAP_SZ(p),
+ BIN_OLD_VHEAP(p),
+ BIN_OLD_VHEAP_SZ(p)
+ };
+
+ Eterm res = THE_NON_VALUE;
+
+ ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(*tags));
+ ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == ERTS_PROCESS_GC_INFO_MAX_TERMS);
+
+ res = erts_bld_atom_uword_2tup_list(hpp,
+ sizep,
+ sizeof(values)/sizeof(*values),
+ tags,
+ values);
+
+ return res;
+}
+
#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
static int
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index d603866cbf..2cedd9361f 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -132,6 +132,11 @@ typedef struct {
Uint64 garbage_cols;
} ErtsGCInfo;
+#define ERTS_PROCESS_GC_INFO_MAX_TERMS (11) /* number of terms returned by erts_process_gc_info */
+#define ERTS_PROCESS_GC_INFO_MAX_SIZE \
+ (ERTS_PROCESS_GC_INFO_MAX_TERMS * (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE))
+Eterm erts_process_gc_info(struct process*, Uint *, Eterm **);
+
void erts_gc_info(ErtsGCInfo *gcip);
void erts_init_gc(void);
int erts_garbage_collect_nobump(struct process*, int, Eterm*, int);
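A hedged call-site sketch for the new erts_process_gc_info(): ERTS_PROCESS_GC_INFO_MAX_SIZE is meant to bound a stack-allocated heap for the returned list of 2-tuples. The function below and the use of erts_fprintf are illustrative only, not taken from the patch:

    void
    print_gc_info_example(Process *p)
    {
        /* the terms are built on this stack heap, so they must be used
           (or copied elsewhere) before the frame is left */
        Eterm heap[ERTS_PROCESS_GC_INFO_MAX_SIZE];
        Eterm *hp = &heap[0];
        Eterm info = erts_process_gc_info(p, NULL, &hp);

        erts_fprintf(stderr, "gc info for %T: %T\n", p->common.id, info);
    }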
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 734057a12c..491d2f8c84 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -1055,6 +1055,8 @@ create_hl_timer(ErtsSchedulerData *esdp,
erts_aint32_t refc;
Uint32 roflgs;
+ ERTS_HLT_HDBG_CHK_SRV(srv);
+
check_canceled_queue(esdp, srv);
ERTS_HLT_ASSERT((esdp->no & ~ERTS_TMR_ROFLG_SID_MASK) == 0);
@@ -1179,8 +1181,6 @@ create_hl_timer(ErtsSchedulerData *esdp,
erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
erts_smp_atomic32_init_nob(&tmr->state, ERTS_TMR_STATE_ACTIVE);
- ERTS_HLT_HDBG_CHK_SRV(srv);
-
if (!srv->next_timeout
|| tmr->timeout < srv->next_timeout->timeout) {
if (srv->next_timeout)
@@ -3093,7 +3093,8 @@ tt_hdbg_func(ErtsHLTimer *tmr, void *vhdbg)
& ~ERTS_HLT_PFLGS_MASK);
ERTS_HLT_ASSERT(tmr == prnt);
}
- ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
+ if (tmr->head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)
+ ERTS_HLT_ASSERT(btm_rbt_lookup(hdbg->srv->btm_tree, tmr->btm.refn) == tmr);
if (tmr->time.tree.same_time) {
ErtsHdbgHLT st_hdbg;
st_hdbg.srv = hdbg->srv;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 42aca726bf..520b504fcb 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -394,6 +394,7 @@ erl_init(int ncpu,
#endif
packet_parser_init();
erl_nif_init();
+ erts_msacc_init();
}
static Eterm
@@ -1193,6 +1194,7 @@ early_init(int *argc, char **argv) /*
erts_thr_late_init(&elid);
}
#endif
+ erts_msacc_early_init();
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_late_init();
@@ -2220,6 +2222,7 @@ erl_start(int argc, char **argv)
#else
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ erts_msacc_init_thread("scheduler", 1, 1);
erts_thr_set_main_status(1, 1);
#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index f7b4bd8041..598bf84c0b 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -137,6 +137,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "mmap_init_atoms", NULL },
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
+ { "msacc_list_mutex", NULL },
+ { "msacc_unmanaged_mutex", NULL },
#ifdef ERTS_SMP
{ "atom_tab", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
new file mode 100644
index 0000000000..71e3fd8b6e
--- /dev/null
+++ b/erts/emulator/beam/erl_msacc.c
@@ -0,0 +1,486 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014-2015. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Microstate accounting.
+ *
+ * We keep track of the different states that the
+ * Erlang VM threads are in, in order to provide
+ * performance/debugging statistics. There is a
+ * small overhead in enabling this, but in the big
+ * scheme of things it should be negligible.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERTS_MSACC_STATE_STRINGS 1
+
+#include "sys.h"
+#include "global.h"
+#include "erl_threads.h"
+#include "erl_bif_unique.h"
+#include "erl_map.h"
+#include "erl_msacc.h"
+
+#if ERTS_ENABLE_MSACC
+
+static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, Eterm **hpp, Uint *szp);
+static void erts_msacc_reset(ErtsMsAcc *msacc);
+static ErtsMsAcc* get_msacc(void);
+
+#ifdef USE_THREADS
+erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key);
+#else
+ErtsMsAcc *ERTS_WRITE_UNLIKELY(erts_msacc) = NULL;
+#endif
+int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
+
+static Eterm *erts_msacc_state_atoms = NULL;
+static erts_rwmtx_t msacc_mutex;
+static ErtsMsAcc *msacc_managed = NULL;
+#ifdef USE_THREADS
+static ErtsMsAcc *msacc_unmanaged = NULL;
+static Uint msacc_unmanaged_count = 0;
+#endif
+
+/* we have to split initialization as atoms are not initialized in early init */
+void erts_msacc_early_init(void) {
+#ifndef ERTS_MSACC_ALWAYS_ON
+ erts_msacc_enabled = 0;
+#endif
+ erts_rwmtx_init(&msacc_mutex,"msacc_list_mutex");
+#ifdef USE_THREADS
+ erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
+#else
+ erts_msacc = NULL;
+#endif
+}
+
+void erts_msacc_init(void) {
+ int i;
+ erts_msacc_state_atoms = erts_alloc(ERTS_ALC_T_MSACC,
+ sizeof(Eterm)*ERTS_MSACC_STATE_COUNT);
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ erts_msacc_state_atoms[i] = am_atom_put(erts_msacc_states[i],
+ strlen(erts_msacc_states[i]));
+ }
+}
+
+void erts_msacc_init_thread(char *type, int id, int managed) {
+ ErtsMsAcc *msacc;
+
+ msacc = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMsAcc));
+
+ msacc->type = strdup(type);
+ msacc->id = make_small(id);
+ msacc->unmanaged = !managed;
+ msacc->tid = erts_thr_self();
+ msacc->perf_counter = 0;
+
+#ifdef USE_THREADS
+ erts_rwmtx_rwlock(&msacc_mutex);
+ if (!managed) {
+ erts_mtx_init(&msacc->mtx,"msacc_unmanaged_mutex");
+ msacc->next = msacc_unmanaged;
+ msacc_unmanaged = msacc;
+ msacc_unmanaged_count++;
+ ERTS_MSACC_TSD_SET(msacc);
+ } else {
+ msacc->next = msacc_managed;
+ msacc_managed = msacc;
+ }
+ erts_rwmtx_rwunlock(&msacc_mutex);
+#else
+ msacc_managed = msacc;
+#endif
+
+ erts_msacc_reset(msacc);
+
+#ifdef ERTS_MSACC_ALWAYS_ON
+ ERTS_MSACC_TSD_SET(msacc);
+ msacc->perf_counter = erts_sys_perf_counter();
+ msacc->state = ERTS_MSACC_STATE_OTHER;
+#endif
+}
+
+/*
+ * Creates a structure looking like this
+ * #{ type => scheduler, id => 1, counters => #{ State1 => Counter1 ... StateN => CounterN}}
+ */
+static
+Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, Eterm **hpp, Uint *szp) {
+ int i;
+ Eterm *hp;
+ Eterm key, state_key, state_map;
+ Eterm res = THE_NON_VALUE;
+ flatmap_t *map;
+
+ if (szp) {
+ *szp += MAP_HEADER_FLATMAP_SZ + 1 + 2*(3);
+ *szp += MAP_HEADER_FLATMAP_SZ + 1 + 2*(ERTS_MSACC_STATE_COUNT);
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ (void)erts_bld_sint64(NULL,szp,(Sint64)msacc->perf_counters[i]);
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ (void)erts_bld_uint64(NULL,szp,msacc->state_counters[i]);
+ *szp += 3; /* tuple to put state+perf counter in */
+#endif
+ }
+ }
+
+ if (hpp) {
+ Eterm counters[ERTS_MSACC_STATE_COUNT];
+ hp = *hpp;
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ Eterm counter = erts_bld_sint64(&hp,NULL,(Sint64)msacc->perf_counters[i]);
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ Eterm counter__ = erts_bld_uint64(&hp,NULL,msacc->state_counters[i]);
+ counters[i] = TUPLE2(hp,counter,counter__);
+ hp += 3;
+#else
+ counters[i] = counter;
+#endif
+ }
+
+ key = TUPLE3(hp,am_counters,am_id,am_type);
+ hp += 4;
+
+ state_key = make_tuple(hp);
+ hp[0] = make_arityval(ERTS_MSACC_STATE_COUNT);
+
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++)
+ hp[1+i] = erts_msacc_state_atoms[i];
+ hp += 1 + ERTS_MSACC_STATE_COUNT;
+
+ map = (flatmap_t*)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ map->thing_word = MAP_HEADER_FLATMAP;
+ map->size = ERTS_MSACC_STATE_COUNT;
+ map->keys = state_key;
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++)
+ hp[i] = counters[i];
+ hp += ERTS_MSACC_STATE_COUNT;
+ state_map = make_flatmap(map);
+
+ map = (flatmap_t*)hp;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ map->thing_word = MAP_HEADER_FLATMAP;
+ map->size = 3;
+ map->keys = key;
+ hp[0] = state_map;
+ hp[1] = msacc->id;
+ hp[2] = am_atom_put(msacc->type,strlen(msacc->type));
+ hp += 3;
+
+ *hpp = hp;
+ res = make_flatmap(map);
+ }
+
+ return res;
+}
+
+typedef struct {
+ int action;
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[REF_THING_SIZE];
+ Uint req_sched;
+ erts_smp_atomic32_t refc;
+} ErtsMSAccReq;
+
+static ErtsMsAcc* get_msacc(void) {
+ ErtsMsAcc *msacc;
+ erts_rwmtx_rlock(&msacc_mutex);
+ msacc = msacc_managed;
+ while (!erts_equal_tids(msacc->tid,erts_thr_self())) {
+ msacc = msacc->next;
+ ASSERT(msacc != NULL);
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+ return msacc;
+}
+
+static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ Process *rp = msaccrp->proc;
+ ErtsMessage *msgp = NULL;
+ Eterm **hpp, *hp;
+ Eterm ref_copy = NIL, msg;
+ Uint sz, *szp;
+ ErlOffHeap *ohp = NULL;
+ ErtsProcLocks rp_locks = (esdp && msaccrp->req_sched == esdp->no
+ ? ERTS_PROC_LOCK_MAIN : 0);
+
+ sz = 0;
+ hpp = NULL;
+ szp = &sz;
+
+ if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);
+
+ while (1) {
+ if (hpp)
+ ref_copy = STORE_NC(hpp, ohp, msaccrp->ref);
+ else
+ *szp += REF_THING_SIZE;
+
+ if (msaccrp->action != ERTS_MSACC_GATHER)
+ msg = ref_copy;
+ else {
+ msg = erts_msacc_gather_stats(msacc, hpp, szp);
+ msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
+ }
+ if (hpp)
+ break;
+
+ msgp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ hpp = &hp;
+ szp = NULL;
+ }
+
+ if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
+
+ erts_queue_message(rp, &rp_locks, msgp, msg, NIL);
+
+ if (esdp && msaccrp->req_sched == esdp->no)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+
+}
+
+static void
+reply_msacc(void *vmsaccrp)
+{
+ ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
+ ErtsMSAccReq *msaccrp = (ErtsMSAccReq *) vmsaccrp;
+
+ ASSERT(!msacc || !msacc->unmanaged);
+
+ if (msaccrp->action == ERTS_MSACC_ENABLE && !msacc) {
+ msacc = get_msacc();
+
+ msacc->perf_counter = erts_sys_perf_counter();
+
+ msacc->state = ERTS_MSACC_STATE_OTHER;
+
+ ERTS_MSACC_TSD_SET(msacc);
+
+ } else if (msaccrp->action == ERTS_MSACC_DISABLE && msacc) {
+ ERTS_MSACC_TSD_SET(NULL);
+ } else if (msaccrp->action == ERTS_MSACC_RESET) {
+ msacc = msacc ? msacc : get_msacc();
+ erts_msacc_reset(msacc);
+ } else if (msaccrp->action == ERTS_MSACC_GATHER && !msacc) {
+ msacc = get_msacc();
+ }
+
+ ASSERT(!msacc || !msacc->unmanaged);
+
+ send_reply(msacc, msaccrp);
+
+ erts_proc_dec_refc(msaccrp->proc);
+
+ if (erts_smp_atomic32_dec_read_nob(&msaccrp->refc) == 0)
+ erts_free(ERTS_ALC_T_MSACC, vmsaccrp);
+}
+
+static void erts_msacc_reset(ErtsMsAcc *msacc) {
+ int i;
+ if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);
+
+ for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
+ msacc->perf_counters[i] = 0;
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ msacc->state_counters[i] = 0;
+#endif
+ }
+
+ if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
+}
+
+#endif /* ERTS_ENABLE_MSACC */
+
+
+/*
+ * This function is responsible for enabling, disabling, resetting and
+ * gathering data related to microstate accounting.
+ *
+ * Managed threads and unmanaged threads are handled differently.
+ * - managed threads get a misc_aux job telling them to switch on msacc
+ *  - unmanaged threads have some fields protected by a mutex that has to be taken
+ * before any values can be updated
+ *
+ * For performance reasons there is also a global value erts_msacc_enabled
+ * that controls the state of all threads. Statistics gathering is only on
+ * if erts_msacc_enabled && msacc is true.
+ */
+Eterm
+erts_msacc_request(Process *c_p, int action, Eterm *threads)
+{
+#ifdef ERTS_ENABLE_MSACC
+ ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
+ ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ Eterm ref;
+ ErtsMSAccReq *msaccrp;
+ Eterm *hp;
+
+
+#ifdef ERTS_MSACC_ALWAYS_ON
+ if (action == ERTS_MSACC_ENABLE || action == ERTS_MSACC_DISABLE)
+ return THE_NON_VALUE;
+#else
+    /* take care of double enable and double disable here */
+ if (msacc && action == ERTS_MSACC_ENABLE) {
+ return THE_NON_VALUE;
+ } else if (!msacc && action == ERTS_MSACC_DISABLE) {
+ return THE_NON_VALUE;
+ }
+#endif
+
+ ref = erts_make_ref(c_p);
+
+ msaccrp = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMSAccReq));
+ hp = &msaccrp->ref_heap[0];
+
+ msaccrp->action = action;
+ msaccrp->proc = c_p;
+ msaccrp->ref = STORE_NC(&hp, NULL, ref);
+ msaccrp->req_sched = esdp->no;
+
+#ifdef ERTS_SMP
+ *threads = erts_no_schedulers;
+ *threads += 1; /* aux thread */
+#else
+ *threads = 1;
+#endif
+
+ erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);
+
+ erts_proc_add_refc(c_p, *threads);
+
+ if (erts_no_schedulers > 1)
+ erts_schedule_multi_misc_aux_work(1,
+ erts_no_schedulers,
+ reply_msacc,
+ (void *) msaccrp);
+#ifdef ERTS_SMP
+ /* aux thread */
+ erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
+#endif
+
+#ifdef USE_THREADS
+ /* Manage unmanaged threads */
+ switch (action) {
+ case ERTS_MSACC_GATHER: {
+ Uint unmanaged_count;
+ ErtsMsAcc *msacc, **unmanaged;
+ int i = 0;
+
+        /* we copy a list of pointers here so that we do not have to hold
+ the msacc_mutex when sending messages */
+ erts_rwmtx_rlock(&msacc_mutex);
+ unmanaged_count = msacc_unmanaged_count;
+ unmanaged = erts_alloc(ERTS_ALC_T_MSACC,
+ sizeof(ErtsMsAcc*)*unmanaged_count);
+
+ for (i = 0, msacc = msacc_unmanaged;
+ i < unmanaged_count;
+ i++, msacc = msacc->next) {
+ unmanaged[i] = msacc;
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+
+ for (i = 0; i < unmanaged_count; i++) {
+ erts_mtx_lock(&unmanaged[i]->mtx);
+ if (unmanaged[i]->perf_counter) {
+ ErtsSysPerfCounter perf_counter;
+ /* if enabled update stats */
+ perf_counter = erts_sys_perf_counter();
+ unmanaged[i]->perf_counters[unmanaged[i]->state] +=
+ perf_counter - unmanaged[i]->perf_counter;
+ unmanaged[i]->perf_counter = perf_counter;
+ }
+ erts_mtx_unlock(&unmanaged[i]->mtx);
+ send_reply(unmanaged[i],msaccrp);
+ }
+ erts_free(ERTS_ALC_T_MSACC,unmanaged);
+ /* We have just sent unmanaged_count messages, so bump no of threads */
+ *threads += unmanaged_count;
+ break;
+ }
+ case ERTS_MSACC_RESET: {
+ ErtsMsAcc *msacc;
+ erts_rwmtx_rlock(&msacc_mutex);
+ for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next)
+ erts_msacc_reset(msacc);
+ erts_rwmtx_runlock(&msacc_mutex);
+ break;
+ }
+ case ERTS_MSACC_ENABLE: {
+ erts_rwmtx_rlock(&msacc_mutex);
+ for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
+ erts_mtx_lock(&msacc->mtx);
+ msacc->perf_counter = erts_sys_perf_counter();
+ /* we assume the unmanaged thread is sleeping */
+ msacc->state = ERTS_MSACC_STATE_SLEEP;
+ erts_mtx_unlock(&msacc->mtx);
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+ break;
+ }
+ case ERTS_MSACC_DISABLE: {
+ ErtsSysPerfCounter perf_counter;
+ erts_rwmtx_rlock(&msacc_mutex);
+ /* make sure to update stats with latest results */
+ for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) {
+ erts_mtx_lock(&msacc->mtx);
+ perf_counter = erts_sys_perf_counter();
+ msacc->perf_counters[msacc->state] += perf_counter - msacc->perf_counter;
+ msacc->perf_counter = 0;
+ erts_mtx_unlock(&msacc->mtx);
+ }
+ erts_rwmtx_runlock(&msacc_mutex);
+ break;
+ }
+ default: { ASSERT(0); }
+ }
+
+#endif
+
+ *threads = make_small(*threads);
+
+ reply_msacc((void *) msaccrp);
+
+#ifndef ERTS_MSACC_ALWAYS_ON
+ /* enable/disable the global value */
+ if (action == ERTS_MSACC_ENABLE) {
+ erts_msacc_enabled = 1;
+ } else if (action == ERTS_MSACC_DISABLE) {
+ erts_msacc_enabled = 0;
+ }
+#endif
+
+ return ref;
+#else
+ return THE_NON_VALUE;
+#endif
+}
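To show how an unmanaged thread would plug into the code above, here is a rough sketch of a worker (for example an async-pool thread) registering itself and switching states; the thread body and async_no are hypothetical, and the macros used are defined in erl_msacc.h below:

    static void *async_worker_example(void *arg)
    {
        int async_no = (int) (long) arg;      /* hypothetical thread number */
        ERTS_MSACC_DECLARE_CACHE();           /* declarations first (C89) */

        erts_msacc_init_thread("async", async_no, 0);   /* 0 => unmanaged */
        ERTS_MSACC_UPDATE_CACHE();
        ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP);

        for (;;) {
            /* ... wait for a job ... */
            ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
            /* ... run the job ... */
            ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP);
        }
        return NULL;
    }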
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
new file mode 100644
index 0000000000..284388f7aa
--- /dev/null
+++ b/erts/emulator/beam/erl_msacc.h
@@ -0,0 +1,409 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2014-2015. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_MSACC_H__
+#define ERL_MSACC_H__
+
+/* Can be enabled/disabled via configure */
+#if ERTS_ENABLE_MSACC == 2
+#define ERTS_MSACC_EXTENDED_STATES 1
+#endif
+
+/* Uncomment this to also count the number of
+ transitions to a state. This will add a count
+ to the counter map. */
+/* #define ERTS_MSACC_STATE_COUNTERS 1 */
+
+/* Uncomment this to make msacc always be on;
+ this reduces overhead a little bit when profiling */
+/* #define ERTS_MSACC_ALWAYS_ON 1 */
+
+#define ERTS_MSACC_DISABLE 0
+#define ERTS_MSACC_ENABLE 1
+#define ERTS_MSACC_RESET 2
+#define ERTS_MSACC_GATHER 3
+
+/*
+ * When adding a new state, you have to:
+ * * Add it here
+ * * Increment ERTS_MSACC_STATE_COUNT
+ * * Add string value to erts_msacc_states
+ * *   (the states have to be kept in alphabetical order!)
+ * * Only add states to the non-extended section after
+ * careful benchmarking to make sure the overhead
+ * when disabled is minimal.
+ */
+
+#ifndef ERTS_MSACC_EXTENDED_STATES
+#define ERTS_MSACC_STATE_AUX 0
+#define ERTS_MSACC_STATE_CHECK_IO 1
+#define ERTS_MSACC_STATE_EMULATOR 2
+#define ERTS_MSACC_STATE_GC 3
+#define ERTS_MSACC_STATE_OTHER 4
+#define ERTS_MSACC_STATE_PORT 5
+#define ERTS_MSACC_STATE_SLEEP 6
+
+#define ERTS_MSACC_STATE_COUNT 7
+
+#if ERTS_MSACC_STATE_STRINGS && ERTS_ENABLE_MSACC
+static char *erts_msacc_states[] = {
+ "aux",
+ "check_io",
+ "emulator",
+ "gc",
+ "other",
+ "port",
+ "sleep"
+};
+#endif
+
+#else
+
+#define ERTS_MSACC_STATE_ALLOC 0
+#define ERTS_MSACC_STATE_AUX 1
+#define ERTS_MSACC_STATE_BIF 2
+#define ERTS_MSACC_STATE_BUSY_WAIT 3
+#define ERTS_MSACC_STATE_CHECK_IO 4
+#define ERTS_MSACC_STATE_EMULATOR 5
+#define ERTS_MSACC_STATE_ETS 6
+#define ERTS_MSACC_STATE_GC 7
+#define ERTS_MSACC_STATE_GC_FULL 8
+#define ERTS_MSACC_STATE_NIF 9
+#define ERTS_MSACC_STATE_OTHER 10
+#define ERTS_MSACC_STATE_PORT 11
+#define ERTS_MSACC_STATE_SEND 12
+#define ERTS_MSACC_STATE_SLEEP 13
+#define ERTS_MSACC_STATE_TIMERS 14
+
+#define ERTS_MSACC_STATE_COUNT 15
+
+#if ERTS_MSACC_STATE_STRINGS
+static char *erts_msacc_states[] = {
+ "alloc",
+ "aux",
+ "bif",
+ "busy_wait",
+ "check_io",
+ "emulator",
+ "ets",
+ "gc",
+ "gc_full",
+ "nif",
+ "other",
+ "port",
+ "send",
+ "sleep",
+ "timers"
+};
+#endif
+
+#endif
+
+typedef struct erl_msacc_t_ ErtsMsAcc;
+
+struct erl_msacc_t_ {
+
+    /* the values below are protected by mtx iff unmanaged = 1 */
+ ErtsSysPerfCounter perf_counter;
+ ErtsSysPerfCounter perf_counters[ERTS_MSACC_STATE_COUNT];
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ Uint64 state_counters[ERTS_MSACC_STATE_COUNT];
+#endif
+ Uint state;
+
+ /* protected by msacc_mutex in erl_msacc.c, and should be constant */
+ int unmanaged;
+ erts_mtx_t mtx;
+ ErtsMsAcc *next;
+ erts_tid_t tid;
+ Eterm id;
+ char *type;
+};
+
+#if ERTS_ENABLE_MSACC
+
+#define ERTS_MSACC_INLINE ERTS_GLB_INLINE
+
+#ifdef USE_THREADS
+extern erts_tsd_key_t erts_msacc_key;
+#else
+extern ErtsMsAcc *erts_msacc;
+#endif
+
+#ifdef ERTS_MSACC_ALWAYS_ON
+#define erts_msacc_enabled 1
+#else
+extern int erts_msacc_enabled;
+#endif
+
+#ifdef USE_THREADS
+#define ERTS_MSACC_TSD_GET() erts_tsd_get(erts_msacc_key)
+#define ERTS_MSACC_TSD_SET(tsd) erts_tsd_set(erts_msacc_key,tsd)
+#else
+#define ERTS_MSACC_TSD_GET() erts_msacc
+#define ERTS_MSACC_TSD_SET(tsd) erts_msacc = tsd
+#endif
+
+void erts_msacc_early_init(void);
+void erts_msacc_init(void);
+void erts_msacc_init_thread(char *type, int id, int liberty);
+
+/* The defines below are used to instrument the vm code
+ * with different state changes. There are two variants
+ * of each define. One that has a cached ErtsMsAcc *
+ * that it can use, and one that does not.
+ * The cached values are needed in order to keep the overhead
+ * low enough when running without msacc enabled.
+ *
+ * The two most common usage patterns of these macros are:
+ *
+ * ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_NEWSTATE);
+ * ... call other function in new state ...
+ * ERTS_MSACC_POP_STATE();
+ *
+ * Note that the erts_msacc_push* macros declare new variables, so to
+ * conform with C89 they have to be called at the beginning of a function.
+ * We might not want to change state at the beginning though, so we use this:
+ *
+ * ERTS_MSACC_PUSH_STATE();
+ * ... some other code ...
+ * ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_NEWSTATE);
+ * ... call other function in new state ...
+ * ERTS_MSACC_POP_STATE();
+ *
+ * Notice that we use the cached version of set_state, as push_state has
+ * already read erts_msacc_enabled into the cache.
+ *
+ * Most macros also have variants with the _M suffix, which means that they
+ * are known to only be called in managed threads, or with the _X suffix,
+ * which means that they should only be used in an emulator compiled with
+ * extended states.
+ *
+ * Here is a listing of the entire api:
+ *
+ * void ERTS_MSACC_DECLARE_CACHE()
+ * void ERTS_MSACC_UPDATE_CACHE()
+ * void ERTS_MSACC_IS_ENABLED()
+ * void ERTS_MSACC_IS_ENABLED_CACHED()
+ *
+ * void ERTS_MSACC_PUSH_STATE()
+ * void ERTS_MSACC_SET_STATE(int state)
+ * void ERTS_MSACC_PUSH_AND_SET_STATE(int state)
+ *
+ * void ERTS_MSACC_PUSH_STATE_CACHED()
+ * void ERTS_MSACC_SET_STATE_CACHED(int state)
+ * void ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(int state)
+ * void ERTS_MSACC_POP_STATE()
+ *
+ * void ERTS_MSACC_PUSH_STATE_M()
+ * void ERTS_MSACC_PUSH_STATE_CACHED_M()
+ * void ERTS_MSACC_SET_STATE_CACHED_M(int state)
+ * void ERTS_MSACC_SET_STATE_M(int state)
+ * void ERTS_MSACC_POP_STATE_M()
+ * void ERTS_MSACC_PUSH_AND_SET_STATE_M(int state)
+ *
+ * Most macros are also available in an _X variant that is only enabled
+ * when using the extended states. If the one you need is missing, just add
+ * it to the end of this file.
+ */
+
+/* cache handling functions */
+#define ERTS_MSACC_IS_ENABLED() ERTS_UNLIKELY(erts_msacc_enabled)
+#define ERTS_MSACC_DECLARE_CACHE() \
+ ErtsMsAcc *ERTS_MSACC_UPDATE_CACHE(); \
+ ERTS_DECLARE_DUMMY(Uint __erts_msacc_state) = ERTS_MSACC_STATE_OTHER;
+#define ERTS_MSACC_IS_ENABLED_CACHED() ERTS_UNLIKELY(__erts_msacc_cache != NULL)
+#define ERTS_MSACC_UPDATE_CACHE() \
+ __erts_msacc_cache = erts_msacc_enabled ? ERTS_MSACC_TSD_GET() : NULL
+
+
+/* The defines below implicitly declare and load a new cache */
+#define ERTS_MSACC_PUSH_STATE() \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_PUSH_STATE_CACHED()
+#define ERTS_MSACC_SET_STATE(state) \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE(state) \
+ ERTS_MSACC_PUSH_STATE(); ERTS_MSACC_SET_STATE_CACHED(state)
+
+/* The defines below need an already declared cache to work */
+#define ERTS_MSACC_PUSH_STATE_CACHED() \
+ __erts_msacc_state = ERTS_MSACC_IS_ENABLED_CACHED() ? \
+ erts_msacc_get_state_um__(__erts_msacc_cache) : ERTS_MSACC_STATE_OTHER
+#define ERTS_MSACC_SET_STATE_CACHED(state) \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) \
+ erts_msacc_set_state_um__(__erts_msacc_cache, state, 1)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(state) \
+ ERTS_MSACC_PUSH_STATE_CACHED(); ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_POP_STATE() \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) \
+ erts_msacc_set_state_um__(__erts_msacc_cache, __erts_msacc_state, 0)
+
+/* Only use these defines when we know that we are in a managed thread */
+#define ERTS_MSACC_PUSH_STATE_M() \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_PUSH_STATE_CACHED_M()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M() \
+ __erts_msacc_state = ERTS_MSACC_IS_ENABLED_CACHED() ? \
+ erts_msacc_get_state_m__(__erts_msacc_cache) : ERTS_MSACC_STATE_OTHER
+#define ERTS_MSACC_SET_STATE_M(state) \
+ ERTS_MSACC_DECLARE_CACHE(); \
+ ERTS_MSACC_SET_STATE_CACHED_M(state)
+#define ERTS_MSACC_SET_STATE_CACHED_M(state) \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) \
+ erts_msacc_set_state_m__(__erts_msacc_cache, state, 1)
+#define ERTS_MSACC_POP_STATE_M() \
+ if (ERTS_MSACC_IS_ENABLED_CACHED()) \
+ erts_msacc_set_state_m__(__erts_msacc_cache, __erts_msacc_state, 0)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M(state) \
+ ERTS_MSACC_PUSH_STATE_M(); ERTS_MSACC_SET_STATE_CACHED_M(state)
+
+ERTS_MSACC_INLINE
+void erts_msacc_set_state_um__(ErtsMsAcc *msacc,Uint state,int increment);
+ERTS_MSACC_INLINE
+void erts_msacc_set_state_m__(ErtsMsAcc *msacc,Uint state,int increment);
+
+ERTS_MSACC_INLINE
+Uint erts_msacc_get_state_um__(ErtsMsAcc *msacc);
+ERTS_MSACC_INLINE
+Uint erts_msacc_get_state_m__(ErtsMsAcc *msacc);
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_MSACC_INLINE
+Uint erts_msacc_get_state_um__(ErtsMsAcc *msacc) {
+ Uint state;
+ if (msacc->unmanaged)
+ erts_mtx_lock(&msacc->mtx);
+ state = msacc->state;
+ if (msacc->unmanaged)
+ erts_mtx_unlock(&msacc->mtx);
+ return state;
+}
+
+ERTS_MSACC_INLINE
+Uint erts_msacc_get_state_m__(ErtsMsAcc *msacc) {
+ return msacc->state;
+}
+
+ERTS_MSACC_INLINE
+void erts_msacc_set_state_um__(ErtsMsAcc *msacc, Uint new_state, int increment) {
+ if (ERTS_UNLIKELY(msacc->unmanaged)) {
+ erts_mtx_lock(&msacc->mtx);
+ msacc->state = new_state;
+ if (ERTS_LIKELY(!msacc->perf_counter)) {
+ erts_mtx_unlock(&msacc->mtx);
+ return;
+ }
+ }
+
+ erts_msacc_set_state_m__(msacc,new_state,increment);
+
+ if (ERTS_UNLIKELY(msacc->unmanaged))
+ erts_mtx_unlock(&msacc->mtx);
+}
+
+ERTS_MSACC_INLINE
+void erts_msacc_set_state_m__(ErtsMsAcc *msacc, Uint new_state, int increment) {
+ ErtsSysPerfCounter prev_perf_counter;
+ Sint64 diff;
+
+ if (new_state == msacc->state)
+ return;
+
+ prev_perf_counter = msacc->perf_counter;
+ msacc->perf_counter = erts_sys_perf_counter();
+ diff = msacc->perf_counter - prev_perf_counter;
+ ASSERT(diff >= 0);
+ msacc->perf_counters[msacc->state] += diff;
+#ifdef ERTS_MSACC_STATE_COUNTERS
+ msacc->state_counters[new_state] += increment;
+#endif
+ msacc->state = new_state;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#else
+
+#define ERTS_MSACC_IS_ENABLED() 0
+#define erts_msacc_early_init()
+#define erts_msacc_init()
+#define erts_msacc_init_thread(type, id, liberty)
+#define ERTS_MSACC_PUSH_STATE()
+#define ERTS_MSACC_PUSH_STATE_CACHED()
+#define ERTS_MSACC_POP_STATE()
+#define ERTS_MSACC_SET_STATE(state)
+#define ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(state)
+#define ERTS_MSACC_UPDATE_CACHE()
+#define ERTS_MSACC_IS_ENABLED_CACHED()
+#define ERTS_MSACC_DECLARE_CACHE()
+#define ERTS_MSACC_PUSH_STATE_M()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M()
+#define ERTS_MSACC_SET_STATE_CACHED_M(state)
+#define ERTS_MSACC_POP_STATE_M()
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M(state)
+
+
+#endif /* ERTS_ENABLE_MSACC */
+
+#ifndef ERTS_MSACC_EXTENDED_STATES
+
+#define ERTS_MSACC_PUSH_STATE_X()
+#define ERTS_MSACC_POP_STATE_X()
+#define ERTS_MSACC_SET_STATE_X(state)
+#define ERTS_MSACC_SET_STATE_M_X(state)
+#define ERTS_MSACC_SET_STATE_CACHED_X(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_X(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(state)
+#define ERTS_MSACC_UPDATE_CACHE_X()
+#define ERTS_MSACC_IS_ENABLED_CACHED_X() 0
+#define ERTS_MSACC_DECLARE_CACHE_X()
+#define ERTS_MSACC_PUSH_STATE_M_X()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M_X()
+#define ERTS_MSACC_SET_STATE_CACHED_M_X(state)
+#define ERTS_MSACC_POP_STATE_M_X()
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M_X(state)
+
+#else
+
+#define ERTS_MSACC_PUSH_STATE_X() ERTS_MSACC_PUSH_STATE()
+#define ERTS_MSACC_POP_STATE_X() ERTS_MSACC_POP_STATE()
+#define ERTS_MSACC_SET_STATE_X(state) ERTS_MSACC_SET_STATE(state)
+#define ERTS_MSACC_SET_STATE_M_X(state) ERTS_MSACC_SET_STATE_M(state)
+#define ERTS_MSACC_SET_STATE_CACHED_X(state) ERTS_MSACC_SET_STATE_CACHED(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_X(state) ERTS_MSACC_PUSH_AND_SET_STATE(state)
+#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(state) ERTS_MSACC_PUSH_AND_SET_STATE_CACHED(state)
+#define ERTS_MSACC_UPDATE_CACHE_X() ERTS_MSACC_UPDATE_CACHE()
+#define ERTS_MSACC_IS_ENABLED_CACHED_X() ERTS_MSACC_IS_ENABLED_CACHED()
+#define ERTS_MSACC_DECLARE_CACHE_X() ERTS_MSACC_DECLARE_CACHE()
+#define ERTS_MSACC_PUSH_STATE_M_X() ERTS_MSACC_PUSH_STATE_M()
+#define ERTS_MSACC_PUSH_STATE_CACHED_M_X() ERTS_MSACC_PUSH_STATE_CACHED_M()
+#define ERTS_MSACC_SET_STATE_CACHED_M_X(state) ERTS_MSACC_SET_STATE_CACHED_M(state)
+#define ERTS_MSACC_POP_STATE_M_X() ERTS_MSACC_POP_STATE_M()
+#define ERTS_MSACC_PUSH_AND_SET_STATE_M_X(state) ERTS_MSACC_PUSH_AND_SET_STATE_M(state)
+
+#endif /* !ERTS_MSACC_EXTENDED_STATES */
+
+#endif /* ERL_MSACC_H__ */
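As a usage illustration of the instrumentation macros above (hypothetical function, not part of the patch), the extended-state variants follow the push/pop pattern described in the header comment:

    static void db_lookup_example(void)
    {
        ERTS_MSACC_PUSH_AND_SET_STATE_X(ERTS_MSACC_STATE_ETS);
        /* ... perform the table operation ... */
        ERTS_MSACC_POP_STATE_X();
    }

Since the _X macros expand to nothing when extended states are not enabled, the ERTS_MSACC_STATE_ETS argument is never evaluated in that configuration.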
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index d659788f30..b057ec7770 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1188,6 +1188,26 @@ void enif_thread_exit(void *resp) { erl_drv_thread_exit(resp); }
int enif_thread_join(ErlNifTid tid, void **respp) { return erl_drv_thread_join(tid,respp); }
int enif_getenv(const char *key, char *value, size_t *value_size) { return erl_drv_getenv(key, value, value_size); }
+ErlNifTime enif_monotonic_time(ErlNifTimeUnit time_unit)
+{
+ return (ErlNifTime) erts_napi_monotonic_time((int) time_unit);
+}
+
+ErlNifTime enif_time_offset(ErlNifTimeUnit time_unit)
+{
+ return (ErlNifTime) erts_napi_time_offset((int) time_unit);
+}
+
+ErlNifTime
+enif_convert_time_unit(ErlNifTime val,
+ ErlNifTimeUnit from,
+ ErlNifTimeUnit to)
+{
+ return (ErlNifTime) erts_napi_convert_time_unit((ErtsMonotonicTime) val,
+ (int) from,
+ (int) to);
+}
+
int enif_fprintf(void* filep, const char* format, ...)
{
int ret;
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 40c2ad6f08..c9c8b1d0af 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -49,9 +49,10 @@
** add ErlNifFunc flags
** 2.8: 18.0 add enif_has_pending_exception
** 2.9: 18.2 enif_getenv
+** 2.10: Time API
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 9
+#define ERL_NIF_MINOR_VERSION 10
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -67,54 +68,31 @@
#include <stdlib.h>
-#ifdef SIZEOF_CHAR
-# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
-# undef SIZEOF_CHAR
-#endif
-#ifdef SIZEOF_SHORT
-# define SIZEOF_SHORT_SAVED__ SIZEOF_SHORT
-# undef SIZEOF_SHORT
-#endif
-#ifdef SIZEOF_INT
-# define SIZEOF_INT_SAVED__ SIZEOF_INT
-# undef SIZEOF_INT
-#endif
-#ifdef SIZEOF_LONG
-# define SIZEOF_LONG_SAVED__ SIZEOF_LONG
-# undef SIZEOF_LONG
-#endif
-#ifdef SIZEOF_LONG_LONG
-# define SIZEOF_LONG_LONG_SAVED__ SIZEOF_LONG_LONG
-# undef SIZEOF_LONG_LONG
-#endif
-#include "erl_int_sizes_config.h"
-
#ifdef __cplusplus
extern "C" {
#endif
-#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
-typedef unsigned __int64 ErlNifUInt64;
-typedef __int64 ErlNifSInt64;
-#elif SIZEOF_LONG == 8
-typedef unsigned long ErlNifUInt64;
-typedef long ErlNifSInt64;
-#elif SIZEOF_LONG_LONG == 8
-typedef unsigned long long ErlNifUInt64;
-typedef long long ErlNifSInt64;
-#else
-#error No 64-bit integer type
-#endif
+typedef ErlNapiUInt64 ErlNifUInt64;
+typedef ErlNapiSInt64 ErlNifSInt64;
+typedef ErlNapiUInt ErlNifUInt;
+typedef ErlNapiSInt ErlNifSInt;
-#define ERL_NIF_VM_VARIANT "beam.vanilla"
-#if SIZEOF_LONG == SIZEOF_VOID_P
-typedef unsigned long ERL_NIF_TERM;
-#elif SIZEOF_LONG_LONG == SIZEOF_VOID_P
-typedef unsigned long long ERL_NIF_TERM;
-#endif
+# define ERL_NIF_VM_VARIANT "beam.vanilla"
+typedef ErlNifUInt ERL_NIF_TERM;
typedef ERL_NIF_TERM ERL_NIF_UINT;
+typedef ErlNifSInt64 ErlNifTime;
+
+#define ERL_NIF_TIME_ERROR ((ErlNifSInt64) ERTS_NAPI_TIME_ERROR__)
+
+typedef enum {
+ ERL_NIF_SEC = ERTS_NAPI_SEC__,
+ ERL_NIF_MSEC = ERTS_NAPI_MSEC__,
+ ERL_NIF_USEC = ERTS_NAPI_USEC__,
+ ERL_NIF_NSEC = ERTS_NAPI_NSEC__
+} ErlNifTimeUnit;
+
struct enif_environment_t;
typedef struct enif_environment_t ErlNifEnv;
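A hedged NIF sketch using the new 2.10 time API; the NIF itself is hypothetical, and enif_make_badarg/enif_make_int64 are pre-existing NIF calls used only for illustration:

    static ERL_NIF_TERM time_offset_usec_example(ErlNifEnv* env, int argc,
                                                 const ERL_NIF_TERM argv[])
    {
        /* current offset between Erlang monotonic time and system time */
        ErlNifTime offset = enif_time_offset(ERL_NIF_USEC);
        if (offset == ERL_NIF_TIME_ERROR)
            return enif_make_badarg(env);
        return enif_make_int64(env, offset);
    }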
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 08b9afc6af..1448a508a2 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -160,6 +160,9 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int
ERL_NIF_API_FUNC_DECL(int, enif_has_pending_exception, (ErlNifEnv *env, ERL_NIF_TERM* reason));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_raise_exception, (ErlNifEnv *env, ERL_NIF_TERM reason));
ERL_NIF_API_FUNC_DECL(int,enif_getenv,(const char* key, char* value, size_t* value_size));
+ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_monotonic_time, (ErlNifTimeUnit));
+ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_time_offset, (ErlNifTimeUnit));
+ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_convert_time_unit, (ErlNifTime, ErlNifTimeUnit, ErlNifTimeUnit));
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
@@ -312,6 +315,9 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
# define enif_has_pending_exception ERL_NIF_API_FUNC_MACRO(enif_has_pending_exception)
# define enif_raise_exception ERL_NIF_API_FUNC_MACRO(enif_raise_exception)
# define enif_getenv ERL_NIF_API_FUNC_MACRO(enif_getenv)
+# define enif_monotonic_time ERL_NIF_API_FUNC_MACRO(enif_monotonic_time)
+# define enif_time_offset ERL_NIF_API_FUNC_MACRO(enif_time_offset)
+# define enif_convert_time_unit ERL_NIF_API_FUNC_MACRO(enif_convert_time_unit)
/*
** ADD NEW ENTRIES HERE (before this comment)
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 2c09834d19..197b328fe2 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1648,6 +1648,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
int active;
Uint64 start_time = 0;
ErtsSchedulerData *esdp = runq->scheduler;
+ ERTS_MSACC_PUSH_STATE_M();
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
@@ -1690,6 +1691,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
state = erts_atomic32_read_nob(&pp->state);
pp->reds = ERTS_PORT_REDS_EXECUTE;
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
goto begin_handle_tasks;
while (1) {
@@ -1822,6 +1824,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
if (io_tasks_executed) {
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index c386d63a9e..8e1ebe795c 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -148,14 +148,14 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-int erts_default_spo_flags = 0;
-int erts_eager_check_io = 1;
-int erts_sched_compact_load;
-int erts_sched_balance_util = 0;
-Uint erts_no_schedulers;
+int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = 0;
+int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1;
+int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
+int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
+Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
#ifdef ERTS_DIRTY_SCHEDULERS
-Uint erts_no_dirty_cpu_schedulers;
-Uint erts_no_dirty_io_schedulers;
+Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_cpu_schedulers);
+Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_io_schedulers);
#endif
static char *erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_NO_FLAGS] = {0};
@@ -295,7 +295,7 @@ do { \
erts_sched_stat_t erts_sched_stat;
#ifdef USE_THREADS
-static erts_tsd_key_t sched_data_key;
+static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
#endif
static erts_smp_atomic32_t function_calls;
@@ -335,15 +335,16 @@ static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
static Uint last_reductions;
static Uint last_exact_reductions;
-Eterm erts_system_monitor;
-Eterm erts_system_monitor_long_gc;
-Uint erts_system_monitor_long_schedule;
-Eterm erts_system_monitor_large_heap;
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor);
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor_long_gc);
+Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_long_schedule);
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor_large_heap);
struct erts_system_monitor_flags_t erts_system_monitor_flags;
/* system performance monitor */
Eterm erts_system_profile;
struct erts_system_profile_flags_t erts_system_profile_flags;
+int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
#if ERTS_MAX_PROCESSES > 0x7fffffff
#error "Need to store process_count in another type"
@@ -940,6 +941,12 @@ sched_wall_time_change(ErtsSchedulerData *esdp, int working)
}
}
}
+ if (!working) {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ } else {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_OTHER);
+ }
+
}
typedef struct {
@@ -1695,15 +1702,17 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
int need_thr_progress = 0;
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
-
+ ERTS_MSACC_PUSH_STATE_M_X();
#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ALLOC);
erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
&need_thr_progress,
&wakeup,
&more_work);
+ ERTS_MSACC_POP_STATE_M_X();
if (more_work) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD)
& ERTS_SSI_AUX_WORK_DD_THR_PRGR) {
@@ -2175,12 +2184,15 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
if (!(aux_work & ~ignore)) { \
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
+ ERTS_MSACC_UPDATE_CACHE(); \
+ ERTS_MSACC_POP_STATE_M(); \
return aux_work; \
} \
}
erts_aint32_t aux_work = orig_aux_work;
erts_aint32_t ignore = 0;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_AUX);
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
#ifdef ERTS_SMP
@@ -2271,6 +2283,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
haw_thr_prgr_current_check_progress(awdp);
#endif
+ ERTS_MSACC_UPDATE_CACHE();
+ ERTS_MSACC_POP_STATE_M();
return aux_work;
#undef HANDLE_AUX_WORK
@@ -2778,6 +2792,8 @@ aux_thread(void *unused)
ssi->event = erts_tse_fetch();
+ erts_msacc_init_thread("aux", 1, 1);
+
callbacks.arg = (void *) ssi;
callbacks.wakeup = thr_prgr_wakeup;
callbacks.prepare_wait = thr_prgr_prep_wait;
@@ -2788,6 +2804,7 @@ aux_thread(void *unused)
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+
sched_prep_spin_wait(ssi);
while (1) {
@@ -2841,6 +2858,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_SMP
int thr_prgr_active = 1;
erts_aint32_t flgs;
+#endif
+ ERTS_MSACC_PUSH_STATE_M();
+#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -2897,6 +2917,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
}
@@ -2958,7 +2979,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
- 1) + 1;
} else
timeout = -1;
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
res = erts_tse_twait(ssi->event, timeout);
+ ERTS_MSACC_POP_STATE_M();
current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
erts_get_monotonic_time(esdp);
} while (res == EINTR);
@@ -3027,9 +3050,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (working)
sched_wall_time_change(esdp, working = 0);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+
ASSERT(!erts_port_task_have_outstanding_io_tasks());
erl_sys_schedule(1); /* Might give us something to do */
+ ERTS_MSACC_POP_STATE_M();
+
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
@@ -3050,6 +3077,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_thr_progress_active(esdp, thr_prgr_active = 1);
#endif
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
#ifdef ERTS_SMP
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
@@ -3147,8 +3175,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+
erl_sys_schedule(0);
+ ERTS_MSACC_POP_STATE_M();
+
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
@@ -7925,6 +7957,8 @@ sched_thread_func(void *vesdp)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
+ erts_msacc_init_thread("scheduler", no, 1);
+
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
erts_alloc_register_scheduler(vesdp);
#endif
@@ -9203,6 +9237,8 @@ Process *schedule(Process *p, int calls)
Uint32 flags;
erts_aint32_t state = 0; /* Supress warning... */
+ ERTS_MSACC_DECLARE_CACHE();
+
#ifdef USE_VM_PROBES
if (p != NULL && DTRACE_ENABLED(process_unscheduled)) {
DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
@@ -9307,6 +9343,8 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+
if (state & ERTS_PSFLG_FREE) {
#ifdef ERTS_SMP
ASSERT(esdp->free_process == p);
@@ -9467,7 +9505,7 @@ Process *schedule(Process *p, int calls)
scheduler_wait(&fcalls, esdp, rq);
flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
flags |= ERTS_RUNQ_FLG_EXEC;
-
+ ERTS_MSACC_UPDATE_CACHE();
#ifdef ERTS_SMP
non_empty_runq(rq);
#endif
@@ -9482,6 +9520,8 @@ Process *schedule(Process *p, int calls)
* Schedule system-level activities.
*/
+ ERTS_MSACC_PUSH_STATE_CACHED_M();
+
erts_smp_atomic32_set_relb(&function_calls, 0);
fcalls = 0;
@@ -9489,7 +9529,9 @@ Process *schedule(Process *p, int calls)
erts_sys_schedule_interrupt(0);
#endif
erts_smp_runq_unlock(rq);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
erl_sys_schedule(1);
+ ERTS_MSACC_POP_STATE_M();
current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
@@ -9567,9 +9609,12 @@ Process *schedule(Process *p, int calls)
case 0: /* No process at all */
default:
ASSERT(qmask == 0);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
goto check_activities_to_run;
}
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR);
+
BM_START_TIMER(system);
/*
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index b50fa5a10f..59e63780f0 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -60,6 +60,9 @@ typedef struct process Process;
#include "erl_mseg.h"
#include "erl_async.h"
#include "erl_gc.h"
+#define ERTS_ONLY_INCLUDE_TRACE_FLAGS
+#include "erl_trace.h"
+#undef ERTS_ONLY_INCLUDE_TRACE_FLAGS
#ifdef HIPE
#include "hipe_process.h"
@@ -1284,6 +1287,7 @@ struct erts_system_profile_flags_t {
unsigned int exclusive : 1;
};
extern struct erts_system_profile_flags_t erts_system_profile_flags;
+extern int erts_system_profile_ts_type;
/* process flags */
#define F_HIBERNATE_SCHED (1 << 0) /* Schedule out after hibernate op */
@@ -1315,61 +1319,90 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
* not be scheduled out while F_DELAY_GC is set.
*/
+#define ERTS_TRACE_FLAGS_TS_TYPE_SHIFT 0
+
+#define F_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
+
/* process trace_flags */
-#define F_SENSITIVE (1 << 0)
-#define F_TRACE_SEND (1 << 1)
-#define F_TRACE_RECEIVE (1 << 2)
-#define F_TRACE_SOS (1 << 3) /* Set on spawn */
-#define F_TRACE_SOS1 (1 << 4) /* Set on first spawn */
-#define F_TRACE_SOL (1 << 5) /* Set on link */
-#define F_TRACE_SOL1 (1 << 6) /* Set on first link */
-#define F_TRACE_CALLS (1 << 7)
-#define F_TIMESTAMP (1 << 8)
-#define F_TRACE_PROCS (1 << 9)
-#define F_TRACE_FIRST_CHILD (1 << 10)
-#define F_TRACE_SCHED (1 << 11)
-#define F_TRACE_GC (1 << 12)
-#define F_TRACE_ARITY_ONLY (1 << 13)
-#define F_TRACE_RETURN_TO (1 << 14) /* Return_to trace when breakpoint tracing */
-#define F_TRACE_SILENT (1 << 15) /* No call trace msg suppress */
-#define F_TRACER (1 << 16) /* May be (has been) tracer */
-#define F_EXCEPTION_TRACE (1 << 17) /* May have exception trace on stack */
+
+#define F_NOW_TS (ERTS_TRACE_FLG_NOW_TIMESTAMP \
+ << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define F_STRICT_MON_TS (ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP \
+ << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define F_MON_TS (ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP \
+ << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define F_SENSITIVE F_TRACE_FLAG(0)
+#define F_TRACE_SEND F_TRACE_FLAG(1)
+#define F_TRACE_RECEIVE F_TRACE_FLAG(2)
+#define F_TRACE_SOS F_TRACE_FLAG(3) /* Set on spawn */
+#define F_TRACE_SOS1 F_TRACE_FLAG(4) /* Set on first spawn */
+#define F_TRACE_SOL F_TRACE_FLAG(5) /* Set on link */
+#define F_TRACE_SOL1 F_TRACE_FLAG(6) /* Set on first link */
+#define F_TRACE_CALLS F_TRACE_FLAG(7)
+#define F_TRACE_PROCS F_TRACE_FLAG(8)
+#define F_TRACE_FIRST_CHILD F_TRACE_FLAG(9)
+#define F_TRACE_SCHED F_TRACE_FLAG(10)
+#define F_TRACE_GC F_TRACE_FLAG(11)
+#define F_TRACE_ARITY_ONLY F_TRACE_FLAG(12)
+#define F_TRACE_RETURN_TO F_TRACE_FLAG(13) /* Return_to trace when breakpoint tracing */
+#define F_TRACE_SILENT F_TRACE_FLAG(14) /* No call trace msg suppress */
+#define F_TRACER F_TRACE_FLAG(15) /* May be (has been) tracer */
+#define F_EXCEPTION_TRACE F_TRACE_FLAG(16) /* May have exception trace on stack */
/* port trace flags, currently the same as process trace flags */
-#define F_TRACE_SCHED_PORTS (1 << 18) /* Trace of port scheduling */
-#define F_TRACE_SCHED_PROCS (1 << 19) /* With virtual scheduling */
-#define F_TRACE_PORTS (1 << 20) /* Ports equivalent to F_TRACE_PROCS */
-#define F_TRACE_SCHED_NO (1 << 21) /* Trace with scheduler id */
-#define F_TRACE_SCHED_EXIT (1 << 22)
+#define F_TRACE_SCHED_PORTS F_TRACE_FLAG(17) /* Trace of port scheduling */
+#define F_TRACE_SCHED_PROCS F_TRACE_FLAG(18) /* With virtual scheduling */
+#define F_TRACE_PORTS F_TRACE_FLAG(19) /* Ports equivalent to F_TRACE_PROCS */
+#define F_TRACE_SCHED_NO F_TRACE_FLAG(20) /* Trace with scheduler id */
+#define F_TRACE_SCHED_EXIT F_TRACE_FLAG(21)
-#define F_NUM_FLAGS 23
+#define F_NUM_FLAGS (ERTS_TRACE_TS_TYPE_BITS + 22)
#ifdef DEBUG
# define F_INITIAL_TRACE_FLAGS (5 << F_NUM_FLAGS)
#else
# define F_INITIAL_TRACE_FLAGS 0
#endif
+/* F_TIMESTAMP_MASK is a bit-field of all timestamp types */
+#define F_TIMESTAMP_MASK \
+ (ERTS_TRACE_TS_TYPE_MASK << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
+
#define TRACEE_FLAGS ( F_TRACE_PROCS | F_TRACE_CALLS \
| F_TRACE_SOS | F_TRACE_SOS1| F_TRACE_RECEIVE \
| F_TRACE_SOL | F_TRACE_SOL1 | F_TRACE_SEND \
- | F_TRACE_SCHED | F_TIMESTAMP | F_TRACE_GC \
+ | F_TRACE_SCHED | F_TIMESTAMP_MASK | F_TRACE_GC \
| F_TRACE_ARITY_ONLY | F_TRACE_RETURN_TO \
| F_TRACE_SILENT | F_TRACE_SCHED_PROCS | F_TRACE_PORTS \
| F_TRACE_SCHED_PORTS | F_TRACE_SCHED_NO \
| F_TRACE_SCHED_EXIT)
#define ERTS_TRACEE_MODIFIER_FLAGS \
- (F_TRACE_SILENT | F_TIMESTAMP | F_TRACE_SCHED_NO)
+ (F_TRACE_SILENT | F_TIMESTAMP_MASK | F_TRACE_SCHED_NO)
#define ERTS_PORT_TRACEE_FLAGS \
(ERTS_TRACEE_MODIFIER_FLAGS | F_TRACE_PORTS | F_TRACE_SCHED_PORTS)
#define ERTS_PROC_TRACEE_FLAGS \
((TRACEE_FLAGS & ~ERTS_PORT_TRACEE_FLAGS) | ERTS_TRACEE_MODIFIER_FLAGS)
+#define SEQ_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
+
/* Sequential trace flags */
+
+/* SEQ_TRACE_TIMESTAMP_MASK is a bit-field */
+#define SEQ_TRACE_TIMESTAMP_MASK \
+ (ERTS_TRACE_TS_TYPE_MASK << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
+
#define SEQ_TRACE_SEND (1 << 0)
#define SEQ_TRACE_RECEIVE (1 << 1)
#define SEQ_TRACE_PRINT (1 << 2)
-#define SEQ_TRACE_TIMESTAMP (1 << 3)
+
+#define ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT 3
+
+#define SEQ_TRACE_NOW_TS (ERTS_TRACE_FLG_NOW_TIMESTAMP \
+ << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define SEQ_TRACE_STRICT_MON_TS (ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP \
+ << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
+#define SEQ_TRACE_MON_TS (ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP \
+ << ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT)
#ifdef USE_VM_PROBES
#define DT_UTAG_PERMANENT (1 << 0)
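To make the new flag layout concrete: the timestamp type now occupies the low ERTS_TRACE_TS_TYPE_BITS bits of the trace-flag word and the remaining flags are shifted above it. A hypothetical helper (the macro name is illustrative; the mask and bit-count constants are assumed to come from erl_trace.h, included above with ERTS_ONLY_INCLUDE_TRACE_FLAGS):

    /* extract the timestamp type from a trace-flag word */
    #define EXAMPLE_TFLGS2TSTYPE(tflgs) \
        (((tflgs) & F_TIMESTAMP_MASK) >> ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)

    /* switching a tracee to strict monotonic timestamps would clear the old
       type and set the new one, e.g.:
           flags = (flags & ~F_TIMESTAMP_MASK) | F_STRICT_MON_TS;          */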
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 34f91e2ec8..e689547d1d 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -2056,6 +2056,8 @@ erts_atomic64_read_dirty(erts_atomic64_t *var)
#endif /* !USE_THREADS */
+#include "erl_msacc.h"
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
@@ -2414,6 +2416,7 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
{
#ifdef USE_THREADS
int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
@@ -2432,6 +2435,7 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
#endif
if (res != 0 && res != EINTR)
erts_thr_fatal_error(res, "wait on condition variable");
+ ERTS_MSACC_POP_STATE();
#endif
}
@@ -3488,7 +3492,11 @@ ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
{
#ifdef USE_THREADS
- return ethr_event_wait(&((ethr_ts_event *) ep)->event);
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_wait(&((ethr_ts_event *) ep)->event);
+ ERTS_MSACC_POP_STATE();
+ return res;
#else
return ENOTSUP;
#endif
@@ -3497,7 +3505,11 @@ ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
{
#ifdef USE_THREADS
- return ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
+ ERTS_MSACC_POP_STATE();
+ return res;
#else
return ENOTSUP;
#endif
@@ -3506,8 +3518,12 @@ ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo)
{
#ifdef USE_THREADS
- return ethr_event_twait(&((ethr_ts_event *) ep)->event,
- (ethr_sint64_t) tmo);
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_twait(&((ethr_ts_event *) ep)->event,
+ (ethr_sint64_t) tmo);
+ ERTS_MSACC_POP_STATE();
+ return res;
#else
return ENOTSUP;
#endif
@@ -3516,9 +3532,13 @@ ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo)
ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo)
{
#ifdef USE_THREADS
- return ethr_event_stwait(&((ethr_ts_event *) ep)->event,
- spincount,
- (ethr_sint64_t) tmo);
+ int res;
+ ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
+ res = ethr_event_stwait(&((ethr_ts_event *) ep)->event,
+ spincount,
+ (ethr_sint64_t) tmo);
+ ERTS_MSACC_POP_STATE();
+ return res;
#else
return ENOTSUP;
#endif
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index 43e543e035..5242063550 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -133,11 +133,17 @@ typedef struct {
extern ErtsTimeSupData erts_time_sup__;
+ErtsMonotonicTime erts_napi_monotonic_time(int time_unit);
+ErtsMonotonicTime erts_napi_time_offset(int time_unit);
+ErtsMonotonicTime erts_napi_convert_time_unit(ErtsMonotonicTime val, int from, int to);
+
ERTS_GLB_INLINE Uint64
erts_time_unit_conversion(Uint64 value,
Uint32 from_time_unit,
Uint32 to_time_unit);
+ErtsSysPerfCounter erts_perf_counter_unit(void);
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE Uint64
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 7ec64506e8..98159fdf72 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -33,6 +33,8 @@
#include "global.h"
#define ERTS_WANT_TIMER_WHEEL_API
#include "erl_time.h"
+#include "erl_driver.h"
+#include "erl_nif.h"
static erts_smp_mtx_t erts_timeofday_mtx;
static erts_smp_mtx_t erts_get_time_mtx;
@@ -57,6 +59,7 @@ static int time_sup_initialized = 0;
#define ERTS_MONOTONIC_TIME_TERA \
(ERTS_MONOTONIC_TIME_GIGA*ERTS_MONOTONIC_TIME_KILO)
+static void init_time_napi(void);
static void
schedule_send_time_offset_changed_notifications(ErtsMonotonicTime new_offset);
@@ -948,6 +951,8 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode)
ErtsMonotonicTime abs_native_offset, native_offset;
#endif
+ init_time_napi();
+
erts_hl_timer_init();
ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX);
@@ -1744,7 +1749,40 @@ erts_get_monotonic_time(ErtsSchedulerData *esdp)
{
ErtsMonotonicTime mtime = time_sup.r.o.get_time();
update_last_mtime(esdp, mtime);
- return mtime;
+ return mtime;
+}
+
+ErtsMonotonicTime
+erts_get_time_offset(void)
+{
+ return get_time_offset();
+}
+
+static ERTS_INLINE void
+make_timestamp_value(Uint* megasec, Uint* sec, Uint* microsec,
+ ErtsMonotonicTime mtime, ErtsMonotonicTime offset)
+{
+ ErtsMonotonicTime stime, as;
+ Uint ms;
+
+ stime = ERTS_MONOTONIC_TO_USEC(mtime + offset);
+
+ as = stime / ERTS_MONOTONIC_TIME_MEGA;
+ *megasec = ms = (Uint) (stime / ERTS_MONOTONIC_TIME_TERA);
+ *sec = (Uint) (as - (((ErtsMonotonicTime) ms)
+ * ERTS_MONOTONIC_TIME_MEGA));
+ *microsec = (Uint) (stime - as*ERTS_MONOTONIC_TIME_MEGA);
+
+ ASSERT(((ErtsMonotonicTime) ms)*ERTS_MONOTONIC_TIME_TERA
+ + ((ErtsMonotonicTime) *sec)*ERTS_MONOTONIC_TIME_MEGA
+ + *microsec == stime);
+}
+
+void
+erts_make_timestamp_value(Uint* megasec, Uint* sec, Uint* microsec,
+ ErtsMonotonicTime mtime, ErtsMonotonicTime offset)
+{
+ make_timestamp_value(megasec, sec, microsec, mtime, offset);
}
void
@@ -2165,6 +2203,146 @@ time_unit_conversion(Process *c_p, Eterm term, ErtsMonotonicTime val, ErtsMonoto
return ret;
}
+
+/*
+ * Time Native API (drivers and NIFs)
+ */
+
+#define ERTS_NAPI_TIME_ERROR ((ErtsMonotonicTime) ERTS_NAPI_TIME_ERROR__)
+
+static void
+init_time_napi(void)
+{
+ /* Verify that the time native API constants are as expected... */
+ /* Verify that the time native API constants are as expected... */
+
+ ASSERT(sizeof(ErtsMonotonicTime) == sizeof(ErlDrvTime));
+ ASSERT(ERL_DRV_TIME_ERROR == (ErlDrvTime) ERTS_NAPI_TIME_ERROR);
+ ASSERT(ERL_DRV_TIME_ERROR < (ErlDrvTime) 0);
+ ASSERT(ERTS_NAPI_SEC__ == (int) ERL_DRV_SEC);
+ ASSERT(ERTS_NAPI_MSEC__ == (int) ERL_DRV_MSEC);
+ ASSERT(ERTS_NAPI_USEC__ == (int) ERL_DRV_USEC);
+ ASSERT(ERTS_NAPI_NSEC__ == (int) ERL_DRV_NSEC);
+
+ ASSERT(sizeof(ErtsMonotonicTime) == sizeof(ErlNifTime));
+ ASSERT(ERL_NIF_TIME_ERROR == (ErlNifTime) ERTS_NAPI_TIME_ERROR);
+ ASSERT(ERL_NIF_TIME_ERROR < (ErlNifTime) 0);
+ ASSERT(ERTS_NAPI_SEC__ == (int) ERL_NIF_SEC);
+ ASSERT(ERTS_NAPI_MSEC__ == (int) ERL_NIF_MSEC);
+ ASSERT(ERTS_NAPI_USEC__ == (int) ERL_NIF_USEC);
+ ASSERT(ERTS_NAPI_NSEC__ == (int) ERL_NIF_NSEC);
+}
+
+ErtsMonotonicTime
+erts_napi_monotonic_time(int time_unit)
+{
+ ErtsSchedulerData *esdp;
+ ErtsMonotonicTime mtime;
+
+ /* At least for now only allow schedulers to do this... */
+ esdp = erts_get_scheduler_data();
+ if (!esdp)
+ return ERTS_NAPI_TIME_ERROR;
+
+ mtime = time_sup.r.o.get_time();
+ update_last_mtime(esdp, mtime);
+
+ switch (time_unit) {
+ case ERTS_NAPI_SEC__:
+ mtime = ERTS_MONOTONIC_TO_SEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_SEC;
+ break;
+ case ERTS_NAPI_MSEC__:
+ mtime = ERTS_MONOTONIC_TO_MSEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_MSEC;
+ break;
+ case ERTS_NAPI_USEC__:
+ mtime = ERTS_MONOTONIC_TO_USEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_USEC;
+ break;
+ case ERTS_NAPI_NSEC__:
+ mtime = ERTS_MONOTONIC_TO_NSEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_NSEC;
+ break;
+ default:
+ return ERTS_NAPI_TIME_ERROR;
+ }
+
+ return mtime;
+}
+
+ErtsMonotonicTime
+erts_napi_time_offset(int time_unit)
+{
+ ErtsSchedulerData *esdp;
+ ErtsSystemTime offs;
+
+ /* At least for now only allow schedulers to do this... */
+ esdp = erts_get_scheduler_data();
+ if (!esdp)
+ return ERTS_NAPI_TIME_ERROR;
+
+ offs = get_time_offset();
+ switch (time_unit) {
+ case ERTS_NAPI_SEC__:
+ offs = ERTS_MONOTONIC_TO_SEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_SEC;
+ break;
+ case ERTS_NAPI_MSEC__:
+ offs = ERTS_MONOTONIC_TO_MSEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_MSEC;
+ break;
+ case ERTS_NAPI_USEC__:
+ offs = ERTS_MONOTONIC_TO_USEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_USEC;
+ break;
+ case ERTS_NAPI_NSEC__:
+ offs = ERTS_MONOTONIC_TO_NSEC(offs);
+ offs -= ERTS_MONOTONIC_OFFSET_NSEC;
+ break;
+ default:
+ return ERTS_NAPI_TIME_ERROR;
+ }
+ return offs;
+}
+
+ErtsMonotonicTime
+erts_napi_convert_time_unit(ErtsMonotonicTime val, int from, int to)
+{
+ ErtsMonotonicTime ffreq, tfreq, denom;
+ /*
+ * Conversion between time units using the floor function.
+ *
+ * Note that this also needs to work for negative
+ * values. Ordinary integer division of a negative
+ * value truncates toward zero, i.e. gives the ceiling...
+ */
+
+ switch ((int) from) {
+ case ERTS_NAPI_SEC__: ffreq = 1; break;
+ case ERTS_NAPI_MSEC__: ffreq = 1000; break;
+ case ERTS_NAPI_USEC__: ffreq = 1000*1000; break;
+ case ERTS_NAPI_NSEC__: ffreq = 1000*1000*1000; break;
+ default: return ERTS_NAPI_TIME_ERROR;
+ }
+
+ switch ((int) to) {
+ case ERTS_NAPI_SEC__: tfreq = 1; break;
+ case ERTS_NAPI_MSEC__: tfreq = 1000; break;
+ case ERTS_NAPI_USEC__: tfreq = 1000*1000; break;
+ case ERTS_NAPI_NSEC__: tfreq = 1000*1000*1000; break;
+ default: return ERTS_NAPI_TIME_ERROR;
+ }
+
+ if (tfreq >= ffreq)
+ return val * (tfreq / ffreq);
+
+ denom = ffreq / tfreq;
+ if (val >= 0)
+ return val / denom;
+
+ return (val - (denom - 1)) / denom;
+}
+
/* Built in functions */
BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
@@ -2221,22 +2399,14 @@ BIF_RETTYPE time_offset_1(BIF_ALIST_1)
BIF_RETTYPE timestamp_0(BIF_ALIST_0)
{
Eterm *hp, res;
- ErtsMonotonicTime stime, mtime, all_sec, offset;
+ ErtsMonotonicTime mtime, offset;
Uint mega_sec, sec, micro_sec;
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
- stime = ERTS_MONOTONIC_TO_USEC(mtime + offset);
- all_sec = stime / ERTS_MONOTONIC_TIME_MEGA;
- mega_sec = (Uint) (stime / ERTS_MONOTONIC_TIME_TERA);
- sec = (Uint) (all_sec - (((ErtsMonotonicTime) mega_sec)
- * ERTS_MONOTONIC_TIME_MEGA));
- micro_sec = (Uint) (stime - all_sec*ERTS_MONOTONIC_TIME_MEGA);
- ASSERT(((ErtsMonotonicTime) mega_sec)*ERTS_MONOTONIC_TIME_TERA
- + ((ErtsMonotonicTime) sec)*ERTS_MONOTONIC_TIME_MEGA
- + micro_sec == stime);
+ make_timestamp_value(&mega_sec, &sec, &micro_sec, mtime, offset);
/*
* Mega seconds is the only value that potentially
@@ -2265,9 +2435,19 @@ BIF_RETTYPE os_system_time_0(BIF_ALIST_0)
BIF_RET(make_time_val(BIF_P, stime));
}
-BIF_RETTYPE os_system_time_1(BIF_ALIST_0)
+BIF_RETTYPE os_system_time_1(BIF_ALIST_1)
{
ErtsSystemTime stime = erts_os_system_time();
BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, stime, 0));
}
+BIF_RETTYPE
+os_perf_counter_0(BIF_ALIST_0)
+{
+ BIF_RET(make_time_val(BIF_P, erts_sys_perf_counter()));
+}
+
+BIF_RETTYPE erts_internal_perf_counter_unit_0(BIF_ALIST_0)
+{
+ BIF_RET(make_time_val(BIF_P, erts_sys_perf_counter_unit()));
+}
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index b774ba97af..25393369a7 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -38,6 +38,7 @@
#include "erl_binary.h"
#include "erl_bits.h"
#include "erl_thr_progress.h"
+#include "erl_bif_unique.h"
#if 0
#define DEBUG_PRINTOUTS
@@ -77,6 +78,264 @@ enum ErtsSysMsgType {
SYS_MSG_TYPE_SYSPROF
};
+#define ERTS_TRACE_TS_NOW_MAX_SIZE \
+ 4
+#define ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
+ ERTS_MAX_SINT64_HEAP_SIZE
+#define ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE \
+ (3 + ERTS_MAX_SINT64_HEAP_SIZE \
+ + ERTS_MAX_UINT64_HEAP_SIZE)
+
+#define ERTS_TRACE_PATCH_TS_MAX_SIZE \
+ (1 + ((ERTS_TRACE_TS_NOW_MAX_SIZE \
+ > ERTS_TRACE_TS_MONOTONIC_MAX_SIZE) \
+ ? ((ERTS_TRACE_TS_NOW_MAX_SIZE \
+ > ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
+ ? ERTS_TRACE_TS_NOW_MAX_SIZE \
+ : ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
+ : ((ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
+ > ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE) \
+ ? ERTS_TRACE_TS_MONOTONIC_MAX_SIZE \
+ : ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE)))
+
+#define TFLGS_TS_TYPE(p) ERTS_TFLGS2TSTYPE(ERTS_TRACE_FLAGS((p)))
+
+/*
+ * FUTURE CHANGES:
+ *
+ * The timestamp functionality has intentionally been
+ * split in two parts for future use even though it
+ * is not used like this today. take_timestamp() takes
+ * the timestamp and calculates the heap need for it
+ * (which is not constant). write_timestamp() writes the
+ * timestamp to the allocated heap. That is, one typically
+ * wants to take the timestamp before allocating the heap
+ * and then write it to the heap.
+ *
+ * The trace output functionality now uses patch_ts_size(),
+ * write_ts(), and patch_ts(). write_ts() both takes the
+ * timestamp and writes it. Since we don't know the
+ * heap need when allocating the heap area, we need to
+ * over-allocate (the maximum size from patch_ts_size()) and
+ * then potentially (often) shrink the heap area after the
+ * timestamp has been written. The only reason it is
+ * currently done this way is that we do not want to
+ * make major changes to the trace behavior in a patch.
+ * This is planned to be changed in the next major release.
+ */
+
+typedef struct {
+ int ts_type_flag;
+ union {
+ struct {
+ Uint ms;
+ Uint s;
+ Uint us;
+ } now;
+ struct {
+ ErtsMonotonicTime time;
+ Sint64 raw_unique;
+ } monotonic;
+ } u;
+} ErtsTraceTimeStamp;
+
+static ERTS_INLINE Uint
+take_timestamp(ErtsTraceTimeStamp *tsp, int ts_type)
+{
+ int ts_type_flag = ts_type & -ts_type; /* least significant flag */
+
+ ASSERT(ts_type_flag == ERTS_TRACE_FLG_NOW_TIMESTAMP
+ || ts_type_flag == ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP
+ || ts_type_flag == ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP
+ || ts_type_flag == 0);
+
+ tsp->ts_type_flag = ts_type_flag;
+ switch (ts_type_flag) {
+ case 0:
+ return (Uint) 0;
+ case ERTS_TRACE_FLG_NOW_TIMESTAMP:
+#ifdef HAVE_ERTS_NOW_CPU
+ if (erts_cpu_timestamp)
+ erts_get_now_cpu(&tsp->u.now.ms, &tsp->u.now.s, &tsp->u.now.us);
+ else
+#endif
+ get_now(&tsp->u.now.ms, &tsp->u.now.s, &tsp->u.now.us);
+ return (Uint) 4;
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP: {
+ Uint hsz = 0;
+ ErtsMonotonicTime mtime = erts_get_monotonic_time(NULL);
+ mtime = ERTS_MONOTONIC_TO_NSEC(mtime);
+ mtime += ERTS_MONOTONIC_OFFSET_NSEC;
+ hsz = (IS_SSMALL(mtime) ?
+ (Uint) 0
+ : ERTS_SINT64_HEAP_SIZE((Sint64) mtime));
+ tsp->u.monotonic.time = mtime;
+ if (ts_type_flag == ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP) {
+ Sint64 raw_unique;
+ hsz += 3; /* 2-tuple */
+ raw_unique = erts_raw_get_unique_monotonic_integer();
+ tsp->u.monotonic.raw_unique = raw_unique;
+ hsz += erts_raw_unique_monotonic_integer_heap_size(raw_unique);
+ }
+ return hsz;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("invalid timestamp type");
+ return 0;
+ }
+}
+
+static ERTS_INLINE Eterm
+write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp)
+{
+ int ts_type_flag = tsp->ts_type_flag;
+ Eterm res;
+
+ switch (ts_type_flag) {
+ case 0:
+ return NIL;
+ case ERTS_TRACE_FLG_NOW_TIMESTAMP:
+ res = TUPLE3(*hpp,
+ make_small(tsp->u.now.ms),
+ make_small(tsp->u.now.s),
+ make_small(tsp->u.now.us));
+ *hpp += 4;
+ return res;
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP: {
+ Sint64 mtime, raw;
+ Eterm unique, emtime;
+
+ mtime = (Sint64) tsp->u.monotonic.time;
+ emtime = (IS_SSMALL(mtime)
+ ? make_small((Sint64) mtime)
+ : erts_sint64_to_big((Sint64) mtime, hpp));
+
+ if (ts_type_flag == ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP)
+ return emtime;
+
+ raw = tsp->u.monotonic.raw_unique;
+ unique = erts_raw_make_unique_monotonic_integer_value(hpp,
+ raw);
+ res = TUPLE2(*hpp, emtime, unique);
+ *hpp += 3;
+ return res;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("invalid timestamp type");
+ return THE_NON_VALUE;
+ }
+}
+
+#define PATCH_TS_SIZE(p) patch_ts_size(TFLGS_TS_TYPE(p))
+
+static ERTS_INLINE Uint
+patch_ts_size(int ts_type)
+{
+ int ts_type_flag = ts_type & -ts_type; /* least significant flag */
+ switch (ts_type_flag) {
+ case 0:
+ return 0;
+ case ERTS_TRACE_FLG_NOW_TIMESTAMP:
+ return 1 + ERTS_TRACE_TS_NOW_MAX_SIZE;
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ return 1 + ERTS_TRACE_TS_MONOTONIC_MAX_SIZE;
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP:
+ return 1 + ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE;
+ default:
+ ERTS_INTERNAL_ERROR("invalid timestamp type");
+ return 0;
+ }
+}
+
+/*
+ * Write a timestamp. The timestamp MUST be the last
+ * thing built on the heap. This is because write_ts() might
+ * adjust the size of the used area.
+ */
+static Eterm
+write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer)
+{
+ ErtsTraceTimeStamp ts;
+ Sint shrink;
+ Eterm res, *ts_hp = hp;
+ Uint hsz;
+
+ ASSERT(ts_type);
+
+ hsz = take_timestamp(&ts, ts_type);
+
+ res = write_timestamp(&ts, &ts_hp);
+
+ ASSERT(ts_hp == hp + hsz);
+
+ switch (ts.ts_type_flag) {
+ case ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP:
+ shrink = ERTS_TRACE_TS_MONOTONIC_MAX_SIZE;
+ break;
+ case ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP:
+ shrink = ERTS_TRACE_TS_STRICT_MONOTONIC_MAX_SIZE;
+ break;
+ default:
+ return res;
+ }
+
+ shrink -= hsz;
+
+ ASSERT(shrink >= 0);
+
+ if (shrink) {
+ if (bp)
+ bp->used_size -= shrink;
+#ifndef ERTS_SMP
+ else if (tracer) {
+ Eterm *endp = ts_hp + shrink;
+ HRelease(tracer, endp, ts_hp);
+ }
+#endif
+ }
+
+ return res;
+}
+
+/*
+ * Patch a timestamp into a tuple. The tuple MUST be the last thing
+ * built on the heap before the call, and the timestamp MUST be
+ * the last thing after the call. This is because patch_ts() might adjust
+ * the size of the used area.
+ */
+
+#define PATCH_TS__(Type, Tuple, Hp, Bp, Tracer) \
+ do { \
+ int ts_type__ = (Type); \
+ if (ts_type__) \
+ patch_ts(ts_type__, (Tuple), (Hp), (Bp), (Tracer)); \
+ } while (0)
+
+#ifdef ERTS_SMP
+#define PATCH_TS(Type, Tuple, Hp, Bp, Tracer) \
+ PATCH_TS__((Type), (Tuple), (Hp), (Bp), NULL)
+#else
+#define PATCH_TS(Type, Tuple, Hp, Bp, Tracer) \
+ PATCH_TS__((Type), (Tuple), (Hp), (Bp), (Tracer))
+#endif
+
+static ERTS_INLINE void
+patch_ts(int ts_type, Eterm tuple, Eterm* hp, ErlHeapFragment *bp, Process *tracer)
+{
+ Eterm *tptr = tuple_val(tuple);
+ int arity = arityval(*tptr);
+
+ ASSERT(ts_type);
+ ASSERT((tptr+arity+1) == hp);
+
+ tptr[0] = make_arityval(arity+1);
+ tptr[1] = am_trace_ts;
+
+ *hp = write_ts(ts_type, hp+1, bp, tracer);
+}
+
#ifdef ERTS_SMP
static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
Eterm from,
@@ -364,23 +623,6 @@ erts_get_system_profile(void) {
return profile;
}
-
-#ifdef HAVE_ERTS_NOW_CPU
-# define GET_NOW(m, s, u) \
-do { \
- if (erts_cpu_timestamp) \
- erts_get_now_cpu(m, s, u); \
- else \
- get_now(m, s, u); \
-} while (0)
-#else
-# define GET_NOW(m, s, u) do {get_now(m, s, u);} while (0)
-#endif
-
-
-
-static Eterm* patch_ts(Eterm tuple4, Eterm* hp);
-
#ifdef ERTS_SMP
static void
do_send_to_port(Eterm to,
@@ -435,11 +677,11 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
/* Send {trace_ts, Pid, out, 0, Timestamp}
* followed by {trace_ts, Pid, in, 0, NewTimestamp}
*
- * 'NewTimestamp' is fetched from GET_NOW() through patch_ts().
+ * 'NewTimestamp' is patched in through patch_ts().
*/
static void
-do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
-#define LOCAL_HEAP_SIZE (4+5+5)
+do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp, int ts_type) {
+#define LOCAL_HEAP_SIZE (5+5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
Eterm message;
Eterm *hp;
@@ -461,9 +703,11 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
SYS_MSG_TYPE_UNDEFINED,
message);
- message = TUPLE4(hp, am_trace_ts, pid, am_in, mfarity);
- hp += 5;
- hp = patch_ts(message, hp);
+
+ message = TUPLE5(hp, am_trace_ts, pid, am_in, mfarity,
+ NIL /* Will be overwritten by timestamp */);
+ hp += 6;
+ hp[-1] = write_ts(ts_type, hp, NULL, NULL);
do_send_to_port(trace_port->common.id,
trace_port,
@@ -480,7 +724,7 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
* It is assumed that 'message' is not an 'out' message.
*
* 'c_p' is the currently executing process, "tracee" is the traced process
- * which 'message' concerns => if (*tracee_flags & F_TIMESTAMP),
+ * which 'message' concerns => if (*tracee_flags & F_TIMESTAMP_MASK),
* 'message' must contain a timestamp.
*/
static void
@@ -488,8 +732,9 @@ send_to_port(Process *c_p, Eterm message,
Eterm *tracer_pid, Uint *tracee_flags) {
Port* trace_port;
#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4)
- Eterm ts, *hp;
+ int ts_type;
+#define LOCAL_HEAP_SIZE ERTS_TRACE_PATCH_TS_MAX_SIZE
+ Eterm ts;
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
#endif
@@ -518,7 +763,7 @@ send_to_port(Process *c_p, Eterm message,
*/
if ( c_p == NULL ||
- (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP))) {
+ (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP_MASK))) {
#endif
do_send_to_port(*tracer_pid,
trace_port,
@@ -537,22 +782,12 @@ send_to_port(Process *c_p, Eterm message,
*/
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- if (*tracee_flags & F_TIMESTAMP) {
- ASSERT(is_tuple(message));
- hp = tuple_val(message);
- ts = hp[arityval(hp[0])];
- } else {
- /* A fake schedule might be needed,
- * but this message does not contain a timestamp.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- Uint ms,s,us;
- GET_NOW(&ms, &s, &us);
- hp = local_heap;
- ts = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
- hp += 4;
- }
+ /* A fake schedule might be needed.
+ * Create a dummy trace message with timestamp to be
+ * passed to do_send_schedfix_to_port().
+ */
+ ts_type = TFLGS_TS_TYPE(c_p);
+ ts = write_ts(ts_type, local_heap, NULL, NULL);
trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
do_send_to_port(*tracer_pid,
@@ -571,7 +806,7 @@ send_to_port(Process *c_p, Eterm message,
 * just after writing the real trace message, and now gets scheduled
* in again.
*/
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
+ do_send_schedfix_to_port(trace_port, c_p->common.id, ts, ts_type);
}
erts_port_release(trace_port);
@@ -642,20 +877,19 @@ profile_send(Eterm from, Eterm message) {
/* A fake schedule out/in message pair will be sent,
* if the driver so requests.
- * If (timestamp == NIL), one is fetched from GET_NOW().
*
* 'c_p' is the currently executing process, may be NULL.
*/
static void
seq_trace_send_to_port(Process *c_p,
Eterm seq_tracer,
- Eterm message,
- Eterm timestamp)
+ Eterm message)
{
Port* trace_port;
#ifndef ERTS_SMP
- Eterm ts, *hp;
-#define LOCAL_HEAP_SIZE (4)
+ int ts_type;
+ Eterm ts;
+#define LOCAL_HEAP_SIZE ERTS_TRACE_PATCH_TS_MAX_SIZE
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#endif
@@ -680,7 +914,7 @@ seq_trace_send_to_port(Process *c_p,
}
if (c_p == NULL
- || (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP))) {
+ || (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP_MASK))) {
#endif
do_send_to_port(seq_tracer,
trace_port,
@@ -697,20 +931,12 @@ seq_trace_send_to_port(Process *c_p,
* with 'running' and 'timestamp'.
*/
- if (timestamp != NIL) {
- ts = timestamp;
- } else {
- /* A fake schedule might be needed,
- * but this message does not contain a timestamp.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- Uint ms,s,us;
- GET_NOW(&ms, &s, &us);
- hp = local_heap;
- ts = TUPLE3(hp, make_small(ms), make_small(s), make_small(us));
- hp += 4;
- }
+ /* A fake schedule might be needed.
+ * Create a dummy trace message with timestamp to be
+ * passed to do_send_schedfix_to_port().
+ */
+ ts_type = TFLGS_TS_TYPE(c_p);
+ ts = write_ts(ts_type, local_heap, NULL, NULL);
trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
do_send_to_port(seq_tracer,
@@ -729,7 +955,7 @@ seq_trace_send_to_port(Process *c_p,
* just after writing the real trace message, and now gets scheduled
* in again.
*/
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
+ do_send_schedfix_to_port(trace_port, c_p->common.id, ts, ts_type);
}
erts_port_release(trace_port);
@@ -739,32 +965,6 @@ seq_trace_send_to_port(Process *c_p,
#endif
}
-#define TS_HEAP_WORDS 5
-#define TS_SIZE(p) ((ERTS_TRACE_FLAGS((p)) & F_TIMESTAMP) \
- ? TS_HEAP_WORDS \
- : 0)
-
-/*
- * Patch a timestamp into a tuple. The tuple must be the last thing
- * built on the heap.
- *
- * Returns the new hp pointer.
-*/
-static Eterm*
-patch_ts(Eterm tuple, Eterm* hp)
-{
- Uint ms, s, us;
- Eterm* ptr = tuple_val(tuple);
- int arity = arityval(*ptr);
-
- ASSERT((ptr+arity+1) == hp);
- ptr[0] = make_arityval(arity+1);
- ptr[1] = am_trace_ts;
- GET_NOW(&ms, &s, &us);
- *hp = TUPLE3(hp+1, make_small(ms), make_small(s), make_small(us));
- return hp+5;
-}
-
static ERTS_INLINE void
send_to_tracer(Process *tracee,
ERTS_TRACER_REF_TYPE tracer_ref,
@@ -777,13 +977,13 @@ send_to_tracer(Process *tracee,
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(tracee) & F_TIMESTAMP)
- *hpp = patch_ts(msg, *hpp);
-
- if (is_internal_pid(ERTS_TRACER_PROC(tracee)))
+ if (is_internal_pid(ERTS_TRACER_PROC(tracee))) {
+ PATCH_TS(TFLGS_TS_TYPE(tracee), msg, *hpp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(tracee->common.id, tracer_ref, msg, bp);
+ }
else {
ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee)));
+ PATCH_TS(TFLGS_TS_TYPE(tracee), msg, *hpp, NULL, NULL);
send_to_port(no_fake_sched ? NULL : tracee,
msg,
&ERTS_TRACER_PROC(tracee),
@@ -797,7 +997,7 @@ send_to_tracer(Process *tracee,
static void
trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
{
-#define LOCAL_HEAP_SIZE (5+4+1+TS_HEAP_WORDS)
+#define LOCAL_HEAP_SIZE (5+4+1+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
Eterm tmp, mess, *hp;
ErlHeapFragment *bp = NULL;
@@ -853,7 +1053,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
size += 4;
if (sched_no)
size += 1;
- size += TS_SIZE(p);
+ size += PATCH_TS_SIZE(p);
hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
}
@@ -927,7 +1127,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
}
if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (11)
+#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -935,9 +1135,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -956,7 +1154,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
sz_msg = size_object(msg);
sz_to = size_object(to);
- need = sz_msg + sz_to + 6 + TS_SIZE(p);
+ need = sz_msg + sz_to + 6 + PATCH_TS_SIZE(p);
hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
@@ -973,10 +1171,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- patch_ts(mess, hp);
- }
-
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -993,7 +1188,7 @@ trace_receive(Process *rp, Eterm msg)
Eterm* hp;
if (is_internal_port(ERTS_TRACER_PROC(rp))) {
-#define LOCAL_HEAP_SIZE (10)
+#define LOCAL_HEAP_SIZE (5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -1001,9 +1196,7 @@ trace_receive(Process *rp, Eterm msg)
mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(rp), mess, hp, NULL, NULL);
send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -1022,7 +1215,7 @@ trace_receive(Process *rp, Eterm msg)
sz_msg = size_object(msg);
- hsz = sz_msg + 5 + TS_SIZE(rp);
+ hsz = sz_msg + 5 + PATCH_TS_SIZE(rp);
hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref);
@@ -1032,10 +1225,7 @@ trace_receive(Process *rp, Eterm msg)
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
- patch_ts(mess, hp);
- }
-
+ PATCH_TS(TFLGS_TS_TYPE(rp), mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(rp->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -1081,6 +1271,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
Eterm type_atom;
int sz_exit;
Eterm seq_tracer;
+ int ts_type;
seq_tracer = erts_get_system_seq_tracer();
@@ -1108,8 +1299,10 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
return; /* no need to send anything */
}
+ ts_type = ERTS_SEQTFLGS2TSTYPE(unsigned_val(SEQ_TRACE_T_FLAGS(token)));
+
if (is_internal_port(seq_tracer)) {
-#define LOCAL_HEAP_SIZE (64)
+#define LOCAL_HEAP_SIZE (60 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -1125,17 +1318,17 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token),
receiver, msg);
hp += 6;
+
erts_smp_mtx_lock(&smq_mtx);
- if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & SEQ_TRACE_TIMESTAMP) == 0) {
+ if (!ts_type) {
mess = TUPLE3(hp, am_seq_trace, label, mess);
- seq_trace_send_to_port(NULL, seq_tracer, mess, NIL);
+ seq_trace_send_to_port(NULL, seq_tracer, mess);
} else {
- Uint ms,s,us,ts;
- GET_NOW(&ms, &s, &us);
- ts = TUPLE3(hp, make_small(ms),make_small(s), make_small(us));
- hp += 4;
- mess = TUPLE4(hp, am_seq_trace, label, mess, ts);
- seq_trace_send_to_port(process, seq_tracer, mess, ts);
+ mess = TUPLE4(hp, am_seq_trace, label, mess,
+ NIL /* Will be overwritten by timestamp */);
+ hp += 5;
+ hp[-1] = write_ts(ts_type, hp, NULL, NULL);
+ seq_trace_send_to_port(process, seq_tracer, mess);
}
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -1170,8 +1363,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
sz_lastcnt_serial = 3; /* TUPLE2 */
sz_msg = size_object(msg);
- sz_ts = ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & SEQ_TRACE_TIMESTAMP) ?
- 5 : 0);
+ sz_ts = patch_ts_size(ts_type);
if (exitfrom != NIL) {
sz_exit = 4; /* create {'EXIT',exitfrom,msg} */
sz_exitfrom = size_object(exitfrom);
@@ -1215,14 +1407,20 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
erts_smp_mtx_lock(&smq_mtx);
- if (sz_ts) {/* timestamp should be included */
- Uint ms,s,us,ts;
- GET_NOW(&ms, &s, &us);
- ts = TUPLE3(hp, make_small(ms),make_small(s), make_small(us));
- hp += 4;
- mess = TUPLE4(hp, am_seq_trace, label, mess, ts);
- } else {
+ if (!ts_type)
mess = TUPLE3(hp, am_seq_trace, label, mess);
+ else {
+ mess = TUPLE4(hp, am_seq_trace, label, mess,
+ NIL /* Will be overwritten by timestamp */);
+ hp += 5;
+ /* Write timestamp in element 6 of the 'msg' tuple */
+ hp[-1] = write_ts(ts_type, hp, bp,
+#ifndef ERTS_SMP
+ tracer
+#else
+ NULL
+#endif
+ );
}
#ifdef ERTS_SMP
@@ -1245,7 +1443,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
void
erts_trace_return_to(Process *p, BeamInstr *pc)
{
-#define LOCAL_HEAP_SIZE (4+5+5)
+#define LOCAL_HEAP_SIZE (4+5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
Eterm* hp;
Eterm mfa;
Eterm mess;
@@ -1270,9 +1468,7 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
if (is_internal_port(ERTS_TRACER_PROC(p))) {
send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
@@ -1319,6 +1515,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
Eterm mod, name;
int arity;
Uint meta_flags, *tracee_flags;
+ int ts_type;
#ifdef ERTS_SMP
Eterm tracee;
#endif
@@ -1354,7 +1551,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
* meta trace =>
* use fixed flag set instead of process flags
*/
- meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
#ifdef ERTS_SMP
tracee = NIL;
@@ -1368,8 +1565,10 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
name = fi[1];
arity = fi[2];
+ ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
+
if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+6+5)
+#define LOCAL_HEAP_SIZE (4+6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
@@ -1378,9 +1577,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(ts_type, mess, hp, NULL, NULL);
send_to_port(p, mess, tracer_pid, tracee_flags);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -1391,24 +1588,15 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
ERTS_TRACER_REF_TYPE tracer_ref;
unsigned size;
unsigned retval_size;
-#ifdef DEBUG
- Eterm* limit;
-#endif
ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
-
+
retval_size = size_object(retval);
- size = 6 + 4 + retval_size;
- if (*tracee_flags & F_TIMESTAMP) {
- size += 1+4;
- }
+ size = 6 + 4 + retval_size + patch_ts_size(ts_type);
hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-#ifdef DEBUG
- limit = hp + size;
-#endif
/*
* Build the trace tuple and put it into receive queue of the tracer process.
@@ -1422,11 +1610,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ASSERT(hp == limit);
+ PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
@@ -1449,6 +1633,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
Eterm cv;
Eterm mess;
Uint meta_flags, *tracee_flags;
+ int ts_type;
#ifdef ERTS_SMP
Eterm tracee;
#endif
@@ -1487,15 +1672,17 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
* meta trace =>
* use fixed flag set instead of process flags
*/
- meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
#ifdef ERTS_SMP
tracee = NIL;
#endif
}
+ ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
+
if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+3+6+5)
+#define LOCAL_HEAP_SIZE (4+3+6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -1508,10 +1695,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
hp += 6;
ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE);
erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp); /* hp += 5 */
- ASSERT((hp - local_heap) == LOCAL_HEAP_SIZE);
- }
+ PATCH_TS(ts_type, mess, hp, NULL, NULL);
send_to_port(p, mess, tracer_pid, tracee_flags);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -1522,24 +1706,15 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
ERTS_TRACER_REF_TYPE tracer_ref;
unsigned size;
unsigned value_size;
-#ifdef DEBUG
- Eterm* limit;
-#endif
ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
value_size = size_object(value);
- size = 6 + 4 + 3 + value_size;
- if (*tracee_flags & F_TIMESTAMP) {
- size += 1+4;
- }
+ size = 6 + 4 + 3 + value_size + patch_ts_size(ts_type);
hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-#ifdef DEBUG
- limit = hp + size;
-#endif
/*
* Build the trace tuple and put it into receive queue of the tracer process.
@@ -1556,11 +1731,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ASSERT(hp == limit);
+ PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
@@ -1593,6 +1764,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
Eterm pam_result = am_true;
Eterm mess;
Uint meta_flags, *tracee_flags;
+ int ts_type;
#ifdef ERTS_SMP
Eterm tracee;
#endif
@@ -1634,7 +1806,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
/* No trace messages for sensitive processes. */
return 0;
}
- meta_flags = F_TRACE_CALLS | F_TIMESTAMP;
+ meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
#ifdef ERTS_SMP
tracee = NIL;
@@ -1677,8 +1849,10 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
}
args = transformed_args;
+ ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
+
if (is_internal_port(*tracer_pid)) {
- Eterm local_heap[64+MAX_ARG];
+ Eterm local_heap[64+ERTS_TRACE_PATCH_TS_MAX_SIZE+MAX_ARG];
hp = local_heap;
if (!erts_is_valid_tracer_port(*tracer_pid)) {
@@ -1777,9 +1951,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
*hp++ = pam_result;
}
erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(ts_type, mess, hp, NULL, NULL);
send_to_port(p, mess, tracer_pid, tracee_flags);
erts_smp_mtx_unlock(&smq_mtx);
erts_match_set_release_result(p);
@@ -1798,9 +1970,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
unsigned sizes[MAX_ARG];
unsigned pam_result_size = 0;
int invalid_tracer;
-#ifdef DEBUG
- Eterm* limit;
-#endif
+
ASSERT(is_internal_pid(*tracer_pid));
tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
@@ -1892,10 +2062,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
size += sizes[i];
}
}
- if (*tracee_flags & F_TIMESTAMP) {
- size += 1 + 4;
- /* One element in trace tuple + timestamp tuple. */
- }
+ size += patch_ts_size(ts_type);
if (pam_result != am_true) {
pam_result_size = size_object(pam_result);
size += 1 + pam_result_size;
@@ -1903,9 +2070,6 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
}
hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-#ifdef DEBUG
- limit = hp + size;
-#endif
/*
* Build the the {M,F,A} tuple in the message buffer.
@@ -1948,11 +2112,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
erts_smp_mtx_lock(&smq_mtx);
- if (*tracee_flags & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
- ASSERT(hp == limit);
+ PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
@@ -1987,9 +2147,7 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, NULL, NULL);
send_to_port(
#ifndef ERTS_SMP
/* No fake schedule out and in again after an exit */
@@ -2019,7 +2177,7 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
sz_data = size_object(data);
- need = sz_data + 5 + TS_SIZE(t_p);
+ need = sz_data + 5 + PATCH_TS_SIZE(t_p);
hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
@@ -2029,9 +2187,7 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
@@ -2065,9 +2221,7 @@ trace_proc_spawn(Process *p, Eterm pid,
mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, pid, mfa);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -2088,7 +2242,7 @@ trace_proc_spawn(Process *p, Eterm pid,
sz_args = size_object(args);
sz_pid = size_object(pid);
- need = sz_args + 4 + 6 + TS_SIZE(p);
+ need = sz_args + 4 + 6 + PATCH_TS_SIZE(p);
hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
@@ -2101,9 +2255,7 @@ trace_proc_spawn(Process *p, Eterm pid,
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
@@ -2140,11 +2292,6 @@ void save_calls(Process *p, Export *e)
void
trace_gc(Process *p, Eterm what)
{
- ERTS_DECL_AM(bin_vheap_size);
- ERTS_DECL_AM(bin_vheap_block_size);
- ERTS_DECL_AM(bin_old_vheap_size);
- ERTS_DECL_AM(bin_old_vheap_block_size);
-
ErlHeapFragment *bp = NULL;
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF; /* Initialized
@@ -2155,43 +2302,10 @@ trace_gc(Process *p, Eterm what)
Eterm msg = NIL;
Uint size;
- Eterm tags[] = {
- am_old_heap_block_size,
- am_heap_block_size,
- am_mbuf_size,
- am_recent_size,
- am_stack_size,
- am_old_heap_size,
- am_heap_size,
- AM_bin_vheap_size,
- AM_bin_vheap_block_size,
- AM_bin_old_vheap_size,
- AM_bin_old_vheap_block_size
- };
-
- UWord values[] = {
- OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
- HEAP_SIZE(p),
- MBUF_SIZE(p),
- HIGH_WATER(p) - HEAP_START(p),
- STACK_START(p) - p->stop,
- OLD_HEAP(p) ? OLD_HTOP(p) - OLD_HEAP(p) : 0,
- HEAP_TOP(p) - HEAP_START(p),
- MSO(p).overhead,
- BIN_VHEAP_SZ(p),
- BIN_OLD_VHEAP(p),
- BIN_OLD_VHEAP_SZ(p)
- };
#define LOCAL_HEAP_SIZE \
- (sizeof(values)/sizeof(*values)) * \
- (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE) + \
- 5/*4-tuple */ + TS_HEAP_WORDS
+ (ERTS_PROCESS_GC_INFO_MAX_SIZE) + \
+ 5/*4-tuple */ + ERTS_TRACE_PATCH_TS_MAX_SIZE
DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
-#ifdef DEBUG
- Eterm* limit;
-#endif
-
- ERTS_CT_ASSERT(sizeof(values)/sizeof(*values) == sizeof(tags)/sizeof(Eterm));
UseTmpHeap(LOCAL_HEAP_SIZE,p);
@@ -2199,12 +2313,9 @@ trace_gc(Process *p, Eterm what)
hp = local_heap;
#ifdef DEBUG
size = 0;
- (void) erts_bld_atom_uword_2tup_list(NULL,
- &size,
- sizeof(values)/sizeof(*values),
- tags,
- values);
- size += 5/*4-tuple*/ + TS_SIZE(p);
+ (void) erts_process_gc_info(p, &size, NULL);
+
+ size += 5/*4-tuple*/ + PATCH_TS_SIZE(p);
#endif
} else {
ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
@@ -2214,40 +2325,30 @@ trace_gc(Process *p, Eterm what)
ERTS_TRACE_FLAGS(p));
size = 0;
- (void) erts_bld_atom_uword_2tup_list(NULL,
- &size,
- sizeof(values)/sizeof(*values),
- tags,
- values);
- size += 5/*4-tuple*/ + TS_SIZE(p);
+ (void) erts_process_gc_info(p, &size, NULL);
+
+ size += 5/*4-tuple*/ + PATCH_TS_SIZE(p);
hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
}
-#ifdef DEBUG
- limit = hp + size;
ASSERT(size <= LOCAL_HEAP_SIZE);
-#endif
- msg = erts_bld_atom_uword_2tup_list(&hp,
- NULL,
- sizeof(values)/sizeof(*values),
- tags,
- values);
+ msg = erts_process_gc_info(p, NULL, &hp);
msg = TUPLE4(hp, am_trace, p->common.id, what, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(msg, hp);
- }
- ASSERT(hp == limit);
- if (is_internal_port(ERTS_TRACER_PROC(p)))
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
+ PATCH_TS(TFLGS_TS_TYPE(p), msg, hp, NULL, NULL);
send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- else
+ }
+ else {
+ PATCH_TS(TFLGS_TS_TYPE(p), msg, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, msg, bp);
+ }
erts_smp_mtx_unlock(&smq_mtx);
UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
#undef LOCAL_HEAP_SIZE
@@ -2571,19 +2672,18 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
void
profile_scheduler(Eterm scheduler_id, Eterm state) {
- Eterm *hp, msg, timestamp;
- Uint Ms, s, us;
+ Eterm *hp, msg;
+ ErlHeapFragment *bp = NULL;
#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 7)
+#define LOCAL_HEAP_SIZE (7 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
#else
- ErlHeapFragment *bp;
Uint hsz;
- hsz = 4 + 7;
+ hsz = 7 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
@@ -2603,10 +2703,13 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
break;
}
- GET_NOW(&Ms, &s, &us);
- timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id, state,
- make_small(active_sched), timestamp); hp += 7;
+ msg = TUPLE6(hp, am_profile, am_scheduler, scheduler_id,
+ state, make_small(active_sched),
+ NIL /* Will be overwritten by timestamp */);
+ hp += 7;
+
+ /* Write timestamp in element 6 of the 'msg' tuple */
+ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
#ifndef ERTS_SMP
profile_send(NIL, msg);
@@ -2677,7 +2780,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
Eterm* hp;
if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (5+6)
+#define LOCAL_HEAP_SIZE (6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2686,9 +2789,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
/* No fake schedule */
send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2702,7 +2803,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- sz_data = 6 + TS_SIZE(p);
+ sz_data = 6 + PATCH_TS_SIZE(p);
ERTS_GET_TRACER_REF(tracer_ref,
ERTS_TRACER_PROC(p),
@@ -2715,9 +2816,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
@@ -2741,7 +2840,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
|| erts_thr_progress_is_blocking());
if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
-#define LOCAL_HEAP_SIZE (5+5)
+#define LOCAL_HEAP_SIZE (5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2749,9 +2848,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, NULL, NULL);
/* No fake schedule */
send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2765,7 +2862,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
- sz_data = 5 + TS_SIZE(t_p);
+ sz_data = 5 + PATCH_TS_SIZE(t_p);
ERTS_GET_TRACER_REF(tracer_ref,
ERTS_TRACER_PROC(t_p),
@@ -2778,9 +2875,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+ PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
@@ -2808,7 +2903,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
Eterm sched_id = am_undefined;
if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (5+6)
+#define LOCAL_HEAP_SIZE (6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2831,9 +2926,8 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
hp += ws;
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
+
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
/* No fake scheduling */
send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
@@ -2853,7 +2947,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
ERTS_TRACER_PROC(p),
ERTS_TRACE_FLAGS(p));
- hp = ERTS_ALLOC_SYSMSG_HEAP(ws+TS_SIZE(p), &bp, &off_heap, tracer_ref);
+ hp = ERTS_ALLOC_SYSMSG_HEAP(ws+PATCH_TS_SIZE(p), &bp, &off_heap, tracer_ref);
if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
#ifdef ERTS_SMP
@@ -2871,10 +2965,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
erts_smp_mtx_lock(&smq_mtx);
- if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
- hp = patch_ts(mess, hp);
- }
-
+ PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2884,13 +2975,12 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
void
profile_runnable_port(Port *p, Eterm status) {
- Uint Ms, s, us;
- Eterm *hp, msg, timestamp;
-
+ Eterm *hp, msg;
+ ErlHeapFragment *bp = NULL;
Eterm count = make_small(0);
#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 6)
+#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2898,10 +2988,9 @@ profile_runnable_port(Port *p, Eterm status) {
hp = local_heap;
#else
- ErlHeapFragment *bp;
Uint hsz;
- hsz = 4 + 6;
+ hsz = 6 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
@@ -2909,9 +2998,12 @@ profile_runnable_port(Port *p, Eterm status) {
erts_smp_mtx_lock(&smq_mtx);
- GET_NOW(&Ms, &s, &us);
- timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->common.id, status, count, timestamp); hp += 6;
+ msg = TUPLE5(hp, am_profile, p->common.id, status, count,
+ NIL /* Will be overwritten by timestamp */);
+ hp += 6;
+
+ /* Write timestamp in element 5 of the 'msg' tuple */
+ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
#ifndef ERTS_SMP
profile_send(p->common.id, msg);
@@ -2926,20 +3018,19 @@ profile_runnable_port(Port *p, Eterm status) {
/* Process profiling */
void
profile_runnable_proc(Process *p, Eterm status){
- Uint Ms, s, us;
- Eterm *hp, msg, timestamp;
+ Eterm *hp, msg;
Eterm where = am_undefined;
+ ErlHeapFragment *bp = NULL;
#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 6 + 4)
+#define LOCAL_HEAP_SIZE (4 + 6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
#else
- ErlHeapFragment *bp;
- Uint hsz = 4 + 6 + 4;
+ Uint hsz = 4 + 6 + patch_ts_size(erts_system_profile_ts_type)-1;
#endif
if (!p->current) {
@@ -2948,7 +3039,7 @@ profile_runnable_proc(Process *p, Eterm status){
#ifdef ERTS_SMP
if (!p->current) {
- hsz = 4 + 6;
+ hsz -= 4;
}
bp = new_message_buffer(hsz);
@@ -2963,9 +3054,13 @@ profile_runnable_proc(Process *p, Eterm status){
erts_smp_mtx_lock(&smq_mtx);
- GET_NOW(&Ms, &s, &us);
- timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->common.id, status, where, timestamp); hp += 6;
+ msg = TUPLE5(hp, am_profile, p->common.id, status, where,
+ NIL /* Will be overwritten by timestamp */);
+ hp += 6;
+
+ /* Write timestamp in element 5 of the 'msg' tuple */
+ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
+
#ifndef ERTS_SMP
profile_send(p->common.id, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
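patch_ts() above turns, for example, a {trace, Pid, What, Data} tuple into {trace_ts, Pid, What, Data, Timestamp} by bumping the arity word in place and appending the timestamp, which write_ts() may write smaller than the over-allocated maximum so the callers then shrink the heap. A rough illustration of the arity-bump idea on a plain array of machine words; the tuple layout and symbolic values are simplified stand-ins, not real Eterms.

    /* Sketch of the "append one element by rewriting the arity word" trick
     * used by patch_ts(); the layout here is [arity, elem1, ..., elemN]
     * with plain integers instead of tagged Eterms. */
    #include <stdio.h>

    int main(void)
    {
        long heap[16];
        long *hp = heap;
        long *tuple;

        /* Build a 4-tuple {trace, pid, what, data} (symbolic values). */
        tuple = hp;
        hp[0] = 4;      /* arity word */
        hp[1] = 111;    /* am_trace   */
        hp[2] = 222;    /* pid        */
        hp[3] = 333;    /* what       */
        hp[4] = 444;    /* data       */
        hp += 5;

        /* patch_ts(): the tuple is the last thing on the heap, so it can
         * grow in place: raise the arity, swap the tag atom, append ts. */
        tuple[0] = 5;   /* arity 4 -> 5            */
        tuple[1] = 112; /* am_trace -> am_trace_ts */
        *hp++ = 999;    /* timestamp written last  */

        printf("arity=%ld last=%ld used=%ld words\n",
               tuple[0], hp[-1], (long) (hp - heap));
        return 0;
    }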
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 7405490f76..a0058264d7 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -18,8 +18,38 @@
* %CopyrightEnd%
*/
+#ifndef ERL_TRACE_H__FLAGS__
+#define ERL_TRACE_H__FLAGS__
+/*
+ * NOTE! The bits used for these flags matter. The flag with
+ * the least significant bit will take precedence!
+ *
+ * The "now timestamp" has highest precedence due to
+ * compatibility reasons.
+ */
+#define ERTS_TRACE_FLG_NOW_TIMESTAMP (1 << 0)
+#define ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP (1 << 1)
+#define ERTS_TRACE_FLG_MONOTONIC_TIMESTAMP (1 << 2)
+
+/*
+ * The bits used affect trace flags (of processes and ports)
+ * as well as sequential trace flags. If they are changed, make sure
+ * these aren't messed up...
+ */
+#define ERTS_TRACE_TS_TYPE_BITS 3
+#define ERTS_TRACE_TS_TYPE_MASK \
+ ((1 << ERTS_TRACE_TS_TYPE_BITS) - 1)
+
+#define ERTS_TFLGS2TSTYPE(TFLGS) \
+ ((int) (((TFLGS) >> ERTS_TRACE_FLAGS_TS_TYPE_SHIFT) \
+ & ERTS_TRACE_TS_TYPE_MASK))
+#define ERTS_SEQTFLGS2TSTYPE(SEQTFLGS) \
+ ((int) (((SEQTFLGS) >> ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT) \
+ & ERTS_TRACE_TS_TYPE_MASK))
+
+#endif /* ERL_TRACE_H__FLAGS__ */
-#ifndef ERL_TRACE_H__
+#if !defined(ERL_TRACE_H__) && !defined(ERTS_ONLY_INCLUDE_TRACE_FLAGS)
#define ERL_TRACE_H__
struct binary;
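Several timestamp flags can be set at once; take_timestamp() and patch_ts_size() in erl_trace.c pick the winner with `ts_type & -ts_type`, which isolates the least significant set bit, so the "now" timestamp wins exactly as the flag comment above requires. A small standalone check of that identity, with the flag values taken from the definitions above.

    /* The least-significant-set-bit trick used to give the lowest flag
     * precedence. */
    #include <stdio.h>

    #define FLG_NOW        (1 << 0)
    #define FLG_STRICT_MON (1 << 1)
    #define FLG_MON        (1 << 2)

    int main(void)
    {
        int ts_type = FLG_MON | FLG_NOW;   /* both requested        */
        int winner  = ts_type & -ts_type;  /* lowest bit -> FLG_NOW */
        printf("winner == FLG_NOW: %d\n", winner == FLG_NOW);
        return 0;
    }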
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 3f5925765d..e8a7573e86 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1448,6 +1448,8 @@ ERTS_GLB_FORCE_INLINE int erts_is_literal(Eterm tptr, Eterm *ptr)
#endif
+Eterm erts_msacc_request(Process *c_p, int action, Eterm *threads);
+
/*
** Call_trace uses this API for the parameter matching functions
*/
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 93c591b124..41b0fcdd1b 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -50,6 +50,7 @@
#include "erl_map.h"
#include "erl_bif_unique.h"
#include "erl_hl_timer.h"
+#include "erl_time.h"
extern ErlDrvEntry fd_driver_entry;
extern ErlDrvEntry vanilla_driver_entry;
@@ -705,6 +706,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
error_number = error_type = 0;
if (driver->start) {
+ ERTS_MSACC_PUSH_STATE_M();
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_in, am_start);
}
@@ -715,6 +717,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
DTRACE3(driver_start, process_str, driver->name, port_str);
}
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
fpe_was_unmasked = erts_block_fpe();
drv_data = (*driver->start)(ERTS_Port2ErlDrvPort(port), name, opts);
if (((SWord) drv_data) == -1)
@@ -734,6 +737,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
port->caller = NIL;
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_out, am_start);
@@ -1709,6 +1713,7 @@ call_driver_outputv(int bang_op,
else {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErlDrvSizeT size = evp->size;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
@@ -1729,6 +1734,8 @@ call_driver_outputv(int bang_op,
esdp->io.out += (Uint64) size;
else
erts_atomic64_add_nob(&bytes_out, (erts_aint64_t) size);
+
+ ERTS_MSACC_POP_STATE_M();
}
}
@@ -1809,6 +1816,7 @@ call_driver_output(int bang_op,
send_badsig(prt);
else {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
@@ -1828,6 +1836,8 @@ call_driver_output(int bang_op,
esdp->io.out += (Uint64) size;
else
erts_atomic64_add_nob(&bytes_out, (erts_aint64_t) size);
+
+ ERTS_MSACC_POP_STATE_M();
}
}
@@ -3472,6 +3482,7 @@ static void flush_port(Port *p)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
if (p->drv_ptr->flush != NULL) {
+ ERTS_MSACC_PUSH_STATE_M();
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_flush)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
@@ -3481,9 +3492,11 @@ static void flush_port(Port *p)
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(p, am_in, am_flush);
}
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
fpe_was_unmasked = erts_block_fpe();
(*p->drv_ptr->flush)((ErlDrvData)p->drv_data);
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(p, am_out, am_flush);
}
@@ -3531,6 +3544,7 @@ terminate_port(Port *prt)
drv = prt->drv_ptr;
if ((drv != NULL) && (drv->stop != NULL)) {
int fpe_was_unmasked = erts_block_fpe();
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_stop)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(connected_id, prt)
@@ -3539,6 +3553,7 @@ terminate_port(Port *prt)
#endif
(*drv->stop)((ErlDrvData)prt->drv_data);
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
#ifdef ERTS_SMP
if (prt->xports)
erts_port_handle_xports(prt);
@@ -3849,6 +3864,7 @@ call_driver_control(Eterm caller,
ErlDrvSizeT *from_size)
{
ErlDrvSSizeT cres;
+ ERTS_MSACC_PUSH_STATE_M();
if (!prt->drv_ptr->control)
return ERTS_PORT_OP_BADARG;
@@ -3862,6 +3878,8 @@ call_driver_control(Eterm caller,
command, size);
}
#endif
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
prt->caller = caller;
cres = prt->drv_ptr->control((ErlDrvData) prt->drv_data,
@@ -3872,6 +3890,8 @@ call_driver_control(Eterm caller,
*from_size);
prt->caller = NIL;
+ ERTS_MSACC_POP_STATE_M();
+
if (cres < 0)
return ERTS_PORT_OP_BADARG;
@@ -4259,6 +4279,7 @@ call_driver_call(Eterm caller,
unsigned *ret_flagsp)
{
ErlDrvSSizeT cres;
+ ERTS_MSACC_PUSH_STATE_M();
if (!prt->drv_ptr->call)
return ERTS_PORT_OP_BADARG;
@@ -4274,6 +4295,8 @@ call_driver_call(Eterm caller,
}
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
+
prt->caller = caller;
cres = prt->drv_ptr->call((ErlDrvData) prt->drv_data,
command,
@@ -4284,6 +4307,8 @@ call_driver_call(Eterm caller,
ret_flagsp);
prt->caller = NIL;
+ ERTS_MSACC_POP_STATE_M();
+
if (cres <= 0
|| ((byte) (*resp_bufp)[0]) != VERSION_MAGIC)
return ERTS_PORT_OP_BADARG;
@@ -4987,6 +5012,7 @@ int get_port_flags(ErlDrvPort ix)
void erts_raw_port_command(Port* p, byte* buf, Uint len)
{
int fpe_was_unmasked;
+ ERTS_MSACC_PUSH_STATE_M();
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
@@ -5007,9 +5033,11 @@ void erts_raw_port_command(Port* p, byte* buf, Uint len)
DTRACE4(driver_output, "-raw-", port_str, p->name, len);
}
#endif
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
fpe_was_unmasked = erts_block_fpe();
(*p->drv_ptr->output)((ErlDrvData)p->drv_data, (char*) buf, (int) len);
erts_unblock_fpe(fpe_was_unmasked);
+ ERTS_MSACC_POP_STATE_M();
}
int async_ready(Port *p, void* data)
@@ -5021,6 +5049,7 @@ int async_ready(Port *p, void* data)
if (p) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
if (p->drv_ptr->ready_async != NULL) {
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_ready_async)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
@@ -5029,6 +5058,7 @@ int async_ready(Port *p, void* data)
#endif
(*p->drv_ptr->ready_async)((ErlDrvData)p->drv_data, data);
need_free = 0;
+ ERTS_MSACC_POP_STATE_M();
}
erts_port_driver_callback_epilogue(p, NULL);
@@ -6840,6 +6870,28 @@ driver_get_now(ErlDrvNowData *now_data)
return 0;
}
+ErlDrvTime
+erl_drv_monotonic_time(ErlDrvTimeUnit time_unit)
+{
+ return (ErlDrvTime) erts_napi_monotonic_time((int) time_unit);
+}
+
+ErlDrvTime
+erl_drv_time_offset(ErlDrvTimeUnit time_unit)
+{
+ return (ErlDrvTime) erts_napi_time_offset((int) time_unit);
+}
+
+ErlDrvTime
+erl_drv_convert_time_unit(ErlDrvTime val,
+ ErlDrvTimeUnit from,
+ ErlDrvTimeUnit to)
+{
+ return (ErlDrvTime) erts_napi_convert_time_unit((ErtsMonotonicTime) val,
+ (int) from,
+ (int) to);
+}
+
static void ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon)
{
RefThing *refp;
@@ -7030,6 +7082,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
void (*callback)(ErlDrvData drv_data, ErlDrvMonitor *monitor);
ErlDrvMonitor drv_monitor;
int fpe_was_unmasked;
+ ERTS_MSACC_PUSH_STATE_M();
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(prt->drv_ptr != NULL);
@@ -7041,6 +7094,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
callback = prt->drv_ptr->process_exit;
ASSERT(callback != NULL);
ref_to_driver_monitor(ref,&drv_monitor);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
DRV_MONITOR_UNLOCK_PDL(prt);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_process_exit)) {
@@ -7052,6 +7106,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
(*callback)((ErlDrvData) (prt->drv_data), &drv_monitor);
erts_unblock_fpe(fpe_was_unmasked);
DRV_MONITOR_LOCK_PDL(prt);
+ ERTS_MSACC_POP_STATE_M();
/* remove monitor *after* callback */
rmon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
DRV_MONITOR_UNLOCK_PDL(prt);
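
Besides the microstate-accounting bracketing, the io.c hunks above add the driver-side time API (erl_drv_monotonic_time, erl_drv_time_offset, erl_drv_convert_time_unit) as thin wrappers around the erts_napi_* helpers. A minimal sketch of how a linked-in driver might use them, assuming the ERL_DRV_USEC/ERL_DRV_MSEC unit constants from erl_drv_nif.h; the callback name and local variables are illustrative only, not part of this commit:

#include <erl_driver.h>

/* Sketch only: time the body of a driver callback in microseconds and
 * convert the elapsed time to milliseconds. */
static void my_outputv(ErlDrvData handle, ErlIOVec *ev)
{
    ErlDrvTime start, elapsed_usec, elapsed_msec;

    start = erl_drv_monotonic_time(ERL_DRV_USEC);

    /* ... actual output work ... */

    elapsed_usec = erl_drv_monotonic_time(ERL_DRV_USEC) - start;
    elapsed_msec = erl_drv_convert_time_unit(elapsed_usec,
                                             ERL_DRV_USEC, ERL_DRV_MSEC);
    (void) handle; (void) ev; (void) elapsed_msec;
}
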
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 081c4108a0..9e53b4bfcc 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -988,6 +988,13 @@ move Discarded x==0 | move Something x==0 => move Something x=0
%endif
+call_ext u==0 u$func:os:perf_counter/0 => \
+ i_perf_counter
+call_ext_last u==0 u$func:os:perf_counter/0 D => \
+ i_perf_counter | deallocate_return D
+call_ext_only u==0 u$func:os:perf_counter/0 => \
+ i_perf_counter | return
+
#
# The general case for BIFs that have no special instructions.
# A BIF used in the tail must be followed by a return instruction.
@@ -1027,6 +1034,8 @@ i_apply_fun_only
i_hibernate
+i_perf_counter
+
call_bif e
#
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 53f8313daa..e77b4b2dc8 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -136,6 +136,17 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_LIKELY(BOOL) (BOOL)
# define ERTS_UNLIKELY(BOOL) (BOOL)
#endif
+
+#if ERTS_AT_LEAST_GCC_VSN__(2, 96, 0)
+#ifndef __llvm__
+# define ERTS_WRITE_UNLIKELY(X) X __attribute__ ((section ("ERTS_LOW_WRITE") ))
+#else
+# define ERTS_WRITE_UNLIKELY(X) X __attribute__ ((section ("__DATA,ERTS_LOW_WRITE") ))
+#endif
+#else
+# define ERTS_WRITE_UNLIKELY(X) X
+#endif
+
#ifdef __GNUC__
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 5)
# define ERTS_DECLARE_DUMMY(X) X __attribute__ ((unused))
@@ -772,6 +783,10 @@ int local_to_univ(Sint *year, Sint *month, Sint *day,
void get_now(Uint*, Uint*, Uint*);
struct ErtsSchedulerData_;
ErtsMonotonicTime erts_get_monotonic_time(struct ErtsSchedulerData_ *);
+ErtsMonotonicTime erts_get_time_offset(void);
+void
+erts_make_timestamp_value(Uint* megasec, Uint* sec, Uint* microsec,
+ ErtsMonotonicTime mtime, ErtsMonotonicTime offset);
void get_sys_now(Uint*, Uint*, Uint*);
void set_break_quit(void (*)(void), void (*)(void));
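
The ERTS_WRITE_UNLIKELY macro added to sys.h places a declaration in a dedicated data section (with the Mach-O "segment,section" spelling under llvm), presumably so that rarely written globals do not share cache lines with write-heavy data. A minimal standalone sketch of the same idea, assuming GCC-style section attributes; the variable and section names below are made up for illustration:

#include <stdio.h>

/* Sketch only: group a seldom-written global into its own data section.
 * "my_low_write" is a placeholder name; ERTS uses "ERTS_LOW_WRITE". */
#if defined(__GNUC__) && !defined(__llvm__)
# define WRITE_UNLIKELY(decl) decl __attribute__((section("my_low_write")))
#else
# define WRITE_UNLIKELY(decl) decl
#endif

WRITE_UNLIKELY(static int mostly_read_flag) = 1;

int main(void)
{
    printf("flag=%d\n", mostly_read_flag);
    return 0;
}
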
diff --git a/erts/emulator/beam/time.c b/erts/emulator/beam/time.c
index 0ab6661c9f..48cb39333a 100644
--- a/erts/emulator/beam/time.c
+++ b/erts/emulator/beam/time.c
@@ -278,9 +278,14 @@ ErtsMonotonicTime
erts_check_next_timeout_time(ErtsSchedulerData *esdp)
{
ErtsTimerWheel *tiw = esdp->timer_wheel;
+ ErtsMonotonicTime time;
+ ERTS_MSACC_DECLARE_CACHE_X();
if (tiw->true_next_timeout_time)
return tiw->next_timeout_time;
- return find_next_timeout(esdp, tiw, 1, 0, 0);
+ ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_X(ERTS_MSACC_STATE_TIMERS);
+ time = find_next_timeout(esdp, tiw, 1, 0, 0);
+ ERTS_MSACC_POP_STATE_M_X();
+ return time;
}
#ifndef ERTS_TW_DEBUG
@@ -336,6 +341,7 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
{
int tiw_pos_ix, slots, yielded_slot_restarted, yield_count;
ErtsMonotonicTime bump_to, tmp_slots, old_pos;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
yield_count = ERTS_TWHEEL_BUMP_YIELD_LIMIT;
@@ -371,6 +377,7 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
tiw->next_timeout_time = curr_time + ERTS_MONOTONIC_DAY;
tiw->pos = bump_to;
tiw->yield_slot = ERTS_TWHEEL_SLOT_INACTIVE;
+ ERTS_MSACC_POP_STATE_M_X();
return;
}
@@ -382,6 +389,7 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
tiw->yield_slot = ERTS_TWHEEL_SLOT_AT_ONCE;
tiw->true_next_timeout_time = 1;
tiw->next_timeout_time = ERTS_CLKTCKS_TO_MONOTONIC(old_pos);
+ ERTS_MSACC_POP_STATE_M_X();
return;
}
@@ -400,8 +408,10 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
p = tiw->at_once.head;
}
- if (tiw->pos >= bump_to)
+ if (tiw->pos >= bump_to) {
+ ERTS_MSACC_POP_STATE_M_X();
break;
+ }
if (tiw->nto == 0)
goto empty_wheel;
@@ -478,6 +488,7 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
tiw->yield_slot = tiw_pos_ix;
tiw->yield_slots_left = slots;
tiw->yield_start_pos = old_pos;
+ ERTS_MSACC_POP_STATE_M_X();
return; /* Yield! */
}
@@ -500,6 +511,7 @@ erts_bump_timers(ErtsTimerWheel *tiw, ErtsMonotonicTime curr_time)
/* Search at most two seconds ahead... */
(void) find_next_timeout(NULL, tiw, 0, curr_time, ERTS_SEC_TO_MONOTONIC(2));
+ ERTS_MSACC_POP_STATE_M_X();
}
Uint
@@ -569,6 +581,7 @@ erts_twheel_set_timer(ErtsTimerWheel *tiw,
ErtsMonotonicTime timeout_pos)
{
ErtsMonotonicTime timeout_time;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
p->u.func.timeout = timeout;
p->u.func.cancel = cancel;
@@ -612,6 +625,7 @@ erts_twheel_set_timer(ErtsTimerWheel *tiw,
tiw->true_next_timeout_time = 1;
tiw->next_timeout_time = timeout_time;
}
+ ERTS_MSACC_POP_STATE_M_X();
}
void
@@ -620,11 +634,13 @@ erts_twheel_cancel_timer(ErtsTimerWheel *tiw, ErtsTWheelTimer *p)
if (p->slot != ERTS_TWHEEL_SLOT_INACTIVE) {
ErlCancelProc cancel;
void *arg;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M_X(ERTS_MSACC_STATE_TIMERS);
remove_timer(tiw, p);
cancel = p->u.func.cancel;
arg = p->u.func.arg;
if (cancel)
(*cancel)(arg);
+ ERTS_MSACC_POP_STATE_M_X();
}
}
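
The time.c hunks account timer-wheel work to its own microstate by pushing ERTS_MSACC_STATE_TIMERS on entry and popping the saved state on every exit path, including the early returns in erts_bump_timers(). Below is a minimal, standalone model of that push/set/pop discipline with simplified stand-in macros; the real ERTS_MSACC_* macros in erl_msacc.h are per-scheduler and also update the accumulated counters, which this sketch omits:

#include <stdio.h>

/* Simplified stand-ins: one saved-state slot per call frame instead of
 * the real per-scheduler accounting structure. */
enum msacc_state { STATE_EMULATOR, STATE_TIMERS };

static enum msacc_state current_state = STATE_EMULATOR;

#define MSACC_PUSH_AND_SET_STATE(new_state, save) \
    do { (save) = current_state; current_state = (new_state); } while (0)
#define MSACC_POP_STATE(save) \
    do { current_state = (save); } while (0)

static void bump_timers(int early_return)
{
    enum msacc_state saved;
    MSACC_PUSH_AND_SET_STATE(STATE_TIMERS, saved);

    if (early_return) {            /* every return path must pop, as in  */
        MSACC_POP_STATE(saved);    /* the erts_bump_timers() hunks above */
        return;
    }

    /* ... timer wheel work accounted to the "timers" state ... */
    MSACC_POP_STATE(saved);
}

int main(void)
{
    bump_timers(1);
    bump_timers(0);
    printf("restored to emulator state: %d\n", current_state == STATE_EMULATOR);
    return 0;
}
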