path: root/erts/emulator/beam
author     Lukas Larsson <[email protected]>   2016-04-15 15:10:55 +0200
committer  Lukas Larsson <[email protected]>   2016-04-15 15:10:55 +0200
commit     be7f01689e71cbc1896ea5c11fb0ebc6f7f6dd8a (patch)
tree       e7db1292fe6aa167408b7a7c93289fe2cca7e5c1 /erts/emulator/beam
parent     67df3b3792857a2b4885c0acbeaa7f32f3594b0c (diff)
parent     cff38617986001e0a5f3f48de20acbeccceea978 (diff)
Merge branch 'lukas/erts/tracer-module-and-more/PR-1009/OTP-10268'
* lukas/erts/tracer-module-and-more/PR-1009/OTP-10268: (26 commits)
  erts: Don't trace on link events when port is dead
  erts: more logging in trace_bif_SUITE trace_bif_return
  erts: Make trace_delivered go via sys msg dispatcher again
  erts: Add comment about future trace optimizations
  erts: Optimize tracer reload test
  erts: Document erlang:match_spec_test/3
  runtime_tools: Make dbg work with erl_tracer modules
  runtime_tools: Allow new timestamp trace flags
  runtime_tools: Lots of dbg docs updates
  erts: Deallocate heap fragments from trace nif calls
  eprof: Fix tests after tracer module incompatibilities
  fprof: update to work with new spawned trace event
  erts: Add 'spawned' trace event to 'procs' trace flag
  erts: send and receive no longer need status lock
  erts: Do 'unregister' as "self-tracing"
  erts: Silence harmless valgrind warning in dec_term
  erts: Fix end_per_testcase crash in local_trace_SUITE
  observer: Update ttb to work with tracing on ports
  runtime_tools: Update dbg to work with tracing on ports
  erts: Add tracing examples in match spec docs
  ...
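The central addition on this branch is the erl_tracer behaviour: a tracer can now be a module whose callbacks are implemented as NIFs, instead of only a pid or port. As rough orientation only — the module name simple_tracer and the enabled/3 and trace/5 callback arities below are assumptions based on the OTP 19 erl_tracer documentation, not something this diff itself shows — a minimal tracer NIF could be sketched as:

/* Hypothetical minimal erl_tracer NIF (sketch, not part of this commit). */
#include <erl_nif.h>

/* enabled(TraceTag, TracerState, Tracee) -> trace | discard | remove */
static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_atom(env, "trace");   /* always trace everything */
}

/* trace(TraceTag, TracerState, Tracee, TraceTerm, Opts) -> ok */
static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* a real tracer would forward argv[] somewhere; this sketch drops it */
    return enif_make_atom(env, "ok");
}

static ErlNifFunc funcs[] = {
    {"enabled", 3, enabled},
    {"trace",   5, trace}
};

ERL_NIF_INIT(simple_tracer, funcs, NULL, NULL, NULL, NULL)

On the Erlang side such a module would be passed as a {Module, State} tracer in the trace options; treat that exact shape as an assumption here as well.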
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names         |   15
-rw-r--r--  erts/emulator/beam/beam_bif_load.c    |    7
-rw-r--r--  erts/emulator/beam/beam_bp.c          |  169
-rw-r--r--  erts/emulator/beam/beam_bp.h          |   16
-rw-r--r--  erts/emulator/beam/beam_emu.c         |   14
-rw-r--r--  erts/emulator/beam/bif.c              |  101
-rw-r--r--  erts/emulator/beam/bif.tab            |    5
-rw-r--r--  erts/emulator/beam/code_ix.c          |    1
-rw-r--r--  erts/emulator/beam/dist.c             |   20
-rw-r--r--  erts/emulator/beam/erl_alloc.c        |    2
-rw-r--r--  erts/emulator/beam/erl_alloc.types    |    3
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c     |    2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c     |    5
-rw-r--r--  erts/emulator/beam/erl_bif_port.c     |   27
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c    |  600
-rw-r--r--  erts/emulator/beam/erl_db_util.c      |   74
-rw-r--r--  erts/emulator/beam/erl_gc.c           |    6
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c     |    6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c   |    5
-rw-r--r--  erts/emulator/beam/erl_map.h          |    2
-rw-r--r--  erts/emulator/beam/erl_message.c      |  166
-rw-r--r--  erts/emulator/beam/erl_message.h      |  110
-rw-r--r--  erts/emulator/beam/erl_msacc.c        |    2
-rw-r--r--  erts/emulator/beam/erl_nif.c          |  353
-rw-r--r--  erts/emulator/beam/erl_nif.h          |   10
-rw-r--r--  erts/emulator/beam/erl_port.h         |    2
-rw-r--r--  erts/emulator/beam/erl_port_task.c    |    2
-rw-r--r--  erts/emulator/beam/erl_process.c      |  229
-rw-r--r--  erts/emulator/beam/erl_process.h      |   27
-rw-r--r--  erts/emulator/beam/erl_process_lock.c |  191
-rw-r--r--  erts/emulator/beam/erl_process_lock.h |   38
-rw-r--r--  erts/emulator/beam/erl_ptab.h         |    8
-rw-r--r--  erts/emulator/beam/erl_time_sup.c     |    2
-rw-r--r--  erts/emulator/beam/erl_trace.c        | 2818
-rw-r--r--  erts/emulator/beam/erl_trace.h        |   88
-rw-r--r--  erts/emulator/beam/erlang_dtrace.d    |   29
-rw-r--r--  erts/emulator/beam/external.c         |    8
-rw-r--r--  erts/emulator/beam/global.h           |   28
-rw-r--r--  erts/emulator/beam/io.c               |  262
-rw-r--r--  erts/emulator/beam/register.c         |   14
-rw-r--r--  erts/emulator/beam/sys.h              |    9
-rw-r--r--  erts/emulator/beam/utils.c            |    2
42 files changed, 2819 insertions, 2659 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index d466f00028..8c51f788c0 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -161,6 +161,7 @@ atom close
atom closed
atom code
atom command
+atom commandv
atom compact
atom compat_rel
atom compile
@@ -197,6 +198,7 @@ atom dirty_cpu_schedulers_online
atom dirty_io
atom disable_trace
atom disabled
+atom discard
atom display_items
atom dist
atom dist_cmd
@@ -231,6 +233,7 @@ atom exception_trace
atom extended
atom Eq='=:='
atom Eqeq='=='
+atom erl_tracer
atom erlang
atom ERROR='ERROR'
atom error_handler
@@ -243,6 +246,9 @@ atom exact_reductions
atom exclusive
atom exit_status
atom existing
+atom existing_processes
+atom existing_ports
+atom existing
atom exiting
atom exports
atom external
@@ -353,6 +359,7 @@ atom match
atom match_limit
atom match_limit_recursion
atom match_spec
+atom match_spec_result
atom max
atom maximum
atom max_tables max_processes
@@ -401,6 +408,8 @@ atom net_kernel_terminated
atom never_utf
atom new
atom new_index
+atom new_processes
+atom new_ports
atom new_uniq
atom newline
atom next
@@ -540,6 +549,7 @@ atom scheme
atom scientific
atom scope
atom seconds
+atom send_to_non_existing_process
atom sensitive
atom sequential_tracer
atom sequential_trace_token
@@ -561,6 +571,7 @@ atom size
atom sl_alloc
atom spawn_executable
atom spawn_driver
+atom spawned
atom ssl_tls
atom stack_size
atom start
@@ -569,6 +580,7 @@ atom static
atom stderr_to_stdout
atom stop
atom stream
+atom strict_monotonic
atom strict_monotonic_timestamp
atom sunrm
atom suspend
@@ -596,8 +608,9 @@ atom total_active_tasks
atom total_heap_size
atom total_run_queue_lengths
atom tpkt
-atom trace trace_ts traced
+atom trace trace_ts traced
atom trace_control_word
+atom trace_status
atom tracer
atom trap_exit
atom trim
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 320dee6668..b10250dc49 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -719,13 +719,13 @@ set_default_trace_pattern(Eterm module)
Binary *match_spec;
Binary *meta_match_spec;
struct trace_pattern_flags trace_pattern_flags;
- Eterm meta_tracer_pid;
+ ErtsTracer meta_tracer;
erts_get_default_trace_pattern(&trace_pattern_is_on,
&match_spec,
&meta_match_spec,
&trace_pattern_flags,
- &meta_tracer_pid);
+ &meta_tracer);
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
@@ -733,7 +733,7 @@ set_default_trace_pattern(Eterm module)
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid, 1);
+ meta_tracer, 1);
}
}
@@ -1264,6 +1264,7 @@ delete_code(Module* modp)
modp->curr.code_length = 0;
modp->curr.catches = BEAM_CATCHES_NIL;
modp->curr.nif = NULL;
+
}
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 0a13454951..2ee98ed7b5 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -92,15 +92,15 @@ get_mtime(Process *c_p)
/*
** Helpers
*/
-static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- int local, Binary* ms, Eterm tracer_pid);
+static ErtsTracer do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, ErtsTracer tracer);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid);
+ enum erts_break_op count_op, ErtsTracer tracer);
static void set_function_break(BeamInstr *pc,
Binary *match_spec,
Uint break_flags,
enum erts_break_op count_op,
- Eterm tracer_pid);
+ ErtsTracer tracer);
static void clear_break(BpFunctions* f, Uint break_flags);
static int clear_function_break(BeamInstr *pc, Uint break_flags);
@@ -108,7 +108,7 @@ static int clear_function_break(BeamInstr *pc, Uint break_flags);
static BpDataTime* get_time_break(BeamInstr *pc);
static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
-static void bp_meta_unref(BpMetaPid* bmp);
+static void bp_meta_unref(BpMetaTracer* bmt);
static void bp_count_unref(BpCount* bcp);
static void bp_time_unref(BpDataTime* bdt);
static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
@@ -302,7 +302,7 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
MatchSetUnref(dst->local_ms);
}
if (flags & ERTS_BPF_META_TRACE) {
- bp_meta_unref(dst->meta_pid);
+ bp_meta_unref(dst->meta_tracer);
MatchSetUnref(dst->meta_ms);
}
if (flags & ERTS_BPF_COUNT) {
@@ -343,8 +343,8 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
MatchSetRef(dst->local_ms);
}
if (flags & ERTS_BPF_META_TRACE) {
- dst->meta_pid = src->meta_pid;
- erts_refc_inc(&dst->meta_pid->refc, 1);
+ dst->meta_tracer = src->meta_tracer;
+ erts_refc_inc(&dst->meta_tracer->refc, 1);
dst->meta_ms = src->meta_ms;
MatchSetRef(dst->meta_ms);
}
@@ -436,13 +436,13 @@ uninstall_breakpoint(BeamInstr* pc)
void
erts_set_trace_break(BpFunctions* f, Binary *match_spec)
{
- set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
+ set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, erts_tracer_true);
}
void
-erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
+erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
{
- set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
+ set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
@@ -450,13 +450,13 @@ erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- set_function_break(pc, match_spec, flags, 0, NIL);
+ set_function_break(pc, match_spec, flags, 0, erts_tracer_nil);
}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
+erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, ErtsTracer tracer)
{
- set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
+ set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
@@ -464,7 +464,7 @@ erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
{
set_function_break(pc, NULL,
ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
@@ -474,21 +474,21 @@ erts_clear_time_trace_bif(BeamInstr *pc) {
void
erts_set_debug_break(BpFunctions* f) {
- set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
+ set_break(f, NULL, ERTS_BPF_DEBUG, 0, erts_tracer_nil);
}
void
erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
{
set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
{
set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
@@ -625,19 +625,26 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
- (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, erts_tracer_true);
} else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
- (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
+ (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, erts_tracer_true);
}
if (bp_flags & ERTS_BPF_META_TRACE) {
- Eterm old_pid;
- Eterm new_pid;
-
- old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
- new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
- if (new_pid != old_pid) {
- erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
+ ErtsTracer old_tracer, new_tracer;
+
+ old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
+
+ new_tracer = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_tracer);
+ if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
+ if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
+ &bp->meta_tracer->tracer,
+ (erts_aint_t)new_tracer,
+ (erts_aint_t)old_tracer)) {
+ ERTS_TRACER_CLEAR(&old_tracer);
+ } else {
+ ERTS_TRACER_CLEAR(&new_tracer);
+ }
}
}
@@ -645,7 +652,8 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
erts_smp_atomic_inc_nob(&bp->count->acount);
}
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && erts_is_tracer_proc_valid(c_p)) {
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE
+ && ERTS_TRACER_PROC_IS_ENABLED(c_p)) {
Eterm w;
erts_trace_time_call(c_p, I, bp->time);
w = (BeamInstr) *c_p->cp;
@@ -690,7 +698,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
Eterm (*func)(Process*, Eterm*, BeamInstr*);
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
- Eterm meta_tracer_pid = NIL;
+ ErtsTracer meta_tracer = erts_tracer_nil;
int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
* is actually in the
* export entry */
@@ -718,23 +726,32 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
IS_TRACED_FL(p, F_TRACE_CALLS)) {
int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
flags = erts_call_trace(p, ep->code, bp->local_ms, args,
- local, &ERTS_TRACER_PROC(p));
+ local, &ERTS_TRACER(p));
}
if (bp_flags & ERTS_BPF_META_TRACE) {
- Eterm tpid1, tpid2;
+ ErtsTracer old_tracer;
- tpid1 = tpid2 =
- (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
+ old_tracer = meta_tracer;
flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
- 0, &tpid2);
- meta_tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ 0, &meta_tracer);
+
+ if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
+ ErtsTracer new_tracer = erts_tracer_nil;
+ erts_tracer_update(&new_tracer, meta_tracer);
+ if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
+ &bp->meta_tracer->tracer,
+ (erts_aint_t)new_tracer,
+ (erts_aint_t)old_tracer)) {
+ ERTS_TRACER_CLEAR(&old_tracer);
+ } else {
+ ERTS_TRACER_CLEAR(&new_tracer);
+ }
}
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
IS_TRACED_FL(p, F_TRACE_CALLS) &&
- erts_is_tracer_proc_valid(p)) {
+ ERTS_TRACER_PROC_IS_ENABLED(p)) {
BeamInstr *pc = (BeamInstr *)ep->code+3;
erts_trace_time_call(p, pc, bp->time);
}
@@ -778,8 +795,6 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
if (reason != TRAP) {
Eterm class;
Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
/* Expand error value like in handle_error() */
if (reason & EXF_ARGLIST) {
Eterm *tp;
@@ -788,7 +803,8 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
value = tp[1];
}
if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
+ Eterm *hp = HAlloc(p, 3);
+ value = TUPLE2(hp, am_nocatch, value);
reason = EXC_ERROR;
}
/* Note: expand_error_value() could theoretically
@@ -801,11 +817,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
+ &meta_tracer);
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
erts_trace_exception(p, ep->code, class, value,
- &ERTS_TRACER_PROC(p));
+ &ERTS_TRACER(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
/* can only happen if(local)*/
@@ -827,7 +843,6 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
}
- UnUseTmpHeapNoproc(3);
if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
@@ -836,11 +851,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
} else {
if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ erts_trace_return(p, ep->code, result, &meta_tracer);
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
+ erts_trace_return(p, ep->code, result, &ERTS_TRACER(p));
}
if (flags & MATCH_SET_RETURN_TO_TRACE) {
/* can only happen if(local)*/
@@ -857,9 +872,9 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
return result;
}
-static Eterm
+static ErtsTracer
do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- int local, Binary* ms, Eterm tracer_pid)
+ int local, Binary* ms, ErtsTracer tracer)
{
Eterm* cpp;
int return_to_trace = 0;
@@ -899,7 +914,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
ASSERT(is_CP(*cpp));
}
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
+ flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
if (cpp) {
c_p->cp = cp_save;
@@ -910,7 +925,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
need += 1;
}
if (flags & MATCH_SET_RX_TRACE) {
- need += 3;
+ need += 3 + size_object(tracer);
}
if (need) {
ASSERT(c_p->htop <= E && E <= c_p->hend);
@@ -926,14 +941,15 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
E[0] = make_cp(c_p->cp);
c_p->cp = beam_return_to_trace;
}
- if (flags & MATCH_SET_RX_TRACE) {
+ if (flags & MATCH_SET_RX_TRACE)
+ {
E -= 3;
+ c_p->stop = E;
ASSERT(c_p->htop <= E && E <= c_p->hend);
ASSERT(is_CP((Eterm) (UWord) (I - 3)));
- ASSERT(am_true == tracer_pid ||
- is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ ASSERT(IS_TRACER_VALID(tracer));
E[2] = make_cp(c_p->cp);
- E[1] = tracer_pid;
+ E[1] = copy_object(tracer, c_p);
E[0] = make_cp(I - 3); /* We ARE at the beginning of an
instruction,
the funcinfo is above i. */
@@ -942,9 +958,9 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- c_p->stop = E;
- return tracer_pid;
+ } else
+ c_p->stop = E;
+ return tracer;
}
void
@@ -1098,9 +1114,9 @@ erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
return 0;
}
-int
+int
erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret)
+ ErtsTracer *tracer_ret)
{
GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
@@ -1108,9 +1124,8 @@ erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
if (match_spec_ret) {
*match_spec_ret = bp->meta_ms;
}
- if (tracer_pid_ret) {
- *tracer_pid_ret =
- (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ if (tracer_ret) {
+ *tracer_ret = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
}
return 1;
}
@@ -1391,7 +1406,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
static void
set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid)
+ enum erts_break_op count_op, ErtsTracer tracer)
{
Uint i;
Uint n;
@@ -1400,13 +1415,13 @@ set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
for (i = 0; i < n; i++) {
BeamInstr* pc = f->matching[i].pc;
set_function_break(pc, match_spec, break_flags,
- count_op, tracer_pid);
+ count_op, tracer);
}
}
static void
set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid)
+ enum erts_break_op count_op, ErtsTracer tracer)
{
GenericBp* g;
GenericBpData* bp;
@@ -1439,7 +1454,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetUnref(bp->local_ms);
} else if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
- bp_meta_unref(bp->meta_pid);
+ bp_meta_unref(bp->meta_tracer);
} else if (common & ERTS_BPF_COUNT) {
if (count_op == erts_break_stop) {
bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
@@ -1474,13 +1489,15 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetRef(match_spec);
bp->local_ms = match_spec;
} else if (break_flags & ERTS_BPF_META_TRACE) {
- BpMetaPid* bmp;
+ BpMetaTracer* bmt;
+ ErtsTracer meta_tracer = erts_tracer_nil;
MatchSetRef(match_spec);
bp->meta_ms = match_spec;
- bmp = Alloc(sizeof(BpMetaPid));
- erts_refc_init(&bmp->refc, 1);
- erts_smp_atomic_init_nob(&bmp->pid, tracer_pid);
- bp->meta_pid = bmp;
+ bmt = Alloc(sizeof(BpMetaTracer));
+ erts_refc_init(&bmt->refc, 1);
+ erts_tracer_update(&meta_tracer, tracer); /* copy tracer */
+ erts_smp_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
+ bp->meta_tracer = bmt;
} else if (break_flags & ERTS_BPF_COUNT) {
BpCount* bcp;
@@ -1544,7 +1561,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
}
if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
- bp_meta_unref(bp->meta_pid);
+ bp_meta_unref(bp->meta_tracer);
}
if (common & ERTS_BPF_COUNT) {
ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
@@ -1560,10 +1577,12 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
}
static void
-bp_meta_unref(BpMetaPid* bmp)
+bp_meta_unref(BpMetaTracer* bmt)
{
- if (erts_refc_dectest(&bmp->refc, 0) <= 0) {
- Free(bmp);
+ if (erts_refc_dectest(&bmt->refc, 0) <= 0) {
+ ErtsTracer trc = erts_smp_atomic_read_nob(&bmt->tracer);
+ ERTS_TRACER_CLEAR(&trc);
+ Free(bmt);
}
}
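A note for readers of the beam_bp.c hunks above: the meta-trace breakpoint no longer stores a bare tracer pid but a reference-counted ErtsTracer, and updates it with a compare-and-swap so that exactly one reference is released whichever side loses the race. A standalone model of that hand-off — C11 atomics and illustrative names, not the emulator's own types or macros — might look like:

/* Sketch only: models the cmpxchg/ERTS_TRACER_CLEAR pattern, not ERTS code. */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct tracer {
    atomic_int refc;              /* reference count, assumed initialized >= 1 */
    /* tracer module + state would live here */
} tracer_t;

static void tracer_release(tracer_t *t)
{
    if (t && atomic_fetch_sub(&t->refc, 1) == 1)
        free(t);                  /* last reference gone */
}

/* Publish new_t into *slot only if it still holds old_t.  Exactly one of the
 * two references is dropped, so counts stay balanced whether the caller wins
 * or loses the race -- the invariant the breakpoint code maintains with
 * erts_smp_atomic_cmpxchg_acqb() and ERTS_TRACER_CLEAR(). */
static void swap_tracer(_Atomic(tracer_t *) *slot, tracer_t *old_t, tracer_t *new_t)
{
    if (atomic_compare_exchange_strong(slot, &old_t, new_t))
        tracer_release(old_t);    /* won: the slot no longer references old_t */
    else
        tracer_release(new_t);    /* lost: new_t was never published */
}

The emulator version additionally copies the tracer with erts_tracer_update() before attempting to publish it, as the erts_bif_trace hunk later in this diff shows.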
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index f9eca94e2a..bb171be8a6 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -55,15 +55,15 @@ typedef struct {
} BpCount;
typedef struct {
- erts_smp_atomic_t pid;
+ erts_smp_atomic_t tracer;
erts_refc_t refc;
-} BpMetaPid;
+} BpMetaTracer;
typedef struct generic_bp_data {
Uint flags;
Binary* local_ms; /* Match spec for local call trace */
Binary* meta_ms; /* Match spec for meta trace */
- BpMetaPid* meta_pid; /* Meta trace pid */
+ BpMetaTracer* meta_tracer; /* Meta tracer */
BpCount* count; /* For call count */
BpDataTime* time; /* For time trace */
} GenericBpData;
@@ -132,10 +132,10 @@ void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
void erts_clear_call_trace_bif(BeamInstr *pc, int local);
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
- Eterm tracer_pid);
+ ErtsTracer tracer);
void erts_clear_mtrace_break(BpFunctions *f);
void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
- Eterm tracer_pid);
+ ErtsTracer tracer);
void erts_clear_mtrace_bif(BeamInstr *pc);
void erts_set_debug_break(BpFunctions *f);
@@ -150,13 +150,13 @@ void erts_clear_export_break(Module *modp, BeamInstr* pc);
BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
- Uint32 *ret_flags, Eterm *tracer_pid);
+ Uint32 *ret_flags, ErtsTracer *tracer);
int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_rte);
+ ErtsTracer *tracer_ret);
int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret);
+ ErtsTracer *tracer_ret);
int erts_is_native_break(BeamInstr *pc);
int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 901419c989..254bfe4ba0 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -3346,7 +3346,7 @@ do { \
OpCase(normal_exit): {
SWAPOUT;
c_p->freason = EXC_NORMAL;
- c_p->arity = 0; /* In case this process will ever be garbed again. */
+ c_p->arity = 0; /* In case this process will never be garbed again. */
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
erts_do_exit_process(c_p, am_normal);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
@@ -3460,7 +3460,7 @@ do { \
typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
NifF* fp = vbf = (NifF*) I[1];
struct enif_environment_t env;
- erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2]);
+ erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
live_hf_end = c_p->mbuf;
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
if (env.exception_thrown)
@@ -4588,7 +4588,7 @@ do { \
SWAPOUT; /* Needed for shared heap */
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return(c_p, code, r(0), E+1/*Process tracer*/);
+ erts_trace_return(c_p, code, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
SWAPIN;
c_p->cp = NULL;
@@ -5228,7 +5228,8 @@ next_catch(Process* c_p, Eterm *reg) {
BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace) {
erts_trace_exception(c_p, cp_val(ptr[0]),
- reg[1], reg[2], ptr+1);
+ reg[1], reg[2],
+ ERTS_TRACER_FROM_ETERM(ptr+1));
/* Skip return_trace parameters */
ptr += 2;
} else if (cpp == beam_return_trace) {
@@ -5255,7 +5256,8 @@ next_catch(Process* c_p, Eterm *reg) {
}
if (cp_val(*prev) == beam_exception_trace) {
erts_trace_exception(c_p, cp_val(ptr[0]),
- reg[1], reg[2], ptr+1);
+ reg[1], reg[2],
+ ERTS_TRACER_FROM_ETERM(ptr+1));
}
/* Skip return_trace parameters */
ptr += 2;
@@ -6049,7 +6051,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
return -1;
}
#else /* ERTS_SMP */
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
if (!c_p->msg.len)
#endif
erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 754e11f047..ed5b2983dd 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -123,15 +123,12 @@ static int insert_internal_link(Process* p, Eterm rpid)
erts_add_link(&ERTS_P_LINKS(p), LINK_PID, rp->common.id);
erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, p->common.id);
- ASSERT(is_nil(ERTS_TRACER_PROC(p))
- || is_internal_pid(ERTS_TRACER_PROC(p))
- || is_internal_port(ERTS_TRACER_PROC(p)));
+ ASSERT(IS_TRACER_VALID(ERTS_TRACER(p)));
if (IS_TRACED(p)) {
if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) {
ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(rp) = ERTS_TRACER_PROC(p); /* maybe steal */
-
+ erts_tracer_replace(&rp->common, ERTS_TRACER(p));
if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */
ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
@@ -140,7 +137,8 @@ static int insert_internal_link(Process* p, Eterm rpid)
}
}
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(p, rp, am_getting_linked, p->common.id);
+ trace_proc(p, p == rp ? rp_locks : ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ rp, am_getting_linked, p->common.id);
if (p == rp)
erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
@@ -159,7 +157,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
DistEntry *dep;
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
- trace_proc(BIF_P, BIF_P, am_link, BIF_ARG_1);
+ trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_P, am_link, BIF_ARG_1);
}
/* check that the pid or port which is our argument is OK */
@@ -613,7 +611,7 @@ erts_queue_monitor_message(Process *p,
ref_copy = copy_struct(ref, ref_size, &hp, ohp);
tup = TUPLE5(hp, am_DOWN, ref_copy, type, item_copy, reason_copy);
- erts_queue_message(p, p_locksp, msgp, tup, NIL);
+ erts_queue_message(p, p_locksp, msgp, tup);
}
static BIF_RETTYPE
@@ -1001,6 +999,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
Process *rp;
DistEntry *dep;
ErtsLink *l = NULL, *rl = NULL;
+ ErtsProcLocks cp_locks = ERTS_PROC_LOCK_MAIN;
/*
* SMP specific note concerning incoming exit signals:
@@ -1015,7 +1014,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
*/
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
- trace_proc(BIF_P, BIF_P, am_unlink, BIF_ARG_1);
+ trace_proc(BIF_P, cp_locks, BIF_P, am_unlink, BIF_ARG_1);
}
if (is_internal_port(BIF_ARG_1)) {
@@ -1120,10 +1119,10 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ cp_locks |= ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS;
+
/* get process struct */
- rp = erts_pid2proc_opt(BIF_P, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCK_STATUS),
+ rp = erts_pid2proc_opt(BIF_P, cp_locks,
BIF_ARG_1, ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
@@ -1149,14 +1148,17 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
erts_destroy_link(rl);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) {
- trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->common.id);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ cp_locks &= ~ERTS_PROC_LOCK_STATUS;
+ trace_proc(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK),
+ rp, am_getting_unlinked, BIF_P->common.id);
}
if (rp != BIF_P)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
@@ -1909,7 +1911,7 @@ static Sint remote_send(Process *p, DistEntry *dep,
}
if (res >= 0) {
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, full_to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -1928,7 +1930,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
Eterm* tp;
if (is_internal_pid(to)) {
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -1957,7 +1959,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
rp = erts_proc_lookup_raw(id);
if (rp) {
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -1973,7 +1975,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
goto port_common;
}
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -2004,11 +2006,20 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
port_common:
ret_val = 0;
-
+
if (pt) {
int ps_flags = ctx->suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
*refp = NIL;
+ if (IS_TRACED_FL(p, F_TRACE_SEND)) /* trace once only !! */
+ trace_send(p, portid, msg);
+
+ if (have_seqtrace(SEQ_TRACE_TOKEN(p))) {
+ seq_trace_update_send(p);
+ seq_trace_output(SEQ_TRACE_TOKEN(p), msg,
+ SEQ_TRACE_SEND, portid, p);
+ }
+
switch (erts_port_command(p, ps_flags, pt, msg, refp)) {
case ERTS_PORT_OP_CALLER_EXIT:
/* We are exiting... */
@@ -2041,18 +2052,10 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
break;
}
}
-
- if (IS_TRACED(p)) /* trace once only !! */
- trace_send(p, portid, msg);
+
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
-
- if (have_seqtrace(SEQ_TRACE_TOKEN(p))) {
- seq_trace_update_send(p);
- seq_trace_output(SEQ_TRACE_TOKEN(p), msg,
- SEQ_TRACE_SEND, portid, p);
- }
-
+
if (ERTS_PROC_IS_EXITING(p)) {
KILL_CATCHES(p); /* Must exit */
return SEND_USER_ERROR;
@@ -2075,7 +2078,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
if (dep == erts_this_dist_entry) {
Eterm id;
erts_deref_dist_entry(dep);
- if (IS_TRACED(p))
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -2106,7 +2109,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext* ctx)
}
return ret;
} else {
- if (IS_TRACED(p)) /* XXX Is this really neccessary ??? */
+ if (IS_TRACED_FL(p, F_TRACE_SEND))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
@@ -4337,12 +4340,34 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
} else if (BIF_ARG_1 == am_trace_control_word) {
BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_2));
} else if (BIF_ARG_1 == am_sequential_tracer) {
- Eterm old_value = erts_set_system_seq_tracer(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2);
- if (old_value != THE_NON_VALUE) {
- BIF_RET(old_value);
- }
+ ErtsTracer new_seq_tracer, old_seq_tracer;
+ Eterm ret;
+
+ if (BIF_ARG_2 == am_false)
+ new_seq_tracer = erts_tracer_nil;
+ else
+ new_seq_tracer = erts_term_to_tracer(THE_NON_VALUE, BIF_ARG_2);
+
+ if (new_seq_tracer == THE_NON_VALUE)
+ goto error;
+
+ old_seq_tracer = erts_set_system_seq_tracer(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ new_seq_tracer);
+
+ ERTS_TRACER_CLEAR(&new_seq_tracer);
+
+ if (old_seq_tracer == THE_NON_VALUE)
+ goto error;
+
+ if (ERTS_TRACER_IS_NIL(old_seq_tracer))
+ BIF_RET(am_false);
+
+ ret = erts_tracer_to_term(BIF_P, old_seq_tracer);
+
+ ERTS_TRACER_CLEAR(&old_seq_tracer);
+
+ BIF_RET(ret);
} else if (BIF_ARG_1 == make_small(1)) {
int i, max;
ErtsMessage* mp;
@@ -4679,7 +4704,7 @@ skip_current_msgq(Process *c_p)
res = 0;
}
else {
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
c_p->msg.save = c_p->msg.last;
res = 1;
}
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 9bb77190d6..58cd31cee9 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -181,9 +181,8 @@ bif erlang:port_set_data/2
bif erlang:port_get_data/1
# Tracing & debugging.
-bif erlang:trace_pattern/2
-bif erlang:trace_pattern/3
-bif erlang:trace/3
+bif erts_internal:trace_pattern/3
+bif erts_internal:trace/3
bif erlang:trace_info/2
bif erlang:trace_delivered/1
bif erlang:seq_trace/2
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index 9da750e366..ec6267711b 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -94,6 +94,7 @@ void erts_commit_staging_code_ix(void)
ix = (ix + 1) % ERTS_NUM_CODE_IX;
erts_smp_atomic32_set_nob(&the_staging_code_index, ix);
export_staging_unlock();
+ erts_tracer_nif_clear();
CIX_TRACE("activate");
}
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 88449adb8e..52fd57e101 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -337,7 +337,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
erts_destroy_link(rlnk);
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
- trace_proc(NULL, rp, am_getting_unlinked, sublnk->pid);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, sublnk->pid);
}
}
erts_smp_proc_unlock(rp, rp_locks);
@@ -397,7 +397,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
msgp = erts_alloc_message_heap(rp, &rp_locks,
3, &hp, &ohp);
tup = TUPLE2(hp, am_nodedown, name);
- erts_queue_message(rp, &rp_locks, msgp, tup, NIL);
+ erts_queue_message(rp, &rp_locks, msgp, tup);
}
erts_smp_proc_unlock(rp, rp_locks);
}
@@ -1275,7 +1275,7 @@ int erts_net_message(Port *prt,
erts_smp_de_links_unlock(dep);
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, rp, am_getting_linked, from);
+ trace_proc(NULL, 0, rp, am_getting_linked, from);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
break;
@@ -1300,7 +1300,7 @@ int erts_net_message(Port *prt,
lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) {
- trace_proc(NULL, rp, am_getting_unlinked, from);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, from);
}
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
@@ -1628,7 +1628,11 @@ int erts_net_message(Port *prt,
ERTS_XSIG_FLG_IGN_KILL);
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
- trace_proc(NULL, rp, am_getting_unlinked, from);
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
+ trace_proc(NULL, 0, rp, am_getting_unlinked, from);
}
}
erts_smp_proc_unlock(rp, rp_locks);
@@ -1719,7 +1723,7 @@ decode_error:
}
data_error:
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- erts_deliver_port_exit(prt, dep->cid, am_killed, 0);
+ erts_deliver_port_exit(prt, dep->cid, am_killed, 0, 1);
ERTS_SMP_CHK_NO_PROC_LOCKS;
return -1;
}
@@ -2089,7 +2093,7 @@ erts_dist_command(Port *prt, int reds_limit)
erts_smp_de_runlock(dep);
if (status & ERTS_DE_SFLG_EXITING) {
- erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1);
erts_deref_dist_entry(dep);
return reds + ERTS_PORT_REDS_DIST_CMD_EXIT;
}
@@ -3313,7 +3317,7 @@ send_nodes_mon_msg(Process *rp,
}
ASSERT(hend == hp);
- erts_queue_message(rp, rp_locksp, mp, msg, NIL);
+ erts_queue_message(rp, rp_locksp, mp, msg);
}
static void
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index cfe0bc3205..a6519bd9e4 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -3210,7 +3210,7 @@ reply_alloc_info(void *vair)
if (hp != hp_end)
erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (air->req_sched == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 9482ab9265..d2fe440d47 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -270,6 +270,9 @@ type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues
type NEW_TIME_OFFSET SHORT_LIVED SYSTEM new_time_offset
type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request
+type TRACER_NIF LONG_LIVED SYSTEM tracer_nif
+type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
+type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index a79ce11563..1e2db38442 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1737,7 +1737,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
hp += REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
- erts_queue_message(proc, &rp_locks, mp, mess, am_undefined);
+ erts_queue_message(proc, &rp_locks, mp, mess);
erts_smp_proc_unlock(proc, rp_locks);
ERTS_SMP_CHK_NO_PROC_LOCKS;
}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 68f6abfcdc..7a72b0d8cc 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -846,6 +846,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
if (unlock_locks)
erts_smp_proc_unlock(rp, unlock_locks);
+
}
/*
@@ -2162,8 +2163,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = build_snifs_term(&hp, NULL, NIL);
BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
- val = erts_get_system_seq_tracer();
- ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false);
+ ErtsTracer seq_tracer = erts_get_system_seq_tracer();
+ val = erts_tracer_to_term(BIF_P, seq_tracer);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 71b6d78094..37f4e1de49 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -79,6 +79,7 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
}
if (port->drv_ptr->flags & ERL_DRV_FLAG_USE_INIT_ACK) {
+
/* Copied from erl_port_task.c */
port->async_open_port = erts_alloc(ERTS_ALC_T_PRTSD,
sizeof(*port->async_open_port));
@@ -109,6 +110,10 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id);
erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port->common.id);
+ if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS))
+ trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, BIF_P,
+ am_link, port->common.id);
+
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
erts_port_release(port);
@@ -339,8 +344,7 @@ BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
if (!prt)
BIF_RET(am_badarg);
-
- switch (erts_port_exit(BIF_P, 0, prt, prt->common.id, am_normal, &ref)) {
+ switch (erts_port_exit(BIF_P, 0, prt, BIF_P->common.id, am_normal, &ref)) {
case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
@@ -373,7 +377,7 @@ BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
ref = NIL;
#endif
- switch (erts_port_connect(BIF_P, 0, prt, prt->common.id, BIF_ARG_2, &ref)) {
+ switch (erts_port_connect(BIF_P, 0, prt, BIF_P->common.id, BIF_ARG_2, &ref)) {
case ERTS_PORT_OP_CALLER_EXIT:
case ERTS_PORT_OP_BADARG:
case ERTS_PORT_OP_DROPPED:
@@ -911,7 +915,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
}
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
}
@@ -928,21 +932,22 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
DTRACE3(port_open, process_str, name_buf, port_str);
}
#endif
+
+ if (port && IS_TRACED_FL(port, F_TRACE_PORTS))
+ trace_port(port, am_getting_linked, p->common.id);
+
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
+ }
+
if (!port) {
DEBUGF(("open_driver returned (%d:%d)\n",
err_typep ? *err_typep : 4711,
err_nump ? *err_nump : 4711));
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
goto do_return;
}
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
if (linebuf && port->linebuf == NULL){
port->linebuf = allocate_linebuf(linebuf);
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index a9443ee8df..ff2018aa27 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -52,7 +52,7 @@ static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
-static Eterm erts_default_meta_tracer_pid;
+static ErtsTracer erts_default_meta_tracer;
static struct { /* Protected by code write permission */
int current;
@@ -75,8 +75,6 @@ static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
-static int already_traced(Process *p, Process *tracee_p, Eterm tracer);
-static int port_already_traced(Process *p, Port *tracee_port, Eterm tracer);
static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
@@ -94,21 +92,15 @@ erts_bif_trace_init(void)
erts_default_match_spec = NULL;
erts_default_meta_match_spec = NULL;
erts_default_trace_pattern_flags = erts_trace_pattern_flags_off;
- erts_default_meta_tracer_pid = NIL;
+ erts_default_meta_tracer = erts_tracer_nil;
}
/*
* Turn on/off call tracing for the given function(s).
*/
-
-Eterm
-trace_pattern_2(BIF_ALIST_2)
-{
- return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL);
-}
Eterm
-trace_pattern_3(BIF_ALIST_3)
+erts_internal_trace_pattern_3(BIF_ALIST_3)
{
return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
@@ -125,11 +117,10 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
Eterm l;
struct trace_pattern_flags flags = erts_trace_pattern_flags_off;
int is_global;
- Process *meta_tracer_proc = p;
- Eterm meta_tracer_pid = p->common.id;
+ ErtsTracer meta_tracer = erts_tracer_nil;
if (!erts_try_seize_code_write_permission(p)) {
- ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
+ ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_pattern_3], p, MFA, Pattern, flaglist);
}
finish_bp.current = -1;
@@ -160,31 +151,11 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
is_global = 0;
for(l = flaglist; is_list(l); l = CDR(list_val(l))) {
if (is_tuple(CAR(list_val(l)))) {
- Eterm *tp = tuple_val(CAR(list_val(l)));
-
- if (arityval(tp[0]) != 2 || tp[1] != am_meta) {
- goto error;
- }
- meta_tracer_pid = tp[2];
- if (is_internal_pid(meta_tracer_pid)) {
- meta_tracer_proc = erts_pid2proc(NULL, 0, meta_tracer_pid, 0);
- if (!meta_tracer_proc) {
- goto error;
- }
- } else if (is_internal_port(meta_tracer_pid)) {
- Port *meta_tracer_port;
- meta_tracer_proc = NULL;
- meta_tracer_port = (erts_port_lookup(
- meta_tracer_pid,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
- if (!meta_tracer_port)
- goto error;
- } else {
- goto error;
- }
- if (is_global) {
- goto error;
- }
+ meta_tracer = erts_term_to_tracer(am_meta, CAR(list_val(l)));
+ if (meta_tracer == THE_NON_VALUE) {
+ meta_tracer = erts_tracer_nil;
+ goto error;
+ }
flags.breakpoint = 1;
flags.meta = 1;
} else {
@@ -202,6 +173,8 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
flags.breakpoint = 1;
flags.meta = 1;
+ if (ERTS_TRACER_IS_NIL(meta_tracer))
+ meta_tracer = erts_term_to_tracer(THE_NON_VALUE, p->common.id);
break;
case am_global:
if (flags.breakpoint) {
@@ -252,14 +225,11 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetUnref(erts_default_meta_match_spec);
erts_default_meta_match_spec = match_prog_set;
MatchSetRef(erts_default_meta_match_spec);
- erts_default_meta_tracer_pid = meta_tracer_pid;
- if (meta_tracer_proc) {
- ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
- }
+ erts_tracer_update(&erts_default_meta_tracer, meta_tracer);
} else if (! flags.breakpoint) {
MatchSetUnref(erts_default_meta_match_spec);
erts_default_meta_match_spec = NULL;
- erts_default_meta_tracer_pid = NIL;
+ ERTS_TRACER_CLEAR(&erts_default_meta_tracer);
}
if (erts_default_trace_pattern_flags.breakpoint &&
flags.breakpoint) {
@@ -340,20 +310,18 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
if (is_small(mfa[2])) {
mfa[2] = signed_val(mfa[2]);
}
-
- if (meta_tracer_proc) {
- ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
- }
matches = erts_set_trace_pattern(p, mfa, specified,
match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid, 0);
+ on, flags, meta_tracer, 0);
}
error:
MatchSetUnref(match_prog_set);
UnUseTmpHeap(3,p);
+ ERTS_TRACER_CLEAR(&meta_tracer);
+
#ifdef ERTS_SMP
if (finish_bp.current >= 0) {
ASSERT(matches >= 0);
@@ -404,7 +372,7 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
Binary **match_spec,
Binary **meta_match_spec,
struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid)
+ ErtsTracer *meta_tracer)
{
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
erts_smp_thr_progress_is_blocking());
@@ -416,8 +384,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*meta_match_spec = erts_default_meta_match_spec;
if (trace_pattern_flags)
*trace_pattern_flags = erts_default_trace_pattern_flags;
- if (meta_tracer_pid)
- *meta_tracer_pid = erts_default_meta_tracer_pid;
+ if (meta_tracer)
+ *meta_tracer = erts_default_meta_tracer;
}
int erts_is_default_trace_enabled(void)
@@ -465,12 +433,12 @@ erts_trace_flag2bit(Eterm flag)
** occurred in the argument list.
*/
int
-erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp)
+erts_trace_flags(Eterm List,
+ Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp)
{
Eterm list = List;
Uint mask = 0;
- Eterm tracer = NIL;
+ ErtsTracer tracer = erts_tracer_nil;
int cpu_timestamp = 0;
while (is_list(list)) {
@@ -483,33 +451,73 @@ erts_trace_flags(Eterm List,
cpu_timestamp = !0;
#endif
} else if (is_tuple(item)) {
- Eterm* tp = tuple_val(item);
-
- if (arityval(tp[0]) != 2 || tp[1] != am_tracer) goto error;
- if (is_internal_pid(tp[2]) || is_internal_port(tp[2])) {
- tracer = tp[2];
- } else goto error;
+ tracer = erts_term_to_tracer(am_tracer, item);
+ if (tracer == THE_NON_VALUE)
+ goto error;
} else goto error;
list = CDR(list_val(list));
}
if (is_not_nil(list)) goto error;
- if (pMask && mask) *pMask = mask;
- if (pTracer && tracer != NIL) *pTracer = tracer;
- if (pCpuTimestamp && cpu_timestamp) *pCpuTimestamp = cpu_timestamp;
+ if (pMask && mask) *pMask = mask;
+ if (pTracer && !ERTS_TRACER_IS_NIL(tracer)) *pTracer = tracer;
+ if (pCpuTimestamp && cpu_timestamp) *pCpuTimestamp = cpu_timestamp;
return !0;
error:
return 0;
}
-Eterm trace_3(BIF_ALIST_3)
+static ERTS_INLINE int
+start_trace(Process *c_p, ErtsTracer tracer,
+ ErtsPTabElementCommon *common,
+ int on, int mask)
+{
+ /* We can use the common part of both port+proc without checking what it is
+ In the code below port is used for both proc and port */
+ Port *port = (Port*)common;
+
+ /*
+ * SMP build assumes that either system is blocked or:
+ * * main lock is held on c_p
+ * * all locks are held on port common
+ */
+
+ if (!ERTS_TRACER_IS_NIL(tracer)) {
+ if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS)
+ && !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) {
+ /* This tracee is already being traced, and not by the
+ * tracer to be */
+ if (erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCKS_ALL,
+ common, am_trace_status)) {
+ /* The tracer is still in use */
+ return 1;
+ }
+ /* Current tracer now invalid */
+ }
+ }
+
+ if (on)
+ ERTS_TRACE_FLAGS(port) |= mask;
+ else
+ ERTS_TRACE_FLAGS(port) &= ~mask;
+
+ if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS) == 0) {
+ tracer = erts_tracer_nil;
+ erts_tracer_replace(common, erts_tracer_nil);
+ } else if (!ERTS_TRACER_IS_NIL(tracer))
+ erts_tracer_replace(common, tracer);
+
+ return 0;
+}
+
+Eterm erts_internal_trace_3(BIF_ALIST_3)
{
Process* p = BIF_P;
Eterm pid_spec = BIF_ARG_1;
Eterm how = BIF_ARG_2;
Eterm list = BIF_ARG_3;
int on;
- Eterm tracer = NIL;
+ ErtsTracer tracer = erts_tracer_nil;
int matches = 0;
Uint mask = 0;
int cpu_ts = 0;
@@ -522,41 +530,24 @@ Eterm trace_3(BIF_ALIST_3)
}
if (!erts_try_seize_code_write_permission(BIF_P)) {
- ERTS_BIF_YIELD3(bif_export[BIF_trace_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
- if (is_nil(tracer) || is_internal_pid(tracer)) {
- Process *tracer_proc = erts_pid2proc(p,
- ERTS_PROC_LOCK_MAIN,
- is_nil(tracer) ? p->common.id : tracer,
- ERTS_PROC_LOCKS_ALL);
- if (!tracer_proc)
- goto error;
- ERTS_TRACE_FLAGS(tracer_proc) |= F_TRACER;
- erts_smp_proc_unlock(tracer_proc,
- (tracer_proc == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
- } else if (is_internal_port(tracer)) {
- Port *tracer_port = erts_id2port_sflgs(tracer,
- p,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (!tracer_port)
- goto error;
- ERTS_TRACE_FLAGS(tracer_port) |= F_TRACER;
- erts_port_release(tracer_port);
- } else
- goto error;
-
switch (how) {
case am_false:
on = 0;
break;
case am_true:
on = 1;
- if (is_nil(tracer))
- tracer = p->common.id;
+ if (ERTS_TRACER_IS_NIL(tracer))
+ tracer = erts_term_to_tracer(am_tracer, p->common.id);
+
+ if (tracer == THE_NON_VALUE) {
+ tracer = erts_tracer_nil;
+ goto error;
+ }
+
break;
default:
goto error;
@@ -575,34 +566,20 @@ Eterm trace_3(BIF_ALIST_3)
}
#endif
- if (pid_spec == tracer)
- goto error;
-
tracee_port = erts_id2port_sflgs(pid_spec,
p,
ERTS_PROC_LOCK_MAIN,
ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
if (!tracee_port)
goto error;
-
- if (tracer != NIL && port_already_traced(p, tracee_port, tracer)) {
+
+ if (start_trace(p, tracer, &tracee_port->common, on, mask)) {
erts_port_release(tracee_port);
goto already_traced;
- }
-
- if (on)
- ERTS_TRACE_FLAGS(tracee_port) |= mask;
- else
- ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
-
- if (!ERTS_TRACE_FLAGS(tracee_port))
- ERTS_TRACER_PROC(tracee_port) = NIL;
- else if (tracer != NIL)
- ERTS_TRACER_PROC(tracee_port) = tracer;
-
- erts_port_release(tracee_port);
-
- matches = 1;
+ }
+ erts_port_release(tracee_port);
+ matches = 1;
} else if (is_pid(pid_spec)) {
Process *tracee_p;
@@ -615,33 +592,19 @@ Eterm trace_3(BIF_ALIST_3)
* and not about to be tracing.
*/
- if (pid_spec == tracer)
- goto error;
-
tracee_p = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
pid_spec, ERTS_PROC_LOCKS_ALL);
if (!tracee_p)
goto error;
- if (tracer != NIL && already_traced(p, tracee_p, tracer)) {
+ if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) {
erts_smp_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
goto already_traced;
- }
-
- if (on)
- ERTS_TRACE_FLAGS(tracee_p) |= mask;
- else
- ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
-
- if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) == 0)
- ERTS_TRACER_PROC(tracee_p) = NIL;
- else if (tracer != NIL)
- ERTS_TRACER_PROC(tracee_p) = tracer;
-
- erts_smp_proc_unlock(tracee_p,
+ }
+ erts_smp_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
@@ -692,18 +655,27 @@ Eterm trace_3(BIF_ALIST_3)
}
#endif
- if (pid_spec == am_all || pid_spec == am_existing) {
+ if (pid_spec == am_all || pid_spec == am_existing ||
+ pid_spec == am_ports || pid_spec == am_processes ||
+ pid_spec == am_existing_ports || pid_spec == am_existing_processes
+ ) {
int i;
int procs = 0;
int ports = 0;
int mods = 0;
if (mask & (ERTS_PROC_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
- procs = 1;
+ procs = pid_spec != am_ports && pid_spec != am_existing_ports;
if (mask & (ERTS_PORT_TRACEE_FLAGS & ~ERTS_TRACEE_MODIFIER_FLAGS))
- ports = 1;
- if (mask & ERTS_TRACEE_MODIFIER_FLAGS)
- mods = 1;
+ ports = pid_spec != am_processes && pid_spec != am_existing_processes;
+ if (mask & ERTS_TRACEE_MODIFIER_FLAGS) {
+ if (pid_spec == am_ports || pid_spec == am_existing_ports)
+ ports = 1;
+ else if (pid_spec == am_processes || pid_spec == am_existing_processes)
+ procs = 1;
+ else
+ mods = 1;
+ }
#ifdef ERTS_SMP
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
@@ -719,22 +691,7 @@ Eterm trace_3(BIF_ALIST_3)
Process* tracee_p = erts_pix2proc(i);
if (! tracee_p)
continue;
- if (tracer != NIL) {
- if (tracee_p->common.id == tracer)
- continue;
- if (already_traced(NULL, tracee_p, tracer))
- continue;
- }
- if (on) {
- ERTS_TRACE_FLAGS(tracee_p) |= mask;
- } else {
- ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
- }
- if(!(ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)) {
- ERTS_TRACER_PROC(tracee_p) = NIL;
- } else if (tracer != NIL) {
- ERTS_TRACER_PROC(tracee_p) = tracer;
- }
+ start_trace(p, tracer, &tracee_p->common, on, mask);
matches++;
}
}
@@ -749,33 +706,26 @@ Eterm trace_3(BIF_ALIST_3)
state = erts_atomic32_read_nob(&tracee_port->state);
if (state & ERTS_PORT_SFLGS_DEAD)
continue;
- if (tracer != NIL) {
- if (tracee_port->common.id == tracer)
- continue;
- if (port_already_traced(NULL, tracee_port, tracer))
- continue;
- }
-
- if (on) ERTS_TRACE_FLAGS(tracee_port) |= mask;
- else ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
-
- if (!(ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)) {
- ERTS_TRACER_PROC(tracee_port) = NIL;
- } else if (tracer != NIL) {
- ERTS_TRACER_PROC(tracee_port) = tracer;
- }
- /* matches are not counted for ports since it would violate compatibility */
- /* This could be a reason to modify this function or make a new one. */
+ start_trace(p, tracer, &tracee_port->common, on, mask);
+ matches++;
}
}
}
- if (pid_spec == am_all || pid_spec == am_new) {
- Uint def_flags = mask;
- Eterm def_tracer = tracer;
+ if (pid_spec == am_all || pid_spec == am_new
+ || pid_spec == am_ports || pid_spec == am_processes
+ || pid_spec == am_new_ports || pid_spec == am_new_processes
+ ) {
ok = 1;
- erts_change_default_tracing(on, &def_flags, &def_tracer);
+ if (mask & ERTS_PROC_TRACEE_FLAGS &&
+ pid_spec != am_ports && pid_spec != am_new_ports)
+ erts_change_default_proc_tracing(
+ on, mask & ERTS_PROC_TRACEE_FLAGS, tracer);
+ if (mask & ERTS_PORT_TRACEE_FLAGS &&
+ pid_spec != am_processes && pid_spec != am_new_processes)
+ erts_change_default_port_tracing(
+ on, mask & ERTS_PORT_TRACEE_FLAGS, tracer);
#ifdef HAVE_ERTS_NOW_CPU
if (cpu_ts && !on) {
@@ -801,6 +751,7 @@ Eterm trace_3(BIF_ALIST_3)
}
#endif
erts_release_code_write_permission();
+ ERTS_TRACER_CLEAR(&tracer);
BIF_RET(make_small(matches));
@@ -810,6 +761,8 @@ Eterm trace_3(BIF_ALIST_3)
error:
+ ERTS_TRACER_CLEAR(&tracer);
+
#ifdef ERTS_SMP
if (system_blocked) {
erts_smp_thr_progress_unblock();
@@ -821,88 +774,6 @@ Eterm trace_3(BIF_ALIST_3)
BIF_ERROR(p, BADARG);
}
-/* Check that the process to be traced is not already traced
- * by a valid other tracer than the tracer to be.
- */
-static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
-{
- /*
- * SMP build assumes that either system is blocked or:
- * * main lock is held on c_p
- * * all locks are held on port tracee_p
- */
- if ((ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)
- && ERTS_TRACER_PROC(tracee_port) != tracer) {
- /* This tracee is already being traced, and not by the
- * tracer to be */
- if (is_internal_port(ERTS_TRACER_PROC(tracee_port))) {
- if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_port))) {
- /* Current trace port now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else if(is_internal_pid(ERTS_TRACER_PROC(tracee_port))) {
- Process *tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_port));
- if (!tracer_p) {
- /* Current trace process now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else {
- remove_tracer:
- ERTS_TRACE_FLAGS(tracee_port) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(tracee_port) = NIL;
- }
- }
- return 0;
-}
-
-/* Check that the process to be traced is not already traced
- * by a valid other tracer than the tracer to be.
- */
-static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
-{
- /*
- * SMP build assumes that either system is blocked or:
- * * main lock is held on c_p
- * * all locks multiple are held on tracee_p
- */
- if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)
- && ERTS_TRACER_PROC(tracee_p) != tracer) {
- /* This tracee is already being traced, and not by the
- * tracer to be */
- if (is_internal_port(ERTS_TRACER_PROC(tracee_p))) {
- if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_p))) {
- /* Current trace port now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else if(is_internal_pid(ERTS_TRACER_PROC(tracee_p))) {
- Process *tracer_p;
-
- tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_p));
- if (!tracer_p) {
- /* Current trace process now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else {
- remove_tracer:
- ERTS_TRACE_FLAGS(tracee_p) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(tracee_p) = NIL;
- }
- }
- return 0;
-}
-
/*
* Return information about a process or an external function being traced.
*/
@@ -920,7 +791,7 @@ Eterm trace_info_2(BIF_ALIST_2)
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
- } else if (is_atom(What) || is_pid(What)) {
+ } else if (is_atom(What) || is_pid(What) || is_port(What)) {
res = trace_info_pid(p, What, Key);
} else if (is_tuple(What)) {
res = trace_info_func(p, What, Key);
@@ -936,41 +807,52 @@ static Eterm
trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
{
Eterm tracer;
- Uint trace_flags;
+ Uint trace_flags = am_false;
Eterm* hp;
- if (pid_spec == am_new) {
- erts_get_default_tracing(&trace_flags, &tracer);
+ if (pid_spec == am_new || pid_spec == am_new_processes) {
+ ErtsTracer def_tracer;
+ erts_get_default_proc_tracing(&trace_flags, &def_tracer);
+ tracer = erts_tracer_to_term(p, def_tracer);
+ ERTS_TRACER_CLEAR(&def_tracer);
+ } else if (pid_spec == am_new_ports) {
+ ErtsTracer def_tracer;
+ erts_get_default_port_tracing(&trace_flags, &def_tracer);
+ tracer = erts_tracer_to_term(p, def_tracer);
+ ERTS_TRACER_CLEAR(&def_tracer);
+ } else if (is_internal_port(pid_spec)) {
+ Port *tracee;
+ tracee = erts_id2port_sflgs(pid_spec, p, ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
+ if (!tracee)
+ return am_undefined;
+
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
+ erts_is_tracer_proc_enabled(NULL, 0, &tracee->common, am_trace_status);
+
+ tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+
+ erts_port_release(tracee);
+
} else if (is_internal_pid(pid_spec)) {
Process *tracee;
tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCKS_ALL);
+ pid_spec, ERTS_PROC_LOCK_MAIN);
- if (!tracee) {
+ if (!tracee)
return am_undefined;
- } else {
- tracer = ERTS_TRACER_PROC(tracee);
- trace_flags = ERTS_TRACE_FLAGS(tracee);
- }
- if (is_internal_pid(tracer)) {
- if (!erts_proc_lookup(tracer)) {
- reset_tracer:
- ERTS_TRACE_FLAGS(tracee) &= ~TRACEE_FLAGS;
- trace_flags = ERTS_TRACE_FLAGS(tracee);
- tracer = ERTS_TRACER_PROC(tracee) = NIL;
- }
- }
- else if (is_internal_port(tracer)) {
- if (!erts_is_valid_tracer_port(tracer))
- goto reset_tracer;
- }
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(tracee,
- (tracee == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
-#endif
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
+ erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN,
+ &tracee->common, am_trace_status);
+
+ tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+
+ if (tracee != p)
+ erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
@@ -1024,8 +906,10 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
HRelease(p,limit,hp+3);
return TUPLE2(hp, key, flag_list);
} else if (key == am_tracer) {
- hp = HAlloc(p, 3);
- return TUPLE2(hp, key, tracer); /* Local pid or port */
+ if (tracer == am_false)
+ tracer = NIL;
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, tracer);
} else {
goto error;
}
@@ -1054,11 +938,11 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
*/
static int function_is_traced(Process *p,
Eterm mfa[3],
- Binary **ms, /* out */
- Binary **ms_meta, /* out */
- Eterm *tracer_pid_meta, /* out */
- Uint *count, /* out */
- Eterm *call_time) /* out */
+ Binary **ms, /* out */
+ Binary **ms_meta, /* out */
+ ErtsTracer *tracer_pid_meta, /* out */
+ Uint *count, /* out */
+ Eterm *call_time) /* out */
{
Export e;
Export* ep;
@@ -1123,7 +1007,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
- Eterm meta = am_false;
+ ErtsTracer meta = erts_tracer_nil;
Eterm call_time = NIL;
int r;
@@ -1193,7 +1077,10 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
retval = match_spec;
break;
case am_meta:
- retval = meta;
+ retval = erts_tracer_to_term(p, meta);
+ if (retval == am_false)
+ /* backwards compatibility */
+ retval = NIL;
break;
case am_meta_match_spec:
if (r & FUNC_TRACE_META_TRACE) {
@@ -1216,7 +1103,8 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
}
break;
case am_all: {
- Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false;
+ Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false,
+ m = am_false;
if (ms) {
match_spec = MatchSetGetSource(ms);
@@ -1235,6 +1123,9 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
if (r & FUNC_TRACE_TIME_TRACE) {
ct = call_time;
}
+
+ m = erts_tracer_to_term(p, meta);
+
hp = HAlloc(p, (3+2)*6);
retval = NIL;
t = TUPLE2(hp, am_call_count, c); hp += 3;
@@ -1243,7 +1134,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
- t = TUPLE2(hp, am_meta, meta); hp += 3;
+ t = TUPLE2(hp, am_meta, m); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
@@ -1306,7 +1197,8 @@ trace_info_on_load(Process* p, Eterm key)
case am_meta:
hp = HAlloc(p, 3);
if (erts_default_trace_pattern_flags.meta) {
- return TUPLE2(hp, key, erts_default_meta_tracer_pid);
+ ASSERT(!ERTS_TRACER_IS_NIL(erts_default_meta_tracer));
+ return TUPLE2(hp, key, erts_tracer_to_term(p, erts_default_meta_tracer));
} else {
return TUPLE2(hp, key, am_false);
}
@@ -1345,7 +1237,7 @@ trace_info_on_load(Process* p, Eterm key)
}
case am_all:
{
- Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t;
+ Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t, m;
if (erts_default_trace_pattern_flags.local ||
(! erts_default_trace_pattern_flags.breakpoint)) {
@@ -1363,6 +1255,8 @@ trace_info_on_load(Process* p, Eterm key)
MatchSetGetSource(erts_default_meta_match_spec);
meta_match_spec = copy_object(meta_match_spec, p);
}
+ m = (erts_default_trace_pattern_flags.meta
+ ? erts_tracer_to_term(p, erts_default_meta_tracer) : am_false);
hp = HAlloc(p, (3+2)*5 + 3);
t = TUPLE2(hp, am_call_count,
(erts_default_trace_pattern_flags.call_count
@@ -1370,9 +1264,7 @@ trace_info_on_load(Process* p, Eterm key)
r = CONS(hp, t, r); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, meta_match_spec); hp += 3;
r = CONS(hp, t, r); hp += 2;
- t = TUPLE2(hp, am_meta,
- (erts_default_trace_pattern_flags.meta
- ? erts_default_meta_tracer_pid : am_false)); hp += 3;
+ t = TUPLE2(hp, am_meta, m); hp += 3;
r = CONS(hp, t, r); hp += 2;
t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
r = CONS(hp, t, r); hp += 2;
@@ -1397,7 +1289,7 @@ int
erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid, int is_blocking)
+ ErtsTracer meta_tracer, int is_blocking)
{
const ErtsCodeIndex code_ix = erts_active_code_ix();
int matches = 0;
@@ -1487,7 +1379,7 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
}
if (flags.meta) {
erts_set_mtrace_bif(pc, meta_match_prog_set,
- meta_tracer_pid);
+ meta_tracer);
m = 1;
}
if (flags.call_time) {
@@ -1527,7 +1419,7 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
}
if (flags.meta) {
erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
- meta_tracer_pid);
+ meta_tracer);
}
if (flags.call_count) {
erts_set_count_break(&finish_bp.f, on);
@@ -2336,50 +2228,86 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
}
/* End: Trace for System Profiling */
-BIF_RETTYPE
-trace_delivered_1(BIF_ALIST_1)
+/* Trace delivered sends an aux work message to all schedulers,
+   and when all schedulers have acknowledged that they have seen
+   the message, a reply is sent to the requesting process.
+
+   IMPORTANT: We have to make sure that all messages sent
+   using enif_send have been delivered before we send the reply
+   to the caller.
+
+ There used to be a separate implementation for when only a pid
+ is passed in, but since this is not performance critical code
+ we now use the same approach for both.
+*/
+
+typedef struct {
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[REF_THING_SIZE];
+ Eterm target;
+ erts_smp_atomic32_t refc;
+} ErtsTraceDeliveredAll;
+
+static void
+reply_trace_delivered_all(void *vtdarp)
{
- DECL_AM(trace_delivered);
-#ifdef ERTS_SMP
- ErlHeapFragment *bp;
-#else
- ErtsProcLocks locks = 0;
-#endif
- Eterm *hp;
- Eterm msg, ref, msg_ref;
- Process *p;
- if (BIF_ARG_1 == am_all) {
- p = NULL;
- } else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) {
- if (is_not_internal_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- }
-
- ref = erts_make_ref(BIF_P);
+ ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp;
+ if (erts_smp_atomic32_dec_read_nob(&tdarp->refc) == 0) {
+ Eterm ref_copy, msg;
+ Process *rp = tdarp->proc;
+ Eterm *hp = NULL;
+ ErlOffHeap *ohp;
#ifdef ERTS_SMP
- bp = new_message_buffer(REF_THING_SIZE + 4);
- hp = &bp->mem[0];
- msg_ref = STORE_NC(&hp, &bp->off_heap, ref);
+ ErlHeapFragment *bp;
+ bp = new_message_buffer(4 + NC_HEAP_SIZE(tdarp->ref));
+ hp = &bp->mem[0];
+ ohp = &bp->off_heap;
#else
- hp = HAlloc(BIF_P, 4);
- msg_ref = ref;
+ ErtsProcLocks rp_locks = 0;
+ ErtsMessage *mp;
+ mp = erts_alloc_message_heap(
+ rp, &rp_locks, 4 + NC_HEAP_SIZE(tdarp->ref), &hp, &ohp);
#endif
- msg = TUPLE3(hp, AM_trace_delivered, BIF_ARG_1, msg_ref);
+ ref_copy = STORE_NC(&hp, ohp, tdarp->ref);
+ msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy);
#ifdef ERTS_SMP
- erts_send_sys_msg_proc(BIF_P->common.id, BIF_P->common.id, msg, bp);
- if (p)
- erts_smp_proc_unlock(p,
- (BIF_P == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
+ erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp);
#else
- erts_send_message(BIF_P, BIF_P, &locks, msg, ERTS_SND_FLG_NO_SEQ_TRACE);
+ erts_queue_message(rp, &rp_locks, mp, msg);
#endif
- BIF_RET(ref);
+ erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp);
+ erts_proc_dec_refc(rp);
+ }
+}
+
+BIF_RETTYPE
+trace_delivered_1(BIF_ALIST_1)
+{
+
+ if (BIF_ARG_1 == am_all || is_internal_pid(BIF_ARG_1)) {
+ Eterm *hp, ref;
+ ErtsTraceDeliveredAll *tdarp =
+ erts_alloc(ERTS_ALC_T_MISC_AUX_WORK, sizeof(ErtsTraceDeliveredAll));
+
+ tdarp->proc = BIF_P;
+ ref = erts_make_ref(BIF_P);
+ hp = &tdarp->ref_heap[0];
+ tdarp->ref = STORE_NC(&hp, NULL, ref);
+ tdarp->target = BIF_ARG_1;
+ erts_smp_atomic32_init_nob(&tdarp->refc,
+ (erts_aint32_t) erts_no_schedulers);
+ erts_proc_add_refc(BIF_P, 1);
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ reply_trace_delivered_all,
+ (void *) tdarp);
+ BIF_RET(ref);
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
}
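
The acknowledgement scheme above -- initialise a counter to the number of schedulers, let each scheduler's aux work callback decrement it, and let whoever reaches zero deliver the reply -- is a common fan-out/fan-in pattern. A minimal self-contained sketch of it, using C11 atomics and hypothetical names (ack_request, on_scheduler_ack, deliver_reply) rather than the ERTS API:

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
    atomic_int refc;   /* one acknowledgement expected per scheduler */
    int target;        /* stands in for the request payload (tdarp->target) */
} ack_request;

/* Hypothetical reply hook; in the real code this is where the
   trace_delivered message is queued to the requesting process. */
static void deliver_reply(ack_request *req)
{
    printf("all schedulers have acked the request for %d\n", req->target);
}

/* Runs once per scheduler; the last decrement triggers the reply. */
static void on_scheduler_ack(ack_request *req)
{
    if (atomic_fetch_sub(&req->refc, 1) == 1)
        deliver_reply(req);
}

int main(void)
{
    enum { N_SCHEDULERS = 4 };
    ack_request req;
    atomic_init(&req.refc, N_SCHEDULERS);
    req.target = 42;

    /* Sequential stand-in for the per-scheduler aux work callbacks. */
    for (int i = 0; i < N_SCHEDULERS; i++)
        on_scheduler_ack(&req);
    return 0;
}

In the real code the decrements happen concurrently on the scheduler threads; the atomic fetch-and-sub guarantees that exactly one of them observes the final value and sends the reply.
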
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index be14386b14..76b96637ae 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -135,21 +135,22 @@ get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks)
static Eterm
-set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
+set_tracee_flags(Process *tracee_p, ErtsTracer tracer,
+ Uint d_flags, Uint e_flags) {
Eterm ret;
Uint flags;
- if (tracer == NIL) {
+ if (ERTS_TRACER_IS_NIL(tracer)) {
flags = ERTS_TRACE_FLAGS(tracee_p) & ~TRACEE_FLAGS;
} else {
flags = ((ERTS_TRACE_FLAGS(tracee_p) & ~d_flags) | e_flags);
- if (! flags) tracer = NIL;
+ if (! flags) tracer = erts_tracer_nil;
}
- ret = ((ERTS_TRACER_PROC(tracee_p) != tracer
+ ret = ((!ERTS_TRACER_COMPARE(ERTS_TRACER(tracee_p),tracer)
|| ERTS_TRACE_FLAGS(tracee_p) != flags)
? am_true
: am_false);
- ERTS_TRACER_PROC(tracee_p) = tracer;
+ erts_tracer_replace(&tracee_p->common, tracer);
ERTS_TRACE_FLAGS(tracee_p) = flags;
return ret;
}
@@ -163,40 +164,16 @@ set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
** returns fail_term on failure. Fails if tracer pid or port is invalid.
*/
static Eterm
-set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
+set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer,
Uint d_flags, Uint e_flags) {
- Eterm ret = fail_term;
- Process *tracer_p;
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCKS_ALL ==
- erts_proc_lc_my_proc_locks(tracee_p));
-
- if (is_internal_pid(tracer)
- && (tracer_p =
- erts_pid2proc(tracee_p, ERTS_PROC_LOCKS_ALL,
- tracer, ERTS_PROC_LOCKS_ALL))) {
- if (tracee_p != tracer_p) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- ERTS_TRACE_FLAGS(tracer_p) |= (ERTS_TRACE_FLAGS(tracee_p)
- ? F_TRACER
- : 0);
- erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL);
- }
- } else if (is_internal_port(tracer)) {
- Port *tracer_port =
- erts_id2port_sflgs(tracer,
- tracee_p,
- ERTS_PROC_LOCKS_ALL,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (tracer_port) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- erts_port_release(tracer_port);
- }
- } else {
- ASSERT(is_nil(tracer));
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- }
- return ret;
+
+ ERTS_SMP_LC_ASSERT(
+ ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p)
+ || erts_thr_progress_is_blocking());
+
+ if (ERTS_TRACER_IS_NIL(tracer) || erts_is_tracer_enabled(tracee_p, tracer))
+ return set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ return fail_term;
}
/*
@@ -2358,7 +2335,7 @@ restart:
case matchEnableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), 0, n);
+ set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2371,7 +2348,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), 0, n);
+ set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n);
esp[-1] = am_true;
}
}
@@ -2379,7 +2356,7 @@ restart:
case matchDisableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), n, 0);
+ set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2392,7 +2369,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), n, 0);
+ set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0);
esp[-1] = am_true;
}
}
@@ -2428,7 +2405,7 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = ERTS_TRACER_PROC(c_p);
+ ErtsTracer tracer = erts_tracer_nil;
/* XXX Atomicity note: Not fully atomic. Default tracer
* is sampled from current process but applied to
* tracee and tracer later after releasing main
@@ -2440,29 +2417,34 @@ restart:
* {trace,[],[{{tracer,Tracer}}]} is much, much older.
*/
int cputs = 0;
+ erts_tracer_update(&tracer, ERTS_TRACER(c_p));
if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
cputs ) {
(--esp)[-1] = FAIL_TERM;
+ ERTS_TRACER_CLEAR(&tracer);
break;
}
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
(--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
d_flags, e_flags);
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACER_CLEAR(&tracer);
}
break;
case matchTrace3:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = ERTS_TRACER_PROC(c_p);
+ ErtsTracer tracer = erts_tracer_nil;
/* XXX Atomicity note. Not fully atomic. See above.
* Above it could possibly be solved, but not here.
*/
int cputs = 0;
Eterm tracee = (--esp)[0];
+
+ erts_tracer_update(&tracer, ERTS_TRACER(c_p));
if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
@@ -2470,6 +2452,7 @@ restart:
! (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN,
tracee, ERTS_PROC_LOCKS_ALL))) {
(--esp)[-1] = FAIL_TERM;
+ ERTS_TRACER_CLEAR(&tracer);
break;
}
if (tmpp == c_p) {
@@ -2483,6 +2466,7 @@ restart:
erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
+ ERTS_TRACER_CLEAR(&tracer);
}
break;
case matchCatch: /* Match success, now build result */
@@ -5084,7 +5068,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
lint_res = db_match_set_lint(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
mps = db_match_set_compile(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
}
-
+
if (mps == NULL) {
hp = HAlloc(p,3);
ret = TUPLE2(hp, am_error, lint_res);
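
The flag handling in set_tracee_flags follows the rule flags = (flags & ~d_flags) | e_flags: disabled bits are cleared first and enabled bits are applied afterwards, so a flag that appears in both sets ends up enabled. A small self-contained illustration with made-up flag values (the real TRACEE_FLAGS bits live in the ERTS headers):

#include <assert.h>

/* Illustrative flag bits only, not the real trace flag values. */
#define FLG_SEND    (1u << 0)
#define FLG_RECEIVE (1u << 1)
#define FLG_PROCS   (1u << 2)

/* Same update rule as set_tracee_flags: clear the disabled bits first,
   then set the enabled bits, so enable wins for overlapping flags. */
static unsigned apply_trace_flags(unsigned flags, unsigned d_flags, unsigned e_flags)
{
    return (flags & ~d_flags) | e_flags;
}

int main(void)
{
    unsigned flags = FLG_SEND | FLG_PROCS;
    flags = apply_trace_flags(flags, FLG_SEND | FLG_RECEIVE, FLG_RECEIVE);
    assert(flags == (FLG_PROCS | FLG_RECEIVE)); /* send off, receive on */
    return 0;
}
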
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 62417fa05c..f33ade27f3 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2233,9 +2233,7 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
n++;
}
#endif
- ASSERT(is_nil(ERTS_TRACER_PROC(p)) ||
- is_internal_pid(ERTS_TRACER_PROC(p)) ||
- is_internal_port(ERTS_TRACER_PROC(p)));
+ ASSERT(IS_TRACER_VALID(ERTS_TRACER(p)));
ASSERT(is_pid(follow_moved(p->group_leader, (Eterm) 0)));
if (is_not_immed(p->group_leader)) {
@@ -2905,7 +2903,7 @@ reply_gc_info(void *vgcirp)
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (gcirp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 1a621863bb..8e201d5711 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -1248,7 +1248,7 @@ hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = tmr->btm.bp;
erts_queue_message(proc, &proc_locks, mp,
- tmr->btm.message, NIL);
+ tmr->btm.message);
erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND);
queued_message = 1;
proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND;
@@ -1980,7 +1980,7 @@ access_sched_local_btm(Process *c_p, Eterm pid,
ERTS_HLT_ASSERT(hp + (async ? 4 : 3) == hp_end);
- erts_queue_message(proc, &proc_locks, mp, msg, NIL);
+ erts_queue_message(proc, &proc_locks, mp, msg);
if (c_p)
proc_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -2111,7 +2111,7 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp,
msg = TUPLE3(hp, tag, tref, res);
- erts_queue_message(c_p, &proc_locks, mp, msg, NIL);
+ erts_queue_message(c_p, &proc_locks, mp, msg);
proc_locks &= ~ERTS_PROC_LOCK_MAIN;
if (proc_locks)
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 9231fb1b34..39c0617143 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -97,6 +97,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_links", "address" },
{ "code_write_permission", NULL },
{ "proc_status", "pid" },
+ { "proc_trace", "pid" },
{ "ports_snapshot", NULL },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
@@ -148,6 +149,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
{ "sys_msg_q", NULL },
+ { "tracer_mtx", NULL },
{ "port_table", NULL },
#endif
{ "mtrace_op", NULL },
@@ -160,9 +162,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proclist_pre_alloc_lock", "address" },
{ "xports_list_pre_alloc_lock", "address" },
{ "inet_buffer_stack_lock", NULL },
- { "gc_info", NULL },
- { "io_wake", NULL },
- { "timer_wheel", NULL },
{ "system_block", NULL },
{ "timeofday", NULL },
{ "get_time", NULL },
diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h
index a40070d00d..7af9100906 100644
--- a/erts/emulator/beam/erl_map.h
+++ b/erts/emulator/beam/erl_map.h
@@ -66,7 +66,7 @@ typedef struct flatmap_s {
/* erl_term.h stuff */
-#define flatmap_get_values(x) (((Eterm *)(x)) + 3)
+#define flatmap_get_values(x) (((Eterm *)(x)) + sizeof(flatmap_t)/sizeof(Eterm))
#define flatmap_get_keys(x) (((Eterm *)tuple_val(((flatmap_t *)(x))->keys)) + 1)
#define flatmap_get_size(x) (((flatmap_t*)(x))->size)
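
The flatmap_get_values change replaces a hard-coded word offset with one derived from the header struct itself, so the macro cannot silently go stale if flatmap_t ever gains a field. A self-contained sketch of the same idea with hypothetical types (Term, map_header):

#include <assert.h>

typedef unsigned long Term;   /* stand-in for Eterm */

/* Hypothetical header followed inline by its value array, mirroring the
   flatmap layout: values start right after the fixed-size header. */
typedef struct {
    Term thing_word;
    Term size;
    Term keys;
} map_header;

/* Deriving the offset from the struct size keeps this correct even if
   the header later grows, unlike a hard-coded "+ 3". */
#define map_values(x) (((Term *)(x)) + sizeof(map_header)/sizeof(Term))

int main(void)
{
    Term storage[sizeof(map_header)/sizeof(Term) + 3] = {0};
    map_header *hdr = (map_header *)storage;
    Term *vals = map_values(hdr);

    assert(vals == storage + sizeof(map_header)/sizeof(Term));
    vals[0] = 17;   /* first value slot, right after the header words */
    (void)hdr;
    return 0;
}
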
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 7596747b91..9beff52835 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -324,7 +324,7 @@ erts_queue_dist_message(Process *rcvr,
tok_label, tok_lastcnt, tok_serial);
}
#endif
- erts_queue_message(rcvr, rcvr_locks, mp, msg, token);
+ erts_queue_message(rcvr, rcvr_locks, mp, msg);
}
}
else {
@@ -349,7 +349,7 @@ erts_queue_dist_message(Process *rcvr,
}
#endif
- LINK_MESSAGE(rcvr, mp);
+ LINK_MESSAGE(rcvr, mp, &mp->next, 1);
if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
@@ -364,31 +364,34 @@ erts_queue_dist_message(Process *rcvr,
}
}
-/* Add a message last in message queue */
+/* Add messages last in message queue */
static Sint
-queue_message(Process *c_p,
- Process* receiver,
- erts_aint32_t *receiver_state,
- ErtsProcLocks *receiver_locks,
- ErtsMessage* mp,
- Eterm message,
- Eterm seq_trace_token
-#ifdef USE_VM_PROBES
- , Eterm dt_utag
-#endif
- )
+queue_messages(Process *c_p,
+ Process* receiver,
+ erts_aint32_t *receiver_state,
+ ErtsProcLocks *receiver_locks,
+ ErtsMessage* first,
+ ErtsMessage** last,
+ Uint len)
{
Sint res;
int locked_msgq = 0;
erts_aint32_t state;
- ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+ ASSERT(is_value(ERL_MESSAGE_TERM(first)));
+ ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
+ ERL_MESSAGE_TOKEN(first) == NIL ||
+ is_tuple(ERL_MESSAGE_TOKEN(first)));
#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
+ *receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+#endif
if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
- ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
+ ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
if (receiver_state)
state = *receiver_state;
@@ -417,16 +420,10 @@ queue_message(Process *c_p,
/* Drop message if receiver is exiting or has a pending exit... */
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_cleanup_messages(mp);
+ erts_cleanup_messages(first);
return 0;
}
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
-#endif
-
res = receiver->msg.len;
#ifdef ERTS_SMP
if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
@@ -440,75 +437,83 @@ queue_message(Process *c_p,
*/
res += receiver->msg_inq.len;
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
- LINK_MESSAGE_PRIVQ(receiver, mp);
+ LINK_MESSAGE_PRIVQ(receiver, first, last, len);
}
else
#endif
{
- LINK_MESSAGE(receiver, mp);
+ LINK_MESSAGE(receiver, first, last, len);
}
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ ErtsMessage *msg = first;
+
#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
-
- dtrace_proc_str(receiver, receiver_name);
- if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ if (DTRACE_ENABLED(message_queued)) {
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);
+
+ dtrace_proc_str(receiver, receiver_name);
+ if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ }
+ DTRACE6(message_queued,
+ receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
+ receiver->msg.len,
+ tok_label, tok_lastcnt, tok_serial);
}
- DTRACE6(message_queued,
- receiver_name, size_object(message), receiver->msg.len,
- tok_label, tok_lastcnt, tok_serial);
- }
#endif
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE))
- trace_receive(receiver, message);
+ while (msg) {
+ trace_receive(receiver, ERL_MESSAGE_TERM(msg));
+ msg = msg->next;
+ }
+
+ }
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(receiver,
#ifdef ERTS_SMP
- *receiver_locks
+ erts_proc_notify_new_message(receiver, *receiver_locks);
#else
- 0
-#endif
- );
-
-#ifndef ERTS_SMP
+ erts_proc_notify_new_message(receiver, 0);
ERTS_HOLE_CHECK(receiver);
#endif
return res;
}
-void
-#ifdef USE_VM_PROBES
-erts_queue_message_probe(Process* receiver, ErtsProcLocks *receiver_locks,
- ErtsMessage* mp,
- Eterm message, Eterm seq_trace_token, Eterm dt_utag)
-#else
+static Sint
+queue_message(Process *c_p,
+ Process* receiver,
+ erts_aint32_t *receiver_state,
+ ErtsProcLocks *receiver_locks,
+ ErtsMessage* mp, Eterm msg)
+{
+ ERL_MESSAGE_TERM(mp) = msg;
+ return queue_messages(c_p, receiver, receiver_state, receiver_locks,
+ mp, &mp->next, 1 );
+}
+
+Sint
erts_queue_message(Process* receiver, ErtsProcLocks *receiver_locks,
- ErtsMessage* mp,
- Eterm message, Eterm seq_trace_token)
-#endif
+ ErtsMessage* mp, Eterm msg)
{
- queue_message(NULL,
- receiver,
- NULL,
- receiver_locks,
- mp,
- message,
- seq_trace_token
-#ifdef USE_VM_PROBES
- , dt_utag
-#endif
- );
+ return queue_message(NULL, receiver, NULL, receiver_locks, mp, msg);
+}
+
+
+Sint
+erts_queue_messages(Process* receiver, ErtsProcLocks *receiver_locks,
+ ErtsMessage* first, ErtsMessage** last, Uint len)
+{
+ return queue_messages(NULL, receiver, NULL, receiver_locks,
+ first, last, len);
}
void
@@ -591,7 +596,7 @@ erts_try_alloc_message_on_heap(Process *pp,
#if defined(ERTS_SMP)
*plp & ERTS_PROC_LOCK_MAIN
#else
- 1
+ pp
#endif
) {
#ifdef ERTS_SMP
@@ -621,7 +626,7 @@ erts_try_alloc_message_on_heap(Process *pp,
*on_heap_p = !0;
}
#ifdef ERTS_SMP
- else if (erts_smp_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
+ else if (pp && erts_smp_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
locked_main = 1;
*psp = erts_smp_atomic32_read_nob(&pp->state);
*plp |= ERTS_PROC_LOCK_MAIN;
@@ -821,17 +826,15 @@ erts_send_message(Process* sender,
#endif
}
+ ERL_MESSAGE_TOKEN(mp) = token;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = utag;
+#endif
res = queue_message(sender,
receiver,
&receiver_state,
receiver_locks,
- mp,
- message,
- token
-#ifdef USE_VM_PROBES
- , utag
-#endif
- );
+ mp, message);
BM_SWAP_TIMER(send,system);
@@ -887,7 +890,8 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
/* the trace token must in this case be updated by the caller */
seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
temptoken = copy_struct(token, sz_token, &hp, ohp);
- erts_queue_message(to, to_locksp, mp, save, temptoken);
+ ERL_MESSAGE_TOKEN(mp) = temptoken;
+ erts_queue_message(to, to_locksp, mp, save);
} else {
sz_from = IS_CONST(from) ? 0 : size_object(from);
#ifdef SHCOPY_SEND
@@ -909,7 +913,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
? from
: copy_struct(from, sz_from, &hp, ohp));
save = TUPLE3(hp, am_EXIT, from_copy, mess);
- erts_queue_message(to, to_locksp, mp, save, NIL);
+ erts_queue_message(to, to_locksp, mp, save);
}
}
@@ -1484,7 +1488,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
int on_heap;
erts_aint32_t state;
- state = erts_smp_atomic32_read_nob(&proc->state);
+ state = proc ? erts_smp_atomic32_read_nob(&proc->state) : 0;
if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
msgp = erts_alloc_message(sz, &hp);
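
The queue_messages rewrite appends a whole pre-linked chain at once: the caller hands over the first message, the address of the chain's terminating next pointer, and the length, and LINK_MESSAGE splices it in constant time. A self-contained sketch of that tail pointer-to-pointer technique with hypothetical types (msg, msg_queue):

#include <assert.h>
#include <stddef.h>

typedef struct msg {
    struct msg *next;
    int term;
} msg;

typedef struct {
    msg  *first;
    msg **last;    /* address of the terminating next pointer */
    int   len;
} msg_queue;

static void queue_init(msg_queue *q)
{
    q->first = NULL;
    q->last  = &q->first;
    q->len   = 0;
}

/* Splice a pre-linked chain of num messages onto the end in O(1),
   the same shape as LINK_MESSAGE(p, first, last, len). */
static void queue_append_chain(msg_queue *q, msg *first, msg **last, int num)
{
    *q->last = first;
    q->last  = last;
    q->len  += num;
}

int main(void)
{
    msg a = { NULL, 1 }, b = { NULL, 2 };
    a.next = &b;

    msg_queue q;
    queue_init(&q);
    queue_append_chain(&q, &a, &b.next, 2);
    assert(q.first == &a && q.len == 2 && *q.last == NULL);
    return 0;
}
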
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index ad4e65274c..608cf552a2 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -182,48 +182,64 @@ typedef struct {
Sint len; /* queue length */
} ErlMessageInQueue;
+typedef struct erl_trace_message_queue__ {
+    struct erl_trace_message_queue__ *next; /* points to the next receiver */
+ Eterm receiver;
+ ErtsMessage* first;
+    ErtsMessage** last;  /* points to the last next pointer */
+ Sint len; /* queue length */
+} ErlTraceMessageQueue;
+
#endif
/* Get "current" message */
#define PEEK_MESSAGE(p) (*(p)->msg.save)
+#ifdef USE_VM_PROBES
+#define LINK_MESSAGE_DTAG(mp, dt) ERL_MESSAGE_DT_UTAG(mp) = dt
+#else
+#define LINK_MESSAGE_DTAG(mp, dt)
+#endif
-/* Add message last in private message queue */
-#define LINK_MESSAGE_PRIVQ(p, mp) do { \
- *(p)->msg.last = (mp); \
- (p)->msg.last = &(mp)->next; \
- (p)->msg.len++; \
-} while (0)
-
+#define LINK_MESSAGE_IMPL(p, first_msg, last_msg, num_msgs, where) do { \
+ *(p)->where.last = (first_msg); \
+ (p)->where.last = (last_msg); \
+ (p)->where.len += (num_msgs); \
+ } while(0)
#ifdef ERTS_SMP
-/* Move in message queue to end of private message queue */
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
-do { \
- if ((P)->msg_inq.first) { \
- *(P)->msg.last = (P)->msg_inq.first; \
- (P)->msg.last = (P)->msg_inq.last; \
- (P)->msg.len += (P)->msg_inq.len; \
- (P)->msg_inq.first = NULL; \
- (P)->msg_inq.last = &(P)->msg_inq.first; \
- (P)->msg_inq.len = 0; \
- } \
-} while (0)
-
-/* Add message last in message queue */
-#define LINK_MESSAGE(p, mp) do { \
- *(p)->msg_inq.last = (mp); \
- (p)->msg_inq.last = &(mp)->next; \
- (p)->msg_inq.len++; \
-} while(0)
+/* Add message last in private message queue */
+#define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, len) \
+ do { \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
+ } while (0)
+
+/* Add messages last in message queue */
+#define LINK_MESSAGE(p, first_msg, last_msg, len) \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg_inq)
+
+#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p) \
+ do { \
+ if (p->msg_inq.first) { \
+ *p->msg.last = p->msg_inq.first; \
+ p->msg.last = p->msg_inq.last; \
+ p->msg.len += p->msg_inq.len; \
+ p->msg_inq.first = NULL; \
+ p->msg_inq.last = &p->msg_inq.first; \
+ p->msg_inq.len = 0; \
+ } \
+ } while (0)
#else
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P)
+#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p)
-/* Add message last in message queue */
-#define LINK_MESSAGE(p, mp) LINK_MESSAGE_PRIVQ((p), (mp))
+/* Add messages last in message queue */
+#define LINK_MESSAGE(p, first_msg, last_msg, len) \
+ do { \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
+ } while(0)
#endif
@@ -259,23 +275,30 @@ do { \
(HEAP_FRAG_P)->off_heap.overhead = 0; \
} while (0)
+#ifdef USE_VM_PROBES
+#define ERL_MESSAGE_DT_UTAG_INIT(MP) ERL_MESSAGE_DT_UTAG(MP) = NIL
+#else
+#define ERL_MESSAGE_DT_UTAG_INIT(MP) do{ } while (0)
+#endif
+
+#define ERTS_INIT_MESSAGE(MP) \
+ do { \
+ (MP)->next = NULL; \
+ ERL_MESSAGE_TERM(MP) = THE_NON_VALUE; \
+ ERL_MESSAGE_TOKEN(MP) = NIL; \
+ ERL_MESSAGE_DT_UTAG_INIT(MP); \
+ MP->data.attached = NULL; \
+ } while (0)
+
void init_message(void);
ErlHeapFragment* new_message_buffer(Uint);
ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
void erts_queue_dist_message(Process*, ErtsProcLocks*, ErtsDistExternal *, Eterm);
-#ifdef USE_VM_PROBES
-void erts_queue_message_probe(Process*, ErtsProcLocks*, ErtsMessage*,
- Eterm message, Eterm seq_trace_token, Eterm dt_utag);
-#define erts_queue_message(RP,RL,BP,Msg,SEQ) \
- erts_queue_message_probe((RP),(RL),(BP),(Msg),(SEQ),NIL)
-#else
-void erts_queue_message(Process*, ErtsProcLocks*, ErtsMessage*,
- Eterm message, Eterm seq_trace_token);
-#define erts_queue_message_probe(RP,RL,BP,Msg,SEQ,TAG) \
- erts_queue_message((RP),(RL),(BP),(Msg),(SEQ))
-#endif
+Sint erts_queue_message(Process*, ErtsProcLocks*,ErtsMessage*, Eterm);
+Sint erts_queue_messages(Process*, ErtsProcLocks*,
+ ErtsMessage*, ErtsMessage**, Uint);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
@@ -354,9 +377,7 @@ ERTS_GLB_FORCE_INLINE ErtsMessage *erts_alloc_message(Uint sz, Eterm **hpp)
if (sz == 0) {
mp = erts_alloc_message_ref();
- mp->next = NULL;
- ERL_MESSAGE_TERM(mp) = NIL;
- mp->data.attached = NULL;
+ ERTS_INIT_MESSAGE(mp);
if (hpp)
*hpp = NULL;
return mp;
@@ -365,8 +386,7 @@ ERTS_GLB_FORCE_INLINE ErtsMessage *erts_alloc_message(Uint sz, Eterm **hpp)
mp = erts_alloc(ERTS_ALC_T_MSG,
sizeof(ErtsMessage) + (sz - 1)*sizeof(Eterm));
- mp->next = NULL;
- ERL_MESSAGE_TERM(mp) = NIL;
+ ERTS_INIT_MESSAGE(mp);
mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
ERTS_INIT_HEAP_FRAG(&mp->hfrag, sz, sz);
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 71e3fd8b6e..d0f305900a 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -257,7 +257,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
- erts_queue_message(rp, &rp_locks, msgp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, msgp, msg);
if (esdp && msaccrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 1f5793f534..73c0eb8eba 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -92,11 +92,12 @@ static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
}
static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp)
-{
+{
env->hp = hp;
- if (env->heap_frag == NULL) {
+ if (env->heap_frag == NULL) {
ASSERT(HEAP_LIMIT(env->proc) == env->hp_end);
- HEAP_TOP(env->proc) = env->hp;
+ ASSERT(env->hp + need > env->hp_end);
+ HEAP_TOP(env->proc) = env->hp;
}
else {
env->heap_frag->used_size = hp - env->heap_frag->mem;
@@ -120,7 +121,8 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, unsigned may_need)
}
#endif
-void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
+void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
+ Process* tracee)
{
env->mod_nif = mod_nif;
env->proc = p;
@@ -130,17 +132,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
env->fpe_was_unmasked = erts_block_fpe();
env->tmp_obj_list = NULL;
env->exception_thrown = 0;
-}
-
-static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
-{
- env->mod_nif = mod_nif;
- env->proc = NULL;
- env->hp = NULL;
- env->hp_end = NULL;
- env->heap_frag = NULL;
- env->fpe_was_unmasked = erts_block_fpe();
- env->tmp_obj_list = NULL;
+ env->tracee = tracee;
}
/* Temporary object header, auto-deallocated when NIF returns
@@ -180,13 +172,6 @@ void erts_post_nif(ErlNifEnv* env)
free_tmp_objs(env);
}
-static void post_nif_noproc(ErlNifEnv* env)
-{
- erts_unblock_fpe(env->fpe_was_unmasked);
- free_tmp_objs(env);
-}
-
-
/* Flush out our cached heap pointers to allow an ordinary HAlloc
*/
static void flush_env(ErlNifEnv* env)
@@ -247,18 +232,20 @@ struct enif_msg_environment_t
Process phony_proc;
};
-ErlNifEnv* enif_alloc_env(void)
+static ERTS_INLINE void
+setup_nif_env(struct enif_msg_environment_t* msg_env,
+ struct erl_module_nif* mod,
+ Process* tracee)
{
- struct enif_msg_environment_t* msg_env =
- erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */
-
- msg_env->env.hp = phony_heap;
+
+ msg_env->env.hp = phony_heap;
msg_env->env.hp_end = phony_heap;
msg_env->env.heap_frag = NULL;
- msg_env->env.mod_nif = NULL;
+ msg_env->env.mod_nif = mod;
msg_env->env.tmp_obj_list = NULL;
msg_env->env.proc = &msg_env->phony_proc;
+ msg_env->env.exception_thrown = 0;
memset(&msg_env->phony_proc, 0, sizeof(Process));
HEAP_START(&msg_env->phony_proc) = phony_heap;
HEAP_TOP(&msg_env->phony_proc) = phony_heap;
@@ -270,6 +257,14 @@ ErlNifEnv* enif_alloc_env(void)
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
#endif
+ msg_env->env.tracee = tracee;
+}
+
+ErlNifEnv* enif_alloc_env(void)
+{
+ struct enif_msg_environment_t* msg_env =
+ erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
+ setup_nif_env(msg_env, NULL, NULL);
return &msg_env->env;
}
void enif_free_env(ErlNifEnv* env)
@@ -278,6 +273,20 @@ void enif_free_env(ErlNifEnv* env)
erts_free(ERTS_ALC_T_NIF, env);
}
+static ERTS_INLINE void pre_nif_noproc(struct enif_msg_environment_t* msg_env,
+ struct erl_module_nif* mod,
+ Process* tracee)
+{
+ setup_nif_env(msg_env, mod, tracee);
+ msg_env->env.fpe_was_unmasked = erts_block_fpe();
+}
+
+static ERTS_INLINE void post_nif_noproc(struct enif_msg_environment_t* msg_env)
+{
+ erts_unblock_fpe(msg_env->env.fpe_was_unmasked);
+ enif_clear_env(&msg_env->env);
+}
+
static ERTS_INLINE void clear_offheap(ErlOffHeap* oh)
{
oh->first = NULL;
@@ -304,22 +313,111 @@ void enif_clear_env(ErlNifEnv* env)
ASSERT(!is_offheap(&MSO(p)));
free_tmp_objs(env);
}
-int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
- ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+
+#ifdef ERTS_SMP
+#ifdef DEBUG
+static int enif_send_delay = 0;
+#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
+#else
+#ifdef ERTS_PROC_LOCK_OWN_IMPL
+#define ERTS_FORCE_ENIF_SEND_DELAY() 0
+#else
+/*
+ * We always schedule messages if we do not use our own
+ * process lock implementation, since we might otherwise do a
+ * trylock on a lock that is already held by the same thread,
+ * and the behaviour of that is not guaranteed by all mutex
+ * implementations.
+ */
+#define ERTS_FORCE_ENIF_SEND_DELAY() 1
+#endif
+#endif
+
+int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
+{
+ ErlTraceMessageQueue *msgq, **last_msgq;
+ int reds = 0;
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ msgq = c_p->trace_msg_q;
+
+ if (!msgq)
+ goto error;
+
+ do {
+ Process* rp;
+ ErtsProcLocks rp_locks;
+ ErtsMessage *first, **last;
+ Uint len;
+
+ first = msgq->first;
+ last = msgq->last;
+ len = msgq->len;
+ msgq->first = NULL;
+ msgq->last = &msgq->first;
+ msgq->len = 0;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ ASSERT(len != 0);
+
+ rp = erts_proc_lookup(msgq->receiver);
+ if (rp) {
+ rp_locks = 0;
+ if (rp->common.id == c_p->common.id)
+ rp_locks = c_p_locks;
+ erts_queue_messages(rp, &rp_locks, first, last, len);
+ if (rp->common.id == c_p->common.id)
+ rp_locks &= ~c_p_locks;
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ reds += len;
+ } else {
+ erts_cleanup_messages(first);
+ }
+ reds += 1;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ msgq = msgq->next;
+ } while (msgq);
+
+ last_msgq = &c_p->trace_msg_q;
+
+ while (*last_msgq) {
+ msgq = *last_msgq;
+ if (msgq->len == 0) {
+ *last_msgq = msgq->next;
+ erts_free(ERTS_ALC_T_TRACE_MSG_QUEUE, msgq);
+ } else {
+ last_msgq = &msgq->next;
+ }
+ }
+
+error:
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ return reds;
+}
+
+#endif
+
+int
+enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+ ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
- ErtsProcLocks rp_locks = 0;
+ ErtsProcLocks rp_locks = 0, lc_locks = 0, c_p_locks = ERTS_PROC_LOCK_MAIN;
Process* rp;
Process* c_p;
ErtsMessage *mp;
Eterm receiver = to_pid->pid;
int flush_me = 0;
- int scheduler = erts_get_scheduler_id() != 0;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ int scheduler = esdp ? esdp->no : 0;
if (env != NULL) {
c_p = env->proc;
if (receiver == c_p->common.id) {
- rp_locks = ERTS_PROC_LOCK_MAIN;
+ rp_locks = c_p_locks;
flush_me = 1;
}
}
@@ -329,12 +427,13 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#else
erts_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
#endif
- }
+ }
rp = (scheduler
? erts_proc_lookup(receiver)
: erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
receiver, rp_locks, ERTS_P2P_FLG_INC_REFC));
+
if (rp == NULL) {
ASSERT(env == NULL || receiver != c_p->common.id);
return 0;
@@ -353,7 +452,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
menv->env.heap_frag = NULL;
MBUF(&menv->phony_proc) = NULL;
}
- ASSERT(!is_offheap(&MSO(&menv->phony_proc)));
} else {
Uint sz = size_object(msg);
Eterm *hp;
@@ -362,14 +460,91 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ASSERT(hp == mp->hfrag.mem+mp->hfrag.used_size);
}
- if (flush_me) {
- flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+ ERL_MESSAGE_TERM(mp) = msg;
+
+ if (flush_me) {
+ flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+ }
+
+ if (!env || !env->tracee) {
+
+ if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND))
+ trace_send(c_p, receiver, msg);
+
+#ifndef ERTS_SMP
+ }
+#endif
+
+ erts_queue_message(rp, &rp_locks, mp, msg);
+#ifdef ERTS_SMP
}
- erts_queue_message(rp, &rp_locks, mp, msg, am_undefined);
+ else {
+ /* This clause is taken when the nif is called in the context
+ of a traced process. We do not know which locks we have
+ so we have to do a try lock and if that fails we enqueue
+ the message in a special trace message output queue of the
+ tracee */
+ ErlTraceMessageQueue *msgq;
+ Process *t_p = env->tracee;
+
+
+ erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+
+ msgq = t_p->trace_msg_q;
+
+ while (msgq != NULL) {
+ if (msgq->receiver == receiver) {
+ break;
+ }
+ msgq = msgq->next;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ lc_locks = erts_proc_lc_my_proc_locks(rp);
+ rp_locks |= lc_locks;
+ if (receiver == c_p->common.id)
+ c_p_locks |= lc_locks;
+#endif
+ if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
+ rp_locks & ERTS_PROC_LOCK_MSGQ ||
+ erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+
+ if (!msgq) {
+
+ msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
+ sizeof(ErlTraceMessageQueue));
+ msgq->receiver = receiver;
+ msgq->first = mp;
+ msgq->last = &mp->next;
+ msgq->len = 1;
+
+ /* Insert in linked list */
+ msgq->next = t_p->trace_msg_q;
+ t_p->trace_msg_q = msgq;
+
+ erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+
+ erts_schedule_flush_trace_messages(t_p->common.id);
+
+ } else {
+ msgq->len++;
+ *msgq->last = mp;
+ msgq->last = &mp->next;
+ erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ }
+ } else {
+ erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ rp_locks &= ~ERTS_PROC_LOCK_TRACE;
+ rp_locks |= ERTS_PROC_LOCK_MSGQ;
+ erts_queue_message(rp, &rp_locks, mp, msg);
+ }
+ }
+#endif
+
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ if (rp_locks & ~lc_locks)
+ erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
if (flush_me) {
@@ -398,6 +573,9 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
if (!prt)
return 0;
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, env->proc->common.id, am_command, msg);
+
return erts_port_output_async(prt, env->proc->common.id, msg);
}
@@ -1485,10 +1663,10 @@ static void close_lib(struct erl_module_nif* lib)
ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
if (lib->entry != NULL && lib->entry->unload != NULL) {
- ErlNifEnv env;
- pre_nif_noproc(&env, lib);
- lib->entry->unload(&env, lib->priv_data);
- post_nif_noproc(&env);
+ struct enif_msg_environment_t msg_env;
+ pre_nif_noproc(&msg_env, lib, NULL);
+ lib->entry->unload(&msg_env.env, lib->priv_data);
+ post_nif_noproc(&msg_env);
}
if (!erts_is_static_nif(lib->handle))
erts_sys_ddll_close(lib->handle);
@@ -1634,10 +1812,10 @@ static void nif_resource_dtor(Binary* bin)
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
if (type->dtor != NULL) {
- ErlNifEnv env;
- pre_nif_noproc(&env, type->owner);
- type->dtor(&env,resource->data);
- post_nif_noproc(&env);
+ struct enif_msg_environment_t msg_env;
+ pre_nif_noproc(&msg_env, type->owner, NULL);
+ type->dtor(&msg_env.env, resource->data);
+ post_nif_noproc(&msg_env);
}
if (erts_refc_dectest(&type->refc, 0) == 0) {
ASSERT(type->next == NULL);
@@ -2797,7 +2975,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
}
- erts_pre_nif(&env, BIF_P, lib);
+ erts_pre_nif(&env, BIF_P, lib, NULL);
veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
if (veto) {
@@ -2818,7 +2996,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
goto error;
}
- erts_pre_nif(&env, BIF_P, lib);
+ erts_pre_nif(&env, BIF_P, lib, NULL);
veto = entry->upgrade(&env, &lib->priv_data, &mod->old.nif->priv_data, BIF_ARG_2);
erts_post_nif(&env);
if (veto) {
@@ -2829,7 +3007,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
commit_opened_resource_types(lib);
}
else if (entry->load != NULL) { /********* Initial load ***********/
- erts_pre_nif(&env, BIF_P, lib);
+ erts_pre_nif(&env, BIF_P, lib, NULL);
veto = entry->load(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
if (veto) {
@@ -2916,6 +3094,9 @@ erts_unload_nif(struct erl_module_nif* lib)
ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
+
+ erts_tracer_nif_clear();
+
for (rt = resource_type_list.next;
rt != &resource_type_list;
rt = next) {
@@ -2958,6 +3139,76 @@ void erl_nif_init()
resource_type_list.owner = NULL;
resource_type_list.module = THE_NON_VALUE;
resource_type_list.name = THE_NON_VALUE;
+
+}
+
+int erts_nif_get_funcs(struct erl_module_nif* mod,
+ ErlNifFunc **funcs)
+{
+ *funcs = mod->entry->funcs;
+ return mod->entry->num_of_funcs;
+}
+
+Eterm erts_nif_call_function(Process *p, Process *tracee,
+ struct erl_module_nif* mod,
+ ErlNifFunc *fun, int argc, Eterm *argv)
+{
+ Eterm nif_result;
+#ifdef DEBUG
+ /* Verify that function is part of this module */
+ int i;
+ for (i = 0; i < mod->entry->num_of_funcs; i++)
+ if (fun == mod->entry->funcs+i)
+ break;
+ ASSERT(i < mod->entry->num_of_funcs);
+ if (p)
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
+ || erts_smp_thr_progress_is_blocking());
+#endif
+ if (p) {
+ /* This is almost a normal nif call like in beam_emu,
+ except that any heap fragment created in the nif will be
+ discarded without checking if anything in it is live.
+ This is because we cannot do a GC here as we don't know
+ the number of live registers that have to be preserved.
+ This means that any heap part of the returned term may
+ not be used outside this function. */
+ struct enif_environment_t env;
+ ErlHeapFragment *orig_hf = MBUF(p);
+ ErlOffHeap orig_oh = MSO(p);
+ ASSERT(is_internal_pid(p->common.id));
+ MBUF(p) = NULL;
+ clear_offheap(&MSO(p));
+
+ erts_pre_nif(&env, p, mod, tracee);
+ nif_result = (*fun->fptr)(&env, argc, argv);
+ if (env.exception_thrown)
+ nif_result = THE_NON_VALUE;
+ erts_post_nif(&env);
+
+ /* Free any offheap and heap fragments created in nif */
+ if (MSO(p).first) {
+ erts_cleanup_offheap(&MSO(p));
+ clear_offheap(&MSO(p));
+ }
+ if (MBUF(p))
+ free_message_buffer(MBUF(p));
+
+ /* restore original heap fragment list */
+ MBUF(p) = orig_hf;
+ MSO(p) = orig_oh;
+ } else {
+ /* Nif call was done without a process context,
+ so we create a phony one. */
+ struct enif_msg_environment_t msg_env;
+ pre_nif_noproc(&msg_env, mod, tracee);
+ nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
+ if (msg_env.env.exception_thrown)
+ nif_result = THE_NON_VALUE;
+ post_nif_noproc(&msg_env);
+ }
+
+ return nif_result;
}
#ifdef USE_VM_PROBES
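
The tracee branch of enif_send above avoids blocking on a lock that the tracee may already hold: it tries the message-queue lock and, when that is not possible, parks the message on a per-tracee trace message queue that erts_flush_trace_messages drains later from a safe context. A simplified self-contained sketch of the trylock-or-defer idea, using pthreads and hypothetical names (send_or_defer, flush_deferred); unlike the real code it prepends rather than appends, purely to keep the example short:

#include <pthread.h>
#include <stddef.h>

typedef struct item { struct item *next; int payload; } item;

static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER;
static item *queue_head;      /* protected by queue_mtx */
static item *deferred_head;   /* only touched by the producing thread */

/* Try to deliver directly; if the lock is contended, defer instead of
   blocking, the same idea as the trace message output queue above. */
static void send_or_defer(item *it)
{
    if (pthread_mutex_trylock(&queue_mtx) == 0) {
        it->next = queue_head;
        queue_head = it;
        pthread_mutex_unlock(&queue_mtx);
    } else {
        it->next = deferred_head;
        deferred_head = it;   /* flushed later from a safe context */
    }
}

/* Called later, when blocking on the lock is allowed. */
static void flush_deferred(void)
{
    pthread_mutex_lock(&queue_mtx);
    while (deferred_head) {
        item *it = deferred_head;
        deferred_head = it->next;
        it->next = queue_head;
        queue_head = it;
    }
    pthread_mutex_unlock(&queue_mtx);
}

int main(void)
{
    static item a = { NULL, 1 }, b = { NULL, 2 };
    send_or_defer(&a);
    send_or_defer(&b);
    flush_deferred();
    return 0;
}
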
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 44a2581436..3964f7f679 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -87,16 +87,16 @@ typedef ErlNifSInt64 ErlNifTime;
#define ERL_NIF_TIME_ERROR ((ErlNifSInt64) ERTS_NAPI_TIME_ERROR__)
typedef enum {
- ERL_NIF_SEC = ERTS_NAPI_SEC__,
- ERL_NIF_MSEC = ERTS_NAPI_MSEC__,
- ERL_NIF_USEC = ERTS_NAPI_USEC__,
- ERL_NIF_NSEC = ERTS_NAPI_NSEC__
+ ERL_NIF_SEC = ERTS_NAPI_SEC__,
+ ERL_NIF_MSEC = ERTS_NAPI_MSEC__,
+ ERL_NIF_USEC = ERTS_NAPI_USEC__,
+ ERL_NIF_NSEC = ERTS_NAPI_NSEC__
} ErlNifTimeUnit;
struct enif_environment_t;
typedef struct enif_environment_t ErlNifEnv;
-typedef struct
+typedef struct enif_func_t
{
const char* name;
unsigned arity;
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 59aa034f48..c2588e718d 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -944,7 +944,7 @@ erts_schedule_proc2port_signal(Process *,
ErtsPortTaskHandle *,
ErtsProc2PortSigCallback);
-int erts_deliver_port_exit(Port *, Eterm, Eterm, int);
+int erts_deliver_port_exit(Port *, Eterm, Eterm, int, int);
/*
* Port signal flags
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 30ce181ebb..3102e44c11 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1766,6 +1766,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
if (!(state & ERTS_PORT_SFLGS_DEAD)) {
DTRACE_DRIVER(driver_timeout, pp);
LTTNG_DRIVER(driver_timeout, pp);
+ if (IS_TRACED_FL(pp, F_TRACE_RECEIVE))
+ trace_port(pp, am_receive, am_timeout);
(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
}
}
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index d222e79f56..a520005c35 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -444,7 +444,8 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
typedef enum {
ERTS_PSTT_GC, /* Garbage Collect */
ERTS_PSTT_CPC, /* Check Process Code */
- ERTS_PSTT_COHMQ /* Change off heap message queue */
+ ERTS_PSTT_COHMQ, /* Change off heap message queue */
+ ERTS_PSTT_FTMQ /* Flush trace msg queue */
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -1138,7 +1139,7 @@ reply_sched_wall_time(void *vswtrp)
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (swtrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -1217,7 +1218,7 @@ reply_system_check(void *vscrp)
hpp = &hp;
msg = STORE_NC(hpp, ohp, scrp->ref);
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (scrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -2311,7 +2312,7 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
ERTS_PORT_SFLG_HALT);
erts_smp_atomic32_inc_nob(&erts_halt_progress);
if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING)))
- erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1);
}
erts_port_release(prt);
@@ -9315,6 +9316,7 @@ Process *schedule(Process *p, int calls)
esdp = erts_scheduler_data;
ASSERT(esdp->current_process == p);
#endif
+
reds = actual_reds = calls - esdp->virtual_reds;
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
@@ -9327,27 +9329,39 @@ Process *schedule(Process *p, int calls)
p->reds += actual_reds;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+#ifdef ERTS_SMP
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_TRACE);
+ if (p->trace_msg_q) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE);
+ erts_schedule_flush_trace_messages(p->common.id);
+ } else
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE);
+#endif
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_smp_atomic32_read_nob(&p->state);
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) {
+ if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ((state & ERTS_PSFLG_FREE)
- ? am_out_exited
- : am_out_exiting));
+ trace_sched(p, ERTS_PROC_LOCK_MAIN,
+ ((state & ERTS_PSFLG_FREE)
+ ? am_out_exited
+ : am_out_exiting));
}
else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
- trace_sched(p, am_out);
- else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(p, am_out);
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
}
}
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
+ /* have to re-read state after taking lock */
+ state = erts_smp_atomic32_read_nob(&p->state);
+
#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
@@ -9844,27 +9858,27 @@ Process *schedule(Process *p, int calls)
p->fcalls = reds;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
if (IS_TRACED(p)) {
if (state & ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, am_in_exiting);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
}
else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
- trace_sched(p, am_in);
- else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(p, am_in);
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
}
if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
}
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#ifdef ERTS_SMP
- if (is_not_nil(ERTS_TRACER_PROC(p)))
- erts_check_my_tracer_proc(p);
+ /* Clears tracer if it has been removed */
+ (void)ERTS_TRACER_PROC_IS_ENABLED(p);
#endif
if (state & ERTS_PSFLG_RUNNING_SYS) {
@@ -9996,7 +10010,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -10164,8 +10178,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
{
int garbage_collected = 0;
erts_aint32_t state = *statep;
- int max_reds = in_reds;
- int reds = 0;
+ int reds = in_reds;
int qmask = 0;
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
@@ -10193,12 +10206,12 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
if (c_p->flags & F_DISABLE_GC) {
save_gc_task(c_p, st, st_prio);
st = NULL;
- reds++;
+ reds--;
}
else {
if (!garbage_collected) {
FLAGS(c_p) |= F_NEED_FULLSWEEP;
- reds += erts_garbage_collect_nobump(c_p,
+ reds -= erts_garbage_collect_nobump(c_p,
0,
c_p->arg_reg,
c_p->arity);
@@ -10207,21 +10220,30 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
st_res = am_true;
}
break;
- case ERTS_PSTT_CPC:
+ case ERTS_PSTT_CPC: {
+ int cpc_reds = 0;
st_res = erts_check_process_code(c_p,
st->arg[0],
unsigned_val(st->arg[1]),
- &reds);
+ &cpc_reds);
+ reds -= cpc_reds;
if (is_non_value(st_res)) {
/* Needed gc, but gc was disabled */
save_gc_task(c_p, st, st_prio);
st = NULL;
}
break;
+ }
case ERTS_PSTT_COHMQ:
- reds += erts_complete_off_heap_message_queue_change(c_p);
+ reds -= erts_complete_off_heap_message_queue_change(c_p);
+ st_res = am_true;
+ break;
+#ifdef ERTS_SMP
+ case ERTS_PSTT_FTMQ:
+ reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
st_res = am_true;
break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -10231,11 +10253,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
reds += notify_sys_task_executed(c_p, st, st_res);
state = erts_smp_atomic32_read_acqb(&c_p->state);
- } while (qmask && reds < max_reds);
+ } while (qmask && reds > 0);
*statep = state;
- return reds;
+ return in_reds - reds;
}
static int
@@ -10267,6 +10289,12 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
case ERTS_PSTT_COHMQ:
st_res = am_false;
break;
+#ifdef ERTS_SMP
+ case ERTS_PSTT_FTMQ:
+ reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
+ st_res = am_true;
+ break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -10402,8 +10430,8 @@ badarg:
BIF_ERROR(BIF_P, BADARG);
}
-void
-erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
+static void
+erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
{
Process *rp = erts_proc_lookup(pid);
if (rp) {
@@ -10413,7 +10441,7 @@ erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(0));
- st->type = ERTS_PSTT_COHMQ;
+ st->type = type;
st->requester = NIL;
st->reply_tag = NIL;
st->req_id = NIL;
@@ -10428,6 +10456,18 @@ erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
}
}
+void
+erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ);
+}
+
+void
+erts_schedule_flush_trace_messages(Eterm pid)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ);
+}
+
static void
save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
{
@@ -10886,6 +10926,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm res = THE_NON_VALUE;
erts_aint32_t state = 0;
erts_aint32_t prio = (erts_aint32_t) PRIORITY_NORMAL;
+ ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL;
#ifdef SHCOPY_SPAWN
erts_shcopy_t info;
INITIALIZE_SHCOPY(info);
@@ -11062,7 +11103,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
: STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
- erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER_PROC(p));
+ erts_get_default_proc_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER(p));
p->msg.first = NULL;
p->msg.last = &p->msg.first;
@@ -11098,21 +11139,58 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->last_old_htop = NULL;
#endif
+#ifdef ERTS_SMP
+ p->trace_msg_q = NULL;
+ p->scheduler_data = NULL;
+ p->suspendee = NIL;
+ p->pending_suspenders = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
+#endif
+
+#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
+ p->fp_exception = 0;
+#endif
+
if (IS_TRACED(parent)) {
if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) {
ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
}
- if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
- trace_proc_spawn(parent, p->common.id, mod, func, args);
- }
- if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
/* Overrides TRACE_CHILDREN */
ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
}
+ if (so->flags & SPO_LINK && ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ }
+ }
+ if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
+ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args);
+ if (so->flags & SPO_LINK)
+ trace_proc(parent, locks, parent, am_link, p->common.id);
+ }
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_PROCS)) {
+ if (locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) {
+ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_smp_proc_unlock(p, locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE));
+ erts_smp_proc_unlock(parent, locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE));
+ }
+ trace_proc_spawn(p, am_spawned, parent->common.id, mod, func, args);
+ if (so->flags & SPO_LINK)
+ trace_proc(p, locks, p, am_getting_linked, parent->common.id);
}
/*
@@ -11123,10 +11201,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef DEBUG
int ret;
#endif
- if (IS_TRACED_FL(parent, F_TRACE_PROCS)) {
- trace_proc(parent, parent, am_link, p->common.id);
- }
-
#ifdef DEBUG
ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
ASSERT(ret == 0);
@@ -11137,17 +11211,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
#endif
- if (IS_TRACED(parent)) {
- if (ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
- ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); /*maybe steal*/
-
- if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
- ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- }
- }
- }
}
/*
@@ -11162,19 +11225,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
so->mref = mref;
}
-#ifdef ERTS_SMP
- p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
- p->pending_exit.reason = THE_NON_VALUE;
- p->pending_exit.bp = NULL;
-#endif
-
-#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
- p->fp_exception = 0;
-#endif
-
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ erts_smp_proc_unlock(p, locks);
res = p->common.id;
@@ -11198,7 +11249,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
error:
- erts_smp_proc_unlock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
return res;
}
@@ -11223,7 +11274,7 @@ void erts_init_empty_process(Process *p)
p->rcount = 0;
p->common.id = ERTS_INVALID_PID;
p->reds = 0;
- ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACER(p) = erts_tracer_nil;
ERTS_TRACE_FLAGS(p) = F_INITIAL_TRACE_FLAGS;
p->group_leader = ERTS_INVALID_PID;
p->flags = 0;
@@ -11343,7 +11394,7 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->live_hf_end == ERTS_INVALID_HFRAG_PTR);
ASSERT(p->heap == NULL);
ASSERT(p->common.id == ERTS_INVALID_PID);
- ASSERT(ERTS_TRACER_PROC(p) == NIL);
+ ASSERT(ERTS_TRACER_IS_NIL(ERTS_TRACER(p)));
ASSERT(ERTS_TRACE_FLAGS(p) == F_INITIAL_TRACE_FLAGS);
ASSERT(p->group_leader == ERTS_INVALID_PID);
ASSERT(p->next == NULL);
@@ -11690,7 +11741,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
mess = copy_struct(exit_term, term_size, &hp, ohp);
#endif
- erts_queue_message(to, to_locksp, mp, mess, NIL);
+ erts_queue_message(to, to_locksp, mp, mess);
} else {
Eterm temp_token;
Uint sz_token;
@@ -11708,9 +11759,10 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
mess = copy_struct(exit_term, term_size, &hp, ohp);
#endif
/* the trace token must in this case be updated by the caller */
- seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL);
+ seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, to);
temp_token = copy_struct(token, sz_token, &hp, ohp);
- erts_queue_message(to, to_locksp, mp, mess, temp_token);
+ ERL_MESSAGE_TOKEN(mp) = temp_token;
+ erts_queue_message(to, to_locksp, mp, mess);
}
}
@@ -11819,6 +11871,9 @@ send_exit_signal(Process *c_p, /* current process if and only
if ((state & ERTS_PSFLG_TRAP_EXIT)
&& (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
+ /* have to release the status lock in order to send the exit message */
+ erts_smp_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND);
+ *rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
if (have_seqtrace(token) && token_update)
seq_trace_update_send(token_update);
if (is_value(exit_tuple))
@@ -12116,6 +12171,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
DistEntry *dep;
Process *rp;
+
switch(lnk->type) {
case LINK_PID:
if(is_internal_port(item)) {
@@ -12163,7 +12219,11 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(p, rp, am_getting_unlinked, p->common.id);
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
+ trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id);
}
}
}
@@ -12280,8 +12340,6 @@ erts_do_exit_process(Process* p, Eterm reason)
if (IS_TRACED_FL(p, F_TRACE_CALLS))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
- trace_proc(p, p, am_exit, reason);
}
erts_trace_check_exiting(p->common.id);
@@ -12297,6 +12355,10 @@ erts_do_exit_process(Process* p, Eterm reason)
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason);
+
+
/*
* p->u.initial of this process can *not* be used anymore;
* will be overwritten by misc termination data.
@@ -12436,6 +12498,9 @@ erts_continue_exit_process(Process *p)
ASSERT(!p->common.u.alive.reg);
}
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, curr_locks, am_out_exited);
+
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
curr_locks = ERTS_PROC_LOCKS_ALL;
@@ -12559,6 +12624,12 @@ erts_continue_exit_process(Process *p)
if (nif_export)
erts_destroy_nif_export(nif_export);
+#ifdef ERTS_SMP
+ erts_flush_trace_messages(p, 0);
+#endif
+
+ ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
+
delete_process(p);
#ifdef ERTS_SMP
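For context, the send_exit_message hunk above shows the calling convention used throughout this merge: erts_queue_message no longer takes a token argument, and a seq-trace token is instead attached to the message with ERL_MESSAGE_TOKEN before queueing. A minimal sketch of that pattern follows; rp, rp_locks, term and token are hypothetical placeholders, not names from the patch.

/* Illustrative sketch only: queue a message that carries a seq-trace
 * token using the 4-argument erts_queue_message shown above. */
Uint tsz = size_object(term), toksz = size_object(token);
Eterm *hp;
ErlOffHeap *ohp;
ErtsMessage *mp = erts_alloc_message_heap(rp, &rp_locks, tsz + toksz, &hp, &ohp);
Eterm msg = copy_struct(term, tsz, &hp, ohp);
ERL_MESSAGE_TOKEN(mp) = copy_struct(token, toksz, &hp, ohp); /* token no longer an argument */
erts_queue_message(rp, &rp_locks, mp, msg);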
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index c2303eea49..61acf5924b 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1041,6 +1041,7 @@ struct process {
#ifdef ERTS_SMP
ErlMessageInQueue msg_inq;
+ ErlTraceMessageQueue *trace_msg_q;
ErtsPendExit pending_exit;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
@@ -1358,7 +1359,6 @@ extern int erts_system_profile_ts_type;
#define F_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
/* process trace_flags */
-
#define F_NOW_TS (ERTS_TRACE_FLG_NOW_TIMESTAMP \
<< ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
#define F_STRICT_MON_TS (ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP \
@@ -1380,8 +1380,7 @@ extern int erts_system_profile_ts_type;
#define F_TRACE_ARITY_ONLY F_TRACE_FLAG(12)
#define F_TRACE_RETURN_TO F_TRACE_FLAG(13) /* Return_to trace when breakpoint tracing */
#define F_TRACE_SILENT F_TRACE_FLAG(14) /* No call trace msg suppress */
-#define F_TRACER F_TRACE_FLAG(15) /* May be (has been) tracer */
-#define F_EXCEPTION_TRACE F_TRACE_FLAG(16) /* May have exception trace on stack */
+#define F_EXCEPTION_TRACE F_TRACE_FLAG(15) /* May have exception trace on stack */
/* port trace flags, currently the same as process trace flags */
#define F_TRACE_SCHED_PORTS F_TRACE_FLAG(17) /* Trace of port scheduling */
@@ -1410,12 +1409,14 @@ extern int erts_system_profile_ts_type;
| F_TRACE_SCHED_PORTS | F_TRACE_SCHED_NO \
| F_TRACE_SCHED_EXIT)
+
#define ERTS_TRACEE_MODIFIER_FLAGS \
- (F_TRACE_SILENT | F_TIMESTAMP_MASK | F_TRACE_SCHED_NO)
-#define ERTS_PORT_TRACEE_FLAGS \
- (ERTS_TRACEE_MODIFIER_FLAGS | F_TRACE_PORTS | F_TRACE_SCHED_PORTS)
+ (F_TRACE_SILENT | F_TIMESTAMP_MASK | F_TRACE_SCHED_NO \
+ | F_TRACE_RECEIVE | F_TRACE_SEND)
+#define ERTS_PORT_TRACEE_FLAGS \
+ (ERTS_TRACEE_MODIFIER_FLAGS | F_TRACE_PORTS | F_TRACE_SCHED_PORTS)
#define ERTS_PROC_TRACEE_FLAGS \
- ((TRACEE_FLAGS & ~ERTS_PORT_TRACEE_FLAGS) | ERTS_TRACEE_MODIFIER_FLAGS)
+ ((TRACEE_FLAGS & ~ERTS_PORT_TRACEE_FLAGS) | ERTS_TRACEE_MODIFIER_FLAGS)
#define SEQ_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
@@ -1717,6 +1718,8 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *),
ErtsThrPrgrLaterOp *,
UWord);
void erts_schedule_complete_off_heap_message_queue_change(Eterm pid);
+void erts_schedule_flush_trace_messages(Eterm pid);
+int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
@@ -1996,7 +1999,6 @@ erts_psd_set(Process *p, int ix, void *data)
#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, NTE) \
erts_psd_set((P), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
-
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler);
@@ -2326,14 +2328,17 @@ erts_alloc_message_heap_state(Process *pp,
ErlOffHeap **ohpp)
{
int on_heap;
+ ErtsMessage *mp;
if ((*psp) & ERTS_PSFLG_OFF_HEAP_MSGQ) {
- ErtsMessage *mp = erts_alloc_message(sz, hpp);
+ mp = erts_alloc_message(sz, hpp);
*ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
return mp;
}
- return erts_try_alloc_message_on_heap(pp, psp, plp, sz, hpp, ohpp, &on_heap);
+ mp = erts_try_alloc_message_on_heap(pp, psp, plp, sz, hpp, ohpp, &on_heap);
+ ASSERT(pp || !on_heap);
+ return mp;
}
ERTS_GLB_INLINE ErtsMessage *
@@ -2343,7 +2348,7 @@ erts_alloc_message_heap(Process *pp,
Eterm **hpp,
ErlOffHeap **ohpp)
{
- erts_aint32_t state = erts_smp_atomic32_read_nob(&pp->state);
+ erts_aint32_t state = pp ? erts_smp_atomic32_read_nob(&pp->state) : 0;
return erts_alloc_message_heap_state(pp, &state, plp, sz, hpp, ohpp);
}
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index e1f470d381..a69185bc5c 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -106,6 +106,7 @@ static struct {
Sint16 proc_lock_msgq;
Sint16 proc_lock_btm;
Sint16 proc_lock_status;
+ Sint16 proc_lock_trace;
} lc_id;
#endif
@@ -152,6 +153,7 @@ erts_init_proc_lock(int cpus)
lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
lc_id.proc_lock_btm = erts_lc_get_lock_order_id("proc_btm");
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
+ lc_id.proc_lock_trace = erts_lc_get_lock_order_id("proc_trace");
#endif
}
@@ -1057,6 +1059,11 @@ erts_proc_lock_init(Process *p)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
+ erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id, do_lock_count);
+ ethr_mutex_lock(&p->lock.trace.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.trace.lc);
+#endif
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
@@ -1078,6 +1085,7 @@ erts_proc_lock_fin(Process *p)
erts_mtx_destroy(&p->lock.msgq);
erts_mtx_destroy(&p->lock.btm);
erts_mtx_destroy(&p->lock.status);
+ erts_mtx_destroy(&p->lock.trace);
#endif
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
erts_lcnt_proc_lock_destroy(p);
@@ -1100,26 +1108,29 @@ void erts_lcnt_enable_proc_lock_count(int enable) {
void erts_lcnt_proc_lock_init(Process *p) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) {
erts_lcnt_init_lock_empty(&(p->lock.lcnt_main));
+ erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
erts_lcnt_init_lock_empty(&(p->lock.lcnt_msgq));
erts_lcnt_init_lock_empty(&(p->lock.lcnt_btm));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
erts_lcnt_init_lock_empty(&(p->lock.lcnt_status));
+ erts_lcnt_init_lock_empty(&(p->lock.lcnt_trace));
} else { /* now the common case */
Eterm pid = (p->common.id != ERTS_INVALID_PID) ? p->common.id : NIL;
erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, pid);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, pid);
erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
erts_lcnt_init_lock_x(&(p->lock.lcnt_status),"proc_status",ERTS_LCNT_LT_PROCLOCK, pid);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_trace), "proc_trace", ERTS_LCNT_LT_PROCLOCK, pid);
} /* the lock names should really be aligned to four characters */
} /* logic reversed */
void erts_lcnt_proc_lock_destroy(Process *p) {
erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
erts_lcnt_destroy_lock(&(p->lock.lcnt_btm));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_trace));
}
static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
@@ -1138,10 +1149,11 @@ static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock(&(lock->lcnt_main)); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock(&(lock->lcnt_msgq)); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock(&(lock->lcnt_status)); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock(&(lock->lcnt_trace)); }
}
void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
@@ -1150,44 +1162,50 @@ void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
if (locks & ERTS_PROC_LOCK_MAIN) {
erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
}
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
}
if (locks & ERTS_PROC_LOCK_BTM) {
erts_lcnt_lock_post_x(&(lock->lcnt_btm), file, line);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
- }
if (locks & ERTS_PROC_LOCK_STATUS) {
erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_trace), file, line);
+ }
}
void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_unaquire(&(lock->lcnt_main)); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_unaquire(&(lock->lcnt_status)); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock_unaquire(&(lock->lcnt_trace)); }
}
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_unlock(&(lock->lcnt_main)); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_unlock(&(lock->lcnt_msgq)); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_unlock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_unlock(&(lock->lcnt_status)); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_unlock(&(lock->lcnt_trace)); }
}
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_trylock(&(lock->lcnt_main), res); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_trylock(&(lock->lcnt_msgq), res); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_trylock(&(lock->lcnt_btm), res); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_trylock(&(lock->lcnt_status), res); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_trylock(&(lock->lcnt_trace), res); }
} /* reversed logic */
#endif /* ERTS_ENABLE_LOCK_COUNT */
@@ -1224,6 +1242,10 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
lck.id = lc_id.proc_lock_status;
erts_lc_lock_x(&lck,file,line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_lock_x(&lck,file,line);
+ }
}
void
@@ -1253,6 +1275,10 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
lck.id = lc_id.proc_lock_status;
erts_lc_trylock_x(locked, &lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_trylock_x(locked, &lck, file, line);
+ }
}
void
@@ -1261,6 +1287,10 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_unlock(&lck);
@@ -1292,6 +1322,10 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_might_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_might_unlock(&lck);
@@ -1323,6 +1357,8 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
erts_lc_might_unlock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_might_unlock(&p->lock.status.lc);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_might_unlock(&p->lock.trace.lc);
#endif
}
@@ -1354,6 +1390,10 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
lck.id = lc_id.proc_lock_status;
erts_lc_require_lock(&lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_require_lock(&lck, file, line);
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
erts_lc_require_lock(&p->lock.main.lc, file, line);
@@ -1365,6 +1405,8 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
erts_lc_require_lock(&p->lock.btm.lc, file, line);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_require_lock(&p->lock.status.lc, file, line);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_require_lock(&p->lock.trace.lc, file, line);
#endif
}
@@ -1375,6 +1417,10 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_unrequire_lock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_unrequire_lock(&lck);
@@ -1406,6 +1452,8 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
erts_lc_unrequire_lock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_unrequire_lock(&p->lock.status.lc);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_unrequire_lock(&p->lock.trace.lc);
#endif
}
@@ -1429,6 +1477,8 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_btm;
else if (locks & ERTS_PROC_LOCK_STATUS)
lck.id = lc_id.proc_lock_status;
+ else if (locks & ERTS_PROC_LOCK_TRACE)
+ lck.id = lc_id.proc_lock_trace;
else
erts_lc_fail("Unknown proc lock found");
@@ -1441,14 +1491,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
void erts_proc_lc_chk_only_proc_main(Process *p)
{
-#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
- erts_lc_check_exact(&proc_main, 1);
-#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_check_exact(&p->lock.main.lc, 1);
-#endif
+ erts_proc_lc_chk_only_proc(p, ERTS_PROC_LOCK_MAIN);
}
#if ERTS_PROC_LOCK_OWN_IMPL
@@ -1456,14 +1499,67 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
+{
+ int have_locks_len = 0;
+#if ERTS_PROC_LOCK_OWN_IMPL
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT};
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_main;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_link;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_btm;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_status;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_lc_lock_t have_locks[6];
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ have_locks[have_locks_len++] = p->lock.main.lc;
+ if (locks & ERTS_PROC_LOCK_LINK)
+ have_locks[have_locks_len++] = p->lock.link.lc;
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ have_locks[have_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ have_locks[have_locks_len++] = p->lock.btm.lc;
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ have_locks[have_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
+#endif
+ erts_lc_check_exact(have_locks, have_locks_len);
+}
+
void
erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
{
int have_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
if (locks & ERTS_PROC_LOCK_MAIN) {
@@ -1486,8 +1582,12 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len].id = lc_id.proc_lock_status;
have_locks[have_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[5];
+ erts_lc_lock_t have_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
if (locks & ERTS_PROC_LOCK_LINK)
@@ -1498,6 +1598,8 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len++] = p->lock.btm.lc;
if (locks & ERTS_PROC_LOCK_STATUS)
have_locks[have_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
#endif
erts_lc_check(have_locks, have_locks_len, NULL, 0);
}
@@ -1508,12 +1610,14 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
int have_locks_len = 0;
int have_not_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
- erts_lc_lock_t have_not_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_not_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
@@ -1557,9 +1661,17 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
have_not_locks[have_not_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_trace;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[5];
- erts_lc_lock_t have_not_locks[5];
+ erts_lc_lock_t have_locks[6];
+ erts_lc_lock_t have_not_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
@@ -1581,6 +1693,10 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len++] = p->lock.status.lc;
else
have_not_locks[have_not_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.trace.lc;
#endif
erts_lc_check(have_locks, have_locks_len,
@@ -1590,10 +1706,10 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
ErtsProcLocks
erts_proc_lc_my_proc_locks(Process *p)
{
- int resv[5];
+ int resv[6];
ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t locks[5] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
+ erts_lc_lock_t locks[6] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
@@ -1607,16 +1723,20 @@ erts_proc_lc_my_proc_locks(Process *p)
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
+ ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t locks[5] = {p->lock.main.lc,
+ erts_lc_lock_t locks[6] = {p->lock.main.lc,
p->lock.link.lc,
p->lock.msgq.lc,
p->lock.btm.lc,
- p->lock.status.lc};
+ p->lock.status.lc,
+ p->lock.trace.lc};
#endif
- erts_lc_have_locks(resv, locks, 5);
+ erts_lc_have_locks(resv, locks, 6);
if (resv[0])
res |= ERTS_PROC_LOCK_MAIN;
if (resv[1])
@@ -1627,6 +1747,8 @@ erts_proc_lc_my_proc_locks(Process *p)
res |= ERTS_PROC_LOCK_BTM;
if (resv[4])
res |= ERTS_PROC_LOCK_STATUS;
+ if (resv[5])
+ res |= ERTS_PROC_LOCK_TRACE;
return res;
}
@@ -1634,14 +1756,15 @@ erts_proc_lc_my_proc_locks(Process *p)
void
erts_proc_lc_chk_no_proc_locks(char *file, int line)
{
- int resv[5];
- int ids[5] = {lc_id.proc_lock_main,
+ int resv[6];
+ int ids[6] = {lc_id.proc_lock_main,
lc_id.proc_lock_link,
lc_id.proc_lock_msgq,
lc_id.proc_lock_btm,
- lc_id.proc_lock_status};
- erts_lc_have_lock_ids(resv, ids, 5);
- if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4])) {
+ lc_id.proc_lock_status,
+ lc_id.proc_lock_trace};
+ erts_lc_have_lock_ids(resv, ids, 6);
+ if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4] || resv[5])) {
erts_lc_fail("%s:%d: Thread has process locks locked when expected "
"not to have any process locks locked",
file, line);
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index ac324c6f3e..2cccf0697a 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -66,7 +66,7 @@
#endif
-#define ERTS_PROC_LOCK_MAX_BIT 4
+#define ERTS_PROC_LOCK_MAX_BIT 5
typedef erts_aint32_t ErtsProcLocks;
@@ -84,6 +84,7 @@ typedef struct erts_proc_lock_t_ {
erts_lcnt_lock_t lcnt_msgq;
erts_lcnt_lock_t lcnt_btm;
erts_lcnt_lock_t lcnt_status;
+ erts_lcnt_lock_t lcnt_trace;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
@@ -91,6 +92,7 @@ typedef struct erts_proc_lock_t_ {
erts_mtx_t msgq;
erts_mtx_t btm;
erts_mtx_t status;
+ erts_mtx_t trace;
#else
# error "no implementation"
#endif
@@ -137,9 +139,18 @@ typedef struct erts_proc_lock_t_ {
* Protects the following fields in the process structure:
* * pending_suspenders
* * suspendee
+ * * sys_tasks
* * ...
*/
-#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
+#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << 4)
+
+/*
+ * Trace message lock:
+ * Protects the order in which messages are sent
+ * from trace nifs. This lock is taken inside enif_send.
+ *
+ */
+#define ERTS_PROC_LOCK_TRACE (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
/*
* Special fields:
@@ -154,8 +165,8 @@ typedef struct erts_proc_lock_t_ {
* all process locks are held, and are allowed to be read if
* at least one process lock (whichever one doesn't matter)
* is held:
- * * tracer_proc
- * * tracer_flags
+ * * common.tracer
+ * * common.trace_flags
*
* The following fields are only allowed to be accessed if
* both the schedule queue lock and at least one process lock
@@ -198,17 +209,15 @@ typedef struct erts_proc_lock_t_ {
/* ERTS_PROC_LOCKS_* are combinations of process locks */
-#define ERTS_PROC_LOCKS_MSG_RECEIVE (ERTS_PROC_LOCK_MSGQ \
- | ERTS_PROC_LOCK_STATUS)
-#define ERTS_PROC_LOCKS_MSG_SEND (ERTS_PROC_LOCK_MSGQ \
- | ERTS_PROC_LOCK_STATUS)
+#define ERTS_PROC_LOCKS_MSG_RECEIVE ERTS_PROC_LOCK_MSGQ
+#define ERTS_PROC_LOCKS_MSG_SEND ERTS_PROC_LOCK_MSGQ
#define ERTS_PROC_LOCKS_XSIG_SEND ERTS_PROC_LOCK_STATUS
#define ERTS_PROC_LOCKS_ALL \
((((ErtsProcLocks) 1) << (ERTS_PROC_LOCK_MAX_BIT + 1)) - 1)
#define ERTS_PROC_LOCKS_ALL_MINOR (ERTS_PROC_LOCKS_ALL \
- & ~ERTS_PROC_LOCK_MAIN)
+ & ~ERTS_PROC_LOCK_MAIN)
#define ERTS_PIX_LOCKS_BITS 10
@@ -260,6 +269,7 @@ void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_only_proc_main(Process *p);
+void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_no_proc_locks(char *file, int line);
ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p);
int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks);
@@ -477,9 +487,15 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_STATUS)
if (erts_mtx_trylock(&p->lock.status) == EBUSY)
goto busy_status;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ if (erts_mtx_trylock(&p->lock.trace) == EBUSY)
+ goto busy_trace;
return 0;
+busy_trace:
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_unlock(&p->lock.trace);
busy_status:
if (locks & ERTS_PROC_LOCK_BTM)
erts_mtx_unlock(&p->lock.btm);
@@ -568,6 +584,8 @@ erts_smp_proc_lock__(Process *p,
erts_mtx_lock(&p->lock.btm);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_lock(&p->lock.status);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_lock(&p->lock.trace);
#ifdef ERTS_PROC_LOCK_DEBUG
erts_proc_lock_op_debug(p, locks, 1);
@@ -653,6 +671,8 @@ erts_smp_proc_unlock__(Process *p,
erts_proc_lock_op_debug(p, locks, 0);
#endif
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_unlock(&p->lock.trace);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_unlock(&p->lock.status);
if (locks & ERTS_PROC_LOCK_BTM)
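The erl_process_lock.c/h hunks above add ERTS_PROC_LOCK_TRACE as the new highest lock bit: erts_smp_proc_lock__ acquires it last and erts_smp_proc_unlock__ releases it first, so callers simply include it in the lock mask. A minimal sketch of combining it with the status lock, mirroring the erl_create_process hunk earlier in this diff (p is a hypothetical Process pointer):

/* Illustrative sketch only: take and release the new trace lock
 * together with the status lock; the lock implementation orders the
 * underlying mutexes by lock bit, callers just pass the combined mask. */
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS | ERTS_PROC_LOCK_TRACE);
/* ... work that requires the status and trace locks ... */
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS | ERTS_PROC_LOCK_TRACE);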
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index 97df8bc3fc..a5931ffc25 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -37,14 +37,16 @@
#include "erl_alloc.h"
#include "erl_monitors.h"
-#define ERTS_TRACER_PROC(P) ((P)->common.tracer_proc)
+#define ERTS_TRACER(P) ((P)->common.tracer)
+#define ERTS_TRACER_MODULE(T) (CAR(list_val(T)))
+#define ERTS_TRACER_STATE(T) (CDR(list_val(T)))
#define ERTS_TRACE_FLAGS(P) ((P)->common.trace_flags)
#define ERTS_P_LINKS(P) ((P)->common.u.alive.links)
#define ERTS_P_MONITORS(P) ((P)->common.u.alive.monitors)
#define IS_TRACED(p) \
- (ERTS_TRACER_PROC((p)) != NIL)
+ (ERTS_TRACER(p) != NIL)
#define ARE_TRACE_FLAGS_ON(p,tf) \
((ERTS_TRACE_FLAGS((p)) & (tf|F_SENSITIVE)) == (tf))
#define IS_TRACED_FL(p,tf) \
@@ -56,7 +58,7 @@ typedef struct {
erts_atomic_t atmc;
Sint sint;
} refc;
- Eterm tracer_proc;
+ ErtsTracer tracer;
Uint trace_flags;
erts_smp_atomic_t timer;
union {
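The erl_ptab.h hunk above replaces the tracer_proc pid/port field with an opaque ErtsTracer, and the ERTS_TRACER_MODULE/ERTS_TRACER_STATE macros imply that a non-nil tracer is stored as a [Module|State] pair. A minimal sketch of reading it through the new macros; dbg_print_tracer is a hypothetical helper used only for illustration:

/* Hypothetical debug helper: inspect a tracee's tracer via the
 * macros introduced above. */
static void dbg_print_tracer(Process *p)
{
    ErtsTracer t = ERTS_TRACER(p);
    if (!ERTS_TRACER_IS_NIL(t)) {
        Eterm module = ERTS_TRACER_MODULE(t); /* CAR(list_val(t)) */
        Eterm state  = ERTS_TRACER_STATE(t);  /* CDR(list_val(t)) */
        erts_fprintf(stderr, "tracer module=%T state=%T\n", module, state);
    }
}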
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 947def7398..346404fe2a 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1966,7 +1966,7 @@ send_time_offset_changed_notifications(void *new_offsetp)
*patch_refp = ref;
ASSERT(hsz == size_object(message_template));
message = copy_struct(message_template, hsz, &hp, ohp);
- erts_queue_message(rp, &rp_locks, mp, message, NIL);
+ erts_queue_message(rp, &rp_locks, mp, message);
}
erts_smp_proc_unlock(rp, rp_locks);
}
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 1654ea58d9..bd88769dfc 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -20,6 +20,19 @@
/*
* Support functions for tracing.
+ *
+ * Ideas for future speed improvements in the tracing framework:
+ * * Move ErtsTracerNif into ErtsTracer
+ * + Removes need for locking
+ * + Removes hash lookup overhead
+ * + Use a refc on the ErtsTracerNif to know when it can
+ * be freed. We don't want to allocate a separate
+ * ErtsTracerNif for each module used.
+ * * Optimize GenericBp for cache locality by reusing equivalent
+ * GenericBp and GenericBpData in multiple tracer points.
+ * + Possibly we want to use specialized instructions for different
+ * types of trace so that the knowledge of which struct is used
+ * can be in the instruction.
*/
#ifdef HAVE_CONFIG_H
@@ -39,6 +52,7 @@
#include "erl_bits.h"
#include "erl_thr_progress.h"
#include "erl_bif_unique.h"
+#include "erl_map.h"
#if 0
#define DEBUG_PRINTOUTS
@@ -46,17 +60,15 @@
#undef DEBUG_PRINTOUTS
#endif
-extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
-extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
Export exp_send, exp_receive, exp_timeout;
-static Eterm system_seq_tracer;
-static Uint default_trace_flags;
-static Eterm default_tracer;
+static ErtsTracer system_seq_tracer;
+static Uint default_proc_trace_flags;
+static ErtsTracer default_proc_tracer;
+static Uint default_port_trace_flags;
+static ErtsTracer default_port_tracer;
static Eterm system_monitor;
static Eterm system_profile;
@@ -70,8 +82,6 @@ static erts_smp_rwmtx_t sys_trace_rwmtx;
enum ErtsSysMsgType {
SYS_MSG_TYPE_UNDEFINED,
- SYS_MSG_TYPE_TRACE,
- SYS_MSG_TYPE_SEQTRACE,
SYS_MSG_TYPE_SYSMON,
SYS_MSG_TYPE_ERRLGR,
SYS_MSG_TYPE_PROC_MSG,
@@ -298,43 +308,6 @@ write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer)
return res;
}
-/*
- * Patch a timestamp into a tuple. The tuple MUST be the last thing
- * built on the heap before the call, and the timestamp MUST be
- * the last thing after the call. This since patch_ts() might adjust
- * the size of the used area.
- */
-
-#define PATCH_TS__(Type, Tuple, Hp, Bp, Tracer) \
- do { \
- int ts_type__ = (Type); \
- if (ts_type__) \
- patch_ts(ts_type__, (Tuple), (Hp), (Bp), (Tracer)); \
- } while (0)
-
-#ifdef ERTS_SMP
-#define PATCH_TS(Type, Tuple, Hp, Bp, Tracer) \
- PATCH_TS__((Type), (Tuple), (Hp), (Bp), NULL)
-#else
-#define PATCH_TS(Type, Tuple, Hp, Bp, Tracer) \
- PATCH_TS__((Type), (Tuple), (Hp), (Bp), (Tracer))
-#endif
-
-static ERTS_INLINE void
-patch_ts(int ts_type, Eterm tuple, Eterm* hp, ErlHeapFragment *bp, Process *tracer)
-{
- Eterm *tptr = tuple_val(tuple);
- int arity = arityval(*tptr);
-
- ASSERT(ts_type);
- ASSERT((tptr+arity+1) == hp);
-
- tptr[0] = make_arityval(arity+1);
- tptr[1] = am_trace_ts;
-
- *hp = write_ts(ts_type, hp+1, bp, tracer);
-}
-
#ifdef ERTS_SMP
static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
Eterm from,
@@ -349,6 +322,14 @@ static void enqueue_sys_msg(enum ErtsSysMsgType type,
static void init_sys_msg_dispatcher(void);
#endif
+static void init_tracer_nif(void);
+static int tracer_cmp_fun(void*, void*);
+static HashValue tracer_hash_fun(void*);
+static void *tracer_alloc_fun(void*);
+static void tracer_free_fun(void*);
+
+typedef struct ErtsTracerNif_ ErtsTracerNif;
+
void erts_init_trace(void) {
erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
@@ -362,80 +343,41 @@ void erts_init_trace(void) {
erts_bif_trace_init();
erts_system_monitor_clear(NULL);
erts_system_profile_clear(NULL);
- default_trace_flags = F_INITIAL_TRACE_FLAGS;
- default_tracer = NIL;
- system_seq_tracer = am_false;
+ default_proc_trace_flags = F_INITIAL_TRACE_FLAGS;
+ default_proc_tracer = erts_tracer_nil;
+ default_port_trace_flags = F_INITIAL_TRACE_FLAGS;
+ default_port_tracer = erts_tracer_nil;
+ system_seq_tracer = erts_tracer_nil;
#ifdef ERTS_SMP
init_sys_msg_dispatcher();
#endif
+ init_tracer_nif();
}
-static Eterm system_seq_tracer;
-
#define ERTS_ALLOC_SYSMSG_HEAP(SZ, BPP, OHPP, UNUSED) \
(*(BPP) = new_message_buffer((SZ)), \
*(OHPP) = &(*(BPP))->off_heap, \
(*(BPP))->mem)
-#ifdef ERTS_SMP
-#define ERTS_ENQ_TRACE_MSG(FPID, TPID, MSG, BP) \
-do { \
- ERTS_LC_ASSERT(erts_smp_lc_mtx_is_locked(&smq_mtx)); \
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE, (FPID), (TPID), (MSG), (BP)); \
-} while(0)
-#else
-#define ERTS_ENQ_TRACE_MSG(FPID, TPROC, MSG, BP) \
- do { \
- ErtsMessage *mp__ = erts_alloc_message(0, NULL); \
- mp__->data.heap_frag = (BP); \
- erts_queue_message((TPROC), NULL, mp__, (MSG), NIL); \
- } while (0)
-#endif
-
-/*
- * NOTE that the ERTS_GET_TRACER_REF() returns from the function (!!!)
- * using it, and resets the parameters used if the tracer is invalid, i.e.,
- * use it with extreme care!
- */
-#ifdef ERTS_SMP
-#define ERTS_NULL_TRACER_REF NIL
-#define ERTS_TRACER_REF_TYPE Eterm
- /* In the smp case, we never find the tracer invalid here (the sys
- message dispatcher thread takes care of that). */
-#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
-do { (RES) = (TPID); } while(0)
-int
-erts_is_tracer_proc_valid(Process* p)
-{
- return 1;
-}
-#else
-#define ERTS_NULL_TRACER_REF NULL
-#define ERTS_TRACER_REF_TYPE Process *
-#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
-do { \
- (RES) = erts_proc_lookup((TPID)); \
- if (!(RES) || !(ERTS_TRACE_FLAGS((RES)) & F_TRACER)) { \
- (TPID) = NIL; \
- (TRACEE_FLGS) &= ~TRACEE_FLAGS; \
- return; \
- } \
-} while (0)
-int
-erts_is_tracer_proc_valid(Process* p)
-{
- Process* tracer;
+static ERTS_INLINE int
+send_to_tracer_nif_raw(Process *c_p, Process *tracee, const ErtsTracer tracer,
+ Uint trace_flags, Eterm t_p_id, ErtsTracerNif *tnif,
+ Eterm tag, Eterm msg, Eterm extra, Eterm pam_result);
+static ERTS_INLINE int
+send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
+ Eterm t_p_id, ErtsTracerNif *tnif, Eterm tag,
+ Eterm msg, Eterm extra);
+static ERTS_INLINE Eterm
+call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+ ErtsTracerNif **tnif_ref, Eterm tag, Eterm t_p_id);
+static int
+is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p,
+ ErtsTracerNif **tnif_ret, Eterm tag);
- tracer = erts_proc_lookup(ERTS_TRACER_PROC(p));
- if (tracer && ERTS_TRACE_FLAGS(tracer) & F_TRACER) {
- return 1;
- } else {
- ERTS_TRACER_PROC(p) = NIL;
- ERTS_TRACE_FLAGS(p) = ~TRACEE_FLAGS;
- return 0;
- }
-}
-#endif
+#define SEND_TO_TRACER(c_p, tag, msg) \
+ send_to_tracer_nif(c_p, &(c_p)->common, (c_p)->common.id, NULL, tag, \
+ msg, THE_NON_VALUE)
static Uint active_sched;
@@ -450,19 +392,6 @@ static void
exiting_reset(Eterm exiting)
{
erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (exiting == default_tracer) {
- default_tracer = NIL;
- default_trace_flags &= TRACEE_FLAGS;
-#ifdef DEBUG
- default_trace_flags |= F_INITIAL_TRACE_FLAGS;
-#endif
- }
- if (exiting == system_seq_tracer) {
-#ifdef DEBUG_PRINTOUTS
- erts_fprintf(stderr, "seq tracer %T exited\n", exiting);
-#endif
- system_seq_tracer = am_false;
- }
if (exiting == system_monitor) {
#ifdef ERTS_SMP
system_monitor = NIL;
@@ -487,11 +416,7 @@ erts_trace_check_exiting(Eterm exiting)
{
int reset = 0;
erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
- if (exiting == default_tracer)
- reset = 1;
- else if (exiting == system_seq_tracer)
- reset = 1;
- else if (exiting == system_monitor)
+ if (exiting == system_monitor)
reset = 1;
else if (exiting == system_profile)
reset = 1;
@@ -500,23 +425,26 @@ erts_trace_check_exiting(Eterm exiting)
exiting_reset(exiting);
}
-static ERTS_INLINE int
-is_valid_tracer(Eterm tracer)
+ErtsTracer
+erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new)
{
- return erts_proc_lookup(tracer) || erts_is_valid_tracer_port(tracer);
-}
-
-Eterm
-erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new)
-{
- Eterm old;
-
- if (new != am_false && !is_valid_tracer(new))
- return THE_NON_VALUE;
+ ErtsTracer old;
+
+ if (!ERTS_TRACER_IS_NIL(new)) {
+ Eterm nif_result = call_enabled_tracer(
+ NULL, new, NULL,
+ am_trace_status, am_undefined);
+ switch (nif_result) {
+ case am_trace: break;
+ default:
+ return THE_NON_VALUE;
+ }
+ }
erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
old = system_seq_tracer;
- system_seq_tracer = new;
+ system_seq_tracer = erts_tracer_nil;
+ erts_tracer_update(&system_seq_tracer, new);
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old);
@@ -525,66 +453,132 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new)
return old;
}
-Eterm
+ErtsTracer
erts_get_system_seq_tracer(void)
{
- Eterm st;
+ ErtsTracer st;
erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
st = system_seq_tracer;
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "get seq tracer %T\n", st);
#endif
erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+
+ if (st != erts_tracer_nil &&
+ call_enabled_tracer(NULL, st, NULL, am_trace_status, am_undefined) == am_remove) {
+ erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil);
+ st = erts_tracer_nil;
+ }
+
return st;
}
static ERTS_INLINE void
-get_default_tracing(Uint *flagsp, Eterm *tracerp)
-{
- if (!(default_trace_flags & TRACEE_FLAGS))
- default_tracer = NIL;
-
- if (is_nil(default_tracer)) {
- default_trace_flags &= ~TRACEE_FLAGS;
- } else if (is_internal_pid(default_tracer)) {
- if (!erts_proc_lookup(default_tracer)) {
- reset_tracer:
- default_trace_flags &= ~TRACEE_FLAGS;
- default_tracer = NIL;
- }
+get_default_tracing(Uint *flagsp, ErtsTracer *tracerp,
+ Uint *default_trace_flags,
+ ErtsTracer *default_tracer)
+{
+ if (!(*default_trace_flags & TRACEE_FLAGS))
+ ERTS_TRACER_CLEAR(default_tracer);
+
+ if (ERTS_TRACER_IS_NIL(*default_tracer)) {
+ *default_trace_flags &= ~TRACEE_FLAGS;
} else {
- ASSERT(is_internal_port(default_tracer));
- if (!erts_is_valid_tracer_port(default_tracer))
- goto reset_tracer;
+ Eterm nif_result = call_enabled_tracer(
+ NULL, *default_tracer, NULL,
+ am_trace_status, am_undefined);
+ switch (nif_result) {
+ case am_trace: break;
+ default: {
+ ErtsTracer curr_default_tracer = *default_tracer;
+ if (tracerp) {
+ /* we only have a rlock, so we have to unlock and then rwlock */
+ erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ }
+            /* Check if someone else changed the default tracer
+               while we were acquiring the write lock; if so, we
+               don't do anything. */
+ if (curr_default_tracer == *default_tracer) {
+ *default_trace_flags &= ~TRACEE_FLAGS;
+ ERTS_TRACER_CLEAR(default_tracer);
+ }
+ if (tracerp) {
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+ erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ }
+ }
+ }
}
if (flagsp)
- *flagsp = default_trace_flags;
- if (tracerp)
- *tracerp = default_tracer;
+ *flagsp = *default_trace_flags;
+ if (tracerp) {
+ erts_tracer_update(tracerp,*default_tracer);
+ }
+}
+
+static ERTS_INLINE void
+erts_change_default_tracing(int setflags, Uint flags,
+ const ErtsTracer tracer,
+ Uint *default_trace_flags,
+ ErtsTracer *default_tracer)
+{
+ if (setflags)
+ *default_trace_flags |= flags;
+ else
+ *default_trace_flags &= ~flags;
+
+ erts_tracer_update(default_tracer, tracer);
+
+ get_default_tracing(NULL, NULL, default_trace_flags, default_tracer);
}
void
-erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp)
+erts_change_default_proc_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracer)
{
erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (flagsp) {
- if (setflags)
- default_trace_flags |= *flagsp;
- else
- default_trace_flags &= ~(*flagsp);
- }
- if (tracerp)
- default_tracer = *tracerp;
- get_default_tracing(flagsp, tracerp);
+ erts_change_default_tracing(
+ setflags, flagsp, tracer,
+ &default_proc_trace_flags,
+ &default_proc_tracer);
erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
}
void
-erts_get_default_tracing(Uint *flagsp, Eterm *tracerp)
+erts_change_default_port_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracer)
+{
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
+ erts_change_default_tracing(
+ setflags, flagsp, tracer,
+ &default_port_trace_flags,
+ &default_port_tracer);
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+}
+
+void
+erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
- get_default_tracing(flagsp, tracerp);
+ *tracerp = erts_tracer_nil; /* initialize */
+ get_default_tracing(
+ flagsp, tracerp,
+ &default_proc_trace_flags,
+ &default_proc_tracer);
+ erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+}
+
+void
+erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp)
+{
+ erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ *tracerp = erts_tracer_nil; /* initialize */
+ get_default_tracing(
+ flagsp, tracerp,
+ &default_port_trace_flags,
+ &default_port_tracer);
erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
}
@@ -622,29 +616,21 @@ erts_get_system_profile(void) {
return profile;
}
-#ifdef ERTS_SMP
-static void
-do_send_to_port(Eterm to,
- Port* unused_port,
- Eterm from,
- enum ErtsSysMsgType type,
- Eterm message)
-{
- Uint sz = size_object(message);
- ErlHeapFragment *bp = new_message_buffer(sz);
- Uint *hp = bp->mem;
- Eterm msg = copy_struct(message, sz, &hp, &bp->off_heap);
-
- enqueue_sys_msg_unlocked(type, from, to, msg, bp);
-}
-#define WRITE_SYS_MSG_TO_PORT write_sys_msg_to_port
+#ifdef HAVE_ERTS_NOW_CPU
+# define GET_NOW(m, s, u) \
+do { \
+ if (erts_cpu_timestamp) \
+ erts_get_now_cpu(m, s, u); \
+ else \
+ get_now(m, s, u); \
+} while (0)
#else
-#define WRITE_SYS_MSG_TO_PORT do_send_to_port
+# define GET_NOW(m, s, u) do {get_now(m, s, u);} while (0)
#endif
static void
-WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
+write_sys_msg_to_port(Eterm unused_to,
Port* trace_port,
Eterm unused_from,
enum ErtsSysMsgType unused_type,
@@ -671,150 +657,6 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
erts_free(ERTS_ALC_T_TMP, (void *) buffer);
}
-
-#ifndef ERTS_SMP
-/* Send {trace_ts, Pid, out, 0, Timestamp}
- * followed by {trace_ts, Pid, in, 0, NewTimestamp}
- *
- * 'NewTimestamp' through patch_ts().
- */
-static void
-do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp, int ts_type) {
-#define LOCAL_HEAP_SIZE (5+5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- Eterm message;
- Eterm *hp;
- Eterm mfarity;
-
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- ASSERT(is_pid(pid));
- ASSERT(is_tuple(timestamp));
- ASSERT(*tuple_val(timestamp) == make_arityval(3));
-
- hp = local_heap;
- mfarity = make_small(0);
- message = TUPLE5(hp, am_trace_ts, pid, am_out, mfarity, timestamp);
- /* Note, hp is deliberately NOT incremented since it will be reused */
-
- do_send_to_port(trace_port->common.id,
- trace_port,
- pid,
- SYS_MSG_TYPE_UNDEFINED,
- message);
-
-
- message = TUPLE5(hp, am_trace_ts, pid, am_in, mfarity,
- NIL /* Will be overwritten by timestamp */);
- hp += 6;
- hp[-1] = write_ts(ts_type, hp, NULL, NULL);
-
- do_send_to_port(trace_port->common.id,
- trace_port,
- pid,
- SYS_MSG_TYPE_UNDEFINED,
- message);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-}
-#endif
-
-/* If (c_p != NULL), a fake schedule out/in message pair will be sent,
- * if the driver so requests.
- * It is assumed that 'message' is not an 'out' message.
- *
- * 'c_p' is the currently executing process, "tracee" is the traced process
- * which 'message' concerns => if (*tracee_flags & F_TIMESTAMP_MASK),
- * 'message' must contain a timestamp.
- */
-static void
-send_to_port(Process *c_p, Eterm message,
- Eterm *tracer_pid, Uint *tracee_flags) {
- Port* trace_port;
-#ifndef ERTS_SMP
- int ts_type;
-#define LOCAL_HEAP_SIZE ERTS_TRACE_PATCH_TS_MAX_SIZE
- Eterm ts;
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
-#endif
-
- ASSERT(is_internal_port(*tracer_pid));
-#ifdef ERTS_SMP
- if (is_not_internal_port(*tracer_pid))
- return;
-
- trace_port = NULL;
-#else
-
- trace_port = erts_id2port_sflgs(*tracer_pid,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
-
- if (!trace_port) {
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
- return;
- }
-
- /*
- * Make a fake schedule only if the current process is traced
- * with 'running' and 'timestamp'.
- */
-
- if ( c_p == NULL ||
- (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP_MASK))) {
-#endif
- do_send_to_port(*tracer_pid,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_TRACE,
- message);
-#ifndef ERTS_SMP
- erts_port_release(trace_port);
- return;
- }
-
- /*
- * Note that the process being traced for some type of trace messages
- * (e.g. getting_linked) need not be the current process. That other
- * process might not have timestamps enabled.
- */
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- /* A fake schedule might be needed.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- ts_type = TFLGS_TS_TYPE(c_p);
- ts = write_ts(ts_type, local_heap, NULL, NULL);
-
- trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
- do_send_to_port(*tracer_pid,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_TRACE,
- message);
-
- if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
- /* The driver has just informed us that the last write took a
- * non-neglectible amount of time.
- *
- * We need to fake some trace messages to compensate for the time the
- * current process had to sacrifice for the writing of the previous
- * trace message. We pretend that the process got scheduled out
- * just after writning the real trace message, and now gets scheduled
- * in again.
- */
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts, ts_type);
- }
-
- erts_port_release(trace_port);
-
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#endif
-}
-
#ifndef ERTS_SMP
/* Profile send
* Checks if profiler is port or process
@@ -843,11 +685,11 @@ profile_send(Eterm from, Eterm message) {
0,
ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
if (profiler_port) {
- do_send_to_port(profiler,
- profiler_port,
- NIL, /* or current process->common.id */
- SYS_MSG_TYPE_SYSPROF,
- message);
+ write_sys_msg_to_port(profiler,
+ profiler_port,
+ NIL, /* or current process->common.id */
+ SYS_MSG_TYPE_SYSPROF,
+ message);
erts_port_release(profiler_port);
}
@@ -867,154 +709,26 @@ profile_send(Eterm from, Eterm message) {
else
msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap);
- erts_queue_message(profile_p, NULL, mp, msg, NIL);
+ erts_queue_message(profile_p, NULL, mp, msg);
}
}
#endif
-
-/* A fake schedule out/in message pair will be sent,
- * if the driver so requests.
- *
- * 'c_p' is the currently executing process, may be NULL.
- */
static void
-seq_trace_send_to_port(Process *c_p,
- Eterm seq_tracer,
- Eterm message)
+trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
{
- Port* trace_port;
-#ifndef ERTS_SMP
- int ts_type;
- Eterm ts;
-#define LOCAL_HEAP_SIZE ERTS_TRACE_PATCH_TS_MAX_SIZE
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#endif
-
- ASSERT(is_internal_port(seq_tracer));
-#ifdef ERTS_SMP
- if (is_not_internal_port(seq_tracer))
- return;
-
- trace_port = NULL;
-#else
- trace_port = erts_id2port_sflgs(seq_tracer,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (!trace_port) {
- system_seq_tracer = am_false;
-#ifndef ERTS_SMP
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#endif
- return;
- }
-
- if (c_p == NULL
- || (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP_MASK))) {
-#endif
- do_send_to_port(seq_tracer,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_SEQTRACE,
- message);
-
-#ifndef ERTS_SMP
- erts_port_release(trace_port);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- return;
- }
- /* Make a fake schedule only if the current process is traced
- * with 'running' and 'timestamp'.
- */
-
- /* A fake schedule might be needed.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- ts_type = TFLGS_TS_TYPE(c_p);
- ts = write_ts(ts_type, local_heap, NULL, NULL);
-
- trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
- do_send_to_port(seq_tracer,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_SEQTRACE,
- message);
-
- if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
- /* The driver has just informed us that the last write took a
-     * non-negligible amount of time.
- *
- * We need to fake some trace messages to compensate for the time the
- * current process had to sacrifice for the writing of the previous
- * trace message. We pretend that the process got scheduled out
- * just after writing the real trace message, and now gets scheduled
- * in again.
- */
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts, ts_type);
- }
-
- erts_port_release(trace_port);
-
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#endif
-}
-
-static ERTS_INLINE void
-send_to_tracer(Process *tracee,
- ERTS_TRACER_REF_TYPE tracer_ref,
- Eterm msg,
- Eterm **hpp,
- ErlHeapFragment *bp,
- int no_fake_sched)
-{
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(tracee));
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (is_internal_pid(ERTS_TRACER_PROC(tracee))) {
- PATCH_TS(TFLGS_TS_TYPE(tracee), msg, *hpp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(tracee->common.id, tracer_ref, msg, bp);
- }
- else {
- ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee)));
- PATCH_TS(TFLGS_TS_TYPE(tracee), msg, *hpp, NULL, NULL);
- send_to_port(no_fake_sched ? NULL : tracee,
- msg,
- &ERTS_TRACER_PROC(tracee),
- &ERTS_TRACE_FLAGS(tracee));
- }
-
- erts_smp_mtx_unlock(&smq_mtx);
-
-}
-
-static void
-trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
-{
-#define LOCAL_HEAP_SIZE (5+4+1+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
- Eterm tmp, mess, *hp;
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF;
- int sched_no, curr_func, to_port, no_fake_sched;
+ Eterm tmp, *hp;
+ int curr_func;
+ ErtsTracerNif *tnif = NULL;
- if (is_nil(ERTS_TRACER_PROC(p)))
+ if (ERTS_TRACER_IS_NIL(ERTS_TRACER(p)))
return;
- no_fake_sched = never_fake_sched;
-
switch (what) {
case am_out:
case am_out_exiting:
case am_out_exited:
- no_fake_sched = 1;
- break;
case am_in:
case am_in_exiting:
break;
@@ -1023,16 +737,8 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
break;
}
- sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO);
- to_port = is_internal_port(ERTS_TRACER_PROC(p));
-
- if (!to_port) {
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
- }
+ if (!is_tracer_proc_enabled(p, locks, &p->common, &tnif, what))
+ return;
if (ERTS_PROC_IS_EXITING(p))
curr_func = 0;
@@ -1042,44 +748,16 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
curr_func = p->current != NULL;
}
- UseTmpHeap(LOCAL_HEAP_SIZE,p);
-
- if (to_port)
- hp = local_heap;
- else {
- Uint size = 5;
- if (curr_func)
- size += 4;
- if (sched_no)
- size += 1;
- size += PATCH_TS_SIZE(p);
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
- }
-
if (!curr_func) {
tmp = make_small(0);
} else {
+ hp = HAlloc(p, 4);
tmp = TUPLE3(hp,p->current[0],p->current[1],make_small(p->current[2]));
hp += 4;
}
- if (!sched_no) {
- mess = TUPLE4(hp, am_trace, p->common.id, what, tmp);
- hp += 5;
- }
- else {
-#ifdef ERTS_SMP
- Eterm sched_id = make_small(p->scheduler_data->no);
-#else
- Eterm sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, tmp);
- hp += 6;
- }
-
- send_to_tracer(p, tracer_ref, mess, &hp, bp, no_fake_sched);
- UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
-#undef LOCAL_HEAP_SIZE
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif,
+ what, tmp, THE_NON_VALUE);
}
/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
@@ -1089,9 +767,9 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
* 'out_exiting', or 'out_exited'.
*/
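/* Illustration (hypothetical pid and MFA, not part of this change): a tracee
 * scheduled in while executing lists:foldl/3 yields a message shaped like
 * {trace, <0.42.0>, in, {lists,foldl,3}}; if the process has no current
 * function, the MFA position holds the integer 0 instead.
 */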
void
-trace_sched(Process *p, Eterm what)
+trace_sched(Process *p, ErtsProcLocks locks, Eterm what)
{
- trace_sched_aux(p, what, 0);
+ trace_sched_aux(p, locks, what);
}
/* Send {trace_ts, Pid, Send, Msg, DestPid, Timestamp}
@@ -1102,140 +780,51 @@ trace_sched(Process *p, Eterm what)
void
trace_send(Process *p, Eterm to, Eterm msg)
{
- Eterm operation;
- unsigned sz_msg;
- unsigned sz_to;
- Eterm* hp;
- Eterm mess;
-
- if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)) {
- return;
- }
+ Eterm operation = am_send;
+ ErtsTracerNif *tnif = NULL;
+
+ ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND));
- operation = am_send;
if (is_internal_pid(to)) {
if (!erts_proc_lookup(to))
goto send_to_non_existing_process;
}
else if(is_external_pid(to)
&& external_pid_dist_entry(to) == erts_this_dist_entry) {
- char *s;
send_to_non_existing_process:
- s = "send_to_non_existing_process";
- operation = am_atom_put(s, sys_strlen(s));
+ operation = am_send_to_non_existing_process;
}
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Uint need;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- sz_msg = size_object(msg);
- sz_to = size_object(to);
- need = sz_msg + sz_to + 6 + PATCH_TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- to = copy_struct(to,
- sz_to,
- &hp,
- off_heap);
- msg = copy_struct(msg,
- sz_msg,
- &hp,
- off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ if (is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, operation))
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, operation, msg, to);
}
/* Send {trace_ts, Pid, receive, Msg, Timestamp}
* or {trace, Pid, receive, Msg}
*/
void
-trace_receive(Process *rp, Eterm msg)
+trace_receive(Process *c_p, Eterm msg)
{
- Eterm mess;
- size_t sz_msg;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(rp))) {
-#define LOCAL_HEAP_SIZE (5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(rp), mess, hp, NULL, NULL);
- send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Uint hsz;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(rp)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(rp),
- ERTS_TRACE_FLAGS(rp));
-
- sz_msg = size_object(msg);
-
- hsz = sz_msg + 5 + PATCH_TS_SIZE(rp);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref);
-
- msg = copy_struct(msg, sz_msg, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(rp), mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(rp->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_proc_enabled(NULL, 0, &c_p->common,
+ &tnif, am_receive))
+ send_to_tracer_nif(NULL, &c_p->common, c_p->common.id,
+ tnif, am_receive, msg, THE_NON_VALUE);
}
int
seq_trace_update_send(Process *p)
{
- Eterm seq_tracer = erts_get_system_seq_tracer();
+ ErtsTracer seq_tracer = erts_get_system_seq_tracer();
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
- if ((p->common.id == seq_tracer) || have_no_seqtrace(SEQ_TRACE_TOKEN(p))) {
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(p)) ||
+ (seq_tracer != NIL &&
+ call_enabled_tracer( NULL, seq_tracer, NULL, am_trace_status,
+ p ? p->common.id : am_undefined) != am_trace)
+#ifdef USE_VM_PROBES
+ || (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
+#endif
+ ) {
return 0;
}
SEQ_TRACE_TOKEN_SENDER(p) = p->common.id;
@@ -1263,20 +852,28 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
Eterm receiver, Process *process, Eterm exitfrom)
{
Eterm mess;
- ErlHeapFragment* bp;
Eterm* hp;
Eterm label;
Eterm lastcnt_serial;
Eterm type_atom;
- int sz_exit;
- Eterm seq_tracer;
- int ts_type;
+ ErtsTracer seq_tracer;
+ int seq_tracer_flags = 0;
+#define LOCAL_HEAP_SIZE (64)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
seq_tracer = erts_get_system_seq_tracer();
ASSERT(is_tuple(token) || is_nil(token));
- if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL ||
- (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE)) {
+ if (token == NIL || (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE) ||
+ ERTS_TRACER_IS_NIL(seq_tracer) ||
+ call_enabled_tracer(
+ NULL, seq_tracer, NULL, am_trace_status,
+ process ? process->common.id : am_undefined) != am_trace) {
+ return;
+ }
+
+ if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
+ /* No flags set, nothing to do */
return;
}
@@ -1289,151 +886,29 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
return; /* To avoid warning */
}
- if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
- /* No flags set, nothing to do */
- return;
- }
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- if (seq_tracer == am_false) {
- return; /* no need to send anything */
+ hp = local_heap;
+ label = SEQ_TRACE_T_LABEL(token);
+ lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
+ SEQ_TRACE_T_SERIAL(token));
+ hp += 3;
+ if (exitfrom != NIL) {
+ msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
+ hp += 4;
}
+ mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token),
+ receiver, msg);
+ hp += 6;
- ts_type = ERTS_SEQTFLGS2TSTYPE(unsigned_val(SEQ_TRACE_T_FLAGS(token)));
+ seq_tracer_flags |= ERTS_SEQTFLGS2TFLGS(unsigned_val(SEQ_TRACE_T_FLAGS(token)));
- if (is_internal_port(seq_tracer)) {
-#define LOCAL_HEAP_SIZE (60 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ send_to_tracer_nif_raw(NULL, process, seq_tracer, seq_tracer_flags,
+ label, NULL, am_seq_trace, mess,
+ THE_NON_VALUE, am_true);
- hp = local_heap;
- label = SEQ_TRACE_T_LABEL(token);
- lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
- SEQ_TRACE_T_SERIAL(token));
- hp += 3;
- if (exitfrom != NIL) {
- msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
- hp += 4;
- }
- mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token),
- receiver, msg);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
- if (!ts_type) {
- mess = TUPLE3(hp, am_seq_trace, label, mess);
- seq_trace_send_to_port(NULL, seq_tracer, mess);
- } else {
- mess = TUPLE4(hp, am_seq_trace, label, mess,
- NIL /* Will be overwritten by timestamp */);
- hp += 5;
- hp[-1] = write_ts(ts_type, hp, NULL, NULL);
- seq_trace_send_to_port(process, seq_tracer, mess);
- }
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
-#ifndef ERTS_SMP
- Process* tracer;
-#endif
- Eterm sender_copy;
- Eterm receiver_copy;
- Eterm m2;
- Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender,
- sz_exitfrom, sz_receiver;
-
- ASSERT(is_internal_pid(seq_tracer));
-
-#ifndef ERTS_SMP
-
- tracer = erts_proc_lookup(seq_tracer);
- if (!tracer) {
- system_seq_tracer = am_false;
- return; /* no need to send anything */
- }
-#endif
- if (receiver == seq_tracer) {
- return; /* no need to send anything */
- }
-
- sz_label = size_object(SEQ_TRACE_T_LABEL(token));
- sz_sender = size_object(SEQ_TRACE_T_SENDER(token));
- sz_receiver = size_object(receiver);
- sz_lastcnt_serial = 3; /* TUPLE2 */
- sz_msg = size_object(msg);
-
- sz_ts = patch_ts_size(ts_type);
- if (exitfrom != NIL) {
- sz_exit = 4; /* create {'EXIT',exitfrom,msg} */
- sz_exitfrom = size_object(exitfrom);
- }
- else {
- sz_exit = 0;
- sz_exitfrom = 0;
- }
- bp = new_message_buffer(4 /* TUPLE3 */ + sz_ts + 6 /* TUPLE5 */
- + sz_lastcnt_serial + sz_label + sz_msg
- + sz_exit + sz_exitfrom
- + sz_sender + sz_receiver);
- hp = bp->mem;
- label = copy_struct(SEQ_TRACE_T_LABEL(token), sz_label, &hp, &bp->off_heap);
- lastcnt_serial = TUPLE2(hp,SEQ_TRACE_T_LASTCNT(token),SEQ_TRACE_T_SERIAL(token));
- hp += 3;
- m2 = copy_struct(msg, sz_msg, &hp, &bp->off_heap);
- if (sz_exit) {
- Eterm exitfrom_copy = copy_struct(exitfrom,
- sz_exitfrom,
- &hp,
- &bp->off_heap);
- m2 = TUPLE3(hp, am_EXIT, exitfrom_copy, m2);
- hp += 4;
- }
- sender_copy = copy_struct(SEQ_TRACE_T_SENDER(token),
- sz_sender,
- &hp,
- &bp->off_heap);
- receiver_copy = copy_struct(receiver,
- sz_receiver,
- &hp,
- &bp->off_heap);
- mess = TUPLE5(hp,
- type_atom,
- lastcnt_serial,
- sender_copy,
- receiver_copy,
- m2);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (!ts_type)
- mess = TUPLE3(hp, am_seq_trace, label, mess);
- else {
- mess = TUPLE4(hp, am_seq_trace, label, mess,
- NIL /* Will be overwritten by timestamp */);
- hp += 5;
- /* Write timestamp in element 6 of the 'msg' tuple */
- hp[-1] = write_ts(ts_type, hp, bp,
-#ifndef ERTS_SMP
- tracer
-#else
- NULL
-#endif
- );
- }
-
-#ifdef ERTS_SMP
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SEQTRACE, NIL, NIL, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
-#else
- /* trace_token must be NIL here */
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(tracer, NULL, mp, mess, NIL);
- }
-#endif
- }
}
/* Send {trace_ts, Pid, return_to, {Mod, Func, Arity}, Timestamp}
@@ -1442,63 +917,19 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
void
erts_trace_return_to(Process *p, BeamInstr *pc)
{
-#define LOCAL_HEAP_SIZE (4+5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- Eterm* hp;
Eterm mfa;
- Eterm mess;
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
BeamInstr *code_ptr = find_function_from_pc(pc);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
if (!code_ptr) {
mfa = am_undefined;
} else {
+ Eterm *hp = HAlloc(p, 4);
mfa = TUPLE3(hp, code_ptr[0], code_ptr[1], make_small(code_ptr[2]));
- hp += 4;
}
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_return_to, mfa);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
- /*
- * Find the tracer.
- */
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- size = size_object(mess);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-
- /*
- * Copy the trace message into the buffer and enqueue it.
- */
- mess = copy_struct(mess, size, &hp, off_heap);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- }
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
+ SEND_TO_TRACER(p, am_return_to, mfa);
}
@@ -1506,114 +937,53 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
*/
void
-erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
+erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa;
- Eterm mess;
- Eterm mod, name;
+ Eterm mfa, mod, name;
int arity;
Uint meta_flags, *tracee_flags;
- int ts_type;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
-
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
+
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
+ tracer = &ERTS_TRACER(p);
}
- if (is_nil(*tracer_pid)) {
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
/* Trace disabled */
return;
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
- return;
- }
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
+ */
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
- }
- if (! (*tracee_flags & F_TRACE_CALLS)) {
- return;
}
-
+
mod = fi[0];
name = fi[1];
arity = fi[2];
-
- ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
-
- if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
- mfa = TUPLE3(hp, mod, name, make_small(arity));
- hp += 4;
- mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(ts_type, mess, hp, NULL, NULL);
- send_to_port(p, mess, tracer_pid, tracee_flags);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
- unsigned retval_size;
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
-
- retval_size = size_object(retval);
- size = 6 + 4 + retval_size + patch_ts_size(ts_type);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-
- /*
- * Build the trace tuple and put it into receive queue of the tracer process.
- */
-
- mfa = TUPLE3(hp, mod, name, make_small(arity));
- hp += 4;
- retval = copy_struct(retval, retval_size, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ hp = HAlloc(p, 4);
+ mfa = TUPLE3(hp, mod, name, make_small(arity));
+ hp += 4;
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ NULL, am_return_from, mfa, retval, am_true);
}
/* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
@@ -1625,116 +995,50 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
*/
void
erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
- Eterm *tracer_pid)
+ ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa_tuple;
- Eterm cv;
- Eterm mess;
+ Eterm mfa_tuple, cv;
Uint meta_flags, *tracee_flags;
- int ts_type;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
-
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
+
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
+ tracer = &ERTS_TRACER(p);
}
- if (is_nil(*tracer_pid)) {
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
/* Trace disabled */
return;
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
- return;
- }
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
- if (! (*tracee_flags & F_TRACE_CALLS)) {
- return;
- }
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
+ */
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
}
-
- ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
-
- if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+3+6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
- hp += 4;
- cv = TUPLE2(hp, class, value);
- hp += 3;
- mess = TUPLE5(hp, am_trace, p->common.id, am_exception_from, mfa_tuple, cv);
- hp += 6;
- ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE);
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(ts_type, mess, hp, NULL, NULL);
- send_to_port(p, mess, tracer_pid, tracee_flags);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
- unsigned value_size;
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
-
- value_size = size_object(value);
- size = 6 + 4 + 3 + value_size + patch_ts_size(ts_type);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-
- /*
- * Build the trace tuple and put it into receive queue of the tracer process.
- */
-
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm) mfa[2]));
- hp += 4;
- value = copy_struct(value, value_size, &hp, off_heap);
- cv = TUPLE2(hp, class, value);
- hp += 3;
- mess = TUPLE5(hp, am_trace, p->common.id,
- am_exception_from, mfa_tuple, cv);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+    hp = HAlloc(p, 7);
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
+ hp += 4;
+ cv = TUPLE2(hp, class, value);
+ hp += 3;
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ NULL, am_exception_from, mfa_tuple, cv, am_true);
}
/*
@@ -1753,7 +1057,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
*/
Uint32
erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
- Eterm* args, int local, Eterm *tracer_pid)
+ Eterm* args, int local, ErtsTracer *tracer)
{
Eterm* hp;
Eterm mfa_tuple;
@@ -1761,55 +1065,50 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
int i;
Uint32 return_flags;
Eterm pam_result = am_true;
- Eterm mess;
Uint meta_flags, *tracee_flags;
- int ts_type;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
+ ErtsTracerNif *tnif = NULL;
Eterm transformed_args[MAX_ARG];
- DeclareTypedTmpHeap(ErlSubBin,sub_bin_heap,p);
+ ErtsTracer pre_ms_tracer = erts_tracer_nil;
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
- /* Breakpoint trace enabled without specifying tracer =>
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
+ /* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
- }
- if (is_nil(*tracer_pid)) {
- /* Trace disabled */
- return 0;
+ tracer = &ERTS_TRACER(p);
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
+ /* Trace disabled */
return 0;
}
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
+ if (!is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, am_call)) {
+ return 0;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
- if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
- /* No trace messages for sensitive processes. */
- return 0;
- }
+ */
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
+ /* No trace messages for sensitive processes. */
+ return 0;
+ }
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
+ switch (call_enabled_tracer(p, *tracer, &tnif, am_call, p->common.id)) {
+ default:
+ case am_remove: *tracer = erts_tracer_nil;
+ case am_discard: return 0;
+ case am_trace: break;
+ }
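+        /* The fall-through above is intentional: a 'remove' result first
+         * clears the breakpoint tracer and then, like 'discard', suppresses
+         * the message by returning 0; only 'trace' lets execution continue
+         * towards send_to_tracer_nif_raw() below. */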
}
/*
@@ -1820,18 +1119,13 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* temporarily convert any match contexts to sub binaries.
*/
arity = (Eterm) mfa[2];
- UseTmpHeap(ERL_SUB_BIN_SIZE,p);
-#ifdef DEBUG
- sub_bin_heap->thing_word = 0;
-#endif
for (i = 0; i < arity; i++) {
Eterm arg = args[i];
if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(arg);
ErlBinMatchBuffer* mb = &ms->mb;
Uint bit_size;
-
- ASSERT(sub_bin_heap->thing_word == 0); /* At most one of match context */
+ ErlSubBin *sub_bin_heap = (ErlSubBin *)HAlloc(p, ERL_SUB_BIN_SIZE);
bit_size = mb->size - mb->offset;
sub_bin_heap->thing_word = HEADER_SUB_BIN;
@@ -1848,275 +1142,98 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
}
args = transformed_args;
- ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
-
- if (is_internal_port(*tracer_pid)) {
- Eterm local_heap[64+ERTS_TRACE_PATCH_TS_MAX_SIZE+MAX_ARG];
- hp = local_heap;
+ /*
+ * If there is a PAM program, run it. Return if it fails.
+ *
+ * Some precedence rules:
+ *
+     * - No proc flags, e.g. 'silent' or 'return_to',
+     *   have any effect on meta trace.
+     * - The 'silent' process trace flag silences all call
+     *   related messages, e.g. 'call', 'return_to' and 'return_from'.
+ * - The {message,_} PAM function does not affect {return_trace}.
+ * - The {message,false} PAM function shall give the same
+ * 'call' trace message as no PAM match.
+ * - The {message,true} PAM function shall give the same
+ * 'call' trace message as a nonexistent PAM program.
+ */
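+    /* Illustrative sketch only (hypothetical MFA {foo,bar,1}), showing how
+     * the rules above look from the Erlang side:
+     *
+     *   erlang:trace_pattern({foo,bar,1}, [{'_',[],[{message,false}]}], [])
+     *
+     * suppresses the 'call' message itself but leaves {return_trace} intact,
+     * whereas [{'_',[],[{message,true}]}] traces exactly as if no match
+     * spec had been installed. */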
- if (!erts_is_valid_tracer_port(*tracer_pid)) {
-#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &ERTS_TRACER_PROC(p));
- if (is_not_nil(tracee))
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
-#ifdef ERTS_SMP
- if (is_not_nil(tracee))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
-
- /*
- * If there is a PAM program, run it. Return if it fails.
- *
- * Some precedence rules:
- *
- * - No proc flags, e.g 'silent' or 'return_to'
- * has any effect on meta trace.
- * - The 'silent' process trace flag silences all call
- * related messages, e.g 'call', 'return_to' and 'return_from'.
- * - The {message,_} PAM function does not affect {return_trace}.
- * - The {message,false} PAM function shall give the same
- * 'call' trace message as no PAM match.
- * - The {message,true} PAM function shall give the same
- * 'call' trace message as a nonexistent PAM program.
- */
-
- /* BEGIN this code should be the same for port and pid trace */
- return_flags = 0;
- if (match_spec) {
- pam_result = erts_match_set_run(p, match_spec, args, arity,
- ERTS_PAM_TMP_RESULT, &return_flags);
- if (is_non_value(pam_result)) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- }
- if (tracee_flags == &meta_flags) {
- /* Meta trace */
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- } else {
- /* Non-meta trace */
- if (*tracee_flags & F_TRACE_SILENT) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
- return_flags |= MATCH_SET_RETURN_TO_TRACE;
- }
- }
- /* END this code should be the same for port and pid trace */
-
- /*
-     * Build the {M,F,A} tuple in the local heap.
- * (A is arguments or arity.)
- */
-
- if (*tracee_flags & F_TRACE_ARITY_ONLY) {
- mfa_tuple = make_small(arity);
- } else {
- mfa_tuple = NIL;
- for (i = arity-1; i >= 0; i--) {
- mfa_tuple = CONS(hp, args[i], mfa_tuple);
- hp += 2;
- }
- }
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
- hp += 4;
-
- /*
- * Build the trace tuple and send it to the port.
- */
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
- hp += 5;
- if (pam_result != am_true) {
- hp[-5] = make_arityval(5);
- *hp++ = pam_result;
- }
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(ts_type, mess, hp, NULL, NULL);
- send_to_port(p, mess, tracer_pid, tracee_flags);
- erts_smp_mtx_unlock(&smq_mtx);
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return *tracer_pid == NIL ? 0 : return_flags;
+ return_flags = 0;
+ if (match_spec) {
+ /* we have to make a copy of the tracer here as the match spec
+ may remove it, and we still want to generate a trace message */
+ erts_tracer_update(&pre_ms_tracer, *tracer);
+ tracer = &pre_ms_tracer;
+ pam_result = erts_match_set_run(p, match_spec, args, arity,
+ ERTS_PAM_TMP_RESULT, &return_flags);
+ if (is_non_value(pam_result)) {
+ erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return 0;
+ }
+ }
+ if (tracee_flags == &meta_flags) {
+ /* Meta trace */
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return return_flags;
+ }
} else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- Process *tracer;
- ERTS_TRACER_REF_TYPE tracer_ref;
-#ifdef ERTS_SMP
- Eterm tpid;
-#endif
- unsigned size;
- unsigned sizes[MAX_ARG];
- unsigned pam_result_size = 0;
- int invalid_tracer;
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- *tracer_pid, ERTS_PROC_LOCK_STATUS);
- if (!tracer)
- invalid_tracer = 1;
- else {
- invalid_tracer = !(ERTS_TRACE_FLAGS(tracer) & F_TRACER);
- erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
- }
+ /* Non-meta trace */
+ if (*tracee_flags & F_TRACE_SILENT) {
+ erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return 0;
+ }
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return return_flags;
+ }
+ if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
+ return_flags |= MATCH_SET_RETURN_TO_TRACE;
+ }
+ }
- if (invalid_tracer) {
-#ifdef ERTS_SMP
- ASSERT(is_nil(tracee)
- || tracer_pid == &ERTS_TRACER_PROC(p));
- if (is_not_nil(tracee))
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
-#ifdef ERTS_SMP
- if (is_not_nil(tracee))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
+ ASSERT(!ERTS_TRACER_IS_NIL(*tracer));
-#ifdef ERTS_SMP
- tpid = *tracer_pid; /* Need to save tracer pid,
- since *tracer_pid might
- be reset by erts_match_set_run() */
- tracer_ref = tpid;
-#else
- tracer_ref = tracer;
-#endif
-
- /*
- * If there is a PAM program, run it. Return if it fails.
- *
- * See the rules above in the port trace code.
- */
-
- /* BEGIN this code should be the same for port and pid trace */
- return_flags = 0;
- if (match_spec) {
- pam_result = erts_match_set_run(p, match_spec, args, arity,
- ERTS_PAM_TMP_RESULT, &return_flags);
- if (is_non_value(pam_result)) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- }
- if (tracee_flags == &meta_flags) {
- /* Meta trace */
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- } else {
- /* Non-meta trace */
- if (*tracee_flags & F_TRACE_SILENT) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
- return_flags |= MATCH_SET_RETURN_TO_TRACE;
- }
- }
- /* END this code should be the same for port and pid trace */
-
- /*
- * Calculate number of words needed on heap.
- */
-
- size = 4 + 5; /* Trace tuple + MFA tuple. */
- if (! (*tracee_flags & F_TRACE_ARITY_ONLY)) {
- size += 2*arity;
- for (i = arity-1; i >= 0; i--) {
- sizes[i] = size_object(args[i]);
- size += sizes[i];
- }
- }
- size += patch_ts_size(ts_type);
- if (pam_result != am_true) {
- pam_result_size = size_object(pam_result);
- size += 1 + pam_result_size;
- /* One element in trace tuple + term size. */
- }
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+ /*
+    * Build the {M,F,A} tuple in the local heap.
+ * (A is arguments or arity.)
+ */
- /*
-	 * Build the {M,F,A} tuple in the message buffer.
- * (A is arguments or arity.)
- */
-
- if (*tracee_flags & F_TRACE_ARITY_ONLY) {
- mfa_tuple = make_small(arity);
- } else {
- mfa_tuple = NIL;
- for (i = arity-1; i >= 0; i--) {
- Eterm term = copy_struct(args[i], sizes[i], &hp, off_heap);
- mfa_tuple = CONS(hp, term, mfa_tuple);
- hp += 2;
- }
- }
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
- hp += 4;
-
- /*
- * Copy the PAM result (if any) onto the heap.
- */
-
- if (pam_result != am_true) {
- pam_result = copy_struct(pam_result, pam_result_size, &hp, off_heap);
- }
- erts_match_set_release_result(p);
+ if (*tracee_flags & F_TRACE_ARITY_ONLY) {
+ hp = HAlloc(p, 4);
+ mfa_tuple = make_small(arity);
+ } else {
+ hp = HAlloc(p, 4 + arity * 2);
+ mfa_tuple = NIL;
+ for (i = arity-1; i >= 0; i--) {
+ mfa_tuple = CONS(hp, args[i], mfa_tuple);
+ hp += 2;
+ }
+ }
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
+ hp += 4;
- /*
- * Build the trace tuple and enqueue it.
- */
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
- hp += 5;
- if (pam_result != am_true) {
- hp[-5] = make_arityval(5);
- *hp++ = pam_result;
- }
+ /*
+ * Build the trace tuple and send it to the port.
+ */
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ tnif, am_call, mfa_tuple, THE_NON_VALUE, pam_result);
+ erts_match_set_release_result(p);
- erts_smp_mtx_lock(&smq_mtx);
+ if (match_spec && tracer == &pre_ms_tracer)
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
- PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
+ return return_flags;
}
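/* Illustration (hypothetical call, not part of this change): tracing a call
 * to foo:bar(1) with no {message,_} override produces
 * {trace, Pid, call, {foo,bar,[1]}} (or {foo,bar,1} under the 'arity' flag),
 * while a {message, Extra} match spec action appends Extra as a fifth
 * element of the trace tuple. */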
/* Sends trace message:
@@ -2128,69 +1245,13 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* 't_p' is the traced process.
*/
void
-trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
+trace_proc(Process *c_p, ErtsProcLocks c_p_locks,
+ Process *t_p, Eterm what, Eterm data)
{
- Eterm mess;
- Eterm* hp;
- int need;
-
- ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
- || erts_thr_progress_is_blocking());
- if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
-#define LOCAL_HEAP_SIZE (5+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, NULL, NULL);
- send_to_port(
-#ifndef ERTS_SMP
- /* No fake schedule out and in again after an exit */
- what == am_exit ? NULL : c_p,
-#else
- /* Fake schedule out and in are never sent when smp enabled */
- c_p,
-#endif
- mess,
- &ERTS_TRACER_PROC(t_p),
- &ERTS_TRACE_FLAGS(t_p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Eterm tmp;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- size_t sz_data;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(t_p),
- ERTS_TRACE_FLAGS(t_p));
-
- sz_data = size_object(data);
-
- need = sz_data + 5 + PATCH_TS_SIZE(t_p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- tmp = copy_struct(data, sz_data, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, tmp);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_proc_enabled(c_p, c_p_locks, &t_p->common, &tnif, what))
+ send_to_tracer_nif(c_p, &t_p->common, t_p->common.id, tnif,
+ what, data, THE_NON_VALUE);
}
@@ -2202,62 +1263,20 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
* and 'args' may be a deep term.
*/
void
-trace_proc_spawn(Process *p, Eterm pid,
+trace_proc_spawn(Process *p, Eterm what, Eterm pid,
Eterm mod, Eterm func, Eterm args)
{
- Eterm mfa;
- Eterm mess;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (4+6+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_proc_enabled(p, ERTS_PROC_LOCKS_ALL &
+ ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE),
+ &p->common, &tnif, what)) {
+ Eterm mfa;
+ Eterm* hp;
- hp = local_heap;
- mfa = TUPLE3(hp, mod, func, args);
- hp += 4;
- mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, pid, mfa);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Eterm tmp;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- size_t sz_args, sz_pid;
- Uint need;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- sz_args = size_object(args);
- sz_pid = size_object(pid);
- need = sz_args + 4 + 6 + PATCH_TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- tmp = copy_struct(args, sz_args, &hp, off_heap);
- mfa = TUPLE3(hp, mod, func, tmp);
- hp += 4;
- tmp = copy_struct(pid, sz_pid, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, tmp, mfa);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
+ hp = HAlloc(p, 4);
+ mfa = TUPLE3(hp, mod, func, args);
+ hp += 4;
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, what, pid, mfa);
}
}
@@ -2291,66 +1310,22 @@ void save_calls(Process *p, Export *e)
void
trace_gc(Process *p, Eterm what)
{
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF; /* Initialized
- to eliminate
- compiler
- warning */
+ ErtsTracerNif *tnif = NULL;
Eterm* hp;
Eterm msg = NIL;
- Uint size;
+ Uint size = 0;
-#define LOCAL_HEAP_SIZE \
- (ERTS_PROCESS_GC_INFO_MAX_SIZE) + \
- 5/*4-tuple */ + ERTS_TRACE_PATCH_TS_MAX_SIZE
- DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
+ if (is_tracer_proc_enabled(
+ p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, what)) {
- UseTmpHeap(LOCAL_HEAP_SIZE,p);
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- hp = local_heap;
-#ifdef DEBUG
- size = 0;
(void) erts_process_gc_info(p, &size, NULL);
+ hp = HAlloc(p, size);
- size += 5/*4-tuple*/ + PATCH_TS_SIZE(p);
-#endif
- } else {
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- size = 0;
- (void) erts_process_gc_info(p, &size, NULL);
-
- size += 5/*4-tuple*/ + PATCH_TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
- }
-
- ASSERT(size <= LOCAL_HEAP_SIZE);
+ msg = erts_process_gc_info(p, NULL, &hp);
- msg = erts_process_gc_info(p, NULL, &hp);
-
- msg = TUPLE4(hp, am_trace, p->common.id, what, msg);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- PATCH_TS(TFLGS_TS_TYPE(p), msg, hp, NULL, NULL);
- send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- }
- else {
- PATCH_TS(TFLGS_TS_TYPE(p), msg, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, msg, bp);
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, what,
+ msg, THE_NON_VALUE);
}
- erts_smp_mtx_unlock(&smq_mtx);
- UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
-#undef LOCAL_HEAP_SIZE
}
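/* The two erts_process_gc_info() calls above form a size pass and a build
 * pass: the first, given only a size pointer, measures how many heap words
 * the GC info term needs; the second writes the term into the HAlloc'ed
 * area. */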
void
@@ -2412,7 +1387,7 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2477,7 +1452,7 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2552,7 +1527,7 @@ monitor_long_gc(Process *p, Uint time) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2627,7 +1602,7 @@ monitor_large_heap(Process *p) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2659,7 +1634,7 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
@@ -2756,71 +1731,15 @@ profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint M
}
-
-/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
- * or {trace, Pid, What, {Mod, Func, Arity}}
- *
- * where 'What' is supposed to be 'in' or 'out'.
- *
- * Virtual scheduling does not fake scheduling for ports.
- */
-
-
-void trace_virtual_sched(Process *p, Eterm what)
-{
- trace_sched_aux(p, what, 1);
-}
-
/* Port profiling */
void
trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
- Eterm mess;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
- /* No fake schedule */
- send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- size_t sz_data;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- sz_data = 6 + PATCH_TS_SIZE(p);
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
-
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
-
+ ErtsTracerNif *tnif = NULL;
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (is_tracer_proc_enabled(NULL, 0, &p->common, &tnif, am_open))
+ send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, am_open,
+ calling_pid, drv_name);
}
/* Sends trace message:
@@ -2832,52 +1751,208 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
*/
void
trace_port(Port *t_p, Eterm what, Eterm data) {
- Eterm mess;
- Eterm* hp;
+ ErtsTracerNif *tnif = NULL;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (is_tracer_proc_enabled(NULL, 0, &t_p->common, &tnif, what))
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif,
+ what, data, THE_NON_VALUE);
+}
- if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
-#define LOCAL_HEAP_SIZE (5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, NULL, NULL);
- /* No fake schedule */
- send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
+static Eterm
+trace_port_tmp_binary(char *bin, Sint sz, Binary **bptrp, Eterm **hp)
+{
+ if (sz <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin *hb = (ErlHeapBin *)*hp;
+ hb->thing_word = header_heap_bin(sz);
+ hb->size = sz;
+ sys_memcpy(hb->data, bin, sz);
+ *hp += heap_bin_size(sz);
+ return make_binary(hb);
} else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- size_t sz_data;
- ERTS_TRACER_REF_TYPE tracer_ref;
+ ProcBin* pb = (ProcBin *)*hp;
+ Binary *bptr = erts_bin_nrml_alloc(sz);
+ erts_refc_init(&bptr->refc, 1);
+ sys_memcpy(bptr->orig_bytes, bin, sz);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = sz;
+ pb->next = NULL;
+ pb->val = bptr;
+ pb->bytes = (byte*) bptr->orig_bytes;
+ pb->flags = 0;
+ *bptrp = bptr;
+ *hp += PROC_BIN_SIZE;
+ return make_binary(pb);
+ }
+}
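+/* trace_port_tmp_binary() copies payloads of at most ERL_ONHEAP_BIN_LIMIT
+ * bytes (64 at the time of writing) into the caller-supplied heap area as a
+ * heap binary, and wraps anything larger in a freshly allocated ProcBin. In
+ * the latter case *bptrp is set, and the callers below drop that extra
+ * reference with erts_refc_dectest()/erts_bin_free() once the message has
+ * been handed to the tracer. */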
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
+/* Sends trace message:
+ * {trace, PortPid, 'receive', {pid(), {command, iolist()}}}
+ * {trace, PortPid, 'receive', {pid(), {control, pid()}}}
+ * {trace, PortPid, 'receive', {pid(), exit}}
+ *
+ */
+void
+trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
+{
+ ErtsTracerNif *tnif = NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (is_tracer_proc_enabled(NULL, 0, &t_p->common, &tnif, am_receive)) {
+ /* We can use a stack heap here, as the nif is called in the
+ context of a port */
+#define LOCAL_HEAP_SIZE (3 + 3 + heap_bin_size(ERL_ONHEAP_BIN_LIMIT) + 3)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
+
+ Eterm *hp, data, *orig_hp = NULL;
+ Binary *bptr = NULL;
+ va_list args;
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ hp = local_heap;
+
+ if (what == am_close) {
+ data = what;
+ } else {
+ Eterm arg;
+ va_start(args, what);
+ if (what == am_command) {
+ char *bin = va_arg(args, char *);
+ Sint sz = va_arg(args, Sint);
+ va_end(args);
+ arg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
+ } else if (what == am_call || what == am_control) {
+ unsigned int command = va_arg(args, unsigned int);
+ char *bin = va_arg(args, char *);
+ Sint sz = va_arg(args, Sint);
+ Eterm cmd;
+ va_end(args);
+ arg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
+#if defined(ARCH_32)
+ if (!IS_USMALL(0, command)) {
+ *hp = make_pos_bignum_header(1);
+ BIG_DIGIT(hp, 0) = (Uint)command;
+ cmd = make_big(hp);
+ hp += 2;
+ } else
+#endif
+ {
+ cmd = make_small((Sint)command);
+ }
+ arg = TUPLE2(hp, cmd, arg);
+ hp += 3;
+ } else if (what == am_commandv) {
+ ErlIOVec *evp = va_arg(args, ErlIOVec*);
+ int i;
+ va_end(args);
+ if ((6 + evp->vsize * (2+PROC_BIN_SIZE+ERL_SUB_BIN_SIZE)) > LOCAL_HEAP_SIZE) {
+ hp = erts_alloc(ERTS_ALC_T_TMP,
+ (6 + evp->vsize * (2+PROC_BIN_SIZE+ERL_SUB_BIN_SIZE)) * sizeof(Eterm));
+ orig_hp = hp;
+ }
+ arg = NIL;
+ /* Convert each element in the ErlIOVec to a sub bin that points
+ to a procbin. We don't have to increment the proc bin refc as
+ the port task keeps the reference alive. */
+ for (i = evp->vsize-1; i >= 0; i--) {
+ if (evp->iov[i].iov_len) {
+ ProcBin* pb = (ProcBin*)hp;
+ ErlSubBin *sb;
+ ASSERT(evp->binv[i]);
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->val = ErlDrvBinary2Binary(evp->binv[i]);
+ pb->size = pb->val->orig_size;
+ pb->next = NULL;
+ pb->bytes = (byte*) pb->val->orig_bytes;
+ pb->flags = 0;
+ hp += PROC_BIN_SIZE;
+
+ sb = (ErlSubBin*) hp;
+ sb->thing_word = HEADER_SUB_BIN;
+ sb->size = evp->iov[i].iov_len;
+ sb->offs = (byte*)(evp->iov[i].iov_base) - pb->bytes;
+ sb->orig = make_binary(pb);
+ sb->bitoffs = 0;
+ sb->bitsize = 0;
+ sb->is_writable = 0;
+ hp += ERL_SUB_BIN_SIZE;
+
+ arg = CONS(hp, make_binary(sb), arg);
+ hp += 2;
+ }
+ }
+ what = am_command;
+ } else {
+ arg = va_arg(args, Eterm);
+ va_end(args);
+ }
+ data = TUPLE2(hp, what, arg);
+ hp += 3;
+ }
- sz_data = 5 + PATCH_TS_SIZE(t_p);
+ data = TUPLE2(hp, caller, data);
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif,
+ am_receive, data, THE_NON_VALUE);
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(t_p),
- ERTS_TRACE_FLAGS(t_p));
+ if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
+ erts_bin_free(bptr);
- hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
+ if (orig_hp)
+ erts_free(ERTS_ALC_T_TMP, orig_hp);
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ }
+#undef LOCAL_HEAP_SIZE
+}
- erts_smp_mtx_lock(&smq_mtx);
+void
+trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists)
+{
+ ErtsTracerNif *tnif = NULL;
+ Eterm op = exists ? am_send : am_send_to_non_existing_process;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (is_tracer_proc_enabled(NULL, 0, &t_p->common, &tnif, op))
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif,
+ op, msg, receiver);
+}
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, bp, tracer_ref);
+void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
+{
+ ErtsTracerNif *tnif = NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (is_tracer_proc_enabled(NULL, 0, &t_p->common, &tnif, am_send)) {
+ Eterm msg;
+ Binary* bptr = NULL;
+#define LOCAL_HEAP_SIZE (3 + 3 + heap_bin_size(ERL_ONHEAP_BIN_LIMIT))
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
+ Eterm *hp;
+
+ ERTS_CT_ASSERT(heap_bin_size(ERL_ONHEAP_BIN_LIMIT) >= PROC_BIN_SIZE);
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ hp = local_heap;
+
+ msg = trace_port_tmp_binary(bin, sz, &bptr, &hp);
+
+ msg = TUPLE2(hp, what, msg);
+ hp += 3;
+ msg = TUPLE2(hp, t_p->common.id, msg);
+ hp += 3;
+
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif,
+ am_send, msg, to);
+ if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
+ erts_bin_free(bptr);
+
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+#undef LOCAL_HEAP_SIZE
}
}
@@ -2891,83 +1966,18 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
void
trace_sched_ports(Port *p, Eterm what) {
- trace_sched_ports_where(p,what, make_small(0));
+ trace_sched_ports_where(p, what, make_small(0));
}
-void
-trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
- Eterm mess;
- Eterm* hp;
- int ws = 5;
- Eterm sched_id = am_undefined;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esd = erts_get_scheduler_data();
- if (esd) sched_id = make_small(esd->no);
- else sched_id = am_undefined;
-#else
- sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
- ws = 6;
- } else {
- mess = TUPLE4(hp, am_trace, p->common.id, what, where);
- ws = 5;
- }
- hp += ws;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
-
- /* No fake scheduling */
- send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make place for scheduler id */
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(ws+PATCH_TS_SIZE(p), &bp, &off_heap, tracer_ref);
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esd = erts_get_scheduler_data();
- if (esd) sched_id = make_small(esd->no);
- else sched_id = am_undefined;
-#else
- sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
- } else {
- mess = TUPLE4(hp, am_trace, p->common.id, what, where);
- }
- hp += ws;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+void
+trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) {
+ ErtsTracerNif *tnif = NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
+ || erts_thr_progress_is_blocking());
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ if (is_tracer_proc_enabled(NULL, 0, &t_p->common, &tnif, what))
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id,
+ tnif, what, where, THE_NON_VALUE);
}
/* Port profiling */
@@ -3083,28 +2093,6 @@ profile_runnable_proc(Process *p, Eterm status){
#ifdef ERTS_SMP
-void
-erts_check_my_tracer_proc(Process *p)
-{
- if (is_internal_pid(ERTS_TRACER_PROC(p))) {
- Process *tracer = erts_pid2proc(p,
- ERTS_PROC_LOCK_MAIN,
- ERTS_TRACER_PROC(p),
- ERTS_PROC_LOCK_STATUS);
- int invalid_tracer = (!tracer
- || !(ERTS_TRACE_FLAGS(tracer) & F_TRACER));
- if (tracer)
- erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
- if (invalid_tracer) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_TRACE_FLAGS(p) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(p) = NIL;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
-}
-
-
typedef struct ErtsSysMsgQ_ ErtsSysMsgQ;
struct ErtsSysMsgQ_ {
ErtsSysMsgQ *next;
@@ -3182,12 +2170,6 @@ static void
print_msg_type(ErtsSysMsgQ *smqp)
{
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- erts_fprintf(stderr, "TRACE ");
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- erts_fprintf(stderr, "SEQTRACE ");
- break;
case SYS_MSG_TYPE_SYSMON:
erts_fprintf(stderr, "SYSMON ");
break;
@@ -3198,8 +2180,8 @@ print_msg_type(ErtsSysMsgQ *smqp)
erts_fprintf(stderr, "ERRLGR ");
break;
case SYS_MSG_TYPE_PROC_MSG:
- erts_fprintf(stderr, "PROC_MSG ");
- break;
+ erts_fprintf(stderr, "PROC_MSG ");
+ break;
default:
erts_fprintf(stderr, "??? ");
break;
@@ -3211,17 +2193,6 @@ static void
sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
{
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- /* Invalid tracer_proc's are removed when processes
- are scheduled in. */
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- /* Reset seq_tracer if it hasn't changed */
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (system_seq_tracer == receiver)
- system_seq_tracer = am_false;
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
- break;
case SYS_MSG_TYPE_SYSMON:
if (receiver == NIL
&& !erts_system_monitor_long_gc
@@ -3282,7 +2253,7 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
break;
}
case SYS_MSG_TYPE_PROC_MSG:
- break;
+ break;
default:
ASSERT(0);
}
@@ -3400,13 +2371,9 @@ sys_msg_dispatcher_func(void *unused)
print_msg_type(smqp);
#endif
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- case SYS_MSG_TYPE_PROC_MSG:
- receiver = smqp->to;
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- receiver = erts_get_system_seq_tracer();
- break;
+ case SYS_MSG_TYPE_PROC_MSG:
+ receiver = smqp->to;
+ break;
case SYS_MSG_TYPE_SYSMON:
receiver = erts_get_system_monitor();
if (smqp->from == receiver) {
@@ -3441,16 +2408,8 @@ sys_msg_dispatcher_func(void *unused)
if (is_internal_pid(receiver)) {
proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
- if (!proc
- || (smqp->type == SYS_MSG_TYPE_TRACE
- && !(ERTS_TRACE_FLAGS(proc) & F_TRACER))) {
+ if (!proc) {
/* Bad tracer */
-#ifdef DEBUG_PRINTOUTS
- if (smqp->type == SYS_MSG_TYPE_TRACE && proc)
- erts_fprintf(stderr,
- "<tracer alive but missing "
- "F_TRACER flag> ");
-#endif
goto failure;
}
else {
@@ -3458,7 +2417,7 @@ sys_msg_dispatcher_func(void *unused)
queue_proc_msg:
mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = smqp->bp;
- erts_queue_message(proc,&proc_locks,mp,smqp->msg,NIL);
+ erts_queue_message(proc,&proc_locks,mp,smqp->msg);
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
@@ -3524,12 +2483,6 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
for (sm = sys_message_queue; sm; sm = sm->next) {
Eterm to;
switch (sm->type) {
- case SYS_MSG_TYPE_TRACE:
- to = sm->to;
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- to = erts_get_system_seq_tracer();
- break;
case SYS_MSG_TYPE_SYSMON:
to = erts_get_system_monitor();
break;
@@ -3567,3 +2520,498 @@ init_sys_msg_dispatcher(void)
}
#endif
+
+#include "erl_nif.h"
+
+struct ErtsTracerNif_ {
+ HashBucket hb;
+ Eterm module;
+ struct erl_module_nif* nif_mod;
+ ErlNifFunc *enabled;
+ ErlNifFunc *trace;
+};
+
+static Hash *tracer_hash = NULL;
+static erts_smp_rwmtx_t tracer_mtx;
+
+static ErtsTracerNif *
+load_tracer_nif(const ErtsTracer tracer)
+{
+ Module* mod = erts_get_module(ERTS_TRACER_MODULE(tracer),
+ erts_active_code_ix());
+ struct erl_module_instance *instance;
+ ErlNifFunc *funcs;
+ int num_of_funcs;
+ ErtsTracerNif tnif_tmpl, *tnif;
+ int i;
+
+ if (mod && mod->curr.nif != NULL) {
+ instance = &mod->curr;
+ } else {
+ return NULL;
+ }
+
+ tnif_tmpl.enabled = NULL;
+ tnif_tmpl.trace = NULL;
+ tnif_tmpl.nif_mod = instance->nif;
+ tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
+
+ num_of_funcs = erts_nif_get_funcs(instance->nif, &funcs);
+
+ for(i = 0; i < num_of_funcs; i++) {
+ if (strcmp("enabled",funcs[i].name) == 0 && funcs[i].arity == 3) {
+ tnif_tmpl.enabled = funcs + i;
+ } else if (strcmp("trace",funcs[i].name) == 0 && funcs[i].arity == 6) {
+ tnif_tmpl.trace = funcs + i;
+ }
+ }
+
+ if (tnif_tmpl.enabled == NULL ||
+ tnif_tmpl.trace == NULL ) {
+ return NULL;
+ }
+
+ erts_smp_rwmtx_rwlock(&tracer_mtx);
+ tnif = hash_put(tracer_hash, &tnif_tmpl);
+ erts_smp_rwmtx_rwunlock(&tracer_mtx);
+
+ return tnif;
+}
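
For context, load_tracer_nif above only accepts a NIF module that exports the two callbacks it scans for: enabled/3 and trace/6. A minimal, hypothetical backend (module name and callback bodies are illustrative only, not part of this commit) could be registered through the public erl_nif API like this:

    #include "erl_nif.h"

    /* enabled(Tag, TracerState, Tracee) -> trace | discard | remove */
    static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        return enif_make_atom(env, "trace");   /* always deliver this event */
    }

    /* trace(Tag, TracerState, Tracee, Msg, Extra, Opts) -> return value ignored */
    static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        /* a real backend would forward or record the event here */
        return enif_make_atom(env, "ok");
    }

    static ErlNifFunc tracer_funcs[] = {
        {"enabled", 3, enabled},
        {"trace",   6, trace}
    };

    ERL_NIF_INIT(hypothetical_tracer, tracer_funcs, NULL, NULL, NULL, NULL)
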
+
+static ERTS_INLINE ErtsTracerNif *
+lookup_tracer_nif(const ErtsTracer tracer)
+{
+ ErtsTracerNif tnif_tmpl;
+ ErtsTracerNif *tnif;
+ tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
+ erts_smp_rwmtx_rlock(&tracer_mtx);
+ if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) {
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ tnif = load_tracer_nif(tracer);
+ ASSERT(!tnif || tnif->nif_mod);
+ return tnif;
+ }
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ ASSERT(tnif->nif_mod);
+ return tnif;
+}
+
+/* This function converts an Erlang tracer term to ErtsTracer.
+ It returns THE_NON_VALUE if an invalid tracer term was given.
+ Accepted input is:
+ pid() || port() || {prefix, pid()} || {prefix, port()} ||
+ {prefix, atom(), term()} || {atom(), term()}
+ */
+ErtsTracer
+erts_term_to_tracer(Eterm prefix, Eterm t)
+{
+ ErtsTracer tracer = erts_tracer_nil;
+ ASSERT(is_atom(prefix) || prefix == THE_NON_VALUE);
+ if (!is_nil(t)) {
+ Eterm module = am_erl_tracer, state = THE_NON_VALUE;
+ Eterm hp[2];
+ if (is_tuple(t)) {
+ Eterm *tp = tuple_val(t);
+ if (prefix != THE_NON_VALUE) {
+ if (arityval(tp[0]) == 2 && tp[1] == prefix)
+ t = tp[2];
+ else if (arityval(tp[0]) == 3 && tp[1] == prefix && is_atom(tp[2])) {
+ module = tp[2];
+ state = tp[3];
+ }
+ } else {
+ if (arityval(tp[0]) == 2 && is_atom(tp[2])) {
+ module = tp[1];
+ state = tp[2];
+ }
+ }
+ }
+ if (state == THE_NON_VALUE && (is_internal_pid(t) || is_internal_port(t)))
+ state = t;
+ if (state == THE_NON_VALUE)
+ return THE_NON_VALUE;
+ erts_tracer_update(&tracer, CONS(hp, module, state));
+ }
+ if (!lookup_tracer_nif(tracer)) {
+ ASSERT(ERTS_TRACER_MODULE(tracer) != am_erl_tracer);
+ ERTS_TRACER_CLEAR(&tracer);
+ return THE_NON_VALUE;
+ }
+ return tracer;
+}
+
+Eterm
+erts_tracer_to_term(Process *p, ErtsTracer tracer)
+{
+ if (ERTS_TRACER_IS_NIL(tracer))
+ return am_false;
+ if (ERTS_TRACER_MODULE(tracer) == am_erl_tracer)
+ /* Have to handle this case specially in order to be
+ backwards compatible */
+ return ERTS_TRACER_STATE(tracer);
+ else {
+ Eterm *hp = HAlloc(p, 3);
+ return TUPLE2(hp, ERTS_TRACER_MODULE(tracer),
+ copy_object(ERTS_TRACER_STATE(tracer), p));
+ }
+}
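
For reference, the mapping performed by erts_tracer_to_term above (my_tracer being a hypothetical tracer module):

    /*
     *   erts_tracer_to_term(p, erts_tracer_nil)      -> 'false'
     *   erts_tracer_to_term(p, [erl_tracer | Pid])   -> Pid               (compat)
     *   erts_tracer_to_term(p, [my_tracer | State])  -> {my_tracer, State}
     */
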
+
+
+static ERTS_INLINE int
+send_to_tracer_nif_raw(Process *c_p, Process *tracee,
+ const ErtsTracer tracer, Uint tracee_flags,
+ Eterm t_p_id, ErtsTracerNif *tnif, Eterm tag, Eterm msg,
+ Eterm extra, Eterm pam_result)
+{
+ if (tnif || (tnif = lookup_tracer_nif(tracer)) != NULL) {
+#define MAP_SIZE 3
+ Eterm argv[6],
+ local_heap[3+MAP_SIZE /* values */+(MAP_SIZE+1 /* keys */)];
+ flatmap_t *map = (flatmap_t*)(local_heap+(MAP_SIZE+1));
+ Eterm *map_values = flatmap_get_values(map);
+
+ int argc = 6;
+
+ argv[0] = tag;
+ argv[1] = ERTS_TRACER_STATE(tracer);
+ argv[2] = t_p_id;
+ argv[3] = msg;
+ argv[4] = extra == THE_NON_VALUE ? am_undefined : extra;
+ argv[5] = make_flatmap(map);
+
+ map->thing_word = MAP_HEADER_FLATMAP;
+ map->size = MAP_SIZE;
+ map->keys = TUPLE3(local_heap, am_match_spec_result, am_scheduler_id,
+ am_timestamp);
+
+ *map_values++ = pam_result;
+ if (tracee_flags & F_TRACE_SCHED_NO)
+ *map_values++ = make_small(erts_get_scheduler_id());
+ else
+ *map_values++ = am_undefined;
+ if (tracee_flags & F_NOW_TS)
+#ifdef HAVE_ERTS_NOW_CPU
+ if (erts_cpu_timestamp)
+ *map_values++ = am_cpu_timestamp;
+ else
+#endif
+ *map_values++ = am_timestamp;
+ else if (tracee_flags & F_STRICT_MON_TS)
+ *map_values++ = am_strict_monotonic;
+ else if (tracee_flags & F_MON_TS)
+ *map_values++ = am_monotonic;
+ else
+ *map_values++ = am_undefined;
+
+#undef MAP_SIZE
+ erts_nif_call_function(c_p, tracee ? tracee : c_p,
+ tnif->nif_mod, tnif->trace, argc, argv);
+ }
+ return 1;
+}
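
The last argument assembled above is a three-key flatmap (match_spec_result, scheduler_id, timestamp). A hypothetical trace/6 callback on the receiving side could inspect it with the public map API; the sketch below assumes the callback is written as a C NIF and is not part of this commit:

    /* Sketch only: read the Opts map passed as argv[5] to trace/6. */
    static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        ERL_NIF_TERM pam, sched, ts;

        if (enif_get_map_value(env, argv[5],
                               enif_make_atom(env, "match_spec_result"), &pam)) {
            /* 'true', or the result of the match spec body */
        }
        if (enif_get_map_value(env, argv[5],
                               enif_make_atom(env, "scheduler_id"), &sched)) {
            /* small integer, or 'undefined' unless scheduler_id tracing is on */
        }
        if (enif_get_map_value(env, argv[5],
                               enif_make_atom(env, "timestamp"), &ts)) {
            /* requested timestamp flavour, or 'undefined' */
        }
        return enif_make_atom(env, "ok");
    }
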
+
+
+static ERTS_INLINE int
+send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
+ Eterm t_p_id, ErtsTracerNif *tnif, Eterm tag,
+ Eterm msg, Eterm extra)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ if (c_p) {
+ /* We have to hold the main lock of the currently executing process */
+ erts_proc_lc_chk_have_proc_locks(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ if (is_internal_pid(t_p->id)) {
+ /* We have to have at least one lock */
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL);
+ } else {
+ ASSERT(is_internal_port(t_p->id));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p));
+ }
+#endif
+
+ return send_to_tracer_nif_raw(c_p,
+ is_internal_pid(t_p->id) ? (Process*)t_p : NULL,
+ t_p->tracer, t_p->trace_flags,
+ t_p_id, tnif, tag, msg, extra,
+ am_true);
+}
+
+static ERTS_INLINE Eterm
+call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+ ErtsTracerNif **tnif_ret, Eterm tag, Eterm t_p_id)
+{
+ ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
+ if (tnif) {
+ Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id};
+ if (tnif_ret) *tnif_ret = tnif;
+ return erts_nif_call_function(
+ c_p, NULL, tnif->nif_mod, tnif->enabled, 3, argv);
+ }
+ return am_remove;
+}
+
+static int
+is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p,
+ ErtsTracerNif **tnif_ret, Eterm tag)
+{
+ Eterm nif_result;
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ if (c_p)
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks
+ || erts_thr_progress_is_blocking());
+ if (is_internal_pid(t_p->id)) {
+ /* We have to have at least one lock */
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL
+ || erts_thr_progress_is_blocking());
+ } else {
+ ASSERT(is_internal_port(t_p->id));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
+ || erts_thr_progress_is_blocking());
+ }
+#endif
+
+ nif_result = call_enabled_tracer(c_p, t_p->tracer, tnif_ret, tag, t_p->id);
+ switch (nif_result) {
+ case am_discard: return 0;
+ case am_trace: return 1;
+ case THE_NON_VALUE:
+ case am_remove: break;
+ default:
+ /* Only am_remove should be returned, but if
+ something else is returned we fall through
+ and remove the tracer. */
+ ASSERT(0);
+ }
+
+ /* Only remove tracer on self() and ports */
+ if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) {
+ ErtsProcLocks c_p_xlocks = 0;
+ if (is_internal_pid(t_p->id)) {
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+ if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
+ c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
+ if (erts_smp_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
+ erts_smp_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ }
+ erts_tracer_replace(t_p, erts_tracer_nil);
+ t_p->trace_flags &= ~TRACEE_FLAGS;
+
+ if (c_p_xlocks)
+ erts_smp_proc_unlock(c_p, c_p_xlocks);
+ }
+
+
+ return 0;
+}
+
+int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p, Eterm type)
+{
+ return is_tracer_proc_enabled(c_p, c_p_locks, t_p, NULL, am_trace_status);
+}
+
+int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer)
+{
+ ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
+ if (tnif) {
+ Eterm nif_result = call_enabled_tracer(c_p, tracer, &tnif,
+ am_trace_status,
+ c_p->common.id);
+ switch (nif_result) {
+ case am_discard:
+ case am_trace: return 1;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ if (is_internal_pid(t_p->id) && !erts_thr_progress_is_blocking()) {
+ erts_proc_lc_chk_have_proc_locks((Process*)t_p, ERTS_PROC_LOCKS_ALL);
+ } else if (is_internal_port(t_p->id)) {
+ ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
+ || erts_thr_progress_is_blocking());
+ }
+#endif
+ if (ERTS_TRACER_COMPARE(t_p->tracer, tracer))
+ return;
+
+ erts_tracer_update(&t_p->tracer, tracer);
+}
+
+static void free_tracer(void *p)
+{
+ ErtsTracer tracer = (ErtsTracer)p;
+
+ if (is_immed(ERTS_TRACER_STATE(tracer))) {
+ erts_free(ERTS_ALC_T_HEAP_FRAG, ptr_val(tracer));
+ } else {
+ ErlHeapFragment *hf = (void*)((char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem));
+ free_message_buffer(hf);
+ }
+}
+
+/* un-define erts_tracer_update before implementation */
+#ifdef erts_tracer_update
+#undef erts_tracer_update
+#endif
+
+/*
+ * ErtsTracer is either NIL, 'true' or [Mod | State]
+ *
+ * - If State is immediate then the memory for
+ * the cons cell is just two words + sizeof(ErtsThrPrgrLaterOp) large.
+ * - If State is a complex term then the cons cell
+ * is allocated in an ErlHeapFragment where the cons
+ * ptr points to the mem field. So in order to get the
+ * ptr to the fragment you do this:
+ * (char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem)
+ * Normally you shouldn't have to care about this though
+ * as erts_tracer_update takes care of it for you.
+ *
+ * When ErtsTracer is stored in the stack as part of a
+ * return trace, the cons cell is stored on the heap of
+ * the process.
+ *
+ * The cons cell is not always stored on the heap because:
+ * 1) for port/meta tracing there is no heap
+ * 2) we would need the main lock in order to
+ * read the tracer, which is undesirable.
+ *
+ * One way to optimize this (memory-wise) is to keep a refc and only bump
+ * the refc when *tracer is NIL.
+ */
+void
+erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
+{
+ ErlHeapFragment *hf;
+
+ if (is_not_nil(*tracer)) {
+ Uint offs = 2;
+ UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp);
+ ASSERT(is_list(*tracer));
+ if (is_not_immed(ERTS_TRACER_STATE(*tracer))) {
+ hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem)));
+ offs = hf->used_size;
+ size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment);
+ ASSERT(offs == size_object(*tracer));
+ }
+ /* We schedule the freeing of the tracer until after thread progress
+ has been made, so that we know that no schedulers have any references
+ to it. Because of this, it is possible to release all locks of a
+ process/port and still use the ErtsTracer of that port/process
+ without having to worry about it being freed.
+ */
+ erts_schedule_thr_prgr_later_cleanup_op(
+ free_tracer, (void*)(*tracer),
+ (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs),
+ size);
+ }
+
+ if (is_nil(new_tracer)) {
+ *tracer = new_tracer;
+ } else if (is_immed(ERTS_TRACER_STATE(new_tracer))) {
+ /* If the tracer state is an immediate we only allocate a 2-Eterm heap.
+ Not sure if it is worth it; we save 4 words (sizeof(ErlHeapFragment))
+ per tracer. */
+ Eterm *hp = erts_alloc(ERTS_ALC_T_HEAP_FRAG,
+ 2*sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp));
+ *tracer = CONS(hp, ERTS_TRACER_MODULE(new_tracer),
+ ERTS_TRACER_STATE(new_tracer));
+ } else {
+ Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer),
+ tracer_module = ERTS_TRACER_MODULE(new_tracer);
+ Uint sz = size_object(tracer_state);
+ hf = new_message_buffer(sz + 2 /* cons cell */ + (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm));
+ hp = hf->mem + 2;
+ hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm);
+ *tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap);
+ *tracer = CONS(hf->mem, tracer_module, *tracer);
+ ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf);
+ }
+}
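
Together with the erts_tracer_update macro and ERTS_TRACER_CLEAR added to erl_trace.h later in this diff, the expected lifecycle of an ErtsTracer field looks roughly like this (sketch only; some_tracer is assumed to hold a valid [Mod | State] pair):

    ErtsTracer local = erts_tracer_nil;      /* no tracer installed           */

    erts_tracer_update(&local, some_tracer); /* copy [Mod | State] into       */
                                             /* memory owned by 'local'       */

    /* ... read ERTS_TRACER_MODULE(local) / ERTS_TRACER_STATE(local) ...      */

    ERTS_TRACER_CLEAR(&local);               /* old copy is freed only after  */
                                             /* thread progress               */
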
+
+static void init_tracer_nif()
+{
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx");
+
+ erts_tracer_nif_clear();
+
+}
+
+int erts_tracer_nif_clear()
+{
+
+ erts_smp_rwmtx_rlock(&tracer_mtx);
+ if (!tracer_hash || tracer_hash->nobjs) {
+
+ HashFunctions hf;
+ hf.hash = tracer_hash_fun;
+ hf.cmp = tracer_cmp_fun;
+ hf.alloc = tracer_alloc_fun;
+ hf.free = tracer_free_fun;
+ hf.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ hf.meta_free = (HMFREE_FUN) erts_free;
+ hf.meta_print = (HMPRINT_FUN) erts_print;
+
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ erts_smp_rwmtx_rwlock(&tracer_mtx);
+
+ if (tracer_hash)
+ hash_delete(tracer_hash);
+
+ tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf);
+
+ erts_smp_rwmtx_rwunlock(&tracer_mtx);
+ return 1;
+ }
+
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ return 0;
+}
+
+static int tracer_cmp_fun(void* a, void* b)
+{
+ return ((ErtsTracerNif*)a)->module != ((ErtsTracerNif*)b)->module;
+}
+
+static HashValue tracer_hash_fun(void* obj)
+{
+ return make_internal_hash(((ErtsTracerNif*)obj)->module);
+}
+
+static void *tracer_alloc_fun(void* tmpl)
+{
+ ErtsTracerNif *obj = erts_alloc(ERTS_ALC_T_TRACER_NIF,
+ sizeof(ErtsTracerNif) +
+ sizeof(ErtsThrPrgrLaterOp));
+ memcpy(obj, tmpl, sizeof(*obj));
+ return obj;
+}
+
+static void tracer_free_fun_cb(void* obj)
+{
+ erts_free(ERTS_ALC_T_TRACER_NIF, obj);
+}
+
+static void tracer_free_fun(void* obj)
+{
+ ErtsTracerNif *tnif = obj;
+ erts_schedule_thr_prgr_later_op(
+ tracer_free_fun_cb, obj,
+ (ErtsThrPrgrLaterOp*)(tnif + 1));
+
+}
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 1da27ee128..177fd373a6 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -46,6 +46,8 @@
#define ERTS_SEQTFLGS2TSTYPE(SEQTFLGS) \
((int) (((SEQTFLGS) >> ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT) \
& ERTS_TRACE_TS_TYPE_MASK))
+#define ERTS_SEQTFLGS2TFLGS(SEQTFLGS) \
+ (ERTS_SEQTFLGS2TSTYPE(SEQTFLGS) << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
#endif /* ERL_TRACE_H__FLAGS__ */
@@ -62,15 +64,19 @@ void erts_system_profile_clear(Process *c_p);
/* erl_trace.c */
void erts_init_trace(void);
void erts_trace_check_exiting(Eterm exiting);
-Eterm erts_set_system_seq_tracer(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm new);
-Eterm erts_get_system_seq_tracer(void);
-void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
-void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
+ErtsTracer erts_set_system_seq_tracer(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ ErtsTracer new);
+ErtsTracer erts_get_system_seq_tracer(void);
+void erts_change_default_proc_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracerp);
+void erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp);
+void erts_change_default_port_tracing(int setflags, Uint flagsp,
+ const ErtsTracer tracerp);
+void erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp);
void erts_set_system_monitor(Eterm monitor);
Eterm erts_get_system_monitor(void);
-int erts_is_tracer_proc_valid(Process* p);
+int erts_is_tracer_valid(Process* p);
#ifdef ERTS_SMP
void erts_check_my_tracer_proc(Process *);
@@ -81,28 +87,32 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
Eterm,
ErlHeapFragment *));
void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
+void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
#endif
-void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
void trace_send(Process*, Eterm, Eterm);
-void trace_receive(Process*, Eterm);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec, Eterm* args,
- int local, Eterm *tracer_pid);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
+void trace_receive(Process *, Eterm);
+Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec,
+ Eterm* args, int local, ErtsTracer *tracer);
+void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval,
+ ErtsTracer *tracer);
void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
- Eterm *tracer);
+ ErtsTracer *tracer);
void erts_trace_return_to(Process *p, BeamInstr *pc);
-void trace_sched(Process*, Eterm);
-void trace_proc(Process*, Process*, Eterm, Eterm);
-void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
+void trace_sched(Process*, ErtsProcLocks, Eterm);
+void trace_proc(Process*, ErtsProcLocks, Process*, Eterm, Eterm);
+void trace_proc_spawn(Process*, Eterm what, Eterm pid, Eterm mod, Eterm func, Eterm args);
void save_calls(Process *p, Export *);
void trace_gc(Process *p, Eterm what);
/* port tracing */
-void trace_virtual_sched(Process*, Eterm);
+void trace_virtual_sched(Process*, ErtsProcLocks, Eterm);
void trace_sched_ports(Port *pp, Eterm);
void trace_sched_ports_where(Port *pp, Eterm, Eterm);
void trace_port(Port *, Eterm what, Eterm data);
void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name);
+void trace_port_receive(Port *, Eterm calling_pid, Eterm tag, ...);
+void trace_port_send(Port *, Eterm to, Eterm msg, int exists);
+void trace_port_send_binary(Port *, Eterm to, Eterm what, char *bin, Sint sz);
/* system_profile */
void erts_set_system_profile(Eterm profile);
@@ -121,7 +131,7 @@ void monitor_large_heap(Process *p);
void monitor_generic(Process *p, Eterm type, Eterm spec);
Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
+ Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp);
Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
#ifdef ERTS_SMP
@@ -161,15 +171,53 @@ int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
struct binary* match_prog_set,
struct binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid, int is_blocking);
+ ErtsTracer meta_tracer, int is_blocking);
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct binary **match_spec,
struct binary **meta_match_spec,
struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid);
+ ErtsTracer *meta_tracer);
int erts_is_default_trace_enabled(void);
void erts_bif_trace_init(void);
int erts_finish_breakpointing(void);
+/* Nif tracer functions */
+int erts_is_tracer_proc_enabled(Process *c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p, Eterm type);
+int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer);
+Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
+ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
+void erts_tracer_replace(ErtsPTabElementCommon *t_p,
+ const ErtsTracer new_tracer);
+void erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer);
+int erts_tracer_nif_clear(void);
+
+#define erts_tracer_update(t,n) do { if (*(t) != (n)) erts_tracer_update(t,n); } while(0)
+#define ERTS_TRACER_CLEAR(t) erts_tracer_update(t, erts_tracer_nil)
+
+static const ErtsTracer
+ERTS_DECLARE_DUMMY(erts_tracer_true) = am_true;
+
+static const ErtsTracer
+ERTS_DECLARE_DUMMY(erts_tracer_nil) = NIL;
+
+#define ERTS_TRACER_COMPARE(t1, t2) \
+ (EQ((t1), (t2)))
+
+#define ERTS_TRACER_IS_NIL(t1) ERTS_TRACER_COMPARE(t1, erts_tracer_nil)
+
+#define IS_TRACER_VALID(tracer) \
+ (ERTS_TRACER_COMPARE(tracer,erts_tracer_true) \
+ || ERTS_TRACER_IS_NIL(tracer) \
+ || (is_list(tracer) && is_atom(CAR(list_val(tracer)))))
+
+#define ERTS_TRACER_FROM_ETERM(termp) \
+ ((ErtsTracer*)(termp))
+
+#define ERTS_TRACER_PROC_IS_ENABLED(PROC) \
+ (!ERTS_TRACER_IS_NIL(ERTS_TRACER(PROC)) \
+ && erts_is_tracer_proc_enabled(PROC, ERTS_PROC_LOCK_MAIN, \
+ &(PROC)->common, am_trace_status))
+
#endif /* ERL_TRACE_H__ */
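
The validity macros above let a call site sanity-check a tracer before installing it; a hypothetical helper (name and locking comment are assumptions, not part of this commit) might look like:

    /* Sketch: install 'proposed' on t_p only if it is structurally sound.
       The caller is assumed to hold the locks erts_tracer_replace requires. */
    static int maybe_install_tracer(ErtsPTabElementCommon *t_p, ErtsTracer proposed)
    {
        if (!IS_TRACER_VALID(proposed))
            return 0;                    /* neither nil, 'true' nor [Mod | State] */
        if (!ERTS_TRACER_COMPARE(t_p->tracer, proposed))
            erts_tracer_replace(t_p, proposed);
        return 1;
    }
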
diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d
index 73ef5a108a..237889e0f5 100644
--- a/erts/emulator/beam/erlang_dtrace.d
+++ b/erts/emulator/beam/erlang_dtrace.d
@@ -700,6 +700,35 @@ provider erlang {
*/
probe efile_drv__return(int, int, char *, int, int, int);
+
+/*
+ * The set of probes called by the erlang tracer nif backend. In order
+ * to receive events from these probes you have to both enable tracing in
+ * Erlang using the trace BIFs and enable the probes from dtrace/systemtap.
+ */
+
+
+ /**
+ * A trace message of type `event` was triggered by process `p`.
+ *
+ *
+ * @param p the PID (string form) of the process
+ * @param event the event that was triggered (e.g. call or spawn)
+ * @param state the state of the tracer nif as a string
+ * @param arg1 first argument to the trace event
+ * @param arg2 second argument to the trace event
+ */
+ probe trace(char *p, char *event, char *state, char *arg1, char *arg2);
+
+ /**
+ * A sequence trace message with label `label` was triggered.
+ *
+ * @param state the state of the tracer nif as a string
+ * @param label the seq trace label
+ * @param seq_info the seq trace info tuple as a string
+ */
+ probe trace_seq(char *state, char *label, char *seq_info);
+
/*
* NOTE:
* For formatting int64_t arguments within a D script, see:
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 9f43240b7e..723c25ff77 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -3958,9 +3958,13 @@ error:
* Must unlink all off-heap objects that may have been
* linked into the process.
*/
- if (factory->hp < hp) { /* Sometimes we used hp and sometimes factory->hp */
- factory->hp = hp; /* the largest must be the freshest */
+ if (factory->mode != FACTORY_CLOSED) {
+ if (factory->hp < hp) { /* Sometimes we used hp and sometimes factory->hp */
+ factory->hp = hp; /* the largest must be the freshest */
+ }
}
+ else ASSERT(factory->hp == hp);
+
error_hamt:
erts_factory_undo(factory);
PSTACK_DESTROY(hamt_array);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 1d3704f0af..a7bc990deb 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -44,6 +44,8 @@
#include "erl_port.h"
#include "erl_gc.h"
+struct enif_func_t;
+
struct enif_environment_t /* ErlNifEnv */
{
struct erl_module_nif* mod_nif;
@@ -54,14 +56,21 @@ struct enif_environment_t /* ErlNifEnv */
int fpe_was_unmasked;
struct enif_tmp_obj_t* tmp_obj_list;
int exception_thrown; /* boolean */
+ Process *tracee;
};
extern void erts_pre_nif(struct enif_environment_t*, Process*,
- struct erl_module_nif*);
+ struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(int to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
+extern int erts_nif_get_funcs(struct erl_module_nif*,
+ struct enif_func_t **funcs);
+extern Eterm erts_nif_call_function(Process *p, Process *tracee,
+ struct erl_module_nif*,
+ struct enif_func_t *,
+ int argc, Eterm *argv);
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
@@ -1532,10 +1541,15 @@ ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
ERTS_GLB_INLINE void
dtrace_pid_str(Eterm pid, char *process_buf)
{
- erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "<%lu.%lu.%lu>",
- pid_channel_no(pid),
- pid_number(pid),
- pid_serial(pid));
+ if (is_pid(pid))
+ erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "<%lu.%lu.%lu>",
+ pid_channel_no(pid),
+ pid_number(pid),
+ pid_serial(pid));
+ else if (is_port(pid))
+ erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
+ port_channel_no(pid),
+ port_number(pid));
}
ERTS_GLB_INLINE void
@@ -1547,9 +1561,7 @@ dtrace_proc_str(Process *process, char *process_buf)
ERTS_GLB_INLINE void
dtrace_port_str(Port *port, char *port_buf)
{
- erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
- port_channel_no(port->common.id),
- port_number(port->common.id));
+ dtrace_pid_str(port->common.id, port_buf);
}
ERTS_GLB_INLINE void
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index de73da8e22..b14ca77a04 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -88,7 +88,7 @@ int erts_port_parallelism = 0;
static erts_atomic64_t bytes_in;
static erts_atomic64_t bytes_out;
-static void deliver_result(Eterm sender, Eterm pid, Eterm res);
+static void deliver_result(Port *p, Eterm sender, Eterm pid, Eterm res);
static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
static void terminate_port(Port *p);
static void pdl_init(void);
@@ -212,6 +212,7 @@ static ERTS_INLINE void
kill_port(Port *pp)
{
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_TRACER_CLEAR(&ERTS_TRACER(pp));
erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */
erts_port_task_free_port(pp);
/* In non-smp case the port structure may have been deallocated now */
@@ -404,7 +405,7 @@ static Port *create_port(char *name,
prt->os_pid = -1;
/* Set default tracing */
- erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
+ erts_get_default_port_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER(prt));
ERTS_CT_ASSERT(offsetof(Port,common) == 0);
@@ -709,7 +710,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
if (driver->start) {
ERTS_MSACC_PUSH_STATE_M();
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(port, am_in, am_start);
+ trace_sched_ports_where(port, am_in, am_open);
}
port->caller = pid;
#ifdef USE_VM_PROBES
@@ -753,7 +754,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
ERTS_MSACC_POP_STATE_M();
port->caller = NIL;
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(port, am_out, am_start);
+ trace_sched_ports_where(port, am_out, am_open);
}
#ifdef ERTS_SMP
if (port->xports)
@@ -1300,7 +1301,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(c_p, am_out);
+ trace_sched(c_p, ERTS_PROC_LOCK_MAIN, am_out);
/*
* No status lock held while sending runnable
* proc trace messages. It is however not needed
@@ -1384,7 +1385,7 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
}
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(c_p, am_in);
+ trace_sched(c_p, ERTS_PROC_LOCK_MAIN, am_in);
/*
* No status lock held while sending runnable
* proc trace messages. It is however not needed
@@ -1447,7 +1448,7 @@ queue_port_sched_op_reply(Process *rp,
erts_factory_trim_and_close(factory, &msg, 1);
- erts_queue_message(rp, rp_locksp, factory->message, msg, NIL);
+ erts_queue_message(rp, rp_locksp, factory->message, msg);
}
static void
@@ -1535,9 +1536,8 @@ erts_schedule_proc2port_signal(Process *c_p,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
c_p->msg.save = c_p->msg.last;
- erts_smp_proc_unlock(c_p,
- (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCKS_MSG_RECEIVE));
+ erts_smp_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE
+ | ERTS_PROC_LOCK_MAIN));
}
@@ -1742,6 +1742,10 @@ call_driver_outputv(int bang_op,
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
|| ERTS_IS_CRASH_DUMPING);
+
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_commandv, evp);
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_outputv)) {
DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
@@ -1868,6 +1872,9 @@ call_driver_output(int bang_op,
}
#endif
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_command, bufp, size);
+
prt->caller = caller;
(*drv->output)((ErlDrvData) prt->drv_data, bufp, size);
prt->caller = NIL;
@@ -2589,7 +2596,10 @@ call_deliver_port_exit(int bang_op,
return ERTS_PORT_OP_DROPPED;
}
- if (!erts_deliver_port_exit(prt, from, reason, bang_op))
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, from, am_close);
+
+ if (!erts_deliver_port_exit(prt, from, reason, bang_op, broken_link))
return ERTS_PORT_OP_DROPPED;
#ifdef USE_VM_PROBES
@@ -2660,13 +2670,13 @@ erts_port_exit(Process *c_p,
ERTS_PORT_SFLGS_INVALID_LOOKUP,
0,
!refp,
- am_exit);
+ am_close);
switch (try_imm_drv_call(&try_call_state)) {
case ERTS_TRY_IMM_DRV_CALL_OK: {
res = call_deliver_port_exit(flags & ERTS_PORT_SIG_FLG_BANG_OP,
- from,
+ c_p ? c_p->common.id : from,
prt,
try_call_state.state,
reason,
@@ -2745,8 +2755,11 @@ set_port_connected(int bang_op,
return ERTS_PORT_OP_DROPPED;
}
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, from, am_connect, connect);
+
ERTS_PORT_SET_CONNECTED(prt, connect);
- deliver_result(prt->common.id, from, am_connected);
+ deliver_result(prt, prt->common.id, from, am_connected);
#ifdef USE_VM_PROBES
if(DTRACE_ENABLED(port_command)) {
@@ -2768,10 +2781,22 @@ set_port_connected(int bang_op,
erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, prt->common.id);
erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, connect);
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS))
+ trace_proc(NULL, 0, rp, am_getting_linked, prt->common.id);
+
ERTS_PORT_SET_CONNECTED(prt, connect);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_linked, connect);
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, from, am_connect, connect);
+ if (IS_TRACED_FL(prt, F_TRACE_SEND)) {
+ Eterm hp[3];
+ trace_port_send(prt, from, TUPLE2(hp, prt->common.id, am_connected), 1);
+ }
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_connect)) {
DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
@@ -2874,8 +2899,11 @@ static void
port_unlink(Port *prt, Eterm from)
{
ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
- if (lnk)
+ if (lnk) {
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_unlinked, from);
erts_destroy_link(lnk);
+ }
}
static int
@@ -2945,10 +2973,10 @@ port_link_failure(Eterm port_id, Eterm linker)
NIL,
NULL,
0);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+ if (xres >= 0) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, rp, am_getting_unlinked, port_id);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, port_id);
}
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -2959,10 +2987,15 @@ port_link_failure(Eterm port_id, Eterm linker)
static void
port_link(Port *prt, erts_aint32_t state, Eterm to)
{
- if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP))
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_getting_linked, to);
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, to);
- else
+ } else {
port_link_failure(prt->common.id, to);
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_unlink, to);
+ }
}
static int
@@ -2970,8 +3003,9 @@ port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigd
{
if (op == ERTS_PROC2PORT_SIG_EXEC)
port_link(prt, state, sigdp->u.link.to);
- else
+ else {
port_link_failure(sigdp->u.link.port, sigdp->u.link.to);
+ }
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
return ERTS_PORT_REDS_LINK;
@@ -3391,7 +3425,7 @@ static int read_linebuf(LineBufContext *bp)
}
static void
-deliver_result(Eterm sender, Eterm pid, Eterm res)
+deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
{
Process *rp;
ErtsProcLocks rp_locks = 0;
@@ -3399,12 +3433,22 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ASSERT(!prt || prt->common.id == sender);
+#ifdef ERTS_SMP
+ ASSERT(!prt || erts_lc_is_port_locked(prt));
+#endif
+
ASSERT(is_internal_port(sender) && is_internal_pid(pid));
rp = (scheduler
? erts_proc_lookup(pid)
: erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_INC_REFC));
+ if (prt && IS_TRACED_FL(prt, F_TRACE_SEND)) {
+ Eterm hp[3];
+ trace_port_send(prt, pid, TUPLE2(hp, sender, res), !!rp);
+ }
+
if (rp) {
Eterm tuple;
ErtsMessage *mp;
@@ -3417,7 +3461,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
sz_res + 3, &hp, &ohp);
res = copy_struct(res, sz_res, &hp, ohp);
tuple = TUPLE2(hp, sender, res);
- erts_queue_message(rp, &rp_locks, mp, tuple, NIL);
+ erts_queue_message(rp, &rp_locks, mp, tuple);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -3449,6 +3493,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
+ int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -3471,7 +3516,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
if (!rp)
return;
- mp = erts_alloc_message_heap(rp, &rp_locks, need, &hp, &ohp);
+ mp = erts_alloc_message_heap(trace_send ? NULL : rp, &rp_locks, need, &hp, &ohp);
listp = NIL;
if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) {
@@ -3513,7 +3558,11 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, mp, tuple, am_undefined);
+ if (trace_send)
+ trace_port_send(prt, to, tuple, 1);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, &rp_locks, mp, tuple);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
@@ -3592,6 +3641,7 @@ deliver_vec_message(Port* prt, /* Port */
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
erts_aint32_t state;
+ int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -3619,7 +3669,7 @@ deliver_vec_message(Port* prt, /* Port */
need += (hlen+csize)*2;
}
- mp = erts_alloc_message_heap(rp, &rp_locks, need, &hp, &ohp);
+ mp = erts_alloc_message_heap(trace_send ? NULL : rp, &rp_locks, need, &hp, &ohp);
listp = NIL;
iov += vsize;
@@ -3680,7 +3730,11 @@ deliver_vec_message(Port* prt, /* Port */
tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, mp, tuple, am_undefined);
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send(prt, to, tuple, 1);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, &rp_locks, mp, tuple);
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
@@ -3820,6 +3874,11 @@ terminate_port(Port *prt)
ASSERT(!prt->xports);
#endif
}
+
+ if (is_internal_port(send_closed_port_id)
+ && IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send(prt, connected_id, am_closed, 1);
+
if(drv->handle != NULL) {
erts_smp_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(drv->handle);
@@ -3851,7 +3910,7 @@ terminate_port(Port *prt)
erts_flush_async_exit(erts_halt_code, "");
}
if (is_internal_port(send_closed_port_id))
- deliver_result(send_closed_port_id, connected_id, am_closed);
+ deliver_result(NULL, send_closed_port_id, connected_id, am_closed);
}
void
@@ -3884,7 +3943,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
typedef struct {
- Eterm port;
+ Port *port;
Eterm reason;
} SweepContext;
@@ -3893,10 +3952,13 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
SweepContext *psc = vpsc;
DistEntry *dep;
Process *rp;
-
+ Eterm port_id = psc->port->common.id;
ASSERT(lnk->type == LINK_PID);
-
+
+ if (IS_TRACED_FL(psc->port, F_TRACE_PORTS))
+ trace_port(psc->port, am_unlink, lnk->pid);
+
if (is_external_pid(lnk->pid)) {
dep = external_pid_dist_entry(lnk->pid);
if(dep != erts_this_dist_entry) {
@@ -3909,9 +3971,9 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
case ERTS_DSIG_PREP_NOT_CONNECTED:
break;
case ERTS_DSIG_PREP_CONNECTED:
- erts_remove_dist_link(&dld, psc->port, lnk->pid, dep);
+ erts_remove_dist_link(&dld, port_id, lnk->pid, dep);
erts_destroy_dist_link(&dld);
- code = erts_dsig_send_exit(&dsd, psc->port, lnk->pid,
+ code = erts_dsig_send_exit(&dsd, port_id, lnk->pid,
psc->reason);
ASSERT(code == ERTS_DSIG_SEND_OK);
break;
@@ -3925,23 +3987,25 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
ASSERT(is_internal_pid(lnk->pid));
rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
if (rp) {
- ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), psc->port);
+ ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), port_id);
if (rlnk) {
int xres = erts_send_exit_signal(NULL,
- psc->port,
+ port_id,
rp,
&rp_locks,
psc->reason,
NIL,
NULL,
0);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+ if (xres >= 0) {
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
/* We didn't exit the process and it is traced */
- if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(NULL, rp, am_getting_unlinked,
- psc->port);
- }
+ if (IS_TRACED_FL(rp, F_TRACE_PROCS))
+ trace_proc(NULL, 0, rp, am_getting_unlinked, port_id);
}
erts_destroy_link(rlnk);
}
@@ -3963,7 +4027,8 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
*/
int
-erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
+erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed,
+ int drop_normal)
{
ErtsLink *lnk;
Eterm rreason;
@@ -3993,8 +4058,10 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
| ERTS_PORT_SFLG_CLOSING))
return 0;
- if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(p) && from != p->common.id)
+ if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(p)
+ && from != p->common.id && drop_normal) {
return 0;
+ }
set_state_flags = ERTS_PORT_SFLG_EXITING;
if (send_closed)
@@ -4005,9 +4072,8 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
state = erts_atomic32_read_bor_mb(&p->state, set_state_flags);
state |= set_state_flags;
- if (IS_TRACED_FL(p, F_TRACE_PORTS)) {
+ if (IS_TRACED_FL(p, F_TRACE_PORTS))
trace_port(p, am_closed, reason);
- }
erts_trace_check_exiting(p->common.id);
@@ -4017,7 +4083,7 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
(void) erts_unregister_name(NULL, 0, p, p->common.u.alive.reg->name);
{
- SweepContext sc = {p->common.id, rreason};
+ SweepContext sc = {p, rreason};
lnk = ERTS_P_LINKS(p);
ERTS_P_LINKS(p) = NULL;
erts_sweep_links(lnk, &sweep_one_link, &sc);
@@ -4139,7 +4205,10 @@ call_driver_control(Eterm caller,
command, size);
}
#endif
-
+
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_control, command, bufp, size);
+
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
#ifdef USE_LTTNG_VM_TRACEPOINTS
@@ -4166,6 +4235,9 @@ call_driver_control(Eterm caller,
if (cres < 0)
return ERTS_PORT_OP_BADARG;
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send_binary(prt, caller, am_control, *resp_bufp, cres);
+
*from_size = (ErlDrvSizeT) cres;
return ERTS_PORT_OP_DONE;
@@ -4575,6 +4647,9 @@ call_driver_call(Eterm caller,
}
#endif
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, caller, am_call, command, bufp, size);
+
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_PORT);
prt->caller = caller;
@@ -4593,6 +4668,9 @@ call_driver_call(Eterm caller,
|| ((byte) (*resp_bufp)[0]) != VERSION_MAGIC)
return ERTS_PORT_OP_BADARG;
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send_binary(prt, caller, am_call, *resp_bufp, cres);
+
*from_size = (ErlDrvSizeT) cres;
return ERTS_PORT_OP_DONE;
@@ -5036,7 +5114,8 @@ reply_io_bytes(void *vreq)
eout = erts_bld_uint64(&hp, NULL, out);
msg = TUPLE4(hp, ref, make_small(sched_id), ein, eout);
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (req->sched_id == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -5507,6 +5586,7 @@ void driver_report_exit(ErlDrvPort ix, int status)
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
Port* prt = erts_drvport2port(ix);
+ int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return;
@@ -5523,13 +5603,17 @@ void driver_report_exit(ErlDrvPort ix, int status)
if (!rp)
return;
- mp = erts_alloc_message_heap(rp, &rp_locks, 3+3, &hp, &ohp);
+ mp = erts_alloc_message_heap(trace_send ? NULL : rp, &rp_locks, 3+3, &hp, &ohp);
tuple = TUPLE2(hp, am_exit_status, make_small(status));
hp += 3;
tuple = TUPLE2(hp, prt->common.id, tuple);
- erts_queue_message(rp, &rp_locks, mp, tuple, am_undefined);
+ if (IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send(prt, pid, tuple, 1);
+
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, &rp_locks, mp, tuple);
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
@@ -5623,13 +5707,13 @@ cleanup_b2t_states(struct b2t_states__ *b2tsp)
*/
static int
-driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
+driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
{
#define HEAP_EXTRA 200
#define ERTS_DDT_FAIL do { res = -1; goto done; } while (0)
Uint need = 0;
int depth = 0;
- int res;
+ int res = 0;
ErlDrvTermData* ptr;
ErlDrvTermData* ptr_end;
DECLARE_ESTACK(stack);
@@ -5848,11 +5932,26 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
? erts_proc_lookup(to)
: erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_INC_REFC));
if (!rp) {
- res = 0;
- goto done;
- }
+ if (!prt || !IS_TRACED_FL(prt, F_TRACE_SEND))
+ goto done;
+ if (!erts_is_tracer_proc_enabled(NULL, 0, &prt->common, am_send))
+ goto done;
- (void) erts_factory_message_create(&factory, rp, &rp_locks, need);
+ res = -2;
+
+ /* We allocate a temporary heap used to build the message
+ term so that it can still be reported to the send tracer */
+ erts_factory_tmp_init(&factory, erts_alloc(ERTS_ALC_T_DRIVER, need*sizeof(Eterm)),
+ need, ERTS_ALC_T_DRIVER);
+
+ } else {
+ /* We force the creation of a heap fragment (rp == NULL) when send
+ tracing is enabled, so that we do not hold the main lock of the
+ process while tracing */
+ Process *trace_rp = prt && IS_TRACED_FL(prt, F_TRACE_SEND) ? NULL : rp;
+ (void) erts_factory_message_create(&factory, trace_rp, &rp_locks, need);
+ res = 1;
+ }
/*
* Interpret the instructions and build the term.
@@ -6115,22 +6214,40 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
ESTACK_PUSH(stack, mess);
}
- res = 1;
-
done:
if (res > 0) {
mess = ESTACK_POP(stack); /* get resulting value */
erts_factory_trim_and_close(&factory, &mess, 1);
+
+ if (prt && IS_TRACED_FL(prt, F_TRACE_SEND))
+ trace_port_send(prt, to, mess, 1);
+
/* send message */
- erts_queue_message(rp, &rp_locks, factory.message, mess, am_undefined);
+ ERL_MESSAGE_TOKEN(factory.message) = am_undefined;
+ erts_queue_message(rp, &rp_locks, factory.message, mess);
+ }
+ else if (res == -2) {
+ /* This clause is only reached when we were requested to
+ generate a send trace, but the process to send to
+ no longer exists */
+ mess = ESTACK_POP(stack); /* get resulting value */
+
+ trace_port_send(prt, to, mess, 0);
+
+ erts_factory_trim_and_close(&factory, &mess, 1);
+ erts_free(ERTS_ALC_T_DRIVER, factory.hp_start);
+ res = 0;
}
else {
if (b2t.ix > b2t.used)
b2t.used = b2t.ix;
for (b2t.ix = 0; b2t.ix < b2t.used; b2t.ix++)
erts_binary2term_abort(&b2t.state[b2t.ix]);
- erts_factory_undo(&factory);
+ if (factory.mode != FACTORY_CLOSED) {
+ ERL_MESSAGE_TERM(factory.message) = am_undefined;
+ erts_factory_undo(&factory);
+ }
}
if (rp) {
if (rp_locks)
@@ -6146,7 +6263,8 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
}
static ERTS_INLINE int
-deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p)
+deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p,
+ Port **trace_prt)
{
#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
@@ -6174,8 +6292,11 @@ deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p)
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
erts_thr_progress_unmanaged_continue(dhndl);
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
- }
+ } else
#endif
+ {
+ *trace_prt = prt;
+ }
ERTS_SMP_LC_ASSERT(dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED
? erts_lc_is_port_locked(prt)
: !erts_lc_is_port_locked(prt));
@@ -6186,10 +6307,11 @@ int erl_drv_output_term(ErlDrvTermData port_id, ErlDrvTermData* data, int len)
{
/* May be called from arbitrary thread */
Eterm connected;
- int res = deliver_term_check_port(port_id, &connected);
+ Port *prt = NULL;
+ int res = deliver_term_check_port(port_id, &connected, &prt);
if (res <= 0)
return res;
- return driver_deliver_term(connected, data, len);
+ return driver_deliver_term(prt, connected, data, len);
}
/*
@@ -6213,7 +6335,7 @@ driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len)
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
- return driver_deliver_term(ERTS_PORT_GET_CONNECTED(prt), data, len);
+ return driver_deliver_term(prt, ERTS_PORT_GET_CONNECTED(prt), data, len);
}
int erl_drv_send_term(ErlDrvTermData port_id,
@@ -6222,10 +6344,11 @@ int erl_drv_send_term(ErlDrvTermData port_id,
int len)
{
/* May be called from arbitrary thread */
- int res = deliver_term_check_port(port_id, NULL);
+ Port *prt = NULL;
+ int res = deliver_term_check_port(port_id, NULL, &prt);
if (res <= 0)
return res;
- return driver_deliver_term(to, data, len);
+ return driver_deliver_term(prt, to, data, len);
}
/*
@@ -6244,20 +6367,21 @@ driver_send_term(ErlDrvPort drvport,
* to make this access safe without using a less efficient
* internal data representation for ErlDrvPort.
*/
+ Port* prt = NULL;
ERTS_SMP_CHK_NO_PROC_LOCKS;
#ifdef ERTS_SMP
if (erts_thr_progress_is_managed_thread())
#endif
{
erts_aint32_t state;
- Port* prt = erts_drvport2port_state(drvport, &state);
+ prt = erts_drvport2port_state(drvport, &state);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
return -1; /* invalid (dead) */
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
}
- return driver_deliver_term(to, data, len);
+ return driver_deliver_term(prt, to, data, len);
}
@@ -7433,7 +7557,7 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
if (state & ERTS_PORT_SFLG_CLOSING) {
terminate_port(prt);
} else if (eof && (state & ERTS_PORT_SFLG_SOFT_EOF)) {
- deliver_result(prt->common.id, ERTS_PORT_GET_CONNECTED(prt), am_eof);
+ deliver_result(prt, prt->common.id, ERTS_PORT_GET_CONNECTED(prt), am_eof);
} else {
/* XXX UGLY WORK AROUND, Let erts_deliver_port_exit() terminate the port */
if (prt->port_data_lock)
@@ -7441,7 +7565,7 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
prt->ioq.size = 0;
if (prt->port_data_lock)
driver_pdl_unlock(prt->port_data_lock);
- erts_deliver_port_exit(prt, prt->common.id, eof ? am_normal : term, 0);
+ erts_deliver_port_exit(prt, prt->common.id, eof ? am_normal : term, 0, 0);
}
return 0;
}
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 071f6aee08..77f79fcea4 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -226,7 +226,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
rp = (RegProc*) hash_put(&process_reg, (void*) &r);
if (proc && rp->p == proc) {
if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
- trace_proc(c_p, proc, am_register, name);
+ trace_proc(proc, ERTS_PROC_LOCK_MAIN,
+ proc, am_register, name);
}
proc->common.u.alive.reg = rp;
}
@@ -470,8 +471,8 @@ int erts_unregister_name(Process *c_p,
int res = 0;
RegProc r, *rp;
Port *port = c_prt;
+ ErtsProcLocks current_c_p_locks = 0;
#ifdef ERTS_SMP
- ErtsProcLocks current_c_p_locks;
/*
* SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
@@ -537,8 +538,12 @@ int erts_unregister_name(Process *c_p,
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
rp->pt->common.u.alive.reg = NULL;
-
+
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
+ if (current_c_p_locks) {
+ erts_smp_proc_unlock(c_p, current_c_p_locks);
+ current_c_p_locks = 0;
+ }
trace_port(port, am_unregister, r.name);
}
@@ -555,7 +560,8 @@ int erts_unregister_name(Process *c_p,
#endif
rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
- trace_proc(c_p, rp->p, am_unregister, r.name);
+ trace_proc(rp->p, (c_p == rp->p) ? c_p_locks : ERTS_PROC_LOCK_MAIN,
+ rp->p, am_unregister, r.name);
}
#ifdef ERTS_SMP
if (rp->p != c_p) {
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 8b3eb2db1d..748fba15c7 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -644,6 +644,15 @@ typedef struct preload {
unsigned char* code; /* Code pointer */
} Preload;
+/*
+ * ErtsTracer is either NIL, 'true' or [Mod | State]
+ *
+ * If set to NIL, it means no tracer.
+ * If set to 'true' it means the current process' tracer.
+ * If set to [Mod | State], there is a tracer.
+ * See erts_tracer_update for more details
+ */
+typedef Eterm ErtsTracer;
/*
* This structure contains options to all built in drivers.
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 384b340a1c..b280995488 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -2298,7 +2298,7 @@ static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(p, NULL /* only used for smp build */, mp, message, NIL);
+ erts_queue_message(p, NULL /* only used for smp build */, mp, message);
}
#endif
}