author     Lukas Larsson <[email protected]>    2015-12-10 11:10:46 +0100
committer  Lukas Larsson <[email protected]>    2016-04-15 15:06:27 +0200
commit     37092dab15448ef6a078800e3ff0cc41880ea765 (patch)
tree       cb8a30fe427cec5f82d96b3e05258d4fd263e58f /erts
parent     13bd4ca2493d8a76f9d835c27163b56ba86c2aef (diff)
erts: Implement tracer modules
Add the possibility to use modules as trace data receivers. The functions
in the module have to be NIFs, as otherwise complex trace probes (for
example, trace probes for ports) would be very hard to handle.

This commit changes the ptab->tracer field from always being an immediate
to being either NIL, when no tracer is present, or the tuple
{TracerModule, TracerState}, where TracerModule is an atom that is later
used to look up the appropriate tracer callbacks to call and TracerState
is passed as-is to the tracer callback. The default process and port
tracers have been rewritten to use the new API.

This commit also changes the order in which trace messages are delivered
to the potential tracer process. Any enif_send done in a tracer module may
be delayed indefinitely because of lock order issues. If a message is
delayed, any other trace message sent from that process is also delayed,
so that order is preserved for each traced entity. This means that for
some trace events (e.g. send/receive) the events may arrive at the trace
receiver in an unintuitive order (receive before send). Timestamps are
taken when the trace message is generated, so trace messages from
different processes may arrive with their timestamps out of order.

Both erlang:trace and seq_trace:set_system_tracer accept the new tracer
module tracers as well as the backwards compatible arguments.

OTP-10267
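
For reference, a minimal sketch (not part of this commit) of the callback
contract described above, written as plain Erlang stubs. A real tracer has
to implement enabled/3 and trace/6 as NIFs, as noted in erl_tracer.xml
below; the module name my_msg_tracer and its {collector, Pid} state term
are assumptions for illustration only.

    -module(my_msg_tracer).
    -export([enabled/3, trace/6]).

    %% Called early for every trace probe; decides whether a trace event
    %% should be generated at all. Returning remove asks the VM to drop
    %% this tracer (and its trace flags) from the tracee.
    enabled(trace_status, _State, _Tracee) -> trace;
    enabled(send, _State, _Tracee) -> trace;
    enabled(_Tag, _State, _Tracee) -> discard.

    %% Called only when enabled/3 returned trace. State is the TracerState
    %% given in the {tracer, Module, State} flag; the two trace terms map
    %% to the fourth and fifth elements of the classic trace tuples.
    trace(send, {collector, Pid}, Tracee, _Msg, Recipient, _Opts) ->
        Pid ! {traced_send, Tracee, Recipient},
        ok;
    trace(_Tag, _State, _Tracee, _First, _Second, _Opts) ->
        ok.

Such a module would be enabled with the new three-element tracer flag
documented in the diff below, e.g.
erlang:trace(new, true, [send, {tracer, my_msg_tracer, {collector, self()}}]).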
Diffstat (limited to 'erts')
-rw-r--r--  erts/doc/src/Makefile | 15
-rw-r--r--  erts/doc/src/erl_nif.xml | 3
-rw-r--r--  erts/doc/src/erl_tracer.xml | 324
-rw-r--r--  erts/doc/src/erlang.xml | 46
-rw-r--r--  erts/doc/src/match_spec.xml | 11
-rw-r--r--  erts/doc/src/ref_man.xml | 1
-rw-r--r--  erts/doc/src/specs.xml | 1
-rw-r--r--  erts/emulator/Makefile.in | 20
-rw-r--r--  erts/emulator/beam/atom.names | 7
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 7
-rw-r--r--  erts/emulator/beam/beam_bp.c | 169
-rw-r--r--  erts/emulator/beam/beam_bp.h | 16
-rw-r--r--  erts/emulator/beam/beam_emu.c | 10
-rw-r--r--  erts/emulator/beam/bif.c | 66
-rw-r--r--  erts/emulator/beam/bif.tab | 5
-rw-r--r--  erts/emulator/beam/code_ix.c | 1
-rw-r--r--  erts/emulator/beam/dist.c | 16
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 3
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 5
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 7
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 543
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 74
-rw-r--r--  erts/emulator/beam/erl_gc.c | 6
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 5
-rw-r--r--  erts/emulator/beam/erl_message.c | 160
-rw-r--r--  erts/emulator/beam/erl_message.h | 110
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 2
-rw-r--r--  erts/emulator/beam/erl_nif.c | 252
-rw-r--r--  erts/emulator/beam/erl_nif.h | 8
-rw-r--r--  erts/emulator/beam/erl_process.c | 216
-rw-r--r--  erts/emulator/beam/erl_process.h | 8
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 191
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 32
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 8
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 2
-rw-r--r--  erts/emulator/beam/erl_trace.c | 2530
-rw-r--r--  erts/emulator/beam/erl_trace.h | 78
-rw-r--r--  erts/emulator/beam/erlang_dtrace.d | 29
-rw-r--r--  erts/emulator/beam/global.h | 26
-rw-r--r--  erts/emulator/beam/io.c | 34
-rw-r--r--  erts/emulator/beam/register.c | 8
-rw-r--r--  erts/emulator/beam/sys.h | 9
-rw-r--r--  erts/emulator/beam/utils.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_debug.c | 2
-rw-r--r--  erts/emulator/nifs/common/erl_tracer_nif.c | 250
-rw-r--r--  erts/emulator/test/Makefile | 2
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl | 98
-rw-r--r--  erts/emulator/test/trace_SUITE.erl | 193
-rw-r--r--  erts/emulator/test/tracer_SUITE.erl | 622
-rw-r--r--  erts/emulator/test/tracer_SUITE_data/Makefile.src | 8
-rw-r--r--  erts/emulator/test/tracer_SUITE_data/tracer_test.c | 122
-rw-r--r--  erts/emulator/test/tracer_test.erl | 55
-rwxr-xr-x  erts/emulator/utils/make_driver_tab | 13
-rw-r--r--  erts/preloaded/ebin/erl_tracer.beam | bin 0 -> 1864 bytes
-rw-r--r--  erts/preloaded/src/Makefile | 3
-rw-r--r--  erts/preloaded/src/erl_tracer.erl | 41
-rw-r--r--  erts/preloaded/src/erlang.erl | 67
-rw-r--r--  erts/preloaded/src/erts_internal.erl | 29
-rw-r--r--  erts/preloaded/src/init.erl | 4
62 files changed, 3913 insertions, 2672 deletions
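
The commit message also notes that seq_trace:set_system_tracer accepts the
new tracer module tracers, which the system_flag(sequential_tracer, ...)
changes in bif.c below implement. A minimal hedged sketch of both forms,
assuming a hypothetical my_seq_tracer module that implements the
erl_tracer behaviour as NIFs and the {TracerModule, TracerState} shape
described in the commit message:

    -module(seq_tracer_setup).
    -export([use_tracer_module/1, use_pid/1]).

    %% New form: a tracer module plus its state term.
    use_tracer_module(State) ->
        seq_trace:set_system_tracer({my_seq_tracer, State}).

    %% Backwards compatible form: a local process (or port) identifier.
    use_pid(Pid) when is_pid(Pid) ->
        seq_trace:set_system_tracer(Pid).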
diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile
index e02e89238e..b96cbbce40 100644
--- a/erts/doc/src/Makefile
+++ b/erts/doc/src/Makefile
@@ -50,12 +50,14 @@ XML_REF1_FILES = epmd.xml \
XML_REF3_EFILES = \
erl_prim_loader.xml \
erlang.xml \
+ erl_tracer.xml \
init.xml \
zlib.xml
XML_REF3_FILES = \
driver_entry.xml \
erl_nif.xml \
+ erl_tracer.xml \
erl_driver.xml \
erl_prim_loader.xml \
erlang.xml \
@@ -154,18 +156,9 @@ clean:
rm -f $(SPECDIR)/*
rm -f errs core *~
-$(SPECDIR)/specs_driver_entry.xml:
+$(SPECDIR)/specs_%.xml:
escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
- -o$(dir $@) -module driver_entry
-$(SPECDIR)/specs_erl_nif.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
- -o$(dir $@) -module erl_nif
-$(SPECDIR)/specs_erl_driver.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
- -o$(dir $@) -module erl_driver
-$(SPECDIR)/specs_erts_alloc.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
- -o$(dir $@) -module erts_alloc
+ -o$(dir $@) -module $(patsubst $(SPECDIR)/specs_%.xml,%,$@)
# ----------------------------------------------------
# Release Target
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index 5f56d8b595..7546f7ef81 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -566,8 +566,7 @@ typedef struct {
<code type="none">
typedef void ErlNifResourceDtor(ErlNifEnv* env, void* obj);
</code>
- <p>The function prototype of a resource destructor function.
- A destructor function is not allowed to call any term-making functions.</p>
+ <p>The function prototype of a resource destructor function.</p>
</item>
<tag><marker id="ErlNifCharEncoding"/>ErlNifCharEncoding</tag>
<item>
diff --git a/erts/doc/src/erl_tracer.xml b/erts/doc/src/erl_tracer.xml
new file mode 100644
index 0000000000..1e8e78b25f
--- /dev/null
+++ b/erts/doc/src/erl_tracer.xml
@@ -0,0 +1,324 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2016</year><year>2016</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>erl_tracer</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <module>erl_tracer</module>
+ <modulesummary>Erlang Tracer Behaviour</modulesummary>
+ <description>
+ <p>A behaviour module for implementing the back end of the Erlang
+ tracing system. The functions in this module are called whenever
+ a trace probe is triggered. Both the <c>enabled</c> and <c>trace</c>
+ functions are called in the context of the entity that triggered the
+ trace probe.
+ This means that the overhead of having tracing enabled is greatly
+ affected by how much time is spent in these functions, so do as
+ little work as possible in them.</p>
+ <note>
+ <p>All functions in this behaviour have to be implemented as NIFs.
+ This is a limitation that may be lifted in the future.
+ There is an <seealso marker="#example">example tracer module NIF</seealso>
+ implementation at the end of this page.</p>
+ </note>
+ <warning>
+ <p>Do not send messages or issue port commands to the <c>Tracee</c>
+ in any of the callbacks. Doing so is not allowed and can cause all
+ sorts of strange behaviour, including but not limited to infinite
+ recursions.</p>
+ </warning>
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="trace_tag" />
+ <desc>
+ <p>The different trace tags that the tracer will be called with.
+ Each trace tag is described in greater detail in
+ <seealso marker="#trace">Module:trace/6</seealso>.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="tracee" />
+ <desc>
+ <p>The process or port that the trace belongs to.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="trace_opts" />
+ <desc>
+ <p>The options for the tracee.
+ <taglist>
+ <tag><c>timestamp</c></tag>
+ <item>If not set to <c>undefined</c>, the tracer has been requested to
+ include a timestamp.</item>
+ <tag><c>match_spec_result</c></tag>
+ <item>If not set to <c>true</c>, the tracer has been requested to
+ include the output of a match specification that was run.</item>
+ <tag><c>scheduler_id</c></tag>
+ <item>Set to a number if the scheduler id is to be included by the tracer.
+ Otherwise it is set to <c>undefined</c>.</item>
+ </taglist>
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="tracer_state" />
+ <desc>
+ <p>
+ The state which is given when calling
+ <seealso marker="erlang#trace-3"><c>erlang:trace(PidPortSpec,true,[{tracer,Module,TracerState}])</c></seealso>.
+ The tracer state is an immutable value that is passed to erl_tracer callbacks and should
+ contain all the data that is needed to generate the trace event.
+ </p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <section>
+ <title>CALLBACK FUNCTIONS</title>
+ <p>The following functions
+ should be exported from an <c>erl_tracer</c> callback module.</p>
+ </section>
+ <marker id="enabled"></marker>
+ <funcs>
+ <func>
+ <name>Module:enabled(TraceTag, TracerState, Tracee) -> Result</name>
+ <fsummary>Check if a trace event should be generated.</fsummary>
+ <type>
+ <v>TraceTag = <seealso marker="#type-trace_tag">trace_tag()</seealso> | trace_status</v>
+ <v>TracerState = term()</v>
+ <v>Tracee = <seealso marker="#type-tracee">tracee()</seealso></v>
+ <v>Result = trace | discard | remove</v>
+ </type>
+ <desc>
+ <p>This callback will be called whenever a trace point is triggered. It
+ allows the tracer to decide whether a trace should be generated or not.
+ This check is made as early as possible in order to limit the amount of
+ overhead associated with tracing. If <c>trace</c> is returned, the
+ necessary trace data will be created and the trace callback of the tracer
+ will be called. If <c>discard</c> is returned, this trace call
+ will be discarded and no call to trace will be done. If
+ <c>remove</c> is returned, the VM will attempt to remove this tracer
+ from the tracee, together with any trace flags set on the tracee.
+ </p>
+ <p><c>trace_status</c> is a special type of <c>TraceTag</c> which is used
+ to check if the tracer should still be active. It is called in multiple
+ scenarios, but most significantly it is used when tracing is started
+ using this tracer.</p>
+ <p>This function may be called multiple times per trace point, so it
+ is important that it is both fast and side effect free.</p>
+ </desc>
+ </func>
+ <marker id="trace"></marker>
+ <func>
+ <name>Module:trace(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
+ <fsummary>Handle a trace event.</fsummary>
+ <type>
+ <v>TraceTag = <seealso marker="#type-trace_tag">trace_tag()</seealso></v>
+ <v>TracerState = term()</v>
+ <v>Tracee = <seealso marker="#type-tracee">tracee()</seealso></v>
+ <v>FirstTraceTerm = term()</v>
+ <v>SecondTraceTerm = term() | undefined</v>
+ <v>Opts = <seealso marker="#type-trace_opts">trace_opts()</seealso></v>
+ <v>Result = ok</v>
+ </type>
+ <desc>
+ <p>This callback will be called when a trace point is triggered and
+ the <seealso marker="#enabled">Module:enabled/3</seealso>
+ callback returned <c>trace</c>. This is where any side effects needed
+ by the tracer should be done. The trace point payload is located in
+ the <c>FirstTraceTerm</c> and <c>SecondTraceTerm</c>. The content
+ of the TraceTerms depends on which <c>TraceTag</c> has been triggered.
+ The <c>FirstTraceTerm</c> and <c>SecondTraceTerm</c> correspond to the
+ fourth and fifth slot in the trace tuples described in
+ <seealso marker="erlang#trace_3_trace_messages">erlang:trace/3</seealso>.
+ If the tuple only has four elements, <c>SecondTraceTerm</c> will be
+ <c>undefined</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="trace">Module:trace(seq_trace, TracerState, Label, SeqTraceInfo, undefined, Opts) -> Result</name>
+ <fsummary>Handle a sequence trace event.</fsummary>
+ <type>
+ <v>TracerState = term()</v>
+ <v>Label = term()</v>
+ <v>SeqTraceInfo = term()</v>
+ <v>Opts = <seealso marker="#type-trace_opts">trace_opts()</seealso></v>
+ <v>Result = ok</v>
+ </type>
+ <desc>
+ <p>The <c>TraceTag</c> <c>seq_trace</c> is handled a little bit
+ differently. There is no <c>Tracee</c> for seq_trace; instead, the
+ <c>Label</c> associated with the seq_trace event is given.
+ For more info on what <c>Label</c> and <c>SeqTraceInfo</c> can be,
+ see the <seealso marker="kernel:seq_trace">seq_trace</seealso> manual.</p>
+ </desc>
+ </func>
+ </funcs>
+ <section>
+ <marker id="example"></marker>
+ <title>Erl Tracer Module example</title>
+ <p>In the example below, a tracer module with a NIF backend sends a message
+ for each <c>send</c> trace tag, containing only the sender and receiver.
+ Using this tracer module, a much more lightweight message tracer is
+ used that only records who sent messages to whom.</p>
+ <p>Here is an example session using it on Linux.</p>
+ <pre>
+$ gcc -I erts-8.0/include/ -fPIC -shared -o erl_msg_tracer.so erl_msg_tracer.c
+$ erl
+Erlang/OTP 19 [DEVELOPMENT] [erts-8.0] [source-ed2b56b] [64-bit] [smp:8:8] [async-threads:10] [hipe] [kernel-poll:false]
+
+Eshell V8.0 (abort with ^G)
+1&gt; c(erl_msg_tracer), erl_msg_tracer:load().
+ok
+2&gt; Tracer = spawn(fun F() -&gt; receive M -&gt; io:format("~p~n",[M]), F() end end).
+&lt;0.37.0&gt;
+3&gt; erlang:trace(new, true, [send,{tracer, erl_msg_tracer, Tracer}]).
+0
+{&lt;0.39.0&gt;,&lt;0.27.0&gt;}
+4&gt; {ok, D} = file:open("/tmp/tmp.data",[write]).
+{trace,#Port&lt;0.486&gt;,&lt;0.40.0&gt;}
+{trace,&lt;0.40.0&gt;,&lt;0.21.0&gt;}
+{trace,#Port&lt;0.487&gt;,&lt;0.4.0&gt;}
+{trace,#Port&lt;0.488&gt;,&lt;0.4.0&gt;}
+{trace,#Port&lt;0.489&gt;,&lt;0.4.0&gt;}
+{trace,#Port&lt;0.490&gt;,&lt;0.4.0&gt;}
+{ok,&lt;0.40.0&gt;}
+{trace,&lt;0.41.0&gt;,&lt;0.27.0&gt;}
+5&gt;
+ </pre>
+ <p>erl_msg_tracer.erl</p>
+ <pre>
+-module(erl_msg_tracer).
+
+-export([enabled/3, trace/6, load/0]).
+
+load() ->
+ erlang:load_nif("erl_msg_tracer", []).
+
+enabled(_, _, _) ->
+ error.
+
+trace(_, _, _, _, _, _) ->
+ error.
+ </pre>
+ <p>erl_msg_tracer.c</p>
+ <pre>
+#include "erl_nif.h"
+
+/* NIF interface declarations */
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
+static void unload(ErlNifEnv* env, void* priv_data);
+
+/* The NIFs: */
+static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
+static ErlNifFunc nif_funcs[] = {
+ {"enabled", 3, enabled},
+ {"trace", 6, trace}
+};
+
+ERL_NIF_INIT(erl_msg_tracer, nif_funcs, load, NULL, upgrade, unload)
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ *priv_data = NULL;
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,
+ ERL_NIF_TERM load_info)
+{
+ if (*old_priv_data != NULL || *priv_data != NULL) {
+ return -1; /* Don't know how to do that */
+ }
+ if (load(env, priv_data, load_info)) {
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * argv[0]: Trace Tag
+ * argv[1]: TracerState
+ * argv[2]: Tracee
+ */
+static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifPid to_pid;
+ if (enif_get_local_pid(env, argv[1], &amp;to_pid))
+ if (!enif_is_process_alive(env, &amp;to_pid))
+ /* tracer is dead so we should remove this trace point */
+ return enif_make_atom(env, "remove");
+
+ /* Only generate trace for when tracer != tracee */
+ if (enif_is_identical(argv[1], argv[2]))
+ return enif_make_atom(env, "discard");
+
+ /* Only trigger trace messages on 'send' */
+ if (enif_is_identical(enif_make_atom(env, "send"), argv[0]))
+ return enif_make_atom(env, "trace");
+
+ /* Have to answer trace_status */
+ if (enif_is_identical(enif_make_atom(env, "trace_status"), argv[0]))
+ return enif_make_atom(env, "trace");
+
+ return enif_make_atom(env, "discard");
+}
+
+/*
+ * argv[0]: Trace Tag, should only be 'send'
+ * argv[1]: TracerState, process to send {argv[2], argv[4]} to
+ * argv[2]: Tracee
+ * argv[3]: Message, ignored
+ * argv[4]: Recipient
+ * argv[5]: Options, ignored
+ */
+static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifPid to_pid;
+
+ if (enif_get_local_pid(env, argv[1], &amp;to_pid)) {
+ ERL_NIF_TERM msg = enif_make_tuple3(env, enif_make_atom(env, "trace"), argv[2], argv[4]);
+ enif_send(env, &amp;to_pid, NULL, msg);
+ }
+
+ return enif_make_atom(env, "ok");
+}
+ </pre>
+ </section>
+</erlref>
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index ef577c82bf..86bdb1dfe6 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -8371,7 +8371,7 @@ timestamp() ->
<taglist>
<tag><c>all</c></tag>
<item>
- <p>Sets all trace flags except <c>{tracer, Tracer}</c> and
+ <p>Sets all trace flags except <c>tracer</c> and
<c>cpu_timestamp</c>, which are in their nature different
than the others.</p>
</item>
@@ -8529,12 +8529,20 @@ timestamp() ->
<item>
<p>Specifies where to send the trace messages. <c>Tracer</c>
must be the process identifier of a local process
- or the port identifier
- of a local port. If this flag is not given, trace
- messages are sent to the process that called
- <c>erlang:trace/3</c>.</p>
+ or the port identifier of a local port.</p>
+ </item>
+ <tag><c>{tracer, TracerModule, TracerState}</c></tag>
+ <item>
+ <p>Specifies that a tracer module should be called
+ instead of sending a trace message. The tracer module
+ can then ignore or change the trace message. For more details
+ on how to write a tracer module, see <seealso marker="erl_tracer">
+ erl_tracer</seealso>.
+ </p>
</item>
</taglist>
+ <p>If no <c>tracer</c> is given, the calling process
+ receives all of the trace messages.</p>
<p>The effect of combining <c>set_on_first_link</c> with
<c>set_on_link</c> is the same as having
<c>set_on_first_link</c> alone. Likewise for
@@ -8706,9 +8714,9 @@ timestamp() ->
garbage collection.</p>
</item>
</taglist>
- <p>If the tracing process dies, the flags are silently
- removed.</p>
- <p>Only one process can trace a particular process. Therefore,
+ <p>If the tracing process/port dies or the tracer module returns
+ <c>remove</c>, the flags are silently removed.</p>
+ <p>Each process can only be traced by one tracer. Therefore,
attempts to trace an already traced process fail.</p>
<p>Returns: A number indicating the number of processes that
matched <c><anno>PidSpec</anno></c>.
@@ -8716,7 +8724,7 @@ timestamp() ->
identifier, the return value is <c>1</c>.
If <c><anno>PidSpec</anno></c>
is <c>all</c> or <c>existing</c>, the return value is
- the number of processes running, excluding tracer processes.
+ the number of processes running.
If <c><anno>PidSpec</anno></c> is <c>new</c>, the return value is
<c>0</c>.</p>
<p>Failure: <c>badarg</c> if the specified arguments are
@@ -8750,7 +8758,7 @@ timestamp() ->
has not been traced by someone, but if this is the case,
<em>no</em> trace messages have been delivered when the
<c>trace_delivered</c> message arrives.</p>
- <p>Notice that that <c><anno>Tracee</anno></c> must refer
+ <p>Notice that <c><anno>Tracee</anno></c> must refer
to a process currently,
or previously existing on the same node as the caller of
<c>erlang:trace_delivered(<anno>Tracee</anno>)</c> resides on.
@@ -8801,7 +8809,8 @@ timestamp() ->
</item>
<tag><c>tracer</c></tag>
<item>
- <p>Returns the identifier for process or port tracing this
+ <p>Returns the identifier of the process or port, or a tuple containing
+ the tracer module and tracer state, that is tracing this
process. If this process is not being traced, the return
value is <c>[]</c>.</p>
</item>
@@ -8830,8 +8839,8 @@ timestamp() ->
</item>
<tag><c>meta</c></tag>
<item>
- <p>Returns the meta-trace tracer process or port for this
- function, if it has one. If the function is not
+ <p>Returns the meta-trace tracer process, port, or tracer module
+ for this function, if it has one. If the function is not
meta-traced, the returned value is <c>false</c>. If
the function is meta-traced but has once detected that
the tracer process is invalid, the returned value is [].</p>
@@ -8999,13 +9008,12 @@ timestamp() ->
the process, a <c>return_to</c> message is also sent
when this function returns to its caller.</p>
</item>
- <tag><c>meta | {meta, <anno>Pid</anno>}</c></tag>
+ <tag><c>meta | {meta, <anno>Pid</anno>} | {meta, <anno>TracerModule</anno>, <anno>TracerState</anno>}</c>
+ </tag>
<item>
<p>Turns on or off meta-tracing for all types of function
- calls. Trace messages are sent to the tracer process
- or port <c><anno>Pid</anno></c> whenever any of the specified
- functions are called, regardless of how they are called.
- If no <c><anno>Pid</anno></c> is specified,
+ calls. Trace messages are sent to the tracer whenever any of
+ the specified functions are called. If no tracer is specified,
<c>self()</c> is used as a default tracer process.</p>
<p>Meta-tracing traces all processes and does not care
about the process trace flags set by <c>trace/3</c>,
@@ -9013,7 +9021,7 @@ timestamp() ->
<c>[call, timestamp]</c>.</p>
<p>The match specification function <c>{return_trace}</c>
works with meta-trace and sends its trace message to the
- same tracer process.</p>
+ same tracer.</p>
</item>
<tag><c>call_count</c></tag>
<item>
diff --git a/erts/doc/src/match_spec.xml b/erts/doc/src/match_spec.xml
index 975f01cf2c..b49e1483aa 100644
--- a/erts/doc/src/match_spec.xml
+++ b/erts/doc/src/match_spec.xml
@@ -287,7 +287,7 @@
can <em>not</em> be one of the atoms <c><![CDATA[all]]></c>, <c><![CDATA[new]]></c> or
<c><![CDATA[existing]]></c> (unless, of course, they are registered names).
<c><![CDATA[P2]]></c> can <em>not</em> be <c><![CDATA[cpu_timestamp]]></c> nor
- <c><![CDATA[{tracer,_}]]></c>.
+ <c><![CDATA[tracer]]></c>.
Returns <c><![CDATA[true]]></c> and may only be used in
the <c><![CDATA[MatchBody]]></c> part when tracing.
</p>
@@ -298,7 +298,7 @@
be either a process identifier or a registered name and is given
as the first argument to the match_spec function.
<c><![CDATA[P2]]></c> can <em>not</em> be <c><![CDATA[cpu_timestamp]]></c> nor
- <c><![CDATA[{tracer,_}]]></c>. Returns
+ <c><![CDATA[tracer]]></c>. Returns
<c><![CDATA[true]]></c> and may only be used in the <c><![CDATA[MatchBody]]></c> part
when tracing.
</p>
@@ -308,11 +308,14 @@
disable list is applied first, but effectively all changes
are applied atomically. The trace flags
are the same as for <c><![CDATA[erlang:trace/3]]></c> not including
- <c><![CDATA[cpu_timestamp]]></c> but including <c><![CDATA[{tracer,_}]]></c>. If a
+ <c><![CDATA[cpu_timestamp]]></c> but including <c><![CDATA[tracer]]></c>. If a
tracer is specified in both lists, the tracer in the
enable list takes precedence. If no tracer is specified the
same tracer as the process executing the match spec is
- used. With three parameters to this function the first is
+ used. When using a <seealso marker="erl_tracer">tracer module</seealso>,
+ the module has to be loaded before the match specification is executed.
+ If it is not loaded, the match will fail.
+ With three parameters to this function the first is
either a process identifier or the registered name of a
process to set trace flags on, the second is the disable
list, and the third is the enable list. Returns
diff --git a/erts/doc/src/ref_man.xml b/erts/doc/src/ref_man.xml
index aa245ec08a..e45402a397 100644
--- a/erts/doc/src/ref_man.xml
+++ b/erts/doc/src/ref_man.xml
@@ -56,5 +56,6 @@
<xi:include href="driver_entry.xml"/>
<xi:include href="erts_alloc.xml"/>
<xi:include href="erl_nif.xml"/>
+ <xi:include href="erl_tracer.xml"/>
</application>
diff --git a/erts/doc/src/specs.xml b/erts/doc/src/specs.xml
index 41a3984659..ed6be650e5 100644
--- a/erts/doc/src/specs.xml
+++ b/erts/doc/src/specs.xml
@@ -2,6 +2,7 @@
<specs xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="../specs/specs_erl_prim_loader.xml"/>
<xi:include href="../specs/specs_erlang.xml"/>
+ <xi:include href="../specs/specs_erl_tracer.xml"/>
<xi:include href="../specs/specs_init.xml"/>
<xi:include href="../specs/specs_zlib.xml"/>
</specs>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 26a737619e..fb486c917f 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -577,7 +577,7 @@ GENERATE += $(TARGET)/erl_version.h
# driver table
$(TTF_DIR)/driver_tab.c: Makefile.in utils/make_driver_tab
- $(gen_verbose)LANG=C $(PERL) utils/make_driver_tab -o $@ -nifs $(STATIC_NIF_LIBS) -drivers $(DRV_OBJS) $(STATIC_DRIVER_LIBS)
+ $(gen_verbose)LANG=C $(PERL) utils/make_driver_tab -o $@ -nifs $(NIF_OBJS) $(STATIC_NIF_LIBS) -drivers $(DRV_OBJS) $(STATIC_DRIVER_LIBS)
GENERATE += $(TTF_DIR)/driver_tab.c
@@ -600,8 +600,8 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
$(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
$(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam
-
+ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam
$(gen_verbose)LANG=C $(PERL) utils/make_preload $(MAKE_PRELOAD_EXTRA) -rc $^ > $@
else
PRELOAD_OBJ = $(OBJDIR)/preload.o
@@ -616,7 +616,8 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
$(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
$(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam
+ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam
$(gen_verbose)LANG=C $(PERL) utils/make_preload -old $^ > $@
endif
@@ -690,6 +691,9 @@ $(OBJDIR)/%.o: drivers/common/%.c
$(OBJDIR)/%.o: drivers/$(ERLANG_OSTYPE)/%.c
$(V_CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -I../etc/$(ERLANG_OSTYPE) -c $< -o $@
+$(OBJDIR)/%.o: nifs/common/%.c
+ $(V_CC) $(CFLAGS) -DLIBSCTP=$(LIBSCTP) $(INCLUDES) -Inifs/common -Inifs/$(ERLANG_OSTYPE) -c $< -o $@
+
# ----------------------------------------------------------------------
# Specials
#
@@ -780,6 +784,7 @@ RUN_OBJS = \
$(OBJDIR)/erl_msacc.o
LTTNG_OBJS = $(OBJDIR)/erlang_lttng.o
+NIF_OBJS = $(OBJDIR)/erl_tracer_nif.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
@@ -889,7 +894,7 @@ endif
BASE_OBJS = $(EMU_OBJS) $(RUN_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS) $(LTTNG_OBJS)
-before_DTrace_OBJS = $(BASE_OBJS) $(DRV_OBJS)
+before_DTrace_OBJS = $(BASE_OBJS) $(DRV_OBJS) $(NIF_OBJS)
DTRACE_OBJS =
ifdef DTRACE_ENABLED_2STEP
@@ -1042,6 +1047,7 @@ endif
BEAM_SRC=$(wildcard beam/*.c)
DRV_COMMON_SRC=$(wildcard drivers/common/*.c)
DRV_OSTYPE_SRC=$(wildcard drivers/$(ERLANG_OSTYPE)/*.c)
+NIF_COMMON_SRC=$(wildcard nifs/common/*.c)
ALL_SYS_SRC=$(wildcard sys/$(ERLANG_OSTYPE)/*.c) $(wildcard sys/common/*.c)
# We use $(shell ls) here instead of wildcard as $(wildcard ) resolved at
# loadtime of the makefile and at that time these files are not generated yet.
@@ -1074,7 +1080,7 @@ MG_FLAG=-MG
endif
DEP_CC=$(CC)
-DEP_FLAGS=-MM $(MG_FLAG) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE)
+DEP_FLAGS=-MM $(MG_FLAG) $(CFLAGS) $(INCLUDES) -Inifs/common -Idrivers/common -Idrivers/$(ERLANG_OSTYPE)
SYS_SRC=$(ALL_SYS_SRC)
endif
@@ -1099,6 +1105,8 @@ $(TTF_DIR)/depend.mk: $(TTF_DIR)/GENERATED $(PRELOAD_SRC)
| $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
$(V_at)$(DEP_CC) $(DEP_FLAGS) -I../etc/$(ERLANG_OSTYPE) $(DRV_OSTYPE_SRC) \
| $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
+ $(V_at)$(DEP_CC) $(DEP_FLAGS) $(NIF_COMMON_SRC) \
+ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
$(V_at)$(DEP_CC) $(DEP_FLAGS) $(SYS_SRC) \
| $(SED_DEPEND) >> $(TTF_DIR)/depend.mk
$(V_at)$(DEP_CC) $(DEP_FLAGS) $(TARGET_SRC) \
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index d466f00028..aeea6cc989 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -197,6 +197,7 @@ atom dirty_cpu_schedulers_online
atom dirty_io
atom disable_trace
atom disabled
+atom discard
atom display_items
atom dist
atom dist_cmd
@@ -231,6 +232,7 @@ atom exception_trace
atom extended
atom Eq='=:='
atom Eqeq='=='
+atom erl_tracer
atom erlang
atom ERROR='ERROR'
atom error_handler
@@ -353,6 +355,7 @@ atom match
atom match_limit
atom match_limit_recursion
atom match_spec
+atom match_spec_result
atom max
atom maximum
atom max_tables max_processes
@@ -569,6 +572,7 @@ atom static
atom stderr_to_stdout
atom stop
atom stream
+atom strict_monotonic
atom strict_monotonic_timestamp
atom sunrm
atom suspend
@@ -596,8 +600,9 @@ atom total_active_tasks
atom total_heap_size
atom total_run_queue_lengths
atom tpkt
-atom trace trace_ts traced
+atom trace trace_ts traced
atom trace_control_word
+atom trace_status
atom tracer
atom trap_exit
atom trim
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 320dee6668..b10250dc49 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -719,13 +719,13 @@ set_default_trace_pattern(Eterm module)
Binary *match_spec;
Binary *meta_match_spec;
struct trace_pattern_flags trace_pattern_flags;
- Eterm meta_tracer_pid;
+ ErtsTracer meta_tracer;
erts_get_default_trace_pattern(&trace_pattern_is_on,
&match_spec,
&meta_match_spec,
&trace_pattern_flags,
- &meta_tracer_pid);
+ &meta_tracer);
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
@@ -733,7 +733,7 @@ set_default_trace_pattern(Eterm module)
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid, 1);
+ meta_tracer, 1);
}
}
@@ -1264,6 +1264,7 @@ delete_code(Module* modp)
modp->curr.code_length = 0;
modp->curr.catches = BEAM_CATCHES_NIL;
modp->curr.nif = NULL;
+
}
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 0a13454951..2ee98ed7b5 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -92,15 +92,15 @@ get_mtime(Process *c_p)
/*
** Helpers
*/
-static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- int local, Binary* ms, Eterm tracer_pid);
+static ErtsTracer do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, ErtsTracer tracer);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid);
+ enum erts_break_op count_op, ErtsTracer tracer);
static void set_function_break(BeamInstr *pc,
Binary *match_spec,
Uint break_flags,
enum erts_break_op count_op,
- Eterm tracer_pid);
+ ErtsTracer tracer);
static void clear_break(BpFunctions* f, Uint break_flags);
static int clear_function_break(BeamInstr *pc, Uint break_flags);
@@ -108,7 +108,7 @@ static int clear_function_break(BeamInstr *pc, Uint break_flags);
static BpDataTime* get_time_break(BeamInstr *pc);
static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
-static void bp_meta_unref(BpMetaPid* bmp);
+static void bp_meta_unref(BpMetaTracer* bmt);
static void bp_count_unref(BpCount* bcp);
static void bp_time_unref(BpDataTime* bdt);
static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
@@ -302,7 +302,7 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
MatchSetUnref(dst->local_ms);
}
if (flags & ERTS_BPF_META_TRACE) {
- bp_meta_unref(dst->meta_pid);
+ bp_meta_unref(dst->meta_tracer);
MatchSetUnref(dst->meta_ms);
}
if (flags & ERTS_BPF_COUNT) {
@@ -343,8 +343,8 @@ consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
MatchSetRef(dst->local_ms);
}
if (flags & ERTS_BPF_META_TRACE) {
- dst->meta_pid = src->meta_pid;
- erts_refc_inc(&dst->meta_pid->refc, 1);
+ dst->meta_tracer = src->meta_tracer;
+ erts_refc_inc(&dst->meta_tracer->refc, 1);
dst->meta_ms = src->meta_ms;
MatchSetRef(dst->meta_ms);
}
@@ -436,13 +436,13 @@ uninstall_breakpoint(BeamInstr* pc)
void
erts_set_trace_break(BpFunctions* f, Binary *match_spec)
{
- set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
+ set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, erts_tracer_true);
}
void
-erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
+erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, ErtsTracer tracer)
{
- set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
+ set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
@@ -450,13 +450,13 @@ erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
{
Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
- set_function_break(pc, match_spec, flags, 0, NIL);
+ set_function_break(pc, match_spec, flags, 0, erts_tracer_nil);
}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
+erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, ErtsTracer tracer)
{
- set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
+ set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer);
}
void
@@ -464,7 +464,7 @@ erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
{
set_function_break(pc, NULL,
ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
@@ -474,21 +474,21 @@ erts_clear_time_trace_bif(BeamInstr *pc) {
void
erts_set_debug_break(BpFunctions* f) {
- set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
+ set_break(f, NULL, ERTS_BPF_DEBUG, 0, erts_tracer_nil);
}
void
erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
{
set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
{
set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
- count_op, NIL);
+ count_op, erts_tracer_nil);
}
void
@@ -625,19 +625,26 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
- (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, erts_tracer_true);
} else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
- (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
+ (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, erts_tracer_true);
}
if (bp_flags & ERTS_BPF_META_TRACE) {
- Eterm old_pid;
- Eterm new_pid;
-
- old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
- new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
- if (new_pid != old_pid) {
- erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
+ ErtsTracer old_tracer, new_tracer;
+
+ old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
+
+ new_tracer = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_tracer);
+ if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
+ if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
+ &bp->meta_tracer->tracer,
+ (erts_aint_t)new_tracer,
+ (erts_aint_t)old_tracer)) {
+ ERTS_TRACER_CLEAR(&old_tracer);
+ } else {
+ ERTS_TRACER_CLEAR(&new_tracer);
+ }
}
}
@@ -645,7 +652,8 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
erts_smp_atomic_inc_nob(&bp->count->acount);
}
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && erts_is_tracer_proc_valid(c_p)) {
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE
+ && ERTS_TRACER_PROC_IS_ENABLED(c_p)) {
Eterm w;
erts_trace_time_call(c_p, I, bp->time);
w = (BeamInstr) *c_p->cp;
@@ -690,7 +698,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
Eterm (*func)(Process*, Eterm*, BeamInstr*);
Export* ep = bif_export[bif_index];
Uint32 flags = 0, flags_meta = 0;
- Eterm meta_tracer_pid = NIL;
+ ErtsTracer meta_tracer = erts_tracer_nil;
int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
* is actually in the
* export entry */
@@ -718,23 +726,32 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
IS_TRACED_FL(p, F_TRACE_CALLS)) {
int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
flags = erts_call_trace(p, ep->code, bp->local_ms, args,
- local, &ERTS_TRACER_PROC(p));
+ local, &ERTS_TRACER(p));
}
if (bp_flags & ERTS_BPF_META_TRACE) {
- Eterm tpid1, tpid2;
+ ErtsTracer old_tracer;
- tpid1 = tpid2 =
- (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
+ old_tracer = meta_tracer;
flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
- 0, &tpid2);
- meta_tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ 0, &meta_tracer);
+
+ if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
+ ErtsTracer new_tracer = erts_tracer_nil;
+ erts_tracer_update(&new_tracer, meta_tracer);
+ if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
+ &bp->meta_tracer->tracer,
+ (erts_aint_t)new_tracer,
+ (erts_aint_t)old_tracer)) {
+ ERTS_TRACER_CLEAR(&old_tracer);
+ } else {
+ ERTS_TRACER_CLEAR(&new_tracer);
+ }
}
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
IS_TRACED_FL(p, F_TRACE_CALLS) &&
- erts_is_tracer_proc_valid(p)) {
+ ERTS_TRACER_PROC_IS_ENABLED(p)) {
BeamInstr *pc = (BeamInstr *)ep->code+3;
erts_trace_time_call(p, pc, bp->time);
}
@@ -778,8 +795,6 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
if (reason != TRAP) {
Eterm class;
Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
/* Expand error value like in handle_error() */
if (reason & EXF_ARGLIST) {
Eterm *tp;
@@ -788,7 +803,8 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
value = tp[1];
}
if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
+ Eterm *hp = HAlloc(p, 3);
+ value = TUPLE2(hp, am_nocatch, value);
reason = EXC_ERROR;
}
/* Note: expand_error_value() could theoretically
@@ -801,11 +817,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
+ &meta_tracer);
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
erts_trace_exception(p, ep->code, class, value,
- &ERTS_TRACER_PROC(p));
+ &ERTS_TRACER(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
/* can only happen if(local)*/
@@ -827,7 +843,6 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
}
- UnUseTmpHeapNoproc(3);
if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
@@ -836,11 +851,11 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
} else {
if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ erts_trace_return(p, ep->code, result, &meta_tracer);
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
+ erts_trace_return(p, ep->code, result, &ERTS_TRACER(p));
}
if (flags & MATCH_SET_RETURN_TO_TRACE) {
/* can only happen if(local)*/
@@ -857,9 +872,9 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
return result;
}
-static Eterm
+static ErtsTracer
do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- int local, Binary* ms, Eterm tracer_pid)
+ int local, Binary* ms, ErtsTracer tracer)
{
Eterm* cpp;
int return_to_trace = 0;
@@ -899,7 +914,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
ASSERT(is_CP(*cpp));
}
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
+ flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
if (cpp) {
c_p->cp = cp_save;
@@ -910,7 +925,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
need += 1;
}
if (flags & MATCH_SET_RX_TRACE) {
- need += 3;
+ need += 3 + size_object(tracer);
}
if (need) {
ASSERT(c_p->htop <= E && E <= c_p->hend);
@@ -926,14 +941,15 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
E[0] = make_cp(c_p->cp);
c_p->cp = beam_return_to_trace;
}
- if (flags & MATCH_SET_RX_TRACE) {
+ if (flags & MATCH_SET_RX_TRACE)
+ {
E -= 3;
+ c_p->stop = E;
ASSERT(c_p->htop <= E && E <= c_p->hend);
ASSERT(is_CP((Eterm) (UWord) (I - 3)));
- ASSERT(am_true == tracer_pid ||
- is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ ASSERT(IS_TRACER_VALID(tracer));
E[2] = make_cp(c_p->cp);
- E[1] = tracer_pid;
+ E[1] = copy_object(tracer, c_p);
E[0] = make_cp(I - 3); /* We ARE at the beginning of an
instruction,
the funcinfo is above i. */
@@ -942,9 +958,9 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- c_p->stop = E;
- return tracer_pid;
+ } else
+ c_p->stop = E;
+ return tracer;
}
void
@@ -1098,9 +1114,9 @@ erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
return 0;
}
-int
+int
erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret)
+ ErtsTracer *tracer_ret)
{
GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
@@ -1108,9 +1124,8 @@ erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
if (match_spec_ret) {
*match_spec_ret = bp->meta_ms;
}
- if (tracer_pid_ret) {
- *tracer_pid_ret =
- (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ if (tracer_ret) {
+ *tracer_ret = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
}
return 1;
}
@@ -1391,7 +1406,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
static void
set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid)
+ enum erts_break_op count_op, ErtsTracer tracer)
{
Uint i;
Uint n;
@@ -1400,13 +1415,13 @@ set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
for (i = 0; i < n; i++) {
BeamInstr* pc = f->matching[i].pc;
set_function_break(pc, match_spec, break_flags,
- count_op, tracer_pid);
+ count_op, tracer);
}
}
static void
set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
- enum erts_break_op count_op, Eterm tracer_pid)
+ enum erts_break_op count_op, ErtsTracer tracer)
{
GenericBp* g;
GenericBpData* bp;
@@ -1439,7 +1454,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetUnref(bp->local_ms);
} else if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
- bp_meta_unref(bp->meta_pid);
+ bp_meta_unref(bp->meta_tracer);
} else if (common & ERTS_BPF_COUNT) {
if (count_op == erts_break_stop) {
bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
@@ -1474,13 +1489,15 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetRef(match_spec);
bp->local_ms = match_spec;
} else if (break_flags & ERTS_BPF_META_TRACE) {
- BpMetaPid* bmp;
+ BpMetaTracer* bmt;
+ ErtsTracer meta_tracer = erts_tracer_nil;
MatchSetRef(match_spec);
bp->meta_ms = match_spec;
- bmp = Alloc(sizeof(BpMetaPid));
- erts_refc_init(&bmp->refc, 1);
- erts_smp_atomic_init_nob(&bmp->pid, tracer_pid);
- bp->meta_pid = bmp;
+ bmt = Alloc(sizeof(BpMetaTracer));
+ erts_refc_init(&bmt->refc, 1);
+ erts_tracer_update(&meta_tracer, tracer); /* copy tracer */
+ erts_smp_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer);
+ bp->meta_tracer = bmt;
} else if (break_flags & ERTS_BPF_COUNT) {
BpCount* bcp;
@@ -1544,7 +1561,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
}
if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
- bp_meta_unref(bp->meta_pid);
+ bp_meta_unref(bp->meta_tracer);
}
if (common & ERTS_BPF_COUNT) {
ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
@@ -1560,10 +1577,12 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
}
static void
-bp_meta_unref(BpMetaPid* bmp)
+bp_meta_unref(BpMetaTracer* bmt)
{
- if (erts_refc_dectest(&bmp->refc, 0) <= 0) {
- Free(bmp);
+ if (erts_refc_dectest(&bmt->refc, 0) <= 0) {
+ ErtsTracer trc = erts_smp_atomic_read_nob(&bmt->tracer);
+ ERTS_TRACER_CLEAR(&trc);
+ Free(bmt);
}
}
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index f9eca94e2a..bb171be8a6 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -55,15 +55,15 @@ typedef struct {
} BpCount;
typedef struct {
- erts_smp_atomic_t pid;
+ erts_smp_atomic_t tracer;
erts_refc_t refc;
-} BpMetaPid;
+} BpMetaTracer;
typedef struct generic_bp_data {
Uint flags;
Binary* local_ms; /* Match spec for local call trace */
Binary* meta_ms; /* Match spec for meta trace */
- BpMetaPid* meta_pid; /* Meta trace pid */
+ BpMetaTracer* meta_tracer; /* Meta tracer */
BpCount* count; /* For call count */
BpDataTime* time; /* For time trace */
} GenericBpData;
@@ -132,10 +132,10 @@ void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
void erts_clear_call_trace_bif(BeamInstr *pc, int local);
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
- Eterm tracer_pid);
+ ErtsTracer tracer);
void erts_clear_mtrace_break(BpFunctions *f);
void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
- Eterm tracer_pid);
+ ErtsTracer tracer);
void erts_clear_mtrace_bif(BeamInstr *pc);
void erts_set_debug_break(BpFunctions *f);
@@ -150,13 +150,13 @@ void erts_clear_export_break(Module *modp, BeamInstr* pc);
BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
- Uint32 *ret_flags, Eterm *tracer_pid);
+ Uint32 *ret_flags, ErtsTracer *tracer);
int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_rte);
+ ErtsTracer *tracer_ret);
int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret);
+ ErtsTracer *tracer_ret);
int erts_is_native_break(BeamInstr *pc);
int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 901419c989..c356863b60 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -4588,7 +4588,7 @@ do { \
SWAPOUT; /* Needed for shared heap */
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- erts_trace_return(c_p, code, r(0), E+1/*Process tracer*/);
+ erts_trace_return(c_p, code, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
SWAPIN;
c_p->cp = NULL;
@@ -5228,7 +5228,8 @@ next_catch(Process* c_p, Eterm *reg) {
BeamInstr *cpp = c_p->cp;
if (cpp == beam_exception_trace) {
erts_trace_exception(c_p, cp_val(ptr[0]),
- reg[1], reg[2], ptr+1);
+ reg[1], reg[2],
+ ERTS_TRACER_FROM_ETERM(ptr+1));
/* Skip return_trace parameters */
ptr += 2;
} else if (cpp == beam_return_trace) {
@@ -5255,7 +5256,8 @@ next_catch(Process* c_p, Eterm *reg) {
}
if (cp_val(*prev) == beam_exception_trace) {
erts_trace_exception(c_p, cp_val(ptr[0]),
- reg[1], reg[2], ptr+1);
+ reg[1], reg[2],
+ ERTS_TRACER_FROM_ETERM(ptr+1));
}
/* Skip return_trace parameters */
ptr += 2;
@@ -6049,7 +6051,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
return -1;
}
#else /* ERTS_SMP */
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
if (!c_p->msg.len)
#endif
erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 754e11f047..d633a982ca 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -123,15 +123,12 @@ static int insert_internal_link(Process* p, Eterm rpid)
erts_add_link(&ERTS_P_LINKS(p), LINK_PID, rp->common.id);
erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, p->common.id);
- ASSERT(is_nil(ERTS_TRACER_PROC(p))
- || is_internal_pid(ERTS_TRACER_PROC(p))
- || is_internal_port(ERTS_TRACER_PROC(p)));
+ ASSERT(IS_TRACER_VALID(ERTS_TRACER(p)));
if (IS_TRACED(p)) {
if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) {
ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(rp) = ERTS_TRACER_PROC(p); /* maybe steal */
-
+ erts_tracer_replace(&rp->common, ERTS_TRACER(p));
if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */
ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
@@ -140,7 +137,8 @@ static int insert_internal_link(Process* p, Eterm rpid)
}
}
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(p, rp, am_getting_linked, p->common.id);
+ trace_proc(p, p == rp ? rp_locks : ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ rp, am_getting_linked, p->common.id);
if (p == rp)
erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
@@ -159,7 +157,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
DistEntry *dep;
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
- trace_proc(BIF_P, BIF_P, am_link, BIF_ARG_1);
+ trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_P, am_link, BIF_ARG_1);
}
/* check that the pid or port which is our argument is OK */
@@ -613,7 +611,7 @@ erts_queue_monitor_message(Process *p,
ref_copy = copy_struct(ref, ref_size, &hp, ohp);
tup = TUPLE5(hp, am_DOWN, ref_copy, type, item_copy, reason_copy);
- erts_queue_message(p, p_locksp, msgp, tup, NIL);
+ erts_queue_message(p, p_locksp, msgp, tup);
}
static BIF_RETTYPE
@@ -1001,6 +999,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
Process *rp;
DistEntry *dep;
ErtsLink *l = NULL, *rl = NULL;
+ ErtsProcLocks cp_locks = ERTS_PROC_LOCK_MAIN;
/*
* SMP specific note concerning incoming exit signals:
@@ -1015,7 +1014,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
*/
if (IS_TRACED_FL(BIF_P, F_TRACE_PROCS)) {
- trace_proc(BIF_P, BIF_P, am_unlink, BIF_ARG_1);
+ trace_proc(BIF_P, cp_locks, BIF_P, am_unlink, BIF_ARG_1);
}
if (is_internal_port(BIF_ARG_1)) {
@@ -1120,10 +1119,10 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ cp_locks |= ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS;
+
/* get process struct */
- rp = erts_pid2proc_opt(BIF_P, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCK_STATUS),
+ rp = erts_pid2proc_opt(BIF_P, cp_locks,
BIF_ARG_1, ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
@@ -1149,14 +1148,17 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
erts_destroy_link(rl);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) {
- trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->common.id);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ cp_locks &= ~ERTS_PROC_LOCK_STATUS;
+ trace_proc(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK),
+ rp, am_getting_unlinked, BIF_P->common.id);
}
if (rp != BIF_P)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN);
BIF_RET(am_true);
@@ -4337,12 +4339,34 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
} else if (BIF_ARG_1 == am_trace_control_word) {
BIF_RET(db_set_trace_control_word(BIF_P, BIF_ARG_2));
} else if (BIF_ARG_1 == am_sequential_tracer) {
- Eterm old_value = erts_set_system_seq_tracer(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2);
- if (old_value != THE_NON_VALUE) {
- BIF_RET(old_value);
- }
+ ErtsTracer new_seq_tracer, old_seq_tracer;
+ Eterm ret;
+
+ if (BIF_ARG_2 == am_false)
+ new_seq_tracer = erts_tracer_nil;
+ else
+ new_seq_tracer = erts_term_to_tracer(THE_NON_VALUE, BIF_ARG_2);
+
+ if (new_seq_tracer == THE_NON_VALUE)
+ goto error;
+
+ old_seq_tracer = erts_set_system_seq_tracer(BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ new_seq_tracer);
+
+ ERTS_TRACER_CLEAR(&new_seq_tracer);
+
+ if (old_seq_tracer == THE_NON_VALUE)
+ goto error;
+
+ if (ERTS_TRACER_IS_NIL(old_seq_tracer))
+ BIF_RET(am_false);
+
+ ret = erts_tracer_to_term(BIF_P, old_seq_tracer);
+
+ ERTS_TRACER_CLEAR(&old_seq_tracer);
+
+ BIF_RET(ret);
} else if (BIF_ARG_1 == make_small(1)) {
int i, max;
ErtsMessage* mp;
@@ -4679,7 +4703,7 @@ skip_current_msgq(Process *c_p)
res = 0;
}
else {
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
c_p->msg.save = c_p->msg.last;
res = 1;
}
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 9bb77190d6..58cd31cee9 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -181,9 +181,8 @@ bif erlang:port_set_data/2
bif erlang:port_get_data/1
# Tracing & debugging.
-bif erlang:trace_pattern/2
-bif erlang:trace_pattern/3
-bif erlang:trace/3
+bif erts_internal:trace_pattern/3
+bif erts_internal:trace/3
bif erlang:trace_info/2
bif erlang:trace_delivered/1
bif erlang:seq_trace/2
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index 9da750e366..ec6267711b 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -94,6 +94,7 @@ void erts_commit_staging_code_ix(void)
ix = (ix + 1) % ERTS_NUM_CODE_IX;
erts_smp_atomic32_set_nob(&the_staging_code_index, ix);
export_staging_unlock();
+ erts_tracer_nif_clear();
CIX_TRACE("activate");
}
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 88449adb8e..8cd3f5fbe6 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -337,7 +337,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
erts_destroy_link(rlnk);
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
- trace_proc(NULL, rp, am_getting_unlinked, sublnk->pid);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, sublnk->pid);
}
}
erts_smp_proc_unlock(rp, rp_locks);
@@ -397,7 +397,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
msgp = erts_alloc_message_heap(rp, &rp_locks,
3, &hp, &ohp);
tup = TUPLE2(hp, am_nodedown, name);
- erts_queue_message(rp, &rp_locks, msgp, tup, NIL);
+ erts_queue_message(rp, &rp_locks, msgp, tup);
}
erts_smp_proc_unlock(rp, rp_locks);
}
@@ -1275,7 +1275,7 @@ int erts_net_message(Port *prt,
erts_smp_de_links_unlock(dep);
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, rp, am_getting_linked, from);
+ trace_proc(NULL, 0, rp, am_getting_linked, from);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
break;
@@ -1300,7 +1300,7 @@ int erts_net_message(Port *prt,
lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) {
- trace_proc(NULL, rp, am_getting_unlinked, from);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, from);
}
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
@@ -1628,7 +1628,11 @@ int erts_net_message(Port *prt,
ERTS_XSIG_FLG_IGN_KILL);
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
- trace_proc(NULL, rp, am_getting_unlinked, from);
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
+ trace_proc(NULL, 0, rp, am_getting_unlinked, from);
}
}
erts_smp_proc_unlock(rp, rp_locks);
@@ -3313,7 +3317,7 @@ send_nodes_mon_msg(Process *rp,
}
ASSERT(hend == hp);
- erts_queue_message(rp, rp_locksp, mp, msg, NIL);
+ erts_queue_message(rp, rp_locksp, mp, msg);
}
static void
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index cfe0bc3205..a6519bd9e4 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -3210,7 +3210,7 @@ reply_alloc_info(void *vair)
if (hp != hp_end)
erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (air->req_sched == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 9482ab9265..d2fe440d47 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -270,6 +270,9 @@ type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues
type NEW_TIME_OFFSET SHORT_LIVED SYSTEM new_time_offset
type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request
+type TRACER_NIF LONG_LIVED SYSTEM tracer_nif
+type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
+type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index a79ce11563..1e2db38442 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1737,7 +1737,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
hp += REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
- erts_queue_message(proc, &rp_locks, mp, mess, am_undefined);
+ erts_queue_message(proc, &rp_locks, mp, mess);
erts_smp_proc_unlock(proc, rp_locks);
ERTS_SMP_CHK_NO_PROC_LOCKS;
}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 68f6abfcdc..7a72b0d8cc 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -846,6 +846,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
if (unlock_locks)
erts_smp_proc_unlock(rp, unlock_locks);
+
}
/*
@@ -2162,8 +2163,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = build_snifs_term(&hp, NULL, NIL);
BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
- val = erts_get_system_seq_tracer();
- ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false);
+ ErtsTracer seq_tracer = erts_get_system_seq_tracer();
+ val = erts_tracer_to_term(BIF_P, seq_tracer);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
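
system_info(sequential_tracer) no longer asserts that the tracer is a pid, port, or false; it converts the internal ErtsTracer handle with erts_tracer_to_term(), since a tracer module plus its state must also be representable. A hypothetical sketch of such a conversion over an assumed simplified tagged representation (the tags and the exact returned shapes are assumptions, not the actual ERTS terms):

#include <stdio.h>

/* assumed simplified internal representation of a tracer */
typedef enum { TRACER_NONE, TRACER_PID, TRACER_PORT, TRACER_MODULE } tracer_tag;
typedef struct { tracer_tag tag; const char *id; const char *state; } tracer;

/* turn the internal handle into something printable for the caller,
   roughly the set of cases a to-term conversion has to decide between */
static void tracer_to_term(const tracer *t, char *out, size_t n)
{
    switch (t->tag) {
    case TRACER_NONE:   snprintf(out, n, "false"); break;
    case TRACER_PID:
    case TRACER_PORT:   snprintf(out, n, "%s", t->id); break;
    case TRACER_MODULE: snprintf(out, n, "{%s,%s}", t->id, t->state); break;
    }
}

int main(void)
{
    tracer t = { TRACER_MODULE, "my_tracer", "[]" };
    char buf[64];
    tracer_to_term(&t, buf, sizeof buf);
    printf("{sequential_tracer,%s}\n", buf);
    return 0;
}
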
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 71b6d78094..eb55bd585e 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -79,6 +79,7 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2)
}
if (port->drv_ptr->flags & ERL_DRV_FLAG_USE_INIT_ACK) {
+
/* Copied from erl_port_task.c */
port->async_open_port = erts_alloc(ERTS_ALC_T_PRTSD,
sizeof(*port->async_open_port));
@@ -911,7 +912,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
}
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
}
@@ -935,13 +936,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
err_typep ? *err_typep : 4711,
err_nump ? *err_nump : 4711));
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
}
goto do_return;
}
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
}
if (linebuf && port->linebuf == NULL){
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index a9443ee8df..049899211f 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -52,7 +52,7 @@ static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
-static Eterm erts_default_meta_tracer_pid;
+static ErtsTracer erts_default_meta_tracer;
static struct { /* Protected by code write permission */
int current;
@@ -75,8 +75,6 @@ static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
-static int already_traced(Process *p, Process *tracee_p, Eterm tracer);
-static int port_already_traced(Process *p, Port *tracee_port, Eterm tracer);
static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
@@ -94,21 +92,15 @@ erts_bif_trace_init(void)
erts_default_match_spec = NULL;
erts_default_meta_match_spec = NULL;
erts_default_trace_pattern_flags = erts_trace_pattern_flags_off;
- erts_default_meta_tracer_pid = NIL;
+ erts_default_meta_tracer = erts_tracer_nil;
}
/*
* Turn on/off call tracing for the given function(s).
*/
-
-Eterm
-trace_pattern_2(BIF_ALIST_2)
-{
- return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL);
-}
Eterm
-trace_pattern_3(BIF_ALIST_3)
+erts_internal_trace_pattern_3(BIF_ALIST_3)
{
return trace_pattern(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
@@ -125,11 +117,10 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
Eterm l;
struct trace_pattern_flags flags = erts_trace_pattern_flags_off;
int is_global;
- Process *meta_tracer_proc = p;
- Eterm meta_tracer_pid = p->common.id;
+ ErtsTracer meta_tracer = erts_tracer_nil;
if (!erts_try_seize_code_write_permission(p)) {
- ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
+ ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_pattern_3], p, MFA, Pattern, flaglist);
}
finish_bp.current = -1;
@@ -160,31 +151,11 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
is_global = 0;
for(l = flaglist; is_list(l); l = CDR(list_val(l))) {
if (is_tuple(CAR(list_val(l)))) {
- Eterm *tp = tuple_val(CAR(list_val(l)));
-
- if (arityval(tp[0]) != 2 || tp[1] != am_meta) {
- goto error;
- }
- meta_tracer_pid = tp[2];
- if (is_internal_pid(meta_tracer_pid)) {
- meta_tracer_proc = erts_pid2proc(NULL, 0, meta_tracer_pid, 0);
- if (!meta_tracer_proc) {
- goto error;
- }
- } else if (is_internal_port(meta_tracer_pid)) {
- Port *meta_tracer_port;
- meta_tracer_proc = NULL;
- meta_tracer_port = (erts_port_lookup(
- meta_tracer_pid,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
- if (!meta_tracer_port)
- goto error;
- } else {
- goto error;
- }
- if (is_global) {
- goto error;
- }
+ meta_tracer = erts_term_to_tracer(am_meta, CAR(list_val(l)));
+ if (meta_tracer == THE_NON_VALUE) {
+ meta_tracer = erts_tracer_nil;
+ goto error;
+ }
flags.breakpoint = 1;
flags.meta = 1;
} else {
@@ -202,6 +173,8 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
flags.breakpoint = 1;
flags.meta = 1;
+ if (ERTS_TRACER_IS_NIL(meta_tracer))
+ meta_tracer = erts_term_to_tracer(THE_NON_VALUE, p->common.id);
break;
case am_global:
if (flags.breakpoint) {
@@ -252,14 +225,11 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetUnref(erts_default_meta_match_spec);
erts_default_meta_match_spec = match_prog_set;
MatchSetRef(erts_default_meta_match_spec);
- erts_default_meta_tracer_pid = meta_tracer_pid;
- if (meta_tracer_proc) {
- ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
- }
+ erts_tracer_update(&erts_default_meta_tracer, meta_tracer);
} else if (! flags.breakpoint) {
MatchSetUnref(erts_default_meta_match_spec);
erts_default_meta_match_spec = NULL;
- erts_default_meta_tracer_pid = NIL;
+ ERTS_TRACER_CLEAR(&erts_default_meta_tracer);
}
if (erts_default_trace_pattern_flags.breakpoint &&
flags.breakpoint) {
@@ -340,20 +310,18 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
if (is_small(mfa[2])) {
mfa[2] = signed_val(mfa[2]);
}
-
- if (meta_tracer_proc) {
- ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
- }
matches = erts_set_trace_pattern(p, mfa, specified,
match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid, 0);
+ on, flags, meta_tracer, 0);
}
error:
MatchSetUnref(match_prog_set);
UnUseTmpHeap(3,p);
+ ERTS_TRACER_CLEAR(&meta_tracer);
+
#ifdef ERTS_SMP
if (finish_bp.current >= 0) {
ASSERT(matches >= 0);
@@ -404,7 +372,7 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
Binary **match_spec,
Binary **meta_match_spec,
struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid)
+ ErtsTracer *meta_tracer)
{
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
erts_smp_thr_progress_is_blocking());
@@ -416,8 +384,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*meta_match_spec = erts_default_meta_match_spec;
if (trace_pattern_flags)
*trace_pattern_flags = erts_default_trace_pattern_flags;
- if (meta_tracer_pid)
- *meta_tracer_pid = erts_default_meta_tracer_pid;
+ if (meta_tracer)
+ *meta_tracer = erts_default_meta_tracer;
}
int erts_is_default_trace_enabled(void)
@@ -465,12 +433,12 @@ erts_trace_flag2bit(Eterm flag)
** occurred in the argument list.
*/
int
-erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp)
+erts_trace_flags(Eterm List,
+ Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp)
{
Eterm list = List;
Uint mask = 0;
- Eterm tracer = NIL;
+ ErtsTracer tracer = erts_tracer_nil;
int cpu_timestamp = 0;
while (is_list(list)) {
@@ -483,33 +451,72 @@ erts_trace_flags(Eterm List,
cpu_timestamp = !0;
#endif
} else if (is_tuple(item)) {
- Eterm* tp = tuple_val(item);
-
- if (arityval(tp[0]) != 2 || tp[1] != am_tracer) goto error;
- if (is_internal_pid(tp[2]) || is_internal_port(tp[2])) {
- tracer = tp[2];
- } else goto error;
+ tracer = erts_term_to_tracer(am_tracer, item);
+ if (tracer == THE_NON_VALUE)
+ goto error;
} else goto error;
list = CDR(list_val(list));
}
if (is_not_nil(list)) goto error;
- if (pMask && mask) *pMask = mask;
- if (pTracer && tracer != NIL) *pTracer = tracer;
- if (pCpuTimestamp && cpu_timestamp) *pCpuTimestamp = cpu_timestamp;
+ if (pMask && mask) *pMask = mask;
+ if (pTracer && !ERTS_TRACER_IS_NIL(tracer)) *pTracer = tracer;
+ if (pCpuTimestamp && cpu_timestamp) *pCpuTimestamp = cpu_timestamp;
return !0;
error:
return 0;
}
-Eterm trace_3(BIF_ALIST_3)
+static ERTS_INLINE int
+start_trace(Process *c_p, ErtsTracer tracer,
+ ErtsPTabElementCommon *common,
+ int on, int mask)
+{
+    /* We can use the common part of both port and proc without checking
+       which one it is. In the code below, port is used for both. */
+ Port *port = (Port*)common;
+
+ /*
+     * SMP build assumes that either the system is blocked or:
+     *  * the main lock is held on c_p
+     *  * all locks are held on the tracee's common part
+ */
+
+ if (!ERTS_TRACER_IS_NIL(tracer)) {
+ if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS)
+ && !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) {
+ /* This tracee is already being traced, and not by the
+ * tracer to be */
+ if (erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCKS_ALL, common)) {
+ /* The tracer is still in use */
+ return 1;
+ }
+ /* Current tracer now invalid */
+ }
+ }
+
+ if (on)
+ ERTS_TRACE_FLAGS(port) |= mask;
+ else
+ ERTS_TRACE_FLAGS(port) &= ~mask;
+
+ if ((ERTS_TRACE_FLAGS(port) & TRACEE_FLAGS) == 0) {
+ tracer = erts_tracer_nil;
+ erts_tracer_replace(common, erts_tracer_nil);
+ } else if (!ERTS_TRACER_IS_NIL(tracer))
+ erts_tracer_replace(common, tracer);
+
+ return 0;
+}
+
+Eterm erts_internal_trace_3(BIF_ALIST_3)
{
Process* p = BIF_P;
Eterm pid_spec = BIF_ARG_1;
Eterm how = BIF_ARG_2;
Eterm list = BIF_ARG_3;
int on;
- Eterm tracer = NIL;
+ ErtsTracer tracer = erts_tracer_nil;
int matches = 0;
Uint mask = 0;
int cpu_ts = 0;
@@ -522,41 +529,24 @@ Eterm trace_3(BIF_ALIST_3)
}
if (!erts_try_seize_code_write_permission(BIF_P)) {
- ERTS_BIF_YIELD3(bif_export[BIF_trace_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ ERTS_BIF_YIELD3(bif_export[BIF_erts_internal_trace_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
- if (is_nil(tracer) || is_internal_pid(tracer)) {
- Process *tracer_proc = erts_pid2proc(p,
- ERTS_PROC_LOCK_MAIN,
- is_nil(tracer) ? p->common.id : tracer,
- ERTS_PROC_LOCKS_ALL);
- if (!tracer_proc)
- goto error;
- ERTS_TRACE_FLAGS(tracer_proc) |= F_TRACER;
- erts_smp_proc_unlock(tracer_proc,
- (tracer_proc == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
- } else if (is_internal_port(tracer)) {
- Port *tracer_port = erts_id2port_sflgs(tracer,
- p,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (!tracer_port)
- goto error;
- ERTS_TRACE_FLAGS(tracer_port) |= F_TRACER;
- erts_port_release(tracer_port);
- } else
- goto error;
-
switch (how) {
case am_false:
on = 0;
break;
case am_true:
on = 1;
- if (is_nil(tracer))
- tracer = p->common.id;
+ if (ERTS_TRACER_IS_NIL(tracer))
+ tracer = erts_term_to_tracer(am_tracer, p->common.id);
+
+ if (tracer == THE_NON_VALUE) {
+ tracer = erts_tracer_nil;
+ goto error;
+ }
+
break;
default:
goto error;
@@ -575,34 +565,20 @@ Eterm trace_3(BIF_ALIST_3)
}
#endif
- if (pid_spec == tracer)
- goto error;
-
tracee_port = erts_id2port_sflgs(pid_spec,
p,
ERTS_PROC_LOCK_MAIN,
ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
if (!tracee_port)
goto error;
-
- if (tracer != NIL && port_already_traced(p, tracee_port, tracer)) {
+
+ if (start_trace(p, tracer, &tracee_port->common, on, mask)) {
erts_port_release(tracee_port);
goto already_traced;
- }
-
- if (on)
- ERTS_TRACE_FLAGS(tracee_port) |= mask;
- else
- ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
-
- if (!ERTS_TRACE_FLAGS(tracee_port))
- ERTS_TRACER_PROC(tracee_port) = NIL;
- else if (tracer != NIL)
- ERTS_TRACER_PROC(tracee_port) = tracer;
-
- erts_port_release(tracee_port);
-
- matches = 1;
+ }
+ erts_port_release(tracee_port);
+ matches = 1;
} else if (is_pid(pid_spec)) {
Process *tracee_p;
@@ -615,33 +591,19 @@ Eterm trace_3(BIF_ALIST_3)
* and not about to be tracing.
*/
- if (pid_spec == tracer)
- goto error;
-
tracee_p = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
pid_spec, ERTS_PROC_LOCKS_ALL);
if (!tracee_p)
goto error;
- if (tracer != NIL && already_traced(p, tracee_p, tracer)) {
+ if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) {
erts_smp_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
goto already_traced;
- }
-
- if (on)
- ERTS_TRACE_FLAGS(tracee_p) |= mask;
- else
- ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
-
- if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) == 0)
- ERTS_TRACER_PROC(tracee_p) = NIL;
- else if (tracer != NIL)
- ERTS_TRACER_PROC(tracee_p) = tracer;
-
- erts_smp_proc_unlock(tracee_p,
+ }
+ erts_smp_proc_unlock(tracee_p,
(tracee_p == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
@@ -719,22 +681,7 @@ Eterm trace_3(BIF_ALIST_3)
Process* tracee_p = erts_pix2proc(i);
if (! tracee_p)
continue;
- if (tracer != NIL) {
- if (tracee_p->common.id == tracer)
- continue;
- if (already_traced(NULL, tracee_p, tracer))
- continue;
- }
- if (on) {
- ERTS_TRACE_FLAGS(tracee_p) |= mask;
- } else {
- ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
- }
- if(!(ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)) {
- ERTS_TRACER_PROC(tracee_p) = NIL;
- } else if (tracer != NIL) {
- ERTS_TRACER_PROC(tracee_p) = tracer;
- }
+ start_trace(p, tracer, &tracee_p->common, on, mask);
matches++;
}
}
@@ -749,21 +696,7 @@ Eterm trace_3(BIF_ALIST_3)
state = erts_atomic32_read_nob(&tracee_port->state);
if (state & ERTS_PORT_SFLGS_DEAD)
continue;
- if (tracer != NIL) {
- if (tracee_port->common.id == tracer)
- continue;
- if (port_already_traced(NULL, tracee_port, tracer))
- continue;
- }
-
- if (on) ERTS_TRACE_FLAGS(tracee_port) |= mask;
- else ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
-
- if (!(ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)) {
- ERTS_TRACER_PROC(tracee_port) = NIL;
- } else if (tracer != NIL) {
- ERTS_TRACER_PROC(tracee_port) = tracer;
- }
+ start_trace(p, tracer, &tracee_port->common, on, mask);
/* matches are not counted for ports since it would violate compatibility */
/* This could be a reason to modify this function or make a new one. */
}
@@ -772,10 +705,9 @@ Eterm trace_3(BIF_ALIST_3)
if (pid_spec == am_all || pid_spec == am_new) {
Uint def_flags = mask;
- Eterm def_tracer = tracer;
ok = 1;
- erts_change_default_tracing(on, &def_flags, &def_tracer);
+ erts_change_default_tracing(on, &def_flags, tracer);
#ifdef HAVE_ERTS_NOW_CPU
if (cpu_ts && !on) {
@@ -801,6 +733,7 @@ Eterm trace_3(BIF_ALIST_3)
}
#endif
erts_release_code_write_permission();
+ ERTS_TRACER_CLEAR(&tracer);
BIF_RET(make_small(matches));
@@ -810,6 +743,8 @@ Eterm trace_3(BIF_ALIST_3)
error:
+ ERTS_TRACER_CLEAR(&tracer);
+
#ifdef ERTS_SMP
if (system_blocked) {
erts_smp_thr_progress_unblock();
@@ -821,88 +756,6 @@ Eterm trace_3(BIF_ALIST_3)
BIF_ERROR(p, BADARG);
}
-/* Check that the process to be traced is not already traced
- * by a valid other tracer than the tracer to be.
- */
-static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
-{
- /*
- * SMP build assumes that either system is blocked or:
- * * main lock is held on c_p
- * * all locks are held on port tracee_p
- */
- if ((ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)
- && ERTS_TRACER_PROC(tracee_port) != tracer) {
- /* This tracee is already being traced, and not by the
- * tracer to be */
- if (is_internal_port(ERTS_TRACER_PROC(tracee_port))) {
- if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_port))) {
- /* Current trace port now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else if(is_internal_pid(ERTS_TRACER_PROC(tracee_port))) {
- Process *tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_port));
- if (!tracer_p) {
- /* Current trace process now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else {
- remove_tracer:
- ERTS_TRACE_FLAGS(tracee_port) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(tracee_port) = NIL;
- }
- }
- return 0;
-}
-
-/* Check that the process to be traced is not already traced
- * by a valid other tracer than the tracer to be.
- */
-static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
-{
- /*
- * SMP build assumes that either system is blocked or:
- * * main lock is held on c_p
- * * all locks multiple are held on tracee_p
- */
- if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)
- && ERTS_TRACER_PROC(tracee_p) != tracer) {
- /* This tracee is already being traced, and not by the
- * tracer to be */
- if (is_internal_port(ERTS_TRACER_PROC(tracee_p))) {
- if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_p))) {
- /* Current trace port now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else if(is_internal_pid(ERTS_TRACER_PROC(tracee_p))) {
- Process *tracer_p;
-
- tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_p));
- if (!tracer_p) {
- /* Current trace process now invalid
- * - discard it and approve the new. */
- goto remove_tracer;
- } else
- return 1;
- }
- else {
- remove_tracer:
- ERTS_TRACE_FLAGS(tracee_p) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(tracee_p) = NIL;
- }
- }
- return 0;
-}
-
/*
* Return information about a process or an external function being traced.
*/
@@ -936,41 +789,30 @@ static Eterm
trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
{
Eterm tracer;
- Uint trace_flags;
+ Uint trace_flags = am_false;
Eterm* hp;
if (pid_spec == am_new) {
- erts_get_default_tracing(&trace_flags, &tracer);
+ ErtsTracer def_tracer;
+ erts_get_default_tracing(&trace_flags, &def_tracer);
+ tracer = erts_tracer_to_term(p, def_tracer);
+ ERTS_TRACER_CLEAR(&def_tracer);
} else if (is_internal_pid(pid_spec)) {
Process *tracee;
tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCKS_ALL);
+ pid_spec, ERTS_PROC_LOCK_MAIN);
- if (!tracee) {
+ if (!tracee)
return am_undefined;
- } else {
- tracer = ERTS_TRACER_PROC(tracee);
- trace_flags = ERTS_TRACE_FLAGS(tracee);
- }
- if (is_internal_pid(tracer)) {
- if (!erts_proc_lookup(tracer)) {
- reset_tracer:
- ERTS_TRACE_FLAGS(tracee) &= ~TRACEE_FLAGS;
- trace_flags = ERTS_TRACE_FLAGS(tracee);
- tracer = ERTS_TRACER_PROC(tracee) = NIL;
- }
- }
- else if (is_internal_port(tracer)) {
- if (!erts_is_valid_tracer_port(tracer))
- goto reset_tracer;
- }
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(tracee,
- (tracee == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
-#endif
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
+ erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN, &tracee->common);
+
+ tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+
+ if (tracee != p)
+ erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
@@ -1024,8 +866,10 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
HRelease(p,limit,hp+3);
return TUPLE2(hp, key, flag_list);
} else if (key == am_tracer) {
- hp = HAlloc(p, 3);
- return TUPLE2(hp, key, tracer); /* Local pid or port */
+ if (tracer == am_false)
+ tracer = NIL;
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, tracer);
} else {
goto error;
}
@@ -1054,11 +898,11 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
*/
static int function_is_traced(Process *p,
Eterm mfa[3],
- Binary **ms, /* out */
- Binary **ms_meta, /* out */
- Eterm *tracer_pid_meta, /* out */
- Uint *count, /* out */
- Eterm *call_time) /* out */
+ Binary **ms, /* out */
+ Binary **ms_meta, /* out */
+ ErtsTracer *tracer_pid_meta, /* out */
+ Uint *count, /* out */
+ Eterm *call_time) /* out */
{
Export e;
Export* ep;
@@ -1123,7 +967,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
- Eterm meta = am_false;
+ ErtsTracer meta = erts_tracer_nil;
Eterm call_time = NIL;
int r;
@@ -1193,7 +1037,10 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
retval = match_spec;
break;
case am_meta:
- retval = meta;
+ retval = erts_tracer_to_term(p, meta);
+ if (retval == am_false)
+ /* backwards compatibility */
+ retval = NIL;
break;
case am_meta_match_spec:
if (r & FUNC_TRACE_META_TRACE) {
@@ -1216,7 +1063,8 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
}
break;
case am_all: {
- Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false;
+ Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false,
+ m = am_false;
if (ms) {
match_spec = MatchSetGetSource(ms);
@@ -1235,6 +1083,9 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
if (r & FUNC_TRACE_TIME_TRACE) {
ct = call_time;
}
+
+ m = erts_tracer_to_term(p, meta);
+
hp = HAlloc(p, (3+2)*6);
retval = NIL;
t = TUPLE2(hp, am_call_count, c); hp += 3;
@@ -1243,7 +1094,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
- t = TUPLE2(hp, am_meta, meta); hp += 3;
+ t = TUPLE2(hp, am_meta, m); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
@@ -1306,7 +1157,8 @@ trace_info_on_load(Process* p, Eterm key)
case am_meta:
hp = HAlloc(p, 3);
if (erts_default_trace_pattern_flags.meta) {
- return TUPLE2(hp, key, erts_default_meta_tracer_pid);
+ ASSERT(!ERTS_TRACER_IS_NIL(erts_default_meta_tracer));
+ return TUPLE2(hp, key, erts_tracer_to_term(p, erts_default_meta_tracer));
} else {
return TUPLE2(hp, key, am_false);
}
@@ -1345,7 +1197,7 @@ trace_info_on_load(Process* p, Eterm key)
}
case am_all:
{
- Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t;
+ Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t, m;
if (erts_default_trace_pattern_flags.local ||
(! erts_default_trace_pattern_flags.breakpoint)) {
@@ -1363,6 +1215,8 @@ trace_info_on_load(Process* p, Eterm key)
MatchSetGetSource(erts_default_meta_match_spec);
meta_match_spec = copy_object(meta_match_spec, p);
}
+ m = (erts_default_trace_pattern_flags.meta
+ ? erts_tracer_to_term(p, erts_default_meta_tracer) : am_false);
hp = HAlloc(p, (3+2)*5 + 3);
t = TUPLE2(hp, am_call_count,
(erts_default_trace_pattern_flags.call_count
@@ -1370,9 +1224,7 @@ trace_info_on_load(Process* p, Eterm key)
r = CONS(hp, t, r); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, meta_match_spec); hp += 3;
r = CONS(hp, t, r); hp += 2;
- t = TUPLE2(hp, am_meta,
- (erts_default_trace_pattern_flags.meta
- ? erts_default_meta_tracer_pid : am_false)); hp += 3;
+ t = TUPLE2(hp, am_meta, m); hp += 3;
r = CONS(hp, t, r); hp += 2;
t = TUPLE2(hp, am_match_spec, match_spec); hp += 3;
r = CONS(hp, t, r); hp += 2;
@@ -1397,7 +1249,7 @@ int
erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid, int is_blocking)
+ ErtsTracer meta_tracer, int is_blocking)
{
const ErtsCodeIndex code_ix = erts_active_code_ix();
int matches = 0;
@@ -1487,7 +1339,7 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
}
if (flags.meta) {
erts_set_mtrace_bif(pc, meta_match_prog_set,
- meta_tracer_pid);
+ meta_tracer);
m = 1;
}
if (flags.call_time) {
@@ -1527,7 +1379,7 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
}
if (flags.meta) {
erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
- meta_tracer_pid);
+ meta_tracer);
}
if (flags.call_count) {
erts_set_count_break(&finish_bp.f, on);
@@ -2336,50 +2188,79 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
}
/* End: Trace for System Profiling */
-BIF_RETTYPE
-trace_delivered_1(BIF_ALIST_1)
+/* trace_delivered sends an aux work job to all schedulers, and when
+   every scheduler has acknowledged that it has seen the job, the
+   reply message is sent to the requesting process.
+
+   IMPORTANT: We have to make sure that all messages sent
+ using enif_send have been delivered before we send the message
+ to the caller.
+
+ There used to be a separate implementation for when only a pid
+ is passed in, but since this is not performance critical code
+ we now use the same approach for both.
+*/
+
+typedef struct {
+ Process *proc;
+ Eterm ref;
+ Eterm ref_heap[REF_THING_SIZE];
+ Eterm target;
+ erts_smp_atomic32_t refc;
+} ErtsTraceDeliveredAll;
+
+static void
+reply_trace_delivered_all(void *vtdarp)
{
- DECL_AM(trace_delivered);
-#ifdef ERTS_SMP
- ErlHeapFragment *bp;
-#else
- ErtsProcLocks locks = 0;
-#endif
- Eterm *hp;
- Eterm msg, ref, msg_ref;
- Process *p;
- if (BIF_ARG_1 == am_all) {
- p = NULL;
- } else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) {
- if (is_not_internal_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
- }
-
- ref = erts_make_ref(BIF_P);
+ ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp;
+ ErtsProcLocks rp_locks = 0;
-#ifdef ERTS_SMP
- bp = new_message_buffer(REF_THING_SIZE + 4);
- hp = &bp->mem[0];
- msg_ref = STORE_NC(&hp, &bp->off_heap, ref);
-#else
- hp = HAlloc(BIF_P, 4);
- msg_ref = ref;
-#endif
+ if (erts_smp_atomic32_dec_read_nob(&tdarp->refc) == 0) {
+ Process *rp = tdarp->proc;
+ Eterm *hp = NULL;
+ ErlOffHeap *ohp = NULL;
+ ErtsMessage *mp = NULL;
+ Eterm ref_copy, msg;
- msg = TUPLE3(hp, AM_trace_delivered, BIF_ARG_1, msg_ref);
+ mp = erts_alloc_message_heap(
+ rp, &rp_locks, 4 + NC_HEAP_SIZE(tdarp->ref), &hp, &ohp);
-#ifdef ERTS_SMP
- erts_send_sys_msg_proc(BIF_P->common.id, BIF_P->common.id, msg, bp);
- if (p)
- erts_smp_proc_unlock(p,
- (BIF_P == p
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
-#else
- erts_send_message(BIF_P, BIF_P, &locks, msg, ERTS_SND_FLG_NO_SEQ_TRACE);
-#endif
+ ref_copy = STORE_NC(&hp, ohp, tdarp->ref);
+ msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy);
+
+ erts_queue_message(rp, &rp_locks, mp, msg);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
- BIF_RET(ref);
+ erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp);
+ erts_proc_dec_refc(rp);
+ }
+}
+
+BIF_RETTYPE
+trace_delivered_1(BIF_ALIST_1)
+{
+
+ if (BIF_ARG_1 == am_all || is_internal_pid(BIF_ARG_1)) {
+ Eterm *hp, ref;
+ ErtsTraceDeliveredAll *tdarp =
+ erts_alloc(ERTS_ALC_T_MISC_AUX_WORK, sizeof(ErtsTraceDeliveredAll));
+
+ tdarp->proc = BIF_P;
+ ref = erts_make_ref(BIF_P);
+ hp = &tdarp->ref_heap[0];
+ tdarp->ref = STORE_NC(&hp, NULL, ref);
+ tdarp->target = BIF_ARG_1;
+ erts_smp_atomic32_init_nob(&tdarp->refc,
+ (erts_aint32_t) erts_no_schedulers);
+ erts_proc_add_refc(BIF_P, 1);
+ erts_schedule_multi_misc_aux_work(0,
+ erts_no_schedulers,
+ reply_trace_delivered_all,
+ (void *) tdarp);
+ BIF_RET(ref);
+ } else {
+ BIF_ERROR(BIF_P, BADARG);
+ }
}
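
The rewritten trace_delivered/1 schedules the same aux-work job on every scheduler and lets the last one to run it (the atomic refc reaching zero) build and queue the {trace_delivered, Target, Ref} reply. A self-contained sketch of that fan-in pattern using C11 atomics and plain threads instead of the ERTS scheduler machinery; the thread count and the reply text are purely illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N_SCHEDULERS 4

typedef struct {
    atomic_int refc;        /* one acknowledgement expected per scheduler */
    const char *tag;        /* stands in for the {trace_delivered, ...} reply */
} delivered_req;

/* each scheduler runs this once; the thread that drops the counter
   to zero is the one that sends the reply to the requesting process */
static void *ack_trace_delivered(void *arg)
{
    delivered_req *req = arg;
    /* ... a real scheduler would first flush its pending trace messages ... */
    if (atomic_fetch_sub(&req->refc, 1) == 1)
        printf("reply: {%s, all, #Ref}\n", req->tag);
    return NULL;
}

int main(void)
{
    pthread_t tid[N_SCHEDULERS];
    delivered_req req = { .tag = "trace_delivered" };
    atomic_init(&req.refc, N_SCHEDULERS);

    for (int i = 0; i < N_SCHEDULERS; i++)
        pthread_create(&tid[i], NULL, ack_trace_delivered, &req);
    for (int i = 0; i < N_SCHEDULERS; i++)
        pthread_join(tid[i], NULL);
    return 0;
}
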
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index be14386b14..76b96637ae 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -135,21 +135,22 @@ get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks)
static Eterm
-set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
+set_tracee_flags(Process *tracee_p, ErtsTracer tracer,
+ Uint d_flags, Uint e_flags) {
Eterm ret;
Uint flags;
- if (tracer == NIL) {
+ if (ERTS_TRACER_IS_NIL(tracer)) {
flags = ERTS_TRACE_FLAGS(tracee_p) & ~TRACEE_FLAGS;
} else {
flags = ((ERTS_TRACE_FLAGS(tracee_p) & ~d_flags) | e_flags);
- if (! flags) tracer = NIL;
+ if (! flags) tracer = erts_tracer_nil;
}
- ret = ((ERTS_TRACER_PROC(tracee_p) != tracer
+ ret = ((!ERTS_TRACER_COMPARE(ERTS_TRACER(tracee_p),tracer)
|| ERTS_TRACE_FLAGS(tracee_p) != flags)
? am_true
: am_false);
- ERTS_TRACER_PROC(tracee_p) = tracer;
+ erts_tracer_replace(&tracee_p->common, tracer);
ERTS_TRACE_FLAGS(tracee_p) = flags;
return ret;
}
@@ -163,40 +164,16 @@ set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
** returns fail_term on failure. Fails if tracer pid or port is invalid.
*/
static Eterm
-set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
+set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer,
Uint d_flags, Uint e_flags) {
- Eterm ret = fail_term;
- Process *tracer_p;
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCKS_ALL ==
- erts_proc_lc_my_proc_locks(tracee_p));
-
- if (is_internal_pid(tracer)
- && (tracer_p =
- erts_pid2proc(tracee_p, ERTS_PROC_LOCKS_ALL,
- tracer, ERTS_PROC_LOCKS_ALL))) {
- if (tracee_p != tracer_p) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- ERTS_TRACE_FLAGS(tracer_p) |= (ERTS_TRACE_FLAGS(tracee_p)
- ? F_TRACER
- : 0);
- erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL);
- }
- } else if (is_internal_port(tracer)) {
- Port *tracer_port =
- erts_id2port_sflgs(tracer,
- tracee_p,
- ERTS_PROC_LOCKS_ALL,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (tracer_port) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- erts_port_release(tracer_port);
- }
- } else {
- ASSERT(is_nil(tracer));
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- }
- return ret;
+
+ ERTS_SMP_LC_ASSERT(
+ ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p)
+ || erts_thr_progress_is_blocking());
+
+ if (ERTS_TRACER_IS_NIL(tracer) || erts_is_tracer_enabled(tracee_p, tracer))
+ return set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ return fail_term;
}
/*
@@ -2358,7 +2335,7 @@ restart:
case matchEnableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), 0, n);
+ set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2371,7 +2348,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), 0, n);
+ set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n);
esp[-1] = am_true;
}
}
@@ -2379,7 +2356,7 @@ restart:
case matchDisableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), n, 0);
+ set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2392,7 +2369,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), n, 0);
+ set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0);
esp[-1] = am_true;
}
}
@@ -2428,7 +2405,7 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = ERTS_TRACER_PROC(c_p);
+ ErtsTracer tracer = erts_tracer_nil;
/* XXX Atomicity note: Not fully atomic. Default tracer
* is sampled from current process but applied to
* tracee and tracer later after releasing main
@@ -2440,29 +2417,34 @@ restart:
* {trace,[],[{{tracer,Tracer}}]} is much, much older.
*/
int cputs = 0;
+ erts_tracer_update(&tracer, ERTS_TRACER(c_p));
if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
cputs ) {
(--esp)[-1] = FAIL_TERM;
+ ERTS_TRACER_CLEAR(&tracer);
break;
}
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
(--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
d_flags, e_flags);
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACER_CLEAR(&tracer);
}
break;
case matchTrace3:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = ERTS_TRACER_PROC(c_p);
+ ErtsTracer tracer = erts_tracer_nil;
/* XXX Atomicity note. Not fully atomic. See above.
* Above it could possibly be solved, but not here.
*/
int cputs = 0;
Eterm tracee = (--esp)[0];
+
+ erts_tracer_update(&tracer, ERTS_TRACER(c_p));
if (! erts_trace_flags(esp[-1], &d_flags, &tracer, &cputs) ||
! erts_trace_flags(esp[-2], &e_flags, &tracer, &cputs) ||
@@ -2470,6 +2452,7 @@ restart:
! (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN,
tracee, ERTS_PROC_LOCKS_ALL))) {
(--esp)[-1] = FAIL_TERM;
+ ERTS_TRACER_CLEAR(&tracer);
break;
}
if (tmpp == c_p) {
@@ -2483,6 +2466,7 @@ restart:
erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
+ ERTS_TRACER_CLEAR(&tracer);
}
break;
case matchCatch: /* Match success, now build result */
@@ -5084,7 +5068,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
lint_res = db_match_set_lint(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
mps = db_match_set_compile(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
}
-
+
if (mps == NULL) {
hp = HAlloc(p,3);
ret = TUPLE2(hp, am_error, lint_res);
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 62417fa05c..f33ade27f3 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2233,9 +2233,7 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
n++;
}
#endif
- ASSERT(is_nil(ERTS_TRACER_PROC(p)) ||
- is_internal_pid(ERTS_TRACER_PROC(p)) ||
- is_internal_port(ERTS_TRACER_PROC(p)));
+ ASSERT(IS_TRACER_VALID(ERTS_TRACER(p)));
ASSERT(is_pid(follow_moved(p->group_leader, (Eterm) 0)));
if (is_not_immed(p->group_leader)) {
@@ -2905,7 +2903,7 @@ reply_gc_info(void *vgcirp)
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (gcirp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 1a621863bb..8e201d5711 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -1248,7 +1248,7 @@ hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = tmr->btm.bp;
erts_queue_message(proc, &proc_locks, mp,
- tmr->btm.message, NIL);
+ tmr->btm.message);
erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND);
queued_message = 1;
proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND;
@@ -1980,7 +1980,7 @@ access_sched_local_btm(Process *c_p, Eterm pid,
ERTS_HLT_ASSERT(hp + (async ? 4 : 3) == hp_end);
- erts_queue_message(proc, &proc_locks, mp, msg, NIL);
+ erts_queue_message(proc, &proc_locks, mp, msg);
if (c_p)
proc_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -2111,7 +2111,7 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp,
msg = TUPLE3(hp, tag, tref, res);
- erts_queue_message(c_p, &proc_locks, mp, msg, NIL);
+ erts_queue_message(c_p, &proc_locks, mp, msg);
proc_locks &= ~ERTS_PROC_LOCK_MAIN;
if (proc_locks)
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 9231fb1b34..39c0617143 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -97,6 +97,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_links", "address" },
{ "code_write_permission", NULL },
{ "proc_status", "pid" },
+ { "proc_trace", "pid" },
{ "ports_snapshot", NULL },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
@@ -148,6 +149,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
{ "sys_msg_q", NULL },
+ { "tracer_mtx", NULL },
{ "port_table", NULL },
#endif
{ "mtrace_op", NULL },
@@ -160,9 +162,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proclist_pre_alloc_lock", "address" },
{ "xports_list_pre_alloc_lock", "address" },
{ "inet_buffer_stack_lock", NULL },
- { "gc_info", NULL },
- { "io_wake", NULL },
- { "timer_wheel", NULL },
{ "system_block", NULL },
{ "timeofday", NULL },
{ "get_time", NULL },
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 7596747b91..1cb02ec274 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -324,7 +324,7 @@ erts_queue_dist_message(Process *rcvr,
tok_label, tok_lastcnt, tok_serial);
}
#endif
- erts_queue_message(rcvr, rcvr_locks, mp, msg, token);
+ erts_queue_message(rcvr, rcvr_locks, mp, msg);
}
}
else {
@@ -349,7 +349,7 @@ erts_queue_dist_message(Process *rcvr,
}
#endif
- LINK_MESSAGE(rcvr, mp);
+ LINK_MESSAGE(rcvr, mp, &mp->next, 1);
if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
@@ -364,31 +364,34 @@ erts_queue_dist_message(Process *rcvr,
}
}
-/* Add a message last in message queue */
+/* Add messages last in message queue */
static Sint
-queue_message(Process *c_p,
- Process* receiver,
- erts_aint32_t *receiver_state,
- ErtsProcLocks *receiver_locks,
- ErtsMessage* mp,
- Eterm message,
- Eterm seq_trace_token
-#ifdef USE_VM_PROBES
- , Eterm dt_utag
-#endif
- )
+queue_messages(Process *c_p,
+ Process* receiver,
+ erts_aint32_t *receiver_state,
+ ErtsProcLocks *receiver_locks,
+ ErtsMessage* first,
+ ErtsMessage** last,
+ Uint len)
{
Sint res;
int locked_msgq = 0;
erts_aint32_t state;
- ERTS_SMP_LC_ASSERT(*receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+ ASSERT(is_value(ERL_MESSAGE_TERM(first)));
+ ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
+ ERL_MESSAGE_TOKEN(first) == NIL ||
+ is_tuple(ERL_MESSAGE_TOKEN(first)));
#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
+ *receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+#endif
if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
- ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
+ ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
if (receiver_state)
state = *receiver_state;
@@ -417,16 +420,10 @@ queue_message(Process *c_p,
/* Drop message if receiver is exiting or has a pending exit... */
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_cleanup_messages(mp);
+ erts_cleanup_messages(first);
return 0;
}
- ERL_MESSAGE_TERM(mp) = message;
- ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
-#ifdef USE_VM_PROBES
- ERL_MESSAGE_DT_UTAG(mp) = dt_utag;
-#endif
-
res = receiver->msg.len;
#ifdef ERTS_SMP
if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
@@ -440,75 +437,83 @@ queue_message(Process *c_p,
*/
res += receiver->msg_inq.len;
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
- LINK_MESSAGE_PRIVQ(receiver, mp);
+ LINK_MESSAGE_PRIVQ(receiver, first, last, len);
}
else
#endif
{
- LINK_MESSAGE(receiver, mp);
+ LINK_MESSAGE(receiver, first, last, len);
}
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ ErtsMessage *msg = first;
+
#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
-
- dtrace_proc_str(receiver, receiver_name);
- if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
- tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ if (DTRACE_ENABLED(message_queued)) {
+ DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);
+
+ dtrace_proc_str(receiver, receiver_name);
+ if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
+ tok_label = signed_val(SEQ_TRACE_T_LABEL(seq_trace_token));
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(seq_trace_token));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(seq_trace_token));
+ }
+ DTRACE6(message_queued,
+ receiver_name, size_object(ERL_MESSAGE_TERM(msg)),
+ receiver->msg.len,
+ tok_label, tok_lastcnt, tok_serial);
}
- DTRACE6(message_queued,
- receiver_name, size_object(message), receiver->msg.len,
- tok_label, tok_lastcnt, tok_serial);
- }
#endif
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE))
- trace_receive(receiver, message);
+ while (msg) {
+ trace_receive(receiver, ERL_MESSAGE_TERM(msg));
+ msg = msg->next;
+ }
+
+ }
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(receiver,
#ifdef ERTS_SMP
- *receiver_locks
+ erts_proc_notify_new_message(receiver, *receiver_locks);
#else
- 0
-#endif
- );
-
-#ifndef ERTS_SMP
+ erts_proc_notify_new_message(receiver, 0);
ERTS_HOLE_CHECK(receiver);
#endif
return res;
}
-void
-#ifdef USE_VM_PROBES
-erts_queue_message_probe(Process* receiver, ErtsProcLocks *receiver_locks,
- ErtsMessage* mp,
- Eterm message, Eterm seq_trace_token, Eterm dt_utag)
-#else
+static Sint
+queue_message(Process *c_p,
+ Process* receiver,
+ erts_aint32_t *receiver_state,
+ ErtsProcLocks *receiver_locks,
+ ErtsMessage* mp, Eterm msg)
+{
+ ERL_MESSAGE_TERM(mp) = msg;
+ return queue_messages(c_p, receiver, receiver_state, receiver_locks,
+ mp, &mp->next, 1 );
+}
+
+Sint
erts_queue_message(Process* receiver, ErtsProcLocks *receiver_locks,
- ErtsMessage* mp,
- Eterm message, Eterm seq_trace_token)
-#endif
+ ErtsMessage* mp, Eterm msg)
{
- queue_message(NULL,
- receiver,
- NULL,
- receiver_locks,
- mp,
- message,
- seq_trace_token
-#ifdef USE_VM_PROBES
- , dt_utag
-#endif
- );
+ return queue_message(NULL, receiver, NULL, receiver_locks, mp, msg);
+}
+
+
+Sint
+erts_queue_messages(Process* receiver, ErtsProcLocks *receiver_locks,
+ ErtsMessage* first, ErtsMessage** last, Uint len)
+{
+ return queue_messages(NULL, receiver, NULL, receiver_locks,
+ first, last, len);
}
void
@@ -821,17 +826,15 @@ erts_send_message(Process* sender,
#endif
}
+ ERL_MESSAGE_TOKEN(mp) = token;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = utag;
+#endif
res = queue_message(sender,
receiver,
&receiver_state,
receiver_locks,
- mp,
- message,
- token
-#ifdef USE_VM_PROBES
- , utag
-#endif
- );
+ mp, message);
BM_SWAP_TIMER(send,system);
@@ -887,7 +890,8 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
/* the trace token must in this case be updated by the caller */
seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
temptoken = copy_struct(token, sz_token, &hp, ohp);
- erts_queue_message(to, to_locksp, mp, save, temptoken);
+ ERL_MESSAGE_TOKEN(mp) = temptoken;
+ erts_queue_message(to, to_locksp, mp, save);
} else {
sz_from = IS_CONST(from) ? 0 : size_object(from);
#ifdef SHCOPY_SEND
@@ -909,7 +913,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
? from
: copy_struct(from, sz_from, &hp, ohp));
save = TUPLE3(hp, am_EXIT, from_copy, mess);
- erts_queue_message(to, to_locksp, mp, save, NIL);
+ erts_queue_message(to, to_locksp, mp, save);
}
}
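
erts_queue_message() is now a thin wrapper around queue_messages(), which can append an already-linked chain described by (first, last, len) while taking the receiver's MSGQ lock only once; the constant-time splice relies on tracking the address of the queue's last next pointer, as the reworked LINK_MESSAGE macros in erl_message.h do. A self-contained sketch of that pointer-to-pointer technique over a simplified message struct (cleanup omitted):

#include <stdio.h>
#include <stdlib.h>

typedef struct msg { int term; struct msg *next; } msg;

/* the queue keeps the first element and the address of the last next
   pointer, so appending one message or a whole chain is O(1) */
typedef struct { msg *first; msg **last; int len; } msg_queue;

static void queue_init(msg_queue *q) { q->first = NULL; q->last = &q->first; q->len = 0; }

/* mirrors LINK_MESSAGE(p, first, last, len): splice a pre-linked chain */
static void link_messages(msg_queue *q, msg *first, msg **last, int len)
{
    *q->last = first;
    q->last = last;
    q->len += len;
}

static msg *mk(int term) { msg *m = malloc(sizeof *m); m->term = term; m->next = NULL; return m; }

int main(void)
{
    msg_queue q;
    queue_init(&q);

    /* build a 3-message chain off to the side, then splice it in one go */
    msg *a = mk(1), *b = mk(2), *c = mk(3);
    a->next = b; b->next = c;
    link_messages(&q, a, &c->next, 3);

    /* a single message uses the same call shape: a chain of length one */
    msg *d = mk(4);
    link_messages(&q, d, &d->next, 1);

    for (msg *m = q.first; m; m = m->next) printf("%d ", m->term);
    printf("(len=%d)\n", q.len);
    return 0;
}
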
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index ad4e65274c..608cf552a2 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -182,48 +182,64 @@ typedef struct {
Sint len; /* queue length */
} ErlMessageInQueue;
+typedef struct erl_trace_message_queue__ {
+ struct erl_trace_message_queue__ *next; /* point to the next receiver */
+ Eterm receiver;
+ ErtsMessage* first;
+ ErtsMessage** last; /* point to the last next pointer */
+ Sint len; /* queue length */
+} ErlTraceMessageQueue;
+
#endif
/* Get "current" message */
#define PEEK_MESSAGE(p) (*(p)->msg.save)
+#ifdef USE_VM_PROBES
+#define LINK_MESSAGE_DTAG(mp, dt) ERL_MESSAGE_DT_UTAG(mp) = dt
+#else
+#define LINK_MESSAGE_DTAG(mp, dt)
+#endif
-/* Add message last in private message queue */
-#define LINK_MESSAGE_PRIVQ(p, mp) do { \
- *(p)->msg.last = (mp); \
- (p)->msg.last = &(mp)->next; \
- (p)->msg.len++; \
-} while (0)
-
+#define LINK_MESSAGE_IMPL(p, first_msg, last_msg, num_msgs, where) do { \
+ *(p)->where.last = (first_msg); \
+ (p)->where.last = (last_msg); \
+ (p)->where.len += (num_msgs); \
+ } while(0)
#ifdef ERTS_SMP
-/* Move in message queue to end of private message queue */
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
-do { \
- if ((P)->msg_inq.first) { \
- *(P)->msg.last = (P)->msg_inq.first; \
- (P)->msg.last = (P)->msg_inq.last; \
- (P)->msg.len += (P)->msg_inq.len; \
- (P)->msg_inq.first = NULL; \
- (P)->msg_inq.last = &(P)->msg_inq.first; \
- (P)->msg_inq.len = 0; \
- } \
-} while (0)
-
-/* Add message last in message queue */
-#define LINK_MESSAGE(p, mp) do { \
- *(p)->msg_inq.last = (mp); \
- (p)->msg_inq.last = &(mp)->next; \
- (p)->msg_inq.len++; \
-} while(0)
+/* Add messages last in the private message queue */
+#define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, len) \
+ do { \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
+ } while (0)
+
+/* Add messages last in the message queue */
+#define LINK_MESSAGE(p, first_msg, last_msg, len) \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg_inq)
+
+#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p) \
+ do { \
+ if (p->msg_inq.first) { \
+ *p->msg.last = p->msg_inq.first; \
+ p->msg.last = p->msg_inq.last; \
+ p->msg.len += p->msg_inq.len; \
+ p->msg_inq.first = NULL; \
+ p->msg_inq.last = &p->msg_inq.first; \
+ p->msg_inq.len = 0; \
+ } \
+ } while (0)
#else
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P)
+#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p)
-/* Add message last in message queue */
-#define LINK_MESSAGE(p, mp) LINK_MESSAGE_PRIVQ((p), (mp))
+/* Add messages last in the message queue */
+#define LINK_MESSAGE(p, first_msg, last_msg, len) \
+ do { \
+ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
+ } while(0)
#endif
@@ -259,23 +275,30 @@ do { \
(HEAP_FRAG_P)->off_heap.overhead = 0; \
} while (0)
+#ifdef USE_VM_PROBES
+#define ERL_MESSAGE_DT_UTAG_INIT(MP) ERL_MESSAGE_DT_UTAG(MP) = NIL
+#else
+#define ERL_MESSAGE_DT_UTAG_INIT(MP) do{ } while (0)
+#endif
+
+#define ERTS_INIT_MESSAGE(MP) \
+ do { \
+ (MP)->next = NULL; \
+ ERL_MESSAGE_TERM(MP) = THE_NON_VALUE; \
+ ERL_MESSAGE_TOKEN(MP) = NIL; \
+ ERL_MESSAGE_DT_UTAG_INIT(MP); \
+ MP->data.attached = NULL; \
+ } while (0)
+
void init_message(void);
ErlHeapFragment* new_message_buffer(Uint);
ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
void erts_queue_dist_message(Process*, ErtsProcLocks*, ErtsDistExternal *, Eterm);
-#ifdef USE_VM_PROBES
-void erts_queue_message_probe(Process*, ErtsProcLocks*, ErtsMessage*,
- Eterm message, Eterm seq_trace_token, Eterm dt_utag);
-#define erts_queue_message(RP,RL,BP,Msg,SEQ) \
- erts_queue_message_probe((RP),(RL),(BP),(Msg),(SEQ),NIL)
-#else
-void erts_queue_message(Process*, ErtsProcLocks*, ErtsMessage*,
- Eterm message, Eterm seq_trace_token);
-#define erts_queue_message_probe(RP,RL,BP,Msg,SEQ,TAG) \
- erts_queue_message((RP),(RL),(BP),(Msg),(SEQ))
-#endif
+Sint erts_queue_message(Process*, ErtsProcLocks*,ErtsMessage*, Eterm);
+Sint erts_queue_messages(Process*, ErtsProcLocks*,
+ ErtsMessage*, ErtsMessage**, Uint);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
@@ -354,9 +377,7 @@ ERTS_GLB_FORCE_INLINE ErtsMessage *erts_alloc_message(Uint sz, Eterm **hpp)
if (sz == 0) {
mp = erts_alloc_message_ref();
- mp->next = NULL;
- ERL_MESSAGE_TERM(mp) = NIL;
- mp->data.attached = NULL;
+ ERTS_INIT_MESSAGE(mp);
if (hpp)
*hpp = NULL;
return mp;
@@ -365,8 +386,7 @@ ERTS_GLB_FORCE_INLINE ErtsMessage *erts_alloc_message(Uint sz, Eterm **hpp)
mp = erts_alloc(ERTS_ALC_T_MSG,
sizeof(ErtsMessage) + (sz - 1)*sizeof(Eterm));
- mp->next = NULL;
- ERL_MESSAGE_TERM(mp) = NIL;
+ ERTS_INIT_MESSAGE(mp);
mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
ERTS_INIT_HEAP_FRAG(&mp->hfrag, sz, sz);
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 71e3fd8b6e..d0f305900a 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -257,7 +257,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
- erts_queue_message(rp, &rp_locks, msgp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, msgp, msg);
if (esdp && msaccrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 61dba4ef82..f35c08450c 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -130,17 +130,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
env->fpe_was_unmasked = erts_block_fpe();
env->tmp_obj_list = NULL;
env->exception_thrown = 0;
-}
-
-static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
-{
- env->mod_nif = mod_nif;
- env->proc = NULL;
- env->hp = NULL;
- env->hp_end = NULL;
- env->heap_frag = NULL;
- env->fpe_was_unmasked = erts_block_fpe();
- env->tmp_obj_list = NULL;
+ env->tracee = NULL;
}
/* Temporary object header, auto-deallocated when NIF returns
@@ -180,13 +170,6 @@ void erts_post_nif(ErlNifEnv* env)
free_tmp_objs(env);
}
-static void post_nif_noproc(ErlNifEnv* env)
-{
- erts_unblock_fpe(env->fpe_was_unmasked);
- free_tmp_objs(env);
-}
-
-
/* Flush out our cached heap pointers to allow an ordinary HAlloc
*/
static void flush_env(ErlNifEnv* env)
@@ -247,18 +230,20 @@ struct enif_msg_environment_t
Process phony_proc;
};
-ErlNifEnv* enif_alloc_env(void)
+static ERTS_INLINE void
+setup_nif_env(struct enif_msg_environment_t* msg_env,
+ struct erl_module_nif* mod)
{
- struct enif_msg_environment_t* msg_env =
- erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
Eterm* phony_heap = (Eterm*) msg_env; /* dummy non-NULL ptr */
-
- msg_env->env.hp = phony_heap;
+
+ msg_env->env.hp = phony_heap;
msg_env->env.hp_end = phony_heap;
msg_env->env.heap_frag = NULL;
- msg_env->env.mod_nif = NULL;
+ msg_env->env.mod_nif = mod;
msg_env->env.tmp_obj_list = NULL;
+ msg_env->env.fpe_was_unmasked = erts_block_fpe();
msg_env->env.proc = &msg_env->phony_proc;
+ msg_env->env.exception_thrown = 0;
memset(&msg_env->phony_proc, 0, sizeof(Process));
HEAP_START(&msg_env->phony_proc) = phony_heap;
HEAP_TOP(&msg_env->phony_proc) = phony_heap;
@@ -270,6 +255,13 @@ ErlNifEnv* enif_alloc_env(void)
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
#endif
+}
+
+ErlNifEnv* enif_alloc_env(void)
+{
+ struct enif_msg_environment_t* msg_env =
+ erts_alloc_fnf(ERTS_ALC_T_NIF, sizeof(struct enif_msg_environment_t));
+ setup_nif_env(msg_env, NULL);
return &msg_env->env;
}
void enif_free_env(ErlNifEnv* env)
@@ -302,24 +294,114 @@ void enif_clear_env(ErlNifEnv* env)
menv->env.hp = menv->env.hp_end = HEAP_TOP(p);
ASSERT(!is_offheap(&MSO(p)));
+ erts_unblock_fpe(env->fpe_was_unmasked);
free_tmp_objs(env);
}
-int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
- ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+
+#ifdef ERTS_SMP
+#ifdef DEBUG
+static int enif_send_delay = 0;
+#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
+#else
+#ifdef ERTS_PROC_LOCK_OWN_IMPL
+#define ERTS_FORCE_ENIF_SEND_DELAY() 0
+#else
+/*
+ * We always schedule messages if we do not use our own
+ * process lock implementation: a trylock on a lock that might
+ * already be locked by the same thread has no guaranteed
+ * behaviour across mutex implementations.
+ */
+#define ERTS_FORCE_ENIF_SEND_DELAY() 1
+#endif
+#endif
+
+int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
+{
+ ErlTraceMessageQueue *msgq, **last_msgq;
+ int reds = 0;
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ msgq = c_p->trace_msg_q;
+
+ if (!msgq)
+ goto error;
+
+ do {
+ Process* rp;
+ ErtsProcLocks rp_locks;
+ ErtsMessage *first, **last;
+ Uint len;
+
+ first = msgq->first;
+ last = msgq->last;
+ len = msgq->len;
+ msgq->first = NULL;
+ msgq->last = &msgq->first;
+ msgq->len = 0;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ ASSERT(len != 0);
+
+ rp = erts_proc_lookup(msgq->receiver);
+ if (rp) {
+ rp_locks = 0;
+ if (rp->common.id == c_p->common.id)
+ rp_locks = c_p_locks;
+ erts_queue_messages(rp, &rp_locks, first, last, len);
+ if (rp->common.id == c_p->common.id)
+ rp_locks &= ~c_p_locks;
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ reds += len;
+ } else {
+ erts_cleanup_messages(first);
+ }
+ reds += 1;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ msgq = msgq->next;
+ } while (msgq);
+
+ last_msgq = &c_p->trace_msg_q;
+
+ while (*last_msgq) {
+ msgq = *last_msgq;
+ if (msgq->len == 0) {
+ *last_msgq = msgq->next;
+ erts_free(ERTS_ALC_T_TRACE_MSG_QUEUE, msgq);
+ } else {
+ last_msgq = &msgq->next;
+ }
+ }
+
+error:
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+
+ return reds;
+}
+
+#endif
+
+int
+enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+ ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
- ErtsProcLocks rp_locks = 0;
+ ErtsProcLocks rp_locks = 0, lc_locks = 0, c_p_locks = ERTS_PROC_LOCK_MAIN;
Process* rp;
Process* c_p;
ErtsMessage *mp;
Eterm receiver = to_pid->pid;
int flush_me = 0;
- int scheduler = erts_get_scheduler_id() != 0;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ int scheduler = esdp ? esdp->no : 0;
if (env != NULL) {
c_p = env->proc;
if (receiver == c_p->common.id) {
- rp_locks = ERTS_PROC_LOCK_MAIN;
+ rp_locks = c_p_locks;
flush_me = 1;
}
}
@@ -329,7 +411,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#else
erts_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
#endif
- }
+ }
rp = (scheduler
? erts_proc_lookup(receiver)
@@ -353,7 +435,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
menv->env.heap_frag = NULL;
MBUF(&menv->phony_proc) = NULL;
}
- ASSERT(!is_offheap(&MSO(&menv->phony_proc)));
} else {
Uint sz = size_object(msg);
Eterm *hp;
@@ -362,14 +443,83 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ASSERT(hp == mp->hfrag.mem+mp->hfrag.used_size);
}
- if (flush_me) {
- flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+ ERL_MESSAGE_TERM(mp) = msg;
+
+ if (flush_me) {
+ flush_env(env); /* Needed for ERTS_HOLE_CHECK */
+ }
+
+ if (!env || !env->tracee) {
+ erts_queue_message(rp, &rp_locks, mp, msg);
+ }
+#ifdef ERTS_SMP
+ else {
+        /* This clause is taken when the NIF is called in the context
+           of a traced process. We do not know which locks we hold,
+           so we have to do a trylock, and if that fails we enqueue
+           the message in a special trace message output queue of
+           the tracee. */
+ ErlTraceMessageQueue *msgq;
+ Process *t_p = env->tracee;
+
+
+ erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+
+ msgq = t_p->trace_msg_q;
+
+ while (msgq != NULL) {
+ if (msgq->receiver == receiver) {
+ break;
+ }
+ msgq = msgq->next;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ lc_locks = erts_proc_lc_my_proc_locks(rp);
+ rp_locks |= lc_locks;
+ if (receiver == c_p->common.id)
+ c_p_locks |= lc_locks;
+#endif
+ if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
+ rp_locks & ERTS_PROC_LOCK_MSGQ ||
+ erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+
+ if (!msgq) {
+
+ msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
+ sizeof(ErlTraceMessageQueue));
+ msgq->receiver = receiver;
+ msgq->first = mp;
+ msgq->last = &mp->next;
+ msgq->len = 1;
+
+ /* Insert in linked list */
+ msgq->next = t_p->trace_msg_q;
+ t_p->trace_msg_q = msgq;
+
+ erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+
+ erts_schedule_flush_trace_messages(t_p->common.id);
+
+ } else {
+ msgq->len++;
+ *msgq->last = mp;
+ msgq->last = &mp->next;
+ erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ }
+ } else {
+ erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ rp_locks &= ~ERTS_PROC_LOCK_TRACE;
+ rp_locks |= ERTS_PROC_LOCK_MSGQ;
+ erts_queue_message(rp, &rp_locks, mp, msg);
+ }
}
- erts_queue_message(rp, &rp_locks, mp, msg, am_undefined);
+#endif
+
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ if (rp_locks & ~lc_locks)
+ erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
if (flush_me) {
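
As the comment in the traced-process branch above explains, enif_send must not block on the receiver's message-queue lock while running under a tracer, so it parks the message on a per-receiver trace message queue of the tracee whenever the try-lock fails, the MSGQ lock is already held, or a queue for that receiver already exists (which preserves per-receiver ordering). A simplified, self-contained sketch of that append path; the types and the helper name are placeholders for ErlTraceMessageQueue/ErtsMessage, not the ERTS API:

    #include <stdlib.h>

    struct msg     { struct msg *next; };
    struct trace_q { long receiver; struct msg *first, **last;
                     unsigned len; struct trace_q *next; };

    /* Append one message destined for `receiver`, creating the per-receiver
     * queue on first use. `*head` is the tracee's list of pending queues. */
    static void enqueue_trace_msg(struct trace_q **head, long receiver,
                                  struct msg *mp)
    {
        struct trace_q *q;
        mp->next = NULL;
        for (q = *head; q; q = q->next)
            if (q->receiver == receiver)
                break;
        if (!q) {
            q = malloc(sizeof(*q));
            if (!q)
                return;                 /* allocation failure handling omitted */
            q->receiver = receiver;
            q->first = mp;
            q->last = &mp->next;        /* tail pointer: address of last link */
            q->len = 1;
            q->next = *head;            /* push onto the tracee's queue list */
            *head = q;
            /* the real code also schedules a flush system task here */
        } else {
            *q->last = mp;              /* O(1) append via the tail pointer */
            q->last = &mp->next;
            q->len++;
        }
    }
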
@@ -1485,10 +1635,10 @@ static void close_lib(struct erl_module_nif* lib)
ASSERT(erts_refc_read(&lib->rt_dtor_cnt,0) == 0);
if (lib->entry != NULL && lib->entry->unload != NULL) {
- ErlNifEnv env;
- pre_nif_noproc(&env, lib);
- lib->entry->unload(&env, lib->priv_data);
- post_nif_noproc(&env);
+ struct enif_msg_environment_t msg_env;
+ setup_nif_env(&msg_env, lib);
+ lib->entry->unload(&msg_env.env, lib->priv_data);
+ enif_clear_env(&msg_env.env);
}
if (!erts_is_static_nif(lib->handle))
erts_sys_ddll_close(lib->handle);
@@ -1634,10 +1784,10 @@ static void nif_resource_dtor(Binary* bin)
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == &nif_resource_dtor);
if (type->dtor != NULL) {
- ErlNifEnv env;
- pre_nif_noproc(&env, type->owner);
- type->dtor(&env,resource->data);
- post_nif_noproc(&env);
+ struct enif_msg_environment_t msg_env;
+ setup_nif_env(&msg_env, type->owner);
+ type->dtor(&msg_env.env, resource->data);
+ enif_clear_env(&msg_env.env);
}
if (erts_refc_dectest(&type->refc, 0) == 0) {
ASSERT(type->next == NULL);
@@ -2916,6 +3066,9 @@ erts_unload_nif(struct erl_module_nif* lib)
ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
+
+ erts_tracer_nif_clear();
+
for (rt = resource_type_list.next;
rt != &resource_type_list;
rt = next) {
@@ -2958,6 +3111,7 @@ void erl_nif_init()
resource_type_list.owner = NULL;
resource_type_list.module = THE_NON_VALUE;
resource_type_list.name = THE_NON_VALUE;
+
}
int erts_nif_get_funcs(struct erl_module_nif* mod,
@@ -2967,7 +3121,8 @@ int erts_nif_get_funcs(struct erl_module_nif* mod,
return mod->entry->num_of_funcs;
}
-Eterm erts_nif_call_function(Process *p, struct erl_module_nif* mod,
+Eterm erts_nif_call_function(Process *p, Process *tracee,
+ struct erl_module_nif* mod,
ErlNifFunc *fun, int argc, Eterm *argv)
{
Eterm nif_result;
@@ -2979,13 +3134,14 @@ Eterm erts_nif_call_function(Process *p, struct erl_module_nif* mod,
break;
ASSERT(i < mod->entry->num_of_funcs);
if (p)
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
+ || erts_smp_thr_progress_is_blocking());
#endif
if (p) {
struct enif_environment_t env;
ASSERT(is_internal_pid(p->common.id));
erts_pre_nif(&env, p, mod);
- env.may_delay_enif_send = 1;
+ env.tracee = tracee;
nif_result = (*fun->fptr)(&env, argc, argv);
if (env.exception_thrown)
nif_result = THE_NON_VALUE;
@@ -2995,7 +3151,7 @@ Eterm erts_nif_call_function(Process *p, struct erl_module_nif* mod,
so we create a phony one. */
struct enif_msg_environment_t msg_env;
setup_nif_env(&msg_env, mod);
- msg_env.env.may_delay_enif_send = 1;
+ msg_env.env.tracee = tracee;
nif_result = (*fun->fptr)(&msg_env.env, argc, argv);
if (msg_env.env.exception_thrown)
nif_result = THE_NON_VALUE;
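
erts_nif_call_function now records the tracee in the NIF environment before invoking the tracer callback, and enif_send consults that field to choose between direct delivery and the delayed path shown earlier. A rough sketch of the idea; the struct, field, and function names here are placeholders, not the ERTS API:

    struct env { void *tracee; /* ... other per-call state ... */ };
    typedef int (*callback_fn)(struct env *env, int argc, const long *argv);

    /* Run a tracer callback with the tracee recorded in the environment so
     * that a send performed inside the callback can detect the trace context. */
    static int call_tracer_cb(struct env *env, void *tracee,
                              callback_fn cb, int argc, const long *argv)
    {
        int res;
        env->tracee = tracee;          /* mark: sends may need to be delayed */
        res = cb(env, argc, argv);
        env->tracee = NULL;            /* clear before the env is reused */
        return res;
    }
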
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 33785ecaca..3964f7f679 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -87,10 +87,10 @@ typedef ErlNifSInt64 ErlNifTime;
#define ERL_NIF_TIME_ERROR ((ErlNifSInt64) ERTS_NAPI_TIME_ERROR__)
typedef enum {
- ERL_NIF_SEC = ERTS_NAPI_SEC__,
- ERL_NIF_MSEC = ERTS_NAPI_MSEC__,
- ERL_NIF_USEC = ERTS_NAPI_USEC__,
- ERL_NIF_NSEC = ERTS_NAPI_NSEC__
+ ERL_NIF_SEC = ERTS_NAPI_SEC__,
+ ERL_NIF_MSEC = ERTS_NAPI_MSEC__,
+ ERL_NIF_USEC = ERTS_NAPI_USEC__,
+ ERL_NIF_NSEC = ERTS_NAPI_NSEC__
} ErlNifTimeUnit;
struct enif_environment_t;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index d222e79f56..e7d0d127e2 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -444,7 +444,8 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
typedef enum {
ERTS_PSTT_GC, /* Garbage Collect */
ERTS_PSTT_CPC, /* Check Process Code */
- ERTS_PSTT_COHMQ /* Change off heap message queue */
+ ERTS_PSTT_COHMQ, /* Change off heap message queue */
+ ERTS_PSTT_FTMQ /* Flush trace msg queue */
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -1138,7 +1139,7 @@ reply_sched_wall_time(void *vswtrp)
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (swtrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -1217,7 +1218,7 @@ reply_system_check(void *vscrp)
hpp = &hp;
msg = STORE_NC(hpp, ohp, scrp->ref);
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (scrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -9315,6 +9316,7 @@ Process *schedule(Process *p, int calls)
esdp = erts_scheduler_data;
ASSERT(esdp->current_process == p);
#endif
+
reds = actual_reds = calls - esdp->virtual_reds;
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
@@ -9327,27 +9329,39 @@ Process *schedule(Process *p, int calls)
p->reds += actual_reds;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+#ifdef ERTS_SMP
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_TRACE);
+ if (p->trace_msg_q) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE);
+ erts_schedule_flush_trace_messages(p->common.id);
+ } else
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE);
+#endif
- state = erts_smp_atomic32_read_acqb(&p->state);
+ state = erts_smp_atomic32_read_nob(&p->state);
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) {
+ if ((state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, ((state & ERTS_PSFLG_FREE)
- ? am_out_exited
- : am_out_exiting));
+ trace_sched(p, ERTS_PROC_LOCK_MAIN,
+ ((state & ERTS_PSFLG_FREE)
+ ? am_out_exited
+ : am_out_exiting));
}
else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
- trace_sched(p, am_out);
- else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(p, am_out);
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_out);
}
}
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
+ /* have to re-read state after taking lock */
+ state = erts_smp_atomic32_read_nob(&p->state);
+
#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
@@ -9844,27 +9858,27 @@ Process *schedule(Process *p, int calls)
p->fcalls = reds;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
if (IS_TRACED(p)) {
if (state & ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, am_in_exiting);
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
}
else {
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
- trace_sched(p, am_in);
- else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(p, am_in);
+ if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED) ||
+ ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
+ trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in);
}
if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
}
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#ifdef ERTS_SMP
- if (is_not_nil(ERTS_TRACER_PROC(p)))
- erts_check_my_tracer_proc(p);
+ /* Clears tracer if it has been removed */
+ (void)ERTS_TRACER_PROC_IS_ENABLED(p);
#endif
if (state & ERTS_PSFLG_RUNNING_SYS) {
@@ -9996,7 +10010,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -10164,8 +10178,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
{
int garbage_collected = 0;
erts_aint32_t state = *statep;
- int max_reds = in_reds;
- int reds = 0;
+ int reds = in_reds;
int qmask = 0;
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
@@ -10193,12 +10206,12 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
if (c_p->flags & F_DISABLE_GC) {
save_gc_task(c_p, st, st_prio);
st = NULL;
- reds++;
+ reds--;
}
else {
if (!garbage_collected) {
FLAGS(c_p) |= F_NEED_FULLSWEEP;
- reds += erts_garbage_collect_nobump(c_p,
+ reds -= erts_garbage_collect_nobump(c_p,
0,
c_p->arg_reg,
c_p->arity);
@@ -10207,21 +10220,30 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
st_res = am_true;
}
break;
- case ERTS_PSTT_CPC:
+ case ERTS_PSTT_CPC: {
+ int cpc_reds = 0;
st_res = erts_check_process_code(c_p,
st->arg[0],
unsigned_val(st->arg[1]),
- &reds);
+ &cpc_reds);
+ reds -= cpc_reds;
if (is_non_value(st_res)) {
/* Needed gc, but gc was disabled */
save_gc_task(c_p, st, st_prio);
st = NULL;
}
break;
+ }
case ERTS_PSTT_COHMQ:
- reds += erts_complete_off_heap_message_queue_change(c_p);
+ reds -= erts_complete_off_heap_message_queue_change(c_p);
st_res = am_true;
break;
+#ifdef ERTS_SMP
+ case ERTS_PSTT_FTMQ:
+ reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
+ st_res = am_true;
+ break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -10231,11 +10253,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
reds += notify_sys_task_executed(c_p, st, st_res);
state = erts_smp_atomic32_read_acqb(&c_p->state);
- } while (qmask && reds < max_reds);
+ } while (qmask && reds > 0);
*statep = state;
- return reds;
+ return in_reds - reds;
}
static int
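
The reworked execute_sys_tasks loop treats `reds` as a remaining budget that each task decrements, and it reports the amount actually consumed as `in_reds - reds` instead of accumulating a separate counter against `max_reds`. A minimal, self-contained sketch of that accounting pattern (the task functions are stand-ins):

    #include <stdbool.h>

    static int  pending = 3;                     /* demo state */
    static bool have_pending_task(void) { return pending > 0; }
    static int  execute_one_task(void)  { pending--; return 500; }

    /* Returns the number of reductions actually consumed. */
    static int run_tasks(int in_reds)
    {
        int reds = in_reds;                      /* remaining budget */
        while (reds > 0 && have_pending_task())
            reds -= execute_one_task();          /* each task reports its cost */
        return in_reds - reds;                   /* consumed = budget - remainder */
    }
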
@@ -10267,6 +10289,12 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
case ERTS_PSTT_COHMQ:
st_res = am_false;
break;
+#ifdef ERTS_SMP
+ case ERTS_PSTT_FTMQ:
+ reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
+ st_res = am_true;
+ break;
+#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -10402,8 +10430,8 @@ badarg:
BIF_ERROR(BIF_P, BADARG);
}
-void
-erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
+static void
+erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
{
Process *rp = erts_proc_lookup(pid);
if (rp) {
@@ -10413,7 +10441,7 @@ erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(0));
- st->type = ERTS_PSTT_COHMQ;
+ st->type = type;
st->requester = NIL;
st->reply_tag = NIL;
st->req_id = NIL;
@@ -10428,6 +10456,18 @@ erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
}
}
+void
+erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ);
+}
+
+void
+erts_schedule_flush_trace_messages(Eterm pid)
+{
+ erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ);
+}
+
static void
save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
{
@@ -10886,6 +10926,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm res = THE_NON_VALUE;
erts_aint32_t state = 0;
erts_aint32_t prio = (erts_aint32_t) PRIORITY_NORMAL;
+ ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL;
#ifdef SHCOPY_SPAWN
erts_shcopy_t info;
INITIALIZE_SHCOPY(info);
@@ -11062,7 +11103,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
: STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
- erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER_PROC(p));
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER(p));
p->msg.first = NULL;
p->msg.last = &p->msg.first;
@@ -11098,21 +11139,47 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->last_old_htop = NULL;
#endif
+#ifdef ERTS_SMP
+ p->trace_msg_q = NULL;
+ p->scheduler_data = NULL;
+ p->suspendee = NIL;
+ p->pending_suspenders = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
+#endif
+
+#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
+ p->fp_exception = 0;
+#endif
+
if (IS_TRACED(parent)) {
if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) {
ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
- }
- if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
- trace_proc_spawn(parent, p->common.id, mod, func, args);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
}
- if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
/* Overrides TRACE_CHILDREN */
ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
}
+ if (so->flags & SPO_LINK && ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
+ erts_tracer_replace(&p->common, ERTS_TRACER(parent));
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ }
+ }
+ if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
+ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ trace_proc_spawn(parent, p->common.id, mod, func, args);
+ if (so->flags & SPO_LINK)
+ trace_proc(parent, locks, parent, am_link, p->common.id);
+ }
}
/*
@@ -11123,10 +11190,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef DEBUG
int ret;
#endif
- if (IS_TRACED_FL(parent, F_TRACE_PROCS)) {
- trace_proc(parent, parent, am_link, p->common.id);
- }
-
#ifdef DEBUG
ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
ASSERT(ret == 0);
@@ -11137,17 +11200,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
#endif
- if (IS_TRACED(parent)) {
- if (ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
- ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
- ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); /*maybe steal*/
-
- if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
- ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- }
- }
- }
}
/*
@@ -11162,19 +11214,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
so->mref = mref;
}
-#ifdef ERTS_SMP
- p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
- p->pending_exit.reason = THE_NON_VALUE;
- p->pending_exit.bp = NULL;
-#endif
-
-#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
- p->fp_exception = 0;
-#endif
-
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ erts_smp_proc_unlock(p, locks);
res = p->common.id;
@@ -11198,7 +11238,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
error:
- erts_smp_proc_unlock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR);
return res;
}
@@ -11223,7 +11263,7 @@ void erts_init_empty_process(Process *p)
p->rcount = 0;
p->common.id = ERTS_INVALID_PID;
p->reds = 0;
- ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACER(p) = erts_tracer_nil;
ERTS_TRACE_FLAGS(p) = F_INITIAL_TRACE_FLAGS;
p->group_leader = ERTS_INVALID_PID;
p->flags = 0;
@@ -11343,7 +11383,7 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->live_hf_end == ERTS_INVALID_HFRAG_PTR);
ASSERT(p->heap == NULL);
ASSERT(p->common.id == ERTS_INVALID_PID);
- ASSERT(ERTS_TRACER_PROC(p) == NIL);
+ ASSERT(ERTS_TRACER_IS_NIL(ERTS_TRACER(p)));
ASSERT(ERTS_TRACE_FLAGS(p) == F_INITIAL_TRACE_FLAGS);
ASSERT(p->group_leader == ERTS_INVALID_PID);
ASSERT(p->next == NULL);
@@ -11690,7 +11730,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
mess = copy_struct(exit_term, term_size, &hp, ohp);
#endif
- erts_queue_message(to, to_locksp, mp, mess, NIL);
+ erts_queue_message(to, to_locksp, mp, mess);
} else {
Eterm temp_token;
Uint sz_token;
@@ -11708,9 +11748,10 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
mess = copy_struct(exit_term, term_size, &hp, ohp);
#endif
/* the trace token must in this case be updated by the caller */
- seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL);
+ seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, to);
temp_token = copy_struct(token, sz_token, &hp, ohp);
- erts_queue_message(to, to_locksp, mp, mess, temp_token);
+ ERL_MESSAGE_TOKEN(mp) = temp_token;
+ erts_queue_message(to, to_locksp, mp, mess);
}
}
@@ -11819,6 +11860,9 @@ send_exit_signal(Process *c_p, /* current process if and only
if ((state & ERTS_PSFLG_TRAP_EXIT)
&& (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
+ /* have to release the status lock in order to send the exit message */
+ erts_smp_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND);
+ *rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
if (have_seqtrace(token) && token_update)
seq_trace_update_send(token_update);
if (is_value(exit_tuple))
@@ -12116,6 +12160,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
DistEntry *dep;
Process *rp;
+
switch(lnk->type) {
case LINK_PID:
if(is_internal_port(item)) {
@@ -12163,7 +12208,11 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(p, rp, am_getting_unlinked, p->common.id);
+ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
+ rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
+ }
+ trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id);
}
}
}
@@ -12280,8 +12329,6 @@ erts_do_exit_process(Process* p, Eterm reason)
if (IS_TRACED_FL(p, F_TRACE_CALLS))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
- trace_proc(p, p, am_exit, reason);
}
erts_trace_check_exiting(p->common.id);
@@ -12297,6 +12344,10 @@ erts_do_exit_process(Process* p, Eterm reason)
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason);
+
+
/*
* p->u.initial of this process can *not* be used anymore;
* will be overwritten by misc termination data.
@@ -12436,6 +12487,9 @@ erts_continue_exit_process(Process *p)
ASSERT(!p->common.u.alive.reg);
}
+ if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT))
+ trace_sched(p, curr_locks, am_out_exited);
+
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
curr_locks = ERTS_PROC_LOCKS_ALL;
@@ -12559,6 +12613,12 @@ erts_continue_exit_process(Process *p)
if (nif_export)
erts_destroy_nif_export(nif_export);
+#ifdef ERTS_SMP
+ erts_flush_trace_messages(p, 0);
+#endif
+
+ ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
+
delete_process(p);
#ifdef ERTS_SMP
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index c2303eea49..8261ae28f8 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1041,6 +1041,7 @@ struct process {
#ifdef ERTS_SMP
ErlMessageInQueue msg_inq;
+ ErlTraceMessageQueue *trace_msg_q;
ErtsPendExit pending_exit;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
@@ -1358,7 +1359,6 @@ extern int erts_system_profile_ts_type;
#define F_TRACE_FLAG(N) (1 << (ERTS_TRACE_TS_TYPE_BITS + (N)))
/* process trace_flags */
-
#define F_NOW_TS (ERTS_TRACE_FLG_NOW_TIMESTAMP \
<< ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
#define F_STRICT_MON_TS (ERTS_TRACE_FLG_STRICT_MONOTONIC_TIMESTAMP \
@@ -1380,8 +1380,7 @@ extern int erts_system_profile_ts_type;
#define F_TRACE_ARITY_ONLY F_TRACE_FLAG(12)
#define F_TRACE_RETURN_TO F_TRACE_FLAG(13) /* Return_to trace when breakpoint tracing */
#define F_TRACE_SILENT F_TRACE_FLAG(14) /* No call trace msg suppress */
-#define F_TRACER F_TRACE_FLAG(15) /* May be (has been) tracer */
-#define F_EXCEPTION_TRACE F_TRACE_FLAG(16) /* May have exception trace on stack */
+#define F_EXCEPTION_TRACE F_TRACE_FLAG(15) /* May have exception trace on stack */
/* port trace flags, currently the same as process trace flags */
#define F_TRACE_SCHED_PORTS F_TRACE_FLAG(17) /* Trace of port scheduling */
@@ -1717,6 +1716,8 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *),
ErtsThrPrgrLaterOp *,
UWord);
void erts_schedule_complete_off_heap_message_queue_change(Eterm pid);
+void erts_schedule_flush_trace_messages(Eterm pid);
+int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
@@ -1996,7 +1997,6 @@ erts_psd_set(Process *p, int ix, void *data)
#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, NTE) \
erts_psd_set((P), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
-
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler);
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index e1f470d381..a69185bc5c 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -106,6 +106,7 @@ static struct {
Sint16 proc_lock_msgq;
Sint16 proc_lock_btm;
Sint16 proc_lock_status;
+ Sint16 proc_lock_trace;
} lc_id;
#endif
@@ -152,6 +153,7 @@ erts_init_proc_lock(int cpus)
lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
lc_id.proc_lock_btm = erts_lc_get_lock_order_id("proc_btm");
lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
+ lc_id.proc_lock_trace = erts_lc_get_lock_order_id("proc_trace");
#endif
}
@@ -1057,6 +1059,11 @@ erts_proc_lock_init(Process *p)
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
#endif
+ erts_mtx_init_x(&p->lock.trace, "proc_trace", p->common.id, do_lock_count);
+ ethr_mutex_lock(&p->lock.trace.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.trace.lc);
+#endif
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
@@ -1078,6 +1085,7 @@ erts_proc_lock_fin(Process *p)
erts_mtx_destroy(&p->lock.msgq);
erts_mtx_destroy(&p->lock.btm);
erts_mtx_destroy(&p->lock.status);
+ erts_mtx_destroy(&p->lock.trace);
#endif
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
erts_lcnt_proc_lock_destroy(p);
@@ -1100,26 +1108,29 @@ void erts_lcnt_enable_proc_lock_count(int enable) {
void erts_lcnt_proc_lock_init(Process *p) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) {
erts_lcnt_init_lock_empty(&(p->lock.lcnt_main));
+ erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
erts_lcnt_init_lock_empty(&(p->lock.lcnt_msgq));
erts_lcnt_init_lock_empty(&(p->lock.lcnt_btm));
- erts_lcnt_init_lock_empty(&(p->lock.lcnt_link));
erts_lcnt_init_lock_empty(&(p->lock.lcnt_status));
+ erts_lcnt_init_lock_empty(&(p->lock.lcnt_trace));
} else { /* now the common case */
Eterm pid = (p->common.id != ERTS_INVALID_PID) ? p->common.id : NIL;
erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, pid);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, pid);
erts_lcnt_init_lock_x(&(p->lock.lcnt_btm), "proc_btm", ERTS_LCNT_LT_PROCLOCK, pid);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, pid);
erts_lcnt_init_lock_x(&(p->lock.lcnt_status),"proc_status",ERTS_LCNT_LT_PROCLOCK, pid);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_trace), "proc_trace", ERTS_LCNT_LT_PROCLOCK, pid);
} /* the lock names should really be aligned to four characters */
} /* logic reversed */
void erts_lcnt_proc_lock_destroy(Process *p) {
erts_lcnt_destroy_lock(&(p->lock.lcnt_main));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
erts_lcnt_destroy_lock(&(p->lock.lcnt_msgq));
erts_lcnt_destroy_lock(&(p->lock.lcnt_btm));
- erts_lcnt_destroy_lock(&(p->lock.lcnt_link));
erts_lcnt_destroy_lock(&(p->lock.lcnt_status));
+ erts_lcnt_destroy_lock(&(p->lock.lcnt_trace));
}
static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
@@ -1138,10 +1149,11 @@ static void lcnt_enable_proc_lock_count(Process *proc, int enable) {
void erts_lcnt_proc_lock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock(&(lock->lcnt_main)); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock(&(lock->lcnt_msgq)); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock(&(lock->lcnt_status)); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock(&(lock->lcnt_trace)); }
}
void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
@@ -1150,44 +1162,50 @@ void erts_lcnt_proc_lock_post_x(erts_proc_lock_t *lock, ErtsProcLocks locks,
if (locks & ERTS_PROC_LOCK_MAIN) {
erts_lcnt_lock_post_x(&(lock->lcnt_main), file, line);
}
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
+ }
if (locks & ERTS_PROC_LOCK_MSGQ) {
erts_lcnt_lock_post_x(&(lock->lcnt_msgq), file, line);
}
if (locks & ERTS_PROC_LOCK_BTM) {
erts_lcnt_lock_post_x(&(lock->lcnt_btm), file, line);
}
- if (locks & ERTS_PROC_LOCK_LINK) {
- erts_lcnt_lock_post_x(&(lock->lcnt_link), file, line);
- }
if (locks & ERTS_PROC_LOCK_STATUS) {
erts_lcnt_lock_post_x(&(lock->lcnt_status), file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ erts_lcnt_lock_post_x(&(lock->lcnt_trace), file, line);
+ }
}
void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_lock_unaquire(&(lock->lcnt_main)); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_lock_unaquire(&(lock->lcnt_msgq)); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_lock_unaquire(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_lock_unaquire(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_lock_unaquire(&(lock->lcnt_status)); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_lock_unaquire(&(lock->lcnt_trace)); }
}
void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_unlock(&(lock->lcnt_main)); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_unlock(&(lock->lcnt_msgq)); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_unlock(&(lock->lcnt_btm)); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_unlock(&(lock->lcnt_link)); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_unlock(&(lock->lcnt_status)); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_unlock(&(lock->lcnt_trace)); }
}
void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res) {
if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK)) return;
if (locks & ERTS_PROC_LOCK_MAIN) { erts_lcnt_trylock(&(lock->lcnt_main), res); }
+ if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
if (locks & ERTS_PROC_LOCK_MSGQ) { erts_lcnt_trylock(&(lock->lcnt_msgq), res); }
if (locks & ERTS_PROC_LOCK_BTM) { erts_lcnt_trylock(&(lock->lcnt_btm), res); }
- if (locks & ERTS_PROC_LOCK_LINK) { erts_lcnt_trylock(&(lock->lcnt_link), res); }
if (locks & ERTS_PROC_LOCK_STATUS) { erts_lcnt_trylock(&(lock->lcnt_status), res); }
+ if (locks & ERTS_PROC_LOCK_TRACE) { erts_lcnt_trylock(&(lock->lcnt_trace), res); }
} /* reversed logic */
#endif /* ERTS_ENABLE_LOCK_COUNT */
@@ -1224,6 +1242,10 @@ erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line
lck.id = lc_id.proc_lock_status;
erts_lc_lock_x(&lck,file,line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_lock_x(&lck,file,line);
+ }
}
void
@@ -1253,6 +1275,10 @@ erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked,
lck.id = lc_id.proc_lock_status;
erts_lc_trylock_x(locked, &lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_trylock_x(locked, &lck, file, line);
+ }
}
void
@@ -1261,6 +1287,10 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_unlock(&lck);
@@ -1292,6 +1322,10 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_might_unlock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_might_unlock(&lck);
@@ -1323,6 +1357,8 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
erts_lc_might_unlock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_might_unlock(&p->lock.status.lc);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_might_unlock(&p->lock.trace.lc);
#endif
}
@@ -1354,6 +1390,10 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
lck.id = lc_id.proc_lock_status;
erts_lc_require_lock(&lck, file, line);
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_require_lock(&lck, file, line);
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
if (locks & ERTS_PROC_LOCK_MAIN)
erts_lc_require_lock(&p->lock.main.lc, file, line);
@@ -1365,6 +1405,8 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char *file,
erts_lc_require_lock(&p->lock.btm.lc, file, line);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_require_lock(&p->lock.status.lc, file, line);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_require_lock(&p->lock.trace.lc, file, line);
#endif
}
@@ -1375,6 +1417,10 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ lck.id = lc_id.proc_lock_trace;
+ erts_lc_unrequire_lock(&lck);
+ }
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
erts_lc_unrequire_lock(&lck);
@@ -1406,6 +1452,8 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
erts_lc_unrequire_lock(&p->lock.btm.lc);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_lc_unrequire_lock(&p->lock.status.lc);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_lc_unrequire_lock(&p->lock.trace.lc);
#endif
}
@@ -1429,6 +1477,8 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_btm;
else if (locks & ERTS_PROC_LOCK_STATUS)
lck.id = lc_id.proc_lock_status;
+ else if (locks & ERTS_PROC_LOCK_TRACE)
+ lck.id = lc_id.proc_lock_trace;
else
erts_lc_fail("Unknown proc lock found");
@@ -1441,14 +1491,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
void erts_proc_lc_chk_only_proc_main(Process *p)
{
-#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->common.id,
- ERTS_LC_FLG_LT_PROCLOCK);
- erts_lc_check_exact(&proc_main, 1);
-#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_check_exact(&p->lock.main.lc, 1);
-#endif
+ erts_proc_lc_chk_only_proc(p, ERTS_PROC_LOCK_MAIN);
}
#if ERTS_PROC_LOCK_OWN_IMPL
@@ -1456,14 +1499,67 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks)
+{
+ int have_locks_len = 0;
+#if ERTS_PROC_LOCK_OWN_IMPL
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT};
+ if (locks & ERTS_PROC_LOCK_MAIN) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_main;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_LINK) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_link;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_MSGQ) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_BTM) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_btm;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_STATUS) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_status;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_lc_lock_t have_locks[6];
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ have_locks[have_locks_len++] = p->lock.main.lc;
+ if (locks & ERTS_PROC_LOCK_LINK)
+ have_locks[have_locks_len++] = p->lock.link.lc;
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ have_locks[have_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_BTM)
+ have_locks[have_locks_len++] = p->lock.btm.lc;
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ have_locks[have_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
+#endif
+ erts_lc_check_exact(have_locks, have_locks_len);
+}
+
void
erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
{
int have_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
if (locks & ERTS_PROC_LOCK_MAIN) {
@@ -1486,8 +1582,12 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len].id = lc_id.proc_lock_status;
have_locks[have_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[5];
+ erts_lc_lock_t have_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
if (locks & ERTS_PROC_LOCK_LINK)
@@ -1498,6 +1598,8 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len++] = p->lock.btm.lc;
if (locks & ERTS_PROC_LOCK_STATUS)
have_locks[have_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
#endif
erts_lc_check(have_locks, have_locks_len, NULL, 0);
}
@@ -1508,12 +1610,14 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
int have_locks_len = 0;
int have_not_locks_len = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t have_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
- erts_lc_lock_t have_not_locks[5] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ erts_lc_lock_t have_not_locks[6] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
+ ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
@@ -1557,9 +1661,17 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
have_not_locks[have_not_locks_len++].extra = p->common.id;
}
+ if (locks & ERTS_PROC_LOCK_TRACE) {
+ have_locks[have_locks_len].id = lc_id.proc_lock_trace;
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+ else {
+ have_not_locks[have_not_locks_len].id = lc_id.proc_lock_trace;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
+ }
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t have_locks[5];
- erts_lc_lock_t have_not_locks[5];
+ erts_lc_lock_t have_locks[6];
+ erts_lc_lock_t have_not_locks[6];
if (locks & ERTS_PROC_LOCK_MAIN)
have_locks[have_locks_len++] = p->lock.main.lc;
@@ -1581,6 +1693,10 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
have_locks[have_locks_len++] = p->lock.status.lc;
else
have_not_locks[have_not_locks_len++] = p->lock.status.lc;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ have_locks[have_locks_len++] = p->lock.trace.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.trace.lc;
#endif
erts_lc_check(have_locks, have_locks_len,
@@ -1590,10 +1706,10 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
ErtsProcLocks
erts_proc_lc_my_proc_locks(Process *p)
{
- int resv[5];
+ int resv[6];
ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
- erts_lc_lock_t locks[5] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
+ erts_lc_lock_t locks[6] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
@@ -1607,16 +1723,20 @@ erts_proc_lc_my_proc_locks(Process *p)
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
p->common.id,
+ ERTS_LC_FLG_LT_PROCLOCK),
+ ERTS_LC_LOCK_INIT(lc_id.proc_lock_trace,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_lc_lock_t locks[5] = {p->lock.main.lc,
+ erts_lc_lock_t locks[6] = {p->lock.main.lc,
p->lock.link.lc,
p->lock.msgq.lc,
p->lock.btm.lc,
- p->lock.status.lc};
+ p->lock.status.lc,
+ p->lock.trace.lc};
#endif
- erts_lc_have_locks(resv, locks, 5);
+ erts_lc_have_locks(resv, locks, 6);
if (resv[0])
res |= ERTS_PROC_LOCK_MAIN;
if (resv[1])
@@ -1627,6 +1747,8 @@ erts_proc_lc_my_proc_locks(Process *p)
res |= ERTS_PROC_LOCK_BTM;
if (resv[4])
res |= ERTS_PROC_LOCK_STATUS;
+ if (resv[5])
+ res |= ERTS_PROC_LOCK_TRACE;
return res;
}
@@ -1634,14 +1756,15 @@ erts_proc_lc_my_proc_locks(Process *p)
void
erts_proc_lc_chk_no_proc_locks(char *file, int line)
{
- int resv[5];
- int ids[5] = {lc_id.proc_lock_main,
+ int resv[6];
+ int ids[6] = {lc_id.proc_lock_main,
lc_id.proc_lock_link,
lc_id.proc_lock_msgq,
lc_id.proc_lock_btm,
- lc_id.proc_lock_status};
- erts_lc_have_lock_ids(resv, ids, 5);
- if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4])) {
+ lc_id.proc_lock_status,
+ lc_id.proc_lock_trace};
+ erts_lc_have_lock_ids(resv, ids, 6);
+ if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3] || resv[4] || resv[5])) {
erts_lc_fail("%s:%d: Thread has process locks locked when expected "
"not to have any process locks locked",
file, line);
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index ac324c6f3e..d81ef63af2 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -66,7 +66,7 @@
#endif
-#define ERTS_PROC_LOCK_MAX_BIT 4
+#define ERTS_PROC_LOCK_MAX_BIT 5
typedef erts_aint32_t ErtsProcLocks;
@@ -84,6 +84,7 @@ typedef struct erts_proc_lock_t_ {
erts_lcnt_lock_t lcnt_msgq;
erts_lcnt_lock_t lcnt_btm;
erts_lcnt_lock_t lcnt_status;
+ erts_lcnt_lock_t lcnt_trace;
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_mtx_t main;
@@ -91,6 +92,7 @@ typedef struct erts_proc_lock_t_ {
erts_mtx_t msgq;
erts_mtx_t btm;
erts_mtx_t status;
+ erts_mtx_t trace;
#else
# error "no implementation"
#endif
@@ -137,9 +139,18 @@ typedef struct erts_proc_lock_t_ {
* Protects the following fields in the process structure:
* * pending_suspenders
* * suspendee
+ * * sys_tasks
* * ...
*/
-#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
+#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << 4)
+
+/*
+ * Trace message lock:
+ * Protects the order in which messages are sent
+ * from trace NIFs. This lock is taken inside enif_send.
+ *
+ */
+#define ERTS_PROC_LOCK_TRACE (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
/*
* Special fields:
@@ -154,8 +165,8 @@ typedef struct erts_proc_lock_t_ {
* all process locks are held, and are allowed to be read if
* at least one process lock (whichever one doesn't matter)
* is held:
- * * tracer_proc
- * * tracer_flags
+ * * common.tracer
+ * * common.trace_flags
*
* The following fields are only allowed to be accessed if
* both the schedule queue lock and at least one process lock
@@ -208,7 +219,7 @@ typedef struct erts_proc_lock_t_ {
((((ErtsProcLocks) 1) << (ERTS_PROC_LOCK_MAX_BIT + 1)) - 1)
#define ERTS_PROC_LOCKS_ALL_MINOR (ERTS_PROC_LOCKS_ALL \
- & ~ERTS_PROC_LOCK_MAIN)
+ & ~ERTS_PROC_LOCK_MAIN)
#define ERTS_PIX_LOCKS_BITS 10
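
Raising ERTS_PROC_LOCK_MAX_BIT to 5 puts the new trace lock in the highest bit while ERTS_PROC_LOCKS_ALL still derives every valid bit from that constant. A small self-contained illustration of how such single-bit lock flags compose (shortened names, not the ERTS headers):

    typedef unsigned int locks_t;

    #define LOCK_MAX_BIT 5
    #define LOCK_MAIN    ((locks_t) 1 << 0)
    #define LOCK_STATUS  ((locks_t) 1 << 4)
    #define LOCK_TRACE   ((locks_t) 1 << LOCK_MAX_BIT)
    #define LOCKS_ALL    (((locks_t) 1 << (LOCK_MAX_BIT + 1)) - 1)  /* 0x3f */
    #define LOCKS_ALL_MINOR (LOCKS_ALL & ~LOCK_MAIN)

    /* Testing and clearing a flag from a held-locks set: */
    static int holds_trace(locks_t held)    { return (held & LOCK_TRACE) != 0; }
    static locks_t drop_trace(locks_t held) { return held & ~LOCK_TRACE; }
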
@@ -260,6 +271,7 @@ void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_only_proc_main(Process *p);
+void erts_proc_lc_chk_only_proc(Process *p, ErtsProcLocks locks);
void erts_proc_lc_chk_no_proc_locks(char *file, int line);
ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p);
int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks);
@@ -477,9 +489,15 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_STATUS)
if (erts_mtx_trylock(&p->lock.status) == EBUSY)
goto busy_status;
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ if (erts_mtx_trylock(&p->lock.trace) == EBUSY)
+ goto busy_trace;
return 0;
+busy_trace:
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_unlock(&p->lock.trace);
busy_status:
if (locks & ERTS_PROC_LOCK_BTM)
erts_mtx_unlock(&p->lock.btm);
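
The raw trylock above takes the mutexes in lock order and, on EBUSY, unwinds through the `busy_*` labels in reverse order so nothing stays held on failure. A simplified sketch of that staged trylock-with-rollback idiom using plain pthread mutexes (illustrative only; the real code also honours the requested lock set):

    #include <pthread.h>

    /* Try to take three mutexes in a fixed order; on failure release
     * whatever was taken, in reverse order, and report busy. */
    static int try_lock_all(pthread_mutex_t *a, pthread_mutex_t *b,
                            pthread_mutex_t *c)
    {
        if (pthread_mutex_trylock(a) != 0)
            return -1;
        if (pthread_mutex_trylock(b) != 0)
            goto busy_b;
        if (pthread_mutex_trylock(c) != 0)
            goto busy_c;
        return 0;                        /* all three held */

    busy_c:
        pthread_mutex_unlock(b);
    busy_b:
        pthread_mutex_unlock(a);
        return -1;
    }
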
@@ -568,6 +586,8 @@ erts_smp_proc_lock__(Process *p,
erts_mtx_lock(&p->lock.btm);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_lock(&p->lock.status);
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_lock(&p->lock.trace);
#ifdef ERTS_PROC_LOCK_DEBUG
erts_proc_lock_op_debug(p, locks, 1);
@@ -653,6 +673,8 @@ erts_smp_proc_unlock__(Process *p,
erts_proc_lock_op_debug(p, locks, 0);
#endif
+ if (locks & ERTS_PROC_LOCK_TRACE)
+ erts_mtx_unlock(&p->lock.trace);
if (locks & ERTS_PROC_LOCK_STATUS)
erts_mtx_unlock(&p->lock.status);
if (locks & ERTS_PROC_LOCK_BTM)
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index 97df8bc3fc..a5931ffc25 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -37,14 +37,16 @@
#include "erl_alloc.h"
#include "erl_monitors.h"
-#define ERTS_TRACER_PROC(P) ((P)->common.tracer_proc)
+#define ERTS_TRACER(P) ((P)->common.tracer)
+#define ERTS_TRACER_MODULE(T) (CAR(list_val(T)))
+#define ERTS_TRACER_STATE(T) (CDR(list_val(T)))
#define ERTS_TRACE_FLAGS(P) ((P)->common.trace_flags)
#define ERTS_P_LINKS(P) ((P)->common.u.alive.links)
#define ERTS_P_MONITORS(P) ((P)->common.u.alive.monitors)
#define IS_TRACED(p) \
- (ERTS_TRACER_PROC((p)) != NIL)
+ (ERTS_TRACER(p) != NIL)
#define ARE_TRACE_FLAGS_ON(p,tf) \
((ERTS_TRACE_FLAGS((p)) & (tf|F_SENSITIVE)) == (tf))
#define IS_TRACED_FL(p,tf) \
@@ -56,7 +58,7 @@ typedef struct {
erts_atomic_t atmc;
Sint sint;
} refc;
- Eterm tracer_proc;
+ ErtsTracer tracer;
Uint trace_flags;
erts_smp_atomic_t timer;
union {
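
The macros above reflect the new tracer representation: ERTS_TRACER(P) is either the nil tracer or a two-cell term whose head is the tracer module and whose tail is the opaque tracer state handed back to that module's callbacks. A minimal model of that shape in plain C, illustrative only and not the ERTS terms:

    #include <stddef.h>

    /* Simplified model of the tracer field: nil (no tracer) or a pair of
     * tracer module and tracer state, mirroring ERTS_TRACER_MODULE()
     * and ERTS_TRACER_STATE() above. */
    struct tracer { const char *module; void *state; };

    static const struct tracer tracer_nil = { NULL, NULL };

    static int tracer_is_nil(const struct tracer *t) { return t->module == NULL; }

    static void use_tracer(const struct tracer *t)
    {
        if (!tracer_is_nil(t)) {
            /* t->module selects the callback set; t->state is passed back
             * to those callbacks unchanged. */
        }
    }
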
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 947def7398..346404fe2a 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1966,7 +1966,7 @@ send_time_offset_changed_notifications(void *new_offsetp)
*patch_refp = ref;
ASSERT(hsz == size_object(message_template));
message = copy_struct(message_template, hsz, &hp, ohp);
- erts_queue_message(rp, &rp_locks, mp, message, NIL);
+ erts_queue_message(rp, &rp_locks, mp, message);
}
erts_smp_proc_unlock(rp, rp_locks);
}
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 1654ea58d9..1e6ae8c82e 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -39,6 +39,7 @@
#include "erl_bits.h"
#include "erl_thr_progress.h"
#include "erl_bif_unique.h"
+#include "erl_map.h"
#if 0
#define DEBUG_PRINTOUTS
@@ -46,17 +47,13 @@
#undef DEBUG_PRINTOUTS
#endif
-extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
-extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
Export exp_send, exp_receive, exp_timeout;
-static Eterm system_seq_tracer;
+static ErtsTracer system_seq_tracer;
static Uint default_trace_flags;
-static Eterm default_tracer;
+static ErtsTracer default_tracer;
static Eterm system_monitor;
static Eterm system_profile;
@@ -70,11 +67,8 @@ static erts_smp_rwmtx_t sys_trace_rwmtx;
enum ErtsSysMsgType {
SYS_MSG_TYPE_UNDEFINED,
- SYS_MSG_TYPE_TRACE,
- SYS_MSG_TYPE_SEQTRACE,
SYS_MSG_TYPE_SYSMON,
SYS_MSG_TYPE_ERRLGR,
- SYS_MSG_TYPE_PROC_MSG,
SYS_MSG_TYPE_SYSPROF
};
@@ -298,43 +292,6 @@ write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer)
return res;
}
-/*
- * Patch a timestamp into a tuple. The tuple MUST be the last thing
- * built on the heap before the call, and the timestamp MUST be
- * the last thing after the call. This since patch_ts() might adjust
- * the size of the used area.
- */
-
-#define PATCH_TS__(Type, Tuple, Hp, Bp, Tracer) \
- do { \
- int ts_type__ = (Type); \
- if (ts_type__) \
- patch_ts(ts_type__, (Tuple), (Hp), (Bp), (Tracer)); \
- } while (0)
-
-#ifdef ERTS_SMP
-#define PATCH_TS(Type, Tuple, Hp, Bp, Tracer) \
- PATCH_TS__((Type), (Tuple), (Hp), (Bp), NULL)
-#else
-#define PATCH_TS(Type, Tuple, Hp, Bp, Tracer) \
- PATCH_TS__((Type), (Tuple), (Hp), (Bp), (Tracer))
-#endif
-
-static ERTS_INLINE void
-patch_ts(int ts_type, Eterm tuple, Eterm* hp, ErlHeapFragment *bp, Process *tracer)
-{
- Eterm *tptr = tuple_val(tuple);
- int arity = arityval(*tptr);
-
- ASSERT(ts_type);
- ASSERT((tptr+arity+1) == hp);
-
- tptr[0] = make_arityval(arity+1);
- tptr[1] = am_trace_ts;
-
- *hp = write_ts(ts_type, hp+1, bp, tracer);
-}
-
#ifdef ERTS_SMP
static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
Eterm from,
@@ -349,6 +306,14 @@ static void enqueue_sys_msg(enum ErtsSysMsgType type,
static void init_sys_msg_dispatcher(void);
#endif
+static void init_tracer_nif(void);
+static int tracer_cmp_fun(void*, void*);
+static HashValue tracer_hash_fun(void*);
+static void *tracer_alloc_fun(void*);
+static void tracer_free_fun(void*);
+
+typedef struct ErtsTracerNif_ ErtsTracerNif;
+
void erts_init_trace(void) {
erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
@@ -363,79 +328,38 @@ void erts_init_trace(void) {
erts_system_monitor_clear(NULL);
erts_system_profile_clear(NULL);
default_trace_flags = F_INITIAL_TRACE_FLAGS;
- default_tracer = NIL;
- system_seq_tracer = am_false;
+ default_tracer = erts_tracer_nil;
+ system_seq_tracer = erts_tracer_nil;
#ifdef ERTS_SMP
init_sys_msg_dispatcher();
#endif
+ init_tracer_nif();
}
-static Eterm system_seq_tracer;
-
#define ERTS_ALLOC_SYSMSG_HEAP(SZ, BPP, OHPP, UNUSED) \
(*(BPP) = new_message_buffer((SZ)), \
*(OHPP) = &(*(BPP))->off_heap, \
(*(BPP))->mem)
-#ifdef ERTS_SMP
-#define ERTS_ENQ_TRACE_MSG(FPID, TPID, MSG, BP) \
-do { \
- ERTS_LC_ASSERT(erts_smp_lc_mtx_is_locked(&smq_mtx)); \
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE, (FPID), (TPID), (MSG), (BP)); \
-} while(0)
-#else
-#define ERTS_ENQ_TRACE_MSG(FPID, TPROC, MSG, BP) \
- do { \
- ErtsMessage *mp__ = erts_alloc_message(0, NULL); \
- mp__->data.heap_frag = (BP); \
- erts_queue_message((TPROC), NULL, mp__, (MSG), NIL); \
- } while (0)
-#endif
-
-/*
- * NOTE that the ERTS_GET_TRACER_REF() returns from the function (!!!)
- * using it, and resets the parameters used if the tracer is invalid, i.e.,
- * use it with extreme care!
- */
-#ifdef ERTS_SMP
-#define ERTS_NULL_TRACER_REF NIL
-#define ERTS_TRACER_REF_TYPE Eterm
- /* In the smp case, we never find the tracer invalid here (the sys
- message dispatcher thread takes care of that). */
-#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
-do { (RES) = (TPID); } while(0)
-int
-erts_is_tracer_proc_valid(Process* p)
-{
- return 1;
-}
-#else
-#define ERTS_NULL_TRACER_REF NULL
-#define ERTS_TRACER_REF_TYPE Process *
-#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
-do { \
- (RES) = erts_proc_lookup((TPID)); \
- if (!(RES) || !(ERTS_TRACE_FLAGS((RES)) & F_TRACER)) { \
- (TPID) = NIL; \
- (TRACEE_FLGS) &= ~TRACEE_FLAGS; \
- return; \
- } \
-} while (0)
-int
-erts_is_tracer_proc_valid(Process* p)
-{
- Process* tracer;
+static ERTS_INLINE int
+send_to_tracer_nif_raw(Process *c_p, Process *tracee, const ErtsTracer tracer,
+ Uint trace_flags, Eterm t_p_id, ErtsTracerNif *tnif,
+ Eterm tag, Eterm msg, Eterm extra, Eterm pam_result);
+static ERTS_INLINE int
+send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
+ Eterm t_p_id, ErtsTracerNif *tnif, Eterm tag,
+ Eterm msg, Eterm extra);
+static ERTS_INLINE Eterm
+call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+ ErtsTracerNif **tnif_ref, Eterm tag, Eterm t_p_id);
+static int
+is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p,
+ ErtsTracerNif **tnif_ret, Eterm tag);
- tracer = erts_proc_lookup(ERTS_TRACER_PROC(p));
- if (tracer && ERTS_TRACE_FLAGS(tracer) & F_TRACER) {
- return 1;
- } else {
- ERTS_TRACER_PROC(p) = NIL;
- ERTS_TRACE_FLAGS(p) = ~TRACEE_FLAGS;
- return 0;
- }
-}
-#endif
+#define SEND_TO_TRACER(c_p, tag, msg) \
+ send_to_tracer_nif(c_p, &(c_p)->common, (c_p)->common.id, NULL, tag, \
+ msg, THE_NON_VALUE)
static Uint active_sched;
@@ -450,19 +374,6 @@ static void
exiting_reset(Eterm exiting)
{
erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (exiting == default_tracer) {
- default_tracer = NIL;
- default_trace_flags &= TRACEE_FLAGS;
-#ifdef DEBUG
- default_trace_flags |= F_INITIAL_TRACE_FLAGS;
-#endif
- }
- if (exiting == system_seq_tracer) {
-#ifdef DEBUG_PRINTOUTS
- erts_fprintf(stderr, "seq tracer %T exited\n", exiting);
-#endif
- system_seq_tracer = am_false;
- }
if (exiting == system_monitor) {
#ifdef ERTS_SMP
system_monitor = NIL;
@@ -487,11 +398,7 @@ erts_trace_check_exiting(Eterm exiting)
{
int reset = 0;
erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
- if (exiting == default_tracer)
- reset = 1;
- else if (exiting == system_seq_tracer)
- reset = 1;
- else if (exiting == system_monitor)
+ if (exiting == system_monitor)
reset = 1;
else if (exiting == system_profile)
reset = 1;
@@ -500,23 +407,26 @@ erts_trace_check_exiting(Eterm exiting)
exiting_reset(exiting);
}
-static ERTS_INLINE int
-is_valid_tracer(Eterm tracer)
-{
- return erts_proc_lookup(tracer) || erts_is_valid_tracer_port(tracer);
-}
-
-Eterm
-erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new)
+ErtsTracer
+erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new)
{
- Eterm old;
-
- if (new != am_false && !is_valid_tracer(new))
- return THE_NON_VALUE;
+ ErtsTracer old;
+
+ if (!ERTS_TRACER_IS_NIL(new)) {
+ Eterm nif_result = call_enabled_tracer(
+ NULL, new, NULL,
+ am_trace_status, am_undefined);
+ switch (nif_result) {
+ case am_trace: break;
+ default:
+ return THE_NON_VALUE;
+ }
+ }
erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
old = system_seq_tracer;
- system_seq_tracer = new;
+ system_seq_tracer = erts_tracer_nil;
+ erts_tracer_update(&system_seq_tracer, new);
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old);
@@ -525,47 +435,56 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new)
return old;
}
-Eterm
+ErtsTracer
erts_get_system_seq_tracer(void)
{
- Eterm st;
+ ErtsTracer st;
erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
st = system_seq_tracer;
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "get seq tracer %T\n", st);
#endif
erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+
+ if (st != erts_tracer_nil &&
+ call_enabled_tracer(NULL, st, NULL, am_trace_status, am_undefined) == am_remove) {
+ erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil);
+ st = erts_tracer_nil;
+ }
+
return st;
}
static ERTS_INLINE void
-get_default_tracing(Uint *flagsp, Eterm *tracerp)
+get_default_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
if (!(default_trace_flags & TRACEE_FLAGS))
- default_tracer = NIL;
+ ERTS_TRACER_CLEAR(&default_tracer);
- if (is_nil(default_tracer)) {
+ if (ERTS_TRACER_IS_NIL(default_tracer)) {
default_trace_flags &= ~TRACEE_FLAGS;
- } else if (is_internal_pid(default_tracer)) {
- if (!erts_proc_lookup(default_tracer)) {
- reset_tracer:
- default_trace_flags &= ~TRACEE_FLAGS;
- default_tracer = NIL;
- }
} else {
- ASSERT(is_internal_port(default_tracer));
- if (!erts_is_valid_tracer_port(default_tracer))
- goto reset_tracer;
+ Eterm nif_result = call_enabled_tracer(
+ NULL, default_tracer, NULL,
+ am_trace_status, am_undefined);
+ switch (nif_result) {
+ case am_trace: break;
+ default:
+ default_trace_flags &= ~TRACEE_FLAGS;
+ ERTS_TRACER_CLEAR(&default_tracer);
+ }
}
if (flagsp)
*flagsp = default_trace_flags;
- if (tracerp)
- *tracerp = default_tracer;
+ if (tracerp) {
+ erts_tracer_update(tracerp,default_tracer);
+ }
}
void
-erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp)
+erts_change_default_tracing(int setflags, Uint *flagsp,
+ const ErtsTracer tracer)
{
erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
if (flagsp) {
@@ -574,16 +493,18 @@ erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp)
else
default_trace_flags &= ~(*flagsp);
}
- if (tracerp)
- default_tracer = *tracerp;
- get_default_tracing(flagsp, tracerp);
+
+ erts_tracer_update(&default_tracer, tracer);
+
+ get_default_tracing(flagsp, NULL);
erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
}
void
-erts_get_default_tracing(Uint *flagsp, Eterm *tracerp)
+erts_get_default_tracing(Uint *flagsp, ErtsTracer *tracerp)
{
erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ *tracerp = erts_tracer_nil; /* initialize */
get_default_tracing(flagsp, tracerp);
erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
}
@@ -622,29 +543,21 @@ erts_get_system_profile(void) {
return profile;
}
-#ifdef ERTS_SMP
-static void
-do_send_to_port(Eterm to,
- Port* unused_port,
- Eterm from,
- enum ErtsSysMsgType type,
- Eterm message)
-{
- Uint sz = size_object(message);
- ErlHeapFragment *bp = new_message_buffer(sz);
- Uint *hp = bp->mem;
- Eterm msg = copy_struct(message, sz, &hp, &bp->off_heap);
- enqueue_sys_msg_unlocked(type, from, to, msg, bp);
-}
-
-#define WRITE_SYS_MSG_TO_PORT write_sys_msg_to_port
+#ifdef HAVE_ERTS_NOW_CPU
+# define GET_NOW(m, s, u) \
+do { \
+ if (erts_cpu_timestamp) \
+ erts_get_now_cpu(m, s, u); \
+ else \
+ get_now(m, s, u); \
+} while (0)
#else
-#define WRITE_SYS_MSG_TO_PORT do_send_to_port
+# define GET_NOW(m, s, u) do {get_now(m, s, u);} while (0)
#endif
static void
-WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
+write_sys_msg_to_port(Eterm unused_to,
Port* trace_port,
Eterm unused_from,
enum ErtsSysMsgType unused_type,
@@ -671,150 +584,6 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
erts_free(ERTS_ALC_T_TMP, (void *) buffer);
}
-
-#ifndef ERTS_SMP
-/* Send {trace_ts, Pid, out, 0, Timestamp}
- * followed by {trace_ts, Pid, in, 0, NewTimestamp}
- *
- * 'NewTimestamp' through patch_ts().
- */
-static void
-do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp, int ts_type) {
-#define LOCAL_HEAP_SIZE (5+5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- Eterm message;
- Eterm *hp;
- Eterm mfarity;
-
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- ASSERT(is_pid(pid));
- ASSERT(is_tuple(timestamp));
- ASSERT(*tuple_val(timestamp) == make_arityval(3));
-
- hp = local_heap;
- mfarity = make_small(0);
- message = TUPLE5(hp, am_trace_ts, pid, am_out, mfarity, timestamp);
- /* Note, hp is deliberately NOT incremented since it will be reused */
-
- do_send_to_port(trace_port->common.id,
- trace_port,
- pid,
- SYS_MSG_TYPE_UNDEFINED,
- message);
-
-
- message = TUPLE5(hp, am_trace_ts, pid, am_in, mfarity,
- NIL /* Will be overwritten by timestamp */);
- hp += 6;
- hp[-1] = write_ts(ts_type, hp, NULL, NULL);
-
- do_send_to_port(trace_port->common.id,
- trace_port,
- pid,
- SYS_MSG_TYPE_UNDEFINED,
- message);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-}
-#endif
-
-/* If (c_p != NULL), a fake schedule out/in message pair will be sent,
- * if the driver so requests.
- * It is assumed that 'message' is not an 'out' message.
- *
- * 'c_p' is the currently executing process, "tracee" is the traced process
- * which 'message' concerns => if (*tracee_flags & F_TIMESTAMP_MASK),
- * 'message' must contain a timestamp.
- */
-static void
-send_to_port(Process *c_p, Eterm message,
- Eterm *tracer_pid, Uint *tracee_flags) {
- Port* trace_port;
-#ifndef ERTS_SMP
- int ts_type;
-#define LOCAL_HEAP_SIZE ERTS_TRACE_PATCH_TS_MAX_SIZE
- Eterm ts;
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
-#endif
-
- ASSERT(is_internal_port(*tracer_pid));
-#ifdef ERTS_SMP
- if (is_not_internal_port(*tracer_pid))
- return;
-
- trace_port = NULL;
-#else
-
- trace_port = erts_id2port_sflgs(*tracer_pid,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
-
- if (!trace_port) {
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
- return;
- }
-
- /*
- * Make a fake schedule only if the current process is traced
- * with 'running' and 'timestamp'.
- */
-
- if ( c_p == NULL ||
- (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP_MASK))) {
-#endif
- do_send_to_port(*tracer_pid,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_TRACE,
- message);
-#ifndef ERTS_SMP
- erts_port_release(trace_port);
- return;
- }
-
- /*
- * Note that the process being traced for some type of trace messages
- * (e.g. getting_linked) need not be the current process. That other
- * process might not have timestamps enabled.
- */
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- /* A fake schedule might be needed.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- ts_type = TFLGS_TS_TYPE(c_p);
- ts = write_ts(ts_type, local_heap, NULL, NULL);
-
- trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
- do_send_to_port(*tracer_pid,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_TRACE,
- message);
-
- if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
- /* The driver has just informed us that the last write took a
- * non-neglectible amount of time.
- *
- * We need to fake some trace messages to compensate for the time the
- * current process had to sacrifice for the writing of the previous
- * trace message. We pretend that the process got scheduled out
- * just after writning the real trace message, and now gets scheduled
- * in again.
- */
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts, ts_type);
- }
-
- erts_port_release(trace_port);
-
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#endif
-}
-
#ifndef ERTS_SMP
/* Profile send
* Checks if profiler is port or process
@@ -843,11 +612,11 @@ profile_send(Eterm from, Eterm message) {
0,
ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
if (profiler_port) {
- do_send_to_port(profiler,
- profiler_port,
- NIL, /* or current process->common.id */
- SYS_MSG_TYPE_SYSPROF,
- message);
+ write_sys_msg_to_port(profiler,
+ profiler_port,
+ NIL, /* or current process->common.id */
+ SYS_MSG_TYPE_SYSPROF,
+ message);
erts_port_release(profiler_port);
}
@@ -867,154 +636,26 @@ profile_send(Eterm from, Eterm message) {
else
msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap);
- erts_queue_message(profile_p, NULL, mp, msg, NIL);
+ erts_queue_message(profile_p, NULL, mp, msg);
}
}
#endif
-
-/* A fake schedule out/in message pair will be sent,
- * if the driver so requests.
- *
- * 'c_p' is the currently executing process, may be NULL.
- */
static void
-seq_trace_send_to_port(Process *c_p,
- Eterm seq_tracer,
- Eterm message)
+trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
{
- Port* trace_port;
-#ifndef ERTS_SMP
- int ts_type;
- Eterm ts;
-#define LOCAL_HEAP_SIZE ERTS_TRACE_PATCH_TS_MAX_SIZE
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#endif
+ Eterm tmp, *hp;
+ int curr_func;
+ ErtsTracerNif *tnif = NULL;
- ASSERT(is_internal_port(seq_tracer));
-#ifdef ERTS_SMP
- if (is_not_internal_port(seq_tracer))
+ if (ERTS_TRACER_IS_NIL(ERTS_TRACER(p)))
return;
- trace_port = NULL;
-#else
- trace_port = erts_id2port_sflgs(seq_tracer,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (!trace_port) {
- system_seq_tracer = am_false;
-#ifndef ERTS_SMP
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#endif
- return;
- }
-
- if (c_p == NULL
- || (! IS_TRACED_FL(c_p, F_TRACE_SCHED | F_TIMESTAMP_MASK))) {
-#endif
- do_send_to_port(seq_tracer,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_SEQTRACE,
- message);
-
-#ifndef ERTS_SMP
- erts_port_release(trace_port);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- return;
- }
- /* Make a fake schedule only if the current process is traced
- * with 'running' and 'timestamp'.
- */
-
- /* A fake schedule might be needed.
- * Create a dummy trace message with timestamp to be
- * passed to do_send_schedfix_to_port().
- */
- ts_type = TFLGS_TS_TYPE(c_p);
- ts = write_ts(ts_type, local_heap, NULL, NULL);
-
- trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
- do_send_to_port(seq_tracer,
- trace_port,
- c_p ? c_p->common.id : NIL,
- SYS_MSG_TYPE_SEQTRACE,
- message);
-
- if (trace_port->control_flags & PORT_CONTROL_FLAG_HEAVY) {
- /* The driver has just informed us that the last write took a
- * non-neglectible amount of time.
- *
- * We need to fake some trace messages to compensate for the time the
- * current process had to sacrifice for the writing of the previous
- * trace message. We pretend that the process got scheduled out
- * just after writing the real trace message, and now gets scheduled
- * in again.
- */
- do_send_schedfix_to_port(trace_port, c_p->common.id, ts, ts_type);
- }
-
- erts_port_release(trace_port);
-
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#endif
-}
-
-static ERTS_INLINE void
-send_to_tracer(Process *tracee,
- ERTS_TRACER_REF_TYPE tracer_ref,
- Eterm msg,
- Eterm **hpp,
- ErlHeapFragment *bp,
- int no_fake_sched)
-{
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(tracee));
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (is_internal_pid(ERTS_TRACER_PROC(tracee))) {
- PATCH_TS(TFLGS_TS_TYPE(tracee), msg, *hpp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(tracee->common.id, tracer_ref, msg, bp);
- }
- else {
- ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee)));
- PATCH_TS(TFLGS_TS_TYPE(tracee), msg, *hpp, NULL, NULL);
- send_to_port(no_fake_sched ? NULL : tracee,
- msg,
- &ERTS_TRACER_PROC(tracee),
- &ERTS_TRACE_FLAGS(tracee));
- }
-
- erts_smp_mtx_unlock(&smq_mtx);
-
-}
-
-static void
-trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
-{
-#define LOCAL_HEAP_SIZE (5+4+1+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
- Eterm tmp, mess, *hp;
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF;
- int sched_no, curr_func, to_port, no_fake_sched;
-
- if (is_nil(ERTS_TRACER_PROC(p)))
- return;
-
- no_fake_sched = never_fake_sched;
-
switch (what) {
case am_out:
case am_out_exiting:
case am_out_exited:
- no_fake_sched = 1;
- break;
case am_in:
case am_in_exiting:
break;
@@ -1023,16 +664,8 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
break;
}
- sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO);
- to_port = is_internal_port(ERTS_TRACER_PROC(p));
-
- if (!to_port) {
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
- }
+ if (!is_tracer_proc_enabled(p, locks, &p->common, &tnif, what))
+ return;
if (ERTS_PROC_IS_EXITING(p))
curr_func = 0;
@@ -1042,44 +675,16 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
curr_func = p->current != NULL;
}
- UseTmpHeap(LOCAL_HEAP_SIZE,p);
-
- if (to_port)
- hp = local_heap;
- else {
- Uint size = 5;
- if (curr_func)
- size += 4;
- if (sched_no)
- size += 1;
- size += PATCH_TS_SIZE(p);
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
- }
-
if (!curr_func) {
tmp = make_small(0);
} else {
+ hp = HAlloc(p, 4);
tmp = TUPLE3(hp,p->current[0],p->current[1],make_small(p->current[2]));
hp += 4;
}
- if (!sched_no) {
- mess = TUPLE4(hp, am_trace, p->common.id, what, tmp);
- hp += 5;
- }
- else {
-#ifdef ERTS_SMP
- Eterm sched_id = make_small(p->scheduler_data->no);
-#else
- Eterm sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, tmp);
- hp += 6;
- }
-
- send_to_tracer(p, tracer_ref, mess, &hp, bp, no_fake_sched);
- UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
-#undef LOCAL_HEAP_SIZE
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif,
+ what, tmp, THE_NON_VALUE);
}
/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
@@ -1089,9 +694,9 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
* 'out_exiting', or 'out_exited'.
*/
void
-trace_sched(Process *p, Eterm what)
+trace_sched(Process *p, ErtsProcLocks locks, Eterm what)
{
- trace_sched_aux(p, what, 0);
+ trace_sched_aux(p, locks, what);
}
/* Send {trace_ts, Pid, Send, Msg, DestPid, Timestamp}
@@ -1102,17 +707,13 @@ trace_sched(Process *p, Eterm what)
void
trace_send(Process *p, Eterm to, Eterm msg)
{
- Eterm operation;
- unsigned sz_msg;
- unsigned sz_to;
- Eterm* hp;
- Eterm mess;
-
+ Eterm operation = am_send;
+ ErtsTracerNif *tnif = NULL;
+
if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)) {
return;
}
- operation = am_send;
if (is_internal_pid(to)) {
if (!erts_proc_lookup(to))
goto send_to_non_existing_process;
@@ -1125,117 +726,36 @@ trace_send(Process *p, Eterm to, Eterm msg)
operation = am_atom_put(s, sys_strlen(s));
}
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Uint need;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- sz_msg = size_object(msg);
- sz_to = size_object(to);
- need = sz_msg + sz_to + 6 + PATCH_TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- to = copy_struct(to,
- sz_to,
- &hp,
- off_heap);
- msg = copy_struct(msg,
- sz_msg,
- &hp,
- off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ if (is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, operation))
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, operation, msg, to);
}
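
trace_send() is the first of many trace points in this file rewritten to the same two-step shape: ask the tracer module whether the event is enabled, then forward the event together with the cached ErtsTracerNif. A schematic of that shape, distilled from trace_send(), trace_receive() and trace_proc() below; the function name and the use of am_send as the event tag are placeholders only:

    /* Sketch, not part of the patch. */
    static void
    trace_example_event(Process *c_p, Eterm data)
    {
        ErtsTracerNif *tnif = NULL;

        if (ERTS_TRACER_IS_NIL(ERTS_TRACER(c_p)))
            return;                                   /* nothing traces this process */

        /* May invoke the tracer module's enabled callback; fills in tnif. */
        if (is_tracer_proc_enabled(c_p, ERTS_PROC_LOCK_MAIN,
                                   &c_p->common, &tnif, am_send))
            send_to_tracer_nif(c_p, &c_p->common, c_p->common.id, tnif,
                               am_send, data, THE_NON_VALUE);
    }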
/* Send {trace_ts, Pid, receive, Msg, Timestamp}
* or {trace, Pid, receive, Msg}
*/
void
-trace_receive(Process *rp, Eterm msg)
+trace_receive(Process *c_p, Eterm msg)
{
- Eterm mess;
- size_t sz_msg;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(rp))) {
-#define LOCAL_HEAP_SIZE (5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(rp), mess, hp, NULL, NULL);
- send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Uint hsz;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(rp)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(rp),
- ERTS_TRACE_FLAGS(rp));
-
- sz_msg = size_object(msg);
-
- hsz = sz_msg + 5 + PATCH_TS_SIZE(rp);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref);
-
- msg = copy_struct(msg, sz_msg, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(rp), mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(rp->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_proc_enabled(NULL, 0, &c_p->common,
+ &tnif, am_receive))
+ send_to_tracer_nif(NULL, &c_p->common, c_p->common.id,
+ tnif, am_receive, msg, THE_NON_VALUE);
}
int
seq_trace_update_send(Process *p)
{
- Eterm seq_tracer = erts_get_system_seq_tracer();
+ ErtsTracer seq_tracer = erts_get_system_seq_tracer();
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
- if ((p->common.id == seq_tracer) || have_no_seqtrace(SEQ_TRACE_TOKEN(p))) {
+ if (have_no_seqtrace(SEQ_TRACE_TOKEN(p)) ||
+ (seq_tracer != NIL &&
+ call_enabled_tracer( NULL, seq_tracer, NULL, am_trace_status,
+ p ? p->common.id : am_undefined) != am_trace)
+#ifdef USE_VM_PROBES
+ || (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
+#endif
+ ) {
return 0;
}
SEQ_TRACE_TOKEN_SENDER(p) = p->common.id;
@@ -1263,20 +783,28 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
Eterm receiver, Process *process, Eterm exitfrom)
{
Eterm mess;
- ErlHeapFragment* bp;
Eterm* hp;
Eterm label;
Eterm lastcnt_serial;
Eterm type_atom;
- int sz_exit;
- Eterm seq_tracer;
- int ts_type;
+ ErtsTracer seq_tracer;
+ int seq_tracer_flags = 0;
+#define LOCAL_HEAP_SIZE (64)
+ DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
seq_tracer = erts_get_system_seq_tracer();
ASSERT(is_tuple(token) || is_nil(token));
- if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL ||
- (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE)) {
+ if (token == NIL || (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE) ||
+ ERTS_TRACER_IS_NIL(seq_tracer) ||
+ call_enabled_tracer(
+ NULL, seq_tracer, NULL, am_trace_status,
+ process ? process->common.id : am_undefined) != am_trace) {
+ return;
+ }
+
+ if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
+ /* No flags set, nothing to do */
return;
}
@@ -1289,151 +817,29 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
return; /* To avoid warning */
}
- if ((unsigned_val(SEQ_TRACE_T_FLAGS(token)) & type) == 0) {
- /* No flags set, nothing to do */
- return;
- }
+ UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- if (seq_tracer == am_false) {
- return; /* no need to send anything */
+ hp = local_heap;
+ label = SEQ_TRACE_T_LABEL(token);
+ lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
+ SEQ_TRACE_T_SERIAL(token));
+ hp += 3;
+ if (exitfrom != NIL) {
+ msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
+ hp += 4;
}
+ mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token),
+ receiver, msg);
+ hp += 6;
- ts_type = ERTS_SEQTFLGS2TSTYPE(unsigned_val(SEQ_TRACE_T_FLAGS(token)));
-
- if (is_internal_port(seq_tracer)) {
-#define LOCAL_HEAP_SIZE (60 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ seq_tracer_flags |= ERTS_SEQTFLGS2TFLGS(unsigned_val(SEQ_TRACE_T_FLAGS(token)));
- hp = local_heap;
- label = SEQ_TRACE_T_LABEL(token);
- lastcnt_serial = TUPLE2(hp, SEQ_TRACE_T_LASTCNT(token),
- SEQ_TRACE_T_SERIAL(token));
- hp += 3;
- if (exitfrom != NIL) {
- msg = TUPLE3(hp, am_EXIT, exitfrom, msg);
- hp += 4;
- }
- mess = TUPLE5(hp, type_atom, lastcnt_serial, SEQ_TRACE_T_SENDER(token),
- receiver, msg);
- hp += 6;
+ send_to_tracer_nif_raw(NULL, process, seq_tracer, seq_tracer_flags,
+ label, NULL, am_seq_trace, mess,
+ THE_NON_VALUE, am_true);
- erts_smp_mtx_lock(&smq_mtx);
- if (!ts_type) {
- mess = TUPLE3(hp, am_seq_trace, label, mess);
- seq_trace_send_to_port(NULL, seq_tracer, mess);
- } else {
- mess = TUPLE4(hp, am_seq_trace, label, mess,
- NIL /* Will be overwritten by timestamp */);
- hp += 5;
- hp[-1] = write_ts(ts_type, hp, NULL, NULL);
- seq_trace_send_to_port(process, seq_tracer, mess);
- }
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
-#ifndef ERTS_SMP
- Process* tracer;
-#endif
- Eterm sender_copy;
- Eterm receiver_copy;
- Eterm m2;
- Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender,
- sz_exitfrom, sz_receiver;
-
- ASSERT(is_internal_pid(seq_tracer));
-
-#ifndef ERTS_SMP
-
- tracer = erts_proc_lookup(seq_tracer);
- if (!tracer) {
- system_seq_tracer = am_false;
- return; /* no need to send anything */
- }
-#endif
- if (receiver == seq_tracer) {
- return; /* no need to send anything */
- }
-
- sz_label = size_object(SEQ_TRACE_T_LABEL(token));
- sz_sender = size_object(SEQ_TRACE_T_SENDER(token));
- sz_receiver = size_object(receiver);
- sz_lastcnt_serial = 3; /* TUPLE2 */
- sz_msg = size_object(msg);
-
- sz_ts = patch_ts_size(ts_type);
- if (exitfrom != NIL) {
- sz_exit = 4; /* create {'EXIT',exitfrom,msg} */
- sz_exitfrom = size_object(exitfrom);
- }
- else {
- sz_exit = 0;
- sz_exitfrom = 0;
- }
- bp = new_message_buffer(4 /* TUPLE3 */ + sz_ts + 6 /* TUPLE5 */
- + sz_lastcnt_serial + sz_label + sz_msg
- + sz_exit + sz_exitfrom
- + sz_sender + sz_receiver);
- hp = bp->mem;
- label = copy_struct(SEQ_TRACE_T_LABEL(token), sz_label, &hp, &bp->off_heap);
- lastcnt_serial = TUPLE2(hp,SEQ_TRACE_T_LASTCNT(token),SEQ_TRACE_T_SERIAL(token));
- hp += 3;
- m2 = copy_struct(msg, sz_msg, &hp, &bp->off_heap);
- if (sz_exit) {
- Eterm exitfrom_copy = copy_struct(exitfrom,
- sz_exitfrom,
- &hp,
- &bp->off_heap);
- m2 = TUPLE3(hp, am_EXIT, exitfrom_copy, m2);
- hp += 4;
- }
- sender_copy = copy_struct(SEQ_TRACE_T_SENDER(token),
- sz_sender,
- &hp,
- &bp->off_heap);
- receiver_copy = copy_struct(receiver,
- sz_receiver,
- &hp,
- &bp->off_heap);
- mess = TUPLE5(hp,
- type_atom,
- lastcnt_serial,
- sender_copy,
- receiver_copy,
- m2);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (!ts_type)
- mess = TUPLE3(hp, am_seq_trace, label, mess);
- else {
- mess = TUPLE4(hp, am_seq_trace, label, mess,
- NIL /* Will be overwritten by timestamp */);
- hp += 5;
- /* Write timestamp in element 6 of the 'msg' tuple */
- hp[-1] = write_ts(ts_type, hp, bp,
-#ifndef ERTS_SMP
- tracer
-#else
- NULL
-#endif
- );
- }
-
-#ifdef ERTS_SMP
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SEQTRACE, NIL, NIL, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
-#else
- /* trace_token must be NIL here */
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(tracer, NULL, mp, mess, NIL);
- }
-#endif
- }
}
/* Send {trace_ts, Pid, return_to, {Mod, Func, Arity}, Timestamp}
@@ -1442,63 +848,19 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
void
erts_trace_return_to(Process *p, BeamInstr *pc)
{
-#define LOCAL_HEAP_SIZE (4+5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- Eterm* hp;
Eterm mfa;
- Eterm mess;
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
BeamInstr *code_ptr = find_function_from_pc(pc);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
if (!code_ptr) {
mfa = am_undefined;
} else {
+ Eterm *hp = HAlloc(p, 4);
mfa = TUPLE3(hp, code_ptr[0], code_ptr[1], make_small(code_ptr[2]));
- hp += 4;
}
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_return_to, mfa);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
-
- /*
- * Find the tracer.
- */
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- size = size_object(mess);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-
- /*
- * Copy the trace message into the buffer and enqueue it.
- */
- mess = copy_struct(mess, size, &hp, off_heap);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- }
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
+ SEND_TO_TRACER(p, am_return_to, mfa);
}
@@ -1506,114 +868,53 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* or {trace, Pid, return_from, {Mod, Name, Arity}, Retval}
*/
void
-erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
+erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa;
- Eterm mess;
- Eterm mod, name;
+ Eterm mfa, mod, name;
int arity;
Uint meta_flags, *tracee_flags;
- int ts_type;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
-
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
+
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
+ tracer = &ERTS_TRACER(p);
}
- if (is_nil(*tracer_pid)) {
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
/* Trace disabled */
return;
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
- return;
- }
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
+ */
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
}
- if (! (*tracee_flags & F_TRACE_CALLS)) {
- return;
- }
-
+
mod = fi[0];
name = fi[1];
arity = fi[2];
-
- ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
-
- if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
- mfa = TUPLE3(hp, mod, name, make_small(arity));
- hp += 4;
- mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(ts_type, mess, hp, NULL, NULL);
- send_to_port(p, mess, tracer_pid, tracee_flags);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
- unsigned retval_size;
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
-
- retval_size = size_object(retval);
- size = 6 + 4 + retval_size + patch_ts_size(ts_type);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-
- /*
- * Build the trace tuple and put it into receive queue of the tracer process.
- */
-
- mfa = TUPLE3(hp, mod, name, make_small(arity));
- hp += 4;
- retval = copy_struct(retval, retval_size, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ hp = HAlloc(p, 4);
+ mfa = TUPLE3(hp, mod, name, make_small(arity));
+ hp += 4;
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ NULL, am_return_from, mfa, retval, am_true);
}
/* Send {trace_ts, Pid, exception_from, {Mod, Name, Arity}, {Class,Value},
@@ -1625,116 +926,50 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
*/
void
erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
- Eterm *tracer_pid)
+ ErtsTracer *tracer)
{
Eterm* hp;
- Eterm mfa_tuple;
- Eterm cv;
- Eterm mess;
+ Eterm mfa_tuple, cv;
Uint meta_flags, *tracee_flags;
- int ts_type;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
-
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
+
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
+ tracer = &ERTS_TRACER(p);
}
- if (is_nil(*tracer_pid)) {
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
/* Trace disabled */
return;
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
- return;
- }
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
- if (! (*tracee_flags & F_TRACE_CALLS)) {
- return;
- }
+ if (! (*tracee_flags & F_TRACE_CALLS)) {
+ return;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
+ */
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
}
-
- ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
-
- if (is_internal_port(*tracer_pid)) {
-#define LOCAL_HEAP_SIZE (4+3+6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
- hp += 4;
- cv = TUPLE2(hp, class, value);
- hp += 3;
- mess = TUPLE5(hp, am_trace, p->common.id, am_exception_from, mfa_tuple, cv);
- hp += 6;
- ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE);
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(ts_type, mess, hp, NULL, NULL);
- send_to_port(p, mess, tracer_pid, tracee_flags);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- unsigned size;
- unsigned value_size;
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
-
- value_size = size_object(value);
- size = 6 + 4 + 3 + value_size + patch_ts_size(ts_type);
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
-
- /*
- * Build the trace tuple and put it into receive queue of the tracer process.
- */
-
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm) mfa[2]));
- hp += 4;
- value = copy_struct(value, value_size, &hp, off_heap);
- cv = TUPLE2(hp, class, value);
- hp += 3;
- mess = TUPLE5(hp, am_trace, p->common.id,
- am_exception_from, mfa_tuple, cv);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+    hp = HAlloc(p, 7);
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], make_small((Eterm)mfa[2]));
+ hp += 4;
+ cv = TUPLE2(hp, class, value);
+ hp += 3;
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ NULL, am_exception_from, mfa_tuple, cv, am_true);
}
/*
@@ -1753,7 +988,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
*/
Uint32
erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
- Eterm* args, int local, Eterm *tracer_pid)
+ Eterm* args, int local, ErtsTracer *tracer)
{
Eterm* hp;
Eterm mfa_tuple;
@@ -1761,55 +996,50 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
int i;
Uint32 return_flags;
Eterm pam_result = am_true;
- Eterm mess;
Uint meta_flags, *tracee_flags;
- int ts_type;
-#ifdef ERTS_SMP
- Eterm tracee;
-#endif
+ ErtsTracerNif *tnif = NULL;
Eterm transformed_args[MAX_ARG];
- DeclareTypedTmpHeap(ErlSubBin,sub_bin_heap,p);
+ ErtsTracer pre_ms_tracer = erts_tracer_nil;
- ASSERT(tracer_pid);
- if (*tracer_pid == am_true) {
- /* Breakpoint trace enabled without specifying tracer =>
+ ASSERT(tracer);
+ if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) {
+ /* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &ERTS_TRACER_PROC(p);
- }
- if (is_nil(*tracer_pid)) {
- /* Trace disabled */
- return 0;
+ tracer = &ERTS_TRACER(p);
}
- ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->common.id) {
- /* Do not generate trace messages to oneself */
+ if (ERTS_TRACER_IS_NIL(*tracer)) {
+ /* Trace disabled */
return 0;
}
- if (tracer_pid == &ERTS_TRACER_PROC(p)) {
+ ASSERT(IS_TRACER_VALID(*tracer));
+ if (tracer == &ERTS_TRACER(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
-#ifdef ERTS_SMP
- tracee = p->common.id;
-#endif
+ if (!is_tracer_proc_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, am_call)) {
+ return 0;
+ }
} else {
/* Tracer not specified in process structure =>
* tracer specified in breakpoint =>
* meta trace =>
* use fixed flag set instead of process flags
- */
- if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
- /* No trace messages for sensitive processes. */
- return 0;
- }
+ */
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
+ /* No trace messages for sensitive processes. */
+ return 0;
+ }
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
-#ifdef ERTS_SMP
- tracee = NIL;
-#endif
+ switch (call_enabled_tracer(p, *tracer, &tnif, am_call, p->common.id)) {
+ default:
+ case am_remove: *tracer = erts_tracer_nil;
+ case am_discard: return 0;
+ case am_trace: break;
+ }
}
/*
@@ -1820,18 +1050,13 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* temporarily convert any match contexts to sub binaries.
*/
arity = (Eterm) mfa[2];
- UseTmpHeap(ERL_SUB_BIN_SIZE,p);
-#ifdef DEBUG
- sub_bin_heap->thing_word = 0;
-#endif
for (i = 0; i < arity; i++) {
Eterm arg = args[i];
if (is_boxed(arg) && header_is_bin_matchstate(*boxed_val(arg))) {
ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(arg);
ErlBinMatchBuffer* mb = &ms->mb;
Uint bit_size;
-
- ASSERT(sub_bin_heap->thing_word == 0); /* At most one of match context */
+ ErlSubBin *sub_bin_heap = (ErlSubBin *)HAlloc(p, ERL_SUB_BIN_SIZE);
bit_size = mb->size - mb->offset;
sub_bin_heap->thing_word = HEADER_SUB_BIN;
@@ -1848,275 +1073,98 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
}
args = transformed_args;
- ts_type = ERTS_TFLGS2TSTYPE(*tracee_flags);
-
- if (is_internal_port(*tracer_pid)) {
- Eterm local_heap[64+ERTS_TRACE_PATCH_TS_MAX_SIZE+MAX_ARG];
- hp = local_heap;
+ /*
+ * If there is a PAM program, run it. Return if it fails.
+ *
+ * Some precedence rules:
+ *
+     * - No process trace flag, e.g. 'silent' or 'return_to',
+     *   has any effect on meta trace.
+     * - The 'silent' process trace flag silences all call-
+     *   related messages, e.g. 'call', 'return_to' and 'return_from'.
+ * - The {message,_} PAM function does not affect {return_trace}.
+ * - The {message,false} PAM function shall give the same
+ * 'call' trace message as no PAM match.
+ * - The {message,true} PAM function shall give the same
+ * 'call' trace message as a nonexistent PAM program.
+ */
- if (!erts_is_valid_tracer_port(*tracer_pid)) {
-#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &ERTS_TRACER_PROC(p));
- if (is_not_nil(tracee))
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
-#ifdef ERTS_SMP
- if (is_not_nil(tracee))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
-
- /*
- * If there is a PAM program, run it. Return if it fails.
- *
- * Some precedence rules:
- *
- * - No proc flags, e.g 'silent' or 'return_to'
- * has any effect on meta trace.
- * - The 'silent' process trace flag silences all call
- * related messages, e.g 'call', 'return_to' and 'return_from'.
- * - The {message,_} PAM function does not affect {return_trace}.
- * - The {message,false} PAM function shall give the same
- * 'call' trace message as no PAM match.
- * - The {message,true} PAM function shall give the same
- * 'call' trace message as a nonexistent PAM program.
- */
-
- /* BEGIN this code should be the same for port and pid trace */
- return_flags = 0;
- if (match_spec) {
- pam_result = erts_match_set_run(p, match_spec, args, arity,
- ERTS_PAM_TMP_RESULT, &return_flags);
- if (is_non_value(pam_result)) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- }
- if (tracee_flags == &meta_flags) {
- /* Meta trace */
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- } else {
- /* Non-meta trace */
- if (*tracee_flags & F_TRACE_SILENT) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
- return_flags |= MATCH_SET_RETURN_TO_TRACE;
- }
- }
- /* END this code should be the same for port and pid trace */
-
- /*
- * Build the the {M,F,A} tuple in the local heap.
- * (A is arguments or arity.)
- */
-
- if (*tracee_flags & F_TRACE_ARITY_ONLY) {
- mfa_tuple = make_small(arity);
- } else {
- mfa_tuple = NIL;
- for (i = arity-1; i >= 0; i--) {
- mfa_tuple = CONS(hp, args[i], mfa_tuple);
- hp += 2;
- }
- }
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
- hp += 4;
-
- /*
- * Build the trace tuple and send it to the port.
- */
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
- hp += 5;
- if (pam_result != am_true) {
- hp[-5] = make_arityval(5);
- *hp++ = pam_result;
- }
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(ts_type, mess, hp, NULL, NULL);
- send_to_port(p, mess, tracer_pid, tracee_flags);
- erts_smp_mtx_unlock(&smq_mtx);
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return *tracer_pid == NIL ? 0 : return_flags;
+ return_flags = 0;
+ if (match_spec) {
+ /* we have to make a copy of the tracer here as the match spec
+ may remove it, and we still want to generate a trace message */
+ erts_tracer_update(&pre_ms_tracer, *tracer);
+ tracer = &pre_ms_tracer;
+ pam_result = erts_match_set_run(p, match_spec, args, arity,
+ ERTS_PAM_TMP_RESULT, &return_flags);
+ if (is_non_value(pam_result)) {
+ erts_match_set_release_result(p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return 0;
+ }
+ }
+ if (tracee_flags == &meta_flags) {
+ /* Meta trace */
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return return_flags;
+ }
} else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- Process *tracer;
- ERTS_TRACER_REF_TYPE tracer_ref;
-#ifdef ERTS_SMP
- Eterm tpid;
-#endif
- unsigned size;
- unsigned sizes[MAX_ARG];
- unsigned pam_result_size = 0;
- int invalid_tracer;
-
- ASSERT(is_internal_pid(*tracer_pid));
-
- tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- *tracer_pid, ERTS_PROC_LOCK_STATUS);
- if (!tracer)
- invalid_tracer = 1;
- else {
- invalid_tracer = !(ERTS_TRACE_FLAGS(tracer) & F_TRACER);
- erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
- }
+ /* Non-meta trace */
+ if (*tracee_flags & F_TRACE_SILENT) {
+ erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return 0;
+ }
+ if (pam_result == am_false) {
+ erts_match_set_release_result(p);
+ UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ return return_flags;
+ }
+ if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
+ return_flags |= MATCH_SET_RETURN_TO_TRACE;
+ }
+ }
- if (invalid_tracer) {
-#ifdef ERTS_SMP
- ASSERT(is_nil(tracee)
- || tracer_pid == &ERTS_TRACER_PROC(p));
- if (is_not_nil(tracee))
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- *tracee_flags &= ~TRACEE_FLAGS;
- *tracer_pid = NIL;
-#ifdef ERTS_SMP
- if (is_not_nil(tracee))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
+ ASSERT(!ERTS_TRACER_IS_NIL(*tracer));
-#ifdef ERTS_SMP
- tpid = *tracer_pid; /* Need to save tracer pid,
- since *tracer_pid might
- be reset by erts_match_set_run() */
- tracer_ref = tpid;
-#else
- tracer_ref = tracer;
-#endif
-
- /*
- * If there is a PAM program, run it. Return if it fails.
- *
- * See the rules above in the port trace code.
- */
-
- /* BEGIN this code should be the same for port and pid trace */
- return_flags = 0;
- if (match_spec) {
- pam_result = erts_match_set_run(p, match_spec, args, arity,
- ERTS_PAM_TMP_RESULT, &return_flags);
- if (is_non_value(pam_result)) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- }
- if (tracee_flags == &meta_flags) {
- /* Meta trace */
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- } else {
- /* Non-meta trace */
- if (*tracee_flags & F_TRACE_SILENT) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return 0;
- }
- if (pam_result == am_false) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
- if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
- return_flags |= MATCH_SET_RETURN_TO_TRACE;
- }
- }
- /* END this code should be the same for port and pid trace */
-
- /*
- * Calculate number of words needed on heap.
- */
-
- size = 4 + 5; /* Trace tuple + MFA tuple. */
- if (! (*tracee_flags & F_TRACE_ARITY_ONLY)) {
- size += 2*arity;
- for (i = arity-1; i >= 0; i--) {
- sizes[i] = size_object(args[i]);
- size += sizes[i];
- }
- }
- size += patch_ts_size(ts_type);
- if (pam_result != am_true) {
- pam_result_size = size_object(pam_result);
- size += 1 + pam_result_size;
- /* One element in trace tuple + term size. */
- }
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+ /*
+     * Build the {M,F,A} tuple on the process heap.
+ * (A is arguments or arity.)
+ */
- /*
- * Build the the {M,F,A} tuple in the message buffer.
- * (A is arguments or arity.)
- */
-
- if (*tracee_flags & F_TRACE_ARITY_ONLY) {
- mfa_tuple = make_small(arity);
- } else {
- mfa_tuple = NIL;
- for (i = arity-1; i >= 0; i--) {
- Eterm term = copy_struct(args[i], sizes[i], &hp, off_heap);
- mfa_tuple = CONS(hp, term, mfa_tuple);
- hp += 2;
- }
- }
- mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
- hp += 4;
-
- /*
- * Copy the PAM result (if any) onto the heap.
- */
-
- if (pam_result != am_true) {
- pam_result = copy_struct(pam_result, pam_result_size, &hp, off_heap);
- }
- erts_match_set_release_result(p);
+ if (*tracee_flags & F_TRACE_ARITY_ONLY) {
+ hp = HAlloc(p, 4);
+ mfa_tuple = make_small(arity);
+ } else {
+ hp = HAlloc(p, 4 + arity * 2);
+ mfa_tuple = NIL;
+ for (i = arity-1; i >= 0; i--) {
+ mfa_tuple = CONS(hp, args[i], mfa_tuple);
+ hp += 2;
+ }
+ }
+ mfa_tuple = TUPLE3(hp, (Eterm) mfa[0], (Eterm) mfa[1], mfa_tuple);
+ hp += 4;
- /*
- * Build the trace tuple and enqueue it.
- */
-
- mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
- hp += 5;
- if (pam_result != am_true) {
- hp[-5] = make_arityval(5);
- *hp++ = pam_result;
- }
+ /*
+     * Build the trace tuple and send it to the tracer.
+ */
+ send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
+ tnif, am_call, mfa_tuple, THE_NON_VALUE, pam_result);
+ erts_match_set_release_result(p);
- erts_smp_mtx_lock(&smq_mtx);
+ if (match_spec && tracer == &pre_ms_tracer)
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
- PATCH_TS(ts_type, mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(tracee, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- return return_flags;
- }
+ return return_flags;
}
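
The pre_ms_tracer handling above encodes a lifetime rule that also appears in erts_change_default_tracing() and get_default_tracing(): an ErtsTracer taken by reference must be pinned with erts_tracer_update() before anything that might clear the original runs (here, the match-spec program), and released with ERTS_TRACER_CLEAR() afterwards. Reduced to its core, as a sketch rather than the patch itself:

    ErtsTracer pre_ms_tracer = erts_tracer_nil;

    erts_tracer_update(&pre_ms_tracer, *tracer);  /* take a private reference */
    tracer = &pre_ms_tracer;

    /* ... code that may disable or replace the original tracer runs here ... */

    ERTS_TRACER_CLEAR(&pre_ms_tracer);            /* release the private reference */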
/* Sends trace message:
@@ -2128,69 +1176,13 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* 't_p' is the traced process.
*/
void
-trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
+trace_proc(Process *c_p, ErtsProcLocks c_p_locks,
+ Process *t_p, Eterm what, Eterm data)
{
- Eterm mess;
- Eterm* hp;
- int need;
-
- ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
- || erts_thr_progress_is_blocking());
- if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
-#define LOCAL_HEAP_SIZE (5+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, NULL, NULL);
- send_to_port(
-#ifndef ERTS_SMP
- /* No fake schedule out and in again after an exit */
- what == am_exit ? NULL : c_p,
-#else
- /* Fake schedule out and in are never sent when smp enabled */
- c_p,
-#endif
- mess,
- &ERTS_TRACER_PROC(t_p),
- &ERTS_TRACE_FLAGS(t_p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Eterm tmp;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- size_t sz_data;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(t_p),
- ERTS_TRACE_FLAGS(t_p));
-
- sz_data = size_object(data);
-
- need = sz_data + 5 + PATCH_TS_SIZE(t_p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- tmp = copy_struct(data, sz_data, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, tmp);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_proc_enabled(c_p, c_p_locks, &t_p->common, &tnif, what))
+ send_to_tracer_nif(c_p, &t_p->common, t_p->common.id, tnif,
+ what, data, THE_NON_VALUE);
}
@@ -2202,62 +1194,20 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
* and 'args' may be a deep term.
*/
void
-trace_proc_spawn(Process *p, Eterm pid,
+trace_proc_spawn(Process *p, Eterm pid,
Eterm mod, Eterm func, Eterm args)
{
- Eterm mfa;
- Eterm mess;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (4+6+5)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
+ ErtsTracerNif *tnif = NULL;
+ if (is_tracer_proc_enabled(p, ERTS_PROC_LOCKS_ALL &
+ ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE),
+ &p->common, &tnif, am_spawn)) {
+ Eterm mfa;
+ Eterm* hp;
- hp = local_heap;
- mfa = TUPLE3(hp, mod, func, args);
- hp += 4;
- mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, pid, mfa);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
- send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- Eterm tmp;
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
- size_t sz_args, sz_pid;
- Uint need;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- sz_args = size_object(args);
- sz_pid = size_object(pid);
- need = sz_args + 4 + 6 + PATCH_TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
-
- tmp = copy_struct(args, sz_args, &hp, off_heap);
- mfa = TUPLE3(hp, mod, func, tmp);
- hp += 4;
- tmp = copy_struct(pid, sz_pid, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, tmp, mfa);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
+ hp = HAlloc(p, 4);
+ mfa = TUPLE3(hp, mod, func, args);
+ hp += 4;
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, am_spawn, pid, mfa);
}
}
@@ -2291,66 +1241,22 @@ void save_calls(Process *p, Export *e)
void
trace_gc(Process *p, Eterm what)
{
- ErlHeapFragment *bp = NULL;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF; /* Initialized
- to eliminate
- compiler
- warning */
+ ErtsTracerNif *tnif = NULL;
Eterm* hp;
Eterm msg = NIL;
- Uint size;
-
-#define LOCAL_HEAP_SIZE \
- (ERTS_PROCESS_GC_INFO_MAX_SIZE) + \
- 5/*4-tuple */ + ERTS_TRACE_PATCH_TS_MAX_SIZE
- DeclareTmpHeap(local_heap,LOCAL_HEAP_SIZE,p);
+ Uint size = 0;
- UseTmpHeap(LOCAL_HEAP_SIZE,p);
+ if (is_tracer_proc_enabled(
+ p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, what)) {
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- hp = local_heap;
-#ifdef DEBUG
- size = 0;
(void) erts_process_gc_info(p, &size, NULL);
+ hp = HAlloc(p, size);
- size += 5/*4-tuple*/ + PATCH_TS_SIZE(p);
-#endif
- } else {
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- size = 0;
- (void) erts_process_gc_info(p, &size, NULL);
+ msg = erts_process_gc_info(p, NULL, &hp);
- size += 5/*4-tuple*/ + PATCH_TS_SIZE(p);
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(size, &bp, &off_heap, tracer_ref);
+ send_to_tracer_nif(p, &p->common, p->common.id, tnif, what,
+ msg, THE_NON_VALUE);
}
-
- ASSERT(size <= LOCAL_HEAP_SIZE);
-
- msg = erts_process_gc_info(p, NULL, &hp);
-
- msg = TUPLE4(hp, am_trace, p->common.id, what, msg);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
- PATCH_TS(TFLGS_TS_TYPE(p), msg, hp, NULL, NULL);
- send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- }
- else {
- PATCH_TS(TFLGS_TS_TYPE(p), msg, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, msg, bp);
- }
- erts_smp_mtx_unlock(&smq_mtx);
- UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
-#undef LOCAL_HEAP_SIZE
}
void
@@ -2412,7 +1318,7 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2477,7 +1383,7 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2552,7 +1458,7 @@ monitor_long_gc(Process *p, Uint time) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2627,7 +1533,7 @@ monitor_large_heap(Process *p) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
}
@@ -2659,7 +1565,7 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg, NIL);
+ erts_queue_message(monitor_p, NULL, mp, msg);
}
#endif
@@ -2756,71 +1662,13 @@ profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint M
}
-
-/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
- * or {trace, Pid, What, {Mod, Func, Arity}}
- *
- * where 'What' is supposed to be 'in' or 'out'.
- *
- * Virtual scheduling do not fake scheduling for ports.
- */
-
-
-void trace_virtual_sched(Process *p, Eterm what)
-{
- trace_sched_aux(p, what, 1);
-}
-
/* Port profiling */
void
trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
- Eterm mess;
- Eterm* hp;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
- hp += 6;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
- /* No fake schedule */
- send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- size_t sz_data;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- sz_data = 6 + PATCH_TS_SIZE(p);
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
-
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
- hp += 6;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ send_to_tracer_nif(NULL, &p->common, calling_pid, NULL, am_open,
+ p->common.id, drv_name);
}
/* Sends trace message:
@@ -2832,53 +1680,11 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
*/
void
trace_port(Port *t_p, Eterm what, Eterm data) {
- Eterm mess;
- Eterm* hp;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
-
- if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
-#define LOCAL_HEAP_SIZE (5+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
- erts_smp_mtx_lock(&smq_mtx);
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, NULL, NULL);
- /* No fake schedule */
- send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- size_t sz_data;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
-
- sz_data = 5 + PATCH_TS_SIZE(t_p);
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(t_p),
- ERTS_TRACE_FLAGS(t_p));
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
-
- mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
- hp += 5;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(t_p), mess, hp, bp, tracer_ref);
-
- ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, NULL,
+                       what, data, THE_NON_VALUE);
}
/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
@@ -2891,83 +1697,14 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
void
trace_sched_ports(Port *p, Eterm what) {
- trace_sched_ports_where(p,what, make_small(0));
+ trace_sched_ports_where(p, what, make_small(0));
}
-void
-trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
- Eterm mess;
- Eterm* hp;
- int ws = 5;
- Eterm sched_id = am_undefined;
-
- if (is_internal_port(ERTS_TRACER_PROC(p))) {
-#define LOCAL_HEAP_SIZE (6+ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esd = erts_get_scheduler_data();
- if (esd) sched_id = make_small(esd->no);
- else sched_id = am_undefined;
-#else
- sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
- ws = 6;
- } else {
- mess = TUPLE4(hp, am_trace, p->common.id, what, where);
- ws = 5;
- }
- hp += ws;
-
- erts_smp_mtx_lock(&smq_mtx);
-
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, NULL, NULL);
-
- /* No fake scheduling */
- send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
- erts_smp_mtx_unlock(&smq_mtx);
- } else {
- ErlHeapFragment *bp;
- ErlOffHeap *off_heap;
- ERTS_TRACER_REF_TYPE tracer_ref;
-
- ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make place for scheduler id */
-
- ERTS_GET_TRACER_REF(tracer_ref,
- ERTS_TRACER_PROC(p),
- ERTS_TRACE_FLAGS(p));
-
- hp = ERTS_ALLOC_SYSMSG_HEAP(ws+PATCH_TS_SIZE(p), &bp, &off_heap, tracer_ref);
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) {
-#ifdef ERTS_SMP
- ErtsSchedulerData *esd = erts_get_scheduler_data();
- if (esd) sched_id = make_small(esd->no);
- else sched_id = am_undefined;
-#else
- sched_id = make_small(1);
-#endif
- mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
- } else {
- mess = TUPLE4(hp, am_trace, p->common.id, what, where);
- }
- hp += ws;
-
- erts_smp_mtx_lock(&smq_mtx);
+void
+trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) {
- PATCH_TS(TFLGS_TS_TYPE(p), mess, hp, bp, tracer_ref);
- ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
- erts_smp_mtx_unlock(&smq_mtx);
- }
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id,
+                       NULL, what, where, THE_NON_VALUE);
}
/* Port profiling */
@@ -3083,28 +1820,6 @@ profile_runnable_proc(Process *p, Eterm status){
#ifdef ERTS_SMP
-void
-erts_check_my_tracer_proc(Process *p)
-{
- if (is_internal_pid(ERTS_TRACER_PROC(p))) {
- Process *tracer = erts_pid2proc(p,
- ERTS_PROC_LOCK_MAIN,
- ERTS_TRACER_PROC(p),
- ERTS_PROC_LOCK_STATUS);
- int invalid_tracer = (!tracer
- || !(ERTS_TRACE_FLAGS(tracer) & F_TRACER));
- if (tracer)
- erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
- if (invalid_tracer) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- ERTS_TRACE_FLAGS(p) &= ~TRACEE_FLAGS;
- ERTS_TRACER_PROC(p) = NIL;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
-}
-
-
typedef struct ErtsSysMsgQ_ ErtsSysMsgQ;
struct ErtsSysMsgQ_ {
ErtsSysMsgQ *next;
@@ -3170,24 +1885,11 @@ erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_error_logger, msg, bp);
}
-void
-erts_send_sys_msg_proc(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
-{
- ASSERT(is_internal_pid(to));
- enqueue_sys_msg(SYS_MSG_TYPE_PROC_MSG, from, to, msg, bp);
-}
-
#ifdef DEBUG_PRINTOUTS
static void
print_msg_type(ErtsSysMsgQ *smqp)
{
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- erts_fprintf(stderr, "TRACE ");
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- erts_fprintf(stderr, "SEQTRACE ");
- break;
case SYS_MSG_TYPE_SYSMON:
erts_fprintf(stderr, "SYSMON ");
break;
@@ -3197,9 +1899,6 @@ print_msg_type(ErtsSysMsgQ *smqp)
case SYS_MSG_TYPE_ERRLGR:
erts_fprintf(stderr, "ERRLGR ");
break;
- case SYS_MSG_TYPE_PROC_MSG:
- erts_fprintf(stderr, "PROC_MSG ");
- break;
default:
erts_fprintf(stderr, "??? ");
break;
@@ -3211,17 +1910,6 @@ static void
sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
{
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- /* Invalid tracer_proc's are removed when processes
- are scheduled in. */
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- /* Reset seq_tracer if it hasn't changed */
- erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
- if (system_seq_tracer == receiver)
- system_seq_tracer = am_false;
- erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
- break;
case SYS_MSG_TYPE_SYSMON:
if (receiver == NIL
&& !erts_system_monitor_long_gc
@@ -3281,8 +1969,6 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
no_elgger, tag, CAR(list_val(tp[3])));
break;
}
- case SYS_MSG_TYPE_PROC_MSG:
- break;
default:
ASSERT(0);
}
@@ -3400,13 +2086,6 @@ sys_msg_dispatcher_func(void *unused)
print_msg_type(smqp);
#endif
switch (smqp->type) {
- case SYS_MSG_TYPE_TRACE:
- case SYS_MSG_TYPE_PROC_MSG:
- receiver = smqp->to;
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- receiver = erts_get_system_seq_tracer();
- break;
case SYS_MSG_TYPE_SYSMON:
receiver = erts_get_system_monitor();
if (smqp->from == receiver) {
@@ -3441,16 +2120,8 @@ sys_msg_dispatcher_func(void *unused)
if (is_internal_pid(receiver)) {
proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
- if (!proc
- || (smqp->type == SYS_MSG_TYPE_TRACE
- && !(ERTS_TRACE_FLAGS(proc) & F_TRACER))) {
+ if (!proc) {
/* Bad tracer */
-#ifdef DEBUG_PRINTOUTS
- if (smqp->type == SYS_MSG_TYPE_TRACE && proc)
- erts_fprintf(stderr,
- "<tracer alive but missing "
- "F_TRACER flag> ");
-#endif
goto failure;
}
else {
@@ -3458,7 +2129,7 @@ sys_msg_dispatcher_func(void *unused)
queue_proc_msg:
mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = smqp->bp;
- erts_queue_message(proc,&proc_locks,mp,smqp->msg,NIL);
+ erts_queue_message(proc,&proc_locks,mp,smqp->msg);
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
@@ -3524,12 +2195,6 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
for (sm = sys_message_queue; sm; sm = sm->next) {
Eterm to;
switch (sm->type) {
- case SYS_MSG_TYPE_TRACE:
- to = sm->to;
- break;
- case SYS_MSG_TYPE_SEQTRACE:
- to = erts_get_system_seq_tracer();
- break;
case SYS_MSG_TYPE_SYSMON:
to = erts_get_system_monitor();
break;
@@ -3567,3 +2232,498 @@ init_sys_msg_dispatcher(void)
}
#endif
+
+#include "erl_nif.h"
+
+struct ErtsTracerNif_ {
+ HashBucket hb;
+ Eterm module;
+ struct erl_module_nif* nif_mod;
+ ErlNifFunc *enabled;
+ ErlNifFunc *trace;
+};
+
+static Hash *tracer_hash = NULL;
+static erts_smp_rwmtx_t tracer_mtx;
+
+static ErtsTracerNif *
+load_tracer_nif(const ErtsTracer tracer)
+{
+ Module* mod = erts_get_module(ERTS_TRACER_MODULE(tracer),
+ erts_active_code_ix());
+ struct erl_module_instance *instance;
+ ErlNifFunc *funcs;
+ int num_of_funcs;
+ ErtsTracerNif tnif_tmpl, *tnif;
+ int i;
+
+ if (mod && mod->curr.nif != NULL) {
+ instance = &mod->curr;
+ } else {
+ return NULL;
+ }
+
+ tnif_tmpl.enabled = NULL;
+ tnif_tmpl.trace = NULL;
+ tnif_tmpl.nif_mod = instance->nif;
+ tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
+
+ num_of_funcs = erts_nif_get_funcs(instance->nif, &funcs);
+
+ for(i = 0; i < num_of_funcs; i++) {
+ if (strcmp("enabled",funcs[i].name) == 0 && funcs[i].arity == 3) {
+ tnif_tmpl.enabled = funcs + i;
+ } else if (strcmp("trace",funcs[i].name) == 0 && funcs[i].arity == 6) {
+ tnif_tmpl.trace = funcs + i;
+ }
+ }
+
+ if (tnif_tmpl.enabled == NULL ||
+ tnif_tmpl.trace == NULL ) {
+ return NULL;
+ }
+
+ erts_smp_rwmtx_rwlock(&tracer_mtx);
+ tnif = hash_put(tracer_hash, &tnif_tmpl);
+ erts_smp_rwmtx_rwunlock(&tracer_mtx);
+
+ return tnif;
+}
+
+static ERTS_INLINE ErtsTracerNif *
+lookup_tracer_nif(const ErtsTracer tracer)
+{
+ ErtsTracerNif tnif_tmpl;
+ ErtsTracerNif *tnif;
+ tnif_tmpl.module = ERTS_TRACER_MODULE(tracer);
+ erts_smp_rwmtx_rlock(&tracer_mtx);
+ if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) {
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ tnif = load_tracer_nif(tracer);
+ ASSERT(!tnif || tnif->nif_mod);
+ return tnif;
+ }
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ ASSERT(tnif->nif_mod);
+ return tnif;
+}
+
+/* This function converts an Erlang tracer term to an ErtsTracer.
+   It returns THE_NON_VALUE if an invalid tracer term was given.
+   Accepted input is:
+     pid() | port() | {prefix, pid()} | {prefix, port()} |
+     {prefix, atom(), term()} | {atom(), term()}
+   (An Erlang-side sketch of these forms follows this function.)
+ */
+ErtsTracer
+erts_term_to_tracer(Eterm prefix, Eterm t)
+{
+ ErtsTracer tracer = erts_tracer_nil;
+ ASSERT(is_atom(prefix) || prefix == THE_NON_VALUE);
+ if (!is_nil(t)) {
+ Eterm module = am_erl_tracer, state = THE_NON_VALUE;
+ Eterm hp[2];
+ if (is_tuple(t)) {
+ Eterm *tp = tuple_val(t);
+ if (prefix != THE_NON_VALUE) {
+ if (arityval(tp[0]) == 2 && tp[1] == prefix)
+ t = tp[2];
+ else if (arityval(tp[0]) == 3 && tp[1] == prefix && is_atom(tp[2])) {
+ module = tp[2];
+ state = tp[3];
+ }
+ } else {
+ if (arityval(tp[0]) == 2 && is_atom(tp[2])) {
+ module = tp[1];
+ state = tp[2];
+ }
+ }
+ }
+ if (state == THE_NON_VALUE && (is_internal_pid(t) || is_internal_port(t)))
+ state = t;
+ if (state == THE_NON_VALUE)
+ return THE_NON_VALUE;
+ erts_tracer_update(&tracer, CONS(hp, module, state));
+ }
+ if (!lookup_tracer_nif(tracer)) {
+ ASSERT(ERTS_TRACER_MODULE(tracer) != am_erl_tracer);
+ ERTS_TRACER_CLEAR(&tracer);
+ return THE_NON_VALUE;
+ }
+ return tracer;
+}
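For orientation, a hedged Erlang-side sketch of the forms accepted above, as they appear in trace options. The module name tracer_term_example is made up for this sketch, tracer_test is the NIF-backed test tracer from tracer_SUITE later in this diff, and the call flag is only illustrative.

    -module(tracer_term_example).
    -export([pid_tracer/2, module_tracer/1]).

    %% Backwards-compatible form: the tracer is a plain pid (or port),
    %% wrapped in the {tracer, _} prefix of the trace options
    %% ({prefix, pid()} above).
    pid_tracer(Tracee, TracerPid) ->
        erlang:trace(Tracee, true, [{tracer, TracerPid}, call]).

    %% New form: {tracer, Module, State} selects a tracer module and
    %% passes State to its callbacks ({prefix, atom(), term()} above,
    %% [Mod | State] internally).
    module_tracer(Tracee) ->
        erlang:trace(Tracee, true, [{tracer, tracer_test, []}, call]).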
+
+Eterm
+erts_tracer_to_term(Process *p, ErtsTracer tracer)
+{
+ if (ERTS_TRACER_IS_NIL(tracer))
+ return am_false;
+ if (ERTS_TRACER_MODULE(tracer) == am_erl_tracer)
+ /* Have to manage these specifically in order to be
+ backwards compatible */
+ return ERTS_TRACER_STATE(tracer);
+ else {
+ Eterm *hp = HAlloc(p, 3);
+ return TUPLE2(hp, ERTS_TRACER_MODULE(tracer),
+ copy_object(ERTS_TRACER_STATE(tracer), p));
+ }
+}
+
+
+static ERTS_INLINE int
+send_to_tracer_nif_raw(Process *c_p, Process *tracee,
+ const ErtsTracer tracer, Uint tracee_flags,
+ Eterm t_p_id, ErtsTracerNif *tnif, Eterm tag, Eterm msg,
+ Eterm extra, Eterm pam_result)
+{
+ if (tnif || (tnif = lookup_tracer_nif(tracer)) != NULL) {
+#define MAP_SIZE 3
+ Eterm argv[6],
+ local_heap[3+MAP_SIZE /* values */+(MAP_SIZE+1 /* keys */)];
+ flatmap_t *map = (flatmap_t*)(local_heap+(MAP_SIZE+1));
+ Eterm *map_values = flatmap_get_values(map);
+
+ int argc = 6;
+
+ argv[0] = tag;
+ argv[1] = ERTS_TRACER_STATE(tracer);
+ argv[2] = t_p_id;
+ argv[3] = msg;
+ argv[4] = extra == THE_NON_VALUE ? am_undefined : extra;
+ argv[5] = make_flatmap(map);
+
+ map->thing_word = MAP_HEADER_FLATMAP;
+ map->size = MAP_SIZE;
+ map->keys = TUPLE3(local_heap, am_match_spec_result, am_scheduler_id,
+ am_timestamp);
+
+ *map_values++ = pam_result;
+ if (tracee_flags & F_TRACE_SCHED_NO)
+ *map_values++ = make_small(erts_get_scheduler_id());
+ else
+ *map_values++ = am_undefined;
+ if (tracee_flags & F_NOW_TS)
+#ifdef HAVE_ERTS_NOW_CPU
+ if (erts_cpu_timestamp)
+ *map_values++ = am_cpu_timestamp;
+ else
+#endif
+ *map_values++ = am_timestamp;
+ else if (tracee_flags & F_STRICT_MON_TS)
+ *map_values++ = am_strict_monotonic;
+ else if (tracee_flags & F_MON_TS)
+ *map_values++ = am_monotonic;
+ else
+ *map_values++ = am_undefined;
+
+#undef MAP_SIZE
+ erts_nif_call_function(c_p, tracee ? tracee : c_p,
+ tnif->nif_mod, tnif->trace, argc, argv);
+ }
+ return 1;
+}
+
+
+static ERTS_INLINE int
+send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
+ Eterm t_p_id, ErtsTracerNif *tnif, Eterm tag,
+ Eterm msg, Eterm extra)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ if (c_p) {
+ /* We have to hold the main lock of the currently executing process */
+ erts_proc_lc_chk_have_proc_locks(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ if (is_internal_pid(t_p->id)) {
+ /* We have to have at least one lock */
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL);
+ } else {
+ ASSERT(is_internal_port(t_p->id));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p));
+ }
+#endif
+
+ return send_to_tracer_nif_raw(c_p,
+ is_internal_pid(t_p->id) ? (Process*)t_p : NULL,
+ t_p->tracer, t_p->trace_flags,
+ t_p_id, tnif, tag, msg, extra,
+ am_true);
+}
+
+static ERTS_INLINE Eterm
+call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+ ErtsTracerNif **tnif_ret, Eterm tag, Eterm t_p_id)
+{
+ ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
+ if (tnif) {
+ Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id};
+ if (tnif_ret) *tnif_ret = tnif;
+ return erts_nif_call_function(
+ c_p, NULL, tnif->nif_mod, tnif->enabled, 3, argv);
+ }
+ return am_remove;
+}
+
+static int
+is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p,
+ ErtsTracerNif **tnif_ret, Eterm tag)
+{
+ Eterm nif_result;
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ if (c_p)
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks
+ || erts_thr_progress_is_blocking());
+ if (is_internal_pid(t_p->id)) {
+ /* We have to have at least one lock */
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL
+ || erts_thr_progress_is_blocking());
+ } else {
+ ASSERT(is_internal_port(t_p->id));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
+ || erts_thr_progress_is_blocking());
+ }
+#endif
+
+ nif_result = call_enabled_tracer(c_p, t_p->tracer, tnif_ret, tag, t_p->id);
+ switch (nif_result) {
+ case am_discard: return 0;
+ case am_trace: return 1;
+ case THE_NON_VALUE:
+ case am_remove: break;
+ default:
+        /* only am_remove should be returned, but if
+           something else is returned we fall through
+           and remove the tracer. */
+ ASSERT(0);
+ }
+
+ /* Only remove tracer on self() and ports */
+ if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) {
+ ErtsProcLocks c_p_xlocks = 0;
+ if (is_internal_pid(t_p->id)) {
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+ if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
+ c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
+ if (erts_smp_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
+ erts_smp_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ }
+ erts_tracer_replace(t_p, erts_tracer_nil);
+ t_p->trace_flags &= ~TRACEE_FLAGS;
+
+ if (c_p_xlocks)
+ erts_smp_proc_unlock(c_p, c_p_xlocks);
+ }
+
+
+ return 0;
+}
+
+int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p)
+{
+ return is_tracer_proc_enabled(c_p, c_p_locks, t_p, NULL, am_trace_status);
+}
+
+int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer)
+{
+ ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
+ if (tnif) {
+ Eterm nif_result = call_enabled_tracer(c_p, tracer, &tnif,
+ am_trace_status,
+ c_p->common.id);
+ switch (nif_result) {
+ case am_discard:
+ case am_trace: return 1;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ if (is_internal_pid(t_p->id) && !erts_thr_progress_is_blocking()) {
+ erts_proc_lc_chk_have_proc_locks((Process*)t_p, ERTS_PROC_LOCKS_ALL);
+ } else if (is_internal_port(t_p->id)) {
+ ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)
+ || erts_thr_progress_is_blocking());
+ }
+#endif
+ if (ERTS_TRACER_COMPARE(t_p->tracer, tracer))
+ return;
+
+ erts_tracer_update(&t_p->tracer, tracer);
+}
+
+static void free_tracer(void *p)
+{
+ ErtsTracer tracer = (ErtsTracer)p;
+
+ if (is_immed(ERTS_TRACER_STATE(tracer))) {
+ erts_free(ERTS_ALC_T_HEAP_FRAG, ptr_val(tracer));
+ } else {
+ ErlHeapFragment *hf = (void*)((char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem));
+ free_message_buffer(hf);
+ }
+}
+
+/* un-define erts_tracer_update before implementation */
+#ifdef erts_tracer_update
+#undef erts_tracer_update
+#endif
+
+/*
+ * ErtsTracer is either NIL, 'true' or [Mod | State]
+ *
+ * - If State is immediate then the memory for
+ * the cons cell is just two words + sizeof(ErtsThrPrgrLaterOp) large.
+ * - If State is a complex term then the cons cell
+ * is allocated in an ErlHeapFragment where the cons
+ * ptr points to the mem field. So in order to get the
+ * ptr to the fragment you do this:
+ * (char*)(ptr_val(tracer)) - offsetof(ErlHeapFragment, mem)
+ * Normally you shouldn't have to care about this though
+ * as erts_tracer_update takes care of it for you.
+ *
+ * When ErtsTracer is stored in the stack as part of a
+ * return trace, the cons cell is stored on the heap of
+ * the process.
+ *
+ * The cons cell is not always stored on the heap because:
+ * 1) for port/meta tracing there is no heap
+ * 2) we would need the main lock in order to
+ *    read the tracer, which is undesirable.
+ *
+ * One way to optimize this (memory-wise) is to keep a refc and only bump
+ * the refc when *tracer is NIL.
+ */
+void
+erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer)
+{
+ ErlHeapFragment *hf;
+
+ if (is_not_nil(*tracer)) {
+ Uint offs = 2;
+ UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp);
+ ASSERT(is_list(*tracer));
+ if (is_not_immed(ERTS_TRACER_STATE(*tracer))) {
+ hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem)));
+ offs = hf->used_size;
+ size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment);
+ ASSERT(offs == size_object(*tracer));
+ }
+        /* We schedule the freeing of the tracer until after thread progress
+           has been made so that we know that no schedulers have any references
+           to it. Because we do this, it is possible to release all locks of a
+           process/port and still use the ErtsTracer of that port/process
+           without having to worry about it being freed.
+         */
+ erts_schedule_thr_prgr_later_cleanup_op(
+ free_tracer, (void*)(*tracer),
+ (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs),
+ size);
+ }
+
+ if (is_nil(new_tracer)) {
+ *tracer = new_tracer;
+ } else if (is_immed(ERTS_TRACER_STATE(new_tracer))) {
+        /* If the tracer state is an immediate we only allocate a 2-Eterm heap.
+           Not sure if it is worth it; we save 4 words (sizeof(ErlHeapFragment))
+           per tracer. */
+ Eterm *hp = erts_alloc(ERTS_ALC_T_HEAP_FRAG,
+ 2*sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp));
+ *tracer = CONS(hp, ERTS_TRACER_MODULE(new_tracer),
+ ERTS_TRACER_STATE(new_tracer));
+ } else {
+ Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer),
+ tracer_module = ERTS_TRACER_MODULE(new_tracer);
+ Uint sz = size_object(tracer_state);
+ hf = new_message_buffer(sz + 2 /* cons cell */ + (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm));
+ hp = hf->mem + 2;
+ hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm);
+ *tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap);
+ *tracer = CONS(hf->mem, tracer_module, *tracer);
+ ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf);
+ }
+}
+
+static void init_tracer_nif()
+{
+ erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx");
+
+ erts_tracer_nif_clear();
+
+}
+
+int erts_tracer_nif_clear()
+{
+
+ erts_smp_rwmtx_rlock(&tracer_mtx);
+ if (!tracer_hash || tracer_hash->nobjs) {
+
+ HashFunctions hf;
+ hf.hash = tracer_hash_fun;
+ hf.cmp = tracer_cmp_fun;
+ hf.alloc = tracer_alloc_fun;
+ hf.free = tracer_free_fun;
+ hf.meta_alloc = (HMALLOC_FUN) erts_alloc;
+ hf.meta_free = (HMFREE_FUN) erts_free;
+ hf.meta_print = (HMPRINT_FUN) erts_print;
+
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ erts_smp_rwmtx_rwlock(&tracer_mtx);
+
+ if (tracer_hash)
+ hash_delete(tracer_hash);
+
+ tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf);
+
+ erts_smp_rwmtx_rwunlock(&tracer_mtx);
+ return 1;
+ }
+
+ erts_smp_rwmtx_runlock(&tracer_mtx);
+ return 0;
+}
+
+static int tracer_cmp_fun(void* a, void* b)
+{
+ return ((ErtsTracerNif*)a)->module != ((ErtsTracerNif*)b)->module;
+}
+
+static HashValue tracer_hash_fun(void* obj)
+{
+ return make_internal_hash(((ErtsTracerNif*)obj)->module);
+}
+
+static void *tracer_alloc_fun(void* tmpl)
+{
+ ErtsTracerNif *obj = erts_alloc(ERTS_ALC_T_TRACER_NIF,
+ sizeof(ErtsTracerNif) +
+ sizeof(ErtsThrPrgrLaterOp));
+ memcpy(obj, tmpl, sizeof(*obj));
+ return obj;
+}
+
+static void tracer_free_fun_cb(void* obj)
+{
+ erts_free(ERTS_ALC_T_TRACER_NIF, obj);
+}
+
+static void tracer_free_fun(void* obj)
+{
+ ErtsTracerNif *tnif = obj;
+ erts_schedule_thr_prgr_later_op(
+ tracer_free_fun_cb, obj,
+ (ErtsThrPrgrLaterOp*)(tnif + 1));
+
+}
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 1da27ee128..61f82a6b93 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -46,6 +46,8 @@
#define ERTS_SEQTFLGS2TSTYPE(SEQTFLGS) \
((int) (((SEQTFLGS) >> ERTS_SEQ_TRACE_FLAGS_TS_TYPE_SHIFT) \
& ERTS_TRACE_TS_TYPE_MASK))
+#define ERTS_SEQTFLGS2TFLGS(SEQTFLGS) \
+ (ERTS_SEQTFLGS2TSTYPE(SEQTFLGS) << ERTS_TRACE_FLAGS_TS_TYPE_SHIFT)
#endif /* ERL_TRACE_H__FLAGS__ */
@@ -62,15 +64,16 @@ void erts_system_profile_clear(Process *c_p);
/* erl_trace.c */
void erts_init_trace(void);
void erts_trace_check_exiting(Eterm exiting);
-Eterm erts_set_system_seq_tracer(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm new);
-Eterm erts_get_system_seq_tracer(void);
-void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
-void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
+ErtsTracer erts_set_system_seq_tracer(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ ErtsTracer new);
+ErtsTracer erts_get_system_seq_tracer(void);
+void erts_change_default_tracing(int setflags, Uint *flagsp,
+ const ErtsTracer tracerp);
+void erts_get_default_tracing(Uint *flagsp, ErtsTracer *tracerp);
void erts_set_system_monitor(Eterm monitor);
Eterm erts_get_system_monitor(void);
-int erts_is_tracer_proc_valid(Process* p);
+int erts_is_tracer_valid(Process* p);
#ifdef ERTS_SMP
void erts_check_my_tracer_proc(Process *);
@@ -83,22 +86,22 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
#endif
-void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
void trace_send(Process*, Eterm, Eterm);
-void trace_receive(Process*, Eterm);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec, Eterm* args,
- int local, Eterm *tracer_pid);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
+void trace_receive(Process *, Eterm);
+Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec,
+ Eterm* args, int local, ErtsTracer *tracer);
+void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval,
+ ErtsTracer *tracer);
void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
- Eterm *tracer);
+ ErtsTracer *tracer);
void erts_trace_return_to(Process *p, BeamInstr *pc);
-void trace_sched(Process*, Eterm);
-void trace_proc(Process*, Process*, Eterm, Eterm);
+void trace_sched(Process*, ErtsProcLocks, Eterm);
+void trace_proc(Process*, ErtsProcLocks, Process*, Eterm, Eterm);
void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
void save_calls(Process *p, Export *);
void trace_gc(Process *p, Eterm what);
/* port tracing */
-void trace_virtual_sched(Process*, Eterm);
+void trace_virtual_sched(Process*, ErtsProcLocks, Eterm);
void trace_sched_ports(Port *pp, Eterm);
void trace_sched_ports_where(Port *pp, Eterm, Eterm);
void trace_port(Port *, Eterm what, Eterm data);
@@ -121,7 +124,7 @@ void monitor_large_heap(Process *p);
void monitor_generic(Process *p, Eterm type, Eterm spec);
Uint erts_trace_flag2bit(Eterm flag);
int erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
+ Uint *pMask, ErtsTracer *pTracer, int *pCpuTimestamp);
Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
#ifdef ERTS_SMP
@@ -161,15 +164,52 @@ int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
struct binary* match_prog_set,
struct binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid, int is_blocking);
+ ErtsTracer meta_tracer, int is_blocking);
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct binary **match_spec,
struct binary **meta_match_spec,
struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid);
+ ErtsTracer *meta_tracer);
int erts_is_default_trace_enabled(void);
void erts_bif_trace_init(void);
int erts_finish_breakpointing(void);
+/* Nif tracer functions */
+int erts_is_tracer_proc_enabled(Process *c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p);
+int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer);
+Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
+ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
+void erts_tracer_replace(ErtsPTabElementCommon *t_p,
+ const ErtsTracer new_tracer);
+void erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer);
+int erts_tracer_nif_clear(void);
+
+#define erts_tracer_update(t,n) do { if (*(t) != (n)) erts_tracer_update(t,n); } while(0)
+#define ERTS_TRACER_CLEAR(t) erts_tracer_update(t, erts_tracer_nil)
+
+static const ErtsTracer
+ERTS_DECLARE_DUMMY(erts_tracer_true) = am_true;
+
+static const ErtsTracer
+ERTS_DECLARE_DUMMY(erts_tracer_nil) = NIL;
+
+#define ERTS_TRACER_COMPARE(t1, t2) \
+ (EQ((t1), (t2)))
+
+#define ERTS_TRACER_IS_NIL(t1) ERTS_TRACER_COMPARE(t1, erts_tracer_nil)
+
+#define IS_TRACER_VALID(tracer) \
+ (ERTS_TRACER_COMPARE(tracer,erts_tracer_true) \
+ || ERTS_TRACER_IS_NIL(tracer) \
+ || (is_list(tracer) && is_atom(CAR(list_val(tracer)))))
+
+#define ERTS_TRACER_FROM_ETERM(termp) \
+ ((ErtsTracer*)(termp))
+
+#define ERTS_TRACER_PROC_IS_ENABLED(PROC) \
+ (!ERTS_TRACER_IS_NIL(ERTS_TRACER(PROC)) \
+ && erts_is_tracer_proc_enabled(PROC, ERTS_PROC_LOCK_MAIN, &(PROC)->common))
+
#endif /* ERL_TRACE_H__ */
diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d
index 73ef5a108a..237889e0f5 100644
--- a/erts/emulator/beam/erlang_dtrace.d
+++ b/erts/emulator/beam/erlang_dtrace.d
@@ -700,6 +700,35 @@ provider erlang {
*/
probe efile_drv__return(int, int, char *, int, int, int);
+
+/*
+ * The set of probes called by the erlang tracer nif backend. In order
+ * to receive events from these probes you have to enable tracing both in
+ * Erlang, using the trace BIFs, and in dtrace/systemtap. (A sketch of the
+ * Erlang side follows the probe declarations below.)
+ */
+
+
+ /**
+ * A trace message of type `event` was triggered by process `p`.
+     *
+     * @param p the PID (string form) of the process
+     * @param event the event that was triggered (e.g. call or spawn)
+ * @param state the state of the tracer nif as a string
+ * @param arg1 first argument to the trace event
+ * @param arg2 second argument to the trace event
+ */
+ probe trace(char *p, char *event, char *state, char *arg1, char *arg2);
+
+ /**
+     * A sequence trace message with label `label` was triggered.
+ *
+ * @param state the state of the tracer nif as a string
+ * @param label the seq trace label
+ * @param seq_info the seq trace info tuple as a string
+ */
+ probe trace_seq(char *state, char *label, char *seq_info);
+
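The comment above notes that events must be enabled on both sides. As a hedged sketch of the Erlang half only: my_mod, my_fun and the flag list are placeholders, and TracerOpt stands for whichever tracer backend actually drives these probes; that backend and the dtrace/systemtap script are outside this sketch.

    -module(dtrace_enable_example).
    -export([enable/2]).

    %% Enable call tracing for Tracee with the trace BIFs; the probes
    %% above will still only fire if the dtrace/systemtap side is
    %% attached to the erlang provider as well.
    enable(Tracee, TracerOpt) ->
        erlang:trace_pattern({my_mod, my_fun, '_'}, true, [local]),
        erlang:trace(Tracee, true, [call, timestamp, TracerOpt]).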
/*
* NOTE:
* For formatting int64_t arguments within a D script, see:
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 1d3704f0af..5c2e8bbd09 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -44,6 +44,8 @@
#include "erl_port.h"
#include "erl_gc.h"
+struct enif_func_t;
+
struct enif_environment_t /* ErlNifEnv */
{
struct erl_module_nif* mod_nif;
@@ -54,6 +56,7 @@ struct enif_environment_t /* ErlNifEnv */
int fpe_was_unmasked;
struct enif_tmp_obj_t* tmp_obj_list;
int exception_thrown; /* boolean */
+ Process *tracee;
};
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*);
@@ -62,6 +65,12 @@ extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(int to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
+extern int erts_nif_get_funcs(struct erl_module_nif*,
+ struct enif_func_t **funcs);
+extern Eterm erts_nif_call_function(Process *p, Process *tracee,
+ struct erl_module_nif*,
+ struct enif_func_t *,
+ int argc, Eterm *argv);
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
@@ -1532,10 +1541,15 @@ ERTS_GLB_INLINE void dtrace_fun_decode(Process *process,
ERTS_GLB_INLINE void
dtrace_pid_str(Eterm pid, char *process_buf)
{
- erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "<%lu.%lu.%lu>",
- pid_channel_no(pid),
- pid_number(pid),
- pid_serial(pid));
+ if (is_pid(pid))
+ erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "<%lu.%lu.%lu>",
+ pid_channel_no(pid),
+ pid_number(pid),
+ pid_serial(pid));
+ else if (is_port(pid))
+ erts_snprintf(process_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
+ port_channel_no(pid),
+ port_number(pid));
}
ERTS_GLB_INLINE void
@@ -1547,9 +1561,7 @@ dtrace_proc_str(Process *process, char *process_buf)
ERTS_GLB_INLINE void
dtrace_port_str(Port *port, char *port_buf)
{
- erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
- port_channel_no(port->common.id),
- port_number(port->common.id));
+ dtrace_pid_str(port->common.id, port_buf);
}
ERTS_GLB_INLINE void
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index de73da8e22..a35c305046 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -212,6 +212,7 @@ static ERTS_INLINE void
kill_port(Port *pp)
{
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ ERTS_TRACER_CLEAR(&ERTS_TRACER(pp));
erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */
erts_port_task_free_port(pp);
/* In non-smp case the port structure may have been deallocated now */
@@ -404,7 +405,7 @@ static Port *create_port(char *name,
prt->os_pid = -1;
/* Set default tracing */
- erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER(prt));
ERTS_CT_ASSERT(offsetof(Port,common) == 0);
@@ -1300,7 +1301,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(c_p, am_out);
+ trace_sched(c_p, ERTS_PROC_LOCK_MAIN, am_out);
/*
* No status lock held while sending runnable
* proc trace messages. It is however not needed
@@ -1384,7 +1385,7 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
}
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
- trace_virtual_sched(c_p, am_in);
+ trace_sched(c_p, ERTS_PROC_LOCK_MAIN, am_in);
/*
* No status lock held while sending runnable
* proc trace messages. It is however not needed
@@ -1447,7 +1448,7 @@ queue_port_sched_op_reply(Process *rp,
erts_factory_trim_and_close(factory, &msg, 1);
- erts_queue_message(rp, rp_locksp, factory->message, msg, NIL);
+ erts_queue_message(rp, rp_locksp, factory->message, msg);
}
static void
@@ -1535,9 +1536,8 @@ erts_schedule_proc2port_signal(Process *c_p,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
c_p->msg.save = c_p->msg.last;
- erts_smp_proc_unlock(c_p,
- (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCKS_MSG_RECEIVE));
+ erts_smp_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE
+ | ERTS_PROC_LOCK_MAIN));
}
@@ -2948,7 +2948,7 @@ port_link_failure(Eterm port_id, Eterm linker)
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(NULL, rp, am_getting_unlinked, port_id);
+ trace_proc(NULL, 0, rp, am_getting_unlinked, port_id);
}
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -3417,7 +3417,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
sz_res + 3, &hp, &ohp);
res = copy_struct(res, sz_res, &hp, ohp);
tuple = TUPLE2(hp, sender, res);
- erts_queue_message(rp, &rp_locks, mp, tuple, NIL);
+ erts_queue_message(rp, &rp_locks, mp, tuple);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -3513,7 +3513,8 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, mp, tuple, am_undefined);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, &rp_locks, mp, tuple);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
@@ -3680,7 +3681,8 @@ deliver_vec_message(Port* prt, /* Port */
tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
- erts_queue_message(rp, &rp_locks, mp, tuple, am_undefined);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, &rp_locks, mp, tuple);
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
@@ -3939,7 +3941,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(NULL, rp, am_getting_unlinked,
+ trace_proc(NULL, 0, rp, am_getting_unlinked,
psc->port);
}
}
@@ -5036,7 +5038,7 @@ reply_io_bytes(void *vreq)
eout = erts_bld_uint64(&hp, NULL, out);
msg = TUPLE4(hp, ref, make_small(sched_id), ein, eout);
- erts_queue_message(rp, &rp_locks, mp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg);
if (req->sched_id == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -5529,7 +5531,8 @@ void driver_report_exit(ErlDrvPort ix, int status)
hp += 3;
tuple = TUPLE2(hp, prt->common.id, tuple);
- erts_queue_message(rp, &rp_locks, mp, tuple, am_undefined);
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
+ erts_queue_message(rp, &rp_locks, mp, tuple);
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
@@ -6123,7 +6126,8 @@ driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
mess = ESTACK_POP(stack); /* get resulting value */
erts_factory_trim_and_close(&factory, &mess, 1);
/* send message */
- erts_queue_message(rp, &rp_locks, factory.message, mess, am_undefined);
+ ERL_MESSAGE_TOKEN(factory.message) = am_undefined;
+ erts_queue_message(rp, &rp_locks, factory.message, mess);
}
else {
if (b2t.ix > b2t.used)
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 071f6aee08..f4a889b8f8 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -226,7 +226,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
rp = (RegProc*) hash_put(&process_reg, (void*) &r);
if (proc && rp->p == proc) {
if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
- trace_proc(c_p, proc, am_register, name);
+ trace_proc(proc, ERTS_PROC_LOCK_MAIN,
+ proc, am_register, name);
}
proc->common.u.alive.reg = rp;
}
@@ -470,8 +471,8 @@ int erts_unregister_name(Process *c_p,
int res = 0;
RegProc r, *rp;
Port *port = c_prt;
+ ErtsProcLocks current_c_p_locks = 0;
#ifdef ERTS_SMP
- ErtsProcLocks current_c_p_locks;
/*
* SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
@@ -555,7 +556,8 @@ int erts_unregister_name(Process *c_p,
#endif
rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
- trace_proc(c_p, rp->p, am_unregister, r.name);
+ trace_proc(c_p, current_c_p_locks,
+ rp->p, am_unregister, r.name);
}
#ifdef ERTS_SMP
if (rp->p != c_p) {
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 8b3eb2db1d..748fba15c7 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -644,6 +644,15 @@ typedef struct preload {
unsigned char* code; /* Code pointer */
} Preload;
+/*
+ * ErtsTracer is either NIL, 'true' or [Mod | State]
+ *
+ * If set to NIL, it means no tracer.
+ * If set to 'true' it means the current process' tracer.
+ * If set to [Mod | State], there is a tracer.
+ * See erts_tracer_update for more details
+ */
+typedef Eterm ErtsTracer;
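For reference, a hedged sketch of how an installed tracer (or the lack of one) surfaces at the Erlang level through erlang:trace_info/2. The return shapes are the ones matched in the test suites later in this diff; the module name tracer_info_example is made up for the sketch.

    -module(tracer_info_example).
    -export([tracer_of/1]).

    %% [] when the tracer is NIL, a pid or port for the default
    %% erl_tracer, and {Module, State} when a [Mod | State] tracer
    %% module is installed.
    tracer_of(Pid) ->
        case erlang:trace_info(Pid, tracer) of
            {tracer, []} -> none;
            {tracer, T} when is_pid(T); is_port(T) -> {legacy, T};
            {tracer, {Mod, State}} -> {module, Mod, State}
        end.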
/*
* This structure contains options to all built in drivers.
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 384b340a1c..b280995488 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -2298,7 +2298,7 @@ static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(p, NULL /* only used for smp build */, mp, message, NIL);
+ erts_queue_message(p, NULL /* only used for smp build */, mp, message);
}
#endif
}
diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c
index ba41f34012..ace489452f 100644
--- a/erts/emulator/hipe/hipe_debug.c
+++ b/erts/emulator/hipe/hipe_debug.c
@@ -195,7 +195,7 @@ void hipe_print_pcb(Process *p)
U("rcount ", rcount);
U("id ", common.id);
U("reds ", reds);
- U("tracer_pr..", common.tracer_proc);
+ U("tracer ", common.tracer);
U("trace_fla..", common.trace_flags);
U("group_lea..", group_leader);
U("flags ", flags);
diff --git a/erts/emulator/nifs/common/erl_tracer_nif.c b/erts/emulator/nifs/common/erl_tracer_nif.c
new file mode 100644
index 0000000000..a1e0e581a4
--- /dev/null
+++ b/erts/emulator/nifs/common/erl_tracer_nif.c
@@ -0,0 +1,250 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson 2015. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Purpose: NIF library for process/port tracer
+ *
+ */
+
+
+#define STATIC_ERLANG_NIF 1
+
+#include "erl_nif.h"
+#include "config.h"
+#include "sys.h"
+
+#ifdef VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+/* NIF interface declarations */
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
+static void unload(ErlNifEnv* env, void* priv_data);
+
+/* The NIFs: */
+static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
+static ErlNifFunc nif_funcs[] = {
+ {"enabled", 3, enabled},
+ {"trace", 6, trace}
+};
+
+
+ERL_NIF_INIT(erl_tracer, nif_funcs, load, NULL, upgrade, unload)
+
+#define ATOMS \
+ ATOM_DECL(call); \
+ ATOM_DECL(command); \
+ ATOM_DECL(cpu_timestamp); \
+ ATOM_DECL(discard); \
+ ATOM_DECL(exception_from); \
+ ATOM_DECL(match_spec_result); \
+ ATOM_DECL(monotonic); \
+ ATOM_DECL(ok); \
+ ATOM_DECL(remove); \
+ ATOM_DECL(return_from); \
+ ATOM_DECL(scheduler_id); \
+ ATOM_DECL(send); \
+ ATOM_DECL(send_to_non_existing_process); \
+ ATOM_DECL(seq_trace); \
+ ATOM_DECL(spawn); \
+ ATOM_DECL(strict_monotonic); \
+ ATOM_DECL(timestamp); \
+ ATOM_DECL(trace); \
+ ATOM_DECL(trace_ts); \
+ ATOM_DECL(true); \
+ ATOM_DECL(undefined);
+
+#define ATOM_DECL(A) static ERL_NIF_TERM atom_##A
+ATOMS
+#undef ATOM_DECL
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+
+#define ATOM_DECL(A) atom_##A = enif_make_atom(env, #A)
+ATOMS
+#undef ATOM_DECL
+
+ *priv_data = NULL;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,
+ ERL_NIF_TERM load_info)
+{
+ if (*old_priv_data != NULL) {
+ return -1; /* Don't know how to do that */
+ }
+ if (*priv_data != NULL) {
+ return -1; /* Don't know how to do that */
+ }
+ if (load(env, priv_data, load_info)) {
+ return -1;
+ }
+ return 0;
+}
+
+static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifPid to_pid;
+ ErlNifPort to_port;
+ ASSERT(argc == 3);
+
+ if (enif_get_local_pid(env, argv[1], &to_pid)) {
+ if (!enif_is_process_alive(env, &to_pid))
+ /* tracer is dead so we should remove this trace point */
+ return atom_remove;
+ } else if (enif_get_local_port(env, argv[1], &to_port)) {
+ if (!enif_is_port_alive(env, &to_port))
+ /* tracer is dead so we should remove this trace point */
+ return atom_remove;
+ }
+
+    /* Only generate a trace message when tracer != tracee */
+ if (enif_is_identical(argv[1], argv[2])) {
+ return atom_discard;
+ }
+
+ return atom_trace;
+}
+
+/*
+ -spec trace(seq_trace, TracerState :: pid() | port(),
+ Label :: non_neg_integer(),
+ Msg :: term(),
+ Opts :: map()) -> ignored();
+ trace(Tag :: atom(), TracerState :: pid() | port(),
+            Tracee :: pid() | port() | undefined,
+            Msg :: term(),
+            Opts :: map()) -> ignored().
+  -spec trace(Tag :: atom(), TracerState :: pid() | port(),
+            Tracee :: pid() | port() | undefined,
+            Msg :: term(),
+            Extra :: term(),
+            Opts :: map()) -> ignored().
+
+  (A hedged sketch of the messages this callback forwards to a plain
+   pid tracer follows the function below.)
+*/
+static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM value, msg, tt[8], opts;
+ ErlNifPid to_pid;
+ ErlNifPort to_port;
+ size_t tt_sz = 0;
+ int is_port = 0;
+ ASSERT(argc == 6);
+
+ if (!enif_get_local_pid(env, argv[1], &to_pid)) {
+ if (!enif_get_local_port(env, argv[1], &to_port)) {
+            /* This only fails if argv[1] is not a local port/pid
+ which should not happen as it is checked in enabled */
+ ASSERT(0);
+ return atom_ok;
+ }
+ is_port = 1;
+ }
+
+ if (!enif_is_identical(argv[4], atom_undefined)) {
+ tt[tt_sz++] = atom_trace;
+ tt[tt_sz++] = argv[2];
+ tt[tt_sz++] = argv[0];
+ tt[tt_sz++] = argv[3];
+ tt[tt_sz++] = argv[4];
+ } else {
+ if (enif_is_identical(argv[0], atom_seq_trace)) {
+ tt[tt_sz++] = atom_seq_trace;
+ tt[tt_sz++] = argv[2];
+ tt[tt_sz++] = argv[3];
+ } else {
+ tt[tt_sz++] = atom_trace;
+ tt[tt_sz++] = argv[2];
+ tt[tt_sz++] = argv[0];
+ tt[tt_sz++] = argv[3];
+ }
+ }
+
+ opts = argv[5];
+
+ if (enif_get_map_value(env, opts, atom_match_spec_result,
+ &value)
+ && !enif_is_identical(value, atom_true)) {
+ tt[tt_sz++] = value;
+ }
+
+ if (enif_get_map_value(env, opts, atom_scheduler_id, &value)
+ && !enif_is_identical(value, atom_undefined)) {
+ tt[tt_sz++] = value;
+ }
+
+ if (enif_get_map_value(env, opts, atom_timestamp, &value)
+ && !enif_is_identical(value, atom_undefined)) {
+ ERL_NIF_TERM ts;
+ if (enif_is_identical(value, atom_monotonic)) {
+ ErlNifTime mon = enif_monotonic_time(ERL_NIF_NSEC);
+ ts = enif_make_int64(env, mon);
+ } else if (enif_is_identical(value, atom_strict_monotonic)) {
+ ErlNifTime mon = enif_monotonic_time(ERL_NIF_NSEC);
+ ERL_NIF_TERM unique = enif_make_unique_integer(
+ env, ERL_NIF_UNIQUE_MONOTONIC);
+ ts = enif_make_tuple2(env, enif_make_int64(env, mon), unique);
+ } else if (enif_is_identical(value, atom_timestamp)) {
+ ts = enif_now_time(env);
+ } else if (enif_is_identical(value, atom_cpu_timestamp)) {
+ ts = enif_cpu_time(env);
+ } else {
+ ASSERT(0);
+ goto error;
+ }
+ tt[tt_sz++] = ts;
+ if (tt[0] == atom_trace)
+ tt[0] = atom_trace_ts;
+ }
+
+ msg = enif_make_tuple_from_array(env, tt, tt_sz);
+
+ if (is_port) {
+ ErlNifBinary bin;
+
+ if (!enif_term_to_binary(env, msg, &bin))
+ goto error;
+
+ msg = enif_make_binary(env, &bin);
+
+ if (!enif_port_command(env, &to_port, NULL, msg))
+ /* port has probably died, enabled will clean up */;
+
+ enif_release_binary(&bin);
+ } else {
+
+ if (!enif_send(env, &to_pid, NULL, msg))
+ /* process has probably died, enabled will clean up */;
+ }
+
+error:
+
+ return atom_ok;
+}
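To make the tuple building above concrete, a hedged sketch of a plain-pid tracer receiving what this default backend sends. The shapes mirror the tt[] array assembled above and the patterns matched in trace_SUITE and match_spec_SUITE later in this diff; options such as scheduler_id or a non-true match spec result append further elements, which the final clause stands in for. The module name pid_tracer_example is made up for the sketch.

    -module(pid_tracer_example).
    -export([loop/0]).

    %% Receive loop for a tracer pid. trace_ts variants carry a trailing
    %% timestamp, and an Extra element is present when argv[4] was not
    %% 'undefined' (e.g. send events carry the receiver).
    loop() ->
        receive
            {trace, Tracee, Tag, Msg} ->
                io:format("~p ~p ~p~n", [Tracee, Tag, Msg]);
            {trace, Tracee, Tag, Msg, Extra} ->
                io:format("~p ~p ~p ~p~n", [Tracee, Tag, Msg, Extra]);
            {trace_ts, Tracee, Tag, Msg, Ts} ->
                io:format("~p ~p ~p at ~p~n", [Tracee, Tag, Msg, Ts]);
            {seq_trace, Label, SeqInfo} ->
                io:format("seq ~p: ~p~n", [Label, SeqInfo]);
            Other ->
                io:format("other trace tuple: ~p~n", [Other])
        end,
        loop().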
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 2221b5830c..92182e3890 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -110,6 +110,8 @@ MODULES= \
trace_meta_SUITE \
trace_call_count_SUITE \
trace_call_time_SUITE \
+ tracer_SUITE \
+ tracer_test \
scheduler_SUITE \
old_scheduler_SUITE \
unique_SUITE \
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index 6b8d14b487..ea973276db 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -23,7 +23,7 @@
-export([all/0, suite/0, not_run/1]).
-export([test_1/1, test_2/1, test_3/1, bad_match_spec_bin/1,
trace_control_word/1, silent/1, silent_no_ms/1, silent_test/1,
- ms_trace2/1, ms_trace3/1, boxed_and_small/1,
+ ms_trace2/1, ms_trace3/1, ms_trace_dead/1, boxed_and_small/1,
destructive_in_test_bif/1, guard_exceptions/1,
empty_list/1,
unary_plus/1, unary_minus/1, moving_labels/1]).
@@ -49,7 +49,7 @@ all() ->
false ->
[test_1, test_2, test_3, bad_match_spec_bin,
trace_control_word, silent, silent_no_ms, silent_test, ms_trace2,
- ms_trace3, boxed_and_small, destructive_in_test_bif,
+ ms_trace3, ms_trace_dead, boxed_and_small, destructive_in_test_bif,
guard_exceptions, unary_plus, unary_minus, fpe,
moving_labels,
faulty_seq_trace,
@@ -500,6 +500,8 @@ ms_trace2(Config) when is_list(Config) ->
[[call,return_to],[]]},
ms_trace2}]
end),
+ %% Silence valgrind
+ erlang:trace_pattern({?MODULE,fn,'_'},[],[]),
ok.
@@ -595,7 +597,35 @@ ms_trace3(Config) when is_list(Config) ->
end),
ok.
-
+ms_trace_dead(doc) ->
+ ["Test that a dead tracer is removed using ms"];
+ms_trace_dead(suite) -> [];
+ms_trace_dead(Config) when is_list(Config) ->
+ Self = self(),
+ TFun = fun F() -> receive M -> Self ! M, F() end end,
+ {Tracer, MRef} = spawn_monitor(TFun),
+ MetaTracer = spawn_link(TFun),
+ erlang:trace_pattern({?MODULE, f1, '_'},
+ [{'_',[],[{trace,[],
+ [call,{const,{tracer,Tracer}}]}]}],
+ [{meta, MetaTracer}]),
+ erlang:trace_pattern({?MODULE, f2, '_'}, []),
+ ?MODULE:f2(1,2),
+ ?MODULE:f1(1),
+ {tracer,Tracer} = erlang:trace_info(self(), tracer),
+ {flags,[call]} = erlang:trace_info(self(), flags),
+ ?MODULE:f2(2,3),
+ receive {trace, Self, call, {?MODULE, f2, _}} -> ok end,
+ exit(Tracer, stop),
+ receive {'DOWN',MRef,_,_,_} -> ok end,
+ ?MODULE:f1(2),
+ {tracer,[]} = erlang:trace_info(self(), tracer),
+ ?MODULE:f2(3,4),
+ TRef = erlang:trace_delivered(all),
+ receive {trace_delivered, _, TRef} -> ok end,
+ receive {trace_ts, Self, call, {?MODULE, f1, _}, _} -> ok end,
+ receive {trace_ts, Self, call, {?MODULE, f1, _}, _} -> ok end,
+ receive M -> ct:fail({unexpected, M}) after 10 -> ok end.
%% Test that destructive operations in test bif does not really happen
destructive_in_test_bif(Config) when is_list(Config) ->
@@ -905,34 +935,40 @@ collect([]) ->
collect([TM | TMs]) ->
io:format( "Expecting: ~p~n", [TM]),
receive
- M0 ->
- M = case element(1, M0) of
- trace_ts ->
- list_to_tuple(lists:reverse(
- tl(lists:reverse(tuple_to_list(M0)))));
- _ -> M0
- end,
- case is_function(TM,1) of
- true ->
- case (catch TM(M)) of
- true ->
- io:format("Got: ~p~n", [M]),
- collect(TMs);
- _ ->
- io:format("Got unexpected: ~p~n", [M]),
- flush({got_unexpected,M})
- end;
-
- false ->
- case M of
- TM ->
- io:format("Got: ~p~n", [M]),
- collect(TMs);
- _ ->
- io:format("Got unexpected: ~p~n", [M]),
- flush({got_unexpected,M})
- end
- end
+ %% We only look at trace messages with the same tracee
+      %% as the message we are looking for. This is because
+ %% the order of trace messages is only guaranteed from
+ %% within a single process.
+ M0 when element(2, M0) =:= element(2, TM); is_function(TM, 1) ->
+ M = case element(1, M0) of
+ trace_ts ->
+ list_to_tuple(lists:reverse(
+ tl(lists:reverse(tuple_to_list(M0)))));
+ _ -> M0
+ end,
+ case is_function(TM,1) of
+ true ->
+ case (catch TM(M)) of
+ true ->
+ io:format("Got: ~p~n", [M]),
+ collect(TMs);
+ _ ->
+ io:format("Got unexpected: ~p~n", [M]),
+ flush({got_unexpected,M})
+ end;
+
+ false ->
+ case M of
+ TM ->
+ io:format("Got: ~p~n", [M]),
+ collect(TMs);
+ _ ->
+ io:format("Got unexpected: ~p~n", [M]),
+ flush({got_unexpected,M})
+ end
+ end
+ after 15000 ->
+ flush(timeout)
end.
flush(Reason) ->
diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl
index 7aece926e3..d9974bd6f7 100644
--- a/erts/emulator/test/trace_SUITE.erl
+++ b/erts/emulator/test/trace_SUITE.erl
@@ -24,8 +24,8 @@
%%% Tests the trace BIF.
%%%
--export([all/0, suite/0,
- receive_trace/1, self_send/1,
+-export([all/0, suite/0, link_receive_call_correlation/0,
+ receive_trace/1, link_receive_call_correlation/1, self_send/1,
timeout_trace/1, send_trace/1,
procs_trace/1, dist_procs_trace/1,
suspend/1, mutual_suspend/1, suspend_exit/1, suspender_exit/1,
@@ -48,7 +48,8 @@ suite() ->
{timetrap, {seconds, 5}}].
all() ->
- [cpu_timestamp, receive_trace, self_send, timeout_trace,
+ [cpu_timestamp, receive_trace, link_receive_call_correlation,
+ self_send, timeout_trace,
send_trace, procs_trace, dist_procs_trace, suspend,
mutual_suspend, suspend_exit, suspender_exit,
suspend_system_limit, suspend_opts, suspend_waiting,
@@ -87,10 +88,10 @@ receive_trace(Config) when is_list(Config) ->
1 = erlang:trace(Receiver, true, ['receive']),
Hello = {hello, world},
Receiver ! Hello,
- {trace, Receiver, 'receive', Hello} = receive_first(),
+ {trace, Receiver, 'receive', Hello} = receive_first_trace(),
Hello2 = {hello, again, world},
Receiver ! Hello2,
- {trace, Receiver, 'receive', Hello2} = receive_first(),
+ {trace, Receiver, 'receive', Hello2} = receive_first_trace(),
receive_nothing(),
%% Another process should not be able to trace Receiver.
@@ -104,6 +105,112 @@ receive_trace(Config) when is_list(Config) ->
receive_nothing(),
ok.
+%% Tests that the 'receive' of a message always happens before a call
+%% with that message, and that links/unlinks are ordered together with
+%% the 'receive'.
+link_receive_call_correlation() ->
+ [{timetrap, {minutes, 5}}].
+link_receive_call_correlation(Config) when is_list(Config) ->
+ Receiver = fun_spawn(fun F() ->
+ receive
+ stop -> ok;
+ M -> receive_msg(M), F()
+ end
+ end),
+ process_flag(trap_exit, true),
+
+ %% Trace the process; make sure that we receive the trace messages.
+ 1 = erlang:trace(Receiver, true, ['receive', procs, call, timestamp, scheduler_id]),
+ 1 = erlang:trace_pattern({?MODULE, receive_msg, '_'}, [], [local]),
+
+ Num = 100000,
+
+ (fun F(0) -> [];
+ F(N) ->
+ if N rem 2 == 0 ->
+ link(Receiver);
+ true ->
+ unlink(Receiver)
+ end,
+ [Receiver ! N | F(N-1)]
+ end)(Num),
+
+ Receiver ! stop,
+ MonRef = erlang:monitor(process, Receiver),
+ receive {'DOWN', MonRef, _, _, _} -> ok end,
+ Ref = erlang:trace_delivered(Receiver),
+ receive {trace_delivered, _, Ref} -> ok end,
+
+ Msgs = (fun F() -> receive M -> [M | F()] after 1 -> [] end end)(),
+
+ case check_consistent(Receiver, Num, Num, Num, Msgs) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ ct:log("~p", [Msgs]),
+ ct:fail({error, Reason})
+ end.
+
+-define(schedid, , _).
+
+check_consistent(_Pid, Recv, Call, _LU, [Msg | _]) when Recv > Call ->
+ {error, Msg};
+check_consistent(Pid, Recv, Call, LU, [Msg | Msgs]) ->
+
+ case Msg of
+ {trace, Pid, 'receive', Recv ?schedid} ->
+ check_consistent(Pid,Recv - 1, Call, LU, Msgs);
+ {trace_ts, Pid, 'receive', Recv ?schedid, _} ->
+ check_consistent(Pid,Recv - 1, Call, LU, Msgs);
+
+ {trace, Pid, call, {?MODULE, receive_msg, [Call]} ?schedid} ->
+ check_consistent(Pid,Recv, Call - 1, LU, Msgs);
+ {trace_ts, Pid, call, {?MODULE, receive_msg, [Call]} ?schedid, _} ->
+ check_consistent(Pid,Recv, Call - 1, LU, Msgs);
+
+ %% We check that for each receive we have gotten a
+        %% getting_linked or getting_unlinked message. Also,
+        %% if we receive a getting_linked, the next message
+        %% we expect to receive is an even number, and an
+        %% odd number after a getting_unlinked.
+ {trace, Pid, getting_linked, _Self ?schedid}
+ when Recv rem 2 == 0, Recv == LU ->
+ check_consistent(Pid, Recv, Call, LU - 1, Msgs);
+ {trace_ts, Pid, getting_linked, _Self ?schedid, _}
+ when Recv rem 2 == 0, Recv == LU ->
+ check_consistent(Pid, Recv, Call, LU - 1, Msgs);
+
+ {trace, Pid, getting_unlinked, _Self ?schedid}
+ when Recv rem 2 == 1, Recv == LU ->
+ check_consistent(Pid, Recv, Call, LU - 1, Msgs);
+ {trace_ts, Pid, getting_unlinked, _Self ?schedid, _}
+ when Recv rem 2 == 1, Recv == LU ->
+ check_consistent(Pid, Recv, Call, LU - 1, Msgs);
+
+ {trace,Pid,'receive',Ignore ?schedid}
+ when Ignore == stop; Ignore == timeout ->
+ check_consistent(Pid, Recv, Call, LU, Msgs);
+ {trace_ts,Pid,'receive',Ignore ?schedid,_}
+ when Ignore == stop; Ignore == timeout ->
+ check_consistent(Pid, Recv, Call, LU, Msgs);
+
+ {trace, Pid, exit, normal ?schedid} ->
+ check_consistent(Pid, Recv, Call, LU, Msgs);
+ {trace_ts, Pid, exit, normal ?schedid, _} ->
+ check_consistent(Pid, Recv, Call, LU, Msgs);
+ {'EXIT', Pid, normal} ->
+ check_consistent(Pid, Recv, Call, LU, Msgs);
+ Msg ->
+ {error, Msg}
+ end;
+check_consistent(_, 0, 0, 0, []) ->
+ ok;
+check_consistent(_, Recv, Call, LU, []) ->
+ {error,{Recv, Call, LU}}.
+
+receive_msg(M) ->
+ M.
+
%% Test that traces are generated for messages sent
%% and received to/from self().
self_send(Config) when is_list(Config) ->
@@ -134,8 +241,8 @@ timeout_trace(Config) when is_list(Config) ->
Process = fun_spawn(fun process/0),
1 = erlang:trace(Process, true, ['receive']),
Process ! timeout_please,
- {trace, Process, 'receive', timeout_please} = receive_first(),
- {trace, Process, 'receive', timeout} = receive_first(),
+ {trace, Process, 'receive', timeout_please} = receive_first_trace(),
+ {trace, Process, 'receive', timeout} = receive_first_trace(),
receive_nothing(),
ok.
@@ -149,13 +256,13 @@ send_trace(Config) when is_list(Config) ->
%% Check that a message sent to another process is traced.
1 = erlang:trace(Sender, true, [send]),
Sender ! {send_please, Receiver, to_receiver},
- {trace, Sender, send, to_receiver, Receiver} = receive_first(),
+ {trace, Sender, send, to_receiver, Receiver} = receive_first_trace(),
receive_nothing(),
%% Check that a message sent to another registered process is traced.
register(?MODULE,Receiver),
Sender ! {send_please, ?MODULE, to_receiver},
- {trace, Sender, send, to_receiver, ?MODULE} = receive_first(),
+ {trace, Sender, send, to_receiver, ?MODULE} = receive_first_trace(),
receive_nothing(),
unregister(?MODULE),
@@ -163,14 +270,14 @@ send_trace(Config) when is_list(Config) ->
Sender ! {send_please, self(), to_myself},
receive to_myself -> ok end,
Self = self(),
- {trace, Sender, send, to_myself, Self} = receive_first(),
+ {trace, Sender, send, to_myself, Self} = receive_first_trace(),
receive_nothing(),
%% Check that a message sent to dead process is traced.
{Pid,Ref} = spawn_monitor(fun() -> ok end),
receive {'DOWN',Ref,_,_,_} -> ok end,
Sender ! {send_please, Pid, to_dead},
- {trace, Sender, send_to_non_existing_process, to_dead, Pid} = receive_first(),
+ {trace, Sender, send_to_non_existing_process, to_dead, Pid} = receive_first_trace(),
receive_nothing(),
%% Check that a message sent to an unknown registered process is traced.
@@ -178,7 +285,7 @@ send_trace(Config) when is_list(Config) ->
1 = erlang:trace(BadargSender, true, [send]),
unlink(BadargSender),
BadargSender ! {send_please, not_registered, to_unknown},
- {trace, BadargSender, send, to_unknown, not_registered} = receive_first(),
+ {trace, BadargSender, send, to_unknown, not_registered} = receive_first_trace(),
receive_nothing(),
%% Another process should not be able to trace Sender.
@@ -211,9 +318,9 @@ procs_trace(Config) when is_list(Config) ->
%% spawn, link
Proc1 ! {spawn_link_please, Self, MFA},
Proc3 = receive {spawned, Proc1, P3} -> P3 end,
- {trace, Proc1, spawn, Proc3, MFA} = receive_first(),
+ {trace, Proc1, spawn, Proc3, MFA} = receive_first_trace(),
io:format("Proc3 = ~p ~n", [Proc3]),
- {trace, Proc1, link, Proc3} = receive_first(),
+ {trace, Proc1, link, Proc3} = receive_first_trace(),
receive_nothing(),
%%
%% getting_unlinked by exit()
@@ -221,60 +328,60 @@ procs_trace(Config) when is_list(Config) ->
Reason3 = make_ref(),
Proc1 ! {send_please, Proc3, {exit_please, Reason3}},
receive {Proc1, {'EXIT', Proc3, Reason3}} -> ok end,
- {trace, Proc1, getting_unlinked, Proc3} = receive_first(),
+ {trace, Proc1, getting_unlinked, Proc3} = receive_first_trace(),
Proc1 ! {trap_exit_please, false},
receive_nothing(),
%%
%% link
Proc1 ! {link_please, Proc2},
- {trace, Proc1, link, Proc2} = receive_first(),
+ {trace, Proc1, link, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% unlink
Proc1 ! {unlink_please, Proc2},
- {trace, Proc1, unlink, Proc2} = receive_first(),
+ {trace, Proc1, unlink, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% getting_linked
Proc2 ! {link_please, Proc1},
- {trace, Proc1, getting_linked, Proc2} = receive_first(),
+ {trace, Proc1, getting_linked, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% getting_unlinked
Proc2 ! {unlink_please, Proc1},
- {trace, Proc1, getting_unlinked, Proc2} = receive_first(),
+ {trace, Proc1, getting_unlinked, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% register
true = register(Name, Proc1),
- {trace, Proc1, register, Name} = receive_first(),
+ {trace, Proc1, register, Name} = receive_first_trace(),
receive_nothing(),
%%
%% unregister
true = unregister(Name),
- {trace, Proc1, unregister, Name} = receive_first(),
+ {trace, Proc1, unregister, Name} = receive_first_trace(),
receive_nothing(),
%%
%% exit (with registered name, due to link)
Reason4 = make_ref(),
Proc1 ! {spawn_link_please, Self, MFA},
Proc4 = receive {spawned, Proc1, P4} -> P4 end,
- {trace, Proc1, spawn, Proc4, MFA} = receive_first(),
+ {trace, Proc1, spawn, Proc4, MFA} = receive_first_trace(),
io:format("Proc4 = ~p ~n", [Proc4]),
- {trace, Proc1, link, Proc4} = receive_first(),
+ {trace, Proc1, link, Proc4} = receive_first_trace(),
Proc1 ! {register_please, Name, Proc1},
- {trace, Proc1, register, Name} = receive_first(),
+ {trace, Proc1, register, Name} = receive_first_trace(),
Proc4 ! {exit_please, Reason4},
receive {'EXIT', Proc1, Reason4} -> ok end,
- {trace, Proc1, exit, Reason4} = receive_first(),
- {trace, Proc1, unregister, Name} = receive_first(),
+ {trace, Proc1, exit, Reason4} = receive_first_trace(),
+ {trace, Proc1, unregister, Name} = receive_first_trace(),
receive_nothing(),
%%
%% exit (not linked to tracing process)
1 = erlang:trace(Proc2, true, [procs]),
Reason2 = make_ref(),
Proc2 ! {exit_please, Reason2},
- {trace, Proc2, exit, Reason2} = receive_first(),
+ {trace, Proc2, exit, Reason2} = receive_first_trace(),
receive_nothing(),
ok.
@@ -299,45 +406,45 @@ dist_procs_trace(Config) when is_list(Config) ->
Proc1 ! {trap_exit_please, true},
Proc3 = receive {spawned, Proc1, P3} -> P3 end,
io:format("Proc3 = ~p ~n", [Proc3]),
- {trace, Proc1, getting_linked, Proc3} = receive_first(),
+ {trace, Proc1, getting_linked, Proc3} = receive_first_trace(),
Reason3 = make_ref(),
Proc1 ! {send_please, Proc3, {exit_please, Reason3}},
receive {Proc1, {'EXIT', Proc3, Reason3}} -> ok end,
- {trace, Proc1, getting_unlinked, Proc3} = receive_first(),
+ {trace, Proc1, getting_unlinked, Proc3} = receive_first_trace(),
Proc1 ! {trap_exit_please, false},
receive_nothing(),
%%
%% link
Proc1 ! {link_please, Proc2},
- {trace, Proc1, link, Proc2} = receive_first(),
+ {trace, Proc1, link, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% unlink
Proc1 ! {unlink_please, Proc2},
- {trace, Proc1, unlink, Proc2} = receive_first(),
+ {trace, Proc1, unlink, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% getting_linked
Proc2 ! {link_please, Proc1},
- {trace, Proc1, getting_linked, Proc2} = receive_first(),
+ {trace, Proc1, getting_linked, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% getting_unlinked
Proc2 ! {unlink_please, Proc1},
- {trace, Proc1, getting_unlinked, Proc2} = receive_first(),
+ {trace, Proc1, getting_unlinked, Proc2} = receive_first_trace(),
receive_nothing(),
%%
%% exit (with registered name, due to link)
Name = list_to_atom(OtherName),
Reason2 = make_ref(),
Proc1 ! {link_please, Proc2},
- {trace, Proc1, link, Proc2} = receive_first(),
+ {trace, Proc1, link, Proc2} = receive_first_trace(),
Proc1 ! {register_please, Name, Proc1},
- {trace, Proc1, register, Name} = receive_first(),
+ {trace, Proc1, register, Name} = receive_first_trace(),
Proc2 ! {exit_please, Reason2},
receive {'EXIT', Proc1, Reason2} -> ok end,
- {trace, Proc1, exit, Reason2} = receive_first(),
- {trace, Proc1, unregister, Name} = receive_first(),
+ {trace, Proc1, exit, Reason2} = receive_first_trace(),
+ {trace, Proc1, unregister, Name} = receive_first_trace(),
receive_nothing(),
%%
%% Done.
@@ -1247,7 +1354,8 @@ existing_clear(Config) when is_list(Config) ->
[N, M]),
{flags, []} = erlang:trace_info(Self, flags),
{tracer, []} = erlang:trace_info(Self, tracer),
- M = N + 1, % Since trace could not be enabled on the tracer.
+ M = N, % Used to be N + 1, but from 19.0 the tracer is also traced
+
ok.
%% Test that an invalid flag cause badarg
@@ -1317,6 +1425,13 @@ receive_first() ->
Any -> Any
end.
+%% Waits for and returns the first trace message in the message queue.
+
+receive_first_trace() ->
+ receive
+ Any when element(1,Any) =:= trace; element(1,Any) =:= trace_ts -> Any
+ end.
+
%% Ensures that there is no message in the message queue.
receive_nothing() ->
@@ -1421,7 +1536,7 @@ fun_spawn(Fun, Args) ->
start_node(Name) ->
Pa = filename:dirname(code:which(?MODULE)),
Cookie = atom_to_list(erlang:get_cookie()),
- test_server:start_node(Name, slave,
+ test_server:start_node(Name, slave,
[{args, "-setcookie " ++ Cookie ++" -pa " ++ Pa}]).
stop_node(Node) ->
diff --git a/erts/emulator/test/tracer_SUITE.erl b/erts/emulator/test/tracer_SUITE.erl
new file mode 100644
index 0000000000..c62512e6ba
--- /dev/null
+++ b/erts/emulator/test/tracer_SUITE.erl
@@ -0,0 +1,622 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(tracer_SUITE).
+
+%%%
+%%% Tests the tracer module interface
+%%%
+
+-export([all/0, suite/0,groups/0, init_per_suite/1, end_per_suite/1,
+ init_per_group/2,end_per_group/2, init_per_testcase/2,
+ end_per_testcase/2]).
+-export([load/1, unload/1, reload/1, invalid_tracers/1]).
+-export([send/1, recv/1, spawn/1, exit/1, link/1, unlink/1,
+ getting_linked/1, getting_unlinked/1, register/1, unregister/1,
+ in/1, out/1, gc_start/1, gc_end/1]).
+
+suite() -> [{ct_hooks,[ts_install_cth]},
+ {timetrap, {minutes, 1}}].
+
+all() ->
+ [load, unload, reload, invalid_tracers, {group, basic}].
+
+groups() ->
+ [{ basic, [], [send, recv, spawn, exit, link, unlink, getting_linked,
+ getting_unlinked, register, unregister, in, out,
+ gc_start, gc_end]}].
+
+init_per_suite(Config) ->
+ purge(),
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_GroupName, Config) ->
+ Config.
+
+end_per_group(_GroupName, Config) ->
+ Config.
+
+init_per_testcase(TC, Config) when TC =:= load; TC =:= reload ->
+
+ DataDir = proplists:get_value(data_dir, Config),
+
+ Pid = erlang:spawn(fun F() ->
+ receive
+ {get, Pid} ->
+ Pid ! DataDir,
+ F()
+ end
+ end),
+ register(tracer_test_config, Pid),
+ Config;
+init_per_testcase(_, Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ case catch tracer_test:enabled(trace_status, self(), self()) of
+ discard ->
+ ok;
+ _ ->
+ tracer_test:load(DataDir)
+ end,
+ Config.
+
+end_per_testcase(TC, _Config) when TC =:= load; TC =:= reload ->
+ purge(),
+ exit(whereis(tracer_test_config), kill),
+ ok;
+end_per_testcase(_, _Config) ->
+ purge(),
+ ok.
+
+load(_Config) ->
+ purge(),
+ 1 = erlang:trace(self(), true, [{tracer, tracer_test, []}, call]),
+ purge(),
+ 1 = erlang:trace_pattern({?MODULE, all, 0}, [],
+ [{meta, tracer_test, []}]),
+ ok.
+
+unload(_Config) ->
+
+ ServerFun = fun F(0, undefined) ->
+ receive
+ {N, Pid} -> F(N, Pid)
+ end;
+ F(0, Pid) ->
+ Pid ! done,
+ F(0, undefined);
+ F(N, Pid) ->
+ ?MODULE:all(),
+ F(N-1, Pid)
+ end,
+
+ Pid = erlang:spawn_link(fun() -> ServerFun(0, undefined) end),
+
+
+ Tc = fun(N) ->
+ Pid ! {N, self()},
+ receive done -> ok after 1000 -> ct:fail(timeout) end,
+ trace_delivered(Pid)
+ end,
+
+ 1 = erlang:trace(Pid, true, [{tracer, tracer_test,
+ {#{ call => trace}, self(), []}},
+ call]),
+ 1 = erlang:trace_pattern({?MODULE, all, 0}, [], []),
+
+ Tc(1),
+ receive _ -> ok after 0 -> ct:fail(timeout) end,
+
+ code:purge(tracer_test),
+ code:delete(tracer_test),
+
+ Tc(1),
+ receive M1 -> ct:fail({unexpected_message, M1}) after 0 -> ok end,
+
+ code:purge(tracer_test),
+
+ Tc(1),
+ receive M2 -> ct:fail({unexpected_message, M2}) after 0 -> ok end,
+
+ ok.
+
+%% This testcase is here to make sure that there are no
+%% segfaults when reloading the currently loaded nifs.
+reload(_Config) ->
+
+ Tracer = spawn_link(fun F() -> receive _M -> F() end end),
+ Tracee = spawn_link(fun F() -> ?MODULE:all(), F() end),
+
+
+ [begin
+ Ref = make_ref(),
+ State = {#{ call => trace }, Tracer, [Ref]},
+ erlang:trace(Tracee, true, [{tracer, tracer_test,State}, call]),
+ erlang:trace_pattern({?MODULE, all, 0}, []),
+
+ false = code:purge(tracer_test),
+ {module, _} = code:load_file(tracer_test),
+
+         %% There is a race between when the internal nif cache is purged
+         %% and when the reload loop needs the tracer module, so the tracer
+         %% may either have been removed or still be in place.
+ case erlang:trace_info(Tracee, tracer) of
+ {tracer, []} -> ok;
+ {tracer, {tracer_test, State}} -> ok
+ end,
+
+ false = code:purge(tracer_test),
+ true = code:delete(tracer_test),
+ false = code:purge(tracer_test)
+ end || _ <- lists:seq(1,15)],
+
+ ok.
+
+invalid_tracers(_Config) ->
+ FailTrace = fun(A) ->
+ try erlang:trace(self(), true, A) of
+ _ -> ct:fail(A)
+ catch _:_ -> ok end
+ end,
+
+ FailTrace([{tracer, foobar}, call]),
+ FailTrace([{tracer, foobar, []}, call]),
+ FailTrace([{tracer, make_ref(), []}, call]),
+ FailTrace([{tracer, lists, []}, call]),
+
+ FailTP = fun(MS,FL) ->
+ try erlang:trace_pattern({?MODULE,all,0}, MS, FL) of
+ _ -> ct:fail({MS, FL})
+ catch _:_ -> ok end
+ end,
+
+ FailTP([],[{meta, foobar}]),
+ FailTP([],[{meta, foobar, []}]),
+ FailTP([],[{meta, make_ref(), []}]),
+ FailTP([],[{meta, lists, []}]),
+
+ ok.
+
+
+
+send(_Config) ->
+
+ Self = self(),
+ Tc = fun(Pid) ->
+ Pid ! fun() -> Self ! ok end,
+ receive ok -> ok after 100 -> ct:fail(timeout) end
+ end,
+
+ Expect = fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, send, State, Pid, ok, Self, Opts} = Msg,
+ check_opts(EOpts, Opts)
+ end
+ end,
+ test(send, Tc, Expect).
+
+
+recv(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! ok
+ end,
+
+ Expect = fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {undefined, 'receive', State, Pid, ok, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts)
+ end
+ end,
+
+ test('receive', Tc, Expect, false).
+
+spawn(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() -> erlang:spawn(lists,seq,[1,10]), ok end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, spawn, State, Pid, NewPid,
+ {lists,seq,[1,10]}, Opts} = Msg,
+ check_opts(EOpts, Opts),
+ true = is_pid(NewPid) andalso NewPid /= Pid
+ end
+ end,
+
+ test(spawn, procs, Tc, Expect, true).
+
+exit(_Config) ->
+ Tc = fun(Pid) ->
+ Pid ! fun() -> exit end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, exit, State, Pid, normal, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts)
+ end
+ end,
+
+ test(exit, procs, Tc, Expect, true, true).
+
+link(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ SPid = erlang:spawn(fun() -> receive _ -> ok end end),
+ erlang:link(SPid),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, link, State, Pid, NewPid, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts),
+ true = is_pid(NewPid) andalso NewPid /= Pid
+ end
+ end,
+
+ test(link, procs, Tc, Expect, true).
+
+unlink(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ SPid = erlang:spawn(fun() -> receive _ -> ok end end),
+ erlang:link(SPid),
+ erlang:unlink(SPid),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, unlink, State, Pid, NewPid, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts),
+ true = is_pid(NewPid) andalso NewPid /= Pid
+ end
+ end,
+
+ test(unlink, procs, Tc, Expect, true).
+
+getting_linked(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ Self = self(),
+ erlang:spawn(fun() -> erlang:link(Self) end),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {NewPid, getting_linked, State, Pid, NewPid, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts),
+ true = is_pid(NewPid) andalso NewPid /= Pid
+ end
+ end,
+
+ test(getting_linked, procs, Tc, Expect, false).
+
+getting_unlinked(_Config) ->
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ Self = self(),
+ erlang:spawn(fun() ->
+ erlang:link(Self),
+ erlang:unlink(Self)
+ end),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {NewPid, getting_unlinked, State, Pid, NewPid, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts),
+ true = is_pid(NewPid) andalso NewPid /= Pid
+ end
+ end,
+
+ test(getting_unlinked, procs, Tc, Expect, false).
+
+register(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ erlang:register(?MODULE, self()),
+ erlang:unregister(?MODULE),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, register, State, Pid, ?MODULE, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts)
+ end
+ end,
+
+ test(register, procs, Tc, Expect, true).
+
+unregister(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ erlang:register(?MODULE, self()),
+ erlang:unregister(?MODULE),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, unregister, State, Pid, ?MODULE, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts)
+ end
+ end,
+
+ test(unregister, procs, Tc, Expect, true).
+
+in(_Config) ->
+
+ Tc = fun(Pid) ->
+ Self = self(),
+ Pid ! fun() -> receive after 1 -> Self ! ok end end,
+ receive ok -> ok end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ N = (fun F(N) ->
+ receive
+ Msg ->
+ {Pid, in, State, Pid, _,
+ undefined, Opts} = Msg,
+ check_opts(EOpts, Opts),
+ F(N+1)
+ after 0 -> N
+ end
+ end)(0),
+ true = N > 0
+ end,
+
+ test(in, running, Tc, Expect, true).
+
+out(_Config) ->
+ Tc = fun(Pid) ->
+ Pid ! fun() -> receive after 10 -> exit end end,
+ Ref = erlang:monitor(process, Pid),
+ receive {'DOWN', Ref, _, _, _} -> ok end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ %% We cannot predict how many out schedules there will be
+ N = (fun F(N) ->
+ receive
+ Msg ->
+ {Pid, out, State, Pid, _,
+ undefined, Opts} = Msg,
+ check_opts(EOpts, Opts),
+ F(N+1)
+ after 0 -> N
+ end
+ end)(0),
+ true = N > 0
+ end,
+
+ test(out, running, Tc, Expect, true, true).
+
+gc_start(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ erlang:garbage_collect(),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, gc_start, State, Pid, _, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts)
+ end
+ end,
+
+ test(gc_start, garbage_collection, Tc, Expect, true).
+
+gc_end(_Config) ->
+
+ Tc = fun(Pid) ->
+ Pid ! fun() ->
+ erlang:garbage_collect(),
+ ok
+ end
+ end,
+
+ Expect =
+ fun(Pid, State, EOpts) ->
+ receive
+ Msg ->
+ {Pid, gc_end, State, Pid, _, undefined, Opts} = Msg,
+ check_opts(EOpts, Opts)
+ end
+ end,
+
+ test(gc_end, garbage_collection, Tc, Expect, true).
+
+test(Event, Tc, Expect) ->
+ test(Event, Tc, Expect, true).
+test(Event, Tc, Expect, Removes) ->
+ test(Event, Event, Tc, Expect, Removes).
+test(Event, TraceFlag, Tc, Expect, Removes) ->
+ test(Event, TraceFlag, Tc, Expect, Removes, false).
+test(Event, TraceFlag, Tc, Expect, Removes, Dies) ->
+
+ ComplexState = {fun() -> ok end, <<0:(128*8)>>},
+ Opts = #{ timestamp => undefined,
+ scheduler_id => undefined,
+ match_spec_result => true },
+
+ %% Test that trace works
+ State1 = {#{ Event => trace }, self(), ComplexState},
+ Pid1 = start_tracee(),
+ 1 = erlang:trace(Pid1, true, [TraceFlag, {tracer, tracer_test, State1}]),
+ Tc(Pid1),
+ ok = trace_delivered(Pid1),
+
+ Expect(Pid1, State1, Opts),
+ receive M11 -> ct:fail({unexpected, M11}) after 0 -> ok end,
+ if not Dies ->
+ {flags, [TraceFlag]} = erlang:trace_info(Pid1, flags),
+ {tracer, {tracer_test, State1}} = erlang:trace_info(Pid1, tracer),
+ erlang:trace(Pid1, false, [TraceFlag]);
+ true -> ok
+ end,
+
+ %% Test that trace works with scheduler id and timestamp
+ Pid1T = start_tracee(),
+ 1 = erlang:trace(Pid1T, true, [TraceFlag, {tracer, tracer_test, State1},
+ timestamp, scheduler_id]),
+ Tc(Pid1T),
+ ok = trace_delivered(Pid1T),
+
+ Expect(Pid1T, State1, Opts#{ scheduler_id := number,
+ timestamp := timestamp}),
+ receive M11T -> ct:fail({unexpected, M11T}) after 0 -> ok end,
+ if not Dies ->
+ {flags, [scheduler_id, TraceFlag, timestamp]}
+ = erlang:trace_info(Pid1T, flags),
+ {tracer, {tracer_test, State1}} = erlang:trace_info(Pid1T, tracer),
+ erlang:trace(Pid1T, false, [TraceFlag]);
+ true -> ok
+ end,
+
+ %% Test that discard works
+ Pid2 = start_tracee(),
+ State2 = {#{ Event => discard }, self(), ComplexState},
+ 1 = erlang:trace(Pid2, true, [TraceFlag, {tracer, tracer_test, State2}]),
+ Tc(Pid2),
+ ok = trace_delivered(Pid2),
+ receive M2 -> ct:fail({unexpected, M2}) after 0 -> ok end,
+ if not Dies ->
+ {flags, [TraceFlag]} = erlang:trace_info(Pid2, flags),
+ {tracer, {tracer_test, State2}} = erlang:trace_info(Pid2, tracer),
+ erlang:trace(Pid2, false, [TraceFlag]);
+ true ->
+ ok
+ end,
+
+ %% Test that remove works
+ Pid3 = start_tracee(),
+ State3 = {#{ Event => remove }, self(), ComplexState},
+ 1 = erlang:trace(Pid3, true, [TraceFlag, {tracer, tracer_test, State3}]),
+ Tc(Pid3),
+ ok = trace_delivered(Pid3),
+ receive M3 -> ct:fail({unexpected, M3}) after 0 -> ok end,
+ if not Dies ->
+ if Removes ->
+ {flags, []} = erlang:trace_info(Pid3, flags),
+ {tracer, []} = erlang:trace_info(Pid3, tracer);
+ true ->
+ {flags, [TraceFlag]} = erlang:trace_info(Pid3, flags),
+ {tracer, {tracer_test, State3}} = erlang:trace_info(Pid3, tracer)
+ end,
+ erlang:trace(Pid3, false, [TraceFlag]);
+ true ->
+ ok
+ end,
+ ok.
+
+check_opts(#{ scheduler_id := number } = E, #{ scheduler_id := N } = O)
+ when is_integer(N) ->
+ E1 = maps:remove(scheduler_id, E),
+ O1 = maps:remove(scheduler_id, O),
+ if E1 == O1 -> ok;
+ true -> ct:fail({invalid_opts, E, O})
+ end;
+check_opts(Opts, Opts) ->
+ ok;
+check_opts(E,O) ->
+ ct:fail({invalid_opts, E, O}).
+
+start_tracee() ->
+ spawn_link(
+ fun F() ->
+ receive
+ Action when is_function(Action) ->
+ case Action() of
+ ok ->
+ F();
+ Err ->
+ Err
+ end;
+ _ ->
+ F()
+ end
+ end).
+
+trace_delivered(Pid) ->
+ Ref = erlang:trace_delivered(Pid),
+ receive
+ {trace_delivered, Pid, Ref} ->
+ ok
+ after 1000 ->
+ timeout
+ end.
+
+purge() ->
+ %% Make sure module is not loaded
+ case erlang:module_loaded(tracer_test) of
+ true ->
+ code:purge(tracer_test),
+ true = code:delete(tracer_test),
+ code:purge(tracer_test);
+ _ ->
+ ok
+ end.
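Every test case above funnels through test/6 with a state tuple of the form
{TagMap, TracerPid, OpaqueTerm}: the map decides what enabled/3 answers for a
given trace tag, the pid is where tracer_test:trace/6 forwards events, and the
last element is only there to exercise term copying. A minimal sketch of what
the harness does for a single send event (the function name manual_send_check
is illustrative, but the flags, module and 7-tuple message shape are the ones
used by this suite):

    manual_send_check() ->
        State = {#{ send => trace }, self(), []},
        Tracee = spawn(fun() -> receive F when is_function(F) -> F() end end),
        1 = erlang:trace(Tracee, true, [send, {tracer, tracer_test, State}]),
        Self = self(),
        Tracee ! fun() -> Self ! ok end,
        receive ok -> ok after 1000 -> exit(timeout) end,
        %% tracer_test:trace/6 forwards this 7-tuple to the tracer pid
        receive
            {Tracee, send, State, Tracee, ok, Self, _Opts} -> ok
        after 1000 -> exit(no_trace_message)
        end.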
diff --git a/erts/emulator/test/tracer_SUITE_data/Makefile.src b/erts/emulator/test/tracer_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..154bd70ccc
--- /dev/null
+++ b/erts/emulator/test/tracer_SUITE_data/Makefile.src
@@ -0,0 +1,8 @@
+
+NIF_LIBS = tracer_test@dll@
+
+all: $(NIF_LIBS)
+
+@SHLIB_RULES@
+
+$(NIF_LIBS): tracer_test.c
diff --git a/erts/emulator/test/tracer_SUITE_data/tracer_test.c b/erts/emulator/test/tracer_SUITE_data/tracer_test.c
new file mode 100644
index 0000000000..8b4be1345d
--- /dev/null
+++ b/erts/emulator/test/tracer_SUITE_data/tracer_test.c
@@ -0,0 +1,122 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2014. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#include "erl_nif.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <limits.h>
+
+/* NIF interface declarations */
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info);
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
+static void unload(ErlNifEnv* env, void* priv_data);
+
+/* The NIFs: */
+static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+
+static ErlNifFunc nif_funcs[] = {
+ {"enabled", 3, enabled},
+ {"trace", 6, trace}
+};
+
+ERL_NIF_INIT(tracer_test, nif_funcs, load, NULL, upgrade, unload)
+
+static ERL_NIF_TERM atom_discard;
+static ERL_NIF_TERM atom_ok;
+
+#define ASSERT(expr) assert(expr)
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+
+ atom_discard = enif_make_atom(env, "discard");
+ atom_ok = enif_make_atom(env, "ok");
+
+ *priv_data = NULL;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,
+ ERL_NIF_TERM load_info)
+{
+ if (*old_priv_data != NULL) {
+ return -1; /* Don't know how to do that */
+ }
+ if (*priv_data != NULL) {
+ return -1; /* Don't know how to do that */
+ }
+ if (load(env, priv_data, load_info)) {
+ return -1;
+ }
+ return 0;
+}
+
+static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int state_arity;
+ const ERL_NIF_TERM *state_tuple;
+ ERL_NIF_TERM value;
+ ASSERT(argc == 3);
+
+ if (!enif_get_tuple(env, argv[1], &state_arity, &state_tuple))
+ return atom_discard;
+
+ if (enif_get_map_value(env, state_tuple[0], argv[0], &value)) {
+ return value;
+ } else {
+ return atom_discard;
+ }
+}
+
+static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int state_arity;
+ ErlNifPid self, to;
+ ERL_NIF_TERM *tuple, msg;
+ const ERL_NIF_TERM *state_tuple;
+ ASSERT(argc == 6);
+
+ enif_get_tuple(env, argv[1], &state_arity, &state_tuple);
+
+ tuple = enif_alloc(sizeof(ERL_NIF_TERM)*(argc+1));
+ memcpy(tuple+1,argv,sizeof(ERL_NIF_TERM)*argc);
+
+ if (enif_self(env, &self)) {
+ tuple[0] = enif_make_pid(env, &self);
+ } else {
+ tuple[0] = enif_make_atom(env, "undefined");
+ }
+
+ msg = enif_make_tuple_from_array(env, tuple, argc + 1);
+ enif_get_local_pid(env, state_tuple[1], &to);
+ enif_send(env, &to, NULL, msg);
+ enif_free(tuple);
+
+ return atom_ok;
+}
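In short, the test NIF above does two things: enabled/3 looks the trace tag up
in the map stored as the first element of the state tuple and falls back to
discard, and trace/6 prepends the calling process to its six arguments and
sends the resulting 7-tuple to the pid stored as the second element of the
state tuple. Expressed as a pure-Erlang sketch (for reading convenience only;
the callbacks themselves stay NIFs):

    %% What enabled/3 decides, given this suite's state tuples.
    enabled_sketch(Tag, {TagMap, _TracerPid, _Extra}, _Tracee) ->
        maps:get(Tag, TagMap, discard).

    %% The message shape trace/6 delivers; the real NIF uses enif_self()
    %% and falls back to the atom 'undefined' when no process is bound
    %% to the calling environment.
    trace_sketch(Tag, {_TagMap, TracerPid, _Extra} = State, Tracee, Msg, Extra, Opts) ->
        TracerPid ! {self(), Tag, State, Tracee, Msg, Extra, Opts},
        ok.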
diff --git a/erts/emulator/test/tracer_test.erl b/erts/emulator/test/tracer_test.erl
new file mode 100644
index 0000000000..d4778f4531
--- /dev/null
+++ b/erts/emulator/test/tracer_test.erl
@@ -0,0 +1,55 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(tracer_test).
+
+%%%
+%%% Test tracer
+%%%
+
+-export([enabled/3, trace/6]).
+-export([load/1, load/2]).
+-on_load(load/0).
+
+enabled(_, _, _) ->
+ erlang:nif_error(nif_not_loaded).
+
+trace(_, _, _, _, _, _) ->
+ erlang:nif_error(nif_not_loaded).
+
+load() ->
+ case whereis(tracer_test_config) of
+ undefined ->
+ ok;
+ Pid ->
+ Pid ! {get, self()},
+ receive
+ {Conf, Postfix} ->
+ load(Conf, Postfix);
+ Conf ->
+ load(Conf)
+ end
+ end.
+
+load(DataDir) ->
+ load(DataDir, "").
+load(DataDir, Postfix) ->
+ SoFile = atom_to_list(?MODULE) ++ Postfix,
+    erlang:load_nif(filename:join(DataDir, SoFile), 0).
diff --git a/erts/emulator/utils/make_driver_tab b/erts/emulator/utils/make_driver_tab
index 646959e8b2..ffb5f58ebf 100755
--- a/erts/emulator/utils/make_driver_tab
+++ b/erts/emulator/utils/make_driver_tab
@@ -31,7 +31,7 @@ my $file = "";
my $nif = "";
my @emu_drivers = ();
my @static_drivers = ();
-my @nifs = ();
+my @static_nifs = ();
my $mode = 1;
while (@ARGV) {
@@ -55,9 +55,14 @@ while (@ARGV) {
push(@static_drivers, $d);
}
if ($mode == 2) {
- push(@nifs, $d);
+ push(@static_nifs, $d);
}
next;
+ } elsif ($mode == 2) {
+ $d = basename $d;
+        $d =~ s/_nif(\..*|)$//; # strip _nif.* or just _nif
+ push(@static_nifs, $d);
+ next;
}
$d = basename $d;
$d =~ s/drv(\..*|)$//; # strip drv.* or just drv
@@ -120,7 +125,7 @@ typedef struct ErtsStaticNifEntry_ {
EOF
# prototypes
-foreach (@nifs) {
+foreach (@static_nifs) {
my $d = ${_};
$d =~ s/\.debug//; # strip .debug
print "void *".$d."_nif_init(void);\n";
@@ -129,7 +134,7 @@ foreach (@nifs) {
# The array itself
print "static ErtsStaticNifEntry static_nif_tab[] =\n{\n";
-foreach (@nifs) {
+foreach (@static_nifs) {
my $d = ${_};
$d =~ s/\.debug//; # strip .debug
print "{\"${_}\",&".$d."_nif_init},\n";
diff --git a/erts/preloaded/ebin/erl_tracer.beam b/erts/preloaded/ebin/erl_tracer.beam
new file mode 100644
index 0000000000..dcb74c3bf7
--- /dev/null
+++ b/erts/preloaded/ebin/erl_tracer.beam
Binary files differ
diff --git a/erts/preloaded/src/Makefile b/erts/preloaded/src/Makefile
index 1e3de9f1d7..4a447d3a09 100644
--- a/erts/preloaded/src/Makefile
+++ b/erts/preloaded/src/Makefile
@@ -43,7 +43,8 @@ PRE_LOADED_ERL_MODULES = \
otp_ring0 \
erts_code_purger \
erlang \
- erts_internal
+ erts_internal \
+ erl_tracer
PRE_LOADED_BEAM_MODULES = \
prim_eval
diff --git a/erts/preloaded/src/erl_tracer.erl b/erts/preloaded/src/erl_tracer.erl
new file mode 100644
index 0000000000..3415ff3135
--- /dev/null
+++ b/erts/preloaded/src/erl_tracer.erl
@@ -0,0 +1,41 @@
+-module(erl_tracer).
+
+-export([enabled/3, trace/6, on_load/0]).
+
+-type tracee() :: port() | pid() | undefined.
+-type trace_tag() :: send | send_to_non_existing_process | 'receive' |
+ call | return_to | return_from | exception_from |
+ spawn | exit | link | unlink | getting_linked |
+ getting_unlinked | register | unregister | in | out |
+ gc_start | gc_end.
+-type trace_opts() :: #{ match_spec_result => true | term(),
+ scheduler_id => undefined | non_neg_integer(),
+ timestamp => undefined | timestamp | cpu_timestamp |
+ monotonic | strict_monotonic }.
+-type tracer_state() :: term().
+
+on_load() ->
+ case erlang:load_nif(atom_to_list(?MODULE), 0) of
+ ok -> ok
+ end.
+
+%%%
+%%% NIF placeholders
+%%%
+
+-spec enabled(Tag :: trace_tag() | seq_trace | trace_status,
+ TracerState :: tracer_state(),
+ Tracee :: tracee()) ->
+ trace | discard | remove.
+enabled(_, _, _) ->
+ erlang:nif_error(nif_not_loaded).
+
+-spec trace(Tag :: trace_tag() | seq_trace,
+ TracerState :: tracer_state(),
+ Tracee :: tracee(),
+ Msg :: term(),
+ Extra :: term(),
+ Opts :: trace_opts()) -> any().
+
+trace(_, _, _, _, _, _) ->
+ erlang:nif_error(nif_not_loaded).
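erl_tracer is both the default tracer and the reference for the callback
signatures a tracer module must export. A skeleton that matches these specs
might look as follows; note that, as stated in the commit message, real
implementations of enabled/3 and trace/6 have to be NIFs, so a plain-Erlang
body like this (the module name my_tracer is illustrative) only shows the
expected arguments and return values:

    -module(my_tracer).
    -export([enabled/3, trace/6]).

    %% Called before an event is generated, and with the trace_status tag
    %% when the runtime checks whether the tracer should stay installed.
    enabled(_Tag, _TracerState, _Tracee) ->
        trace.                               % trace | discard | remove

    %% Called with the actual trace event.
    trace(Tag, _TracerState, Tracee, Msg, Extra, Opts) ->
        io:format("~p~n", [{Tag, Tracee, Msg, Extra, Opts}]),
        ok.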
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 4374bdcd89..8d6d17b043 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -245,12 +245,14 @@
set_on_first_spawn |
set_on_link |
set_on_first_link |
- {tracer, pid() | port()}.
+ {tracer, pid() | port()} |
+ {tracer, module(), term()}.
-type trace_info_item_result() ::
{traced, global | local | false | undefined} |
{match_spec, trace_match_spec() | false | undefined} |
{meta, pid() | port() | false | undefined | []} |
+ {meta, module(), term() } |
{meta_match_spec, trace_match_spec() | false | undefined} |
{call_count, non_neg_integer() | boolean() | undefined} |
{call_time, [{pid(), non_neg_integer(),
@@ -276,6 +278,7 @@
undefined |
{flags, [trace_info_flag()]} |
{tracer, pid() | port() | []} |
+ {tracer, module(), term()} |
trace_info_item_result() |
{all, [ trace_info_item_result() ] | false | undefined}.
@@ -1709,8 +1712,28 @@ time() ->
PidSpec :: pid() | existing | new | all,
How :: boolean(),
FlagList :: [trace_flag()].
-trace(_PidSpec, _How, _FlagList) ->
- erlang:nif_error(undefined).
+trace(PidSpec, How, FlagList) ->
+ %% Make sure that we have loaded the tracer module
+ case lists:keyfind(tracer, 1, FlagList) of
+ {tracer, Module, State} when erlang:is_atom(Module) ->
+ case erlang:module_loaded(Module) of
+ false ->
+ Module:enabled(trace_status, erlang:self(), State);
+ true ->
+ ok
+ end;
+ _ ->
+ ignore
+ end,
+
+ try erts_internal:trace(PidSpec, How, FlagList) of
+ Res -> Res
+ catch E:R ->
+ {_, [_ | CST]} = erlang:process_info(
+ erlang:self(), current_stacktrace),
+ erlang:raise(
+ E, R, [{?MODULE, trace, [PidSpec, How, FlagList], []} | CST])
+ end.
%% trace_delivered/1
-spec erlang:trace_delivered(Tracee) -> Ref when
@@ -2319,7 +2342,7 @@ subtract(_,_) ->
OldState :: preliminary | final | volatile;
%% These are deliberately not documented
(internal_cpu_topology, term()) -> term();
- (sequential_tracer, pid() | port() | false) -> pid() | port() | false;
+ (sequential_tracer, pid() | port() | {module(), term()} | false) -> pid() | port() | false;
(1,0) -> true.
system_flag(_Flag, _Value) ->
@@ -2355,12 +2378,20 @@ tl(_List) ->
| boolean()
| restart
| pause.
-trace_pattern(_MFA, _MatchSpec) ->
- erlang:nif_error(undefined).
+trace_pattern(MFA, MatchSpec) ->
+ try erts_internal:trace_pattern(MFA, MatchSpec, []) of
+ Res -> Res
+ catch E:R ->
+ {_, [_ | CST]} = erlang:process_info(
+ erlang:self(), current_stacktrace),
+ erlang:raise(
+ E, R, [{?MODULE, trace_pattern, [MFA, MatchSpec], []} | CST])
+ end.
-type trace_pattern_flag() ::
global | local |
meta | {meta, Pid :: pid()} |
+ {meta, TracerModule :: module(), TracerState :: term()} |
call_count |
call_time.
@@ -2371,8 +2402,28 @@ trace_pattern(_MFA, _MatchSpec) ->
| restart
| pause,
FlagList :: [ trace_pattern_flag() ].
-trace_pattern(_MFA, _MatchSpec, _FlagList) ->
- erlang:nif_error(undefined).
+trace_pattern(MFA, MatchSpec, FlagList) ->
+ %% Make sure that we have loaded the tracer module
+ case lists:keyfind(meta, 1, FlagList) of
+ {meta, Module, State} when erlang:is_atom(Module) ->
+ case erlang:module_loaded(Module) of
+ false ->
+ Module:enabled(trace_status, erlang:self(), State);
+ true ->
+ ok
+ end;
+ _ ->
+ ignore
+ end,
+
+ try erts_internal:trace_pattern(MFA, MatchSpec, FlagList) of
+ Res -> Res
+ catch E:R ->
+ {_, [_ | CST]} = erlang:process_info(
+ erlang:self(), current_stacktrace),
+ erlang:raise(
+ E, R, [{?MODULE, trace_pattern, [MFA, MatchSpec, FlagList], []} | CST])
+ end.
%% Shadowed by erl_bif_types: erlang:tuple_to_list/1
-spec tuple_to_list(Tuple) -> [term()] when
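With these wrappers in place both entry points accept the new three-element
tracer specifications. A usage sketch, reusing the illustrative my_tracer
module from above (the state term, here an empty map, is passed through to the
callbacks untouched):

    %% Trace send events on the current process through a tracer module.
    1 = erlang:trace(self(), true, [send, {tracer, my_tracer, #{}}]),

    %% Meta-trace a function through the same tracer module.
    1 = erlang:trace_pattern({lists, seq, 2}, true, [{meta, my_tracer, #{}}]).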
diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl
index 769757ba75..2459ea2a2c 100644
--- a/erts/preloaded/src/erts_internal.erl
+++ b/erts/preloaded/src/erts_internal.erl
@@ -55,7 +55,9 @@
-export([await_microstate_accounting_modifications/3,
gather_microstate_accounting_result/2]).
-%% Auto-import name clash
+-export([trace/3, trace_pattern/3]).
+
+%% Auto-import name clash
-export([check_process_code/2]).
%%
@@ -403,3 +405,28 @@ microstate_accounting(Ref, Threads) ->
{Ref, Res} ->
[Res | microstate_accounting(Ref, Threads - 1)]
end.
+
+-spec trace(PidPortSpec, How, FlagList) -> integer() when
+ PidPortSpec :: pid() | port()
+ | all | processes | ports
+ | existing | existing_processes | existing_ports
+ | new | new_processes | new_ports,
+ How :: boolean(),
+ FlagList :: [].
+trace(_PidSpec, _How, _FlagList) ->
+ erlang:nif_error(undefined).
+
+-type trace_pattern_mfa() ::
+ {atom(),atom(),arity() | '_'} | on_load.
+-type trace_match_spec() ::
+ [{[term()] | '_' ,[term()],[term()]}].
+
+-spec trace_pattern(MFA, MatchSpec, FlagList) -> non_neg_integer() when
+ MFA :: trace_pattern_mfa(),
+ MatchSpec :: (MatchSpecList :: trace_match_spec())
+ | boolean()
+ | restart
+ | pause,
+ FlagList :: [ ].
+trace_pattern(_MFA, _MatchSpec, _FlagList) ->
+ erlang:nif_error(undefined).
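erts_internal:trace/3 and erts_internal:trace_pattern/3 are the actual BIFs;
the erlang module wrappers shown earlier first make sure the tracer module is
loaded (calling enabled/3 triggers its on_load hook) and then rethrow any error
with a stack frame that points at erlang:trace/3 rather than at the internal
BIF. The load check amounts to the following (ensure_tracer_loaded is an
illustrative name; the wrappers inline this logic):

    ensure_tracer_loaded({tracer, Mod, State}) when is_atom(Mod) ->
        case erlang:module_loaded(Mod) of
            false -> Mod:enabled(trace_status, self(), State); % triggers on_load
            true  -> ok
        end;
    ensure_tracer_loaded(_Other) ->
        ignore.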
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index 5d5d2f8012..77684751c8 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -179,6 +179,10 @@ stop(Status) -> init ! {stop,{stop,Status}}, ok.
boot(BootArgs) ->
register(init, self()),
process_flag(trap_exit, true),
+
+ %% Load the tracer nif
+ erl_tracer:on_load(),
+
{Start0,Flags,Args} = parse_boot_args(BootArgs),
Start = map(fun prepare_run_args/1, Start0),
boot(Start, Flags, Args).