From 84adefa331c4159d432d22840663c38f155cd4c1 Mon Sep 17 00:00:00 2001 From: Erlang/OTP Date: Fri, 20 Nov 2009 14:54:40 +0000 Subject: The R13B03 release. --- lib/et/doc/src/et_collector.xml | 374 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 374 insertions(+) create mode 100644 lib/et/doc/src/et_collector.xml (limited to 'lib/et/doc/src/et_collector.xml') diff --git a/lib/et/doc/src/et_collector.xml b/lib/et/doc/src/et_collector.xml new file mode 100644 index 0000000000..01ca7caa5b --- /dev/null +++ b/lib/et/doc/src/et_collector.xml @@ -0,0 +1,374 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + et_collector + Håkan Mattsson + Håkan Mattsson + + Håkan Mattsson + + + %VSN% +
+ et_collector + Collect trace events and provide a backing storage appropriate for iteration + +

Interface module for the Event Trace (ET) application

+
+ + + start_link(Options) -> {ok, CollectorPid} | {error, Reason} + Start a collector process + + Options = [option()] + option() = {parent_pid, pid()} | {event_order, event_order()} | {dict_insert, {filter, collector}, collector_fun()} | {dict_insert, {filter, event_filter_name()}, event_filter_fun()} | {dict_insert, {subscriber, pid()}, dict_val()} | {dict_insert, dict_key(), dict_val()} | {dict_delete, dict_key()} | {trace_client, trace_client()} | {trace_global, boolean()} | {trace_pattern, trace_pattern()} | {trace_port, integer()} | {trace_max_queue, integer()} + event_order() = trace_ts | event_ts + trace_pattern() = {report_module(), extended_dbg_match_spec()} | undefined + report_module() = atom() | undefined + extended_dbg_match_spec() = detail_level() | dbg_match_spec() + detail_level() = min | max | integer(X) when X >= 0, X =< 100 + trace_client() = {event_file, file_name()} | {dbg_trace_type(), dbg_trace_parameters()} + file_name() = string() + collector_fun() = trace_filter_fun() | event_filter_fun() + trace_filter_fun() = fun(TraceData) -> false | true | {true, NewEvent} + event_filter_fun() = fun(Event) -> false | true | {true, NewEvent} + event_filter_name() = atom() + TraceData = erlang_trace_data() + Event = NewEvent = record(event) + dict_key() = term() + dict_val() = term() + CollectorPid = pid() + Reason = term() + + 

Start a collector process.

+

The collector collects trace events and keeps them ordered by their + timestamp. The timestamp may either reflect the time when the + actual trace data was generated (trace_ts) or when the trace data + was transformed into an event record (event_ts). If the time stamp + is missing in the trace data (missing timestamp option to + erlang:trace/4) the trace_ts will be set to the event_ts.

+

Events are reported to the collector directly with the report + function or indirectly via one or more trace clients. All reported + events are first filtered through the collector filter before they are + stored by the collector. By replacing the default collector filter + with a customized ditto it is possible to allow any trace data as + input. The collector filter is a dictionary entry with the + predefined key {filter, collector} and the value is a fun of + arity 1. See et_selector:make_event/1 for interface details, + such as which erlang:trace/1 tuples are accepted.

+

The collector has a built-in dictionary service. Any term may be + stored as value in the dictionary and bound to a unique key. When + new values are inserted with an existing key, the new values will + overwrite the existing ones. Processes may subscribe on dictionary + updates by using {subscriber, pid()} as dictionary key. All + dictionary updates will be propagated to the subscriber processes + matching the pattern {{subscriber, '_'}, '_'} where the first '_' + is interpreted as a pid().

+

In global trace mode, the collector will automatically start + tracing on all connected Erlang nodes. When a node connects, a port + tracer will be started on that node and a corresponding trace + client on the collector node. By default the global trace pattern + is 'max'.

+

Default values:

+ + parent_pid - self(). + event_order - trace_ts. + trace_global - false. + trace_pattern - undefined. + trace_port - 4711. + trace_max_queue - 50. + +
+
+ + stop(CollectorPid) -> ok + Stop a collector process + + CollectorPid = pid() + + +

Stop a collector process.

+
+
+ + save_event_file(CollectorPid, FileName, Options) -> ok | {error, Reason} + Save the events to a file + + CollectorPid = pid() + FileName = string() + Options = [option()] + Reason = term() + option() = event_option() | file_option() | table_option() + event_option() = existing + file_option() = write | append + table_option() = keep | clear + + +

Save the events to a file.

+

By default the currently stored events (existing) are + written to a brand new file (write) and the events are + kept (keep) after they have been written to the file.

+

Instead of keeping the events after writing them to file, + it is possible to remove all stored events after they + have been successfully written to file (clear).

+

The options defaults to existing, write and keep.

+
+
+ + load_event_file(CollectorPid, FileName) -> {ok, BadBytes} | exit(Reason) + Load the event table from a file + + CollectorPid = pid() + FileName = string() + BadBytes = integer(X) where X >= 0 + Reason = term() + + +

Load the event table from a file.

+
+
+ + report(Handle, TraceOrEvent) -> {ok, Continuation} | exit(Reason) + report_event(Handle, DetailLevel, FromTo, Label, Contents) -> {ok, Continuation} | exit(Reason) + report_event(Handle, DetailLevel, From, To, Label, Contents) -> {ok, Continuation} | exit(Reason) + Report an event to the collector + + Handle = Initial | Continuation + Initial = collector_pid() + collector_pid() = pid() + Continuation = record(table_handle) + TraceOrEvent = record(event) | dbg_trace_tuple() | end_of_trace + Reason = term() + DetailLevel = integer(X) when X >= 0, X =< 100 + From = actor() + To = actor() + FromTo = actor() + Label = atom() | string() | term() + Contents = [{Key, Value}] | term() + actor() = term() + + 

Report an event to the collector.

+

All events are filtered through the collector filter, which + optionally may transform or discard the event. The first + call should use the pid of the collector process as + report handle, while subsequent calls should use the + table handle.

+
+
+ + make_key(Type, Stuff) -> Key + Make a key out of an event record or an old key + + Type = record(table_handle) | trace_ts | event_ts + Stuff = record(event) | Key + Key = record(event_ts) | record(trace_ts) + + +

Make a key out of an event record or an old key.

+
+
+ + get_table_handle(CollectorPid) -> Handle + Return a table handle + + CollectorPid = pid() + Handle = record(table_handle) + + +

Return a table handle.

+
+
+ + get_global_pid() -> CollectorPid | exit(Reason) + Return the identity of the globally registered collector if there is any + + CollectorPid = pid() + Reason = term() + + 

Return the identity of the globally registered + collector if there is any.

+
+
+ + change_pattern(CollectorPid, RawPattern) -> {old_pattern, TracePattern} + Change active trace pattern globally on all trace nodes + + CollectorPid = pid() + RawPattern = {report_module(), extended_dbg_match_spec()} + report_module() = atom() | undefined + extended_dbg_match_spec() = detail_level() | dbg_match_spec() + RawPattern = detail_level() + detail_level() = min | max | integer(X) when X >= 0, X =< 100 + TracePattern = {report_module(), dbg_match_spec()} + + 

Change active trace pattern globally on all trace nodes.

+
+
+ + dict_insert(CollectorPid, {filter, collector}, FilterFun) -> ok + dict_insert(CollectorPid, {subscriber, SubscriberPid}, Void) -> ok + dict_insert(CollectorPid, Key, Val) -> ok + Insert a dictionary entry and send a {et, {dict_insert, Key, Val}} tuple to all registered subscribers. + + CollectorPid = pid() + FilterFun = filter_fun() + SubscriberPid = pid() + Void = term() + Key = term() + Val = term() + + +

Insert a dictionary entry + and send a {et, {dict_insert, Key, Val}} tuple + to all registered subscribers.

+

If the entry is a new subscriber, it will imply that + the new subscriber process first will get one message + for each already stored dictionary entry, before it + and all old subscribers will get this particular entry. + The collector process links to and then supervises the + subscriber process. If the subscriber process dies it + will imply that it gets unregistered as with a normal + dict_delete/2.

+
+
+ + dict_lookup(CollectorPid, Key) -> [Val] + Lookup a dictionary entry and return zero or one value + + CollectorPid = pid() + FilterFun = filter_fun() + CollectorPid = pid() + Key = term() + Val = term() + + +

Lookup a dictionary entry and return zero or one value.

+
+
+ + dict_delete(CollectorPid, Key) -> ok + Delete a dictionary entry and send a {et, {dict_delete, Key}} tuple to all registered subscribers. + + CollectorPid = pid() + SubscriberPid = pid() + Key = {subscriber, SubscriberPid} | term() + + +

Delete a dictionary entry + and send a {et, {dict_delete, Key}} tuple + to all registered subscribers.

+

If the deleted entry is a registered subscriber, it will + imply that the subscriber process gets unregistered as + subscriber, as well as it gets its final message.

+
+
+ + dict_match(CollectorPid, Pattern) -> [Match] + Match some dictionary entries + + CollectorPid = pid() + Pattern = '_' | {key_pattern(), val_pattern()} + key_pattern() = ets_match_object_pattern() + val_pattern() = ets_match_object_pattern() + Match = {key(), val()} + key() = term() + val() = term() + + +

Match some dictionary entries

+
+
+ + multicast(_CollectorPid, Msg) -> ok + Sends a message to all registered subscribers + + CollectorPid = pid() + CollectorPid = pid() + Msg = term() + + +

Sends a message to all registered subscribers.

+
+
+ + start_trace_client(CollectorPid, Type, Parameters) -> file_loaded | {trace_client_pid, pid()} | exit(Reason) + Load raw Erlang trace from a file, port or process. + + Type = dbg_trace_client_type() + Parameters = dbg_trace_client_parameters() + Pid = dbg_trace_client_pid() + + +

Load raw Erlang trace from a file, port or process.

+
+
+ + iterate(Handle, Prev, Limit) -> NewAcc + Iterates over the currently stored events + +

Short for iterate(Handle, Prev, Limit, undefined, Prev) -> NewAcc

+
+
+ + iterate(Handle, Prev, Limit, Fun, Acc) -> NewAcc + Iterate over the currently stored events + + Handle = collector_pid() | table_handle() + Prev = first | last | event_key() + Limit = done() | forward() | backward() + collector_pid() = pid() + table_handle() = record(table_handle) + event_key() = record(event) | record(event_ts) | record(trace_ts) + done() = 0 + forward() = infinity | integer(X) where X > 0 + backward() = '-infinity' | integer(X) where X < 0 + Fun = fun(Event, Acc) -> NewAcc + Acc = NewAcc = term() + + 

Iterate over the currently stored events.

+

Iterates over the currently stored events and applies + a function for each event. The iteration may be performed + forwards or backwards and may be limited to a maximum + number of events (abs(Limit)).

+
+
+ + clear_table(Handle) -> ok + Clear the event table + + Handle = collector_pid() | table_handle() + collector_pid() = pid() + table_handle() = record(table_handle) + + +

Clear the event table.

+
+
+
+ +
+ + -- cgit v1.2.3