Diffstat (limited to 'lib/kernel')
65 files changed, 4412 insertions, 2640 deletions
diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile index 82869d7b15..29dc73a523 100644 --- a/lib/kernel/doc/src/Makefile +++ b/lib/kernel/doc/src/Makefile @@ -42,6 +42,7 @@ XML_REF3_FILES = application.xml \ disk_log.xml \ erl_boot_server.xml \ erl_ddll.xml \ + erl_epmd.xml \ erl_prim_loader_stub.xml \ erlang_stub.xml \ error_handler.xml \ diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml index fdb2d29f63..8850c1736b 100644 --- a/lib/kernel/doc/src/config.xml +++ b/lib/kernel/doc/src/config.xml @@ -37,10 +37,10 @@ data in the system configuration file <c>Name.config</c>.</p> <p>Configuration parameter values in the configuration file override the values in the application resource files (see - <seealso marker="app"><c>app(4)</c></seealso>. + <seealso marker="app"><c>app(4)</c></seealso>). The values in the configuration file can be overridden by command-line flags (see - <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>.</p> + <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>).</p> <p>The value of a configuration parameter is retrieved by calling <c>application:get_env/1,2</c>.</p> </description> diff --git a/lib/kernel/doc/src/erl_epmd.xml b/lib/kernel/doc/src/erl_epmd.xml new file mode 100644 index 0000000000..8b076cd2d7 --- /dev/null +++ b/lib/kernel/doc/src/erl_epmd.xml @@ -0,0 +1,104 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2018</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>erl_epmd</title> + <prepared>Timmo Verlaan</prepared> + <docno>1</docno> + <date>2018-02-19</date> + <rev>A</rev> + </header> + <module>erl_epmd</module> + <modulesummary> + Erlang interface towards epmd + </modulesummary> + <description> + <p>This module communicates with the EPMD daemon, see <seealso + marker="erts:epmd">epmd</seealso>. To implement your own epmd module please + see <seealso marker="erts:alt_disco">ERTS User's Guide: How to Implement an + Alternative Service Discovery for Erlang Distribution</seealso></p> + </description> + + <funcs> + <func> + <name name="start_link" arity="0"/> + <fsummary>Callback for erl_distribution supervisor.</fsummary> + <desc> + <p>This function is invoked as this module is added as a child of the + <c>erl_distribution</c> supervisor.</p> + </desc> + </func> + + <func> + <name name="register_node" arity="2"/> + <name name="register_node" arity="3"/> + <fsummary>Registers the node with <c>epmd</c>.</fsummary> + <desc> + <p>Registers the node with <c>epmd</c> and tells epmd what port will be + used for the current node. It returns a creation number. 
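For orientation alongside the erl_epmd documentation added above: a custom discovery module exporting the same callbacks could look roughly like the sketch below. The module name my_epmd, the fixed port, and the returned creation number are invented illustration values, not part of OTP.

-module(my_epmd).
-export([start_link/0, register_node/3, port_please/2, address_please/3, names/1]).

%% No registration process is needed in this sketch.
start_link() ->
    ignore.

%% Pretend the node was registered and hand back a creation number (1..3).
register_node(_Name, _Port, _Family) ->
    {ok, rand:uniform(3)}.

%% Claim every node listens on one fixed, made-up port;
%% 5 is the distribution protocol version.
port_please(_Name, _Host) ->
    {port, 12345, 5}.

%% Resolve the host name to an IP address.
address_please(_Name, Host, AddressFamily) ->
    inet:getaddr(Host, AddressFamily).

%% No name listing is available in this sketch.
names(_Host) ->
    {error, address}.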
This number is + incremented on each register to help with identifying if a node is + reconnecting to epmd.</p> + </desc> + </func> + + <func> + <name name="port_please" arity="2"/> + <name name="port_please" arity="3"/> + <fsummary>Returns the port number for a given node.</fsummary> + <desc> + <p>Requests the distribution port for the given node of an EPMD + instance. Together with the port it returns a distribution protocol + version which has been 5 since Erlang/OTP R6.</p> + </desc> + </func> + + <func> + <name name="address_please" arity="3"/> + <fsummary>Returns address and port.</fsummary> + <desc> + <p>Called by the distribution module. Resolves the <c>Host</c> to an IP + address.</p> + <p>Another epmd module may return port and distribution protocol version + as well.</p> + </desc> + </func> + + <func> + <name name="names" arity="1"/> + <fsummary>Names of Erlang nodes at a host.</fsummary> + <desc> + <p>Called by <seealso marker="net_adm"><c>net_adm:names/0</c></seealso>. + <c>Host</c> defaults to the localhost. Returns the names and associated + port numbers of the Erlang nodes that <c>epmd</c> registered at the + specified host. Returns <c>{error, address}</c> if <c>epmd</c> is not + operational.</p> + <p><em>Example:</em></p> + <pre> +(arne@dunn)1> <input>erl_epmd:names(localhost).</input> +{ok,[{"arne",40262}]}</pre> + </desc> + </func> + </funcs> + +</erlref> + diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml index cb6165c73e..f418aa5bbe 100644 --- a/lib/kernel/doc/src/error_logger.xml +++ b/lib/kernel/doc/src/error_logger.xml @@ -33,44 +33,35 @@ <description> <note> - <p>In OTP-21, a new API for logging was added to Erlang/OTP. The + <p>In Erlang/OTP 21.0, a new API for logging was added. The old <c>error_logger</c> module can still be used by legacy - code, but new code should use the new API instead.</p> + code, but log events are redirected to the new Logger API. New + code should use the Logger API directly.</p> + <p><c>error_logger</c> is no longer started by default, but is + automatically started when an event handler is added + with <c>error_logger:add_report_handler/1,2</c>. The <c>error_logger</c> + module is then also added as a handler to the new logger.</p> <p>See <seealso marker="logger"><c>logger(3)</c></seealso> and the <seealso marker="logger_chapter">Logging</seealso> chapter - in the user's guide for more information.</p> + in the User's Guide for more information.</p> </note> <p>The Erlang <em>error logger</em> is an event manager (see <seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>), - registered as <c>error_logger</c>. Errors, warnings, and info events - are sent to the error logger from the Erlang runtime system and - the different Erlang/OTP applications. The events are, by default, - logged to the terminal. Notice that an event from a process <c>P</c> is - logged at the node of the group leader of <c>P</c>. This means - that log output is directed to the node from which a process was - created, which not necessarily is the same node as where it is - executing.</p> - <p>Initially, <c>error_logger</c> has only a primitive event - handler, which buffers and prints the raw event messages. During - system startup, the Kernel application replaces this with a - <em>standard event handler</em>, by default one that writes - nicely formatted output to the terminal. 
Kernel can also be - configured so that events are logged to a file instead, or not logged at all, - see <seealso marker="kernel_app"><c>kernel(6)</c></seealso>.</p> - <p>Also the SASL application, if started, adds its own event - handler, which by default writes supervisor, crash, and progress - reports to the terminal. See - <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>.</p> - <p>It is recommended that user-defined applications report - errors through the error logger to get uniform reports. - User-defined event handlers can be added to handle application-specific - events, see - <seealso marker="#add_report_handler/1"><c>add_report_handler/1,2</c></seealso>. - Also, a useful event handler is provided in STDLIB for multi-file - logging of events, see - <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>.</p> + registered as <c>error_logger</c>.</p> + <p>Error logger is no longer started by default, but is + automatically started when an event handler is added + with <seealso marker="#add_report_handler/1"> + <c>add_report_handler/1,2</c></seealso>. The <c>error_logger</c> + module is then also added as a handler to the new logger, + causing log events to be forwarded from logger to error logger, + and consequently to all installed error logger event + handlers.</p> + <p>User-defined event handlers can be added to handle application-specific + events.</p> + <p>Existing event handlers provided by STDLIB and SASL are still + available, but are no longer used by OTP.</p> <p>Warning events were introduced in Erlang/OTP R9C and are enabled by default as from Erlang/OTP 18.0. To retain backwards compatibility with existing user-defined event handlers, the warning events can be @@ -99,6 +90,9 @@ The function returns <c>ok</c> if successful.</p> <p>The event handler must be able to handle the events in this module, see section <seealso marker="#events">Events</seealso>.</p> + <p>The first time this function is called, + <c>error_logger</c> is added as a Logger handler, and + the <c>error_logger</c> process is started.</p> </desc> </func> <func> @@ -108,37 +102,40 @@ <p>Deletes an event handler from the error logger by calling <c>gen_event:delete_handler(error_logger, <anno>Handler</anno>, [])</c>, see <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>.</p> + <p>If no more event handlers exist after the deletion, + <c>error_logger</c> is removed as a Logger handler, and + the <c>error_logger</c> process is stopped.</p> </desc> </func> <func> <name name="error_msg" arity="1"/> <name name="error_msg" arity="2"/> <name name="format" arity="2"/> - <fsummary>Send a standard error event to the error logger.</fsummary> + <fsummary>Log a standard error event.</fsummary> <desc> - <p>Sends a standard error event to the error logger. - The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard error event. The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. - The event is handled by the standard event handler.</p> + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>These functions are kept for backwards compatibility and + must not be used by new code. 
Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> -1> <input>error_logger:error_msg("An error occurred in ~p~n", [a_module]).</input> - -=ERROR REPORT==== 11-Aug-2005::14:03:19 === +1> <input>error_logger:error_msg("An error occurred in ~p", [a_module]).</input> +=ERROR REPORT==== 22-May-2018::11:18:43.376917 === An error occurred in a_module ok</pre> <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use - <seealso marker="#error_report/1"><c>error_report/1</c></seealso> - instead.</p> - </warning> - <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -146,79 +143,101 @@ ok</pre> </func> <func> <name name="error_report" arity="1"/> - <fsummary>Send a standard error report event to the error logger.</fsummary> + <fsummary>Log a standard error event.</fsummary> <desc> - <p>Sends a standard error report event to the error logger. - The event is handled by the standard event handler.</p> + <p>Log a standard error event. Error logger forwards the event + to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> 2> <input>error_logger:error_report([{tag1,data1},a_term,{tag2,data}]).</input> - -=ERROR REPORT==== 11-Aug-2005::13:45:41 === +=ERROR REPORT==== 22-May-2018::11:24:23.699306 === tag1: data1 a_term tag2: data ok 3> <input>error_logger:error_report("Serious error in my module").</input> - -=ERROR REPORT==== 11-Aug-2005::13:45:49 === +=ERROR REPORT==== 22-May-2018::11:24:45.972445 === Serious error in my module ok</pre> </desc> </func> <func> <name name="error_report" arity="2"/> - <fsummary>Send a user-defined error report event to the error logger.</fsummary> + <fsummary>Log a user-defined error event.</fsummary> <desc> - <p>Sends a user-defined error report event to the error logger. - An event handler to handle the event is supposed to have been - added. The event is ignored by the standard event handler.</p> + <p>Log a user-defined error event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> <p>It is recommended that <c><anno>Report</anno></c> follows the same structure as for <seealso marker="#error_report/1"><c>error_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. 
Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> <name name="get_format_depth" arity="0"/> <fsummary>Get the value of the Kernel application variable - <c>logger_format_depth</c>.</fsummary> + <c>error_logger_format_depth</c>.</fsummary> <desc> <p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the - value of - <seealso marker="kernel_app#logger_format_depth"> - logger_format_depth</seealso> + value of <c>error_logger_format_depth</c> in the Kernel application, if Depth is an integer. Otherwise, <c>unlimited</c> is returned.</p> - <p>For backwards compatibility, the value - of <c>error_logger_format_depth</c> is used - if <c>logger_format_depth</c> is not set.</p> + <note> + <p>The <c>error_logger_format_depth</c> variable + is <seealso marker="kernel_app#deprecated-configuration-parameters"> + deprecated</seealso> since + the <seealso marker="logger">Logger API</seealso> was + introduced in Erlang/OTP 21.0. The variable, and this + function, are kept for backwards compatibility since they + still might be used by legacy report handlers.</p> + </note> </desc> </func> <func> <name name="info_msg" arity="1"/> <name name="info_msg" arity="2"/> - <fsummary>Send a standard information event to the error logger.</fsummary> + <fsummary>Log a standard information event.</fsummary> <desc> - <p>Sends a standard information event to the error logger. - The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard information event. The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. The event is handled by the standard event handler.</p> + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> -1> <input>error_logger:info_msg("Something happened in ~p~n", [a_module]).</input> - -=INFO REPORT==== 11-Aug-2005::14:06:15 === +1> <input>error_logger:info_msg("Something happened in ~p", [a_module]).</input> +=INFO REPORT==== 22-May-2018::12:03:32.612462 === Something happened in a_module ok</pre> <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use <c>info_report/1</c> instead.</p> - </warning> - <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -226,37 +245,52 @@ ok</pre> </func> <func> <name name="info_report" arity="1"/> - <fsummary>Send a standard information report event to the error logger.</fsummary> + <fsummary>Log a standard information event.</fsummary> <desc> - <p>Sends a standard information report event to the error - logger. 
The event is handled by the standard event handler.</p> + <p>Log a standard information event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> 2> <input>error_logger:info_report([{tag1,data1},a_term,{tag2,data}]).</input> - -=INFO REPORT==== 11-Aug-2005::13:55:09 === +=INFO REPORT==== 22-May-2018::12:06:35.994440 === tag1: data1 a_term tag2: data ok 3> <input>error_logger:info_report("Something strange happened").</input> - -=INFO REPORT==== 11-Aug-2005::13:55:36 === +=INFO REPORT==== 22-May-2018::12:06:49.066872 === Something strange happened ok</pre> </desc> </func> <func> <name name="info_report" arity="2"/> - <fsummary>Send a user-defined information report event to the error logger.</fsummary> + <fsummary>Log a user-defined information event.</fsummary> <desc> - <p>Sends a user-defined information report event to the error - logger. An event handler to handle the event is supposed to - have been added. The event is ignored by the standard event - handler.</p> + <p>Log a user-defined information event. Error logger forwards + the event to Logger, including metadata that allows + backwards compatibility with legacy error logger event + handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> <p>It is recommended that <c><anno>Report</anno></c> follows the same structure as for <seealso marker="#info_report/1"><c>info_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> @@ -355,24 +389,27 @@ ok</pre> <func> <name name="warning_msg" arity="1"/> <name name="warning_msg" arity="2"/> - <fsummary>Send a standard warning event to the error logger.</fsummary> + <fsummary>Log a standard warning event.</fsummary> <desc> - <p>Sends a standard warning event to the error logger. - The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard warning event. The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. - The event is handled by the standard event handler. It is tagged - as an error, warning, or info, see + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler. 
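As a side-by-side sketch of the legacy call and its preferred replacement (the message text and variable names are arbitrary):

report_disk_space(Path) ->
    %% Legacy call, kept only for backwards compatibility:
    error_logger:warning_msg("low disk space on ~ts~n", [Path]),
    %% Preferred call through the Logger API from Erlang/OTP 21.0 on:
    logger:warning("low disk space on ~ts", [Path]).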
The log + level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> - <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use <c>warning_report/1</c> instead.</p> - </warning> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -380,24 +417,43 @@ ok</pre> </func> <func> <name name="warning_report" arity="1"/> - <fsummary>Send a standard warning report event to the error logger.</fsummary> + <fsummary>Log a standard warning event.</fsummary> <desc> - <p>Sends a standard warning report event to the error logger. - The event is handled by the standard event handler. It is - tagged as an error, warning, or info, see + <p>Log a standard warning event. Error logger forwards the event + to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler. The log + level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> <name name="warning_report" arity="2"/> - <fsummary>Send a user-defined warning report event to the error logger.</fsummary> + <fsummary>Log a user-defined warning event.</fsummary> <desc> - <p>Sends a user-defined warning report event to the error - logger. An event handler to handle the event is supposed to - have been added. The event is ignored by the standard event - handler. It is tagged as an error, warning, or info, - depending on the value of + <p>Log a user-defined warning event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> + <p>The log level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> + <p>It is recommended that <c><anno>Report</anno></c> follows the same + structure as for + <seealso marker="#warning_report/1"><c>warning_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. 
Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> </desc> </func> </funcs> @@ -461,8 +517,9 @@ ok</pre> <section> <title>See Also</title> <p><seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>, - <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso> - <seealso marker="kernel_app"><c>kernel(6)</c></seealso> + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>, + <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>, + <seealso marker="kernel_app"><c>kernel(6)</c></seealso>, <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p> </section> </erlref> diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml index 9552332948..e6a7962c5a 100644 --- a/lib/kernel/doc/src/inet.xml +++ b/lib/kernel/doc/src/inet.xml @@ -1149,7 +1149,7 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code> <seealso marker="gen_tcp#recv/2"><c>gen_tcp:recv/2</c></seealso> gets <c>{error, closed}</c>. In active mode, the controlling process receives a - <c>{tcp_close, Socket}</c> message, indicating that the + <c>{tcp_closed, Socket}</c> message, indicating that the peer has closed the connection.</p> <p>Setting this option to <c>true</c> allows you to distinguish between a connection that was closed normally, diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml index 6e6990ddda..2eadc70abf 100644 --- a/lib/kernel/doc/src/introduction_chapter.xml +++ b/lib/kernel/doc/src/introduction_chapter.xml @@ -46,7 +46,6 @@ <item>Start, stop, supervision, configuration, and distribution of applications</item> <item>Code loading</item> <item>Logging</item> - <item>Error logging</item> <item>Global name service</item> <item>Supervision of Erlang/OTP</item> <item>Communication with sockets</item> diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml index f96d946a5d..e2a6d30249 100644 --- a/lib/kernel/doc/src/kernel_app.xml +++ b/lib/kernel/doc/src/kernel_app.xml @@ -42,7 +42,6 @@ <item>Start, stop, supervision, configuration, and distribution of applications</item> <item>Code loading</item> <item>Logging</item> - <item>Error logging</item> <item>Global name service</item> <item>Supervision of Erlang/OTP</item> <item>Communication with sockets</item> @@ -122,21 +121,6 @@ application. For more information about configuration parameters, see file <seealso marker="app"><c>app(4)</c></seealso>.</p> <taglist> - <tag><c>browser_cmd = string() | {M,F,A}</c></tag> - <item> - <p>When pressing the <em>Help</em> button in a tool such as Debugger, - the help text (an HTML file <c>File</c>) is by default - displayed in a Netscape browser, which is required to be - operational. 
This parameter can be used to change the command for - how to display the help text if another browser than Netscape - is preferred, or if another platform than Unix or Windows is - used.</p> - <p>If set to a string <c>Command</c>, the command - <c>"Command File"</c> is evaluated using - <seealso marker="os#cmd/1"><c>os:cmd/1</c></seealso>.</p> - <p>If set to a module-function-args tuple, <c>{M,F,A}</c>, - the call <c>apply(M,F,[File|A])</c> is evaluated.</p> - </item> <tag><c>distributed = [Distrib]</c></tag> <item> <p>Specifies which applications that are distributed and on which @@ -180,140 +164,59 @@ <p>Permissions are described in <seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p> </item> - <tag><c>logger_dest = Value</c></tag> + <tag><marker id="logger"/><c>logger = [Config]</c></tag> <item> - <p><c>Value</c> is one of:</p> - <taglist> - <tag><c>tty</c></tag> - <item><p>Installs the standard handler, <seealso marker="logger_std_h"> - <c>logger_std_h(3)</c></seealso>, with <c>type</c> set - to <c>standard_io</c>. This is the default - option.</p></item> - <tag><c>{file, FileName}</c></tag> - <item><p>Installs the standard handler, <seealso marker="logger_std_h"> - <c>logger_std_h(3)</c></seealso>, with <c>type</c> set - to <c>{file, FileName}</c>, where <c>FileName</c> - is a string. The file is opened with encoding UTF-8.</p></item> - <tag><c>{disk_log, FileName}</c></tag> - <item><p>Installs the disk_log handler, <seealso marker="logger_disk_log_h"> - <c>logger_disk_log_h(3)</c></seealso>, with <c>file</c> set - to <c>FileName</c> (a string), and possibly other disk_log - parameters set by the environment variables - <c>logger_disk_log_type</c>, <c>logger_disk_log_maxfiles</c> and - <c>logger_disk_log_maxbytes</c>, - see <seealso marker="#disk_log_vars">below</seealso>. The - file is opened with encoding UTF-8.</p></item> - <tag><c>false</c></tag> - <item> - <p>No standard handler is installed, but - the initial, primitive handler is kept, printing - raw event messages to <c>tty</c>.</p> - </item> - <tag><c>silent</c></tag> - <item> - <p>No standard handler is started, and the initial, - primitive handler is removed.</p> - </item> - </taglist> + <p>Specifies how <seealso marker="logger"><c>logger</c></seealso> should be + configured.</p> + <p>For more details and examples, see the <seealso marker="logger_chapter#logger"> + Configuration</seealso> section in the <seealso marker="logger_chapter"> + Logger User's Guide</seealso>. + </p> </item> - <tag><c>logger_level = Level</c></tag> + <tag><marker id="logger_level"/><c>logger_level = Level</c></tag> <item> - <p><c>Value = emergency | alert | critical | error | warning | + <p><c>Level = emergency | alert | critical | error | warning | notice | info | debug</c></p> <p>This parameter specifies which log levels to log. The specified level, and all levels that are more severe, will be logged.</p> - <p>This configuration parameter is used both for the global - logger level, and for the standard handler started by - the Kernel application (see <c>logger_dest</c> variable above).</p> <p>The default value is <c>info</c>.</p> - </item> - <tag><marker id="disk_log_vars"/> - <c>logger_disk_log_type = halt | wrap</c></tag> - <item/> - <tag><c>logger_disk_log_maxfiles = integer()</c></tag> - <item/> - <tag><c>logger_disk_log_maxbytes = integer()</c></tag> - <item> - <p>If <c>logger_dest</c> is set to {disk_log,File}, then these - parameters specify the configuration to use when opening the - disk log file. 
They specify the type of disk log, the - maximum number of files (if the type is wrap) and the - maximum size of each file, respectively.</p> - <p>The default values are:</p> - <code> -logger_disk_log_type = wrap -logger_disk_log_maxfiles = 10 -logger_disk_log_maxbytes = 1048576</code> + <p>To change the global log level at run-time, use + <seealso marker="logger#set_logger_config/2"> + <c>logger:set_logger_config(level, error)</c></seealso>.</p> </item> <tag><marker id="logger_sasl_compatible"/> <c>logger_sasl_compatible = boolean()</c></tag> <item> - <p>If this parameter is set to true, then the logger handler - started by kernel will not log any progress-, crash-, or - supervisor reports. If the SASL application is started, + <p>If this parameter is set to true, then the <c>default</c> logger handler + will not log any progress-, crash-, or supervisor reports. + If the SASL application is started, these log events will be sent to a second handler instance - named <c>sasl_h</c>, according to values of the SASL - environment variables <c>sasl_error_logger</c> + named <c>sasl</c>, according to values of the SASL + configuration parameter <c>sasl_error_logger</c> and <c>sasl_errlog_type</c>, see - <seealso marker="sasl:sasl_app#configuration">SASL(6) + <seealso marker="sasl:sasl_app#deprecated_error_logger_config">sasl(6) </seealso></p> <p>The default value is <c>false</c>.</p> <p>See chapter <seealso marker="logger_chapter#compatibility">Backwards compatibility with error_logger</seealso> for more information about handling of the so called SASL reports.</p> + <note><p>This configuration option only effects the <c>default</c> + and <c>sasl</c> handler. Any other handlers are uneffected.</p></note> </item> - <tag><marker id="logger_log_progress"/> - <c>logger_log_progress = boolean()</c></tag> + <tag><marker id="logger_progress_reports"/> + <c>logger_progress_reports = stop | log</c></tag> <item> <p>If <c>logger_sasl_compatible = false</c>, - then <c>logger_log_progress</c> specifies if progress + then <c>logger_progress_reports</c> specifies if progress reports from <c>supervisor</c> - and <c>application_controller</c> shall be logged or - not.</p> + and <c>application_controller</c> shall be logged by the + default logger.</p> <p>If <c>logger_sasl_compatible = true</c>, - then <c>logger_log_progress</c> is ignored.</p> - </item> - <tag><marker id="logger_format_depth"/> - <c>logger_format_depth = Depth</c></tag> - <item> - <p>Can be used to limit the size of the - formatted output from the logger handlers.</p> - - <p><c>Depth</c> is a positive integer representing the maximum - depth to which terms are printed by the logger - handlers included in OTP. This - configuration parameter is used by the default formatter, - <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>, - unless the formatter's <c>depth</c> parameter is explicitly set. - (If you have implemented your own formatter, this configuration - parameter has no effect on that.)</p> - - <p><c>Depth</c> is used as follows: Format strings - received by the formatter are rewritten. - The format controls <c>~p</c> and <c>~w</c> are replaced with - <c>~P</c> and <c>~W</c>, respectively, and <c>Depth</c> is - used as the depth parameter. For details, see - <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB.</p> - - <note><p>A reasonable starting value for <c>Depth</c> is - <c>30</c>. 
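The effect of the depth rewriting described here can be previewed directly in the shell; the list length and depth value below are arbitrary:

%% ~p prints the whole term, ~P stops at the given depth:
io:format("~p~n", [lists:seq(1, 100)]),
io:format("~P~n", [lists:seq(1, 100), 10]).  %% prints a short prefix followed by |...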
We recommend to test crashing various processes in your - application, examine the logs from the crashes, and then - increase or decrease the value.</p></note> - </item> - <tag><c>logger_max_size = integer() | unlimited</c></tag> - <item> - <p>This parameter specifies a hard maximum size limit (number - of characters) each log event can have when printed by the - default logger formatter. If the resulting string after - formatting an event is bigger than this, it will be - truncated before printed to the handler's destination.</p> - </item> - <tag><c>logger_utc = boolean()</c></tag> - <item> - <p>If set to <c>true</c>, the default formatter will display - all dates in Universal Coordinated Time.</p> + then <c>logger_progress_reports</c> is ignored.</p> + <p>The default value is <c>stop</c></p> + <note><p>This configuration option only effects the <c>default</c> + and <c>sasl</c> handler. Any other handlers are uneffected.</p></note> </item> <tag><c>global_groups = [GroupTuple]</c></tag> <item> @@ -367,7 +270,7 @@ logger_disk_log_maxbytes = 1048576</code> <tag><c>inet_parse_error_log = silent</c></tag> <item> <p>If set, no - <c>error_logger</c> messages are generated when erroneous + <c>logger</c> messages are generated when erroneous lines are found and skipped in the various Inet configuration files.</p> </item> @@ -578,18 +481,29 @@ MaxT = TickTime + TickTime / 4</code> <section> <title>Deprecated Configuration Parameters</title> - <p>In OTP-21, a new API for logging was added to Erlang/OTP. The + <p>In Erlang/OTP 21.0, a new API for logging was added. The old <c>error_logger</c> event manager, and event handlers - running on this manager, will still work, but they are not used + running on this manager, still work, but they are no longer used by default.</p> - <p>The following application environment variables can still be - set, but they will only be used if the corresponding new logger - variables are not set.</p> + <p>The following application configuration parameters can still be + set, but they are only used if the corresponding configuration + parameters for Logger are not set.</p> <taglist> <tag><c>error_logger</c></tag> - <item>Replaced by <c>logger_dest</c></item> + <item>Replaced by setting the type of the default + <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso> + to the same value. Example: + <code type="none"> +erl -kernel logger '[{handler,default,logger_std_h,#{logger_std_h=>#{type=>{file,"/tmp/erlang.log"}}}}]' + </code> + </item> <tag><c>error_logger_format_depth</c></tag> - <item>Replaced by <c>logger_format_depth</c></item> + <item>Replaced by setting the <seealso marker="logger_formatter#depth"><c>depth</c></seealso> + parameter of the default handlers formatter. 
Example: + <code type="none"> +erl -kernel logger '[{handler,default,logger_std_h,#{formatter=>{logger_formatter,#{legacy_header=>true,template=>[{logger_formatter,header},"\n",msg,"\n"],depth=>10}}}]' + </code> + </item> </taglist> <p>See <seealso marker="logger_chapter#compatibility">Backwards compatibility with error_logger</seealso> for more @@ -604,12 +518,12 @@ MaxT = TickTime + TickTime / 4</code> <seealso marker="disk_log"><c>disk_log(3)</c></seealso>, <seealso marker="erl_boot_server"><c>erl_boot_server(3)</c></seealso>, <seealso marker="erl_ddll"><c>erl_ddll(3)</c></seealso>, - <seealso marker="error_logger"><c>error_logger(3)</c></seealso>, <seealso marker="file"><c>file(3)</c></seealso>, <seealso marker="global"><c>global(3)</c></seealso>, <seealso marker="global_group"><c>global_group(3)</c></seealso>, <seealso marker="heart"><c>heart(3)</c></seealso>, <seealso marker="inet"><c>inet(3)</c></seealso>, + <seealso marker="logger"><c>logger(3)</c></seealso>, <seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>, <seealso marker="os"><c>os(3)</c></seealso>, <seealso marker="pg2"><c>pg2(3)</c></seealso>, diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml index d901454e62..911eb158da 100644 --- a/lib/kernel/doc/src/logger.xml +++ b/lib/kernel/doc/src/logger.xml @@ -33,10 +33,50 @@ <file>logger.xml</file> </header> <module>logger</module> - <modulesummary>API module for the logger application.</modulesummary> + <modulesummary>API module for logging in Erlang/OTP.</modulesummary> <description> - + <p> + This module is the main API for logging in Erlang/OTP. It + contains functions that allow applications to use a single log + API and the system to manage those log events independently. Use + the <seealso marker="#emergency-1">API functions</seealso> or the log + <seealso marker="#macros">macros</seealso> to log events. For instance, + to log a new error event:</p> + <code> +?LOG_ERROR("error happened because: ~p",[Reason]). %% With macro +logger:error("error happened because: ~p",[Reason]). %% Without macro + </code> + <p>This log event is then sent to the configured log handlers which + by default means that it is be printed to the console. If you want + your systems logs to be printed to a file instead of the console you + must configure the default handler to do so. The simplest way is + to include the following in your <seealso marker="config"><c>sys.config</c></seealso>.</p> + <code> +[{kernel, + [{logger, + [{handler,default,logger_std_h, + #{logger_std_h=>#{type=>{file,"path/to/file.log"}}}}]}]}]. 
+ </code> + <p> + For more information about: + </p> + <list type="bulleted"> + <item>how to use the API, + see <seealso marker="logger_chapter">the User's Guide</seealso>.</item> + <item>how to configure Logger, + see the <seealso marker="logger_chapter#configuration">Configuration</seealso> + section in the User's Guide.</item> + <item>the convinience macros in logger.hrl, + see <seealso marker="#macros">the macro section</seealso>.</item> + <item>what the builtin formatter can do, + see <seealso marker="logger_formatter">logger_formatter</seealso>.</item> + <item>what the builtin handlers can do, + see <seealso marker="logger_std_h">logger_std_h</seealso> and + <seealso marker="logger_disk_log_h">logger_disk_log_h</seealso>.</item> + <item>what builtin filters are available, + see <seealso marker="logger_filters">logger_filters</seealso>.</item> + </list> </description> <datatypes> @@ -47,7 +87,7 @@ </desc> </datatype> <datatype> - <name name="log"/> + <name name="log_event"/> <desc> <p></p> </desc> @@ -72,7 +112,7 @@ <list> <item><c>pid => self()</c></item> <item><c>gl => group_leader()</c></item> - <item><c>time => erlang:monotonic_time(microsecond)</c></item> + <item><c>time => erlang:system_time(microsecond)</c></item> </list> <p>When a log macro is used, Logger also inserts location information:</p> @@ -90,8 +130,31 @@ <c>update_process_metadata/1</c></seealso>.</p> <p>Logger merges all the metadata maps before forwarding the log event to the handlers. If the same keys occur, values - from the log call overwrites process metadata, which in turn - overwrites values set by Logger.</p> + from the log call overwrite process metadata, which in turn + overwrite values set by Logger.</p> + <p>The following custom metadata keys have special meaning:</p> + <taglist> + <tag><c>domain</c></tag> + <item> + <p>The value associated with this key is used by filters + for grouping log events originating from, for example, + specific functional + areas. See <seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso> + for a description of how this field can be used.</p> + </item> + <tag><c>report_cb</c></tag> + <item> + <p>If the log message is specified as + a <seealso marker="#type-report"><c>report()</c></seealso>, + the <c>report_cb</c> key can be associated with a fun + (report callback) that converts the report to a format + string and arguments. 
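A small sketch of that pattern; the report fields and the function name are invented for the example:

log_open_failure(File, Reason) ->
    Report = #{action => open, file => File, reason => Reason},
    ReportCb = fun(#{action := A, file := F, reason := R}) ->
                       {"~p failed for ~ts: ~p", [A, F, R]}
               end,
    logger:error(Report, #{report_cb => ReportCb}).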
See + section <seealso marker="logger_chapter#log_message">Log + Message</seealso> in the User's Guide for more + information about report callbacks.</p> + </item> + </taglist> </desc> </datatype> <datatype> @@ -105,7 +168,7 @@ <item><c>filters => []</c></item> <item><c>formatter => {logger_formatter,DefaultFormatterConfig</c>}</item> </list> - <p>See the <seealso marker="logger_formatter#configuration"> + <p>See the <seealso marker="logger_formatter#type-config"> <c>logger_formatter(3)</c></seealso> manual page for information about the default configuration for this formatter.</p> @@ -126,7 +189,8 @@ <datatype> <name name="filter"/> <desc> - <p>A filter which can be installed for logger or for a handler.</p> + <p>A filter which can be installed for the logger part of + Logger, or for a handler.</p> </desc> </datatype> <datatype> @@ -145,8 +209,17 @@ <name name="timestamp"/> <desc> <p>A timestamp produced - with <seealso marker="erts:erlang#monotonic_time-1"> - <c>erlang:monotonic_time(microsecond)</c></seealso>.</p> + with <seealso marker="erts:erlang#system_time-1"> + <c>erlang:system_time(microsecond)</c></seealso>.</p> + </desc> + </datatype> + <datatype> + <name name="formatter_config"/> + <desc> + <p>Configuration data for the + formatter. See <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + for an example of a formatter implementation.</p> </desc> </datatype> </datatypes> @@ -174,9 +247,9 @@ <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item> </list> - <p>All macros expand to a call to logger, where <c>Level</c> is - taken from the macro name, and location data is added. See the - description of + <p>All macros expand to a call to Logger, where <c>Level</c> is + taken from the macro name, and location data is added to the + metadata. 
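For example, a module using one of the macros only needs the header include; the module and function names here are invented:

-module(my_mod).
-export([notify_failure/1]).
-include_lib("kernel/include/logger.hrl").

notify_failure(Reason) ->
    %% Expands to a Logger call at level error, with mfa/file/line metadata added.
    ?LOG_ERROR("operation failed: ~p", [Reason]).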
See the description of the <seealso marker="#type-metadata"><c>metadata()</c></seealso> type for more information about the location data.</p> @@ -296,23 +369,26 @@ <func> <name name="get_logger_config" arity="0"/> - <fsummary>Lookup the current configuration for logger.</fsummary> + <fsummary>Look up the current configuration for the logger part + of Logger.</fsummary> <desc> - <p>Lookup the current configuration for logger.</p> + <p>Look up the current configuration for the logger part of + Logger.</p> </desc> </func> <func> <name name="get_handler_config" arity="1"/> - <fsummary>Lookup the current configuration for the given handler.</fsummary> + <fsummary>Look up the current configuration for the given + handler.</fsummary> <desc> - <p>Lookup the current configuration for the given handler.</p> + <p>Look up the current configuration for the given handler.</p> </desc> </func> <func> <name name="i" arity="0"/> - <fsummary>Get all logger configurations</fsummary> + <fsummary>Get all Logger configurations</fsummary> <desc> <p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p> </desc> @@ -322,42 +398,42 @@ <name name="i" arity="1" clause_i="1"/> <name name="i" arity="1" clause_i="2"/> <name name="i" arity="1" clause_i="3"/> - <fsummary>Get all logger configurations</fsummary> + <fsummary>Get all Logger configurations</fsummary> <desc> - <p>Display or return all current logger configuration.</p> + <p>Display or return all current Logger configurations.</p> <taglist> <tag><c><anno>Action</anno> = string</c></tag> <item> - <p>Return the pretty printed current logger configuration + <p>Return the pretty printed current Logger configuration as iodata.</p> </item> <tag><c><anno>Action</anno> = term</c></tag> <item> - <p>Return the current logger configuration as a term. The - format of this term may change inbetween releases. For a + <p>Return the current Logger configuration as a term. The + format of this term may change between releases. For a stable format use <seealso marker="#get_handler_config/1"> <c>logger:get_handler_config/1</c></seealso> and <seealso marker="#get_logger_config/0"> - <c>logger:get_logger_config/0</c></seealso>. - The same as calling <c>logger:i()</c>.</p> + <c>logger:get_logger_config/0</c></seealso>.</p> + <p>The same as calling <c>logger:i()</c>.</p> </item> <tag><c><anno>Action</anno> = print</c></tag> <item> - <p>Pretty print all the current logger configuration to + <p>Pretty print all the current Logger configuration to standard out. Example:</p> <code><![CDATA[1> logger:i(print). 
Current logger configuration: Level: info - FilterDefault: log + Filter Default: log Filters: Handlers: - Id: logger_std_h + Id: default Module: logger_std_h Level: info Formatter: Module: logger_formatter - Config: #{template => [{logger_formatter,header},"\n",msg,"\n"], - legacy_header => true} + Config: #{legacy_header => true,single_line => false, + template => [{logger_formatter,header},"\n",msg,"\n"]} Filter Default: stop Filters: Id: stop_progress @@ -368,10 +444,10 @@ Current logger configuration: Config: stop Id: domain Fun: fun logger_filters:domain/2 - Config: {log,prefix_of,[beam,erlang,otp,sasl]} + Config: {log,super,[beam,erlang,otp,sasl]} Id: no_domain Fun: fun logger_filters:domain/2 - Config: {log,no_domain,[]} + Config: {log,undefined,[]} Handler Config: logger_std_h: #{type => standard_io} Level set per module: @@ -384,21 +460,21 @@ Current logger configuration: <func> <name name="add_logger_filter" arity="2"/> - <fsummary>Add a filter to the logger.</fsummary> + <fsummary>Add a filter to the logger part of Logger.</fsummary> <desc> - <p>Add a filter to the logger.</p> + <p>Add a filter to the logger part of Logger.</p> <p>The filter fun is called with the log event as the first parameter, and the specified <c>filter_args()</c> as the second parameter.</p> <p>The return value of the fun specifies if a log event is to be discarded or forwarded to the handlers:</p> <taglist> - <tag><c>log()</c></tag> + <tag><c>log_event()</c></tag> <item> <p>The filter <em>passed</em>. The next logger filter, if any, is applied. If no more logger filters exist, the - log event is forwarded to the handler part of the - logger, where handler filters are applied.</p> + log event is forwarded to the handler part of Logger, + where handler filters are applied.</p> </item> <tag><c>stop</c></tag> <item> @@ -410,13 +486,13 @@ Current logger configuration: <p>The filter has no knowledge of the log event. The next logger filter, if any, is applied. If no more logger filters exist, the value of the <c>filter_default</c> - configuration parameter for the logger specifies if the - log event shall be discarded or forwarded to the handler - part.</p> + configuration parameter for the logger part specifies if + the log event shall be discarded or forwarded to the + handler part.</p> </item> </taglist> - <p>See section <seealso marker="logger_chapter#Filter"> - Filter</seealso> in the User's Guide for more information + <p>See section <seealso marker="logger_chapter#filters"> + Filters</seealso> in the User's Guide for more information about filters.</p> <p>Some built-in filters exist. These are defined in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> @@ -434,7 +510,7 @@ Current logger configuration: <p>The return value of the fun specifies if a log event is to be discarded or forwarded to the handler callback:</p> <taglist> - <tag><c>log()</c></tag> + <tag><c>log_event()</c></tag> <item> <p>The filter <em>passed</em>. The next handler filter, if any, is applied. If no more filters exist for this @@ -457,7 +533,7 @@ Current logger configuration: </item> </taglist> <p>See - section <seealso marker="logger_chapter#Filter">Filter</seealso> + section <seealso marker="logger_chapter#filters">Filters</seealso> in the User's Guide for more information about filters.</p> <p>Some built-in filters exist. 
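As an illustration of a custom handler filter, the sketch below drops any event whose message string contains a given word; the filter id and match word are invented:

Filter = fun(#{msg := {string, Text}} = LogEvent, Word) ->
                 case string:find(Text, Word) of
                     nomatch -> LogEvent;
                     _Found  -> stop
                 end;
            (_LogEvent, _Word) ->
                 ignore
         end,
ok = logger:add_handler_filter(default, drop_noise, {Filter, "heartbeat"}).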
These are defined in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> @@ -466,10 +542,10 @@ Current logger configuration: <func> <name name="remove_logger_filter" arity="1"/> - <fsummary>Remove a filter from the logger.</fsummary> + <fsummary>Remove a filter from the logger part of Logger.</fsummary> <desc> <p>Remove the filter identified - by <c><anno>FilterId</anno></c> from the logger.</p> + by <c><anno>FilterId</anno></c> from the logger part of Logger.</p> </desc> </func> @@ -489,7 +565,7 @@ Current logger configuration: <desc> <p>Add a handler with the given configuration.</p> <p><c><anno>HandlerId</anno></c> is a unique identifier which - must be used in all subsequent calls reffering to this + must be used in all subsequent calls referring to this handler.</p> </desc> </func> @@ -509,21 +585,21 @@ Current logger configuration: <p>Set the log level for the specified <c><anno>Module</anno></c>.</p> <p>The log level for a module overrides the global log level - of the logger for log event originating from the module in + of Logger for log events originating from the module in question. Notice, however, that it does not override the level configuration for any handler.</p> - <p>For example: Assume that the global log level for the - logger is <c>info</c>, and there is one handler, <c>h1</c>, - with level <c>info</c> and one handler, <c>h2</c>, with + <p>For example: Assume that the global log level for Logger + is <c>info</c>, and there is one handler, <c>h1</c>, with + level <c>info</c> and one handler, <c>h2</c>, with level <c>debug</c>.</p> <p>With this configuration, no debug messages will be logged, since they are all stopped by the global log level.</p> - <p>If the level for <c>mymodule</c> is set now set + <p>If the level for <c>mymodule</c> is now set to <c>debug</c>, then debug events from this module will be logged by the handler <c>h2</c>, but not by handler <c>h1</c>.</p> <p>Debug events from other modules are still not logged.</p> - <p>To change the global log level for the logger, use + <p>To change the global log level for Logger, use <seealso marker="#set_logger_config/2"> <c>logger:set_logger_config(level,Level)</c></seealso>.</p> <p>To change the log level for a handler, use @@ -533,15 +609,16 @@ Current logger configuration: <p>The originating module for a log event is only detected if <c>mfa=>{Module,Function,Arity}</c> exists in the metadata. When log macros are used, this association is - automatically added to all log events. If the logger API + automatically added to all log events. If an API function is called directly, without using a macro, the logging - client must explicitly add this information.</p> + client must explicitly add this information if module + levels shall have any effect.</p> </note> </desc> </func> <func> - <name name="reset_module_level" arity="1"/> + <name name="unset_module_level" arity="1"/> <fsummary>Remove a module specific log setting.</fsummary> <desc> <p>Remove a module specific log setting. 
After this, the @@ -550,17 +627,71 @@ Current logger configuration: </func> <func> + <name name="add_handlers" arity="1" clause_i="1"/> + <fsummary>Set up log handlers from the application's + configuration parameters.</fsummary> + <desc> + <p>Reads the application configuration parameter <c>logger</c> and + calls <c>logger:add_handlers/1</c> with its contents.</p> + </desc> + </func> + + <func> + <name name="add_handlers" arity="1" clause_i="2"/> + <fsummary>Setup logger handlers.</fsummary> + <type name="config_handler"/> + <desc> + <p>This function should be used by custom Logger handlers to make + configuration consistent no matter which handler the system uses. + Normal usage is to add a call to <c>logger:add_handlers/1</c> + just after the processes that the handler needs are started, + and pass the application's <c>logger</c> configuration as the argument. + For example:</p> + <code> +-behaviour(application). +start(_, []) -> + case supervisor:start_link({local, my_sup}, my_sup, []) of + {ok, Pid} -> + ok = logger:add_handlers(my_app), + {ok, Pid, []}; + Error -> Error + end.</code> + <p>This reads the <c>logger</c> configuration parameter from + the <c>my_all</c> application and starts the configured + handlers. The contents of the configuration use the same + rules as the + <seealso marker="logger_chapter#handler-configuration">logger handler configuration</seealso>. + </p> + <p>If the handler is meant to replace the default handler, the Kernel's + default handler have to be disabled before the new handler is added. + A <c>sys.config</c> file that disables the Kernel handler and adds + a custom handler could look like this:</p> + <code> +[{kernel, + [{logger, + %% Disable the default Kernel handler + [{handler,default,undefined}]}]}, + {my_app, + [{logger, + %% Enable this handler as the default + [{handler,default,my_handler,#{}}]}]}]. + </code> + </desc> + </func> + + <func> <name name="set_logger_config" arity="1"/> - <fsummary>Set configuration data for the logger.</fsummary> + <fsummary>Set configuration data for the logger part of Logger.</fsummary> <desc> - <p>Set configuration data for the logger. This overwrites the - current logger configuration.</p> + <p>Set configuration data for the logger part of Logger. This + overwrites the current logger configuration.</p> <p>To modify the existing configuration, - use <seealso marker="#set_logger_config-2"><c>set_logger_config/2</c> - </seealso>, or read the current configuration + use <seealso marker="#update_logger_config-1"> + <c>update_logger_config/1</c></seealso>, or, if a more + complex merge is needed, read the current configuration with <seealso marker="#get_logger_config-0"><c>get_logger_config/0</c> - </seealso>, then merge in your added or updated - associations before writing it back.</p> + </seealso>, then do the merge before writing the new + configuration back with this function.</p> <p>If a key is removed compared to the current configuration, the default value is used.</p> </desc> @@ -568,12 +699,32 @@ Current logger configuration: <func> <name name="set_logger_config" arity="2"/> - <fsummary>Add or update configuration data for the logger.</fsummary> + <fsummary>Add or update configuration data for the logger part + of Logger.</fsummary> <desc> - <p>Add or update configuration data for the logger. If the - given <c><anno>Key</anno></c> already exists, its associated - value will be changed to <c><anno>Value</anno></c>. 
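A few typical run-time adjustments, gathered as one sketch (the chosen values and module name are only examples):

%% Raise the global threshold so only error and more severe events pass:
ok = logger:set_logger_config(level, error),
%% ...while keeping debug events from one module of interest:
ok = logger:set_module_level(my_module, debug),
%% Merge a change into the default handler's configuration:
ok = logger:update_handler_config(default, #{level => notice}).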
If it - doesn't exist, it will be added.</p> + <p>Add or update configuration data for the logger part of + Logger. If the given <c><anno>Key</anno></c> already exists, + its associated value will be changed + to <c><anno>Value</anno></c>. If it does not exist, it will + be added.</p> + </desc> + </func> + + <func> + <name name="update_logger_config" arity="1"/> + <fsummary>Update configuration data for the logger part of + Logger.</fsummary> + <desc> + <p>Update configuration data for the logger part of + Logger. This function behaves as if it was implemented as + follows:</p> + <code type="erl"> +{ok,Old} = logger:get_logger_config(), +logger:set_logger_config(maps:merge(Old,Config)). + </code> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_logger_config-1"><c>set_logger_config/1</c> + </seealso>.</p> </desc> </func> @@ -584,13 +735,14 @@ Current logger configuration: <p>Set configuration data for the specified handler. This overwrites the current handler configuration.</p> <p>To modify the existing configuration, - use <seealso marker="#set_handler_config-3"><c>set_handler_config/3</c> - </seealso>, or read the current configuration + use <seealso marker="#update_handler_config-2"> + <c>update_handler_config/2</c></seealso>, or, if a more + complex merge is needed, read the current configuration with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c> - </seealso>, then merge in your added or updated - associations before writing it back.</p> + </seealso>, then do the merge before writing the new + configuration back with this function.</p> <p>If a key is removed compared to the current configuration, - and the key is know by Logger, the default value is used. If + and the key is known by Logger, the default value is used. If it is a custom key, then it is up to the handler implementation if the value is removed or a default value is inserted.</p> @@ -605,12 +757,53 @@ Current logger configuration: <p>Add or update configuration data for the specified handler. If the given <c><anno>Key</anno></c> already exists, its associated value will be changed - to <c><anno>Value</anno></c>. If it doesn't exist, it will + to <c><anno>Value</anno></c>. If it does not exist, it will be added.</p> </desc> </func> <func> + <name name="update_handler_config" arity="2"/> + <fsummary>Update configuration data for the specified handler.</fsummary> + <desc> + <p>Update configuration data for the specified handler. This function + behaves as if it was implemented as follows:</p> + <code type="erl"> +{ok,{_,Old}} = logger:get_handler_config(HandlerId), +logger:set_handler_config(HandlerId,maps:merge(Old,Config)). 
+ </code> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_handler_config-2"><c>set_handler_config/2</c> + </seealso>.</p> + </desc> + </func> + + <func> + <name name="update_formatter_config" arity="2"/> + <fsummary>Update the formatter configuration for the specified handler.</fsummary> + <desc> + <p>Update the formatter configuration for the specified handler.</p> + <p>The new configuration is merged with the existing formatter + configuration.</p> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_handler_config-3"> + <c>set_handler_config(HandlerId,formatter, + {FormatterModule,FormatterConfig})</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="update_formatter_config" arity="3"/> + <fsummary>Update the formatter configuration for the specified handler.</fsummary> + <desc> + <p>Update the formatter configuration for the specified handler.</p> + <p>This is equivalent + to <br/><seealso marker="#update_formatter_config-2"> + <c>update_formatter_config(<anno>HandlerId</anno>,#{<anno>Key</anno>=><anno>Value</anno>})</c></seealso></p> + </desc> + </func> + + <func> <name name="compare_levels" arity="2"/> <fsummary>Compare the severity of two log levels.</fsummary> <desc> @@ -650,7 +843,7 @@ Current logger configuration: <p>If process metadata exists for the current process, this function behaves as if it was implemented as follows:</p> <code type="erl"> -logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) +logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)). </code> <p>If no process metadata exists, the function behaves as <seealso marker="#set_process_metadata-1"> @@ -703,18 +896,17 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) </funcs> <section> - <title>Callback Functions</title> + <marker id="handler_callback_functions"/> + <title>Handler Callback Functions</title> <p>The following functions are to be exported from a handler callback module.</p> </section> <funcs> <func> - <name>Module:adding_handler(HandlerId,Config1) -> {ok,Config2} | {error,Reason}</name> + <name>HModule:adding_handler(Config1) -> {ok,Config2} | {error,Reason}</name> <fsummary>An instance of this handler is about to be added.</fsummary> <type> - <v>HandlerId = - <seealso marker="#type-handler_id">handler_id()</seealso></v> <v>Config1 = Config2 = <seealso marker="#type-config">config()</seealso></v> <v>Reason = term()</v> @@ -723,7 +915,9 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) <p>This callback function is optional.</p> <p>The function is called when an new handler is about to be added, and the purpose is to verify the configuration and - initiate all resourced needed by the handler.</p> + initiate all resources needed by the handler.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config1</c>.</p> <p>If everything succeeds, the callback function can add possible default values or internal state values to the configuration, and return the adjusted map @@ -734,28 +928,9 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) </func> <func> - <name>Module:removing_handler(HandlerId,Config) -> ok</name> - <fsummary>The given handler is about to be removed.</fsummary> - <type> - <v>HandlerId = - <seealso marker="#type-handler_id">handler_id()</seealso></v> - <v>Config = - <seealso marker="#type-config">config()</seealso></v> - </type> - <desc> - <p>This 
callback function is optional.</p> - <p>The function is called when a handler is about to be - removed, and the purpose is to release all resources used by - the handler. The return value is ignored by Logger.</p> - </desc> - </func> - - <func> - <name>Module:changing_config(HandlerId,Config1,Config2) -> {ok,Config3} | {error,Reason}</name> + <name>HModule:changing_config(Config1,Config2) -> {ok,Config3} | {error,Reason}</name> <fsummary>The configuration for this handler is about to change.</fsummary> <type> - <v>HandlerId = - <seealso marker="#type-handler_id">handler_id()</seealso></v> <v>Config1 = Config2 = Config3 = <seealso marker="#type-config">config()</seealso></v> <v>Reason = term()</v> @@ -767,12 +942,108 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) the new configuration.</p> <p><c>Config1</c> is the existing configuration and <c>Config2</c> is the new configuration.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config1</c>.</p> <p>If everything succeeds, the callback function must return a possibly adjusted configuration in <c>{ok,Config3}</c>.</p> <p>If the configuration is faulty, the callback function must return <c>{error,Reason}</c>.</p> </desc> </func> + + <func> + <name>HModule:log(LogEvent,Config) -> void()</name> + <fsummary>Log the given log event.</fsummary> + <type> + <v>LogEvent = + <seealso marker="#type-log_event">log_event()</seealso></v> + <v>Config = + <seealso marker="#type-config">config()</seealso></v> + </type> + <desc> + <p>This callback function is mandatory.</p> + <p>The function is called when all global filters and all + handler filters for the handler in question have passed for + the given log event.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config</c>.</p> + <p>The handler must log the event.</p> + <p>The return value from this function is ignored by + Logger.</p> + </desc> + </func> + + <func> + <name>HModule:removing_handler(Config) -> ok</name> + <fsummary>The given handler is about to be removed.</fsummary> + <type> + <v>Config = + <seealso marker="#type-config">config()</seealso></v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called when a handler is about to be + removed, and the purpose is to release all resources used by + the handler.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config</c>.</p> + <p>The return value is ignored by Logger.</p> + </desc> + </func> + + </funcs> + + <section> + <marker id="formatter_callback_functions"/> + <title>Formatter Callback Functions</title> + <p>The following functions are to be exported from a formatter + callback module.</p> + </section> + + <funcs> + <func> + <name>FModule:check_config(FConfig) -> ok | {error,term()}</name> + <fsummary>Validate the given formatter configuration.</fsummary> + <type> + <v>FConfig = + <seealso marker="#type-formatter_config">formatter_config()</seealso></v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called by a Logger when formatter + configuration is set or modified. The formatter must + validate the given configuration and return <c>ok</c> if it + is correct, and <c>{error,term()}</c> if it is faulty.</p> + <p>See <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + for an example implementation. 
<c>logger_formatter</c> is the + default formatter used by Logger.</p> + </desc> + </func> + <func> + <name>FModule:format(LogEvent,FConfig) -> FormattedLogEntry</name> + <fsummary>Format the given log event.</fsummary> + <type> + <v>LogEvent = + <seealso marker="#type-log_event">log_event()</seealso></v> + <v>FConfig = + <seealso marker="#type-formatter_config">formatter_config()</seealso></v> + <v>FormattedLogEntry = + <seealso marker="unicode#type-chardata">unicode:chardata()</seealso></v> + </type> + <desc> + <p>This callback function is mandatory.</p> + <p>The function can be called by a log handler to convert a + log event term to a printable string. The returned value + can, for example, be printed as a log entry to the console + or a file using <seealso marker="stdlib:io#put_chars-1"> + <c>io:put_chars/1,2</c></seealso>.</p> + <p>See <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + for an example implementation. <c>logger_formatter</c> is the + default formatter used by Logger.</p> + </desc> + </func> </funcs> </erlref> diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png Binary files differ index 727609a6ef..901122193a 100644 --- a/lib/kernel/doc/src/logger_arch.png +++ b/lib/kernel/doc/src/logger_arch.png diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml index 519df2ba48..a3eec7bd4b 100644 --- a/lib/kernel/doc/src/logger_chapter.xml +++ b/lib/kernel/doc/src/logger_chapter.xml @@ -30,368 +30,673 @@ <file>logger_chapter.xml</file> </header> + <p>Erlang/OTP 21.0 provides a new standard API for logging + through <c>Logger</c>, which is part of the Kernel + application. Logger consists of the API for issuing log events, + and a customizable backend where log handlers, filters and + formatters can be plugged in.</p> + <p>By default, the Kernel application installs one log handler at + system start. This handler is named <c>default</c>. It receives + and processes standard log events produced by the Erlang runtime + system, standard behaviours and different Erlang/OTP + applications. The log events are by default written to the + terminal.</p> + <p>You can also configure the system so that the default handler + prints log events to a single file, or to a set of wrap logs + via <seealso marker="disk_log"><c>disk_log</c></seealso>.</p> + <p>By configuration, you can also modify or disable the default + handler, replace it by a custom handler, and install additional + handlers.</p> + <section> <title>Overview</title> - <p>Erlang/OTP provides a standard API for logging. The backend of - this API can be used as is, or it can be customized to suite - specific needs.</p> - <p>It consists of two parts - the <em>logger</em> part and the - <em>handler</em> part. The logger will forward log events to one - or more handler(s).</p> + <p>A <em>log event</em> consists of a <em>log level</em>, the + <em>message</em> to be logged, and <em>metadata</em>.</p> + <p>The Logger backend forwards log events from the API, first + through a set of <em>global filters</em>, then through a set + of <em>handler filters</em> for each log handler.</p> + <p>Each filter set consists of a <em>log level check</em>, + followed by zero or more <em>filter functions</em>.</p> + <p>The following figure shows a conceptual overview of Logger.
The + figure shows two log handlers, but any number of handlers can be + installed.</p> <image file="logger_arch.png"> - <icaption>Conceptual overview</icaption> + <icaption>Conceptual Overview</icaption> </image> - <p><em>Filters</em> can be added to the logger and to each - handler. The filters decide if an event is to be forwarded or - not, and they can also modify all parts of the log event.</p> - - <p>A <em>formatter</em> can be set for each handler. The formatter - does the final formatting of the log event, including the log - message itself, and possibly a timestamp, header and other - metadata.</p> - - <p>In accordance with the Syslog protocol, RFC-5424, eight - severity levels can be specified:</p> - - <table align="left"> - <row> - <cell><strong>Level</strong></cell> - <cell align="center"><strong>Integer</strong></cell> - <cell><strong>Description</strong></cell> - </row> - <row> - <cell>emergency</cell> - <cell align="center">0</cell> - <cell>system is unusable</cell> - </row> - <row> - <cell>alert</cell> - <cell align="center">1</cell> - <cell>action must be taken immediately</cell> - </row> - <row> - <cell>critical</cell> - <cell align="center">2</cell> - <cell>critical contidions</cell> - </row> - <row> - <cell>error</cell> - <cell align="center">3</cell> - <cell>error conditions</cell> - </row> - <row> - <cell>warning</cell> - <cell align="center">4</cell> - <cell>warning conditions</cell> - </row> - <row> - <cell>notice</cell> - <cell align="center">5</cell> - <cell>normal but significant conditions</cell> - </row> - <row> - <cell>info</cell> - <cell align="center">6</cell> - <cell>informational messages</cell> - </row> - <row> - <cell>debug</cell> - <cell align="center">7</cell> - <cell>debug-level messages</cell> - </row> - <tcaption>Severity levels</tcaption> - </table> - - <p>A log event is allowed by Logger if the integer value of - its <c>Level</c> is less than or equal to the currently - configured log level. The log level can be configured globally, - or to allow more verbose logging from a specific part of the - system, per module.</p> - + <p>Log levels are expressed as atoms. Internally in Logger, the + atoms are mapped to integer values, and a log event passes the + log level check if the integer value of its log level is less + than or equal to the currently configured log level. That is, + the check passes if the event is equally or more severe than the + configured level. See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of all log + levels.</p> + <p>The global log level can be overridden by a log level + configured per module. This is to, for instance, allow more + verbose logging from a specific part of the + system.</p> + <p>Filter functions can be used for more sophisticated filtering + than the log level check provides. A filter function can stop or + pass a log event, based on any of the event's contents. It can + also modify all parts of the log event. See + section <seealso marker="#filters">Filters</seealso> for more + details.</p> + <p>If a log event passes through all global filters and all + handler filters for a specific handler, Logger forwards the event + to the handler callback. The handler formats and prints the + event to its destination. See + section <seealso marker="#handlers">Handlers</seealso> for + more details.</p> + <p>Everything up to and including the call to the handler + callbacks is executed on the client process, that is, the + process where the log event was issued.
It is up to the handler + implementation if other processes are involved or not.</p> + <p>The handlers are called in sequence, and the order is not + defined.</p> + </section> + <section> + <title>Logger API</title> + <p>The API for logging consists of a set + of <seealso marker="logger#macros">macros</seealso>, and a set + of functions of the form <c>logger:Level/1,2,3</c>, which are + all shortcuts + for <seealso marker="logger#log-2"> + <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p> + <p>The difference between using the macros and the exported + functions is that macros add location (originator) information + to the metadata, and perform lazy evaluation by wrapping the + logger call in a case statement, so it is only evaluated if the + log level of the event passes the global log level check.</p> <section> - <title>Customizable parts</title> - + <marker id="log_level"/> + <title>Log Level</title> + <p>The log level indicates the severity of an event. In + accordance with the Syslog protocol, RFC-5424, eight log + levels can be specified. The following table lists all + possible log levels by name (atom), integer value, and + description:</p> + + <table align="left"> + <row> + <cell><strong>Level</strong></cell> + <cell align="center"><strong>Integer</strong></cell> + <cell><strong>Description</strong></cell> + </row> + <row> + <cell>emergency</cell> + <cell align="center">0</cell> + <cell>system is unusable</cell> + </row> + <row> + <cell>alert</cell> + <cell align="center">1</cell> + <cell>action must be taken immediately</cell> + </row> + <row> + <cell>critical</cell> + <cell align="center">2</cell> + <cell>critical conditions</cell> + </row> + <row> + <cell>error</cell> + <cell align="center">3</cell> + <cell>error conditions</cell> + </row> + <row> + <cell>warning</cell> + <cell align="center">4</cell> + <cell>warning conditions</cell> + </row> + <row> + <cell>notice</cell> + <cell align="center">5</cell> + <cell>normal but significant conditions</cell> + </row> + <row> + <cell>info</cell> + <cell align="center">6</cell> + <cell>informational messages</cell> + </row> + <row> + <cell>debug</cell> + <cell align="center">7</cell> + <cell>debug-level messages</cell> + </row> + <tcaption>Log Levels</tcaption> + </table> + <p>Notice that the integer value is only used internally in + Logger. In the API, you must always use the atom. To compare + the severity of two log levels, + use <seealso marker="logger#compare_levels-2"> + <c>logger:compare_levels/2</c></seealso>.</p> + </section> + <section> + <marker id="log_message"/> + <title>Log Message</title> + <p>The log message contains the information to be logged. The + message can consist of a format string and arguments (given as + two separate parameters in the Logger API), a string or a + report. The latter, which is either a map or a key-value list, + can be accompanied by a report callback specified in the log + event's <seealso marker="#metadata">metadata</seealso>. The + report callback is a convenience function that + the <seealso marker="#formatters">formatter</seealso> can use + to convert the report to a format string and arguments.
The + formatter can also use its own conversion function, if no + callback is provided, or if a customized formatting is + desired.</p> + <p>Example, format string and arguments:</p> + <code>logger:error("The file does not exist: ~ts",[Filename])</code> + <p>Example, string:</p> + <code>logger:notice("Something strange happened!")</code> + <p>Example, report, and metadata with report callback:</p> + <code> +logger:debug(#{got => connection_request, id => Id, state => State}, + #{report_cb => fun(R) -> {"~p",[R]} end})</code> + <p>The log message can also be provided through a fun for lazy + evaluation. The fun is only evaluated if the global log level + check passes, and is therefore recommended if it is expensive + to generate the message. The lazy fun must return a string, a + report, or a tuple with format string and arguments.</p> + </section> + <section> + <title>Metadata</title> + <p>Metadata contains additional data associated with a log + message. Logger inserts some metadata fields by default, and + the client can add custom metadata in two different ways:</p> <taglist> - <tag><marker id="Handler"/>Handler</tag> + <tag>Set process metadata</tag> <item> - <p>A handler is defined as a module exporting the following - function:</p> - - <code>log(Log, Config) -> ok</code> - - <p>A handler is called by the logger backend after filtering on - logger level and on handler level for the handler which is - about to be called. The function call is done on the client - process, and it is up to the handler implementation if other - processes are to be involved or not.</p> - - <p>Multiple instances of the same handler can be - added. Configuration is per instance.</p> - + <p>Process metadata is set and updated + with <seealso marker="logger#set_process_metadata-1"> + <c>logger:set_process_metadata/1</c></seealso> + and <seealso marker="logger#update_process_metadata-1"> + <c>logger:update_process_metadata/1</c></seealso>, + respectively. This metadata applies to the process on + which these calls are made, and Logger adds the metadata + to all log events issued on that process.</p> </item> - - <tag><marker id="Filter"/>Filter</tag> - <item> - <p>Filters can be set on the logger or on a handler. Logger - filters are applied first, and if passed, the handler filters - for each handler are applied. The handler plugin is only - called if all handler filters for the handler in question also - pass.</p> - - <p>A filter is specified as:</p> - - <code>{fun((Log,Extra) -> Log | stop | ignore), Extra}</code> - - <p>The configuration parameter <c>filter_default</c> - specifies the behavior if all filters return <c>ignore</c>. - <c>filter_default</c> is by default set to <c>log</c>.</p> - - <p>The <c>Extra</c> parameter may contain any data that the - filter needs.</p> - </item> - - <tag><marker id="Formatter"/>Formatter</tag> + <tag>Add metadata to a specific log event</tag> <item> - <p>A formatter is defined as a module exporting the following - function:</p> - - <code>format(Log,Extra) -> unicode:chardata()</code> - - <p>The formatter plugin is called by each handler, and the - returned string can be printed to the handler's destination - (stdout, file, ...).</p> + <p>Metadata associated with one specific log event is given + as the last parameter to the log macro or Logger API + function when the event is issued.
For example:</p> + <code>?LOG_ERROR("Connection closed",#{context => server})</code> </item> - </taglist> + <p>See the description of + the <seealso marker="logger#type-metadata"> + <c>logger:metadata()</c></seealso> type for information + about which default keys Logger inserts, and how the different + metadata maps are merged.</p> </section> </section> <section> + <marker id="filters"/> + <title>Filters</title> + <p>Filters can be global, or attached to a specific + handler. Logger calls the global filters first, and if they all + pass, it calls the handler filters for each handler. Logger + calls the handler callback only if all filters attached to the + handler in question also pass.</p> + <p>A filter is defined as:</p> + <pre>{FilterFun, Extra}</pre> + <p>where <c>FilterFun</c> is a function of arity 2, + and <c>Extra</c> is any term. When applying the filter, Logger + calls the function with the log event as the first argument, + and the value of <c>Extra</c> as the second + argument. See <seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso> for type definitions.</p> + <p>The filter function can return <c>stop</c>, <c>ignore</c> or + the (possibly modified) log event.</p> + <p>If <c>stop</c> is returned, the log event is immediately + discarded. If the filter is global, no handler filters or + callbacks are called. If it is a handler filter, the + corresponding handler callback is not called, but the log event + is forwarded to filters attached to the next handler, if + any.</p> + <p>If the log event is returned, the next filter function is + called with the returned value as the first argument. That is, + if a filter function modifies the log event, the next filter + function receives the modified event. The value returned from + the last filter function is the value that the handler callback + receives.</p> + <p>If the filter function returns <c>ignore</c>, it means that it + did not recognize the log event, and thus leaves it to other + filters to decide the event's destiny.</p> + <p>The configuration + option <seealso marker="#filter_default"><c>filter_default</c></seealso> + specifies the behaviour if all filter functions + return <c>ignore</c>, or if no filters + exist. <c>filter_default</c> is by default set to <c>log</c>, + meaning that if all existing filters ignore a log event, Logger + forwards the event to the handler + callback. If <c>filter_default</c> is set to <c>stop</c>, Logger + discards such events.</p> + <p>Global filters are added + with <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> + and removed + with <seealso marker="logger#remove_logger_filter-1"> + <c>logger:remove_logger_filter/1</c></seealso>. They can also + be added at system start via the Kernel configuration + parameter <seealso marker="#logger"><c>logger</c></seealso>.</p> + <p>Handler filters are added + with <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso> + and removed + with <seealso marker="logger#remove_handler_filter-2"> + <c>logger:remove_handler_filter/2</c></seealso>.
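+      For example, a filter that stops info level events for one
+      specific handler can be added and later removed at runtime like
+      this (a sketch only; the handler identity <c>my_handler</c> and
+      the filter identity <c>stop_info</c> are hypothetical):</p>
+      <code>
+ok = logger:add_handler_filter(my_handler, stop_info,
+         {fun(#{level:=info},_Extra) -> stop;
+             (_LogEvent,_Extra) -> ignore
+          end, []}),
+ok = logger:remove_handler_filter(my_handler, stop_info).
+      </code>
+      <p>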
They can also + be specified directly in the configuration when adding a handler + with <seealso marker="logger#add_handler/3"> + <c>logger:add_handler/3</c></seealso> + or via the Kernel configuration + parameter <seealso marker="#logger"><c>logger</c></seealso>.</p> + + <p>To see which filters are currently installed in the system, + use <seealso marker="logger#i-0"><c>logger:i/0</c></seealso>, + or <seealso marker="logger#get_logger_config-0"> + <c>logger:get_logger_config/0</c></seealso> + and <seealso marker="logger#get_handler_config-1"> + <c>logger:get_handler_config/1</c></seealso>. Filters are + listed in the order they are applied, that is, the first + filter in the list is applied first, and so on.</p> + + <p>For convenience, the following built-in filters exist:</p> - <section> - <title>Built-in handlers</title> - - <taglist> - <tag><c>logger_std_h</c></tag> + <taglist> + <tag><seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso></tag> <item> - <p>This is the default handler used by OTP. Multiple instances - can be started, and each instance will write log events to a - given destination, console or file. Filters can be used for - selecting which event to send to which handler instance.</p> + <p>Provides a way of filtering log events based on a + <c>domain</c> field in <c>Metadata</c>.</p> </item> - - <tag><c>logger_disk_log_h</c></tag> + <tag><seealso marker="logger_filters#level-2"> + <c>logger_filters:level/2</c></seealso></tag> <item> - <p>This handler behaves much like logger_std_h, except it uses - <seealso marker="disk_log"><c>disk_log</c></seealso> as its - destination.</p> + <p>Provides a way of filtering log events based on the log + level.</p> </item> - - <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag> + <tag><seealso marker="logger_filters#progress-2"> + <c>logger_filters:progress/2</c></seealso></tag> <item> - <p>This handler is to be used for backwards compatibility - only. It is not started by default, but will be automatically - started the first time an event handler is added - with <seealso marker="error_logger#add_report_handler-1"> - <c>error_logger:add_report_handler/1,2</c></seealso>.</p> - - <p>No built-in event handlers exist.</p> + <p>Stops or allows progress reports from <c>supervisor</c> + and <c>application_controller</c>.</p> </item> - </taglist> - </section> - - <section> - <title>Built-in filters</title> - - <taglist> - <tag><c>logger_filters:domain/2</c></tag> + <tag><seealso marker="logger_filters#remote_gl-2"> + <c>logger_filters:remote_gl/2</c></seealso></tag> <item> - <p>This filter provides a way of filtering log events based on a - <c>domain</c> field <c>Metadata</c>. See - <seealso marker="logger_filters#domain-2"> - <c>logger_filters:domain/2</c></seealso></p> + <p>Stops or allows log events originating from a process + that has its group leader on a remote node.</p> </item> + </taglist> + </section> - <tag><c>logger_filters:level/2</c></tag> - <item> - <p>This filter provides a way of filtering log events based - on the log level. 
See <seealso marker="logger_filters#domain-2"> - <c>logger_filters:domain/2</c></seealso></p> - </item> + <section> + <marker id="handlers"/> + <title>Handlers</title> + <p>A handler is defined as a module exporting at least the + following function:</p> + + <pre><seealso marker="logger#HModule:log-2">log(LogEvent, Config) -> void()</seealso></pre> + + <p>This function is called when a log event has passed through all + global filters, and all handler filters attached to the handler + in question. The function call is executed on the client + process, and it is up to the handler implementation if other + processes are involved or not.</p> + + <p>Logger allows adding multiple instances of a handler + callback. That is, if a callback module implementation allows + it, you can add multiple handler instances using the same + callback module. The different instances are identified by + unique handler identities.</p> + + <p>In addition to the mandatory callback function <c>log/2</c>, a + handler module can export the optional callback + functions <c>adding_handler/1</c>, <c>changing_config/2</c> + and <c>removing_handler/1</c>. See + section <seealso marker="logger#handler_callback_functions">Handler + Callback Functions</seealso> in the logger(3) manual for more + information about these function.</p> + + <p>The following built-in handlers exist:</p> - <tag><c>logger_filters:progress/2</c></tag> - <item> - <p>This filter matches all progress reports - from <c>supervisor</c> and <c>application_controller</c>. - See <seealso marker="logger_filters#progress/2"> - <c>logger_filters:progress/2</c></seealso></p> - </item> + <taglist> + <tag><c>logger_std_h</c></tag> + <item> + <p>This is the default handler used by OTP. Multiple instances + can be started, and each instance will write log events to a + given destination, terminal or file.</p> + </item> - <tag><c>logger_filters:remote_gl/2</c></tag> - <item> - <p>This filter matches all events originating from a process - that has its group leader on a remote node. - See <seealso marker="logger_filters#remote_gl/2"> - <c>logger_filters:remote_gl/2</c></seealso></p> - </item> - </taglist> - </section> + <tag><c>logger_disk_log_h</c></tag> + <item> + <p>This handler behaves much like <c>logger_std_h</c>, except it uses + <seealso marker="disk_log"><c>disk_log</c></seealso> as its + destination.</p> + </item> - <section> - <title>Default formatter</title> + <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag> + <item> + <p>This handler is provided for backwards compatibility + only. It is not started by default, but will be + automatically started the first time an <c>error_logger</c> + event handler is added + with <seealso marker="error_logger#add_report_handler-1"> + <c>error_logger:add_report_handler/1,2</c></seealso>.</p> + + <p>The old <c>error_logger</c> event handlers in STDLIB and + SASL still exist, but they are not added by Erlang/OTP 21.0 + or later.</p> + </item> + </taglist> + </section> - <p>The default formatter is <c>logger_formatter</c>. - See <seealso marker="logger_formatter#format-2"> - <c>logger_formatter:format/2</c></seealso>.</p> - </section> + <section> + <marker id="formatters"/> + <title>Formatters</title> + <p>A formatter can be used by the handler implementation to do the + final formatting of a log event, before printing to the + handler's destination. 
The handler callback receives the + formatter information as part of the handler configuration, + which is passed as the second argument + to <seealso marker="logger#HModule:log-2"> + <c>HModule:log/2</c></seealso>.</p> + <p>The formatter information consists of a formatter + module, <c>FModule</c> and its + configuration, <c>FConfig</c>. <c>FModule</c> must export the + following function, which can be called by the handler:</p> + <pre><seealso marker="logger#FModule:format-2">format(LogEvent,FConfig) + -> FormattedLogEntry</seealso></pre> + <p>The formatter information for a handler is set as a part of its + configuration when the handler is added. It can also be changed + during runtime + with <seealso marker="logger#set_handler_config-3"> + <c>logger:set_handler_config(HandlerId,formatter,{FModule,FConfig})</c> + </seealso>, which overwrites the current formatter information, + or with <seealso marker="logger#update_formatter_config-2"> + <c>logger:update_formatter_config/2,3</c></seealso>, which + only modifies the formatter configuration.</p> + <p>If the formatter module exports the optional callback + function <seealso marker="logger#FModule:check_config-1"> + <c>check_config(FConfig)</c></seealso>, Logger calls this + function when the formatter information is set or modified, to + verify the validity of the formatter configuration.</p> + <p>If no formatter information is specified for a handler, Logger + uses <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> as default.</p> </section> <section> <title>Configuration</title> + <p>Logger can be configured either when the system starts through + <seealso marker="config">configuration parameters</seealso>, + or at run-time by using the <seealso marker="logger">logger(3)</seealso> + API. The recommended approach is to do the initial configuration in + the <c>sys.config</c> file and then use the API when some configuration + has to be changed at runtime, such as the log level.</p> <section> - <title>Application environment variables</title> - <p>See <seealso marker="kernel_app#configuration">Kernel(6)</seealso> for - information about the application environment variables that can - be used for configuring logger.</p> + <title>Kernel Configuration Parameters</title> + <p>Logger is best configured by using the configuration parameters + of Kernel. There are four possible configuration parameters: + <seealso marker="#logger"><c>logger</c></seealso>, + <seealso marker="kernel_app#logger_level"><c>logger_level</c></seealso>, + <seealso marker="kernel_app#logger_sasl_compatible"><c>logger_sasl_compatible</c></seealso> and + <seealso marker="kernel_app#logger_progress_reports"><c>logger_progress_reports</c></seealso>. + <c>logger_level</c>, <c>logger_sasl_compatible</c> and <c>logger_progress_reports</c> are described in the + <seealso marker="kernel_app#configuration">Kernel Configuration</seealso>, + while <c>logger</c> is described below.</p> + + <marker id="logger"/> + <p><em>logger</em></p> + <p>The application configuration parameter <c>logger</c> is used to configure + three different Logger aspects: handlers, logger filters and module levels. + The configuration is a list containing tagged tuples that look like this:</p> + <taglist> + <tag><c>DisableHandler = {handler,default,undefined}</c></tag> + <item> + <p>Disable the default handler. This allows another application + to add its own default handler.
See <seealso marker="logger#add_handlers/1"> + <c>logger:add_handlers/1</c></seealso> for more details.</p> + <p>Only one entry of this option is allowed.</p></item> + <tag><c>AddHandler = {handler,HandlerId,Module,HandlerConfig}</c></tag> + <item> + <p>Add a handler as if <seealso marker="logger:add_handler/3"> + <c>logger:add_handler(HandlerId,Module,HandlerConfig)</c></seealso> is + called.</p> + <p>It is allowed to have multiple entries of this option.</p></item> + <tag><c>Filters = {filters, default, [Filter]}</c><br/> + <c>FilterDefault = log | stop</c><br/> + <c>Filter = {FilterId, {FilterFun, FilterConfig}}</c></tag> + <item> + <p>Add the specified <seealso marker="logger#add_logger_filter/2"> + logger filters</seealso>.</p> + <p>Only one entry of this option is allowed.</p></item> + <tag><c>ModuleLevel = {module_level, Level, [Module]}</c></tag> + <item> + <p>This option configures <seealso marker="logger#set_module_level/2"> + module log level</seealso>.</p> + <p>It is allowed to have multiple entries of this option.</p></item> + </taglist> + <p>Examples:</p> + <list> + <item> + <p>Output logs into the file "logs/erlang.log"</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ logger_std_h => #{ type => {file,"log/erlang.log"}}}}]}]}]. + </code> + </item> + <item> + <p>Output logs in single line format</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ formatter => { logger_formatter,#{ single_line => true}}}}]}]}]. + </code> + </item> + <item> + <p>Add the pid to each log event</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ formatter => { logger_formatter, + #{ template => [time," ",pid," ",msg,"\n"]}} + }}]}]}]. + </code> + </item> + <item> + <p>Use a different file for debug logging</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ level => error, + logger_std_h => #{ type => {file, "log/erlang.log"}}}}, + {handler, info, logger_std_h, + #{ level => debug, + logger_std_h => #{ type => {file, "log/debug.log"}}}} + ]}]}]. + </code> + </item> + </list> </section> <section> - <title>Logger configuration</title> + <title>Global Logger Configuration</title> <taglist> - <tag><c>level</c></tag> + <tag><c>level = </c><seealso marker="logger#type-level"> + <c>logger:level()</c></seealso></tag> <item> - <p>Specifies the severity level to log.</p> + <p>Specifies the global log level to log.</p> + <p>See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of + possible log levels.</p> + <p>The initial value of this option is set by the Kernel + configuration + parameter <seealso marker="kernel_app#logger_level"> + <c>logger_level</c></seealso>. 
It can be changed during + runtime + with <seealso marker="logger#set_logger_config-2"> + <c>logger:set_logger_config(level,NewLevel)</c></seealso>.</p> </item> - <tag><c>filters</c></tag> + <tag><c>filters = [{</c><seealso marker="logger#type-filter_id"> + <c>logger:filter_id()</c></seealso><c>,</c> + <seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso><c>}]</c></tag> <item> - <p>Logger filters are added or removed with + <p>Global filters are added and removed with <seealso marker="logger#add_logger_filter-2"> <c>logger:add_logger_filter/2</c></seealso> and <seealso marker="logger#remove_logger_filter-1"> <c>logger:remove_logger_filter/1</c></seealso>, respectively.</p> - <p>See <seealso marker="#Filter">Filter</seealso> for more - information.</p> - <p>By default, no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information.</p> + <p>Default is <c>[]</c>, that is, no filters exist.</p> </item> - <tag><c>filter_default = log | stop</c></tag> + <tag><marker id="filter_default"/><c>filter_default = log | stop</c></tag> <item> <p>Specifies what to do with an event if all filters - return <c>ignore</c>.</p> + return <c>ignore</c>, or if no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information about how this option is used.</p> <p>Default is <c>log</c>.</p> </item> - <tag><c>handlers</c></tag> - <item> - <p>Handlers are added or removed with - <seealso marker="logger#add_handler-3"> - <c>logger:add_handler/3</c></seealso> and - <seealso marker="logger#remove_handler-1"> - <c>logger:remove_handler/1</c></seealso>, - respectively.</p> - <p>See <seealso marker="#Handler">Handler</seealso> for more - information.</p> - </item> </taglist> </section> <section> <marker id="handler_configuration"/> - <title>Handler configuration</title> + <title>Handler Configuration</title> <taglist> - <tag><c>level</c></tag> + <tag><c>level = </c><seealso marker="logger#type-level"> + <c>logger:level()</c></seealso></tag> <item> - <p>Specifies the severity level to log.</p> + <p>Specifies the log level which the handler logs.</p> + <p>See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of + possible log levels.</p> + <p>The log level can be specified when adding the handler, + or changed during runtime with, for + instance, <seealso marker="logger#set_handler_config/3"> + <c>logger:set_handler_config/3</c></seealso>.</p> + <p>Default is <c>info</c>.</p> </item> - <tag><c>filters</c></tag> + <tag><c>filters = [{</c><seealso marker="logger#type-filter_id"> + <c>logger:filter_id()</c></seealso><c>,</c> + <seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso><c>}]</c></tag> <item> <p>Handler filters can be specified when adding the handler, - or added or removed later with + or added or removed during runtime with <seealso marker="logger#add_handler_filter-3"> <c>logger:add_handler_filter/3</c></seealso> and <seealso marker="logger#remove_handler_filter-2"> <c>logger:remove_handler_filter/2</c></seealso>, respectively.</p> - <p>See <seealso marker="#Filter">Filter</seealso> for more + <p>See <seealso marker="#filters">Filters</seealso> for more information.</p> - <p>By default, no filters exist.</p> + <p>Default is <c>[]</c>, that is, no filters exist.</p> </item> - <tag><c>filter_default = log | stop</c></tag> + <tag><marker id="filter_default"/><c>filter_default = log | stop</c></tag> <item> <p>Specifies what to do with an event if all filters - 
return <c>ignore</c>.</p> + return <c>ignore</c>, or if no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information about how this option is used.</p> <p>Default is <c>log</c>.</p> </item> - <tag><c>formatter = {Module::module(),Extra::term()}</c></tag> + <tag><c>formatter = {module(),</c><seealso marker="logger#type-formatter_config"> + <c>logger:formatter_config()</c></seealso><c>}</c></tag> <item> - <p>See <seealso marker="#Formatter">Formatter</seealso> for more + <p>The formatter which the handler can use for converting + the log event term to a printable string.</p> + <p>See <seealso marker="#formatters">Formatters</seealso> for more information.</p> - <p>The default module is <seealso marker="logger_formatter"> - <c>logger_formatter</c></seealso>, and <c>Extra</c> is - it's configuration map.</p> + <p>Default + is <c>{logger_formatter,DefaultFormatterConfig}</c>, see + the <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + manual for information about this formatter and its + default configuration.</p> </item> + <tag><c>HandlerConfig, atom() = term()</c></tag> + <item> + <p>Any keys not listed above are considered to be handler + specific configuration. The configuration of the Kernel + handlers can be found in + the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso> + and + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c> + </seealso> manual pages.</p> + </item> </taglist> - <p>Note that <c>level</c> and <c>filters</c> are obeyed by + <p>Notice that <c>level</c> and <c>filters</c> are obeyed by Logger itself before forwarding the log events to each - handler, while <c>formatter</c> is left to the handler - implementation. All Logger's built-in handlers will call the - given formatter before printing.</p> + handler, while <c>formatter</c> and all handler specific + options are left to the handler implementation.</p> + <p>All Logger's built-in handlers will call the given formatter + before printing.</p> </section> </section> <section> <marker id="compatibility"/> - <title>Backwards compatibility with error_logger</title> - <p>Logger provides backwards compatibility with the old + <title>Backwards Compatibility with error_logger</title> + <p>Logger provides backwards compatibility with <c>error_logger</c> in the following ways:</p> <taglist> - <tag>Legacy event handlers</tag> + <tag>API for Logging</tag> <item> - <p>To use event handlers written for <c>error_logger</c>, just - add your event handler with</p> - <code> -error_logger:add_report_handler/1,2. - </code> - <p>This will automatically start the <c>error_logger</c> - event manager, and add <c>error_logger</c> as a - handler to <c>logger</c>, with configuration</p> -<code> -#{level=>info, - filter_default=>log, - filters=>[]}. -</code> - <p>Note that this handler will ignore events that do not - originate from the old <c>error_logger</c> API, or from - within OTP. This means that if your code uses the logger API - for logging, then your log events will be discarded by this - handler.</p> - <p>Also note that <c>error_logger</c> is not overload - protected.</p> - </item> - <tag>Logger API</tag> - <item> - <p>The old <c>error_logger</c> API still exists, but should - only be used by legacy code. It will be removed in a later + <p>The <c>error_logger</c> API still exists, but should only + be used by legacy code.
It will be removed in a later release.</p> + <p>Calls + to <seealso marker="error_logger#error_report-1"> + <c>error_logger:error_report/1,2</c></seealso>, + <seealso marker="error_logger#error_msg-1"> + <c>error_logger:error_msg/1,2</c></seealso>, and + corresponding functions for warning and info messages, are + all forwarded to Logger as calls + to <seealso marker="logger#log-3"> + <c>logger:log(Level,Report,Metadata)</c></seealso>.</p> + <p><c>Level = error | warning | info</c> and is taken + from the function name. <c>Report</c> contains the actual + log message, and <c>Metadata</c> contains additional + information which can be used for creating backwards + compatible events for legacy <c>error_logger</c> event + handlers, see + section <seealso marker="#legacy_event_handlers">Legacy + Event Handlers</seealso>.</p> </item> - <tag>Output format</tag> + <tag>Output Format</tag> <item> <p>To get log events on the same format as produced by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>, use the default formatter, <c>logger_formatter</c>, with - configuration parameter <c>legacy_header=>true</c>. This is + configuration parameter <c>legacy_header => true</c>. This is also the default.</p> </item> - <tag>Default format of log events from OTP</tag> + <tag>Default Format of Log Events from OTP</tag> <item> <p>By default, all log events originating from within OTP, except the former so called "SASL reports", look the same as before.</p> </item> - <tag>SASL reports</tag> + <tag><marker id="sasl_reports"/>SASL Reports</tag> <item> <p>By SASL reports we mean supervisor reports, crash reports and progress reports.</p> @@ -401,96 +706,134 @@ error_logger:add_report_handler/1,2. named <c>sasl_report_tty_h</c> and <c>sasl_report_file_h</c>.</p> <p>The destination of these log events were configured by - environment variables for the SASL application.</p> + <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL + configuration parameters</seealso>.</p> <p>Due to the specific event handlers, the output format slightly differed from other log events.</p> - <p>As of OTP-21, the concept of SASL reports is removed, - meaning that the default behavior is as follows:</p> + <p>As of Erlang/OTP 21.0, the concept of SASL reports is + removed, meaning that the default behaviour is as + follows:</p> <list> - <item>Supervisor reports, crash reports and progress reports + <item>Supervisor reports, crash reports, and progress reports are no longer connected to the SASL application.</item> <item>Supervisor reports and crash reports are logged by default.</item> <item>Progress reports are not logged by default, but can be - enabled with the kernel environment - variable <c>logger_log_progress</c>.</item> + enabled with the Kernel configuration + parameter <seealso marker="kernel_app#logger_progress_reports"> + <c>logger_progress_reports</c></seealso>.</item> <item>The output format is the same for all log events.</item> </list> - <p>If the old behavior is preferred, the kernel environment - variable <c>logger_sasl_compatible</c> can be set - to <c>true</c>. The old SASL environment variables can then - be used as before, and the SASL reports will only be printed - if the SASL application is running - through a second log - handler named <c>sasl_h</c>.</p> + <p>If the old behaviour is preferred, the Kernel configuration + parameter <seealso marker="kernel_app#logger_sasl_compatible"> + <c>logger_sasl_compatible</c></seealso> can be set + to <c>true</c>.
The + <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL + configuration parameters</seealso> can then be used as + before, and the SASL reports will only be printed if the + SASL application is running, through a second log handler + named <c>sasl</c>.</p> <p>All SASL reports have a metadata - field <c>domain=>[beam,erlang,otp,sasl]</c>, which can be - used, for example, by filters to to stop or allow the - events.</p> + field <c>domain => [beam,erlang,otp,sasl]</c>, which can be + used, for example, by filters to stop or allow the + log events.</p> + <p>See the <seealso marker="sasl:error_logging">SASL User's + Guide</seealso> for more information about the old SASL + error logging functionality.</p> + </item> + <tag><marker id="legacy_event_handlers"/>Legacy Event Handlers</tag> + <item> + <p>To use event handlers written for <c>error_logger</c>, just + add your event handler with</p> + <code> +error_logger:add_report_handler/1,2. + </code> + <p>This will automatically start the <c>error_logger</c> + event manager, and add <c>error_logger</c> as a + handler to <c>logger</c>, with configuration</p> +<code> +#{level => info, + filter_default => log, + filters => []}. +</code> + <p>Notice that this handler will ignore events that do not + originate from the <c>error_logger</c> API, or from within + OTP. This means that if your code uses the Logger API for + logging, then your log events will be discarded by this + handler.</p> + <p>Also notice that <c>error_logger</c> is not overload + protected.</p> </item> </taglist> </section> <section> - <title>Error handling</title> + <title>Error Handling</title> <p>Log data is expected to be either a format string and - arguments, a string (unicode:chardata), or a report (map or + arguments, a string + (<seealso marker="stdlib:unicode#type-chardata"> + <c>unicode:chardata()</c></seealso>), or a report (map or key-value list) which can be converted to a format string and - arguments by the handler. A default report callback should be - included in the log event's metadata, which can be used for - converting the report to a format string and arguments. The - handler might also do a custom conversion if the default format - is not desired.</p> - <p><c>logger</c> does, to a certain extent, check its input data + arguments by the handler. If a report is given, a default report + callback can be included in the log event's metadata. The + handler can use this callback for converting the report to a + format string and arguments. If the format obtained by the + provided callback is not desired, or if there is no provided + callback, the handler must do a custom conversion.</p> + <p>Logger does, to a certain extent, check its input data before forwarding a log event to the handlers, but it does not evaluate conversion funs or check the validity of format strings and arguments. This means that any filter or handler must be careful when formatting the data of a log event, making sure that it does not crash due to bad input data or faulty callbacks.</p> - <p>If a filter or handler still crashes, logger will remove the - filter or handler in question from the configuration, and then - print a short error message on the console. 
A debug event - containing the crash reason and other details is also issued, - and can be seen if a handler is installed which logs on debug - level.</p> + <p>If a filter or handler still crashes, Logger will remove the + filter or handler in question from the configuration, and print + a short error message to the terminal. A debug event containing + the crash reason and other details is also issued, and can be + seen if a handler logging debug events is installed.</p> </section> <section> <title>Example: add a handler to log debug events to file</title> - <p>When starting an erlang node, the default behavior is that all + <p>When starting an Erlang node, the default behaviour is that all log events with level info and above are logged to the - console. In order to also log debug events, you can either + terminal. In order to also log debug events, you can either change the global log level to <c>debug</c> or add a separate handler to take care of this. In this example we will add a new handler which prints the debug events to a separate file.</p> - <p>First, we add an instance of logger_std_h with + <p>First, we add an instance of <c>logger_std_h</c> with type <c>{file,File}</c>, and we set the handler's level to <c>debug</c>:</p> <pre> -1> <input>Config = #{level=>debug,logger_std_h=>#{type=>{file,"./debug.log"}}}.</input> +1> <input>Config = #{level => debug, logger_std_h => #{type => {file,"./debug.log"}}}.</input> #{logger_std_h => #{type => {file,"./debug.log"}}, level => debug} 2> <input>logger:add_handler(debug_handler,logger_std_h,Config).</input> ok</pre> <p>By default, the handler receives all events - (<c>filter_defalt=log</c>), so we need to add a filter to stop - all non-debug events:</p> + (<c>filter_default=log</c>, see + section <seealso marker="#filters">Filters</seealso> for more + details), so we need to add a filter to stop all non-debug + events. The built-in + filter <seealso marker="logger_filters#level-2"> + <c>logger_filters:level/2</c></seealso> + is used for this:</p> <pre> -3> <input>Fun = fun(#{level:=debug}=Log,_) -> Log; (_,_) -> stop end.</input> -#Fun<erl_eval.12.98642416> -4> <input>logger:add_handler_filter(debug_handler,allow_debug,{Fun,[]}).</input> +3> <input>logger:add_handler_filter(debug_handler,stop_non_debug, + {fun logger_filters:level/2,{stop,neq,debug}}).</input> ok</pre> - <p>And finally, we need to make sure that the logger itself allows + <p>And finally, we need to make sure that Logger itself allows debug events. 
This can either be done by setting the global - logger level:</p> + log level:</p> <pre> -5> <input>logger:set_logger_config(level,debug).</input> +4> <input>logger:set_logger_config(level,debug).</input> ok</pre> <p>Or by allowing debug events from one or a few modules only:</p> <pre> -6> <input>logger:set_module_level(mymodule,debug).</input> +5> <input>logger:set_module_level(mymodule,debug).</input> ok</pre> </section> @@ -499,107 +842,112 @@ ok</pre> <title>Example: implement a handler</title> <p>The only requirement that a handler MUST fulfill is to export the following function:</p> - <code>log(logger:log(),logger:config()) ->ok</code> - <p>It may also implement the following callbacks:</p> + <code>log(logger:log_event(),logger:config()) -> ok</code> + <p>It can optionally also implement the following callbacks:</p> <code> -adding_handler(logger:handler_id(),logger:config()) -> {ok,logger:config()} | {error,term()} -removing_handler(logger:handler_id(),logger:config()) -> ok -changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()} +adding_handler(logger:config()) -> {ok,logger:config()} | {error,term()} +removing_handler(logger:config()) -> ok +changing_config(logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()} </code> - <p>When logger:add_handler(Id,Module,Config) is called, logger - will first call Module:adding_handler(Id,Config), and if it - returns {ok,NewConfig} the NewConfig is written to the - configuration database. After this, the handler may receive log - events as calls to Module:log/2.</p> + <p>When <c>logger:add_handler(Id,Module,Config)</c> is called, + Logger first calls <c>HModule:adding_handler(Config)</c>. If + this function returns <c>{ok,NewConfig}</c>, Logger + writes <c>NewConfig</c> to the configuration database, and + the <c>logger:add_handler/3</c> call returns. After this, the + handler is installed and must be ready to receive log events as + calls to <c>HModule:log/2</c>.</p> <p>A handler can be removed by calling - logger:remove_handler(Id). logger will call - Module:removing_handler(Id,Config), and then remove the handler's - configuration from the configuration database.</p> + <c>logger:remove_handler(Id)</c>. Logger calls + <c>HModule:removing_handler(Config)</c>, and removes the + handler's configuration from the configuration database.</p> - <p>When logger:set_handler_config is called, logger calls - Module:changing_config(Id,OldConfig,NewConfig). If this function - returns ok, the NewConfig is written to the configuration - database.</p> + <p>When <c>logger:set_handler_config/2,3</c> + or <c>logger:update_handler_config/2</c> is called, Logger + calls <c>HModule:changing_config(OldConfig,NewConfig)</c>. If + this function returns <c>{ok,NewConfig}</c>, Logger + writes <c>NewConfig</c> to the configuration database.</p> + + <p>A simple handler that prints to the terminal can be implemented + as follows:</p> - <p>A simple handler which prints to the console could be - implemented as follows:</p> <code> -module(myhandler). -export([log/2]). -log(Log,#{formatter:={FModule,FConfig}) -> - io:put_chars(FModule:format(Log,FConfig)). +log(LogEvent,#{formatter:={FModule,FConfig}}) -> + io:put_chars(FModule:format(LogEvent,FConfig)). </code> <p>A simple handler which prints to file could be implemented like this:</p> <code> -module(myhandler). --export([adding_handler/2, removing_handler/2, log/2]). +-export([adding_handler/1, removing_handler/1, log/2]).
-export([init/1, handle_call/3, handle_cast/2, terminate/2]). -adding_handler(Id,Config) -> +adding_handler(Config) -> {ok,Fd} = file:open(File,[append,{encoding,utf8}]), - {ok,Config#{myhandler_fd=>Fd}}. + {ok,Config#{myhandler_fd => Fd}}. -removing_handler(Id,#{myhandler_fd:=Fd}) -> +removing_handler(#{myhandler_fd:=Fd}) -> _ = file:close(Fd), ok. -log(Log,#{myhandler_fd:=Fd,formatter:={FModule,FConfig}}) -> - io:put_chars(Fd,FModule:format(Log,FConfig)). +log(LogEvent,#{myhandler_fd:=Fd,formatter:={FModule,FConfig}}) -> + io:put_chars(Fd,FModule:format(LogEvent,FConfig)). </code> <note><p>The above handlers do not have any overload protection, and all log events are printed directly from the client process.</p></note> - <p>For examples of overload protection, please refer to the - implementation - of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso> - and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c> + <p>For information and examples of overload protection, please + refer to + section <seealso marker="#overload_protection">Protecting the + Handler from Overload</seealso>, and the implementation + of <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso> + and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c> </seealso>.</p> <p>Below is a simpler example of a handler which logs through one single process.</p> <code> -module(myhandler). --export([adding_handler/2, removing_handler/2, log/2]). +-export([adding_handler/1, removing_handler/1, log/2]). -export([init/1, handle_call/3, handle_cast/2, terminate/2]). -adding_handler(Id,Config) -> +adding_handler(Config) -> {ok,Pid} = gen_server:start(?MODULE,Config), - {ok,Config#{myhandler_pid=>Pid}}. + {ok,Config#{myhandler_pid => Pid}}. -removing_handler(Id,#{myhandler_pid:=Pid}) -> +removing_handler(#{myhandler_pid:=Pid}) -> gen_server:stop(Pid). -log(Log,#{myhandler_pid:=Pid} = Config) -> - gen_server:cast(Pid,{log,Log,Config}). +log(LogEvent,#{myhandler_pid:=Pid} = Config) -> + gen_server:cast(Pid,{log,LogEvent,Config}). init(#{myhandler_file:=File}) -> {ok,Fd} = file:open(File,[append,{encoding,utf8}]), - {ok,#{file=>File,fd=>Fd}}. + {ok,#{file => File, fd => Fd}}. handle_call(_,_,State) -> {reply,{error,bad_request},State}. -handle_cast({log,Log,Config},#{fd:=Fd} = State) -> - do_log(Fd,Log,Config), +handle_cast({log,LogEvent,Config},#{fd:=Fd} = State) -> + do_log(Fd,LogEvent,Config), {noreply,State}. terminate(Reason,#{fd:=Fd}) -> _ = file:close(Fd), ok. -do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> - String = FModule:format(Log,FConfig), +do_log(Fd,LogEvent,#{formatter:={FModule,FConfig}}) -> + String = FModule:format(LogEvent,FConfig), io:put_chars(Fd,String). </code> </section> <section> <marker id="overload_protection"/> - <title>Protecting the handler from overload</title> + <title>Protecting the Handler from Overload</title> <p>In order for the built-in handlers to survive, and stay responsive, during periods of high load (i.e. when huge numbers of incoming log requests must be handled), a mechanism for overload protection @@ -610,7 +958,7 @@ do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> as follows:</p> <section> - <title>Message queue length</title> + <title>Message Queue Length</title> <p>The handler process keeps track of the length of its message queue and reacts in different ways depending on the current status. 
The purpose is to keep the handler in, or (as quickly as possible), @@ -627,7 +975,7 @@ do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> and as long as the length of the message queue is lower, all log requests are handled asynchronously. This simply means that the process sending the log request (by calling a log function in the - logger API) does not wait for a response from the handler but + Logger API) does not wait for a response from the handler but continues executing immediately after the request (i.e. it will not be affected by the time it takes the handler to print to the log device). If the message queue grows larger than this value, however, @@ -661,10 +1009,20 @@ do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> </item> </taglist> - <p>For the overload protection algorithm to work properly, it is a - requirement that:</p> + <p>For the overload protection algorithm to work properly, it is + required that:</p> + + <p><c>toggle_sync_qlen =< drop_new_reqs_qlen =< flush_reqs_qlen</c></p> + + <p>and that:</p> + + <p><c>drop_new_reqs_qlen > 1</c></p> - <p><c>toggle_sync_qlen < drop_new_reqs_qlen < flush_reqs_qlen</c></p> + <p>If <c>toggle_sync_qlen</c> is set to <c>0</c>, the handler will handle all + requests synchronously. Setting the value of <c>toggle_sync_qlen</c> to the same + as <c>drop_new_reqs_qlen</c>, disables the synchronous mode. Likewise, setting + the value of <c>drop_new_reqs_qlen</c> to the same as <c>flush_reqs_qlen</c>, + disables the drop mode.</p> <p>During high load scenarios, the length of the handler message queue rarely grows in a linear and predictable way. Instead, whenever the @@ -695,14 +1053,14 @@ logger:add_handler(my_standard_h, logger_std_h, </section> <section> - <title>Controlling bursts of log requests</title> + <title>Controlling Bursts of Log Requests</title> <p>A potential problem with large bursts of log requests, is that log files may get full or wrapped too quickly (in the latter case overwriting previously logged data that could be of great importance). For this reason, both built-in handlers offer the possibility to set a maximum level of how many requests to process with a certain time frame. With this burst control feature enabled, the handler will take care of bursts of log requests - without choking log files, or the console, with massive amounts of + without choking log files, or the terminal, with massive amounts of printouts. These are the configuration parameters:</p> <taglist> @@ -736,7 +1094,7 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h, </section> <section> - <title>Terminating a large handler</title> + <title>Terminating a Large Handler</title> <p>A handler process may grow large even if it can manage peaks of high load without crashing. 
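For reference, a sketch of how the three queue-length thresholds described above could be set when adding a standard handler. The values are illustrative only, and the thresholds are assumed to go in the same handler-specific sub map as the type option:

logger:add_handler(my_standard_h, logger_std_h,
                   #{logger_std_h =>
                         #{type => {file, "./system_info.log"},
                           toggle_sync_qlen => 100,
                           drop_new_reqs_qlen => 1000,
                           flush_reqs_qlen => 2000}}).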
The overload protection mechanism includes user configurable levels for a maximum allowed message queue length and maximum allowed memory @@ -773,7 +1131,14 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h, <section> <title>See Also</title> - <p><seealso marker="error_logger"><c>error_logger(3)</c></seealso>, - <seealso marker="sasl:sasl_app"><c>SASL(6)</c></seealso></p> + <p> + <seealso marker="disk_log"><c>disk_log(3)</c></seealso>, + <seealso marker="error_logger"><c>error_logger(3)</c></seealso>, + <seealso marker="logger"><c>logger(3)</c></seealso>, + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>, + <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>, + <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>, + <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>, + <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p> </section> </chapter> diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml index 90cc4fec30..20b49b8ca0 100644 --- a/lib/kernel/doc/src/logger_disk_log_h.xml +++ b/lib/kernel/doc/src/logger_disk_log_h.xml @@ -33,21 +33,21 @@ <file>logger_disk_log_h.xml</file> </header> <module>logger_disk_log_h</module> - <modulesummary>A disk_log based handler for the Logger - application.</modulesummary> + <modulesummary>A disk_log based handler for the Logger.</modulesummary> <description> - <p>This is a handler for the Logger application that offers circular - (wrapped) logs by using the disk_log application. Multiple instances - of this handler can be added to logger, and each instance will print to + <p>This is a handler for Logger that offers circular + (wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>. + Multiple instances + of this handler can be added to Logger, and each instance prints to its own disk_log file, created with the name and settings specified in the handler configuration.</p> <p>The default standard handler, <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be - replaced by a disk_log handler at startup of the kernel application. + replaced by a disk_log handler at startup of the Kernel application. See an example of this below.</p> <p>The handler has an overload protection mechanism that will keep the handler - process and the kernel application alive during a high load of log + process and the Kernel application alive during a high load of log requests. How this feature works, and how to modify the configuration, is described in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> @@ -121,11 +121,11 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h, #{filesync_repeat_interval => 1000}}). </code> <p>In order to use the disk_log handler instead of the default standard - handler when starting en Erlang node, use the kernel configuration parameter - <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with - value <c>{disk_log,FileName}</c>. Example:</p> + handler when starting an Erlang node, change the Kernel default logger to + use disk_log. 
Example:</p> <code type="none"> -erl -kernel logger_dest '{disk_log,"./system_disk_log"}' +erl -kernel logger '[{handler,default,logger_disk_log_h, + #{ disk_log_opts => #{ file => "./system_disk_log"}}}]' </code> </description> @@ -141,6 +141,12 @@ erl -kernel logger_dest '{disk_log,"./system_disk_log"}' </funcs> + <section> + <title>See Also</title> + <p><seealso marker="logger"><c>logger(3)</c></seealso></p> + <p><seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso></p> + <p><seealso marker="disk_log"><c>disk_log(3)</c></seealso></p> + </section> </erlref> diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml index c34ec7d14c..f92181ea3f 100644 --- a/lib/kernel/doc/src/logger_filters.xml +++ b/lib/kernel/doc/src/logger_filters.xml @@ -52,57 +52,100 @@ <funcs> <func> <name name="domain" arity="2"/> - <fsummary>Filter log events based on the domain field in metadata.</fsummary> + <fsummary>Filter log events based on the domain field in + metadata.</fsummary> <desc> - <p>This filter provides a way of filtering log events based on a - <c>domain</c> field <c>Metadata</c>.</p> - - <p>The <c><anno>Extra</anno></c> parameter is specified when - adding the filter - via <seealso marker="logger#add_logger_filter-2"> - <c>logger:add_logger_filter/2</c></seealso> - or <seealso marker="logger#add_handler_filter-3"> - <c>logger:add_handler_filter/3</c></seealso>.</p> - - <p>The filter compares the value of the <c>domain</c> field - in the log event's metadata (<c>Domain</c>) - to <c><anno>MatchDomain</anno></c> as follows:</p> - - <taglist> - <tag><c><anno>Compare</anno> = starts_with</c></tag> - <item><p>The filter matches if <c>MatchDomain</c> is a prefix - of <c>Domain</c>.</p></item> - <tag><c><anno>Compare</anno> = prefix_of</c></tag> - <item><p>The filter matches if <c>Domain</c> is a prefix - of <c>MatchDomain</c>.</p></item> - <tag><c><anno>Compare</anno> = equals</c></tag> - <item><p>The filter matches if <c>Domain</c> is equal - to <c>MatchDomain</c>.</p></item> - <tag><c><anno>Compare</anno> = no_domain</c></tag> - <item><p>The filter matches if there is no domain field in - metadata. In this case <c><anno>MatchDomain</anno></c> shall - be <c>[]</c>.</p></item> - </taglist> - - <p>If the filter matches and <c><anno>Action</anno> = - log</c>, the log event is allowed. If the filter matches - and <c><anno>Action</anno> = stop</c>, the log event is - stopped.</p> - - <p>If the filter does not match, it returns <c>ignore</c>, - meaning that other filters, or the value of the - configuration parameter <c>filter_default</c>, will decide - if the event is allowed or not.</p> - - <p>Log events that do not contain any domain field, will - only match when <c><anno>Compare</anno> = no_domain</c>.</p> - - <p>Example: stop all events with - domain <c>[beam,erlang,otp,sasl|_]</c></p> - - <code> + <p>This filter provides a way of filtering log events based on a + <c>domain</c> field in <c>Metadata</c>. This field is + optional, and the purpose of using it is to group log events + from, for example, a specific functional area. This allows + filtering or other specialized treatment in a Logger + handler.</p> + + <p>A domain field must be a list of atoms, creating smaller + and more specialized domains as the list grows longer. 
The + biggest domain is <c>[]</c>, which comprices all + possible domains.</p> + + <p>For example, consider the following domains:</p> + <pre> +D1 = [beam,erlang,otp] +D2 = [beam,erlang,otp,sasl]</pre> + + <p><c>D1</c> is the biggest of the two, and is said to be a + super-domain of <c>D2</c>. <c>D2</c> is a + sub-domain <c>D1</c>. Both <c>D1</c> and <c>D2</c> are + sub-domains of <c>[]</c></p> + + <p>The above domains are used for logs originating from + Erlang/OTP. D1 specifies that the log event comes from + Erlang/OTP in general, and D2 indicates that the log event + is a so + called <seealso marker="logger_chapter#sasl_reports">SASL + report</seealso>.</p> + + <p>The <c><anno>Extra</anno></c> parameter to + the <c>domain/2</c> function is specified when adding the + filter via <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> + or <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso>.</p> + + <p>The filter compares the value of the <c>domain</c> field + in the log event's metadata (<c>Domain</c>) + to <c><anno>MatchDomain</anno></c> as follows:</p> + + <taglist> + <tag><c><anno>Compare</anno> = sub</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is equal to or + a sub-domain of <c>MatchDomain</c>, that is, + if <c>MatchDomain</c> is a prefix of <c>Domain</c>.</p> + </item> + <tag><c><anno>Compare</anno> = super</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is equal to or a + super-domain of <c>MatchDomain</c>, that is, + if <c>Domain</c> is a prefix of <c>MatchDomain</c>.</p> + </item> + <tag><c><anno>Compare</anno> = equal</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is equal + to <c>MatchDomain</c>.</p> + </item> + <tag><c><anno>Compare</anno> = not_equal</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is not equal + to <c>MatchDomain</c>, or if there is no domain field in + metadata.</p> + </item> + <tag><c><anno>Compare</anno> = undefined</c></tag> + <item><p>The filter matches if there is no domain field in + metadata. In this case <c><anno>MatchDomain</anno></c> + must be set to <c>[]</c>.</p> + </item> + </taglist> + + <p>If the filter matches and <c><anno>Action</anno> = log</c>, + the log event is allowed. If the filter matches + and <c><anno>Action</anno> = stop</c>, the log event is + stopped.</p> + + <p>If the filter does not match, it returns <c>ignore</c>, + meaning that other filters, or the value of the + configuration parameter <c>filter_default</c>, decide if the + event is allowed or not.</p> + + <p>Log events that do not contain any domain field, match only + when <c><anno>Compare</anno> = undefined</c> + or <c><anno>Compare</anno> = not_equal</c>.</p> + + <p>Example: stop all events with + domain <c>[beam,erlang,otp,sasl|_]</c></p> + + <code> logger:set_handler_config(h1,filter_default,log). % this is the default -Filter = {fun logger_filters:domain/2,{stop,starts_with,[beam,erlang,otp,sasl]}}. +Filter = {fun logger_filters:domain/2,{stop,sub,[beam,erlang,otp,sasl]}}. logger:add_handler_filter(h1,no_sasl,Filter). ok</code> </desc> diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml index 7df4c88f40..02f89b26be 100644 --- a/lib/kernel/doc/src/logger_formatter.xml +++ b/lib/kernel/doc/src/logger_formatter.xml @@ -39,102 +39,157 @@ <p>Each log handler has a configured formatter specified as a module and a configuration term. 
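Complementing the domain filter example above, a sketch (with a hypothetical domain [my_app]) of the opposite policy: stop everything on handler h1 except events tagged with the application's own domain, attached here via the process metadata:

logger:set_handler_config(h1, filter_default, stop),
logger:add_handler_filter(h1, my_app_only,
                          {fun logger_filters:domain/2, {log, sub, [my_app]}}),
logger:set_process_metadata(#{domain => [my_app]}),
logger:info("my_app started").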
The purpose of the formatter is to translate the log events to a final printable string - (<c>unicode:chardata()</c>) which can be written to the output + (<seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c> + </seealso>) which can be written to the output device of the handler.</p> <p><c>logger_formatter</c> is the default formatter used by Logger.</p> </description> - <section> - <title>Configuration</title> - <p>The configuration term for <c>logger_formatter</c> is a map, - and the following keys can be set as configuration - parameters:</p> - <taglist> - <tag><c>chars_limit = pos_integer() | unlimited</c></tag> - <item> - <p>A positive integer representing the value of the option - with the same name to be used when calling - <seealso marker="stdlib:io_lib#format-3">io_lib:format/3</seealso>. - This value limits the total number of characters printed - for each log event. Notice that this is a soft limit. For a - hard truncation limit, see option <c>max_size</c>.</p> - <p>Default is <c>unlimited</c>.</p> - <note> - <p><c>chars_limit</c> has no effect on log messages on - string form. These are expected to be short, but can still - be truncated by the <c>max_size</c> parameter.</p> - </note> - </item> - <tag><c>depth = pos_integer() | unlimited</c></tag> - <item> - <p>A positive integer representing the maximum depth to - which terms shall be printed by this formatter. Format - strings passed to this formatter are rewritten. The format - controls ~p and ~w are replaced with ~P and ~W, - respectively, and the value is used as the depth - parameter. For details, see - <seealso marker="stdlib:io#format-2">io:format/2,3</seealso> - in STDLIB.</p> - <p>Default is <c>unlimited</c>.</p> - <note> - <p><c>depth</c> has no effect on log messages on string - form. These are expected to be short, but can still be - truncated by the <c>max_size</c> parameter.</p> - </note> - </item> - <tag><c>max_size = pos_integer() | unlimited</c></tag> - <item> - <p>A positive integer representing the absolute maximum size a - string returned from this formatter can have. If the - formatted string is longer, after possibly being limited - by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p> - <p>Default is <c>unlimited</c>.</p> - </item> - <tag><c>single_line = boolean()</c></tag> - <item> - <p>If set to <c>true</c>, all newlines in the message are - replaced with <c>", "</c>, and whitespaces following - directly after newlines are removed. Note that newlines - added by the <c>template</c> parameter are not replaced.</p> - <p>Default is <c>true</c>.</p> - </item> - <tag><c>legacy_header = boolen()</c></tag> - <item> - <p>If set to <c>true</c> a header field is added to - logger_formatter's part of <c>Metadata</c>. The value of - this field is a string similar to the header created by the - old <c>error_logger</c> event handlers. It can be included - in the log event by adding the - tuple <c>{logger_formatter,header}</c> to the template. See - section <seealso marker="#default_templates">Default - Templates</seealso> for more information.</p> - <p>Default is <c>false</c>.</p> - </item> - <tag><c>report_cb = fun((</c><seealso marker="logger#type-report"><c>logger:report()</c></seealso><c>) -> {</c><seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso><c>,[term()]})</c></tag> - <item> - <p>A report callback is used by the formatter to transform log - messages on report form to a format string and - arguments. 
The report callback can be specified in the - metadata for the log event. If no report callback exist in - metadata, <c>logger_formatter</c> will - use <seealso marker="logger#format_report-1"> - <c>logger:format_report/1</c></seealso> as default - callback.</p> - <p>If this configuration parameter is set, it replaces both - the default report callback, and any report callback found - in metadata. That is, all reports are converted by this - configured function.</p> - <p>The value must be a function with arity 1, - returning <c>{Format,Args}</c>, and it will be called with a - report as only argument.</p> - </item> - <tag><c>template = </c><seealso marker="#type-template"><c>template()</c></seealso></tag> - <item> + + <datatypes> + <datatype> + <name name="config"/> + <desc> + <p>The configuration term for <c>logger_formatter</c> is a + <seealso marker="stdlib:maps">map</seealso>, and the + following keys can be set as configuration parameters:</p> + <taglist> + <tag><c>chars_limit = pos_integer() | unlimited</c></tag> + <item> + <p>A positive integer representing the value of the option + with the same name to be used when calling + <seealso marker="stdlib:io_lib#format-3"> + <c>io_lib:format/3</c></seealso>. + This value limits the total number of characters printed + for each log event. Notice that this is a soft limit. For a + hard truncation limit, see option <c>max_size</c>.</p> + <p>Defaults to <c>unlimited</c>.</p> + <note> + <p><c>chars_limit</c> has no effect on log messages on + string form. These are expected to be short, but can + still be truncated by the <c>max_size</c> + parameter.</p> + </note> + </item> + <tag><marker id="depth"/><c>depth = pos_integer() | unlimited</c></tag> + <item> + <p>A positive integer representing the maximum depth to + which terms shall be printed by this formatter. Format + strings passed to this formatter are rewritten. The + format controls ~p and ~w are replaced with ~P and ~W, + respectively, and the value is used as the depth + parameter. For details, see + <seealso marker="stdlib:io#format-2">io:format/2,3</seealso> + in STDLIB.</p> + <p>Defaults to <c>unlimited</c>.</p> + <note> + <p><c>depth</c> has no effect on log messages on string + form. These are expected to be short, but can still be + truncated by the <c>max_size</c> parameter.</p> + </note> + </item> + <tag><c>legacy_header = boolean()</c></tag> + <item> + <p>If set to <c>true</c> a header field is added to + logger_formatter's part of <c>Metadata</c>. The value of + this field is a string similar to the header created by + the old <c>error_logger</c> event handlers. It can be + included in the log event by adding the + tuple <c>{logger_formatter,header}</c> to the + template. See the description of + the <seealso marker="#type-template"><c>template()</c></seealso> + type for more information.</p> + <p>Defaults to <c>false</c>.</p> + </item> + <tag><c>max_size = pos_integer() | unlimited</c></tag> + <item> + <p>A positive integer representing the absolute maximum size a + string returned from this formatter can have. 
If the + formatted string is longer, after possibly being limited + by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p> + <p>Defaults to <c>unlimited</c>.</p> + </item> + <tag><c>report_cb = fun((</c><seealso marker="logger#type-report"><c>logger:report()</c></seealso><c>) -> {</c><seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso><c>,[term()]})</c></tag> + <item> + <p>A report callback is used by the formatter to transform + log messages on report form to a format string and + arguments. The report callback can be specified in the + metadata for the log event. If no report callback exist + in metadata, <c>logger_formatter</c> will + use <seealso marker="logger#format_report-1"> + <c>logger:format_report/1</c></seealso> as default + callback.</p> + <p>If this configuration parameter is set, it replaces + both the default report callback, and any report + callback found in metadata. That is, all reports are + converted by this configured function.</p> + <p>The value must be a function with arity 1, + returning <c>{Format,Args}</c>, and it will be called + with a report as only argument.</p> + </item> + <tag><c>single_line = boolean()</c></tag> + <item> + <p>If set to <c>true</c>, all newlines in the message are + replaced with <c>", "</c>, and whitespaces following + directly after newlines are removed. Note that newlines + added by the <c>template</c> parameter are not replaced.</p> + <p>Defaults to <c>true</c>.</p> + </item> + <tag><c>template = </c><seealso marker="#type-template"><c>template()</c></seealso></tag> + <item> + <p>The template describes how the formatted string is + composed by combining different data values from the log + event. See the description of + the <seealso marker="#type-template"><c>template()</c></seealso> + type for more information about this.</p> + </item> + <tag><c>time_designator = byte()</c></tag> + <item> + <p>Timestamps are formatted according to RFC3339, and the + time designator is the character used as date and time + separator.</p> + <p>Defaults to <c>$T</c>.</p> + <p>The value of this parameter is used as + the <c>time_designator</c> option + to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2"> + <c>calendar:system_time_to_rcf3339/2</c></seealso>.</p> + </item> + <tag><c>time_offset = integer() | [byte()]</c></tag> + <item> + <p>The time offset, either a string or an integer, to be + used when formatting the timestamp.</p> + <p>An empty string is interpreted as local time. The + values <c>"Z"</c>, <c>"z"</c> or <c>0</c> are + interpreted as Universal Coordinated Time (UTC).</p> + <p>Strings, other than <c>"Z"</c>, <c>"z"</c>, + or <c>""</c>, must be on the form <c>±[hh]:[mm]</c>, for + example <c>"-02:00"</c> or <c>"+00:00"</c>.</p> + <p>Integers must be in microseconds, meaning that the + offset <c>7200000000</c> is equivalent + to <c>"+02:00"</c>.</p> + <p>Defaults to an empty string, meaning that timestamps + are displayed in local time. 
However, for backwards + compatibility, if the SASL configuration + parameter <seealso marker="sasl:sasl_app#utc_log"> + <c>utc_log</c></seealso><c>=true</c>, the default is + changed to <c>"Z"</c>, meaning that timestamps are displayed + in UTC.</p> + <p>The value of this parameter is used as + the <c>offset</c> option + to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2"> + <c>calendar:system_time_to_rcf3339/2</c></seealso>.</p> + </item> + </taglist> + </desc> + </datatype> + <datatype> + <name name="template"/> + <desc> <p>The template is a list of atoms, tuples and strings. The atoms <c>level</c> or <c>msg</c>, are treated as placeholders for the severity level and the log message, - repectively. Other atoms or tuples are interpreted as + respectively. Other atoms or tuples are interpreted as placeholders for metadata, where atoms are expected to match top level keys, and tuples represent paths to sub keys when the metadata is a nested map. For example the @@ -152,78 +207,85 @@ <p>Strings in the template are printed literally.</p> <p>The default template differs depending on the values of <c>legacy_header</c> - and <c>single_line</c>. See <seealso marker="#default_templates">Default - Templates</seealso> for more information</p> - </item> - <tag><c>utc = boolean()</c></tag> - <item> - <p>If set to <c>true</c>, all dates are displayed in Universal - Coordinated Time.</p> - <p>Default is <c>false</c>.</p> - </item> - </taglist> - </section> - - <section> - <marker id="default_templates"/> - <title>Default templates</title> - - <p>The default value for the <c>template</c> configuration - parameter depends on the value of <c>single_line</c> - and <c>legacy_header</c> as follows.</p> - - <p>The log event used in the examples is:</p> - <code> -?LOG_ERROR("name: ~p~nexit_reason: ~p",[my_reg_name,"It crashed"])</code> - - <taglist> - <tag><c>legacy_header=true</c></tag> - <item> - <p>Default template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p> - - <p>Example log entry:</p> + and <c>single_line</c>:</p> + + <p>The default value for the <c>template</c> configuration + parameter depends on the value of the <c>single_line</c> + and <c>legacy_header</c> configuration parameters as + follows.</p> + + <p>The log event used in the examples is:</p> <code> -=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 === -name: my_reg_name +?LOG_ERROR("name: ~p~nexit_reason: ~p",[my_name,"It crashed"])</code> + + <taglist> + <tag><c>legacy_header=true, single_line=false</c></tag> + <item> + <p>Default + template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p> + + <p>Example log entry:</p> + <code type="none"> +=ERROR REPORT==== 17-May-2018::18:30:19.453447 === +name: my_name exit_reason: "It crashed"</code> - <p>Notice that all eight levels might occur in the heading, - not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as the - old <c>error_logger</c> produced. And microseconds are - added at the end of the timestamp.</p> - </item> + <p>Notice that all eight levels can occur in the heading, + not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as the + old <c>error_logger</c> produced. 
And microseconds are + added at the end of the timestamp.</p> + </item> - <tag><c>single_line=true</c></tag> - <item> - <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p> + <tag><c>legacy_header=true, single_line=true</c></tag> + <item> + <p>Default + template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p> - <p>Example log entry:</p> - <code>2017-12-29 13:31:49.640317 error: name: my_reg_name, exit_reason: "It crashed"</code> - </item> + <p>Notice that the template is here the same as + for <c>single_line=false</c>, but the resulting log entry + differs in that there is only one line after the + heading:</p> + <code type="none"> +=ERROR REPORT==== 17-May-2018::18:31:06.952665 === +name: my_name, exit_reason: "It crashed"</code> + </item> - <tag><c>legacy_header=false, single_line=false</c></tag> - <item> - <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p> + <tag><c>legacy_header=false, single_line=true</c></tag> + <item> + <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p> - <p>Example log entry:</p> - <code> -2017-12-29 13:32:25.191925 error: -name: my_reg_name -exit_reason: "It crashed"</code> - </item> - </taglist> - </section> + <p>Example log entry:</p> + <code type="none"> +2018-05-17T18:31:31.152864+02:00 error: name: my_name, exit_reason: "It crashed"</code> + </item> - <datatypes> - <datatype> - <name name="template"/> - <desc> + <tag><c>legacy_header=false, single_line=false</c></tag> + <item> + <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p> + + <p>Example log entry:</p> + <code type="none"> +2018-05-17T18:32:20.105422+02:00 error: +name: my_name +exit_reason: "It crashed"</code> + </item> + </taglist> </desc> </datatype> </datatypes> <funcs> <func> + <name name="check_config" arity="1"/> + <fsummary>Validates the given formatter configuration.</fsummary> + <desc> + <p>This callback function is called by Logger when the + formatter configuration for a handler is set or modified. It + returns <c>ok</c> if the configuration is valid, + and <c>{error,term()}</c> if it is faulty.</p> + </desc> + </func> + <func> <name name="format" arity="2"/> <fsummary>Formats the given message.</fsummary> <desc> @@ -245,7 +307,6 @@ exit_reason: "It crashed"</code> </list> </desc> </func> - </funcs> </erlref> diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml index fe9b9ca5a9..a4f2848037 100644 --- a/lib/kernel/doc/src/logger_std_h.xml +++ b/lib/kernel/doc/src/logger_std_h.xml @@ -33,17 +33,17 @@ <file>logger_std_h.xml</file> </header> <module>logger_std_h</module> - <modulesummary>Default handler for the Logger application.</modulesummary> + <modulesummary>Default handler for Logger.</modulesummary> <description> - <p>This is the default handler for the Logger - application. Multiple instances of this handler can be added to - logger, and each instance will print logs to <c>standard_io</c>, + <p>This is the default handler for Logger. + Multiple instances of this handler can be added to + Logger, and each instance will print logs to <c>standard_io</c>, <c>standard_error</c> or to file. 
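A sketch of applying the formatter options described above to the default handler by replacing its formatter configuration; the handler id default and the chosen values are illustrative:

logger:set_handler_config(default, formatter,
                          {logger_formatter, #{single_line => false,
                                               legacy_header => true,
                                               time_offset => "Z"}}).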
The default instance that starts - with kernel is named <c>logger_std_h</c> - which is the name to be used + with Kernel is named <c>default</c> - which is the name to be used for reconfiguration.</p> <p>The handler has an overload protection mechanism that will keep the handler - process and the kernel application alive during a high load of log + process and the Kernel application alive during a high load of log requests. How this feature works, and how to modify the configuration, is described in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> @@ -57,7 +57,7 @@ are stored in a sub map with the key <c>logger_std_h</c>. The following keys and values may be specified:</p> <taglist> - <tag><c>type</c></tag> + <tag><marker id="type"/><c>type</c></tag> <item> <p>This will have the value <c>standard_io</c>, <c>standard_error</c>, <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>, @@ -104,12 +104,11 @@ logger:add_handler(my_standard_h, logger_std_h, filesync_repeat_interval => 1000}}). </code> <p>In order to configure the default handler (that starts initially with - the kernel application) to log to file instead of <c>standard_io</c>, - use the kernel configuration parameter - <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with - value <c>{file,FileName}</c>. Example:</p> + the Kernel application) to log to file instead of <c>standard_io</c>, + change the Kernel default logger to use a file. Example:</p> <code type="none"> -erl -kernel logger_dest '{file,"./erl.log"}' +erl -kernel logger '[{handler,default,logger_std_h, + #{ logger_std_h => #{ type => {file,"./log.log"}}}}]' </code> <p>An example of how to replace the standard handler with a disk_log handler at startup can be found in the manual of @@ -128,6 +127,11 @@ erl -kernel logger_dest '{file,"./erl.log"}' </funcs> + <section> + <title>See Also</title> + <p><seealso marker="logger"><c>logger(3)</c></seealso></p> + <p><seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso></p> + </section> </erlref> diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml index c06914d23d..b6c2714664 100644 --- a/lib/kernel/doc/src/ref_man.xml +++ b/lib/kernel/doc/src/ref_man.xml @@ -32,12 +32,15 @@ </description> <xi:include href="kernel_app.xml"/> + <xi:include href="app.xml"/> <xi:include href="application.xml"/> <xi:include href="auth.xml"/> <xi:include href="code.xml"/> + <xi:include href="config.xml"/> <xi:include href="disk_log.xml"/> <xi:include href="erl_boot_server.xml"/> <xi:include href="erl_ddll.xml"/> + <xi:include href="erl_epmd.xml"/> <xi:include href="erl_prim_loader_stub.xml"/> <xi:include href="erlang_stub.xml"/> <xi:include href="error_handler.xml"/> @@ -66,6 +69,4 @@ <xi:include href="user.xml"/> <xi:include href="wrap_log_reader.xml"/> <xi:include href="zlib_stub.xml"/> - <xi:include href="app.xml"/> - <xi:include href="config.xml"/> </application> diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml index bcc422930e..b8c25ca53b 100644 --- a/lib/kernel/doc/src/specs.xml +++ b/lib/kernel/doc/src/specs.xml @@ -6,6 +6,7 @@ <xi:include href="../specs/specs_disk_log.xml"/> <xi:include href="../specs/specs_erl_boot_server.xml"/> <xi:include href="../specs/specs_erl_ddll.xml"/> + <xi:include href="../specs/specs_erl_epmd.xml"/> <xi:include href="../specs/specs_erl_prim_loader_stub.xml"/> <xi:include href="../specs/specs_erlang_stub.xml"/> <xi:include href="../specs/specs_error_handler.xml"/> diff --git 
a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile index eeb8c6ab2f..f265fdd272 100644 --- a/lib/kernel/src/Makefile +++ b/lib/kernel/src/Makefile @@ -118,7 +118,7 @@ MODULES = \ logger_filters \ logger_formatter \ logger_server \ - logger_simple \ + logger_simple_h \ logger_sup \ net \ net_adm \ @@ -279,7 +279,7 @@ $(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../inclu $(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl $(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl $(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl -$(EBIN)/logger_simple.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_simple_h.beam: logger_internal.hrl ../include/logger.hrl $(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl $(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl $(EBIN)/net_kernel.beam: ../include/net_address.hrl diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl index b9cb722575..6906ad0d6e 100644 --- a/lib/kernel/src/application_controller.erl +++ b/lib/kernel/src/application_controller.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -1272,9 +1272,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) -> NewEnv = merge_app_env(ApplEnv, ConfEnv), CmdLineEnv = get_cmd_env(Name), NewEnv2 = merge_app_env(NewEnv, CmdLineEnv), - NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2, - {included_applications, IncApps}), - add_env(Name, NewEnv3), + add_env(Name, NewEnv2), Appl = #appl{name = Name, descr = Descr, id = Id, vsn = Vsn, appl_data = ApplData, inc_apps = IncApps, apps = Apps}, ets:insert(ac_tab, {{loaded, Name}, Appl}), @@ -1292,7 +1290,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) -> {ok, NewS}. unload(AppName, S) -> - {ok, IncApps} = get_env(AppName, included_applications), + {ok, IncApps} = get_key(AppName, included_applications), del_env(AppName), ets:delete(ac_tab, {loaded, AppName}), foldl(fun(App, S1) -> @@ -1583,13 +1581,9 @@ do_change_appl({ok, {ApplData, Env, IncApps, Descr, Id, Vsn, Apps}}, CmdLineEnv = get_cmd_env(AppName), NewEnv2 = merge_app_env(NewEnv1, CmdLineEnv), - %% included_apps is made into an env parameter as well - NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2, - {included_applications, IncApps}), - %% Update ets table with new application env del_env(AppName), - add_env(AppName, NewEnv3), + add_env(AppName, NewEnv2), OldAppl#appl{appl_data=ApplData, descr=Descr, @@ -2019,5 +2013,5 @@ to_string(Term) -> true -> Term; false -> - lists:flatten(io_lib:format("~134217728p", [Term])) + lists:flatten(io_lib:format("~0p", [Term])) end. diff --git a/lib/kernel/src/code.erl b/lib/kernel/src/code.erl index f143a49d2f..7faef93609 100644 --- a/lib/kernel/src/code.erl +++ b/lib/kernel/src/code.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -630,7 +630,7 @@ do_par_recv(N, Good, Bad) -> call(Req) -> code_server:call(Req). --spec start_link() -> {'ok', pid()} | {'error', 'crash'}. +-spec start_link() -> {'ok', pid()}. start_link() -> do_start(). diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl index bbfa2a995d..1a7677295b 100644 --- a/lib/kernel/src/code_server.erl +++ b/lib/kernel/src/code_server.erl @@ -1437,7 +1437,7 @@ error_msg(Format, Args) -> logger ! {log,error,Format,Args, #{pid=>self(), gl=>group_leader(), - time=>erlang:monotonic_time(microsecond), + time=>erlang:system_time(microsecond), error_logger=>#{tag=>error}}}, ok. @@ -1446,7 +1446,7 @@ info_msg(Format, Args) -> logger ! {log,info,Format,Args, #{pid=>self(), gl=>group_leader(), - time=>erlang:monotonic_time(microsecond), + time=>erlang:system_time(microsecond), error_logger=>#{tag=>info_msg}}}, ok. diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl index f96bc88913..9a0939972d 100644 --- a/lib/kernel/src/erl_epmd.erl +++ b/lib/kernel/src/erl_epmd.erl @@ -29,10 +29,20 @@ -define(port_please_failure2(Term), noop). -endif. +-ifndef(erlang_daemon_port). +-define(erlang_daemon_port, 4369). +-endif. +-ifndef(epmd_dist_high). +-define(epmd_dist_high, 4370). +-endif. +-ifndef(epmd_dist_low). +-define(epmd_dist_low, 4370). +-endif. + %% External exports -export([start/0, start_link/0, stop/0, port_please/2, port_please/3, names/0, names/1, - register_node/2, register_node/3, open/0, open/1, open/2]). + register_node/2, register_node/3, address_please/3, open/0, open/1, open/2]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -53,7 +63,7 @@ start() -> gen_server:start({local, erl_epmd}, ?MODULE, [], []). - +-spec start_link() -> {ok, pid()} | ignore | {error,term()}. start_link() -> gen_server:start_link({local, erl_epmd}, ?MODULE, [], []). @@ -66,9 +76,22 @@ stop() -> %% return {port, P, Version} | noport %% +-spec port_please(Name, Host) -> {ok, Port, Version} | noport when + Name :: string(), + Host :: inet:ip_address(), + Port :: non_neg_integer(), + Version :: non_neg_integer(). + port_please(Node, Host) -> port_please(Node, Host, infinity). +-spec port_please(Name, Host, Timeout) -> {ok, Port, Version} | noport when + Name :: string(), + Host :: inet:ip_address(), + Timeout :: non_neg_integer() | infinity, + Port :: non_neg_integer(), + Version :: non_neg_integer(). + port_please(Node,HostName, Timeout) when is_atom(HostName) -> port_please1(Node,atom_to_list(HostName), Timeout); port_please(Node,HostName, Timeout) when is_list(HostName) -> @@ -92,10 +115,21 @@ port_please1(Node,HostName, Timeout) -> Else end. +-spec names() -> {ok, [{Name, Port}]} | {error, Reason} when + Name :: string(), + Port :: non_neg_integer(), + Reason :: address | file:posix(). + names() -> {ok, H} = inet:gethostname(), names(H). +-spec names(Host) -> {ok, [{Name, Port}]} | {error, Reason} when + Host :: atom() | string() | inet:ip_address(), + Name :: string(), + Port :: non_neg_integer(), + Reason :: address | file:posix(). + names(HostName) when is_atom(HostName); is_list(HostName) -> case inet:gethostbyname(HostName) of {ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} -> @@ -106,9 +140,22 @@ names(HostName) when is_atom(HostName); is_list(HostName) -> names(EpmdAddr) -> get_names(EpmdAddr). +-spec register_node(Name, Port) -> Result when + Name :: string(), + Port :: non_neg_integer(), + Creation :: non_neg_integer(), + Result :: {ok, Creation} | {error, already_registered} | term(). 
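As an aside, a sketch of a minimal alternative discovery module shaped after the erl_epmd callbacks specified in this file, including the new address_please/3 added just below. The module name and the port and version values are hypothetical; such a module would typically be selected with the -epmd_module flag consulted by net_kernel:epmd_module/0 in the distribution changes further down:

-module(my_epmd).
-export([start_link/0, register_node/3, address_please/3, names/1]).

%% Nothing to supervise in this sketch.
start_link() ->
    ignore.

%% Return an illustrative creation number.
register_node(_Name, _Port, _Driver) ->
    {ok, 1}.

%% Resolve the address and also hand back a fixed (illustrative) port and
%% distribution protocol version, so that port_please/2 is never consulted.
address_please(_Name, Host, AddressFamily) ->
    {ok, Ip} = inet:getaddr(Host, AddressFamily),
    {ok, Ip, 4370, 5}.

%% Node listing is not supported in this sketch.
names(_Host) ->
    {error, address}.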
register_node(Name, PortNo) -> - register_node(Name, PortNo, inet). + register_node(Name, PortNo, inet). + +-spec register_node(Name, Port, Driver) -> Result when + Name :: string(), + Port :: non_neg_integer(), + Driver :: inet_tcp | inet6_tcp | inet | inet6, + Creation :: non_neg_integer(), + Result :: {ok, Creation} | {error, already_registered} | term(). + register_node(Name, PortNo, inet_tcp) -> register_node(Name, PortNo, inet); register_node(Name, PortNo, inet6_tcp) -> @@ -116,6 +163,17 @@ register_node(Name, PortNo, inet6_tcp) -> register_node(Name, PortNo, Family) -> gen_server:call(erl_epmd, {register, Name, PortNo, Family}, infinity). +-spec address_please(Name, Host, AddressFamily) -> Success | {error, term()} when + Name :: string(), + Host :: string() | inet:ip_address(), + AddressFamily :: inet | inet6, + Port :: non_neg_integer(), + Version :: non_neg_integer(), + Success :: {ok, inet:ip_address()} | {ok, inet:ip_address(), Port, Version}. + +address_please(_Name, Host, AddressFamily) -> + inet:getaddr(Host, AddressFamily). + %%%---------------------------------------------------------------------- %%% Callback functions from gen_server %%%---------------------------------------------------------------------- diff --git a/lib/kernel/src/erl_signal_handler.erl b/lib/kernel/src/erl_signal_handler.erl index 22f235d4e4..b76c2a217a 100644 --- a/lib/kernel/src/erl_signal_handler.erl +++ b/lib/kernel/src/erl_signal_handler.erl @@ -19,12 +19,21 @@ -module(erl_signal_handler). -behaviour(gen_event). --export([init/1, format_status/2, +-export([start/0, init/1, format_status/2, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]). -record(state,{}). +start() -> + %% add signal handler + case whereis(erl_signal_server) of + %% in case of minimal mode + undefined -> ok; + _ -> + gen_event:add_handler(erl_signal_server, erl_signal_handler, []) + end. + init(_Args) -> {ok, #state{}}. diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl index 47d0ca5ea3..b3957d0c7e 100644 --- a/lib/kernel/src/error_logger.erl +++ b/lib/kernel/src/error_logger.erl @@ -32,7 +32,7 @@ which_report_handlers/0]). %% logger callbacks --export([adding_handler/2, removing_handler/2, log/2]). +-export([adding_handler/1, removing_handler/1, log/2]). -export([get_format_depth/0, limit_term/1]). @@ -101,9 +101,9 @@ stop() -> %%%----------------------------------------------------------------- %%% Callbacks for logger --spec adding_handler(logger:handler_id(),logger:config()) -> +-spec adding_handler(logger:config()) -> {ok,logger:config()} | {error,term()}. -adding_handler(?MODULE,Config) -> +adding_handler(#{id:=?MODULE}=Config) -> case start() of ok -> {ok,Config}; @@ -111,12 +111,12 @@ adding_handler(?MODULE,Config) -> Error end. --spec removing_handler(logger:handler_id(),logger:config()) -> ok. -removing_handler(?MODULE,_Config) -> +-spec removing_handler(logger:config()) -> ok. +removing_handler(#{id:=?MODULE}) -> stop(), ok. --spec log(logger:log(),logger:config()) -> ok. +-spec log(logger:log_event(),logger:config()) -> ok. log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) -> do_log(Level,Msg,Meta). @@ -529,18 +529,38 @@ logfile(filename) -> Flag :: boolean(). 
tty(true) -> - case lists:member(error_logger_tty_h, which_report_handlers()) of - false -> - add_report_handler(error_logger_tty_h, []); - true -> - ignore - end, + _ = case lists:member(error_logger_tty_h, which_report_handlers()) of + false -> + case logger:get_handler_config(default) of + {ok,{logger_std_h,#{logger_std_h:=#{type:=standard_io}}}} -> + logger:remove_handler_filter(default, + error_logger_tty_false); + _ -> + logger:add_handler(error_logger_tty_true,logger_std_h, + #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS( + [beam,erlang,otp]), + formatter=>{?DEFAULT_FORMATTER, + ?DEFAULT_FORMAT_CONFIG}, + logger_std_h=>#{type=>standard_io}}) + end; + true -> + ok + end, ok; tty(false) -> - delete_report_handler(error_logger_tty_h). + delete_report_handler(error_logger_tty_h), + _ = logger:remove_handler(error_logger_tty_true), + _ = case logger:get_handler_config(default) of + {ok,{logger_std_h,#{logger_std_h:=#{type:=standard_io}}}} -> + logger:add_handler_filter(default,error_logger_tty_false, + {fun(_,_) -> stop end, ok}); + _ -> + ok + end, + ok. %%%----------------------------------------------------------------- - -spec limit_term(term()) -> term(). limit_term(Term) -> @@ -552,4 +572,9 @@ limit_term(Term) -> -spec get_format_depth() -> 'unlimited' | pos_integer(). get_format_depth() -> - logger:get_format_depth(). + case application:get_env(kernel, error_logger_format_depth) of + {ok, Depth} when is_integer(Depth) -> + max(10, Depth); + undefined -> + unlimited + end. diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl index 57d8fc7a15..1d4e37196c 100644 --- a/lib/kernel/src/file.erl +++ b/lib/kernel/src/file.erl @@ -69,7 +69,7 @@ %% Types that can be used from other modules -- alphabetically ordered. -export_type([date_time/0, fd/0, file_info/0, filename/0, filename_all/0, - io_device/0, name/0, name_all/0, posix/0]). + io_device/0, mode/0, name/0, name_all/0, posix/0]). %%% Includes and defines -include("file_int.hrl"). diff --git a/lib/kernel/src/file_server.erl b/lib/kernel/src/file_server.erl index ecc1ffbdd6..29eaa23375 100644 --- a/lib/kernel/src/file_server.erl +++ b/lib/kernel/src/file_server.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2000-2017. All Rights Reserved. +%% Copyright Ericsson AB 2000-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -73,7 +73,7 @@ stop() -> %% {stop, Reason} %%---------------------------------------------------------------------- --spec init([]) -> {'ok', state()} | {'stop', term()}. +-spec init([]) -> {'ok', state()}. init([]) -> process_flag(trap_exit, true), @@ -225,7 +225,7 @@ handle_cast(Msg, State) -> %%---------------------------------------------------------------------- -spec handle_info(term(), state()) -> - {'noreply', state()} | {'stop', 'normal', state()}. + {'noreply', state()}. 
handle_info({'EXIT', Pid, _Reason}, State) when is_pid(Pid) -> ets:delete(?FILE_IO_SERVER_TABLE, Pid), diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl index a6aa0edd15..3526df3600 100644 --- a/lib/kernel/src/gen_sctp.erl +++ b/lib/kernel/src/gen_sctp.erl @@ -118,6 +118,8 @@ open() -> | inet:address_family() | {port,Port} | {type,SockType} + | {netns, file:filename_all()} + | {bind_to_device, binary()} | option(), IP :: inet:ip_address() | any | loopback, Port :: inet:port_number(), diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl index ac61dbc792..253c63528f 100644 --- a/lib/kernel/src/gen_tcp.erl +++ b/lib/kernel/src/gen_tcp.erl @@ -102,6 +102,8 @@ inet:address_family() | {port, inet:port_number()} | {tcp_module, module()} | + {netns, file:filename_all()} | + {bind_to_device, binary()} | option(). -type listen_option() :: {ip, inet:socket_address()} | @@ -111,6 +113,8 @@ {port, inet:port_number()} | {backlog, B :: non_neg_integer()} | {tcp_module, module()} | + {netns, file:filename_all()} | + {bind_to_device, binary()} | option(). -type socket() :: port(). diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl index 3121544719..9ab58011ec 100644 --- a/lib/kernel/src/gen_udp.erl +++ b/lib/kernel/src/gen_udp.erl @@ -97,6 +97,8 @@ open(Port) -> | {ifaddr, inet:socket_address()} | inet:address_family() | {port, inet:port_number()} + | {netns, file:filename_all()} + | {bind_to_device, binary()} | option(), Socket :: socket(), Reason :: inet:posix(). diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl index e3fdb1bb22..b4b50899f7 100644 --- a/lib/kernel/src/inet_tcp_dist.erl +++ b/lib/kernel/src/inet_tcp_dist.erl @@ -283,73 +283,22 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) -> ?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]), [Name, Address] = splitnode(Driver, Node, LongOrShortNames), AddressFamily = Driver:family(), - case inet:getaddr(Address, AddressFamily) of + ErlEpmd = net_kernel:epmd_module(), + {ARMod, ARFun} = get_address_resolver(ErlEpmd), + Timer = dist_util:start_timer(SetupTime), + case ARMod:ARFun(Name, Address, AddressFamily) of + {ok, Ip, TcpPort, Version} -> + ?trace("address_please(~p) -> version ~p~n", + [Node,Version]), + do_setup_connect(Driver, Kernel, Node, Address, AddressFamily, + Ip, TcpPort, Version, Type, MyNode, Timer); {ok, Ip} -> - Timer = dist_util:start_timer(SetupTime), - ErlEpmd = net_kernel:epmd_module(), case ErlEpmd:port_please(Name, Ip) of {port, TcpPort, Version} -> ?trace("port_please(~p) -> version ~p~n", [Node,Version]), - dist_util:reset_timer(Timer), - case - Driver:connect( - Ip, TcpPort, - connect_options([{active, false}, {packet, 2}])) - of - {ok, Socket} -> - HSData = #hs_data{ - kernel_pid = Kernel, - other_node = Node, - this_node = MyNode, - socket = Socket, - timer = Timer, - this_flags = 0, - other_version = Version, - f_send = fun Driver:send/2, - f_recv = fun Driver:recv/3, - f_setopts_pre_nodeup = - fun(S) -> - inet:setopts - (S, - [{active, false}, - {packet, 4}, - nodelay()]) - end, - f_setopts_post_nodeup = - fun(S) -> - inet:setopts - (S, - [{active, true}, - {deliver, port}, - {packet, 4}, - nodelay()]) - end, - - f_getll = fun inet:getll/1, - f_address = - fun(_,_) -> - #net_address{ - address = {Ip,TcpPort}, - host = Address, - protocol = tcp, - family = AddressFamily} - end, - mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end, - mf_getstat = fun ?MODULE:getstat/1, - request_type = Type, - mf_setopts = fun 
?MODULE:setopts/2, - mf_getopts = fun ?MODULE:getopts/2 - }, - dist_util:handshake_we_started(HSData); - _ -> - %% Other Node may have closed since - %% port_please ! - ?trace("other node (~p) " - "closed since port_please.~n", - [Node]), - ?shutdown(Node) - end; + do_setup_connect(Driver, Kernel, Node, Address, AddressFamily, + Ip, TcpPort, Version, Type, MyNode, Timer); _ -> ?trace("port_please (~p) " "failed.~n", [Node]), @@ -361,6 +310,71 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) -> ?shutdown(Node) end. +%% +%% Actual setup of connection +%% +do_setup_connect(Driver, Kernel, Node, Address, AddressFamily, + Ip, TcpPort, Version, Type, MyNode, Timer) -> + dist_util:reset_timer(Timer), + case + Driver:connect( + Ip, TcpPort, + connect_options([{active, false}, {packet, 2}])) + of + {ok, Socket} -> + HSData = #hs_data{ + kernel_pid = Kernel, + other_node = Node, + this_node = MyNode, + socket = Socket, + timer = Timer, + this_flags = 0, + other_version = Version, + f_send = fun Driver:send/2, + f_recv = fun Driver:recv/3, + f_setopts_pre_nodeup = + fun(S) -> + inet:setopts + (S, + [{active, false}, + {packet, 4}, + nodelay()]) + end, + f_setopts_post_nodeup = + fun(S) -> + inet:setopts + (S, + [{active, true}, + {deliver, port}, + {packet, 4}, + nodelay()]) + end, + + f_getll = fun inet:getll/1, + f_address = + fun(_,_) -> + #net_address{ + address = {Ip,TcpPort}, + host = Address, + protocol = tcp, + family = AddressFamily} + end, + mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end, + mf_getstat = fun ?MODULE:getstat/1, + request_type = Type, + mf_setopts = fun ?MODULE:setopts/2, + mf_getopts = fun ?MODULE:getopts/2 + }, + dist_util:handshake_we_started(HSData); + _ -> + %% Other Node may have closed since + %% discovery ! + ?trace("other node (~p) " + "closed since discovery (port_please).~n", + [Node]), + ?shutdown(Node) + end. + connect_options(Opts) -> case application:get_env(kernel, inet_dist_connect_options) of {ok,ConnectOpts} -> @@ -430,6 +444,16 @@ get_tcp_address(Driver, Socket) -> }. %% ------------------------------------------------------------ +%% Determine if EPMD module supports address resolving. Default +%% is to use inet:getaddr/2. +%% ------------------------------------------------------------ +get_address_resolver(EpmdModule) -> + case erlang:function_exported(EpmdModule, address_please, 3) of + true -> {EpmdModule, address_please}; + _ -> {inet, getaddr} + end. + +%% ------------------------------------------------------------ %% Do only accept new connection attempts from nodes at our %% own LAN, if the check_ip environment parameter is true. %% ------------------------------------------------------------ diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src index afffcd156e..d873178f55 100644 --- a/lib/kernel/src/kernel.app.src +++ b/lib/kernel/src/kernel.app.src @@ -68,7 +68,7 @@ logger_formatter, logger_h_common, logger_server, - logger_simple, + logger_simple_h, logger_std_h, logger_sup, net, @@ -140,7 +140,10 @@ inet_db, pg2]}, {applications, []}, - {env, []}, + {env, [{logger_level, info}, + {logger_sasl_compatible, false}, + {logger_progress_reports, stop} + ]}, {mod, {kernel, []}}, {runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]} ] diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl index 20aa47f602..b0e8c00bbf 100644 --- a/lib/kernel/src/kernel.erl +++ b/lib/kernel/src/kernel.erl @@ -30,23 +30,13 @@ %%% Callback functions for the kernel application. 
%%%----------------------------------------------------------------- start(_, []) -> + %% Setup the logger and configure the kernel logger environment + ok = logger:internal_init_logger(), case supervisor:start_link({local, kernel_sup}, kernel, []) of {ok, Pid} -> - %% add signal handler - case whereis(erl_signal_server) of - %% in case of minimal mode - undefined -> ok; - _ -> - ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, []) - end, - %% add error handler - case logger:setup_standard_handler() of - ok -> {ok, Pid, []}; - Error -> - %% Not necessary since the node will crash anyway: - exit(Pid, shutdown), - Error - end; + ok = erl_signal_handler:start(), + ok = logger:add_handlers(kernel), + {ok, Pid, []}; Error -> Error end. @@ -153,7 +143,7 @@ init([]) -> case init:get_argument(mode) of {ok, [["minimal"]]} -> {ok, {SupFlags, - [Code, File, StdError, User, Config, RefC, SafeSup, LoggerSup]}}; + [Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}}; _ -> Rpc = #{id => rex, start => {rpc, start_link, []}, diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl index 535083ef27..c5ff1887c2 100644 --- a/lib/kernel/src/kernel_config.erl +++ b/lib/kernel/src/kernel_config.erl @@ -30,11 +30,8 @@ %%%----------------------------------------------------------------- %%% This module implements a process that configures the kernel %%% application. -%%% Its purpose is that in the init phase add an error_logger -%%% and when it dies (when the kernel application dies) deleting the -%%% previously installed error_logger. -%%% Also, this process waits for other nodes at startup, if -%%% specified. +%%% Its purpose is that in the init phase waits for other nodes at startup, +%%% if specified. %%%----------------------------------------------------------------- start_link() -> gen_server:start_link(kernel_config, [], []). diff --git a/lib/kernel/src/kernel_refc.erl b/lib/kernel/src/kernel_refc.erl index 05076dc885..8e04ff99d8 100644 --- a/lib/kernel/src/kernel_refc.erl +++ b/lib/kernel/src/kernel_refc.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2017. All Rights Reserved. +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -44,7 +44,7 @@ scheduler_wall_time(Bool) -> %% Callback functions from gen_server %%----------------------------------------------------------------- --spec init([]) -> {'ok', map()} | {'stop', term()}. +-spec init([]) -> {'ok', map()}. init([]) -> resource(scheduler_wall_time, false), diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl index 98a9937111..3beb3102fa 100644 --- a/lib/kernel/src/logger.erl +++ b/lib/kernel/src/logger.erl @@ -37,18 +37,22 @@ -export([add_handler/3, remove_handler/1, add_logger_filter/2, add_handler_filter/3, remove_logger_filter/1, remove_handler_filter/2, - set_module_level/2, reset_module_level/1, + set_module_level/2, unset_module_level/1, set_logger_config/1, set_logger_config/2, set_handler_config/2, set_handler_config/3, - get_logger_config/0, get_handler_config/1]). + update_logger_config/1, update_handler_config/2, + update_formatter_config/2, update_formatter_config/3, + get_logger_config/0, get_handler_config/1, + add_handlers/1]). + +%% Private configuration +-export([internal_init_logger/0]). %% Misc -export([compare_levels/2]). 
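Taken together with the new Kernel environment defaults above, a sketch of a sys.config entry that sets the level and declares the default handler read by logger:add_handlers(kernel); the log file path is hypothetical:

[{kernel,
  [{logger_level, debug},
   {logger_sasl_compatible, false},
   {logger,
    [{handler, default, logger_std_h,
      #{logger_std_h => #{type => {file, "./log/kernel.log"}}}}]}
  ]}].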
-export([set_process_metadata/1, update_process_metadata/1, unset_process_metadata/0, get_process_metadata/0]). -export([i/0, i/1]). --export([setup_standard_handler/0, replace_simple_handler/3]). --export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]). %% Basic report formatting -export([format_report/1, format_otp_report/1]). @@ -60,11 +64,11 @@ %%%----------------------------------------------------------------- %%% Types --type log() :: #{level:=level(), - msg:={io:format(),[term()]} | - {report,report()} | - {string,unicode:chardata()}, - meta:=metadata()}. +-type log_event() :: #{level:=level(), + msg:={io:format(),[term()]} | + {report,report()} | + {string,unicode:chardata()}, + meta:=metadata()}. -type level() :: emergency | alert | critical | error | warning | notice | info | debug. -type report() :: map() | [{atom(),term()}]. @@ -77,24 +81,32 @@ mfa => {module(),atom(),non_neg_integer()}, file => file:filename(), line => non_neg_integer(), - term() => term()}. + domain => [atom()], + report_cb => fun((report()) -> {io:format(),[term()]}), + atom() => term()}. -type location() :: #{mfa := {module(),atom(),non_neg_integer()}, file := file:filename(), line := non_neg_integer()}. -type handler_id() :: atom(). -type filter_id() :: atom(). --type filter() :: {fun((log(),filter_arg()) -> filter_return()),filter_arg()}. +-type filter() :: {fun((log_event(),filter_arg()) -> + filter_return()),filter_arg()}. -type filter_arg() :: term(). --type filter_return() :: stop | ignore | log(). --type config() :: #{level => level(), +-type filter_return() :: stop | ignore | log_event(). +-type config() :: #{id => handler_id(), + level => level(), filter_default => log | stop, filters => [{filter_id(),filter()}], - formatter => {module(),term()}, - term() => term()}. + formatter => {module(),formatter_config()}, + atom() => term()}. -type timestamp() :: integer(). +-type formatter_config() :: #{atom() => term()}. + +-type config_handler() :: {handler, handler_id(), module(), config()}. --export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0, - filter_id/0,filter/0,filter_arg/0,filter_return/0]). +-export_type([log_event/0,level/0,report/0,msg_fun/0,metadata/0,config/0, + handler_id/0,filter_id/0,filter/0,filter_arg/0,filter_return/0, + config_handler/0,formatter_config/0]). %%%----------------------------------------------------------------- %%% API @@ -357,10 +369,22 @@ set_handler_config(HandlerId,Key,Value) -> set_handler_config(HandlerId,Config) -> logger_server:set_config(HandlerId,Config). +-spec update_logger_config(Config) -> ok | {error,term()} when + Config :: config(). +update_logger_config(Config) -> + logger_server:update_config(logger,Config). + +-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when + HandlerId :: handler_id(), + Config :: config(). +update_handler_config(HandlerId,Config) -> + logger_server:update_config(HandlerId,Config). + -spec get_logger_config() -> {ok,Config} when Config :: config(). get_logger_config() -> - logger_config:get(?LOGGER_TABLE,logger). + {ok,Config} = logger_config:get(?LOGGER_TABLE,logger), + {ok,maps:remove(handlers,Config)}. -spec get_handler_config(HandlerId) -> {ok,{Module,Config}} | {error,term()} when HandlerId :: handler_id(), @@ -369,16 +393,31 @@ get_logger_config() -> get_handler_config(HandlerId) -> logger_config:get(?LOGGER_TABLE,HandlerId). 
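A sketch of calling the new update functions exported above; the values are illustrative, and it is assumed that update_handler_config/2 and update_logger_config/1 change only the given keys of the existing configuration:

ok = logger:update_handler_config(default, #{level => error}),
ok = logger:update_logger_config(#{level => notice}).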
+-spec update_formatter_config(HandlerId,FormatterConfig) -> + ok | {error,term()} when + HandlerId :: config(), + FormatterConfig :: formatter_config(). +update_formatter_config(HandlerId,FormatterConfig) -> + logger_server:update_formatter_config(HandlerId,FormatterConfig). + +-spec update_formatter_config(HandlerId,Key,Value) -> + ok | {error,term()} when + HandlerId :: config(), + Key :: atom(), + Value :: term(). +update_formatter_config(HandlerId,Key,Value) -> + logger_server:update_formatter_config(HandlerId,#{Key=>Value}). + -spec set_module_level(Module,Level) -> ok | {error,term()} when Module :: module(), Level :: level(). set_module_level(Module,Level) -> logger_server:set_module_level(Module,Level). --spec reset_module_level(Module) -> ok | {error,term()} when +-spec unset_module_level(Module) -> ok | {error,term()} when Module :: module(). -reset_module_level(Module) -> - logger_server:reset_module_level(Module). +unset_module_level(Module) -> + logger_server:unset_module_level(Module). %%%----------------------------------------------------------------- %%% Misc @@ -441,8 +480,9 @@ i() -> i(_Action = print) -> io:put_chars(i(string)); i(_Action = string) -> - #{logger := #{level := Level, handlers := Handlers, - filters := Filters, filter_default := FilterDefault}, + #{logger := #{level := Level, + filters := Filters, + filter_default := FilterDefault}, handlers := HandlerConfigs, module_levels := Modules} = i(term), [io_lib:format("Current logger configuration:~n", []), @@ -451,16 +491,15 @@ i(_Action = string) -> io_lib:format(" Filters: ~n", []), print_filters(4, Filters), io_lib:format(" Handlers: ~n", []), - print_handlers([C || {Id, _, _} = C <- HandlerConfigs, - lists:member(Id, Handlers)]), + print_handlers(HandlerConfigs), io_lib:format(" Level set per module: ~n", []), print_module_levels(Modules) ]; i(_Action = term) -> {Logger, Handlers, Modules} = logger_config:get(tid()), - #{logger=>Logger, - handlers=>Handlers, - module_levels=>Modules}. + #{logger=>maps:remove(handlers,Logger), + handlers=>lists:keysort(1,Handlers), + module_levels=>lists:keysort(1,Modules)}. print_filters(Indent, {Id, {Fun, Config}}) -> io_lib:format("~sId: ~p~n" @@ -504,204 +543,209 @@ print_module_levels({Module,Level}) -> print_module_levels(ModuleLevels) -> lists:map(fun print_module_levels/1, ModuleLevels). --spec setup_standard_handler() -> ok | {error,term()}. -setup_standard_handler() -> - case get_logger_type() of - {ok,silent} -> - Level = get_logger_level(), - ok = set_logger_config(level,Level), - remove_handler(logger_simple); - {ok,Type} -> - Level = get_logger_level(), - ok = set_logger_config(level,Level), - Filters = get_logger_filters(), - setup_standard_handler(Type,#{level=>Level, - filter_default=>stop, - filters=>Filters}); - Error -> - Error +-spec internal_init_logger() -> ok | {error,term()}. +%% This function is responsible for config of the logger +%% This is done before add_handlers because we want the +%% logger settings to take effect before the kernel supervisor +%% tree is started. 
+internal_init_logger() -> + try + ok = logger:set_logger_config(level, get_logger_level()), + ok = logger:set_logger_config(filter_default, get_logger_filter_default()), + + [case logger:add_logger_filter(Id, Filter) of + ok -> ok; + {error, Reason} -> throw(Reason) + end || {Id, Filter} <- get_logger_filters()], + + _ = [[case logger:set_module_level(Module, Level) of + ok -> ok; + {error, Reason} -> throw(Reason) + end || Module <- Modules] + || {module_level, Level, Modules} <- get_logger_env()], + + case logger:set_handler_config(simple,filters, + get_default_handler_filters()) of + ok -> ok; + {error,{not_found,simple}} -> ok + end, + + init_kernel_handlers() + catch throw:Reason -> + ?LOG_ERROR("Invalid logger config: ~p", [Reason]), + {error, {bad_config, {kernel, Reason}}} end. --spec setup_standard_handler(Type,Config) -> ok | {error,term()} when - Type :: tty | standard_io | standard_error | {file,File} | - {file,File,Modes} | {disk_log,LogOpts} | false, - File :: file:filename(), - Modes :: [term()], % [file:mode()], or more specific? - Config :: config(), - LogOpts :: map(). -setup_standard_handler(false,#{level:=Level,filters:=Filters}) -> - case set_handler_config(logger_simple,level,Level) of - ok -> - set_handler_config(logger_simple,filters,Filters); - Error -> - Error - end; -setup_standard_handler(Type,Config) -> - {Module,TypeConfig} = get_type_config(Type), - replace_simple_handler(?STANDARD_HANDLER, - Module, - maps:merge(Config,TypeConfig)). - --spec replace_simple_handler(Id,Module,Config) -> ok | {error,term()} when - Id :: handler_id(), - Module :: module(), - Config :: config(). -replace_simple_handler(Id,Module,Config) -> - _ = code:ensure_loaded(Module), - DoBuffer = erlang:function_exported(Module,swap_buffer,2), - case add_handler(Id,Module,Config#{wait_for_buffer=>DoBuffer}) of - ok -> - if DoBuffer -> - {ok,Buffered} = logger_simple:get_buffer(), - _ = remove_handler(logger_simple), - Module:swap_buffer(?STANDARD_HANDLER,Buffered); - true -> - _ = remove_handler(logger_simple), - ok - end, - ok; - Error -> - Error +-spec init_kernel_handlers() -> ok | {error,term()}. +%% Setup the kernel environment variables to be correct +%% The actual handlers are started by a call to add_handlers. +init_kernel_handlers() -> + try + case get_logger_type() of + {ok,silent} -> + ok = logger:remove_handler(simple); + {ok,false} -> + ok; + {ok,Type} -> + init_default_config(Type) + end + catch throw:Reason -> + ?LOG_ERROR("Invalid default handler config: ~p", [Reason]), + {error, {bad_config, {kernel, Reason}}} + end. + +-spec add_handlers(Application) -> ok | {error,term()} when + Application :: atom(); + (HandlerConfig) -> ok | {error,term()} when + HandlerConfig :: [config_handler()]. +%% This function is responsible for resolving the handler config +%% and then starting the correct handlers. This is done after the +%% kernel supervisor tree has been started as it needs the logger_sup. +add_handlers(App) when is_atom(App) -> + add_handlers(application:get_env(App, logger, [])); +add_handlers(HandlerConfig) -> + try + check_logger_config(HandlerConfig), + DefaultAdded = + lists:foldl( + fun({handler, default = Id, Module, Config}, _) + when not is_map_key(filters, Config) -> + %% The default handler should have a couple of extra filters + %% set on it by default. 
+ DefConfig = #{ filter_default => stop, + filters => get_default_handler_filters()}, + setup_handler(Id, Module, maps:merge(DefConfig,Config)), + true; + ({handler, Id, Module, Config}, Default) -> + setup_handler(Id, Module, Config), + Default orelse Id == default; + (_, Default) -> Default + end, false, HandlerConfig), + %% If a default handler was added we try to remove the simple_logger + %% If the simple logger exists it will replay its log events + %% to the handler(s) added in the fold above. + _ = [case logger:remove_handler(simple) of + ok -> ok; + {error,{not_found,simple}} -> ok + end || DefaultAdded], + ok + catch throw:Reason -> + ?LOG_ERROR("Invalid logger handler config: ~p", [Reason]), + {error, {bad_config, {handler, Reason}}} end. +setup_handler(Id, Module, Config) -> + case logger:add_handler(Id, Module, Config) of + ok -> ok; + {error, Reason} -> throw(Reason) + end. + +check_logger_config(_) -> + ok. + +-spec get_logger_type() -> {ok, standard_io | false | silent | + {file, file:name_all()} | + {file, file:name_all(), [file:mode()]}}. get_logger_type() -> - Type0 = - case application:get_env(kernel, logger_dest) of - undefined -> - application:get_env(kernel, error_logger); - T -> - T - end, - case Type0 of + case application:get_env(kernel, error_logger) of {ok, tty} -> - {ok, tty}; + {ok, standard_io}; {ok, {file, File}} when is_list(File) -> {ok, {file, File}}; {ok, {file, File, Modes}} when is_list(File), is_list(Modes) -> {ok, {file, File, Modes}}; - {ok, {disk_log, File}} when is_list(File) -> - {ok, {disk_log, get_disk_log_config(File)}}; {ok, false} -> {ok, false}; {ok, silent} -> {ok, silent}; undefined -> - {ok, tty}; % default value + case lists:member({handler,default,undefined}, get_logger_env()) of + true -> + {ok, false}; + false -> + {ok, standard_io} % default value + end; {ok, Bad} -> - {error,{bad_config, {kernel, {logger_dest, Bad}}}} + throw({error_logger, Bad}) end. -get_disk_log_config(File) -> - Config1 = - case application:get_env(kernel,logger_disk_log_maxfiles) of - undefined -> #{}; - {ok,MF} -> #{max_no_files=>MF} - end, - Config2 = - case application:get_env(kernel,logger_disk_log_maxbytes) of - undefined -> Config1; - {ok,MB} -> Config1#{max_no_bytes=>MB} - end, - Config3 = - case application:get_env(kernel,logger_disk_log_type) of - undefined -> Config2; - {ok,T} -> Config1#{type=>T} - end, - Config3#{file=>File}. - get_logger_level() -> - case application:get_env(kernel,logger_level) of - undefined -> info; - {ok,Level} when ?IS_LEVEL(Level) -> Level + case application:get_env(kernel,logger_level,info) of + Level when ?IS_LEVEL(Level) -> + Level; + Level -> + throw({logger_level, Level}) + end. + +get_logger_filter_default() -> + case lists:keyfind(filters,1,get_logger_env()) of + {filters,Default,_} -> + Default; + false -> + log end. get_logger_filters() -> + lists:foldl( + fun({filters, _, Filters}, _Acc) -> + Filters; + (_, Acc) -> + Acc + end, [], get_logger_env()). 
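Since add_handlers/1 and the get_logger_* helpers above read the kernel logger environment, a sketch of what that environment can look like in sys.config may help; the file name, filter id and module name are assumptions, the tuple shapes follow the patterns matched in the code above:

    [{kernel,
      [{logger_level, info},
       {logger,
        [{handler, default, logger_std_h,        %% {handler, Id, Module, Config}
          #{logger_std_h => #{type => {file, "log/erlang.log"}}}},
         {filters, log,                          %% {filters, FilterDefault, Filters}
          [{no_sasl, {fun logger_filters:domain/2,
                      {stop, sub, [beam, erlang, otp, sasl]}}}]},
         {module_level, debug, [my_mod]}         %% {module_level, Level, Modules}
        ]}]}].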
+ +%% This function looks at the kernel logger environment +%% and updates it so that the correct logger is configured +init_default_config(Type) when Type==standard_io; + Type==standard_error; + element(1,Type)==file -> + Env = get_logger_env(), + DefaultFormatter = #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}, + DefaultConfig = DefaultFormatter#{logger_std_h=>#{type=>Type}}, + NewLoggerEnv = + case lists:keyfind(default, 2, Env) of + {handler, default, Module, Config} -> + lists:map( + fun({handler, default, logger_std_h, _}) -> + %% Only want to add the logger_std_h config + %% if not configured by user AND the default + %% handler is still the logger_std_h. + {handler, default, Module, maps:merge(DefaultConfig,Config)}; + ({handler, default, logger_disk_log_h, _}) -> + %% Add default formatter. The point of this + %% is to get the expected formatter config + %% for the default handler, since this + %% differs from the default values that + %% logger_formatter itself adds. + {handler, default, logger_disk_log_h, maps:merge(DefaultFormatter,Config)}; + (Other) -> + Other + end, Env); + _ -> + %% Nothing has been configured, use default + [{handler, default, logger_std_h, DefaultConfig} | Env] + end, + application:set_env(kernel, logger, NewLoggerEnv, [{timeout,infinity}]); +init_default_config(Type) -> + throw({illegal_logger_type,Type}). + +get_default_handler_filters() -> case application:get_env(kernel, logger_sasl_compatible, false) of true -> ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp]); false -> Extra = - case application:get_env(kernel, logger_log_progress, false) of - true -> + case application:get_env(kernel, logger_progress_reports, stop) of + log -> []; - false -> + stop -> [{stop_progress, {fun logger_filters:progress/2,stop}}] end, Extra ++ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp,sasl]) end. -get_type_config({disk_log,LogOpts}) -> - {logger_disk_log_h,#{disk_log_opts=>LogOpts}}; -get_type_config(tty) -> - %% This is only for backwards compatibility with error_logger and - %% old kernel and sasl environment variables - get_type_config(standard_io); -get_type_config(Type) when Type==standard_io; - Type==standard_error; - element(1,Type)==file -> - {logger_std_h,#{logger_std_h=>#{type=>Type}}}; -get_type_config(Type) -> - {error,{illegal_logger_type,Type}}. - -%%%----------------------------------------------------------------- --spec limit_term(term()) -> term(). - -limit_term(Term) -> - try get_format_depth() of - unlimited -> Term; - D -> io_lib:limit_term(Term, D) - catch error:badarg -> - %% This could happen during system termination, after - %% application_controller process is dead. - unlimited - end. - --spec get_format_depth() -> 'unlimited' | pos_integer(). - -get_format_depth() -> - Depth = - case application:get_env(kernel, logger_format_depth) of - {ok, D} when is_integer(D) -> - D; - undefined -> - case application:get_env(kernel, error_logger_format_depth) of - {ok, D} when is_integer(D) -> - D; - undefined -> - unlimited - end - end, - max(10, Depth). - --spec get_max_size() -> 'unlimited' | pos_integer(). - -get_max_size() -> - case application:get_env(kernel, logger_max_size) of - {ok, Size} when is_integer(Size) -> - max(50, Size); - undefined -> - unlimited - end. - --spec get_utc_config() -> boolean(). 
- -get_utc_config() -> - %% Kernel's logger_utc configuration overrides SASL utc_log, which - %% in turn overrides stdlib config - in order to have uniform - %% timestamps in log messages - case application:get_env(kernel, logger_utc) of - {ok, Val} -> Val; - undefined -> - case application:get_env(sasl, utc_log) of - {ok, Val} -> Val; - undefined -> - case application:get_env(stdlib, utc_log) of - {ok, Val} -> Val; - undefined -> false - end - end - end. +get_logger_env() -> + application:get_env(kernel, logger, []). %%%----------------------------------------------------------------- %%% Internal @@ -819,7 +863,7 @@ proc_meta() -> default(pid) -> self(); default(gl) -> group_leader(); -default(time) -> erlang:monotonic_time(microsecond). +default(time) -> erlang:system_time(microsecond). %% Remove everything upto and including this module from the stacktrace filter_stacktrace(Module,[{Module,_,_,_}|_]) -> diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl index d9f5aa6faf..b3cf7d67dd 100644 --- a/lib/kernel/src/logger_backend.erl +++ b/lib/kernel/src/logger_backend.erl @@ -58,7 +58,7 @@ call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) -> debug, [{logger,removed_failing_handler}, {handler,{Id,Module}}, - {log,Log1}, + {log_event,Log1}, {config,Config1}, {reason,{C,R,filter_stacktrace(S)}}]); {error,{not_found,_}} -> @@ -122,7 +122,7 @@ handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) -> [{logger,removed_failing_filter}, {filter,Filter}, {owner,Owner}, - {log,Log}, + {log_event,Log}, {reason,Reason}]); _ -> ok diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl index 799aea9617..1d35c2e068 100644 --- a/lib/kernel/src/logger_config.erl +++ b/lib/kernel/src/logger_config.erl @@ -24,14 +24,14 @@ allow/2,allow/3, get/2, get/3, get/1, create/3, create/4, set/3, - set_module_level/3,reset_module_level/2, + set_module_level/3,unset_module_level/2, cache_module_level/2, level_to_int/1]). -include("logger_internal.hrl"). new(Name) -> - _ = ets:new(Name,[set,protected,named_table]), + _ = ets:new(Name,[set,protected,named_table,{write_concurrency,true}]), ets:whereis(Name). delete(Tid,Id) -> @@ -109,7 +109,7 @@ set_module_level(Tid,Module,Level) -> ets:insert(Tid,{Module,level_to_int(Level)}), ok. -reset_module_level(Tid,Module) -> +unset_module_level(Tid,Module) -> ets:delete(Tid,Module), % should possibley overwrite instead of delete? ok. diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl index 0150fa781a..773aa75bc6 100644 --- a/lib/kernel/src/logger_disk_log_h.erl +++ b/lib/kernel/src/logger_disk_log_h.erl @@ -34,8 +34,8 @@ %% logger callbacks -export([log/2, - adding_handler/2, removing_handler/2, - changing_config/3, swap_buffer/2]). + adding_handler/1, removing_handler/1, + changing_config/2, swap_buffer/2]). 
%%%=================================================================== %%% API @@ -108,8 +108,8 @@ reset(Name) -> %%%----------------------------------------------------------------- %%% Handler being added -adding_handler(Name, Config) -> - case check_config(adding, Name, Config) of +adding_handler(#{id:=Name}=Config) -> + case check_config(adding, Config) of {ok, Config1} -> %% create initial handler state by merging defaults with config HConfig = maps:get(?MODULE, Config1, #{}), @@ -136,10 +136,9 @@ adding_handler(Name, Config) -> %%%----------------------------------------------------------------- %%% Updating handler config -changing_config(Name, - OldConfig=#{id:=Id, disk_log_opts:=DLOpts}, - NewConfig=#{id:=Id, disk_log_opts:=DLOpts}) -> - case check_config(changing, Name, NewConfig) of +changing_config(OldConfig=#{id:=Name, disk_log_opts:=DLOpts}, + NewConfig=#{id:=Name, disk_log_opts:=DLOpts}) -> + case check_config(changing, NewConfig) of Result = {ok,NewConfig1} -> try gen_server:call(Name, {change_config,OldConfig,NewConfig1}, ?DEFAULT_CALL_TIMEOUT) of @@ -151,12 +150,10 @@ changing_config(Name, Error -> Error end; -changing_config(_Name, OldConfig, NewConfig) -> +changing_config(OldConfig, NewConfig) -> {error,{illegal_config_change,OldConfig,NewConfig}}. -check_config(adding, Name, Config0) -> - %% Merge in defaults on top level - Config = maps:merge(#{id => Name}, Config0), +check_config(adding, #{id:=Name}=Config) -> %% Merge in defaults on handler level LogOpts0 = maps:get(disk_log_opts, Config, #{}), LogOpts = merge_default_logopts(Name, LogOpts0), @@ -173,7 +170,7 @@ check_config(adding, Name, Config0) -> Error -> Error end; -check_config(changing, _Name, Config) -> +check_config(changing, Config) -> MyConfig = maps:get(?MODULE, Config, #{}), case check_my_config(maps:to_list(MyConfig)) of ok -> {ok,Config}; @@ -223,7 +220,7 @@ check_my_config([]) -> %%%----------------------------------------------------------------- %%% Handler being removed -removing_handler(Name, _Config) -> +removing_handler(#{id:=Name}) -> stop(Name). %%%----------------------------------------------------------------- @@ -238,15 +235,15 @@ swap_buffer(Name,Buffer) -> %%%----------------------------------------------------------------- %%% Log a string or report --spec log(Log, Config) -> ok | dropped when - Log :: logger:log(), +-spec log(LogEvent, Config) -> ok | dropped when + LogEvent :: logger:log_event(), Config :: logger:config(). -log(Log,Config=#{id:=Name}) -> +log(LogEvent,Config=#{id:=Name}) -> %% if the handler has crashed, we must drop this request %% and hope the handler restarts so we can try again true = is_pid(whereis(Name)), - Bin = logger_h_common:log_to_binary(Log,Config), + Bin = logger_h_common:log_to_binary(LogEvent,Config), logger_h_common:call_cast_or_drop(Name, Bin). 
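The disk_log handler above is configured through a handler-level disk_log_opts map. A sketch of adding such a handler at run time, with a hypothetical handler id and file name and option names taken from the defaults used elsewhere in this change:

    add_disk_log_handler() ->
        ok = logger:add_handler(my_disk_log_h, logger_disk_log_h,
                                #{level => info,
                                  disk_log_opts => #{file => "log/my_app.log",
                                                     type => wrap,
                                                     max_no_files => 4,
                                                     max_no_bytes => 1048576}}).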
@@ -278,10 +275,11 @@ init([Name, Config = #{disk_log_opts := LogOpts}, last_log_ts => T0, burst_win_ts => T0, burst_msg_count => 0, + last_op => sync, prev_log_result => ok, prev_sync_result => ok, prev_disk_log_info => undefined}), - gen_server:cast(self(), {repeated_disk_log_sync,T0}), + gen_server:cast(self(), repeated_disk_log_sync), enter_loop(Config, State1); Error -> logger_h_common:error_notify({open_disk_log,Name,Error}), @@ -316,8 +314,7 @@ handle_call(disk_log_sync, _From, State = #{id := Name}) -> {reply, Result, State1}; handle_call({change_config,_OldConfig,NewConfig}, _From, - State = #{filesync_repeat_interval := FSyncInt0, - last_log_ts := LastLogTS}) -> + State = #{filesync_repeat_interval := FSyncInt0}) -> HConfig = maps:get(?MODULE, NewConfig, #{}), State1 = #{toggle_sync_qlen := TSQL, drop_new_reqs_qlen := DNRQL, @@ -338,9 +335,8 @@ handle_call({change_config,_OldConfig,NewConfig}, _From, _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, undefined)), - _ = gen_server:cast(self(), {repeated_disk_log_sync, - LastLogTS}) - end, + _ = gen_server:cast(self(), repeated_disk_log_sync) + end, {reply, ok, State1}; false -> {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State} @@ -370,24 +366,23 @@ handle_cast({log, Bin}, State) -> %% clause gets called repeatedly by the handler. In order to %% guarantee that a filesync *always* happens after the last log %% request, the repeat operation must be active! -handle_cast({repeated_disk_log_sync,LastLogTS0}, +handle_cast(repeated_disk_log_sync, State = #{id := Name, filesync_repeat_interval := FSyncInt, - last_log_ts := LastLogTS1}) -> + last_op := LastOp}) -> State1 = if is_integer(FSyncInt) -> %% only do filesync if something has been %% written since last time we checked - NewState = if LastLogTS1 == LastLogTS0 -> + NewState = if LastOp == sync -> State; true -> disk_log_sync(Name, State) end, {ok,TRef} = timer:apply_after(FSyncInt, gen_server,cast, - [self(), - {repeated_disk_log_sync,LastLogTS1}]), - NewState#{rep_sync_tref => TRef}; + [self(),repeated_disk_log_sync]), + NewState#{rep_sync_tref => TRef, last_op => sync}; true -> State end, @@ -649,10 +644,9 @@ close_disk_log(Name, _) -> ok. disk_log_write(Name, Bin, State) -> - Result = case ?disk_log_blog(Name, Bin) of ok -> - ok; + State#{prev_log_result => ok, last_op => write}; LogError -> _ = case maps:get(prev_log_result, State) of LogError -> @@ -664,29 +658,26 @@ disk_log_write(Name, Bin, State) -> LogOpts, LogError}) end, - LogError - end, - State#{prev_log_result => Result}. + State#{prev_log_result => LogError} + end. disk_log_sync(Name, State) -> - Result = - case ?disk_log_sync(Name) of - ok -> - ok; - SyncError -> - _ = case maps:get(prev_sync_result, State) of - SyncError -> - %% don't report same error twice - ok; - _ -> - LogOpts = maps:get(log_opts, State), - logger_h_common:error_notify({Name,sync, - LogOpts, - SyncError}) - end, - SyncError - end, - State#{prev_sync_result => Result}. + case ?disk_log_sync(Name) of + ok -> + State#{prev_sync_result => ok, last_op => sync}; + SyncError -> + _ = case maps:get(prev_sync_result, State) of + SyncError -> + %% don't report same error twice + ok; + _ -> + LogOpts = maps:get(log_opts, State), + logger_h_common:error_notify({Name,sync, + LogOpts, + SyncError}) + end, + State#{prev_sync_result => SyncError} + end. 
error_notify_new(Info,Info, _Term) -> ok; diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl index 85928f0fd6..7359b3b4b7 100644 --- a/lib/kernel/src/logger_filters.erl +++ b/lib/kernel/src/logger_filters.erl @@ -27,30 +27,31 @@ -include("logger_internal.hrl"). -define(IS_ACTION(A), (A==log orelse A==stop)). --spec domain(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec domain(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: {Action,Compare,MatchDomain}, Action :: log | stop, - Compare :: prefix_of | starts_with | equals | no_domain, + Compare :: super | sub | equal | not_equal | undefined, MatchDomain :: list(atom()). -domain(#{meta:=Meta}=Log,{Action,Compare,MatchDomain}) +domain(#{meta:=Meta}=LogEvent,{Action,Compare,MatchDomain}) when ?IS_ACTION(Action) andalso - (Compare==prefix_of orelse - Compare==starts_with orelse - Compare==equals orelse - Compare==no_domain) andalso + (Compare==super orelse + Compare==sub orelse + Compare==equal orelse + Compare==not_equal orelse + Compare==undefined) andalso is_list(MatchDomain) -> - filter_domain(Compare,Meta,MatchDomain,on_match(Action,Log)); -domain(Log,Extra) -> - erlang:error(badarg,[Log,Extra]). + filter_domain(Compare,Meta,MatchDomain,on_match(Action,LogEvent)); +domain(LogEvent,Extra) -> + erlang:error(badarg,[LogEvent,Extra]). --spec level(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec level(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: {Action,Operator,MatchLevel}, Action :: log | stop, Operator :: neq | eq | lt | gt | lteq | gteq, MatchLevel :: logger:level(). -level(#{level:=L1}=Log,{Action,Op,L2}) +level(#{level:=L1}=LogEvent,{Action,Op,L2}) when ?IS_ACTION(Action) andalso (Op==neq orelse Op==eq orelse @@ -59,37 +60,40 @@ level(#{level:=L1}=Log,{Action,Op,L2}) Op==lteq orelse Op==gteq) andalso ?IS_LEVEL(L2) -> - filter_level(Op,L1,L2,on_match(Action,Log)); -level(Log,Extra) -> - erlang:error(badarg,[Log,Extra]). + filter_level(Op,L1,L2,on_match(Action,LogEvent)); +level(LogEvent,Extra) -> + erlang:error(badarg,[LogEvent,Extra]). --spec progress(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec progress(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: log | stop. -progress(Log,Action) when ?IS_ACTION(Action) -> - filter_progress(Log,on_match(Action,Log)); -progress(Log,Action) -> - erlang:error(badarg,[Log,Action]). +progress(LogEvent,Action) when ?IS_ACTION(Action) -> + filter_progress(LogEvent,on_match(Action,LogEvent)); +progress(LogEvent,Action) -> + erlang:error(badarg,[LogEvent,Action]). --spec remote_gl(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec remote_gl(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: log | stop. -remote_gl(Log,Action) when ?IS_ACTION(Action) -> - filter_remote_gl(Log,on_match(Action,Log)); -remote_gl(Log,Action) -> - erlang:error(badarg,[Log,Action]). +remote_gl(LogEvent,Action) when ?IS_ACTION(Action) -> + filter_remote_gl(LogEvent,on_match(Action,LogEvent)); +remote_gl(LogEvent,Action) -> + erlang:error(badarg,[LogEvent,Action]). 
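With the domain comparators renamed to super | sub | equal | not_equal | undefined, attaching domain filters looks roughly as follows; the filter ids and the [my_app, audit] domain are assumptions:

    add_domain_filters() ->
        %% drop everything tagged as a sub-domain of [my_app, audit]
        ok = logger:add_handler_filter(default, stop_audit,
                                       {fun logger_filters:domain/2,
                                        {stop, sub, [my_app, audit]}}),
        %% keep events that carry no domain key at all
        ok = logger:add_handler_filter(default, allow_no_domain,
                                       {fun logger_filters:domain/2,
                                        {log, undefined, []}}).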
%%%----------------------------------------------------------------- %%% Internal -filter_domain(prefix_of,#{domain:=Domain},MatchDomain,OnMatch) -> +filter_domain(super,#{domain:=Domain},MatchDomain,OnMatch) -> is_prefix(Domain,MatchDomain,OnMatch); -filter_domain(starts_with,#{domain:=Domain},MatchDomain,OnMatch) -> +filter_domain(sub,#{domain:=Domain},MatchDomain,OnMatch) -> is_prefix(MatchDomain,Domain,OnMatch); -filter_domain(equals,#{domain:=Domain},Domain,OnMatch) -> +filter_domain(equal,#{domain:=Domain},Domain,OnMatch) -> OnMatch; -filter_domain(Action,Meta,_,OnMatch) -> +filter_domain(not_equal,#{domain:=Domain},MatchDomain,OnMatch) + when Domain=/=MatchDomain -> + OnMatch; +filter_domain(Compare,Meta,_,OnMatch) -> case maps:is_key(domain,Meta) of - false when Action==no_domain -> OnMatch; + false when Compare==undefined; Compare==not_equal -> OnMatch; _ -> ignore end. @@ -119,5 +123,5 @@ filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() -> filter_remote_gl(_,_) -> ignore. -on_match(log,Log) -> Log; +on_match(log,LogEvent) -> LogEvent; on_match(stop,_) -> stop. diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl index 8e954f8d98..4d727b3da0 100644 --- a/lib/kernel/src/logger_formatter.erl +++ b/lib/kernel/src/logger_formatter.erl @@ -20,25 +20,28 @@ -module(logger_formatter). -export([format/2]). +-export([check_config/1]). -include("logger_internal.hrl"). %%%----------------------------------------------------------------- %%% Types +-type config() :: #{chars_limit=>pos_integer()| unlimited, + depth=>pos_integer() | unlimited, + legacy_header=>boolean(), + max_size=>pos_integer() | unlimited, + report_cb=>fun((logger:report()) -> {io:format(),[term()]}), + single_line=>boolean(), + template=>template(), + time_designator=>byte(), + time_offset=>integer()|[byte()]}. -type template() :: [atom()|tuple()|string()]. %%%----------------------------------------------------------------- %%% API --spec format(Log,Config) -> unicode:chardata() when - Log :: logger:log(), - Config :: #{single_line=>boolean(), - legacy_header=>boolean(), - report_cb=>fun((logger:report()) -> {io:format(),[term()]}), - chars_limit=>pos_integer()| unlimited, - max_size=>pos_integer() | unlimited, - depth=>pos_integer() | unlimited, - template=>template(), - utc=>boolean()}. +-spec format(LogEvent,Config) -> unicode:chardata() when + LogEvent :: logger:log_event(), + Config :: config(). format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0) when is_map(Config0) -> Config = add_default_config(Config0), @@ -83,8 +86,6 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0) do_format(Level,Msg,Data,[level|Format],Config) -> [to_string(level,Level,Config)|do_format(Level,Msg,Data,Format,Config)]; -do_format(Level,Msg,Data,[msg|Format],Config) -> - [Msg|do_format(Level,Msg,Data,Format,Config)]; do_format(Level,Msg,Data,[Key|Format],Config) when is_atom(Key); is_tuple(Key) -> Value = value(Key,Data), [to_string(Key,Value,Config)|do_format(Level,Msg,Data,Format,Config)]; @@ -128,9 +129,7 @@ to_string(X) -> io_lib:format("~tp",[X]). 
format_msg({string,Chardata},Meta,Config) -> - try unicode:characters_to_list(Chardata) - catch _:_ -> format_msg({"INVALID STRING: ~tp",[Chardata]},Meta,Config) - end; + format_msg({"~ts",[Chardata]},Meta,Config); format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) when is_function(Fun,1) -> format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config)); format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) -> @@ -195,29 +194,24 @@ truncate(String,Size) -> String end. -format_time(Timestamp,Config) when is_integer(Timestamp) -> - {Date,Time,Micro} = timestamp_to_datetimemicro(Timestamp,Config), - format_time(Date,Time,Micro); -format_time(Other,_Config) -> - %% E.g. a string - to_string(Other). - -format_time({Y,M,D},{H,Min,S},Micro) -> - io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w", - [Y,M,D,H,Min,S,Micro]). +%% SysTime is the system time in microseconds +format_time(SysTime,#{time_offset:=Offset,time_designator:=Des}) + when is_integer(SysTime) -> + calendar:system_time_to_rfc3339(SysTime,[{unit,microsecond}, + {offset,Offset}, + {time_designator,Des}]). -%% Assuming this is monotonic time in microseconds -timestamp_to_datetimemicro(Timestamp,Config) when is_integer(Timestamp) -> - SysTime = Timestamp + erlang:time_offset(microsecond), +%% SysTime is the system time in microseconds +timestamp_to_datetimemicro(SysTime,Config) when is_integer(SysTime) -> Micro = SysTime rem 1000000, Sec = SysTime div 1000000, UniversalTime = erlang:posixtime_to_universaltime(Sec), - {Date,Time} = - case Config of - #{utc:=true} -> UniversalTime; - _ -> erlang:universaltime_to_localtime(UniversalTime) + {{Date,Time},UtcStr} = + case offset_to_utc(maps:get(time_offset,Config)) of + true -> {UniversalTime,"UTC "}; + _ -> {erlang:universaltime_to_localtime(UniversalTime),""} end, - {Date,Time,Micro}. + {Date,Time,Micro,UtcStr}. format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_integer(A) -> atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A); @@ -230,9 +224,11 @@ maybe_add_legacy_header(Level, #{time:=Timestamp}=Meta, #{legacy_header:=true}=Config) -> #{title:=Title}=MyMeta = add_legacy_title(Level,maps:get(?MODULE,Meta,#{})), - {{Y,Mo,D},{H,Mi,S},Micro} = timestamp_to_datetimemicro(Timestamp,Config), - Header = io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===", - [Title,D,month(Mo),Y,H,Mi,S,Micro,utcstr(Config)]), + {{Y,Mo,D},{H,Mi,S},Micro,UtcStr} = + timestamp_to_datetimemicro(Timestamp,Config), + Header = + io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===", + [Title,D,month(Mo),Y,H,Mi,S,Micro,UtcStr]), Meta#{?MODULE=>MyMeta#{header=>Header}}; maybe_add_legacy_header(_,Meta,_) -> Meta. @@ -256,20 +252,20 @@ month(10) -> "Oct"; month(11) -> "Nov"; month(12) -> "Dec". -utcstr(#{utc:=true}) -> "UTC "; -utcstr(_) -> "". 
- -add_default_config(#{utc:=_}=Config0) -> +%% Ensure that all valid configuration parameters exist in the final +%% configuration map +add_default_config(Config0) -> Default = #{legacy_header=>false, single_line=>true, - chars_limit=>unlimited}, - MaxSize = get_max_size(maps:get(max_size,Config0,false)), - Depth = get_depth(maps:get(depth,Config0,false)), + chars_limit=>unlimited, + time_designator=>$T}, + MaxSize = get_max_size(maps:get(max_size,Config0,undefined)), + Depth = get_depth(maps:get(depth,Config0,undefined)), + Offset = get_offset(maps:get(time_offset,Config0,undefined)), add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize, - depth=>Depth})); -add_default_config(Config) -> - add_default_config(Config#{utc=>logger:get_utc_config()}). + depth=>Depth, + time_offset=>Offset})). add_default_template(#{template:=_}=Config) -> Config; @@ -283,12 +279,123 @@ default_template(#{single_line:=true}) -> default_template(_) -> ?DEFAULT_FORMAT_TEMPLATE. -get_max_size(false) -> - logger:get_max_size(); +get_max_size(undefined) -> + unlimited; get_max_size(S) -> max(10,S). -get_depth(false) -> - logger:get_format_depth(); +get_depth(undefined) -> + error_logger:get_format_depth(); get_depth(S) -> max(5,S). + +get_offset(undefined) -> + utc_to_offset(get_utc_config()); +get_offset(Offset) -> + Offset. + +utc_to_offset(true) -> + "Z"; +utc_to_offset(false) -> + "". + +get_utc_config() -> + %% SASL utc_log overrides stdlib config - in order to have uniform + %% timestamps in log messages + case application:get_env(sasl, utc_log) of + {ok, Val} when is_boolean(Val) -> Val; + _ -> + case application:get_env(stdlib, utc_log) of + {ok, Val} when is_boolean(Val) -> Val; + _ -> false + end + end. + +offset_to_utc(Z) when Z=:=0; Z=:="z"; Z=:="Z" -> + true; +offset_to_utc([$+|Tz]) -> + case io_lib:fread("~d:~d", Tz) of + {ok, [0, 0], []} -> + true; + _ -> + false + end; +offset_to_utc(_) -> + false. + +-spec check_config(Config) -> ok | {error,term()} when + Config :: config(). +check_config(Config) when is_map(Config) -> + do_check_config(maps:to_list(Config)); +check_config(Config) -> + {error,{invalid_formatter_config,?MODULE,Config}}. 
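The time_offset and time_designator options above feed straight into calendar:system_time_to_rfc3339/2. A sketch of changing them on a running handler through the new update_formatter_config API, assuming the default handler id:

    tweak_formatter() ->
        %% RFC 3339 timestamps in UTC, with a space as time designator
        ok = logger:update_formatter_config(default,
                                            #{time_offset => "Z",
                                              time_designator => $\s,
                                              single_line => true}),
        %% or update a single key
        ok = logger:update_formatter_config(default, legacy_header, false).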
+ +do_check_config([{Type,L}|Config]) when Type == chars_limit; + Type == depth; + Type == max_size -> + case check_limit(L) of + ok -> do_check_config(Config); + error -> {error,{invalid_formatter_config,?MODULE,{Type,L}}} + end; +do_check_config([{single_line,SL}|Config]) when is_boolean(SL) -> + do_check_config(Config); +do_check_config([{legacy_header,LH}|Config]) when is_boolean(LH) -> + do_check_config(Config); +do_check_config([{report_cb,RCB}|Config]) when is_function(RCB,1) -> + do_check_config(Config); +do_check_config([{template,T}|Config]) when is_list(T) -> + case lists:all(fun(X) when is_atom(X) -> true; + (X) when is_tuple(X), is_atom(element(1,X)) -> true; + (X) when is_list(X) -> io_lib:printable_unicode_list(X); + (_) -> false + end, + T) of + true -> + do_check_config(Config); + false -> + {error,{invalid_formatter_template,?MODULE,T}} + end; +do_check_config([{time_offset,Offset}|Config]) -> + case check_offset(Offset) of + ok -> + do_check_config(Config); + error -> + {error,{invalid_formatter_config,?MODULE,{time_offset,Offset}}} + end; +do_check_config([{time_designator,Char}|Config]) when Char>=0, Char=<255 -> + case io_lib:printable_latin1_list([Char]) of + true -> + do_check_config(Config); + false -> + {error,{invalid_formatter_config,?MODULE,{time_designator,Char}}} + end; +do_check_config([C|_]) -> + {error,{invalid_formatter_config,?MODULE,C}}; +do_check_config([]) -> + ok. + +check_limit(L) when is_integer(L), L>0 -> + ok; +check_limit(unlimited) -> + ok; +check_limit(_) -> + error. + +check_offset(I) when is_integer(I) -> + ok; +check_offset(Tz) when Tz=:=""; Tz=:="Z"; Tz=:="z" -> + ok; +check_offset([Sign|Tz]) when Sign=:=$+; Sign=:=$- -> + check_timezone(Tz); +check_offset(_) -> + error. + +check_timezone(Tz) -> + try io_lib:fread("~d:~d", Tz) of + {ok, [_, _], []} -> + ok; + _ -> + error + catch _:_ -> + error + end. diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl index 7caad366ae..336398cd4a 100644 --- a/lib/kernel/src/logger_h_common.erl +++ b/lib/kernel/src/logger_h_common.erl @@ -39,8 +39,8 @@ %%%----------------------------------------------------------------- %%% Covert log data on any form to binary --spec log_to_binary(Log,Config) -> LogString when - Log :: logger:log(), +-spec log_to_binary(LogEvent,Config) -> LogString when + LogEvent :: logger:log_event(), Config :: logger:config(), LogString :: binary(). log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) -> @@ -58,7 +58,7 @@ do_log_to_binary(Log,Config) -> catch _:_ -> ?LOG_INTERNAL(debug,[{formatter_error,Formatter}, {config,FormatterConfig}, - {log,Log}, + {log_event,Log}, {bad_return_value,String}]), <<"FORMATTER ERROR: bad_return_value">> end. 
@@ -69,10 +69,10 @@ try_format(Log,Formatter,FormatterConfig) -> C:R:S -> ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter}, {config,FormatterConfig}, - {log,Log}, + {log_event,Log}, {reason, {C,R,logger:filter_stacktrace(?MODULE,S)}}]), - case {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG} of + case {?DEFAULT_FORMATTER,#{}} of {Formatter,FormatterConfig} -> "DEFAULT FORMATTER CRASHED"; {DefaultFormatter,DefaultConfig} -> @@ -135,7 +135,8 @@ call_cast_or_drop(Name, Bin) -> _:{timeout,_} -> ?observe(Name,{dropped,1}) end; - drop -> ?observe(Name,{dropped,1}) + drop -> + ?observe(Name,{dropped,1}) catch %% if the ETS table doesn't exist (maybe because of a %% handler restart), we can only drop the request @@ -152,12 +153,15 @@ check_load(State = #{id:=Name, mode := Mode, flush_reqs_qlen := FlushQLen}) -> {_,Mem} = process_info(self(), memory), ?observe(Name,{max_mem,Mem}), - %% make sure the handler process doesn't get scheduled - %% out between the message_queue_len check below and the - %% action that follows (flush or write). {_,QLen} = process_info(self(), message_queue_len), ?observe(Name,{max_qlen,QLen}), - + %% When the handler process gets scheduled in, it's impossible + %% to predict the QLen. We could jump "up" arbitrarily from say + %% async to sync, async to drop, sync to flush, etc. However, when + %% the handler process manages the log requests (without flushing), + %% one after the other, we will move "down" from drop to sync and + %% from sync to async. This way we don't risk getting stuck in + %% drop or sync mode with an empty mailbox. {Mode1,_NewDrops,_NewFlushes} = if QLen >= FlushQLen -> @@ -292,7 +296,7 @@ overload_levels_ok(HandlerConfig) -> TSQL = maps:get(toggle_sync_qlen, HandlerConfig, ?TOGGLE_SYNC_QLEN), DNRQL = maps:get(drop_new_reqs_qlen, HandlerConfig, ?DROP_NEW_REQS_QLEN), FRQL = maps:get(flush_reqs_qlen, HandlerConfig, ?FLUSH_REQS_QLEN), - (TSQL < DNRQL) andalso (DNRQL < FRQL). + (DNRQL > 1) andalso (TSQL =< DNRQL) andalso (DNRQL =< FRQL). error_notify(Term) -> ?internal_log(error, Term). diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl index 89378dbb10..ed365ce6eb 100644 --- a/lib/kernel/src/logger_h_common.hrl +++ b/lib/kernel/src/logger_h_common.hrl @@ -124,7 +124,7 @@ %%% slow down execution and therefore should not be include in code %%% to be officially released. -%% -define(TEST_HOOKS, true). +-define(TEST_HOOKS, true). -ifdef(TEST_HOOKS). -define(TEST_HOOKS_TAB, logger_h_test_hooks). diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl index 8c0fc2725d..fedd6db370 100644 --- a/lib/kernel/src/logger_internal.hrl +++ b/lib/kernel/src/logger_internal.hrl @@ -22,17 +22,16 @@ -define(LOGGER_KEY,'$logger_config$'). -define(HANDLER_KEY,'$handler_config$'). -define(LOGGER_META_KEY,'$logger_metadata$'). --define(STANDARD_HANDLER, logger_std_h). +-define(STANDARD_HANDLER, default). -define(DEFAULT_HANDLER_FILTERS, ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp])). -define(DEFAULT_HANDLER_FILTERS(Domain), [{remote_gl,{fun logger_filters:remote_gl/2,stop}}, - {domain,{fun logger_filters:domain/2,{log,prefix_of,Domain}}}, - {no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]). + {domain,{fun logger_filters:domain/2,{log,super,Domain}}}, + {no_domain,{fun logger_filters:domain/2,{log,undefined,[]}}}]). -define(DEFAULT_FORMATTER,logger_formatter). -define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true, - single_line=>false, - template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}). + single_line=>false}). 
-define(DEFAULT_FORMAT_TEMPLATE_HEADER, [{logger_formatter,header},"\n",msg,"\n"]). -define(DEFAULT_FORMAT_TEMPLATE_SINGLE, diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl index a7f302ac8f..024b9dfa39 100644 --- a/lib/kernel/src/logger_server.erl +++ b/lib/kernel/src/logger_server.erl @@ -25,9 +25,10 @@ -export([start_link/0, add_handler/3, remove_handler/1, add_filter/2, remove_filter/2, - set_module_level/2, reset_module_level/1, + set_module_level/2, unset_module_level/1, cache_module_level/1, - set_config/2, set_config/3]). + set_config/2, set_config/3, update_config/2, + update_formatter_config/2]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -36,8 +37,9 @@ -include("logger_internal.hrl"). -define(SERVER, logger). +-define(LOGGER_SERVER_TAG, '$logger_cb_process'). --record(state, {tid}). +-record(state, {tid, async_req, async_req_queue}). %%%=================================================================== %%% API @@ -47,23 +49,18 @@ start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). add_handler(Id,Module,Config0) -> - case sanity_check(logger,handlers,[Id]) of - ok -> - try check_mod(Module) of + try {check_id(Id),check_mod(Module)} of + {ok,ok} -> + case sanity_check(Id,Config0) of ok -> - case sanity_check(Id,Config0) of - ok -> - Default = default_config(Id), - Config = maps:merge(Default,Config0), - call({add_handler,Id,Module,Config}); - Error -> - Error - end - catch throw:Error -> - {error,Error} - end; - Error -> - Error + Default = default_config(Id), + Config = maps:merge(Default,Config0), + call({add_handler,Id,Module,Config}); + Error -> + Error + end + catch throw:Error -> + {error,Error} end. remove_handler(HandlerId) -> @@ -86,9 +83,9 @@ set_module_level(Module,Level) when is_atom(Module) -> set_module_level(Module,_) -> {error,{not_a_module,Module}}. -reset_module_level(Module) when is_atom(Module) -> - call({reset_module_level,Module}); -reset_module_level(Module) -> +unset_module_level(Module) when is_atom(Module) -> + call({unset_module_level,Module}); +unset_module_level(Module) -> {error,{not_a_module,Module}}. cache_module_level(Module) -> @@ -96,10 +93,7 @@ cache_module_level(Module) -> set_config(Owner,Key,Value) -> - case sanity_check(Owner,Key,Value) of - ok -> call({update_config,Owner,#{Key=>Value}}); - Error -> Error - end. + update_config(Owner,#{Key=>Value}). set_config(Owner,Config0) -> case sanity_check(Owner,Config0) of @@ -110,112 +104,151 @@ set_config(Owner,Config0) -> Error end. +update_config(Owner, Config) -> + case sanity_check(Owner,Config) of + ok -> + call({update_config,Owner,Config}); + Error -> + Error + end. + +update_formatter_config(HandlerId, FormatterConfig) + when is_map(FormatterConfig) -> + call({update_formatter_config,HandlerId,FormatterConfig}); +update_formatter_config(_HandlerId, FormatterConfig) -> + {error,{invalid_formatter_config,FormatterConfig}}. 
+ + %%%=================================================================== %%% gen_server callbacks %%%=================================================================== init([]) -> process_flag(trap_exit, true), + put(?LOGGER_SERVER_TAG,true), Tid = logger_config:new(?LOGGER_TABLE), LoggerConfig = maps:merge(default_config(logger), - #{handlers=>[logger_simple]}), + #{handlers=>[simple]}), logger_config:create(Tid,logger,LoggerConfig), - SimpleConfig0 = maps:merge(default_config(logger_simple), + SimpleConfig0 = maps:merge(default_config(simple), #{filter_default=>stop, - filters=>?DEFAULT_HANDLER_FILTERS, - logger_simple=>#{buffer=>true}}), + filters=>?DEFAULT_HANDLER_FILTERS}), %% If this fails, then the node should crash - {ok,SimpleConfig} = - logger_simple:adding_handler(logger_simple,SimpleConfig0), - logger_config:create(Tid,logger_simple,logger_simple,SimpleConfig), - {ok, #state{tid=Tid}}. + {ok,SimpleConfig} = logger_simple_h:adding_handler(SimpleConfig0), + logger_config:create(Tid,simple,logger_simple_h,SimpleConfig), + {ok, #state{tid=Tid, async_req_queue = queue:new()}}. -handle_call({add_handler,Id,Module,HConfig}, _From, #state{tid=Tid}=State) -> - Reply = - case logger_config:exist(Tid,Id) of - true -> - {error,{already_exist,Id}}; - false -> - %% inform the handler - case call_h(Module,adding_handler,[Id,HConfig],{ok,HConfig}) of - {ok,HConfig1} -> - logger_config:create(Tid,Id,Module,HConfig1), - {ok,Config} = do_get_config(Tid,logger), - Handlers = maps:get(handlers,Config,[]), - do_set_config(Tid,logger, - Config#{handlers=>[Id|Handlers]}), - ok; - {error,HReason} -> - {error,{handler_not_added,HReason}} - end - end, - {reply,Reply,State}; -handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) -> - Reply = - case logger_config:get(Tid,HandlerId) of - {ok,{Module,_}} -> - {ok,Config} = do_get_config(Tid,logger), - Handlers0 = maps:get(handlers,Config,[]), - Handlers = lists:delete(HandlerId,Handlers0), - %% inform the handler - _ = call_h(Module,removing_handler,[HandlerId,Config],ok), - do_set_config(Tid,logger,Config#{handlers=>Handlers}), - logger_config:delete(Tid,HandlerId), - ok; - _ -> - {error,{not_found,HandlerId}} - end, - {reply,Reply,State}; +handle_call({add_handler,Id,Module,HConfig}, From, #state{tid=Tid}=State) -> + case logger_config:exist(Tid,Id) of + true -> + {reply,{error,{already_exist,Id}},State}; + false -> + call_h_async( + fun() -> + %% inform the handler + call_h(Module,adding_handler,[HConfig],{ok,HConfig}) + end, + fun({ok,HConfig1}) -> + %% We know that the call_h would have loaded the module + %% if it existed, so it is safe here to call function_exported + %% to find out if this is a valid handler + case erlang:function_exported(Module, log, 2) of + true -> + logger_config:create(Tid,Id,Module,HConfig1), + {ok,Config} = do_get_config(Tid,logger), + Handlers = maps:get(handlers,Config,[]), + do_set_config(Tid,logger, + Config#{handlers=>[Id|Handlers]}); + false -> + {error,{invalid_handler, + {function_not_exported, + {Module,log,2}}}} + end; + ({error,HReason}) -> + {error,{handler_not_added,HReason}} + end,From,State) + end; +handle_call({remove_handler,HandlerId}, From, #state{tid=Tid}=State) -> + case logger_config:get(Tid,HandlerId) of + {ok,{Module,HConfig}} -> + {ok,Config} = do_get_config(Tid,logger), + Handlers0 = maps:get(handlers,Config,[]), + Handlers = lists:delete(HandlerId,Handlers0), + call_h_async( + fun() -> + %% inform the handler + call_h(Module,removing_handler,[HConfig],ok) + end, + fun(_Res) -> + 
do_set_config(Tid,logger,Config#{handlers=>Handlers}), + logger_config:delete(Tid,HandlerId), + ok + end,From,State); + _ -> + {reply,{error,{not_found,HandlerId}},State} + end; handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) -> Reply = do_add_filter(Tid,Id,Filter), {reply,Reply,State}; handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) -> Reply = do_remove_filter(Tid,Id,FilterId), {reply,Reply,State}; -handle_call({update_config,Id,NewConfig}, _From, #state{tid=Tid}=State) -> - Reply = - case logger_config:get(Tid,Id) of - {ok,{Module,OldConfig}} -> - Config = maps:merge(OldConfig,NewConfig), - case call_h(Module,changing_config,[Id,OldConfig,Config], - {ok,Config}) of - {ok,Config1} -> - do_set_config(Tid,Id,Config1); - Error -> - Error - end; - {ok,OldConfig} -> - Config = maps:merge(OldConfig,NewConfig), - do_set_config(Tid,Id,Config); - Error -> - Error - end, - {reply,Reply,State}; +handle_call({update_config,Id,NewConfig}, From, #state{tid=Tid}=State) -> + case logger_config:get(Tid,Id) of + {ok,{_Module,OldConfig}} -> + Config = maps:merge(OldConfig,NewConfig), + handle_call({set_config,Id,Config}, From, State); + {ok,OldConfig} -> + Config = maps:merge(OldConfig,NewConfig), + {reply,do_set_config(Tid,Id,Config),State}; + Error -> + {reply,Error,State} + end; handle_call({set_config,logger,Config}, _From, #state{tid=Tid}=State) -> - Reply = do_set_config(Tid,logger,Config), + {ok,#{handlers:=Handlers}} = logger_config:get(Tid,logger), + Reply = do_set_config(Tid,logger,Config#{handlers=>Handlers}), {reply,Reply,State}; -handle_call({set_config,HandlerId,Config}, _From, #state{tid=Tid}=State) -> +handle_call({set_config,HandlerId,Config}, From, #state{tid=Tid}=State) -> + case logger_config:get(Tid,HandlerId) of + {ok,{Module,OldConfig}} -> + call_h_async( + fun() -> + call_h(Module,changing_config,[OldConfig,Config], + {ok,Config}) + end, + fun({ok,Config1}) -> + do_set_config(Tid,HandlerId,Config1); + (Error) -> + Error + end,From,State); + _ -> + {reply,{error,{not_found,HandlerId}},State} + end; +handle_call({update_formatter_config,HandlerId,NewFConfig},_From, + #state{tid=Tid}=State) -> Reply = case logger_config:get(Tid,HandlerId) of - {ok,{Module,OldConfig}} -> - case call_h(Module,changing_config,[HandlerId,OldConfig,Config], - {ok,Config}) of - {ok,Config1} -> - do_set_config(Tid,HandlerId,Config1); - Error -> - Error + {ok,{_Mod,#{formatter:={FMod,OldFConfig}}=Config}} -> + try + FConfig = maps:merge(OldFConfig,NewFConfig), + check_formatter({FMod,FConfig}), + do_set_config(Tid,HandlerId, + Config#{formatter=>{FMod,FConfig}}) + catch throw:Reason -> {error,Reason} end; _ -> - {error,{not_found,HandlerId}} + {error,{not_found,HandlerId}} end, {reply,Reply,State}; handle_call({set_module_level,Module,Level}, _From, #state{tid=Tid}=State) -> Reply = logger_config:set_module_level(Tid,Module,Level), {reply,Reply,State}; -handle_call({reset_module_level,Module}, _From, #state{tid=Tid}=State) -> - Reply = logger_config:reset_module_level(Tid,Module), +handle_call({unset_module_level,Module}, _From, #state{tid=Tid}=State) -> + Reply = logger_config:unset_module_level(Tid,Module), {reply,Reply,State}. +handle_cast({async_req_reply,_Ref,_Reply} = Reply,State) -> + call_h_reply(Reply,State); handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) -> logger_config:cache_module_level(Tid,Module), {noreply, State}. 
@@ -235,11 +268,21 @@ handle_info({log,Level,Report,Meta}, State) -> handle_info({Ref,_Reply},State) when is_reference(Ref) -> %% Assuming this is a timed-out gen_server reply - ignoring {noreply, State}; -handle_info(Unexpected,State) -> +handle_info({'DOWN',_Ref,_Proc,_Pid,_Reason} = Down,State) -> + call_h_reply(Down,State); +handle_info(Unexpected,State) when element(1,Unexpected) == 'EXIT' -> + %% The simple logger will send an 'EXIT' message when it is replaced + %% We may as well ignore all 'EXIT' messages that we get ?LOG_INTERNAL(debug, [{logger,got_unexpected_message}, {process,?SERVER}, {message,Unexpected}]), + {noreply,State}; +handle_info(Unexpected,State) -> + ?LOG_INTERNAL(info, + [{logger,got_unexpected_message}, + {process,?SERVER}, + {message,Unexpected}]), {noreply,State}. terminate(_Reason, _State) -> @@ -249,8 +292,11 @@ terminate(_Reason, _State) -> %%% Internal functions %%%=================================================================== call(Request) -> - case whereis(?SERVER) of - Pid when Pid==self() -> + Action = element(1,Request), + case get(?LOGGER_SERVER_TAG) of + true when + Action == add_handler; Action == remove_handler; + Action == update_config; Action == set_config -> {error,{attempting_syncronous_call_to_self,Request}}; _ -> gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT) @@ -301,13 +347,13 @@ do_set_config(Tid,Id,Config) -> default_config(logger) -> #{level=>info, filters=>[], - filter_default=>log, - handlers=>[]}; -default_config(_) -> - #{level=>info, + filter_default=>log}; +default_config(Id) -> + #{id=>Id, + level=>info, filters=>[], filter_default=>log, - formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}. + formatter=>{?DEFAULT_FORMATTER,#{}}}. sanity_check(Owner,Key,Value) -> sanity_check_1(Owner,[{Key,Value}]). @@ -333,9 +379,6 @@ get_type(Id) -> check_config(Owner,[{level,Level}|Config]) -> check_level(Level), check_config(Owner,Config); -check_config(logger,[{handlers,Handlers}|Config]) -> - check_handlers(Handlers), - check_config(logger,Config); check_config(Owner,[{filters,Filters}|Config]) -> check_filters(Filters), check_config(Owner,Config); @@ -373,14 +416,6 @@ check_level(Level) -> throw({invalid_level,Level}) end. -check_handlers([Id|Handlers]) -> - check_id(Id), - check_handlers(Handlers); -check_handlers([]) -> - ok; -check_handlers(Handlers) -> - throw({invalid_handlers,Handlers}). - check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) -> check_filters(Filters); check_filters([Filter|_]) -> @@ -395,40 +430,24 @@ check_filter_default(FD) when FD==stop; FD==log -> check_filter_default(FD) -> throw({invalid_filter_default,FD}). -check_formatter({logger_formatter,Config}) when is_map(Config) -> - check_logger_formatter_config(maps:to_list(Config)); -check_formatter({logger_formatter,Config}) -> - throw({invalid_formatter_config,Config}); -check_formatter({Mod,_}) -> - %% no knowledge of other formatters - check_mod(Mod); +check_formatter({Mod,Config}) -> + check_mod(Mod), + try Mod:check_config(Config) of + ok -> ok; + {error,Error} -> throw(Error) + catch + C:R:S -> + case {C,R,S} of + {error,undef,[{Mod,check_config,[Config],_}|_]} -> + ok; + _ -> + throw({callback_crashed, + {C,R,logger:filter_stacktrace(?MODULE,S)}}) + end + end; check_formatter(Formatter) -> throw({invalid_formatter,Formatter}). 
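check_formatter/1 above accepts any formatter module and calls its optional check_config/1 callback, tolerating its absence. A minimal custom formatter sketch under that assumption; the module name and output layout are hypothetical, and format/2 is the callback the handlers invoke when formatting a log event:

    -module(my_formatter).
    -export([format/2, check_config/1]).

    format(#{level := Level, msg := Msg}, _Config) ->
        %% msg is {io:format(),Args}, {report,Report} or {string,Chardata}
        Text = case Msg of
                   {string, Chardata} -> Chardata;
                   {report, Report}   -> io_lib:format("~tp", [Report]);
                   {Format, Args}     -> io_lib:format(Format, Args)
               end,
        [atom_to_list(Level), ": ", Text, "\n"].

    check_config(Config) when is_map(Config) -> ok;
    check_config(Config) -> {error, {invalid_formatter_config, ?MODULE, Config}}.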
- -check_logger_formatter_config([{template,T}|Config]) when is_list(T) -> - case lists:all(fun(X) when is_atom(X) -> true; - (X) when is_tuple(X), is_atom(element(1,X)) -> true; - (X) when is_list(X) -> io_lib:printable_unicode_list(X); - (_) -> false - end, - T) of - true -> - check_logger_formatter_config(Config); - false -> - throw({invalid_formatter_template,T}) - end; -check_logger_formatter_config([{legacy_header,LH}|Config]) when is_boolean(LH) -> - check_logger_formatter_config(Config); -check_logger_formatter_config([{single_line,SL}|Config]) when is_boolean(SL) -> - check_logger_formatter_config(Config); -check_logger_formatter_config([{utc,Utc}|Config]) when is_boolean(Utc) -> - check_logger_formatter_config(Config); -check_logger_formatter_config([C|_]) -> - throw({invalid_formatter_config,C}); -check_logger_formatter_config([]) -> - ok. - call_h(Module, Function, Args, DefRet) -> %% Not calling code:ensure_loaded + erlang:function_exported here, %% since in some rare terminal cases, the code_server might not @@ -440,7 +459,59 @@ call_h(Module, Function, Args, DefRet) -> {error,undef,[{Module,Function,Args,_}|_]} -> DefRet; _ -> - {error,{callback_crashed, - {C,R,logger:filter_stacktrace(?MODULE,S)}}} + ST = logger:filter_stacktrace(?MODULE,S), + ?LOG_INTERNAL(error, + [{logger,callback_crashed}, + {process,?SERVER}, + {reason,{C,R,ST}}]), + {error,{callback_crashed,{C,R,ST}}} end end. + +%% There are all sort of API functions that can cause deadlocks if called +%% from the handler callbacks. So we spawn a process that does the request +%% for the logger_server. There are still APIs that will cause problems, +%% namely logger:add_handler +call_h_async(AsyncFun,PostFun,From,#state{ async_req = undefined } = State) -> + Parent = self(), + {Pid, Ref} = spawn_monitor( + fun() -> + put(?LOGGER_SERVER_TAG,true), + receive Ref -> Ref end, + gen_server:cast(Parent, {async_req_reply, Ref, AsyncFun()}) + end), + Pid ! Ref, + {noreply,State#state{ async_req = {Ref,PostFun,From} }}; +call_h_async(AsyncFun,PostFun,From,#state{ async_req_queue = Q } = State) -> + {noreply,State#state{ async_req_queue = queue:in({AsyncFun,PostFun,From},Q) }}. + +call_h_reply({async_req_reply,Ref,Reply}, + #state{ async_req = {Ref,PostFun,From}, async_req_queue = Q} = State) -> + erlang:demonitor(Ref,[flush]), + _ = gen_server:reply(From, PostFun(Reply)), + {Value,NewQ} = queue:out(Q), + NewState = State#state{ async_req = undefined, + async_req_queue = NewQ }, + case Value of + {value,{AsyncFun,NPostFun,NFrom}} -> + call_h_async(AsyncFun,NPostFun,NFrom,NewState); + empty -> + {noreply,NewState} + end; +call_h_reply({'DOWN',Ref,_Proc,Pid,Reason}, #state{ async_req = {Ref,_PostFun,_From}} = State) -> + %% This clause should only be triggered if someone explicitly sends an exit signal + %% to the spawned process. It is only here to make sure that the logger_server does + %% not deadlock if that happens. + ?LOG_INTERNAL(error, + [{logger,process_exited}, + {process,Pid}, + {reason,Reason}]), + call_h_reply( + {async_req_reply,Ref,{error,{logger_process_exited,Pid,Reason}}}, + State); +call_h_reply(Unexpected,State) -> + ?LOG_INTERNAL(info, + [{logger,got_unexpected_message}, + {process,?SERVER}, + {message,Unexpected}]), + {noreply,State}. diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple_h.erl index a1b427b96c..19fb3b54ba 100644 --- a/lib/kernel/src/logger_simple.erl +++ b/lib/kernel/src/logger_simple_h.erl @@ -17,40 +17,21 @@ %% %% %CopyrightEnd% %% --module(logger_simple). 
+-module(logger_simple_h). --export([adding_handler/2, removing_handler/2, log/2]). --export([get_buffer/0]). +-export([adding_handler/1, removing_handler/1, log/2]). %% This module implements a simple handler for logger. It is the %% default used during system start. %%%----------------------------------------------------------------- -%%% API -get_buffer() -> - case whereis(?MODULE) of - undefined -> - {error,noproc}; - Pid -> - Ref = erlang:monitor(process,Pid), - Pid ! {get_buffer,self()}, - receive - {buffer,Buffer} -> - erlang:demonitor(Ref,[flush]), - {ok,Buffer}; - {'DOWN',Ref,process,Pid,Reason} -> - {error,Reason} - end - end. - -%%%----------------------------------------------------------------- %%% Logger callback -adding_handler(?MODULE,Config) -> +adding_handler(#{id:=simple}=Config) -> Me = self(), case whereis(?MODULE) of undefined -> - {Pid,Ref} = spawn_opt(fun() -> init(Me,Config) end, + {Pid,Ref} = spawn_opt(fun() -> init(Me) end, [link,monitor,{message_queue_data,off_heap}]), receive {'DOWN',Ref,process,Pid,Reason} -> @@ -63,7 +44,7 @@ adding_handler(?MODULE,Config) -> {error,{handler_process_name_already_exists,?MODULE}} end. -removing_handler(?MODULE,_Config) -> +removing_handler(#{id:=simple}) -> case whereis(?MODULE) of undefined -> ok; @@ -89,7 +70,7 @@ log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) -> do_log( #{level=>error, msg=>{report,{error,simple_handler_process_dead}}, - meta=>#{time=>erlang:monotonic_time(microsecond)}}), + meta=>#{time=>erlang:system_time(microsecond)}}), do_log(Log); _ -> ?MODULE ! {log,Log} @@ -102,54 +83,50 @@ log(_,_) -> %%%----------------------------------------------------------------- %%% Process -init(Starter,Config) -> +init(Starter) -> register(?MODULE,self()), Starter ! {self(),started}, - BufferSize = - case Config of - #{?MODULE:=#{buffer:=true}} -> - 10; - _ -> - infinity - end, - loop(#{buffer_size=>BufferSize,dropped=>0,buffer=>[]},infinity). + loop(#{buffer_size=>10,dropped=>0,buffer=>[]}). -loop(Buffer,Timeout) -> +loop(Buffer) -> receive stop -> - ok; - {get_buffer,From} -> - loop(Buffer#{send_to=>From},0); + %% We replay the logger messages of there is + %% a default handler when the simple handler + %% is removed. + case logger:get_handler_config(default) of + {ok, _} -> + replay_buffer(Buffer); + _ -> + ok + end; {log,#{msg:=_,meta:=#{time:=_}}=Log} -> do_log(Log), - loop(update_buffer(Buffer,Log),Timeout); + loop(update_buffer(Buffer,Log)); _ -> %% Unexpected message - flush it! - loop(Buffer,Timeout) - after Timeout -> - #{dropped:=D,buffer:=B,send_to:=Pid} = Buffer, - LogList = lists:reverse(B) ++ drop_msg(D), - Pid ! {buffer,LogList}, - loop(Buffer#{buffer_size=>infinity, - dropped=>0, - buffer=>[], - send_to=>false}, - infinity) + loop(Buffer) end. -update_buffer(#{buffer_size:=infinity}=Buffer,_Log) -> - Buffer; update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) -> Buffer#{dropped=>D+1}; update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) -> Buffer#{buffer_size=>S-1,buffer=>[Log|B]}. +replay_buffer(#{ dropped := D, buffer := Buffer }) -> + lists:foreach( + fun F(#{msg := {Tag, Msg}} = L) when Tag =:= string; Tag =:= report -> + F(L#{ msg := Msg }); + F(#{ level := Level, msg := Msg, meta := MD}) -> + logger:log(Level, Msg, MD) + end, lists:reverse(Buffer, drop_msg(D))). + drop_msg(0) -> []; drop_msg(N) -> [#{level=>info, msg=>{"Simple handler buffer full, dropped ~w messages",[N]}, - meta=>#{time=>erlang:monotonic_time(microsecond)}}]. + meta=>#{time=>erlang:system_time(microsecond)}}]. 
%%%----------------------------------------------------------------- %%% Internal @@ -164,8 +141,7 @@ do_log(#{msg:=Msg,meta:=#{time:=T}}) -> display_date(T), display(Msg). -display_date(Timestamp0) when is_integer(Timestamp0) -> - Timestamp = Timestamp0 + erlang:time_offset(microsecond), +display_date(Timestamp) when is_integer(Timestamp) -> Micro = Timestamp rem 1000000, Sec = Timestamp div 1000000, {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime( diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl index 31edcfea8b..63c3ab2dac 100644 --- a/lib/kernel/src/logger_std_h.erl +++ b/lib/kernel/src/logger_std_h.erl @@ -35,8 +35,8 @@ terminate/2, code_change/3]). %% logger callbacks --export([log/2, adding_handler/2, removing_handler/2, - changing_config/3, swap_buffer/2]). +-export([log/2, adding_handler/1, removing_handler/1, + changing_config/2, swap_buffer/2]). %%%=================================================================== %%% API @@ -109,8 +109,8 @@ reset(Name) -> %%%----------------------------------------------------------------- %%% Handler being added -adding_handler(Name, Config) -> - case check_config(adding, Name, Config) of +adding_handler(#{id:=Name}=Config) -> + case check_config(adding, Config) of {ok, Config1} -> %% create initial handler state by merging defaults with config HConfig = maps:get(?MODULE, Config1, #{}), @@ -137,9 +137,8 @@ adding_handler(Name, Config) -> %%%----------------------------------------------------------------- %%% Updating handler config -changing_config(Name, - OldConfig=#{id:=Id, ?MODULE:=#{type:=Type}}, - NewConfig=#{id:=Id}) -> +changing_config(OldConfig=#{id:=Name, ?MODULE:=#{type:=Type}}, + NewConfig=#{id:=Name}) -> MyConfig = maps:get(?MODULE, NewConfig, #{}), case maps:get(type, MyConfig, Type) of Type -> @@ -149,11 +148,11 @@ changing_config(Name, _ -> {error,{illegal_config_change,OldConfig,NewConfig}} end; -changing_config(_Name, OldConfig, NewConfig) -> +changing_config(OldConfig, NewConfig) -> {error,{illegal_config_change,OldConfig,NewConfig}}. changing_config1(Name, OldConfig, NewConfig) -> - case check_config(changing, Name, NewConfig) of + case check_config(changing, NewConfig) of Result = {ok,NewConfig1} -> try gen_server:call(Name, {change_config,OldConfig,NewConfig1}, ?DEFAULT_CALL_TIMEOUT) of @@ -166,9 +165,7 @@ changing_config1(Name, OldConfig, NewConfig) -> Error end. -check_config(adding, Name, Config0) -> - %% Merge in defaults on top level - Config = maps:merge(#{id => Name}, Config0), +check_config(adding, Config) -> %% Merge in defaults on handler level MyConfig0 = maps:get(?MODULE, Config, #{}), MyConfig = maps:merge(#{type => standard_io}, @@ -179,7 +176,7 @@ check_config(adding, Name, Config0) -> Error -> Error end; -check_config(changing, _Name, Config) -> +check_config(changing, Config) -> MyConfig = maps:get(?MODULE, Config, #{}), case check_my_config(maps:to_list(MyConfig)) of ok -> {ok,Config}; @@ -207,7 +204,7 @@ check_my_config([]) -> %%%----------------------------------------------------------------- %%% Handler being removed -removing_handler(Name,_Config) -> +removing_handler(#{id:=Name}) -> stop(Name). 
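With this change the handler callbacks no longer receive the handler name as a separate argument; the id travels inside the configuration map, so the callbacks become adding_handler/1, removing_handler/1 and changing_config/2, as seen in the logger_std_h export list above. A minimal handler skeleton in the new shape (assumed module name, bodies kept trivial for illustration):

-module(minimal_h).
-export([adding_handler/1, removing_handler/1, changing_config/2, log/2]).

%% The handler id is read from the config map instead of a Name argument.
adding_handler(#{id := _Id} = Config) ->
    {ok, Config}.

removing_handler(#{id := _Id}) ->
    ok.

changing_config(_OldConfig, NewConfig) ->
    {ok, NewConfig}.

%% Called for every accepted log event.
log(LogEvent, _Config) ->
    io:format("~p~n", [LogEvent]).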
%%%----------------------------------------------------------------- @@ -222,15 +219,15 @@ swap_buffer(Name,Buffer) -> %%%----------------------------------------------------------------- %%% Log a string or report --spec log(Log, Config) -> ok | dropped when - Log :: logger:log(), +-spec log(LogEvent, Config) -> ok | dropped when + LogEvent :: logger:log_event(), Config :: logger:config(). -log(Log,Config=#{id:=Name}) -> +log(LogEvent,Config=#{id:=Name}) -> %% if the handler has crashed, we must drop this request %% and hope the handler restarts so we can try again true = is_pid(whereis(Name)), - Bin = logger_h_common:log_to_binary(Log,Config), + Bin = logger_h_common:log_to_binary(LogEvent,Config), logger_h_common:call_cast_or_drop(Name, Bin). %%%=================================================================== @@ -257,10 +254,11 @@ init([Name, Config, file_ctrl_sync => FileCtrlSyncInt, last_qlen => 0, last_log_ts => T0, + last_op => sync, burst_win_ts => T0, burst_msg_count => 0}), proc_lib:init_ack({ok,self()}), - gen_server:cast(self(), {repeated_filesync,T0}), + gen_server:cast(self(), repeated_filesync), enter_loop(Config, State1); Error -> logger_h_common:error_notify({init_handler,Name,Error}), @@ -310,12 +308,11 @@ handle_call(filesync, _From, State = #{type := Type, if is_atom(Type) -> {reply, ok, State}; true -> - {reply, file_ctrl_filesync_sync(FileCtrlPid), State} + {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}} end; handle_call({change_config,_OldConfig,NewConfig}, _From, - State = #{filesync_repeat_interval := FSyncInt0, - last_log_ts := LastLogTS}) -> + State = #{filesync_repeat_interval := FSyncInt0}) -> HConfig = maps:get(?MODULE, NewConfig, #{}), State1 = maps:merge(State, HConfig), case logger_h_common:overload_levels_ok(State1) of @@ -334,8 +331,7 @@ handle_call({change_config,_OldConfig,NewConfig}, _From, _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, undefined)), - gen_server:cast(self(), {repeated_filesync, - LastLogTS}) + gen_server:cast(self(), repeated_filesync) end, {reply, ok, State1}; false -> @@ -365,24 +361,24 @@ handle_cast({log, Bin}, State) -> %% clause gets called repeatedly by the handler. In order to %% guarantee that a filesync *always* happens after the last log %% request, the repeat operation must be active! -handle_cast({repeated_filesync,LastLogTS0}, +handle_cast(repeated_filesync, State = #{type := Type, file_ctrl_pid := FileCtrlPid, filesync_repeat_interval := FSyncInt, - last_log_ts := LastLogTS1}) -> + last_op := LastOp}) -> State1 = if not is_atom(Type), is_integer(FSyncInt) -> %% only do filesync if something has been %% written since last time we checked - if LastLogTS1 == LastLogTS0 -> + if LastOp == sync -> ok; true -> file_ctrl_filesync_async(FileCtrlPid) end, {ok,TRef} = timer:apply_after(FSyncInt, gen_server,cast, - [self(),{repeated_filesync,LastLogTS1}]), - State#{rep_sync_tref => TRef}; + [self(),repeated_filesync]), + State#{rep_sync_tref => TRef, last_op => sync}; true -> State end, @@ -600,6 +596,7 @@ write(Name, Mode, T1, Bin, _CallOrCast, State1#{mode => Mode1, last_qlen := LastQLen1, last_log_ts => T1, + last_op => write, burst_win_ts => BurstWinT, burst_msg_count => BurstMsgCount1, file_ctrl_sync => diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl index 669adefdf8..c4e1a0ce1e 100644 --- a/lib/kernel/src/net_kernel.erl +++ b/lib/kernel/src/net_kernel.erl @@ -53,7 +53,7 @@ %% Documented API functions. 
--export([allow/1, +-export([allow/1, allowed/0, connect_node/1, monitor_nodes/1, monitor_nodes/2, @@ -171,6 +171,8 @@ kernel_apply(M,F,A) -> request({apply,M,F,A}). Nodes :: [node()]. allow(Nodes) -> request({allow, Nodes}). +allowed() -> request(allowed). + longnames() -> request(longnames). -spec stop() -> ok | {error, Reason} when @@ -528,6 +530,9 @@ handle_call({allow, Nodes}, From, State) -> async_reply({reply,error,State}, From) end; +handle_call(allowed, From, #state{allowed = Allowed} = State) -> + async_reply({reply,{ok,Allowed},State}, From); + %% %% authentication, used by auth. Simply works as this: %% if the message comes through, the other node IS authorized. diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile index 8599a3d814..2ad1e3107c 100644 --- a/lib/kernel/test/Makefile +++ b/lib/kernel/test/Makefile @@ -77,8 +77,9 @@ MODULES= \ logger_filters_SUITE \ logger_formatter_SUITE \ logger_legacy_SUITE \ - logger_simple_SUITE \ + logger_simple_h_SUITE \ logger_std_h_SUITE \ + logger_test_lib \ os_SUITE \ pg2_SUITE \ seq_trace_SUITE \ diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl index c00fb44c46..988f26280f 100644 --- a/lib/kernel/test/application_SUITE.erl +++ b/lib/kernel/test/application_SUITE.erl @@ -1603,8 +1603,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = rpc:call(Cp1, application, get_key, [appinc, start_phases]), {ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = rpc:call(Cp1, application, get_key, [appinc, mod]), @@ -1625,8 +1624,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = rpc:call(Cp1, application, get_all_key, [appinc]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, "Test of new app file, including appnew"} = gen_server:call({global, {ch,41}}, {get_pid_key, description}), @@ -1643,8 +1641,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}), {ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = gen_server:call({global, {ch,41}}, {get_pid_key, modules}), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = @@ -1671,8 +1668,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = gen_server:call({global, {ch,41}}, get_pid_all_key), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), stop_node_nice(Cp1), ok. 
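net_kernel gains allowed/0, a read-back for the node list that previously could only be set with allow/1. Expected usage from a distributed shell (node names are made up for illustration):

(a@host)1> net_kernel:allow(['b@host']).
ok
(a@host)2> net_kernel:allowed().
{ok,['b@host']}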
diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl index 0470f09f29..9c6712ad74 100644 --- a/lib/kernel/test/erl_distribution_SUITE.erl +++ b/lib/kernel/test/erl_distribution_SUITE.erl @@ -244,7 +244,7 @@ illegal(Name) -> test_node(Name) -> test_node(Name, false). test_node(Name, Illigal) -> - ProgName = atom_to_list(lib:progname()), + ProgName = ct:get_progname(), Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++ " -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++ case Illigal of diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl index a8087e11f9..ef55a2d339 100644 --- a/lib/kernel/test/error_logger_warn_SUITE.erl +++ b/lib/kernel/test/error_logger_warn_SUITE.erl @@ -480,9 +480,12 @@ rb_utc() -> UtcLog=case application:get_env(sasl,utc_log) of {ok,true} -> true; - _AllOthers -> + {ok,false} -> application:set_env(sasl,utc_log,true), - false + false; + undefined -> + application:set_env(sasl,utc_log,true), + undefined end, application:start(sasl), rb:start([{report_dir, rd()}]), @@ -494,7 +497,12 @@ rb_utc() -> Sum=one_rb_findstr([],"UTC"), rb:stop(), application:stop(sasl), - application:set_env(sasl,utc_log,UtcLog), + case UtcLog of + undefined -> + application:unset_env(sasl,utc_log); + _ -> + application:set_env(sasl,utc_log,UtcLog) + end, stop_node(Node), ok. diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl index 22db24de5f..e95635b800 100644 --- a/lib/kernel/test/heart_SUITE.erl +++ b/lib/kernel/test/heart_SUITE.erl @@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive @@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "0"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "10"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) -> clear_cmd(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl index 9a4578917d..a21020ff97 100644 --- a/lib/kernel/test/kernel_config_SUITE.erl +++ b/lib/kernel/test/kernel_config_SUITE.erl @@ -76,7 +76,7 @@ sync(Conf) when is_list(Conf) -> %% Reset wall_clock {T1,_} = erlang:statistics(wall_clock), io:format("~p~n", [{t1, T1}]), - Command = lists:concat([lib:progname(), + Command = lists:append([ct:get_progname(), " -detached -sname cp1 ", "-config ", Config, " -env ERL_CRASH_DUMP erl_crash_dump.cp1"]), diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover index b30bcfe920..960bc0abff 100644 --- a/lib/kernel/test/logger.cover +++ b/lib/kernel/test/logger.cover @@ -8,7 +8,7 @@ logger_filters, 
logger_formatter, logger_server, - logger_simple, + logger_simple_h, logger_std_h, logger_sup]}. diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec index cd76a754a4..1ab90b3e93 100644 --- a/lib/kernel/test/logger.spec +++ b/lib/kernel/test/logger.spec @@ -7,5 +7,5 @@ logger_filters_SUITE, logger_formatter_SUITE, logger_legacy_SUITE, - logger_simple_SUITE, + logger_simple_h_SUITE, logger_std_h_SUITE]}. diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl index f311a9c7ed..f7ec59a7b7 100644 --- a/lib/kernel/test/logger_SUITE.erl +++ b/lib/kernel/test/logger_SUITE.erl @@ -40,18 +40,18 @@ suite() -> [{timetrap,{seconds,30}}]. init_per_suite(Config) -> - case logger:get_handler_config(logger_std_h) of + case logger:get_handler_config(?STANDARD_HANDLER) of {ok,StdH} -> - ok = logger:remove_handler(logger_std_h), - [{logger_std_h,StdH}|Config]; + ok = logger:remove_handler(?STANDARD_HANDLER), + [{default_handler,StdH}|Config]; _ -> Config end. end_per_suite(Config) -> - case ?config(logger_std_h,Config) of + case ?config(default_handler,Config) of {HMod,HConfig} -> - ok = logger:add_handler(logger_std_h,HMod,HConfig); + ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig); _ -> ok end. @@ -105,12 +105,12 @@ start_stop(_Config) -> add_remove_handler(_Config) -> register(callback_receiver,self()), - {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + #{handlers:=Hs0} = logger:i(), {error,{not_found,h1}} = logger:get_handler_config(h1), ok = logger:add_handler(h1,?MODULE,#{}), [add] = test_server:messages_get(), - {ok,#{handlers:=Hs}} = logger:get_logger_config(), - [h1|Hs0] = Hs, + #{handlers:=Hs} = logger:i(), + {value,_,Hs0} = lists:keytake(h1,1,Hs), {ok,{?MODULE,#{level:=info,filters:=[],filter_default:=log}}} = % defaults logger:get_handler_config(h1), ok = logger:set_handler_config(h1,filter_default,stop), @@ -124,7 +124,7 @@ add_remove_handler(_Config) -> ok = check_logged(info,"hello",[],?MY_LOC(1)), ok = logger:remove_handler(h1), [remove] = test_server:messages_get(), - {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + #{handlers:=Hs0} = logger:i(), {error,{not_found,h1}} = logger:get_handler_config(h1), {error,{not_found,h1}} = logger:remove_handler(h1), logger:info("hello",[]), @@ -218,33 +218,52 @@ change_config(_Config) -> {ok,{?MODULE,#{level:=info,filter_default:=stop}=C2}} = logger:get_handler_config(h1), false = maps:is_key(custom,C2), - {error,fail} = logger:set_handler_config(h1,#{fail=>true}), + {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}), {error,{attempting_syncronous_call_to_self,_}} = logger:set_handler_config( - h1,#{call=>fun() -> logger:set_module_level(?MODULE,debug) end}), + h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}), + ok = + logger:set_handler_config( + h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}), {ok,{?MODULE,C2}} = logger:get_handler_config(h1), - %% Change one key only - {error,fail} = logger:set_handler_config(h1,fail,true), + %% Change handler config: Single key + {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end), ok = logger:set_handler_config(h1,custom,custom), [changing_config] = test_server:messages_get(), {ok,{?MODULE,#{custom:=custom}=C3}} = logger:get_handler_config(h1), C2 = maps:remove(custom,C3), + %% Change handler config: Map + ok = logger:update_handler_config(h1,#{custom=>new_custom}), + [changing_config] = test_server:messages_get(), + {ok,{_,C4}} = 
logger:get_handler_config(h1), + C4 = C3#{custom:=new_custom}, + + %% Change logger config: Single key + {ok,LConfig0} = logger:get_logger_config(), + ok = logger:set_logger_config(level,warning), + {ok,LConfig1} = logger:get_logger_config(), + LConfig1 = LConfig0#{level:=warning}, + + %% Change logger config: Map + ok = logger:update_logger_config(#{level=>error}), + {ok,LConfig2} = logger:get_logger_config(), + LConfig2 = LConfig1#{level:=error}, + %% Overwrite logger config - check that defaults are added - {ok,LConfig} = logger:get_logger_config(), ok = logger:set_logger_config(#{filter_default=>stop}), - {ok,#{level:=info,filters:=[],handlers:=[],filter_default:=stop}=LC1} = - logger:get_logger_config(), - 4 = maps:size(LC1), - - %% Change one key only - ok = logger:set_logger_config(handlers,[h1]), - {ok,#{level:=info,filters:=[],handlers:=[h1],filter_default:=stop}} = + {ok,#{level:=info,filters:=[],filter_default:=stop}=LC1} = logger:get_logger_config(), + 3 = maps:size(LC1), + %% Check that internal 'handlers' field has not been changed + #{handlers:=HCs} = logger:i(), + HIds1 = [Id || {Id,_,_} <- HCs], + {ok,#{handlers:=HIds2}} = logger_config:get(?LOGGER_TABLE,logger), + HIds1 = lists:sort(HIds2), %% Cleanup - ok = logger:set_logger_config(LConfig), + ok = logger:set_logger_config(LConfig0), [] = test_server:messages_get(), ok. @@ -299,7 +318,7 @@ macros(_Config) -> macros(cleanup,_Config) -> logger:remove_handler(h1), - logger:reset_module_level(?MODULE), + logger:unset_module_level(?MODULE), ok. set_level(_Config) -> @@ -331,29 +350,29 @@ set_level_module(_Config) -> logger:info(M2=?map_rep,?MY_LOC(0)), ok = check_logged(info,M2,?MY_LOC(1)), - {error,{not_a_module,{bad}}} = logger:reset_module_level({bad}), - ok = logger:reset_module_level(?MODULE), + {error,{not_a_module,{bad}}} = logger:unset_module_level({bad}), + ok = logger:unset_module_level(?MODULE), ok. set_level_module(cleanup,_Config) -> logger:remove_handler(h1), - logger:reset_module_level(?MODULE), + logger:unset_module_level(?MODULE), ok. cache_level_module(_Config) -> - ok = logger:reset_module_level(?MODULE), + ok = logger:unset_module_level(?MODULE), [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? ?LOG_INFO(?map_rep), %% Caching is done asynchronously, so wait a bit for the update timer:sleep(100), [_] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? - ok = logger:reset_module_level(?MODULE), + ok = logger:unset_module_level(?MODULE), [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? ok. cache_level_module(cleanup,_Config) -> - logger:reset_module_level(?MODULE), + logger:unset_module_level(?MODULE), ok. format_report(_Config) -> @@ -425,6 +444,7 @@ filter_failed(cleanup,_Config) -> ok. 
handler_failed(_Config) -> + register(callback_receiver,self()), {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}), {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}), {error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad), @@ -434,26 +454,62 @@ handler_failed(_Config) -> logger:add_handler(h1,?MODULE,#{filter_default=>true}), {error,{invalid_formatter,[]}} = logger:add_handler(h1,?MODULE,#{formatter=>[]}), - ok = logger:add_handler(h1,nomodule,#{filter_default=>log}), + {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}), logger:info(?map_rep), check_no_log(), - #{logger:=#{handlers:=Ids1}, - handlers:=H1} = logger:i(), - false = lists:member(h1,Ids1), + #{handlers:=H1} = logger:i(), false = lists:keymember(h1,1,H1), {error,{not_found,h1}} = logger:remove_handler(h1), - ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,crash=>true}), + ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,log_call=>fun() -> a = b end}), {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}), + [add] = test_server:messages_get(), logger:info(?map_rep), - check_no_log(), - #{logger:=#{handlers:=Ids2}, - handlers:=H2} = logger:i(), - false = lists:member(h2,Ids2), + [remove] = test_server:messages_get(), + #{handlers:=H2} = logger:i(), false = lists:keymember(h2,1,H2), {error,{not_found,h2}} = logger:remove_handler(h2), + CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end, + CrashHandler = fun() -> a = b end, + KillHandler = fun() -> exit(self(), die) end, + + {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} = + logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}), + {error,{handler_not_added,{callback_crashed,_}}} = + logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}), + {error,{handler_not_added,{logger_process_exited,_,die}}} = + logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,#{conf_call=>CallAddHandler}), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,#{conf_call=>CrashHandler}), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,#{conf_call=>KillHandler}), + + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,conf_call,CallAddHandler), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,conf_call,CrashHandler), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,conf_call,KillHandler), + + ok = logger:remove_handler(h1), + [add,remove] = test_server:messages_get(), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}), + ok = logger:remove_handler(h1), + [add,add,add] = test_server:messages_get(), + ok. 
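Together with the call_h/call_h_async changes in logger_server above, a crashing or deadlocking handler callback is now reported back to the caller as a structured error instead of the handler being installed anyway. A sketch of the failure case exercised by handler_failed/1 (module name is made up):

-module(crashing_h).
-export([adding_handler/1, log/2]).

%% Crashes while the handler is being installed.
adding_handler(_Config) ->
    erlang:error(boom).

log(_LogEvent, _Config) ->
    ok.

%% Per handler_failed/1 above, the add is rejected with a wrapped reason:
%%   {error,{handler_not_added,{callback_crashed,_}}} =
%%       logger:add_handler(h1, crashing_h, #{}).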
handler_failed(cleanup,_Config) -> @@ -466,10 +522,6 @@ config_sanity_check(_Config) -> {error,{invalid_filter_default,bad}} = logger:set_logger_config(filter_default,bad), {error,{invalid_level,bad}} = logger:set_logger_config(level,bad), - {error,{invalid_handlers,bad}} = logger:set_logger_config(handlers,bad), - {error,{invalid_id,{bad,bad}}} = - logger:set_logger_config(handlers,[{bad,bad}]), - {error,{invalid_id,"bad"}} = logger:set_logger_config(handlers,["bad"]), {error,{invalid_filters,bad}} = logger:set_logger_config(filters,bad), {error,{invalid_filter,bad}} = logger:set_logger_config(filters,[bad]), {error,{invalid_filter,{_,_}}} = @@ -499,29 +551,96 @@ config_sanity_check(_Config) -> logger:set_handler_config(h1,formatter,bad), {error,{invalid_module,{bad}}} = logger:set_handler_config(h1,formatter,{{bad},cfg}), - {error,{invalid_formatter_config,bad}} = + {error,{invalid_formatter_config,logger_formatter,bad}} = logger:set_handler_config(h1,formatter,{logger_formatter,bad}), - {error,{invalid_formatter_config,{bad,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}), - {error,{invalid_formatter_config,{template,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{template,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{template=>bad}}), - {error,{invalid_formatter_template,[1]}} = + {error,{invalid_formatter_template,logger_formatter,[1]}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{template=>[1]}}), ok = logger:set_handler_config(h1,formatter,{logger_formatter, #{template=>[]}}), - {error,{invalid_formatter_config,{single_line,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{single_line=>bad}}), ok = logger:set_handler_config(h1,formatter,{logger_formatter, #{single_line=>true}}), - {error,{invalid_formatter_config,{legacy_header,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{legacy_header=>bad}}), ok = logger:set_handler_config(h1,formatter,{logger_formatter, #{legacy_header=>true}}), + {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>fun(R) -> + {"~p",[R]} + end}}), + {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>4}}), + {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>4}}), + {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>4}}), + ok = 
logger:set_handler_config(h1,formatter,{module,config}), + {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} = + logger:set_handler_config(h1,formatter,{?MODULE,crash}), ok = logger:set_handler_config(h1,custom,custom), + + %% Old utc parameter is no longer allowed (replaced by time_offset) + {error,{invalid_formatter_config,logger_formatter,{utc,true}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{utc=>true}}), + {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>""}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"Z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"-0:0"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+10:13"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+0"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>bad}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>"s"}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>$\s}}), ok. config_sanity_check(cleanup,_Config) -> @@ -650,14 +769,14 @@ process_metadata(_Config) -> undefined = logger:get_process_metadata(), {error,badarg} = ?TRY(logger:set_process_metadata(bad)), ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), - Time = erlang:monotonic_time(microsecond), + Time = erlang:system_time(microsecond), ProcMeta = #{time=>Time,line=>0,custom=>proc}, ok = logger:set_process_metadata(ProcMeta), S1 = ?str, ?LOG_INFO(S1,#{custom=>macro}), check_logged(info,S1,#{time=>Time,line=>0,custom=>macro}), - Time2 = erlang:monotonic_time(microsecond), + Time2 = erlang:system_time(microsecond), S2 = ?str, ?LOG_INFO(S2,#{time=>Time2,line=>1,custom=>macro}), check_logged(info,S2,#{time=>Time2,line=>1,custom=>macro}), @@ -720,17 +839,20 @@ check_maps(Expected,Got,What) -> end. %% Handler -adding_handler(_Id,Config) -> +adding_handler(#{add_call:=Fun}) -> + Fun(); +adding_handler(Config) -> maybe_send(add), {ok,Config}. -removing_handler(_Id,_Config) -> + +removing_handler(#{rem_call:=Fun}) -> + Fun(); +removing_handler(_Config) -> maybe_send(remove), ok. -changing_config(_Id,_Old,#{call:=Fun}) -> +changing_config(_Old,#{conf_call:=Fun}) -> Fun(); -changing_config(_Id,_Old,#{fail:=true}) -> - {error,fail}; -changing_config(_Id,_Old,Config) -> +changing_config(_Old,Config) -> maybe_send(changing_config), {ok,Config}. @@ -740,8 +862,8 @@ maybe_send(Msg) -> Pid -> Pid ! Msg end. -log(_Log,#{crash:=true}) -> - a=b; +log(_Log,#{log_call:=Fun}) -> + Fun(); log(Log,Config) -> TcProc = maps:get(tc_proc,Config,self()), TcProc ! 
{Log,Config}, @@ -829,3 +951,8 @@ test_macros(emergency=Level) -> %%% Called by macro ?TRY(X) my_try(Fun) -> try Fun() catch C:R -> {C,R} end. + +check_config(crash) -> + erlang:error({badmatch,3}); +check_config(_) -> + ok. diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl index 63e5b56021..7a1736c814 100644 --- a/lib/kernel/test/logger_disk_log_h_SUITE.erl +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -31,7 +31,8 @@ end). suite() -> - [{timetrap,{seconds,30}}]. + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. init_per_suite(Config) -> timer:start(), % to avoid progress report @@ -327,36 +328,38 @@ formatter_fail(Config) -> logger:add_handler(Name, logger_disk_log_h, HConfig), Pid = whereis(Name), true = is_pid(Pid), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC1} = logger:i(), + H = [Id || {Id,_,_} <- HC1], true = lists:member(Name,H), %% Formatter is added automatically {ok,{_,#{formatter:={logger_formatter,_}}}} = logger:get_handler_config(Name), logger:info(M1=?msg,?domain), - Got1 = try_match_file(?log_no(LogFile,1),"=INFO REPORT====.*\n"++M1,5000), + Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* info: "++M1,5000), ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}), logger:info(M2=?msg,?domain), Got2 = try_match_file(?log_no(LogFile,1), - Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + escape(Got1)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M2, 5000), ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}), logger:info(M3=?msg,?domain), Got3 = try_match_file(?log_no(LogFile,1), - Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + escape(Got2)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M3, 5000), ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}), logger:info(?msg,?domain), try_match_file(?log_no(LogFile,1), - Got3++"FORMATTER ERROR: bad_return_value", + escape(Got3)++"FORMATTER ERROR: bad_return_value", 5000), %% Check that handler is still alive and was never dead Pid = whereis(Name), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC2} = logger:i(), + H = [Id || {Id,_,_} <- HC2], ok. 
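The extended config_sanity_check/1 above exercises the new logger_formatter options: report_cb, chars_limit, depth, max_size, time_designator, and time_offset, which replaces the removed utc option. A configuration that should pass those checks, with values chosen only for illustration:

ok = logger:set_handler_config(
       h1, formatter,
       {logger_formatter,
        #{single_line     => true,
          legacy_header   => false,
          chars_limit     => 4096,
          depth           => 20,
          max_size        => unlimited,
          report_cb       => fun(Report) -> {"~p", [Report]} end,
          time_offset     => "Z",   %% replaces the old utc => true
          time_designator => $\s}}).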
formatter_fail(cleanup,_Config) -> @@ -369,10 +372,18 @@ config_fail(_Config) -> #{logger_disk_log_h => #{bad => bad}, filter_default=>log, formatter=>{?MODULE,self()}}), - {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + + {error,{handler_not_added,{invalid_levels,{_,1,_}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{drop_new_reqs_qlen=>1}}), + {error,{handler_not_added,{invalid_levels,{43,42,_}}}} = logger:add_handler(?MODULE,logger_disk_log_h, - #{logger_disk_log_h => #{toggle_sync_qlen=>42, + #{logger_disk_log_h => #{toggle_sync_qlen=>43, drop_new_reqs_qlen=>42}}), + {error,{handler_not_added,{invalid_levels,{_,43,42}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{drop_new_reqs_qlen=>43, + flush_reqs_qlen=>42}}), ok = logger:add_handler(?MODULE,logger_disk_log_h, #{filter_default=>log, @@ -717,7 +728,7 @@ write_failure(Config) -> Log = lists:concat([File,".1"]), ct:pal("Log = ~p", [Log]), - Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + Node = start_h_on_new_node(Config, File), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -761,7 +772,7 @@ sync_failure(Config) -> File = filename:join(Dir, FileName), - Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + Node = start_h_on_new_node(Config, File), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -801,21 +812,12 @@ sync_failure(cleanup, _Config) -> Nodes = nodes(), [test_server:stop_node(Node) || Node <- Nodes]. -start_h_on_new_node(_Config, Func, File) -> - Pa = filename:dirname(code:which(?MODULE)), - Dest = - case os:type() of - {win32,_} -> - lists:concat([" {disk_log,\\\"",File,"\\\"}"]); - _ -> - lists:concat([" \'{disk_log,\"",File,"\"}\'"]) - end, - Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), - NodeName = lists:concat([?MODULE,"_",Func]), - ct:pal("Starting ~s with ~tp", [NodeName,Args]), - {ok,Node} = test_server:start_node(NodeName, peer, [{args, Args}]), - Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), - true = is_pid(Pid), +start_h_on_new_node(Config, File) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_disk_log_h, + #{ disk_log_opts => #{ file => File }}}]}]), ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, {?MODULE,nl}]), Node. @@ -848,62 +850,115 @@ internal_log(Type, Term) -> op_switch_to_sync(Config) -> {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 500, NewHConfig = - HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 3, - drop_new_reqs_qlen => 501, - flush_reqs_qlen => 2000, + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => NumOfReqs+1, + flush_reqs_qlen => 2*NumOfReqs, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, send_burst({n,NumOfReqs}, seq, {chars,79}, info), - NumOfReqs = count_lines(Log), - ok = file:delete(Log). + Lines = count_lines(Log), + ok = file:delete(Log), + NumOfReqs = Lines, + ok. op_switch_to_sync(cleanup, _Config) -> ok = stop_handler(?MODULE). +op_switch_to_drop() -> + [{timetrap,{seconds,180}}]. 
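The extended config_fail/1 checks above suggest that the three overload thresholds must be ordered, toggle_sync_qlen =< drop_new_reqs_qlen =< flush_reqs_qlen, or the handler is rejected with {invalid_levels,...} (the drop_new_reqs_qlen => 1 case appears to trip the same rule against the defaulted toggle_sync_qlen). A configuration that respects that ordering, with handler id, file name and values picked only for illustration:

ok = logger:add_handler(
       my_disk_log, logger_disk_log_h,
       #{logger_disk_log_h => #{toggle_sync_qlen   => 10,
                                drop_new_reqs_qlen => 200,
                                flush_reqs_qlen    => 1000},
         disk_log_opts => #{file => "./my_disk_log"}}).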
op_switch_to_drop(Config) -> - {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - NewHConfig = - HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 3, - flush_reqs_qlen => 600, - enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, - send_burst({n,NumOfReqs}, seq, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages dropped = ~w (~w)", - [NumOfReqs-Logged,NumOfReqs]), - true = (Logged < NumOfReqs), - ok = file:delete(Log). + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 300, + Procs = 2, + Bursts = 10, + NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => Procs*NumOfReqs*Bursts, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that dropping + %% never occurs. Therefore, lets generate a number of + %% bursts to increase the chance of message buildup. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1, Bursts)], + Logged = count_lines(Log), + ok= stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), + true = (Logged < (Procs*NumOfReqs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_drop(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). op_switch_to_flush() -> [{timetrap,{minutes,3}}]. op_switch_to_flush(Config) -> - {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - %% it's important that both async and sync requests have been queued - %% when the flush happens (verify with coverage of flush_log_requests/2) + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% NOTE: it's important that both async and sync + %% requests have been queued when the flush happens + %% (verify with coverage of flush_log_requests/2) - NewHConfig = - HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 99, - flush_reqs_qlen => 100, - enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 1000, - Procs = 500, - send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages flushed/dropped = ~w (~w)", - [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), - true = (Logged < (NumOfReqs*Procs)), - ok = file:delete(Log). 
+ NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{toggle_sync_qlen => 2, + %% disable drop mode + drop_new_reqs_qlen => 300, + flush_reqs_qlen => 300, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1500, + Procs = 10, + Bursts = 10, + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that flushing + %% never occurs, or it gets all messages at once, + %% causing all messages to get flushed (no dropping of + %% sync messages gets tested). Therefore, lets + %% generate a number of bursts to increase the chance + %% of message buildup in some random fashion. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1,Bursts)], + Logged = count_lines(Log), + ok= stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), + true = (Logged < (NumOfReqs*Procs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_flush(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). limit_burst_disabled(Config) -> @@ -987,7 +1042,7 @@ qlen_kill_new(Config) -> {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_disk_log_h => DLHConfig#{enable_kill_overloaded=>true, @@ -1008,7 +1063,7 @@ qlen_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), true = is_pid(whereis(?MODULE)), ok after @@ -1024,7 +1079,7 @@ mem_kill_new(Config) -> {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_disk_log_h => DLHConfig#{enable_kill_overloaded=>true, @@ -1045,7 +1100,7 @@ mem_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter * 2), + timer:sleep(RestartAfter + 2000), true = is_pid(whereis(?MODULE)), ok after @@ -1078,7 +1133,7 @@ restart_after(Config) -> end, {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig2 = HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>10, @@ -1090,7 +1145,7 @@ restart_after(Config) -> send_burst({n,100}, {spawn,2,0}, {chars,79}, info), receive {'DOWN', MRef2, _, _, _Info2} -> - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), Pid1 = whereis(?MODULE), true = is_pid(Pid1), false = (Pid1 == Pid0), @@ -1361,6 +1416,29 @@ count_lines1(File) -> file:close(Dev), Lines. +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). 
+ +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + start_tracer(Trace,Expected) -> Pid = self(), dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), @@ -1382,7 +1460,8 @@ tpl([{M,F,A}|Trace]) -> tpl([]) -> ok. -tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) -> +tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]}}, + {Pid,[{Mod,Func,Op}|Expected]}) -> maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]}}, {Pid,[{Mod,Func,Data}|Expected]}) -> maybe_tracer_done(Pid,Expected,{Mod,Func,Data}); @@ -1412,3 +1491,10 @@ check_tracer(T) -> dbg:stop_clear(), ct:fail({timeout,tracer}) end. + +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl index c2d3364701..601d331fb0 100644 --- a/lib/kernel/test/logger_env_var_SUITE.erl +++ b/lib/kernel/test/logger_env_var_SUITE.erl @@ -1,4 +1,4 @@ -% +%% %% %CopyrightBegin% %% %% Copyright Ericsson AB 2018. All Rights Reserved. @@ -21,431 +21,636 @@ -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("kernel/include/logger.hrl"). -include_lib("kernel/src/logger_internal.hrl"). --define(all_vars,[{kernel,logger_dest}, - {kernel,logger_level}, - {kernel,logger_log_progress}, - {kernel,logger_sasl_compatible}, - {kernel,error_logger}]). +-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]). suite() -> - [{timetrap,{seconds,30}}]. + [{timetrap,{seconds,60}}, + {ct_hooks,[logger_test_lib]}]. init_per_suite(Config) -> - Env = [{App,Key,application:get_env(App,Key)} || {App,Key} <- ?all_vars], - Removed = cleanup(), - [{env,Env},{logger,Removed}|Config]. - -end_per_suite(Config) -> - [application:set_env(App,Key,Val) || - {App,Key,Val} <- ?config(env,Config), - Val =/= undefined], - Hs = ?config(logger,Config), - [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], - ok. - -init_per_group(_Group, Config) -> - Config. - -end_per_group(_Group, _Config) -> - ok. - -init_per_testcase(_TestCase, Config) -> Config. -end_per_testcase(Case, Config) -> - try apply(?MODULE,Case,[cleanup,Config]) - catch error:undef -> ok - end, - cleanup(), +end_per_suite(_Config) -> ok. groups() -> - []. - -all() -> + [{error_logger,[],[error_logger_tty, + error_logger_tty_sasl_compatible, + error_logger_false, + error_logger_false_progress, + error_logger_false_sasl_compatible, + error_logger_silent, + error_logger_silent_sasl_compatible, + error_logger_file]}, + {logger,[],[logger_file, + logger_file_sasl_compatible, + logger_file_log_progress, + logger_file_no_filter, + logger_file_no_filter_level, + logger_file_formatter, + logger_filters, + logger_filters_stop, + logger_module_level, + logger_disk_log, + logger_disk_log_formatter, + logger_undefined, + logger_many_handlers_default_first, + logger_many_handlers_default_last, + logger_many_handlers_default_last_broken_filter + ]}, + {bad,[],[bad_error_logger, + bad_level, + bad_sasl_compatibility, + bad_progress]}]. 
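The rewritten env-var suite drives logger start-up through kernel application parameters instead of the removed logger_dest variable: error_logger (tty, {file,Log}, false or silent), logger_level, logger_sasl_compatible, logger_progress_reports, and the structured logger parameter. As a rough command-line equivalent of what setup/2 passes (flag spelling assumed from the parameter names; values are plain Erlang terms):

erl -kernel logger_level notice \
    -kernel logger_sasl_compatible true \
    -kernel logger_progress_reports log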
+ +all() -> [default, default_sasl_compatible, - dest_tty, - dest_tty_sasl_compatible, - dest_false, - dest_false_progress, - dest_false_sasl_compatible, - dest_silent, - dest_silent_sasl_compatible, - dest_file_old, - dest_file, - dest_disk_log, - %% disk_log_vars, % or test this in logger_disk_log_SUITE? sasl_compatible_false, sasl_compatible_false_no_progress, sasl_compatible, - bad_dest%% , - %% bad_level, - %% bad_sasl_compatibility, - %% bad_progress + {group,bad}, + {group,error_logger}, + {group,logger} ]. default(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - undefined, - undefined, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), + {ok,#{handlers:=Hs},_Node} = setup(Config,[]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), true = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - false = lists:keymember(sasl_h,1,Hs), - false = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. default_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - undefined, - undefined, % dest - undefined, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{logger_sasl_compatible,true}]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + {domain,{_,{log,super,[beam,erlang,otp]}}} = lists:keyfind(domain,1,StdFilters), false = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), ok. -dest_tty(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - tty, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), +error_logger_tty(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config,[{error_logger,tty}]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), true = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - false = lists:keymember(sasl_h,1,Hs), - false = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. 
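Note that the default domain filter argument checked throughout these tests changes from {log,prefix_of,[beam,erlang,otp,sasl]} to {log,super,[beam,erlang,otp,sasl]}. Re-installing an equivalent filter at run time would look roughly like this, assuming logger:add_handler_filter/3 from the logger API and the handler id default used elsewhere in the patch:

ok = logger:add_handler_filter(
       default, domain,
       {fun logger_filters:domain/2, {log, super, [beam, erlang, otp, sasl]}}).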
-dest_tty_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - tty, % dest - undefined, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), +error_logger_tty_sasl_compatible(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,tty}, + {logger_sasl_compatible,true}]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + {domain,{_,{log,super,[beam,erlang,otp]}}} = lists:keyfind(domain,1,StdFilters), false = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), ok. -dest_false(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - false, % dest - notice, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), - notice = maps:get(level,SimpleC), +error_logger_false(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + notice = maps:get(level,L), SimpleFilters = maps:get(filters,SimpleC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), true = lists:keymember(stop_progress,1,SimpleFilters), - false = lists:keymember(sasl_h,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. -dest_false_progress(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - false, % dest - notice, % level - undefined, % sasl comp (default=false) - true), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), - notice = maps:get(level,SimpleC), +error_logger_false_progress(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}, + {logger_progress_reports,log}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + notice = maps:get(level,L), SimpleFilters = maps:get(filters,SimpleC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), false = lists:keymember(stop_progress,1,SimpleFilters), - false = lists:keymember(sasl_h,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. 
-dest_false_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - false, % dest - notice, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), - notice = maps:get(level,SimpleC), +error_logger_false_sasl_compatible(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}, + {logger_sasl_compatible,true}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + notice = maps:get(level,L), SimpleFilters = maps:get(filters,SimpleC), - {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + {domain,{_,{log,super,[beam,erlang,otp]}}} = lists:keyfind(domain,1,SimpleFilters), false = lists:keymember(stop_progress,1,SimpleFilters), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), + true = lists:keymember(sasl,1,Hs), ok. -dest_silent(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - silent, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - false = lists:keymember(logger_simple,1,Hs), - false = lists:keymember(sasl_h,1,Hs), +error_logger_silent(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. -dest_silent_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - silent, % dest - undefined, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - false = lists:keymember(logger_simple,1,Hs), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), +error_logger_silent_sasl_compatible(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}, + {logger_sasl_compatible,true}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), ok. -dest_file_old(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - error_logger, - file, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - check_log(Log, - file, % dest - 0), % progress in std logger +error_logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger ok. 
- - -dest_file(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - check_log(Log, - file, % dest - 0), % progress in std logger + + +logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + ok. - - -dest_disk_log(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - disk_log, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - check_log(Log, - disk_log, % dest - 0), % progress in std logger + +logger_file_sasl_compatible(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger_sasl_compatible,true}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), + + ok. + +logger_file_log_progress(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_file_no_filter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filter_default=>log,filters=>[], + logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. 
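The {handler,Id,Module,HandlerConfig} tuples passed to setup/2 here are entries of the kernel logger environment parameter, so the same configuration can also be given statically, for example in a sys.config (log file name below is made up):

[{kernel,
  [{logger,
    [{handler, default, logger_std_h,
      #{logger_std_h => #{type => {file, "./log/erlang.log"}}}}]},
   {logger_level, info}]}].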
+ +logger_file_no_filter_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[],level=>error, + logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + error),% level + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + error = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_file_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,Log}}}}]}]), + check_single_log(Node,Log, + file,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_filters(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,logger:=Logger},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}, + {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + LoggerFilters = maps:get(filters,Logger), + true = lists:keymember(stop_progress,1,LoggerFilters), + + ok. + +logger_filters_stop(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,logger:=Logger},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + logger_std_h=>#{type=>{file,Log}}}}, + {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0, + notice),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + LoggerFilters = maps:get(filters,Logger), + true = lists:keymember(log_error,1,LoggerFilters), + + ok. 
+ +logger_module_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}, + {module_level,error,[supervisor]} + ]}]), + check_default_log(Node,Log, + file,% dest + 3),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + [{supervisor,error}] = ModuleLevels, + ok. + +logger_disk_log(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{disk_log_opts=>#{file=>Log}}}]}]), + check_default_log(Node,Log, + disk_log,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_disk_log_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_disk_log_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + disk_log_opts=>#{file=>Log}}}]}]), + check_single_log(Node,Log, + disk_log,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_disk_log_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_undefined(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + info = maps:get(level,L), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,SimpleFilters), + true = lists:keymember(stop_progress,1,SimpleFilters), + false = lists:keymember(sasl,1,Hs), + ok. + + +%% Test that we can add multiple handlers with the default first +logger_many_handlers_default_first(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_error), + LogInfo = file(Config,logger_many_handlers_default_first_info), + + logger_many_handlers( + Config,[{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,LogErr}}} + }, + {handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + logger_std_h=>#{type=>{file,LogInfo}}} + } + ]}], LogErr, LogInfo, 6). 
+ +%% Test that we can add multiple handlers with the default last +logger_many_handlers_default_last(Config) -> + LogErr = file(Config,logger_many_handlers_default_last_error), + LogInfo = file(Config,logger_many_handlers_default_last_info), + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + logger_std_h=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,LogErr}}} + } + ]}], LogErr, LogInfo, 7). + +%% Check that we can handle that an added logger has a broken filter +%% This used to cause a deadlock. +logger_many_handlers_default_last_broken_filter(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_broken_filter_error), + LogInfo = file(Config,logger_many_handlers_default_first_broken_filter_info), + + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{broken,{fun logger_filters:level/2,broken_state}}, + {level,{fun logger_filters:level/2,{stop,gteq,error}}}], + logger_std_h=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,LogErr}}} + } + ]}], LogErr, LogInfo, 7). + +logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) -> + {ok,#{handlers:=Hs},Node} = setup(Config,Env), + check_single_log(Node,LogErr, + file,% dest + 0,% progress in std logger + error), % level + ok = rpc:call(Node,logger_std_h,filesync,[info]), + {ok, Bin} = file:read_file(LogInfo), + ct:log("Log content:~n~s",[Bin]), + match(Bin,<<"info:">>,NumProgress+1,info,info), + match(Bin,<<"alert:">>,0,alert,info), + ok. - sasl_compatible_false(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - false, % sasl comp - true), % progress - check_log(Log, - file, % dest - 4), % progress in std logger + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}, + {logger_progress_reports,log}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger ok. sasl_compatible_false_no_progress(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - false, % sasl comp - false), % progress - check_log(Log, - file, % dest - 0), % progress in std logger + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}, + {logger_progress_reports,stop}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger ok. sasl_compatible(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - true, % sasl comp - undefined), % progress - check_log(Log, - file, % dest - 0), % progress in std logger + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}, + {sasl_compatible,true}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger ok. -bad_dest(Config) -> - {error,{bad_config,{kernel,{logger_dest,baddest}}}} = - setup(Config,?FUNCTION_NAME, - logger_dest, - baddest, - undefined, - undefined, - undefined). +bad_error_logger(Config) -> + error = setup(Config,[{error_logger,baddest}]). 
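
The logger_many_handlers_* cases above install two logger_std_h instances side by side: one that takes only error and above, and one that takes info but stops anything at error level or above with a logger_filters:level/2 filter. Outside the test environment the equivalent runtime setup would look roughly like this (a sketch; handler ids and file names are illustrative):

    %% Errors to one file; everything below error to another.
    ok = logger:add_handler(error_file, logger_std_h,
                            #{level => error,
                              filters => [],
                              logger_std_h => #{type => {file, "log/error.log"}}}),
    ok = logger:add_handler(info_file, logger_std_h,
                            #{level => info,
                              filters => [{no_errors,
                                           {fun logger_filters:level/2,
                                            {stop, gteq, error}}}],
                              logger_std_h => #{type => {file, "log/info.log"}}}).

The *_default_first and *_default_last variants only swap the order of the handler entries, checking that installation order does not matter; the broken_filter variant adds a filter that crashes, making sure a faulty filter cannot deadlock handler setup.
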
bad_level(Config) -> - error = - setup(Config,?FUNCTION_NAME, - logger_dest, - tty, - badlevel, - undefined, - undefined). + error = setup(Config,[{logger_level,badlevel}]). bad_sasl_compatibility(Config) -> - error = - setup(Config,?FUNCTION_NAME, - logger_dest, - tty, - info, - badcomp, - undefined). + error = setup(Config,[{logger_sasl_compatible,badcomp}]). bad_progress(Config) -> - error = - setup(Config,?FUNCTION_NAME, - logger_dest, - tty, - info, - undefined, - badprogress). + error = setup(Config,[{logger_progress_reports,badprogress}]). %%%----------------------------------------------------------------- %%% Internal -setup(Config,Func,DestVar,Dest,Level,SaslComp,Progress) -> - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - Dir = ?config(priv_dir,Config), - File = lists:concat([?MODULE,"_",Func,".log"]), - Log = filename:join(Dir,File), - case Dest of - undefined -> - ok; - F when F==file; F==disk_log -> - application:set_env(kernel,DestVar,{Dest,Log}); - _ -> - application:set_env(kernel,DestVar,Dest) - end, - case Level of - undefined -> - ok; - _ -> - application:set_env(kernel,logger_level,Level) - end, - case SaslComp of - undefined -> - ok; - _ -> - application:set_env(kernel,logger_sasl_compatible,SaslComp) - end, - case Progress of - undefined -> - ok; - _ -> - application:set_env(kernel,logger_log_progress,Progress) - end, - case logger:setup_standard_handler() of - ok -> - application:start(sasl), - StdH = case Dest of - NoH when NoH==false; NoH==silent -> false; - _ -> true - end, - StdH = is_pid(whereis(?STANDARD_HANDLER)), - SaslH = if SaslComp -> true; - true -> false - end, - SaslH = is_pid(whereis(sasl_h)), - {ok,{Log,maps:get(handlers,logger:i())}}; - Error -> - Error - end. +file(Config,Func) -> + filename:join(proplists:get_value(priv_dir,Config), + lists:concat([Func,".log"])). + +check_default_log(Node,Log,Dest,NumProgress) -> + check_default_log(Node,Log,Dest,NumProgress,info). +check_default_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level), + match(Bin1,<<"ALERT REPORT">>,1,alert,Level), + match(Bin1,<<"INFO REPORT">>,0,info,Level), + match(Bin1,<<"DEBUG REPORT">>,0,debug,Level), -check_log(Log,Dest,NumProgress) -> - ok = logger:alert("dummy1"), - ok = logger:debug("dummy1"), + match(Bin2,<<"INFO REPORT">>,1,info,Level), + match(Bin2,<<"DEBUG REPORT">>,0,debug,Level), + ok. + +check_single_log(Node,Log,Dest,NumProgress) -> + check_single_log(Node,Log,Dest,NumProgress,info). +check_single_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"info:">>,NumProgress,info,Level), + match(Bin1,<<"alert:">>,1,alert,Level), + match(Bin1,<<"debug:">>,0,debug,Level), + + match(Bin2,<<"info:">>,NumProgress+1,info,Level), + match(Bin2,<<"debug:">>,0,debug,Level), + + ok. + +check_log(Node,Log,Dest) -> + + ok = log(Node,alert,["dummy1"]), + ok = log(Node,debug,["dummy1"]), %% Check that there are progress reports (supervisor and %% application_controller) and an error report (the call above) in %% the log. There should not be any info reports yet. 
- {ok,Bin1} = sync_and_read(Dest,Log), + {ok,Bin1} = sync_and_read(Node,Dest,Log), ct:log("Log content:~n~s",[Bin1]), - match(Bin1,<<"PROGRESS REPORT">>,NumProgress), - match(Bin1,<<"ALERT REPORT">>,1), - match(Bin1,<<"INFO REPORT">>,0), - match(Bin1,<<"DEBUG REPORT">>,0), %% Then stop sasl and see that the info report from %% application_controller is there - ok = application:stop(sasl), - {ok,Bin2} = sync_and_read(Dest,Log), + ok = rpc:call(Node,application,stop,[sasl]), + {ok,Bin2} = sync_and_read(Node,Dest,Log), ct:log("Log content:~n~s",[Bin2]), - match(Bin2,<<"INFO REPORT">>,1), - match(Bin1,<<"DEBUG REPORT">>,0), - ok. + {ok,Bin1,Bin2}. -match(Bin,Pattern,0) -> +match(Bin,Pattern,0,_,_) -> nomatch = re:run(Bin,Pattern,[{capture,none}]); -match(Bin,Pattern,N) -> - {match,M} = re:run(Bin,Pattern,[{capture,all},global]), - N = length(M). - -sync_and_read(disk_log,Log) -> - logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), - file:read_file(Log ++ ".1"); -sync_and_read(file,Log) -> - logger_std_h:filesync(?STANDARD_HANDLER), - file:read_file(Log). - -cleanup() -> - application:stop(sasl), - [application:unset_env(App,Key) || {App,Key} <- ?all_vars], - #{handlers:=Hs0} = logger:i(), - Hs = lists:keydelete(cth_log_redirect,1,Hs0), - [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], - Hs. +match(Bin,Pattern,N,LogLevel,ConfLevel) -> + case logger:compare_levels(LogLevel,ConfLevel) of + lt -> match(Bin,Pattern,0,LogLevel,ConfLevel); + _ -> + {match,M} = re:run(Bin,Pattern,[{capture,all},global]), + N = length(M) + end. diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl index 21f14bbc02..11cce8fd20 100644 --- a/lib/kernel/test/logger_filters_SUITE.erl +++ b/lib/kernel/test/logger_filters_SUITE.erl @@ -75,66 +75,79 @@ all() -> remote_gl]. 
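
The logger_filters_SUITE changes that follow track a rename of the domain filter comparison actions: prefix_of, starts_with, equals and no_domain become super, sub, equal and undefined, and a not_equal action is added. In short, super matches events whose domain is the given domain or an ancestor of it, sub matches the given domain or any extension of it, and undefined matches events that carry no domain at all. A hedged usage sketch, attaching such a filter with logger:add_handler_filter/3 as the other suites in this change do (handler and filter ids are illustrative):

    %% Stop events tagged with the SASL domain, or any sub-domain of it,
    %% before they reach this handler; for every other event the filter
    %% returns 'ignore' and the remaining filters decide.
    ok = logger:add_handler_filter(
           default, stop_sasl,
           {fun logger_filters:domain/2,
            {stop, sub, [beam, erlang, otp, sasl]}}).
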
domain(_Config) -> - L1 = logger_filters:domain(L1=?dlog([]),{log,prefix_of,[]}), - stop = logger_filters:domain(?dlog([]),{stop,prefix_of,[]}), - L2 = logger_filters:domain(L2=?dlog([]),{log,starts_with,[]}), - stop = logger_filters:domain(?dlog([]),{stop,starts_with,[]}), - L3 = logger_filters:domain(L3=?dlog([]),{log,equals,[]}), - stop = logger_filters:domain(?dlog([]),{stop,equals,[]}), - ignore = logger_filters:domain(?dlog([]),{log,no_domain,[]}), - ignore = logger_filters:domain(?dlog([]),{stop,no_domain,[]}), - - L4 = logger_filters:domain(L4=?dlog([a]),{log,prefix_of,[a,b]}), - stop = logger_filters:domain(?dlog([a]),{stop,prefix_of,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{log,starts_with,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{stop,starts_with,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{log,equals,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{stop,equals,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{log,no_domain,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{stop,no_domain,[a,b]}), - - ignore = logger_filters:domain(?dlog([a,b]),{log,prefix_of,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{stop,prefix_of,[a]}), - L5 = logger_filters:domain(L5=?dlog([a,b]),{log,starts_with,[a]}), - stop = logger_filters:domain(?dlog([a,b]),{stop,starts_with,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{log,equals,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{stop,equals,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{log,no_domain,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{stop,no_domain,[a]}), - - ignore = logger_filters:domain(?ndlog,{log,prefix_of,[a]}), - ignore = logger_filters:domain(?ndlog,{stop,prefix_of,[a]}), - ignore = logger_filters:domain(?ndlog,{log,starts_with,[a]}), - ignore = logger_filters:domain(?ndlog,{stop,starts_with,[a]}), - ignore = logger_filters:domain(?ndlog,{log,equals,[a]}), - ignore = logger_filters:domain(?ndlog,{stop,equals,[a]}), - L6 = logger_filters:domain(L6=?ndlog,{log,no_domain,[a]}), - stop = logger_filters:domain(?ndlog,{stop,no_domain,[a]}), - - L7 = logger_filters:domain(L7=?dlog([a,b,c,d]),{log,prefix_of,[a,b,c,d]}), - stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,prefix_of,[a,b,c,d]}), - L8 = logger_filters:domain(L8=?dlog([a,b,c,d]),{log,starts_with,[a,b,c,d]}), - stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,starts_with,[a,b,c,d]}), - L9 = logger_filters:domain(L9=?dlog([a,b,c,d]),{log,equals,[a,b,c,d]}), - stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equals,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,no_domain,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,no_domain,[a,b,c,d]}), + L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}), + stop = logger_filters:domain(?dlog([]),{stop,super,[]}), + L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}), + stop = logger_filters:domain(?dlog([]),{stop,sub,[]}), + L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}), + stop = logger_filters:domain(?dlog([]),{stop,equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}), + + L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}), + ignore = 
logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}), + L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}), + + ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}), + L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}), + L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}), + + ignore = logger_filters:domain(?ndlog,{log,super,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,super,[a]}), + ignore = logger_filters:domain(?ndlog,{log,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{log,equal,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}), + L8 = logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}), + stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}), + L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}), + stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}), + + L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}), + L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}), + L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}), %% A domain field in meta which is not a list is allowed by the - %% filter, but it will never match. - ignore = logger_filters:domain(?dlog(dummy),{log,prefix_of,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,prefix_of,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{log,starts_with,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,starts_with,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{log,equals,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,equals,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{log,no_domain,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,no_domain,[a,b,c,d]}), + %% filter, but since MatchDomain is always a list of atoms, only + %% Action=not_equal can ever match. 
+ ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}), + L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}), {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)), - {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,prefix_of,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})), {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})), - {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,prefix_of,bad})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})), ok. diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl index 7d1f33746d..7a93f2ca79 100644 --- a/lib/kernel/test/logger_formatter_SUITE.erl +++ b/lib/kernel/test/logger_formatter_SUITE.erl @@ -68,12 +68,13 @@ all() -> level_or_msg_in_meta, faulty_log, faulty_config, - faulty_msg]. + faulty_msg, + update_config]. default(_Config) -> String1 = format(info,{"~p",[term]},#{},#{}), ct:log(String1), - [_Date,_Time,"info:","term\n"] = string:lexemes(String1," "), + [_DateTime,"info:","term\n"] = string:lexemes(String1," "), Time = timestamp(), ExpectedTimestamp = default_time_format(Time), @@ -268,8 +269,8 @@ format_msg(_Config) -> String8 = format(info,{string,['not',printable,list]}, #{report_cb=>fun(_)-> {"formatted",[]} end}, #{template=>Template}), - ct:log(String8), - "INVALID STRING: ['not',printable,list]" = String8, + ct:log("~ts",[String8]), % avoiding ct_log crash + "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String8, String9 = format(info,{string,"string"},#{},#{template=>Template}), ct:log(String9), @@ -297,22 +298,22 @@ max_size(_Config) -> single_line=>false}, "12345678901234567890" = format(info,{"12345678901234567890",[]},#{},Cfg), - application:set_env(kernel,logger_max_size,11), - "12345678901234567890" = % min value is 50, so this is not limited - format(info,{"12345678901234567890",[]},#{},Cfg), - "12345678901234567890123456789012345678901234567..." = % 50 - format(info, - {"123456789012345678901234567890123456789012345678901234567890", - []}, - #{}, - Cfg), - application:set_env(kernel,logger_max_size,53), - "12345678901234567890123456789012345678901234567890..." = %53 - format(info, - {"123456789012345678901234567890123456789012345678901234567890", - []}, - #{}, - Cfg), + %% application:set_env(kernel,logger_max_size,11), + %% "12345678901234567890" = % min value is 50, so this is not limited + %% format(info,{"12345678901234567890",[]},#{},Cfg), + %% "12345678901234567890123456789012345678901234567..." = % 50 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), + %% application:set_env(kernel,logger_max_size,53), + %% "12345678901234567890123456789012345678901234567890..." = %53 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), "123456789012..." 
= format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}), "12345678901234567890" = @@ -341,12 +342,6 @@ depth(_Config) -> {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, #{}, #{template=>Template}), - application:set_env(kernel,logger_format_depth,12), - "[1,2,3,4,5,6,7,8,9,0,1|...]" = - format(info, - {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, - #{}, - #{template=>Template}), "[1,2,3,4,5,6,7,8,9,0,1,2|...]" = format(info, {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, @@ -361,7 +356,7 @@ depth(_Config) -> depth=>unlimited}), ok. depth(cleanup,_Config) -> - application:unset_env(kernel,logger_format_depth), + application:unset_env(kernel,error_logger_format_depth), ok. chars_limit(_Config) -> @@ -370,7 +365,7 @@ chars_limit(_Config) -> lists:seq(1,100), maps:from_list(lists:zip(lists:seq(1,100), lists:duplicate(100,value)))]}, - Meta = #{time=>"2018-04-26 9:15:40.449879"}, + Meta = #{time=>timestamp()}, Template = [time," - ", msg, "\n"], FC = #{template=>Template, depth=>unlimited, @@ -382,7 +377,7 @@ chars_limit(_Config) -> L1 = string:length(String1), ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]), true = L1 > CL1, - true = L1 < CL1 + 10, + true = L1 < CL1 + 15, String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}), L2 = string:length(String2), @@ -394,13 +389,13 @@ chars_limit(_Config) -> L3 = string:length(String3), ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]), true = L3 > CL3, - true = L3 < CL3 + 10, + true = L3 < CL3 + 15, String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}), L4 = string:length(String4), ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]), true = L4 > CL3, - true = L4 < CL3 + 10, + true = L4 < CL3 + 15, %% Test that max_size truncates the string which is limited by %% depth and chars_limit @@ -411,6 +406,14 @@ chars_limit(_Config) -> L5 = MS5, true = lists:prefix(lists:sublist(String5,L5-4),String4), + %% Test that chars_limit limits string also + Str = "123456789012345678901234567890123456789012345678901234567890123456789", + CL6 = 80, + String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}), + L6 = string:length(String6), + ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]), + L6 = CL6, + ok. format_mfa(_Config) -> @@ -439,29 +442,58 @@ format_mfa(_Config) -> ok. 
format_time(_Config) -> - Time1 = timestamp(), - ExpectedTimestamp1 = default_time_format(Time1), - String1 = format(info,{"~p",[term]},#{time=>Time1},#{}), - ct:log(String1), - " info: term\n" = string:prefix(String1,ExpectedTimestamp1), - - Time2 = timestamp(), - ExpectedTimestamp2 = default_time_format(Time2,true), - String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}), - ct:log(String2), - " info: term\n" = string:prefix(String2,ExpectedTimestamp2), - - application:set_env(kernel,logger_utc,true), - Time3 = timestamp(), - ExpectedTimestamp3 = default_time_format(Time3,true), - String3 = format(info,{"~p",[term]},#{time=>Time3},#{}), - ct:log(String3), - " info: term\n" = string:prefix(String3,ExpectedTimestamp3), + Time = timestamp(), + Meta = #{time=>Time}, + FC = #{template=>[time]}, + Msg = {string,""}, + ExpectedLocal = default_time_format(Time,false), + ExpectedUtc = default_time_format(Time,true), + + %% default - local time + ExpectedLocal = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% stdlib utc_log works when time_offset parameter is not set + application:set_env(stdlib,utc_log,true), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedLocal = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,true), + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + + %% time_designator config parameter to formatter + ExpectedLocalS = default_time_format(Time,false,$\s), + ExpectedUtcS = default_time_format(Time,true,$\s), + + ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"", + time_designator=>$\s}), + ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z", + time_designator=>$\s}), ok. format_time(cleanup,_Config) -> - application:unset_env(kernel,logger_utc), + application:unset_env(sasl,utc_log), + application:unset_env(stdlib,utc_log), ok. level_or_msg_in_meta(_Config) -> @@ -482,7 +514,7 @@ level_or_msg_in_meta(_Config) -> ok. faulty_log(_Config) -> - %% Unexpected log (should be type logger:log()) - print error + %% Unexpected log (should be type logger:log_event()) - print error {error, function_clause, {logger_formatter,format,[_,_],_}} = @@ -509,6 +541,54 @@ faulty_msg(_Config) -> #{})), ok. 
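
The rewritten format_time/1 above pins the default timestamp down to RFC 3339 as produced by calendar:system_time_to_rfc3339/2, steered by the formatter options time_offset and time_designator, with the stdlib and sasl utc_log parameters acting only as fallbacks. A small stand-alone sketch of the same conversion (values are illustrative):

    %% Microsecond system time rendered the way logger_formatter renders it.
    T = erlang:system_time(microsecond),
    %% Defaults: local time, $T as the date/time separator.
    Local = calendar:system_time_to_rfc3339(T, [{unit, microsecond}]),
    %% Equivalent of time_offset => "Z", time_designator => $\s in the
    %% formatter configuration: UTC with a space separator.
    Utc = calendar:system_time_to_rfc3339(T, [{unit, microsecond},
                                              {offset, "Z"},
                                              {time_designator, $\s}]).
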
+%% Test that formatter config can be changed, and that the default +%% template is updated accordingly +update_config(_Config) -> + logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}), + ok = logger:add_handler(?MODULE,?MODULE,#{}), + D = lists:seq(1,1000), + logger:info("~p~n",[D]), + {Lines1,C1} = check_log(), + [ct:log(L) || L <- Lines1], + ct:log("~p",[C1]), + [Line1] = Lines1, + [_Time,"info: "++D1] = string:split(Line1," "), + true = length(D1)>3000, + true = #{}==C1, + + ok = logger:update_formatter_config(?MODULE,single_line,false), + logger:info("~p~n",[D]), + {Lines2,C2} = check_log(), + [ct:log(L) || L <- Lines2], + ct:log("~p",[C2]), + true = length(Lines2)>50, + true = #{single_line=>false}==C2, + + ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}), + logger:info("~p~n",[D]), + {Lines3,C3} = check_log(), + [ct:log(L) || L <- Lines3], + ct:log("~p",[C3]), + ["=INFO REPORT==== "++_|D3] = Lines3, + true = length(D3)>50, + true = #{legacy_header=>true,single_line=>false}==C3, + + ok = logger:update_formatter_config(?MODULE,single_line,true), + logger:info("~p~n",[D]), + {Lines4,C4} = check_log(), + [ct:log(L) || L <- Lines4], + ct:log("~p",[C4]), + ["=INFO REPORT==== "++_,D4] = Lines4, + true = length(D4)>3000, + true = #{legacy_header=>true,single_line=>true}==C4, + + ok. + +update_config(cleanup,_Config) -> + _ = logger:remove_handler(?MODULE), + _ = logger:remove_handler_filter(default,silence), + ok. + %%%----------------------------------------------------------------- %%% Internal format(Level,Msg,Meta,Config) -> @@ -520,22 +600,16 @@ format(Log,Config) -> default_time_format(Timestamp) -> default_time_format(Timestamp,false). -default_time_format(Timestamp0,Utc) when is_integer(Timestamp0) -> - Timestamp=Timestamp0+erlang:time_offset(microsecond), - %% calendar:system_time_to_rfc3339(Time,[{unit,microsecond}]). - Micro = Timestamp rem 1000000, - Sec = Timestamp div 1000000, - UniversalTime = erlang:posixtime_to_universaltime(Sec), - {Date,Time} = - if Utc -> UniversalTime; - true -> erlang:universaltime_to_localtime(UniversalTime) - end, - default_time_format(Date,Time,Micro). - -default_time_format({Y,M,D},{H,Min,S},Micro) -> - lists:flatten( - io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w", - [Y,M,D,H,Min,S,Micro])). +default_time_format(Timestamp,Utc) -> + default_time_format(Timestamp,Utc,$T). + +default_time_format(Timestamp,Utc,Sep) -> + Offset = if Utc -> "Z"; + true -> "" + end, + calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond}, + {time_designator,Sep}, + {offset,Offset}]). integer(Str) -> is_integer(list_to_integer(Str)). @@ -551,10 +625,20 @@ my_try(Fun) -> try Fun() catch C:R:S -> {C,R,hd(S)} end. timestamp() -> - erlang:monotonic_time(microsecond). + erlang:system_time(microsecond). %% necessary? add_time(#{time:=_}=Meta) -> Meta; add_time(Meta) -> Meta#{time=>timestamp()}. + +%%%----------------------------------------------------------------- +%%% handler callback +log(Log,#{formatter:={M,C}}) -> + put(log,{M:format(Log,C),C}), + ok. + +check_log() -> + {S,C} = erase(log), + {string:lexemes(S,"\n"),C}. 
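
The new update_config/1 case above exercises runtime reconfiguration of a handler's formatter, both one key at a time and with a whole config map. Against the default handler the same calls would look like this (a sketch; the handler id and the chosen keys are illustrative):

    %% Let the logger_formatter of the default handler break long terms
    %% over several lines ...
    ok = logger:update_formatter_config(default, single_line, false),
    %% ... and switch to the old error_logger-style report headers.
    ok = logger:update_formatter_config(default, #{legacy_header => true}).
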
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl index b59f5f7758..cfba35e43f 100644 --- a/lib/kernel/test/logger_legacy_SUITE.erl +++ b/lib/kernel/test/logger_legacy_SUITE.erl @@ -68,13 +68,13 @@ init_per_group(std, Config) -> ok = logger:set_handler_config( error_logger,filters, [{domain,{fun logger_filters:domain/2, - {log,prefix_of,[beam,erlang,otp]}}}]), + {log,super,[beam,erlang,otp]}}}]), Config; init_per_group(sasl, Config) -> ok = logger:set_handler_config( error_logger,filters, [{domain,{fun logger_filters:domain/2, - {log,prefix_of,[beam,erlang,otp,sasl]}}}]), + {log,super,[beam,erlang,otp,sasl]}}}]), %% cth_log_redirect checks if sasl is started before displaying %% any sasl reports - so just to see the real sasl reports in tc diff --git a/lib/kernel/test/logger_simple_SUITE.erl b/lib/kernel/test/logger_simple_SUITE.erl deleted file mode 100644 index 5d8d32492d..0000000000 --- a/lib/kernel/test/logger_simple_SUITE.erl +++ /dev/null @@ -1,247 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2018. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(logger_simple_SUITE). - --compile(export_all). - --include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/logger.hrl"). --include_lib("kernel/src/logger_internal.hrl"). - --define(check_no_log,[] = test_server:messages_get()). --define(check(Expected), - receive {log,Expected} -> - [] = test_server:messages_get() - after 1000 -> - ct:fail({report_not_received, - {line,?LINE}, - {expected,Expected}, - {got,test_server:messages_get()}}) - end). - --define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ - ":"++integer_to_list(?LINE)). --define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). --define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). - -suite() -> - [{timetrap,{seconds,30}}]. - -init_per_suite(Config) -> - #{handlers:=Hs0} = logger:i(), - Hs = lists:keydelete(cth_log_redirect,1,Hs0), - [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], - Env = [{App,Key,application:get_env(App,Key)} || - {App,Key} <- [{kernel,logger_dest}, - {kernel,logger_level}]], - [{env,Env},{logger,Hs}|Config]. - -end_per_suite(Config) -> - [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], - Hs = ?config(logger,Config), - [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], - ok. - -init_per_group(_Group, Config) -> - Config. - -end_per_group(_Group, _Config) -> - ok. - -init_per_testcase(_TestCase, Config) -> - Config. - -end_per_testcase(Case, Config) -> - try apply(?MODULE,Case,[cleanup,Config]) - catch error:undef -> ok - end, - ok. - -groups() -> - []. - -all() -> - [start_stop, - get_buffer, - replace_file, - replace_disk_log - ]. 
- -start_stop(_Config) -> - undefined = whereis(logger_simple), - register(logger_simple,self()), - {error,_} = logger:add_handler(logger_simple, - logger_simple, - #{filter_default=>log}), - unregister(logger_simple), - ok = logger:add_handler(logger_simple,logger_simple,#{filter_default=>log}), - Pid = whereis(logger_simple), - true = is_pid(Pid), - ok = logger:remove_handler(logger_simple), - false = is_pid(whereis(logger_simple)), - ok. -start_stop(cleanup,_Config) -> - logger:remove_handler(logger_simple). - -get_buffer(_Config) -> - %% Start simple without buffer - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log}), - logger:emergency(?str), - logger:alert(?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - {ok,[]} = logger_simple:get_buffer(), % no buffer - ok = logger:remove_handler(logger_simple), - - %% Start with buffer - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M1=?str), - logger:alert(M2=?str,[]), - logger:error(M3=?map_rep), - logger:info(M4=?keyval_rep), - logger:info(M41=?keyval_rep++[not_key_val]), - error_logger:error_report(some_type,M5=?map_rep), - error_logger:warning_report("some_type",M6=?map_rep), - logger:critical(M7=?str,[A7=?keyval_rep]), - logger:notice(M8=["fake",string,"line:",?LINE]), - {ok,Buffered1} = logger_simple:get_buffer(), - [#{level:=emergency,msg:={string,M1}}, - #{level:=alert,msg:={M2,[]}}, - #{level:=error,msg:={report,M3}}, - #{level:=info,msg:={report,M4}}, - #{level:=info,msg:={report,M41}}, - #{level:=error,msg:={report,#{label:={error_logger,error_report}, - report:=M5}}}, - #{level:=warning,msg:={report,#{label:={error_logger,warning_report}, - report:=M6}}}, - #{level:=critical,msg:={M7,[A7]}}, - #{level:=notice,msg:={string,M8}}] = Buffered1, - - %% Keep logging - should not buffer any more - logger:emergency(?str), - logger:alert(?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - {ok,[]} = logger_simple:get_buffer(), - ok = logger:remove_handler(logger_simple), - - %% Fill buffer and drop - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M9=?str), - M10=?str, - [logger:info(M10) || _ <- lists:seq(1,8)], - logger:error(M11=?str), - logger:error(?str), - logger:error(?str), - {ok,Buffered3} = logger_simple:get_buffer(), - 11 = length(Buffered3), - [#{level:=emergency,msg:={string,M9}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=error,msg:={string,M11}}, - #{level:=info,msg:={"Simple handler buffer full, dropped ~w messages",[2]}}] - = Buffered3, - ok. -get_buffer(cleanup,_Config) -> - logger:remove_handler(logger_simple). 
- -replace_file(Config) -> - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M1=?str), - logger:alert(M2=?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - undefined = whereis(?STANDARD_HANDLER), - PrivDir = ?config(priv_dir,Config), - File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)++".log"), - - application:set_env(kernel,logger_dest,{file,File}), - application:set_env(kernel,logger_level,info), - - ok = logger:setup_standard_handler(), - true = is_pid(whereis(?STANDARD_HANDLER)), - ok = logger_std_h:filesync(?STANDARD_HANDLER), - {ok,Bin} = file:read_file(File), - Lines = [unicode:characters_to_list(L) || - L <- binary:split(Bin,<<"\n">>,[global,trim])], - ["=EMERGENCY REPORT===="++_, - M1, - "=ALERT REPORT===="++_, - M2, - "=ERROR REPORT===="++_, - _, - _, - "=INFO REPORT===="++_, - _, - _] = Lines, - ok. -replace_file(cleanup,_Config) -> - logger:remove_handler(?STANDARD_HANDLER), - logger:remove_handler(logger_simple). - -replace_disk_log(Config) -> - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M1=?str), - logger:alert(M2=?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - undefined = whereis(?STANDARD_HANDLER), - PrivDir = ?config(priv_dir,Config), - File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)), - - application:set_env(kernel,logger_dest,{disk_log,File}), - application:set_env(kernel,logger_level,info), - - ok = logger:setup_standard_handler(), - true = is_pid(whereis(?STANDARD_HANDLER)), - ok = logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), - {ok,Bin} = file:read_file(File++".1"), - Lines = [unicode:characters_to_list(L) || - L <- binary:split(Bin,<<"\n">>,[global,trim])], - ["=EMERGENCY REPORT===="++_, - M1, - "=ALERT REPORT===="++_, - M2, - "=ERROR REPORT===="++_, - _, - _, - "=INFO REPORT===="++_, - _, - _|_] = Lines, % the tail might be an info report about opening the disk log - ok. -replace_disk_log(cleanup,_Config) -> - logger:remove_handler(?STANDARD_HANDLER), - logger:remove_handler(logger_simple). - diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl new file mode 100644 index 0000000000..271a2126de --- /dev/null +++ b/lib/kernel/test/logger_simple_h_SUITE.erl @@ -0,0 +1,210 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]). + +-define(check_no_log,[] = test_server:messages_get()). 
+-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks, [logger_test_lib]}]. + +init_per_suite(Config) -> + #{handlers:=Hs0} = logger:i(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Env = [{App,Key,application:get_env(App,Key)} || + {App,Key} <- [{kernel,logger_level}]], + [{env,Env},{logger,Hs}|Config]. + +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + replace_default, + replace_file, + replace_disk_log + ]. + +start_stop(_Config) -> + undefined = whereis(logger_simple_h), + register(logger_simple_h,self()), + {error,_} = logger:add_handler(simple, + logger_simple_h, + #{filter_default=>log}), + unregister(logger_simple_h), + ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}), + Pid = whereis(logger_simple_h), + true = is_pid(Pid), + ok = logger:remove_handler(simple), + false = is_pid(whereis(logger_simple_h)), + ok. +start_stop(cleanup,_Config) -> + logger:remove_handler(simple). + +%% This testcase just tests that it does not crash, the default handler prints +%% to stdout which we cannot read from in a detached slave. +replace_default(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [M3=?map_rep]), + log(Node, info, [M4=?keyval_rep]), + log(Node, info, [M41=?keyval_rep++[not_key_val]]), + rpc:call(Node, error_logger, error_report, [some_type,M5=?map_rep]), + rpc:call(Node, error_logger, warning_report, ["some_type",M6=?map_rep]), + log(Node, critical, [M7=?str,[A7=?keyval_rep]]), + log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + + Env = rpc:call(Node, application, get_env, [kernel, logger, []]), + ok = rpc:call(Node, logger, add_handlers, [Env]), + + ok. 
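
replace_default/1 above boots a node whose only configured handler is the simple handler (default is set to undefined), logs through both the Logger and error_logger APIs, and then installs the real handlers from the kernel environment. Locally the final step is just the following (a sketch; it assumes the handler list is kept in the kernel logger parameter, as in these suites):

    %% Fetch the configured handler list and let Logger install it. The
    %% simple handler is replaced, and the events it buffered while no real
    %% handler existed are forwarded to the new handlers, which is what the
    %% replace_file and replace_disk_log cases below verify.
    Env = application:get_env(kernel, logger, []),
    ok = logger:add_handlers(Env).
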
+ +replace_file(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [M3=?map_rep]), + log(Node, info, [M4=?keyval_rep]), + log(Node, info, [M41=?keyval_rep++[not_key_val]]), + log(Node, critical, [M7=?str,[A7=?keyval_rep]]), + log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_std_h, + #{ logger_std_h => #{ type => {file, File} }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + + {ok,Bin} = sync_and_read(Node, file, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. + +replace_disk_log(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [M3=?map_rep]), + log(Node, info, [M4=?keyval_rep]), + log(Node, info, [M41=?keyval_rep++[not_key_val]]), + log(Node, critical, [M7=?str,[A7=?keyval_rep]]), + log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_disk_log_h, + #{ disk_log_opts => #{ file => File }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + {ok,Bin} = sync_and_read(Node, disk_log, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl index 7c8d63cbbd..5764abd063 100644 --- a/lib/kernel/test/logger_std_h_SUITE.erl +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -50,11 +50,12 @@ end). suite() -> - [{timetrap,{seconds,30}}]. + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. init_per_suite(Config) -> timer:start(), % to avoid progress report - {ok,{?STANDARD_HANDLER,#{formatter:=OrigFormatter}}} = + {ok,{logger_std_h,#{formatter:=OrigFormatter}}} = logger:get_handler_config(?STANDARD_HANDLER), [{formatter,OrigFormatter}|Config]. 
@@ -241,36 +242,38 @@ formatter_fail(Config) -> filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}), Pid = whereis(?MODULE), true = is_pid(Pid), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC1} = logger:i(), + H = [Id || {Id,_,_} <- HC1], true = lists:member(?MODULE,H), %% Formatter is added automatically {ok,{_,#{formatter:={logger_formatter,_}}}} = logger:get_handler_config(?MODULE), logger:info(M1=?msg,?domain), - Got1 = try_match_file(Log,"=INFO REPORT====.*\n"++M1,5000), + Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* info: "++M1,5000), ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}), logger:info(M2=?msg,?domain), Got2 = try_match_file(Log, - Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + escape(Got1)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M2, 5000), ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}), logger:info(M3=?msg,?domain), Got3 = try_match_file(Log, - Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + escape(Got2)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M3, 5000), ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}), logger:info(?msg,?domain), try_match_file(Log, - Got3++"FORMATTER ERROR: bad_return_value", + escape(Got3)++"FORMATTER ERROR: bad_return_value", 5000), %% Check that handler is still alive and was never dead Pid = whereis(?MODULE), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC2} = logger:i(), + H = [Id || {Id,_,_} <- HC2], ok. @@ -289,10 +292,17 @@ config_fail(_Config) -> #{logger_std_h => #{restart_type => bad}, filter_default=>log, formatter=>{?MODULE,self()}}), - {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + {error,{handler_not_added,{invalid_levels,{_,1,_}}}} = logger:add_handler(?MODULE,logger_std_h, - #{logger_std_h => #{toggle_sync_qlen=>42, + #{logger_std_h => #{drop_new_reqs_qlen=>1}}), + {error,{handler_not_added,{invalid_levels,{43,42,_}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{toggle_sync_qlen=>43, drop_new_reqs_qlen=>42}}), + {error,{handler_not_added,{invalid_levels,{_,43,42}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{drop_new_reqs_qlen=>43, + flush_reqs_qlen=>42}}), ok = logger:add_handler(?MODULE,logger_std_h, #{filter_default=>log, @@ -315,29 +325,32 @@ config_fail(cleanup,_Config) -> logger:remove_handler(?MODULE). crash_std_h_to_file(Config) -> - crash_std_h(Config,?FUNCTION_NAME,logger_dest,file). + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_std_h, + #{ logger_std_h => #{ type => {file, Log} }}}], + file, Log). crash_std_h_to_file(cleanup,_Config) -> crash_std_h(cleanup). crash_std_h_to_disk_log(Config) -> - crash_std_h(Config,?FUNCTION_NAME,logger_dest,disk_log). + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_disk_log_h, + #{ disk_log_opts => #{ file => Log }}}], + disk_log,Log). crash_std_h_to_disk_log(cleanup,_Config) -> crash_std_h(cleanup). 
-crash_std_h(Config,Func,Var,Type) -> +crash_std_h(Config,Func,Var,Type,Log) -> Dir = ?config(priv_dir,Config), - File = lists:concat([?MODULE,"_",Func,".log"]), - Log = filename:join(Dir,File), + SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])), + ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])), Pa = filename:dirname(code:which(?MODULE)), - TypeAndLog = - case os:type() of - {win32,_} -> - lists:concat([" {",Type,",\\\"",Log,"\\\"}"]); - _ -> - lists:concat([" \'{",Type,",\"",Log,"\"}\'"]) - end, - Args = lists:concat([" -kernel ",Var,TypeAndLog," -pa ",Pa]), Name = lists:concat([?MODULE,"_",Func]), + Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]), ct:pal("Starting ~p with ~tp", [Name,Args]), %% Start a node which prints kernel logs to the destination specified by Type {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), @@ -578,7 +591,7 @@ write_failure(Config) -> Dir = ?config(priv_dir, Config), File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), Log = filename:join(Dir, File), - Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + Node = start_std_h_on_new_node(Config, Log), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -615,7 +628,7 @@ sync_failure(Config) -> Dir = ?config(priv_dir, Config), File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), Log = filename:join(Dir, File), - Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + Node = start_std_h_on_new_node(Config, Log), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -651,21 +664,12 @@ sync_failure(cleanup, _Config) -> Nodes = nodes(), [test_server:stop_node(Node) || Node <- Nodes]. -start_std_h_on_new_node(_Config, Func, Log) -> - Pa = filename:dirname(code:which(?MODULE)), - Dest = - case os:type() of - {win32,_} -> - lists:concat([" {file,\\\"",Log,"\\\"}"]); - _ -> - lists:concat([" \'{file,\"",Log,"\"}\'"]) - end, - Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), - Name = lists:concat([?MODULE,"_",Func]), - ct:pal("Starting ~s with ~tp", [Name,Args]), - {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), - Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), - true = is_pid(Pid), +start_std_h_on_new_node(Config, Log) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_std_h, + #{ logger_std_h => #{ type => {file,Log}}}}]}]), ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, {?MODULE,nl}]), Node. 
@@ -691,16 +695,17 @@ internal_log(Type, Term) -> op_switch_to_sync_file(Config) -> {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 500, NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, - drop_new_reqs_qlen => 501, - flush_reqs_qlen => 2000, + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => NumOfReqs+1, + flush_reqs_qlen => 2*NumOfReqs, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), %% TRecvPid = start_op_trace(), - NumOfReqs = 500, send_burst({n,NumOfReqs}, seq, {chars,79}, info), - NumOfReqs = count_lines(Log), + Lines = count_lines(Log), + ok = file:delete(Log), %% true = analyse_trace(TRecvPid, %% fun(Events) -> find_mode(async,Events) end), %% true = analyse_trace(TRecvPid, @@ -711,68 +716,82 @@ op_switch_to_sync_file(Config) -> %% fun(Events) -> find_mode(drop,Events) end), %% false = analyse_trace(TRecvPid, %% fun(Events) -> find_mode(flush,Events) end), - ok = file:delete(Log), %% stop_op_trace(TRecvPid), + NumOfReqs = Lines, ok. op_switch_to_sync_file(cleanup, _Config) -> ok = stop_handler(?MODULE). op_switch_to_sync_tty(Config) -> {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NumOfReqs = 500, NewHConfig = HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, - drop_new_reqs_qlen => 501, - flush_reqs_qlen => 2000, + drop_new_reqs_qlen => NumOfReqs+1, + flush_reqs_qlen => 2*NumOfReqs, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, send_burst({n,NumOfReqs}, seq, {chars,79}, info), ok. op_switch_to_sync_tty(cleanup, _Config) -> ok = stop_handler(?MODULE). +op_switch_to_drop_file() -> + [{timetrap,{seconds,180}}]. op_switch_to_drop_file(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 3, - flush_reqs_qlen => 600, + Test = + fun() -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 300, + Procs = 2, + Bursts = 10, + NewHConfig = + HConfig#{logger_std_h => + StdHConfig#{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => + Procs*NumOfReqs*Bursts, enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - %% TRecvPid = start_op_trace(), - NumOfReqs = 500, - send_burst({n,NumOfReqs}, seq, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages dropped = ~w (~w)", - [NumOfReqs-Logged,NumOfReqs]), - true = (Logged < NumOfReqs), - %% true = analyse_trace(TRecvPid, - %% fun(Events) -> find_mode(async,Events) end), - %% true = analyse_trace(TRecvPid, - %% fun(Events) -> find_mode(drop,Events) end), - %% false = analyse_trace(TRecvPid, - %% fun(Events) -> find_mode(flush,Events) end), - %% true = analyse_trace(TRecvPid, - %% fun(Events) -> find_switch(async,drop,Events) - %% orelse find_switch(sync,drop,Events) - %% end), - ok = file:delete(Log), - %% stop_op_trace(TRecvPid), - ok. + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% It sometimes happens that the handler gets the + %% requests in a slow enough pace so that dropping + %% never occurs. Therefore, lets generate a number of + %% bursts to increase the chance of message buildup. 
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1, Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), + true = (Logged < (Procs*NumOfReqs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_drop_file(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). op_switch_to_drop_tty(Config) -> {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NumOfReqs = 300, + Procs = 2, NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 3, - flush_reqs_qlen => 600, + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => + Procs*NumOfReqs+1, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, - send_burst({n,NumOfReqs}, seq, {chars,79}, info), + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), ok. op_switch_to_drop_tty(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -780,32 +799,54 @@ op_switch_to_drop_tty(cleanup, _Config) -> op_switch_to_flush_file() -> [{timetrap,{minutes,3}}]. op_switch_to_flush_file(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - %% it's important that both async and sync requests have been queued - %% when the flush happens (verify with coverage of flush_log_requests/2) - - NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 99, - flush_reqs_qlen => 100, + Test = + fun() -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% NOTE: it's important that both async and sync + %% requests have been queued when the flush happens + %% (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_std_h => + StdHConfig#{toggle_sync_qlen => 2, + %% disable drop mode + drop_new_reqs_qlen => 300, + flush_reqs_qlen => 300, enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 10000, - Procs = 100, - send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages flushed/dropped = ~w (~w)", - [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), - true = (Logged < (NumOfReqs*Procs)), - - %%! --- Thu Apr 12 13:46:00 2018 --- peppe was here! - %%! TODO: Verify that handler has switched to flush mode - - ok = file:delete(Log), - ok. + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1500, + Procs = 10, + Bursts = 10, + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that flushing + %% never occurs, or it gets all messages at once, + %% causing all messages to get flushed (no dropping of + %% sync messages gets tested). Therefore, lets + %% generate a number of bursts to increase the chance + %% of message buildup in some random fashion. 
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1,Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), + true = (Logged < (NumOfReqs*Procs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_flush_file(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). op_switch_to_flush_tty(Config) -> {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), @@ -815,12 +856,13 @@ op_switch_to_flush_tty(Config) -> NewHConfig = HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 99, + %% disable drop mode + drop_new_reqs_qlen => 100, flush_reqs_qlen => 100, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 10000, - Procs = 10, + NumOfReqs = 1000, + Procs = 100, send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), ok. op_switch_to_flush_tty(cleanup, _Config) -> @@ -904,10 +946,10 @@ kill_disabled(cleanup, _Config) -> ok = stop_handler(?MODULE). qlen_kill_new(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>10, @@ -927,7 +969,7 @@ qlen_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), true = is_pid(whereis(?MODULE)), ok after @@ -941,7 +983,7 @@ qlen_kill_new(cleanup, _Config) -> %% choke the standard handler on remote node to verify the termination %% works as expected -qlen_kill_std(Config) -> +qlen_kill_std(_Config) -> %%! HERE %% Dir = ?config(priv_dir, Config), %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), @@ -955,10 +997,10 @@ qlen_kill_std(Config) -> {skip,"Not done yet"}. mem_kill_new(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>50000, @@ -978,7 +1020,7 @@ mem_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter * 2), + timer:sleep(RestartAfter * 3), true = is_pid(whereis(?MODULE)), ok after @@ -992,7 +1034,7 @@ mem_kill_new(cleanup, _Config) -> %% choke the standard handler on remote node to verify the termination %% works as expected -mem_kill_std(Config) -> +mem_kill_std(_Config) -> {skip,"Not done yet"}. 
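qlen_kill_new and mem_kill_new above replace the hard-coded 2000 ms RestartAfter with the shared ?HANDLER_RESTART_AFTER macro and wait a larger margin before asserting that the killed handler has come back. A rough sketch of the overload-kill configuration these cases apply; the handler_restart_after key and the literal values are assumptions for illustration, only enable_kill_overloaded and handler_overloaded_qlen appear verbatim in the hunks:

%% Sketch: overload-kill settings as used by the kill/restart test cases.
%% handler_restart_after and the values shown are illustrative assumptions.
enable_overload_kill(HandlerId, HConfig, StdHConfig, RestartAfter) ->
    New = HConfig#{logger_std_h =>
                       StdHConfig#{enable_kill_overloaded => true,
                                   handler_overloaded_qlen => 10,
                                   handler_restart_after => RestartAfter}},
    ok = logger:set_handler_config(HandlerId, New).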
restart_after(Config) -> @@ -1016,7 +1058,7 @@ restart_after(Config) -> end, {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig2 = HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>10, @@ -1028,7 +1070,7 @@ restart_after(Config) -> send_burst({n,100}, {spawn,2,0}, {chars,79}, info), receive {'DOWN', MRef2, _, _, _Info2} -> - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), Pid1 = whereis(?MODULE), true = is_pid(Pid1), false = (Pid1 == Pid0), @@ -1074,7 +1116,7 @@ handler_requests_under_load(Config) -> NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), ok = file:delete(Log). -handler_requests_under_load(cleanup, Config) -> +handler_requests_under_load(cleanup, _Config) -> ok = stop_handler(?MODULE). send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> @@ -1126,8 +1168,9 @@ start_handler(Name, FuncName, Config) -> {Log,HConfig,StdHConfig}. stop_handler(Name) -> - ok = logger:remove_handler(Name), - ct:pal("Handler ~p stopped!", [Name]). + R = logger:remove_handler(Name), + ct:pal("Handler ~p stopped! Result: ~p", [Name,R]), + R. count_lines(File) -> wait_until_written(File, -1), @@ -1299,6 +1342,30 @@ try_match_file(_,Pattern,_,Incorrect) -> [Pattern,Incorrect]), erlang:error({error,not_matching_pattern,Pattern,Incorrect}). +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). + +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + + %%%----------------------------------------------------------------- %%% start_op_trace() -> @@ -1339,17 +1406,17 @@ find_mode(flush, Events) -> find_mode(Mode, Events) -> lists:keymember([{mode,Mode}], 3, Events). -find_switch(From, To, Events) -> - try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, - {trace_call,check_load,[#{mode := From}]}) -> - throw(match); - (Event, _) -> - Event - end, undefined, Events) of - _ -> false - catch - throw:match -> true - end. +%% find_switch(_From, To, Events) -> +%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, +%% {trace_call,check_load,[#{mode := From}]}) -> +%% throw(match); +%% (Event, _) -> +%% Event +%% end, undefined, Events) of +%% _ -> false +%% catch +%% throw:match -> true +%% end. analyse_trace(TRecvPid, TestFun) -> TRecvPid ! {test,self(),TestFun}, @@ -1411,7 +1478,7 @@ tpl([{M,F,A}|Trace]) -> tpl([]) -> ok. -tracer({trace,_,call,{logger_std_h,handle_cast,[{Op,_}|_]}}, +tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) -> maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}}, @@ -1451,3 +1518,10 @@ check_tracer(T,TimeoutFun) -> dbg:stop_clear(), TimeoutFun() end. + +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. 
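The new repeat_until_ok/2 helper above runs a test fun up to N times, sleeping five seconds between attempts, and returns {ok,{FailedAttempts,Result}} on the first success or {fails,Reason} with the first crash reason if every attempt fails. A small usage sketch; flaky_check/0 and some.log are invented, count_lines/1 is the existing suite helper:

%% Sketch: wrapping a timing-sensitive assertion in repeat_until_ok/2,
%% the same way op_switch_to_drop_file/1 and op_switch_to_flush_file/1
%% do above. flaky_check/0 is an invented example body.
flaky_check() ->
    Logged = count_lines("some.log"),
    true = (Logged > 0),
    ok.

run_flaky_check() ->
    case repeat_until_ok(fun flaky_check/0, 10) of
        {ok,{Failures,_Result}} ->
            ct:log("Failed ~w times before success!", [Failures]);
        {fails,Reason} ->
            ct:fail(Reason)
    end.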
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl new file mode 100644 index 0000000000..4ac05e6480 --- /dev/null +++ b/lib/kernel/test/logger_test_lib.erl @@ -0,0 +1,82 @@ +% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_test_lib). + +-include_lib("kernel/src/logger_internal.hrl"). + +-export([setup/2, log/3, sync_and_read/3]). + +-export([init/2, + pre_init_per_suite/3, pre_init_per_testcase/4, + post_end_per_testcase/5, post_end_per_suite/3]). + +setup(Config,Vars) -> + FuncStr = lists:concat([proplists:get_value(suite, Config), "_", + proplists:get_value(tc, Config)]), + ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr), + file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])), + case test_server:start_node(proplists:get_value(tc, Config), slave, + [{args, ["-pa ",filename:dirname(code:which(?MODULE)), + " -boot start_sasl -kernel start_timer true " + "-config ",ConfigFileName]}]) of + {ok, Node} -> + L = rpc:call(Node, logger, i, []), + ct:log("~p",[L]), + {ok, L, Node}; + {error, Reason} -> + ct:log("Failed to start node: ~p",[Reason]), + error + end. + +log(Node, F, A) -> + log(Node, logger, F, A). +log(Node, M, F, A) -> + MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) }, + rpc:call(Node, M, F, A ++ [MD]). + +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,disk_log_sync,[?STANDARD_HANDLER]), + file:read_file(Log ++ ".1"); +sync_and_read(Node, file,Log) -> + ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + file:read_file(Log). + + +init(_, _) -> + {ok, []}. + +pre_init_per_suite(_Suite, Config, State) -> + {[{nodes, nodes()} | Config], State}. + +pre_init_per_testcase(Suite, TC, Config, State) -> + cleanup(Config), + {[{suite, Suite}, {tc, TC} | Config], State}. + +post_end_per_testcase(_, _TC, Config, Res, State) -> + cleanup(Config), + {Res, State}. + +post_end_per_suite(_, Config, State) -> + cleanup(Config), + {Config, State}. + +cleanup(Config) -> + [test_server:stop_node(N) || N <- nodes(), + not lists:member(N, proplists:get_value(nodes, Config))]. diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl index 591fbb2125..abbc301360 100644 --- a/lib/kernel/test/os_SUITE.erl +++ b/lib/kernel/test/os_SUITE.erl @@ -227,8 +227,8 @@ find_executable(Config) when is_list(Config) -> DataDir = proplists:get_value(data_dir, Config), %% Smoke test. - case lib:progname() of - erl -> + case ct:get_progname() of + "erl" -> ErlPath = os:find_executable("erl"), true = is_list(ErlPath), true = filelib:is_regular(ErlPath); @@ -388,7 +388,7 @@ comp(Expected, Got) -> ct:fail(failed) end. -%% Like lib:nonl/1, but strips \r as well as \n. +%% strips \n and \r\n from end of string strip_nl([$\r, $\n]) -> []; strip_nl([$\n]) -> []; |
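The new logger_test_lib module above doubles as a ct_hooks callback module (the pre_/post_ callbacks stop any slave nodes a test case leaves behind) and as a small API for starting a node with a given kernel configuration: setup/2 writes the configuration to a .config file and boots a slave node, log/3,4 issues a log call on that node via rpc, and sync_and_read/3 syncs a std_h or disk_log handler and reads the log file back. A sketch of how a test case might use it; the handler configuration mirrors the one used in the suites above, while file_logging/1 and my_log.txt are invented names:

%% Sketch: a test case built on logger_test_lib. The case name and file
%% name are invented; the handler configuration follows the pattern used
%% elsewhere in this patch.
file_logging(Config) ->
    Log = filename:join(proplists:get_value(priv_dir, Config), "my_log.txt"),
    {ok, _HandlerInfo, Node} =
        logger_test_lib:setup(
          Config,
          [{logger,
            [{handler, default, logger_std_h,
              #{logger_std_h => #{type => {file, Log}}}}]}]),
    ok = logger_test_lib:log(Node, emergency, ["hello logger"]),
    {ok, Bin} = logger_test_lib:sync_and_read(Node, file, Log),
    {_, _} = binary:match(Bin, <<"hello logger">>),
    ok.

Because the hook callbacks record the nodes that existed before each test case and stop any extra ones afterwards, a case using setup/2 needs no explicit node cleanup of its own.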