Diffstat (limited to 'lib')
111 files changed, 5209 insertions, 2821 deletions
diff --git a/lib/common_test/doc/src/ct_hooks_chapter.xml b/lib/common_test/doc/src/ct_hooks_chapter.xml index 7ecc2e4298..3957c0f4a5 100644 --- a/lib/common_test/doc/src/ct_hooks_chapter.xml +++ b/lib/common_test/doc/src/ct_hooks_chapter.xml @@ -501,12 +501,13 @@ <tag><c>cth_log_redirect</c></tag> <item> <p>Built-in</p> - <p>Captures all <c>error_logger</c> and SASL logging - events and prints them to the current test case log. If an event cannot be - associated with a test case, it is printed in the <c>Common Test</c> framework log. + <p>Captures all log events that would normally be printed by the default + logger handler, and prints them to the current test case log. + If an event cannot be associated with a test case, it is printed in + the <c>Common Test</c> framework log. This happens for test cases running in parallel and events occuring in-between test cases. You can configure the level of - <seealso marker="sasl:sasl_app">SASL</seealso> events report + <seealso marker="sasl:sasl_app">SASL</seealso> reports using the normal SASL mechanisms.</p> </item> <tag><c>cth_surefire</c></tag> diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl index 235956a714..3b6ffa8d68 100644 --- a/lib/compiler/test/bs_match_SUITE.erl +++ b/lib/compiler/test/bs_match_SUITE.erl @@ -330,6 +330,11 @@ save_restore(Config) when is_list(Config) -> {"-",<<"x">>} = nnn(C), {"-",<<"x">>} = ooo(C), + a = multiple_matches(<<777:16>>, <<777:16>>), + b = multiple_matches(<<777:16>>, <<999:16>>), + c = multiple_matches(<<777:16>>, <<57:8>>), + d = multiple_matches(<<17:8>>, <<1111:16>>), + Bin = <<-1:64>>, case bad_float_unpack_match(Bin) of -1 -> ok; @@ -357,6 +362,11 @@ nnn(<<Char, Tail/binary>>) -> {[Char],Tail}. %% Buggy Tail! ooo(<<" - ", Tail/binary>>) -> Tail; ooo(<<Char, Tail/binary>>) -> {[Char],Tail}. +multiple_matches(<<Y:16>>, <<Y:16>>) -> a; +multiple_matches(<<_:16>>, <<_:16>>) -> b; +multiple_matches(<<_:16>>, <<_:8>>) -> c; +multiple_matches(<<_:8>>, <<_:16>>) -> d. + bad_float_unpack_match(<<F:64/float>>) -> F; bad_float_unpack_match(<<I:64/integer-signed>>) -> I. diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c index df4e2245f4..6e113ef39e 100644 --- a/lib/crypto/c_src/crypto.c +++ b/lib/crypto/c_src/crypto.c @@ -544,6 +544,7 @@ static int zero_terminate(ErlNifBinary bin, char **buf); #endif static int library_refc = 0; /* number of users of this dynamic library */ +static int library_initialized = 0; static ErlNifFunc nif_funcs[] = { {"info_lib", 0, info_lib}, @@ -1005,14 +1006,14 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info) PRINTF_ERR0("CRYPTO: Could not open resource type 'ENGINE_CTX'"); return __LINE__; } +#endif - if (library_refc > 0) { + if (library_initialized) { /* Repeated loading of this library (module upgrade). * Atoms and callbacks are already set, we are done. 
*/ return 0; } -#endif atom_true = enif_make_atom(env,"true"); atom_false = enif_make_atom(env,"false"); @@ -1119,10 +1120,6 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info) atom_password = enif_make_atom(env,"password"); #endif - init_digest_types(env); - init_cipher_types(env); - init_algorithms_types(env); - #ifdef HAVE_DYNAMIC_CRYPTO_LIB { void* handle; @@ -1168,6 +1165,11 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info) } #endif /* OPENSSL_THREADS */ + init_digest_types(env); + init_cipher_types(env); + init_algorithms_types(env); + + library_initialized = 1; return 0; } diff --git a/lib/dialyzer/src/dialyzer_dataflow.erl b/lib/dialyzer/src/dialyzer_dataflow.erl index c5f93a3392..45b4abb253 100644 --- a/lib/dialyzer/src/dialyzer_dataflow.erl +++ b/lib/dialyzer/src/dialyzer_dataflow.erl @@ -102,6 +102,8 @@ | 'undefined', % race fun_homes :: dict:dict(label(), mfa()) | 'undefined', % race + reachable_funs :: sets:set(label()) + | 'undefined', % race plt :: dialyzer_plt:plt() | 'undefined', % race opaques :: [type()] @@ -269,9 +271,11 @@ traverse(Tree, Map, State) -> case state__warning_mode(State) of true -> {State, Map, Type}; false -> - State2 = state__add_work(get_label(Tree), State), + FunLbl = get_label(Tree), + State2 = state__add_work(FunLbl, State), State3 = state__update_fun_env(Tree, Map, State2), - {State3, Map, Type} + State4 = state__add_reachable(FunLbl, State3), + {State4, Map, Type} end; 'let' -> handle_let(Tree, Map, State); @@ -3039,25 +3043,35 @@ state__new(Callgraph, Codeserver, Tree, Plt, Module, Records) -> {TreeMap, FunHomes} = build_tree_map(Tree, Callgraph), Funs = dict:fetch_keys(TreeMap), FunTab = init_fun_tab(Funs, dict:new(), TreeMap, Callgraph, Plt), - ExportedFuns = - [Fun || Fun <- Funs--[top], dialyzer_callgraph:is_escaping(Fun, Callgraph)], - Work = init_work(ExportedFuns), + ExportedFunctions = + [Fun || + Fun <- Funs--[top], + dialyzer_callgraph:is_escaping(Fun, Callgraph), + dialyzer_callgraph:lookup_name(Fun, Callgraph) =/= error + ], + Work = init_work(ExportedFunctions), Env = lists:foldl(fun(Fun, Env) -> dict:store(Fun, map__new(), Env) end, dict:new(), Funs), #state{callgraph = Callgraph, codeserver = Codeserver, envs = Env, fun_tab = FunTab, fun_homes = FunHomes, opaques = Opaques, plt = Plt, races = dialyzer_races:new(), records = Records, warning_mode = false, warnings = [], work = Work, tree_map = TreeMap, - module = Module}. + module = Module, reachable_funs = sets:new()}. state__warning_mode(#state{warning_mode = WM}) -> WM. state__set_warning_mode(#state{tree_map = TreeMap, fun_tab = FunTab, - races = Races} = State) -> + races = Races, callgraph = Callgraph, + reachable_funs = ReachableFuns} = State) -> ?debug("==========\nStarting warning pass\n==========\n", []), Funs = dict:fetch_keys(TreeMap), - State#state{work = init_work([top|Funs--[top]]), + Work = + [Fun || + Fun <- Funs--[top], + dialyzer_callgraph:lookup_name(Fun, Callgraph) =/= error orelse + sets:is_element(Fun, ReachableFuns)], + State#state{work = init_work(Work), fun_tab = FunTab, warning_mode = true, races = dialyzer_races:put_race_analysis(true, Races)}. @@ -3149,7 +3163,8 @@ state__get_race_warnings(#state{races = Races} = State) -> State1#state{races = Races1}. 
state__get_warnings(#state{tree_map = TreeMap, fun_tab = FunTab, - callgraph = Callgraph, plt = Plt} = State) -> + callgraph = Callgraph, plt = Plt, + reachable_funs = ReachableFuns} = State) -> FoldFun = fun({top, _}, AccState) -> AccState; ({FunLbl, Fun}, AccState) -> @@ -3184,7 +3199,12 @@ state__get_warnings(#state{tree_map = TreeMap, fun_tab = FunTab, GenRet = dialyzer_contracts:get_contract_return(C), not t_is_unit(GenRet) end, - case Warn of + %% Do not output warnings for unreachable funs. + case + Warn andalso + (dialyzer_callgraph:lookup_name(FunLbl, Callgraph) =/= error + orelse sets:is_element(FunLbl, ReachableFuns)) + of true -> case classify_returns(Fun) of no_match -> @@ -3255,6 +3275,10 @@ state__get_args_and_status(Tree, #state{fun_tab = FunTab}) -> {ok, {ArgTypes, _}} -> {ArgTypes, true} end. +state__add_reachable(FunLbl, #state{reachable_funs = ReachableFuns}=State) -> + NewReachableFuns = sets:add_element(FunLbl, ReachableFuns), + State#state{reachable_funs = NewReachableFuns}. + build_tree_map(Tree, Callgraph) -> Fun = fun(T, {Dict, Homes, FunLbls} = Acc) -> diff --git a/lib/dialyzer/test/small_SUITE_data/results/unused_funs b/lib/dialyzer/test/small_SUITE_data/results/unused_funs new file mode 100644 index 0000000000..c468457ead --- /dev/null +++ b/lib/dialyzer/test/small_SUITE_data/results/unused_funs @@ -0,0 +1,5 @@ + +unused_funs.erl:10: The pattern 'error' can never match the type 'other_error' +unused_funs.erl:15: Function not_used/0 will never be called +unused_funs.erl:19: Function foo/1 will never be called +unused_funs.erl:7: Function test/0 has no local return diff --git a/lib/dialyzer/test/small_SUITE_data/src/unused_funs.erl b/lib/dialyzer/test/small_SUITE_data/src/unused_funs.erl new file mode 100644 index 0000000000..c24cf3ea81 --- /dev/null +++ b/lib/dialyzer/test/small_SUITE_data/src/unused_funs.erl @@ -0,0 +1,21 @@ +%% See also ERL-593. + +-module(unused_funs). + +-export([test/0]). + +test() -> % "has no local return" + Var = outer_scope, + case other_error of + error -> % "can never match" + %% No warnings "no local return" and "_ = 1 can never match 0" (!) + foo(fun() -> {Var, 1 = 0} end) + end. + +not_used() -> % "will never be called" + %% No warnings "no local return" and "1 can never match 0". + foo(fun() -> 1 = 0 end). + +foo(Fun) -> % "will never be called" + 1 = 0, % No pattern match warning (foo/1 is not traversed at all). + Fun(). diff --git a/lib/erl_docgen/priv/xsl/Makefile b/lib/erl_docgen/priv/xsl/Makefile index d0dd227169..d381bd4cf7 100644 --- a/lib/erl_docgen/priv/xsl/Makefile +++ b/lib/erl_docgen/priv/xsl/Makefile @@ -1,7 +1,7 @@ # # %CopyrightBegin% # -# Copyright Ericsson AB 2009-2016. All Rights Reserved. +# Copyright Ericsson AB 2009-2018. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -44,7 +44,8 @@ XSL_FILES = \ db_html.xsl \ db_html_params.xsl \ db_man.xsl \ - db_eix.xsl + db_eix.xsl \ + db_funcs.xsl # ---------------------------------------------------- diff --git a/lib/erl_docgen/vsn.mk b/lib/erl_docgen/vsn.mk index 95b2329ac5..a556b73103 100644 --- a/lib/erl_docgen/vsn.mk +++ b/lib/erl_docgen/vsn.mk @@ -1 +1 @@ -ERL_DOCGEN_VSN = 0.7.2 +ERL_DOCGEN_VSN = 0.7.3 diff --git a/lib/hipe/main/hipe.erl b/lib/hipe/main/hipe.erl index 5e6a60326d..ac2e6c1e3b 100644 --- a/lib/hipe/main/hipe.erl +++ b/lib/hipe/main/hipe.erl @@ -852,8 +852,8 @@ finalize_fun_sequential({MFA, Icode}, Opts, Servers) -> print_crash_message(What, Error, StackTrace) -> StackFun = fun(_,_,_) -> false end, FormatFun = fun (Term, _) -> io_lib:format("~p", [Term]) end, - StackTrace = erl_error:format_stacktrace(1, StackTrace, - StackFun, FormatFun), + StackTraceS = erl_error:format_stacktrace(1, StackTrace, + StackFun, FormatFun), WhatS = case What of {M,F,A} -> io_lib:format("~w:~w/~w", [M,F,A]); Mod -> io_lib:format("~w", [Mod]) @@ -862,7 +862,7 @@ print_crash_message(What, Error, StackTrace) -> "while compiling ~s~n" "crash reason: ~p~n" "~s~n", - [WhatS, Error, StackTrace]). + [WhatS, Error, StackTraceS]). pp_server_start(Opts) -> set_architecture(Opts), diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml index fdb2d29f63..8850c1736b 100644 --- a/lib/kernel/doc/src/config.xml +++ b/lib/kernel/doc/src/config.xml @@ -37,10 +37,10 @@ data in the system configuration file <c>Name.config</c>.</p> <p>Configuration parameter values in the configuration file override the values in the application resource files (see - <seealso marker="app"><c>app(4)</c></seealso>. + <seealso marker="app"><c>app(4)</c></seealso>). The values in the configuration file can be overridden by command-line flags (see - <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>.</p> + <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>).</p> <p>The value of a configuration parameter is retrieved by calling <c>application:get_env/1,2</c>.</p> </description> diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml index cb6165c73e..f418aa5bbe 100644 --- a/lib/kernel/doc/src/error_logger.xml +++ b/lib/kernel/doc/src/error_logger.xml @@ -33,44 +33,35 @@ <description> <note> - <p>In OTP-21, a new API for logging was added to Erlang/OTP. The + <p>In Erlang/OTP 21.0, a new API for logging was added. The old <c>error_logger</c> module can still be used by legacy - code, but new code should use the new API instead.</p> + code, but log events are redirected to the new Logger API. New + code should use the Logger API directly.</p> + <p><c>error_logger</c> is no longer started by default, but is + automatically started when an event handler is added + with <c>error_logger:add_report_handler/1,2</c>. The <c>error_logger</c> + module is then also added as a handler to the new logger.</p> <p>See <seealso marker="logger"><c>logger(3)</c></seealso> and the <seealso marker="logger_chapter">Logging</seealso> chapter - in the user's guide for more information.</p> + in the User's Guide for more information.</p> </note> <p>The Erlang <em>error logger</em> is an event manager (see <seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>), - registered as <c>error_logger</c>. 
Errors, warnings, and info events - are sent to the error logger from the Erlang runtime system and - the different Erlang/OTP applications. The events are, by default, - logged to the terminal. Notice that an event from a process <c>P</c> is - logged at the node of the group leader of <c>P</c>. This means - that log output is directed to the node from which a process was - created, which not necessarily is the same node as where it is - executing.</p> - <p>Initially, <c>error_logger</c> has only a primitive event - handler, which buffers and prints the raw event messages. During - system startup, the Kernel application replaces this with a - <em>standard event handler</em>, by default one that writes - nicely formatted output to the terminal. Kernel can also be - configured so that events are logged to a file instead, or not logged at all, - see <seealso marker="kernel_app"><c>kernel(6)</c></seealso>.</p> - <p>Also the SASL application, if started, adds its own event - handler, which by default writes supervisor, crash, and progress - reports to the terminal. See - <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>.</p> - <p>It is recommended that user-defined applications report - errors through the error logger to get uniform reports. - User-defined event handlers can be added to handle application-specific - events, see - <seealso marker="#add_report_handler/1"><c>add_report_handler/1,2</c></seealso>. - Also, a useful event handler is provided in STDLIB for multi-file - logging of events, see - <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>.</p> + registered as <c>error_logger</c>.</p> + <p>Error logger is no longer started by default, but is + automatically started when an event handler is added + with <seealso marker="#add_report_handler/1"> + <c>add_report_handler/1,2</c></seealso>. The <c>error_logger</c> + module is then also added as a handler to the new logger, + causing log events to be forwarded from logger to error logger, + and consequently to all installed error logger event + handlers.</p> + <p>User-defined event handlers can be added to handle application-specific + events.</p> + <p>Existing event handlers provided by STDLIB and SASL are still + available, but are no longer used by OTP.</p> <p>Warning events were introduced in Erlang/OTP R9C and are enabled by default as from Erlang/OTP 18.0. To retain backwards compatibility with existing user-defined event handlers, the warning events can be @@ -99,6 +90,9 @@ The function returns <c>ok</c> if successful.</p> <p>The event handler must be able to handle the events in this module, see section <seealso marker="#events">Events</seealso>.</p> + <p>The first time this function is called, + <c>error_logger</c> is added as a Logger handler, and + the <c>error_logger</c> process is started.</p> </desc> </func> <func> @@ -108,37 +102,40 @@ <p>Deletes an event handler from the error logger by calling <c>gen_event:delete_handler(error_logger, <anno>Handler</anno>, [])</c>, see <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>.</p> + <p>If no more event handlers exist after the deletion, + <c>error_logger</c> is removed as a Logger handler, and + the <c>error_logger</c> process is stopped.</p> </desc> </func> <func> <name name="error_msg" arity="1"/> <name name="error_msg" arity="2"/> <name name="format" arity="2"/> - <fsummary>Send a standard error event to the error logger.</fsummary> + <fsummary>Log a standard error event.</fsummary> <desc> - <p>Sends a standard error event to the error logger. 
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard error event. The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. - The event is handled by the standard event handler.</p> + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> -1> <input>error_logger:error_msg("An error occurred in ~p~n", [a_module]).</input> - -=ERROR REPORT==== 11-Aug-2005::14:03:19 === +1> <input>error_logger:error_msg("An error occurred in ~p", [a_module]).</input> +=ERROR REPORT==== 22-May-2018::11:18:43.376917 === An error occurred in a_module ok</pre> <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use - <seealso marker="#error_report/1"><c>error_report/1</c></seealso> - instead.</p> - </warning> - <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -146,79 +143,101 @@ ok</pre> </func> <func> <name name="error_report" arity="1"/> - <fsummary>Send a standard error report event to the error logger.</fsummary> + <fsummary>Log a standard error event.</fsummary> <desc> - <p>Sends a standard error report event to the error logger. - The event is handled by the standard event handler.</p> + <p>Log a standard error event. Error logger forwards the event + to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> 2> <input>error_logger:error_report([{tag1,data1},a_term,{tag2,data}]).</input> - -=ERROR REPORT==== 11-Aug-2005::13:45:41 === +=ERROR REPORT==== 22-May-2018::11:24:23.699306 === tag1: data1 a_term tag2: data ok 3> <input>error_logger:error_report("Serious error in my module").</input> - -=ERROR REPORT==== 11-Aug-2005::13:45:49 === +=ERROR REPORT==== 22-May-2018::11:24:45.972445 === Serious error in my module ok</pre> </desc> </func> <func> <name name="error_report" arity="2"/> - <fsummary>Send a user-defined error report event to the error logger.</fsummary> + <fsummary>Log a user-defined error event.</fsummary> <desc> - <p>Sends a user-defined error report event to the error logger. - An event handler to handle the event is supposed to have been - added. The event is ignored by the standard event handler.</p> + <p>Log a user-defined error event. 
Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> <p>It is recommended that <c><anno>Report</anno></c> follows the same structure as for <seealso marker="#error_report/1"><c>error_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> <name name="get_format_depth" arity="0"/> <fsummary>Get the value of the Kernel application variable - <c>logger_format_depth</c>.</fsummary> + <c>error_logger_format_depth</c>.</fsummary> <desc> <p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the - value of - <seealso marker="kernel_app#logger_format_depth"> - logger_format_depth</seealso> + value of <c>error_logger_format_depth</c> in the Kernel application, if Depth is an integer. Otherwise, <c>unlimited</c> is returned.</p> - <p>For backwards compatibility, the value - of <c>error_logger_format_depth</c> is used - if <c>logger_format_depth</c> is not set.</p> + <note> + <p>The <c>error_logger_format_depth</c> variable + is <seealso marker="kernel_app#deprecated-configuration-parameters"> + deprecated</seealso> since + the <seealso marker="logger">Logger API</seealso> was + introduced in Erlang/OTP 21.0. The variable, and this + function, are kept for backwards compatibility since they + still might be used by legacy report handlers.</p> + </note> </desc> </func> <func> <name name="info_msg" arity="1"/> <name name="info_msg" arity="2"/> - <fsummary>Send a standard information event to the error logger.</fsummary> + <fsummary>Log a standard information event.</fsummary> <desc> - <p>Sends a standard information event to the error logger. - The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard information event. The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. The event is handled by the standard event handler.</p> + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> -1> <input>error_logger:info_msg("Something happened in ~p~n", [a_module]).</input> - -=INFO REPORT==== 11-Aug-2005::14:06:15 === +1> <input>error_logger:info_msg("Something happened in ~p", [a_module]).</input> +=INFO REPORT==== 22-May-2018::12:03:32.612462 === Something happened in a_module ok</pre> <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. 
When in doubt, use <c>info_report/1</c> instead.</p> - </warning> - <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -226,37 +245,52 @@ ok</pre> </func> <func> <name name="info_report" arity="1"/> - <fsummary>Send a standard information report event to the error logger.</fsummary> + <fsummary>Log a standard information event.</fsummary> <desc> - <p>Sends a standard information report event to the error - logger. The event is handled by the standard event handler.</p> + <p>Log a standard information event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> 2> <input>error_logger:info_report([{tag1,data1},a_term,{tag2,data}]).</input> - -=INFO REPORT==== 11-Aug-2005::13:55:09 === +=INFO REPORT==== 22-May-2018::12:06:35.994440 === tag1: data1 a_term tag2: data ok 3> <input>error_logger:info_report("Something strange happened").</input> - -=INFO REPORT==== 11-Aug-2005::13:55:36 === +=INFO REPORT==== 22-May-2018::12:06:49.066872 === Something strange happened ok</pre> </desc> </func> <func> <name name="info_report" arity="2"/> - <fsummary>Send a user-defined information report event to the error logger.</fsummary> + <fsummary>Log a user-defined information event.</fsummary> <desc> - <p>Sends a user-defined information report event to the error - logger. An event handler to handle the event is supposed to - have been added. The event is ignored by the standard event - handler.</p> + <p>Log a user-defined information event. Error logger forwards + the event to Logger, including metadata that allows + backwards compatibility with legacy error logger event + handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> <p>It is recommended that <c><anno>Report</anno></c> follows the same structure as for <seealso marker="#info_report/1"><c>info_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> @@ -355,24 +389,27 @@ ok</pre> <func> <name name="warning_msg" arity="1"/> <name name="warning_msg" arity="2"/> - <fsummary>Send a standard warning event to the error logger.</fsummary> + <fsummary>Log a standard warning event.</fsummary> <desc> - <p>Sends a standard warning event to the error logger. - The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard warning event. 
The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. - The event is handled by the standard event handler. It is tagged - as an error, warning, or info, see + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler. The log + level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> - <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use <c>warning_report/1</c> instead.</p> - </warning> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -380,24 +417,43 @@ ok</pre> </func> <func> <name name="warning_report" arity="1"/> - <fsummary>Send a standard warning report event to the error logger.</fsummary> + <fsummary>Log a standard warning event.</fsummary> <desc> - <p>Sends a standard warning report event to the error logger. - The event is handled by the standard event handler. It is - tagged as an error, warning, or info, see + <p>Log a standard warning event. Error logger forwards the event + to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler. The log + level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> <name name="warning_report" arity="2"/> - <fsummary>Send a user-defined warning report event to the error logger.</fsummary> + <fsummary>Log a user-defined warning event.</fsummary> <desc> - <p>Sends a user-defined warning report event to the error - logger. An event handler to handle the event is supposed to - have been added. The event is ignored by the standard event - handler. It is tagged as an error, warning, or info, - depending on the value of + <p>Log a user-defined warning event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. 
A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> + <p>The log level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> + <p>It is recommended that <c><anno>Report</anno></c> follows the same + structure as for + <seealso marker="#warning_report/1"><c>warning_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> </desc> </func> </funcs> @@ -461,8 +517,9 @@ ok</pre> <section> <title>See Also</title> <p><seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>, - <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso> - <seealso marker="kernel_app"><c>kernel(6)</c></seealso> + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>, + <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>, + <seealso marker="kernel_app"><c>kernel(6)</c></seealso>, <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p> </section> </erlref> diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml index 6e6990ddda..2eadc70abf 100644 --- a/lib/kernel/doc/src/introduction_chapter.xml +++ b/lib/kernel/doc/src/introduction_chapter.xml @@ -46,7 +46,6 @@ <item>Start, stop, supervision, configuration, and distribution of applications</item> <item>Code loading</item> <item>Logging</item> - <item>Error logging</item> <item>Global name service</item> <item>Supervision of Erlang/OTP</item> <item>Communication with sockets</item> diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml index 7894600c21..e2a6d30249 100644 --- a/lib/kernel/doc/src/kernel_app.xml +++ b/lib/kernel/doc/src/kernel_app.xml @@ -42,7 +42,6 @@ <item>Start, stop, supervision, configuration, and distribution of applications</item> <item>Code loading</item> <item>Logging</item> - <item>Error logging</item> <item>Global name service</item> <item>Supervision of Erlang/OTP</item> <item>Communication with sockets</item> @@ -165,140 +164,59 @@ <p>Permissions are described in <seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p> </item> - <tag><c>logger_dest = Value</c></tag> + <tag><marker id="logger"/><c>logger = [Config]</c></tag> <item> - <p><c>Value</c> is one of:</p> - <taglist> - <tag><c>tty</c></tag> - <item><p>Installs the standard handler, <seealso marker="logger_std_h"> - <c>logger_std_h(3)</c></seealso>, with <c>type</c> set - to <c>standard_io</c>. This is the default - option.</p></item> - <tag><c>{file, FileName}</c></tag> - <item><p>Installs the standard handler, <seealso marker="logger_std_h"> - <c>logger_std_h(3)</c></seealso>, with <c>type</c> set - to <c>{file, FileName}</c>, where <c>FileName</c> - is a string. The file is opened with encoding UTF-8.</p></item> - <tag><c>{disk_log, FileName}</c></tag> - <item><p>Installs the disk_log handler, <seealso marker="logger_disk_log_h"> - <c>logger_disk_log_h(3)</c></seealso>, with <c>file</c> set - to <c>FileName</c> (a string), and possibly other disk_log - parameters set by the environment variables - <c>logger_disk_log_type</c>, <c>logger_disk_log_maxfiles</c> and - <c>logger_disk_log_maxbytes</c>, - see <seealso marker="#disk_log_vars">below</seealso>. 
The - file is opened with encoding UTF-8.</p></item> - <tag><c>false</c></tag> - <item> - <p>No standard handler is installed, but - the initial, primitive handler is kept, printing - raw event messages to <c>tty</c>.</p> - </item> - <tag><c>silent</c></tag> - <item> - <p>No standard handler is started, and the initial, - primitive handler is removed.</p> - </item> - </taglist> + <p>Specifies how <seealso marker="logger"><c>logger</c></seealso> should be + configured.</p> + <p>For more details and examples, see the <seealso marker="logger_chapter#logger"> + Configuration</seealso> section in the <seealso marker="logger_chapter"> + Logger User's Guide</seealso>. + </p> </item> - <tag><c>logger_level = Level</c></tag> + <tag><marker id="logger_level"/><c>logger_level = Level</c></tag> <item> - <p><c>Value = emergency | alert | critical | error | warning | + <p><c>Level = emergency | alert | critical | error | warning | notice | info | debug</c></p> <p>This parameter specifies which log levels to log. The specified level, and all levels that are more severe, will be logged.</p> - <p>This configuration parameter is used both for the global - logger level, and for the standard handler started by - the Kernel application (see <c>logger_dest</c> variable above).</p> <p>The default value is <c>info</c>.</p> - </item> - <tag><marker id="disk_log_vars"/> - <c>logger_disk_log_type = halt | wrap</c></tag> - <item/> - <tag><c>logger_disk_log_maxfiles = integer()</c></tag> - <item/> - <tag><c>logger_disk_log_maxbytes = integer()</c></tag> - <item> - <p>If <c>logger_dest</c> is set to {disk_log,File}, then these - parameters specify the configuration to use when opening the - disk log file. They specify the type of disk log, the - maximum number of files (if the type is wrap) and the - maximum size of each file, respectively.</p> - <p>The default values are:</p> - <code> -logger_disk_log_type = wrap -logger_disk_log_maxfiles = 10 -logger_disk_log_maxbytes = 1048576</code> + <p>To change the global log level at run-time, use + <seealso marker="logger#set_logger_config/2"> + <c>logger:set_logger_config(level, error)</c></seealso>.</p> </item> <tag><marker id="logger_sasl_compatible"/> <c>logger_sasl_compatible = boolean()</c></tag> <item> - <p>If this parameter is set to true, then the logger handler - started by kernel will not log any progress-, crash-, or - supervisor reports. If the SASL application is started, + <p>If this parameter is set to true, then the <c>default</c> logger handler + will not log any progress-, crash-, or supervisor reports. + If the SASL application is started, these log events will be sent to a second handler instance - named <c>sasl_h</c>, according to values of the SASL - environment variables <c>sasl_error_logger</c> + named <c>sasl</c>, according to values of the SASL + configuration parameter <c>sasl_error_logger</c> and <c>sasl_errlog_type</c>, see - <seealso marker="sasl:sasl_app#configuration">SASL(6) + <seealso marker="sasl:sasl_app#deprecated_error_logger_config">sasl(6) </seealso></p> <p>The default value is <c>false</c>.</p> <p>See chapter <seealso marker="logger_chapter#compatibility">Backwards compatibility with error_logger</seealso> for more information about handling of the so called SASL reports.</p> + <note><p>This configuration option only effects the <c>default</c> + and <c>sasl</c> handler. 
Any other handlers are uneffected.</p></note> </item> - <tag><marker id="logger_log_progress"/> - <c>logger_log_progress = boolean()</c></tag> + <tag><marker id="logger_progress_reports"/> + <c>logger_progress_reports = stop | log</c></tag> <item> <p>If <c>logger_sasl_compatible = false</c>, - then <c>logger_log_progress</c> specifies if progress + then <c>logger_progress_reports</c> specifies if progress reports from <c>supervisor</c> - and <c>application_controller</c> shall be logged or - not.</p> + and <c>application_controller</c> shall be logged by the + default logger.</p> <p>If <c>logger_sasl_compatible = true</c>, - then <c>logger_log_progress</c> is ignored.</p> - </item> - <tag><marker id="logger_format_depth"/> - <c>logger_format_depth = Depth</c></tag> - <item> - <p>Can be used to limit the size of the - formatted output from the logger handlers.</p> - - <p><c>Depth</c> is a positive integer representing the maximum - depth to which terms are printed by the logger - handlers included in OTP. This - configuration parameter is used by the default formatter, - <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>, - unless the formatter's <c>depth</c> parameter is explicitly set. - (If you have implemented your own formatter, this configuration - parameter has no effect on that.)</p> - - <p><c>Depth</c> is used as follows: Format strings - received by the formatter are rewritten. - The format controls <c>~p</c> and <c>~w</c> are replaced with - <c>~P</c> and <c>~W</c>, respectively, and <c>Depth</c> is - used as the depth parameter. For details, see - <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB.</p> - - <note><p>A reasonable starting value for <c>Depth</c> is - <c>30</c>. We recommend to test crashing various processes in your - application, examine the logs from the crashes, and then - increase or decrease the value.</p></note> - </item> - <tag><c>logger_max_size = integer() | unlimited</c></tag> - <item> - <p>This parameter specifies a hard maximum size limit (number - of characters) each log event can have when printed by the - default logger formatter. If the resulting string after - formatting an event is bigger than this, it will be - truncated before printed to the handler's destination.</p> - </item> - <tag><c>logger_utc = boolean()</c></tag> - <item> - <p>If set to <c>true</c>, the default formatter will display - all dates in Universal Coordinated Time.</p> + then <c>logger_progress_reports</c> is ignored.</p> + <p>The default value is <c>stop</c></p> + <note><p>This configuration option only effects the <c>default</c> + and <c>sasl</c> handler. Any other handlers are uneffected.</p></note> </item> <tag><c>global_groups = [GroupTuple]</c></tag> <item> @@ -352,7 +270,7 @@ logger_disk_log_maxbytes = 1048576</code> <tag><c>inet_parse_error_log = silent</c></tag> <item> <p>If set, no - <c>error_logger</c> messages are generated when erroneous + <c>logger</c> messages are generated when erroneous lines are found and skipped in the various Inet configuration files.</p> </item> @@ -563,18 +481,29 @@ MaxT = TickTime + TickTime / 4</code> <section> <title>Deprecated Configuration Parameters</title> - <p>In OTP-21, a new API for logging was added to Erlang/OTP. The + <p>In Erlang/OTP 21.0, a new API for logging was added. 
The old <c>error_logger</c> event manager, and event handlers - running on this manager, will still work, but they are not used + running on this manager, still work, but they are no longer used by default.</p> - <p>The following application environment variables can still be - set, but they will only be used if the corresponding new logger - variables are not set.</p> + <p>The following application configuration parameters can still be + set, but they are only used if the corresponding configuration + parameters for Logger are not set.</p> <taglist> <tag><c>error_logger</c></tag> - <item>Replaced by <c>logger_dest</c></item> + <item>Replaced by setting the type of the default + <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso> + to the same value. Example: + <code type="none"> +erl -kernel logger '[{handler,default,logger_std_h,#{logger_std_h=>#{type=>{file,"/tmp/erlang.log"}}}}]' + </code> + </item> <tag><c>error_logger_format_depth</c></tag> - <item>Replaced by <c>logger_format_depth</c></item> + <item>Replaced by setting the <seealso marker="logger_formatter#depth"><c>depth</c></seealso> + parameter of the default handlers formatter. Example: + <code type="none"> +erl -kernel logger '[{handler,default,logger_std_h,#{formatter=>{logger_formatter,#{legacy_header=>true,template=>[{logger_formatter,header},"\n",msg,"\n"],depth=>10}}}]' + </code> + </item> </taglist> <p>See <seealso marker="logger_chapter#compatibility">Backwards compatibility with error_logger</seealso> for more @@ -589,12 +518,12 @@ MaxT = TickTime + TickTime / 4</code> <seealso marker="disk_log"><c>disk_log(3)</c></seealso>, <seealso marker="erl_boot_server"><c>erl_boot_server(3)</c></seealso>, <seealso marker="erl_ddll"><c>erl_ddll(3)</c></seealso>, - <seealso marker="error_logger"><c>error_logger(3)</c></seealso>, <seealso marker="file"><c>file(3)</c></seealso>, <seealso marker="global"><c>global(3)</c></seealso>, <seealso marker="global_group"><c>global_group(3)</c></seealso>, <seealso marker="heart"><c>heart(3)</c></seealso>, <seealso marker="inet"><c>inet(3)</c></seealso>, + <seealso marker="logger"><c>logger(3)</c></seealso>, <seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>, <seealso marker="os"><c>os(3)</c></seealso>, <seealso marker="pg2"><c>pg2(3)</c></seealso>, diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml index d901454e62..911eb158da 100644 --- a/lib/kernel/doc/src/logger.xml +++ b/lib/kernel/doc/src/logger.xml @@ -33,10 +33,50 @@ <file>logger.xml</file> </header> <module>logger</module> - <modulesummary>API module for the logger application.</modulesummary> + <modulesummary>API module for logging in Erlang/OTP.</modulesummary> <description> - + <p> + This module is the main API for logging in Erlang/OTP. It + contains functions that allow applications to use a single log + API and the system to manage those log events independently. Use + the <seealso marker="#emergency-1">API functions</seealso> or the log + <seealso marker="#macros">macros</seealso> to log events. For instance, + to log a new error event:</p> + <code> +?LOG_ERROR("error happened because: ~p",[Reason]). %% With macro +logger:error("error happened because: ~p",[Reason]). %% Without macro + </code> + <p>This log event is then sent to the configured log handlers which + by default means that it is be printed to the console. If you want + your systems logs to be printed to a file instead of the console you + must configure the default handler to do so. 
The simplest way is + to include the following in your <seealso marker="config"><c>sys.config</c></seealso>.</p> + <code> +[{kernel, + [{logger, + [{handler,default,logger_std_h, + #{logger_std_h=>#{type=>{file,"path/to/file.log"}}}}]}]}]. + </code> + <p> + For more information about: + </p> + <list type="bulleted"> + <item>how to use the API, + see <seealso marker="logger_chapter">the User's Guide</seealso>.</item> + <item>how to configure Logger, + see the <seealso marker="logger_chapter#configuration">Configuration</seealso> + section in the User's Guide.</item> + <item>the convinience macros in logger.hrl, + see <seealso marker="#macros">the macro section</seealso>.</item> + <item>what the builtin formatter can do, + see <seealso marker="logger_formatter">logger_formatter</seealso>.</item> + <item>what the builtin handlers can do, + see <seealso marker="logger_std_h">logger_std_h</seealso> and + <seealso marker="logger_disk_log_h">logger_disk_log_h</seealso>.</item> + <item>what builtin filters are available, + see <seealso marker="logger_filters">logger_filters</seealso>.</item> + </list> </description> <datatypes> @@ -47,7 +87,7 @@ </desc> </datatype> <datatype> - <name name="log"/> + <name name="log_event"/> <desc> <p></p> </desc> @@ -72,7 +112,7 @@ <list> <item><c>pid => self()</c></item> <item><c>gl => group_leader()</c></item> - <item><c>time => erlang:monotonic_time(microsecond)</c></item> + <item><c>time => erlang:system_time(microsecond)</c></item> </list> <p>When a log macro is used, Logger also inserts location information:</p> @@ -90,8 +130,31 @@ <c>update_process_metadata/1</c></seealso>.</p> <p>Logger merges all the metadata maps before forwarding the log event to the handlers. If the same keys occur, values - from the log call overwrites process metadata, which in turn - overwrites values set by Logger.</p> + from the log call overwrite process metadata, which in turn + overwrite values set by Logger.</p> + <p>The following custom metadata keys have special meaning:</p> + <taglist> + <tag><c>domain</c></tag> + <item> + <p>The value associated with this key is used by filters + for grouping log events originating from, for example, + specific functional + areas. See <seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso> + for a description of how this field can be used.</p> + </item> + <tag><c>report_cb</c></tag> + <item> + <p>If the log message is specified as + a <seealso marker="#type-report"><c>report()</c></seealso>, + the <c>report_cb</c> key can be associated with a fun + (report callback) that converts the report to a format + string and arguments. 
See + section <seealso marker="logger_chapter#log_message">Log + Message</seealso> in the User's Guide for more + information about report callbacks.</p> + </item> + </taglist> </desc> </datatype> <datatype> @@ -105,7 +168,7 @@ <item><c>filters => []</c></item> <item><c>formatter => {logger_formatter,DefaultFormatterConfig</c>}</item> </list> - <p>See the <seealso marker="logger_formatter#configuration"> + <p>See the <seealso marker="logger_formatter#type-config"> <c>logger_formatter(3)</c></seealso> manual page for information about the default configuration for this formatter.</p> @@ -126,7 +189,8 @@ <datatype> <name name="filter"/> <desc> - <p>A filter which can be installed for logger or for a handler.</p> + <p>A filter which can be installed for the logger part of + Logger, or for a handler.</p> </desc> </datatype> <datatype> @@ -145,8 +209,17 @@ <name name="timestamp"/> <desc> <p>A timestamp produced - with <seealso marker="erts:erlang#monotonic_time-1"> - <c>erlang:monotonic_time(microsecond)</c></seealso>.</p> + with <seealso marker="erts:erlang#system_time-1"> + <c>erlang:system_time(microsecond)</c></seealso>.</p> + </desc> + </datatype> + <datatype> + <name name="formatter_config"/> + <desc> + <p>Configuration data for the + formatter. See <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + for an example of a formatter implementation.</p> </desc> </datatype> </datatypes> @@ -174,9 +247,9 @@ <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item> </list> - <p>All macros expand to a call to logger, where <c>Level</c> is - taken from the macro name, and location data is added. See the - description of + <p>All macros expand to a call to Logger, where <c>Level</c> is + taken from the macro name, and location data is added to the + metadata. 
See the description of the <seealso marker="#type-metadata"><c>metadata()</c></seealso> type for more information about the location data.</p> @@ -296,23 +369,26 @@ <func> <name name="get_logger_config" arity="0"/> - <fsummary>Lookup the current configuration for logger.</fsummary> + <fsummary>Look up the current configuration for the logger part + of Logger.</fsummary> <desc> - <p>Lookup the current configuration for logger.</p> + <p>Look up the current configuration for the logger part of + Logger.</p> </desc> </func> <func> <name name="get_handler_config" arity="1"/> - <fsummary>Lookup the current configuration for the given handler.</fsummary> + <fsummary>Look up the current configuration for the given + handler.</fsummary> <desc> - <p>Lookup the current configuration for the given handler.</p> + <p>Look up the current configuration for the given handler.</p> </desc> </func> <func> <name name="i" arity="0"/> - <fsummary>Get all logger configurations</fsummary> + <fsummary>Get all Logger configurations</fsummary> <desc> <p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p> </desc> @@ -322,42 +398,42 @@ <name name="i" arity="1" clause_i="1"/> <name name="i" arity="1" clause_i="2"/> <name name="i" arity="1" clause_i="3"/> - <fsummary>Get all logger configurations</fsummary> + <fsummary>Get all Logger configurations</fsummary> <desc> - <p>Display or return all current logger configuration.</p> + <p>Display or return all current Logger configurations.</p> <taglist> <tag><c><anno>Action</anno> = string</c></tag> <item> - <p>Return the pretty printed current logger configuration + <p>Return the pretty printed current Logger configuration as iodata.</p> </item> <tag><c><anno>Action</anno> = term</c></tag> <item> - <p>Return the current logger configuration as a term. The - format of this term may change inbetween releases. For a + <p>Return the current Logger configuration as a term. The + format of this term may change between releases. For a stable format use <seealso marker="#get_handler_config/1"> <c>logger:get_handler_config/1</c></seealso> and <seealso marker="#get_logger_config/0"> - <c>logger:get_logger_config/0</c></seealso>. - The same as calling <c>logger:i()</c>.</p> + <c>logger:get_logger_config/0</c></seealso>.</p> + <p>The same as calling <c>logger:i()</c>.</p> </item> <tag><c><anno>Action</anno> = print</c></tag> <item> - <p>Pretty print all the current logger configuration to + <p>Pretty print all the current Logger configuration to standard out. Example:</p> <code><![CDATA[1> logger:i(print). 
Current logger configuration: Level: info - FilterDefault: log + Filter Default: log Filters: Handlers: - Id: logger_std_h + Id: default Module: logger_std_h Level: info Formatter: Module: logger_formatter - Config: #{template => [{logger_formatter,header},"\n",msg,"\n"], - legacy_header => true} + Config: #{legacy_header => true,single_line => false, + template => [{logger_formatter,header},"\n",msg,"\n"]} Filter Default: stop Filters: Id: stop_progress @@ -368,10 +444,10 @@ Current logger configuration: Config: stop Id: domain Fun: fun logger_filters:domain/2 - Config: {log,prefix_of,[beam,erlang,otp,sasl]} + Config: {log,super,[beam,erlang,otp,sasl]} Id: no_domain Fun: fun logger_filters:domain/2 - Config: {log,no_domain,[]} + Config: {log,undefined,[]} Handler Config: logger_std_h: #{type => standard_io} Level set per module: @@ -384,21 +460,21 @@ Current logger configuration: <func> <name name="add_logger_filter" arity="2"/> - <fsummary>Add a filter to the logger.</fsummary> + <fsummary>Add a filter to the logger part of Logger.</fsummary> <desc> - <p>Add a filter to the logger.</p> + <p>Add a filter to the logger part of Logger.</p> <p>The filter fun is called with the log event as the first parameter, and the specified <c>filter_args()</c> as the second parameter.</p> <p>The return value of the fun specifies if a log event is to be discarded or forwarded to the handlers:</p> <taglist> - <tag><c>log()</c></tag> + <tag><c>log_event()</c></tag> <item> <p>The filter <em>passed</em>. The next logger filter, if any, is applied. If no more logger filters exist, the - log event is forwarded to the handler part of the - logger, where handler filters are applied.</p> + log event is forwarded to the handler part of Logger, + where handler filters are applied.</p> </item> <tag><c>stop</c></tag> <item> @@ -410,13 +486,13 @@ Current logger configuration: <p>The filter has no knowledge of the log event. The next logger filter, if any, is applied. If no more logger filters exist, the value of the <c>filter_default</c> - configuration parameter for the logger specifies if the - log event shall be discarded or forwarded to the handler - part.</p> + configuration parameter for the logger part specifies if + the log event shall be discarded or forwarded to the + handler part.</p> </item> </taglist> - <p>See section <seealso marker="logger_chapter#Filter"> - Filter</seealso> in the User's Guide for more information + <p>See section <seealso marker="logger_chapter#filters"> + Filters</seealso> in the User's Guide for more information about filters.</p> <p>Some built-in filters exist. These are defined in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> @@ -434,7 +510,7 @@ Current logger configuration: <p>The return value of the fun specifies if a log event is to be discarded or forwarded to the handler callback:</p> <taglist> - <tag><c>log()</c></tag> + <tag><c>log_event()</c></tag> <item> <p>The filter <em>passed</em>. The next handler filter, if any, is applied. If no more filters exist for this @@ -457,7 +533,7 @@ Current logger configuration: </item> </taglist> <p>See - section <seealso marker="logger_chapter#Filter">Filter</seealso> + section <seealso marker="logger_chapter#filters">Filters</seealso> in the User's Guide for more information about filters.</p> <p>Some built-in filters exist. 
These are defined in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> @@ -466,10 +542,10 @@ Current logger configuration: <func> <name name="remove_logger_filter" arity="1"/> - <fsummary>Remove a filter from the logger.</fsummary> + <fsummary>Remove a filter from the logger part of Logger.</fsummary> <desc> <p>Remove the filter identified - by <c><anno>FilterId</anno></c> from the logger.</p> + by <c><anno>FilterId</anno></c> from the logger part of Logger.</p> </desc> </func> @@ -489,7 +565,7 @@ Current logger configuration: <desc> <p>Add a handler with the given configuration.</p> <p><c><anno>HandlerId</anno></c> is a unique identifier which - must be used in all subsequent calls reffering to this + must be used in all subsequent calls referring to this handler.</p> </desc> </func> @@ -509,21 +585,21 @@ Current logger configuration: <p>Set the log level for the specified <c><anno>Module</anno></c>.</p> <p>The log level for a module overrides the global log level - of the logger for log event originating from the module in + of Logger for log events originating from the module in question. Notice, however, that it does not override the level configuration for any handler.</p> - <p>For example: Assume that the global log level for the - logger is <c>info</c>, and there is one handler, <c>h1</c>, - with level <c>info</c> and one handler, <c>h2</c>, with + <p>For example: Assume that the global log level for Logger + is <c>info</c>, and there is one handler, <c>h1</c>, with + level <c>info</c> and one handler, <c>h2</c>, with level <c>debug</c>.</p> <p>With this configuration, no debug messages will be logged, since they are all stopped by the global log level.</p> - <p>If the level for <c>mymodule</c> is set now set + <p>If the level for <c>mymodule</c> is now set to <c>debug</c>, then debug events from this module will be logged by the handler <c>h2</c>, but not by handler <c>h1</c>.</p> <p>Debug events from other modules are still not logged.</p> - <p>To change the global log level for the logger, use + <p>To change the global log level for Logger, use <seealso marker="#set_logger_config/2"> <c>logger:set_logger_config(level,Level)</c></seealso>.</p> <p>To change the log level for a handler, use @@ -533,15 +609,16 @@ Current logger configuration: <p>The originating module for a log event is only detected if <c>mfa=>{Module,Function,Arity}</c> exists in the metadata. When log macros are used, this association is - automatically added to all log events. If the logger API + automatically added to all log events. If an API function is called directly, without using a macro, the logging - client must explicitly add this information.</p> + client must explicitly add this information if module + levels shall have any effect.</p> </note> </desc> </func> <func> - <name name="reset_module_level" arity="1"/> + <name name="unset_module_level" arity="1"/> <fsummary>Remove a module specific log setting.</fsummary> <desc> <p>Remove a module specific log setting. 
After this, the @@ -550,17 +627,71 @@ Current logger configuration: </func> <func> + <name name="add_handlers" arity="1" clause_i="1"/> + <fsummary>Set up log handlers from the application's + configuration parameters.</fsummary> + <desc> + <p>Reads the application configuration parameter <c>logger</c> and + calls <c>logger:add_handlers/1</c> with its contents.</p> + </desc> + </func> + + <func> + <name name="add_handlers" arity="1" clause_i="2"/> + <fsummary>Set up Logger handlers.</fsummary> + <type name="config_handler"/> + <desc> + <p>This function should be used by custom Logger handlers to make + configuration consistent no matter which handler the system uses. + Normal usage is to add a call to <c>logger:add_handlers/1</c> + just after the processes that the handler needs are started, + and pass the application's <c>logger</c> configuration as the argument. + For example:</p> + <code> +-behaviour(application). +start(_, []) -> + case supervisor:start_link({local, my_sup}, my_sup, []) of + {ok, Pid} -> + ok = logger:add_handlers(my_app), + {ok, Pid, []}; + Error -> Error + end.</code> + <p>This reads the <c>logger</c> configuration parameter from + the <c>my_app</c> application and starts the configured + handlers. The contents of the configuration use the same + rules as the + <seealso marker="logger_chapter#handler-configuration">logger handler configuration</seealso>. + </p> + <p>If the handler is meant to replace the default handler, the Kernel's + default handler has to be disabled before the new handler is added. + A <c>sys.config</c> file that disables the Kernel handler and adds + a custom handler could look like this:</p> + <code> +[{kernel, + [{logger, + %% Disable the default Kernel handler + [{handler,default,undefined}]}]}, + {my_app, + [{logger, + %% Enable this handler as the default + [{handler,default,my_handler,#{}}]}]}]. + </code> + </desc> + </func> + + <func> <name name="set_logger_config" arity="1"/> - <fsummary>Set configuration data for the logger.</fsummary> + <fsummary>Set configuration data for the logger part of Logger.</fsummary> <desc> - <p>Set configuration data for the logger. This overwrites the - current logger configuration.</p> + <p>Set configuration data for the logger part of Logger. This + overwrites the current logger configuration.</p> <p>To modify the existing configuration, - use <seealso marker="#set_logger_config-2"><c>set_logger_config/2</c> - </seealso>, or read the current configuration + use <seealso marker="#update_logger_config-1"> + <c>update_logger_config/1</c></seealso>, or, if a more + complex merge is needed, read the current configuration with <seealso marker="#get_logger_config-0"><c>get_logger_config/0</c> - </seealso>, then merge in your added or updated - associations before writing it back.</p> + </seealso>, then do the merge before writing the new + configuration back with this function.</p> <p>If a key is removed compared to the current configuration, the default value is used.</p> </desc> </func> <func> <name name="set_logger_config" arity="2"/> - <fsummary>Add or update configuration data for the logger.</fsummary> + <fsummary>Add or update configuration data for the logger part + of Logger.</fsummary> <desc> - <p>Add or update configuration data for the logger. If the - given <c><anno>Key</anno></c> already exists, its associated - value will be changed to <c><anno>Value</anno></c>.
If it - doesn't exist, it will be added.</p> + <p>Add or update configuration data for the logger part of + Logger. If the given <c><anno>Key</anno></c> already exists, + its associated value will be changed + to <c><anno>Value</anno></c>. If it does not exist, it will + be added.</p> + </desc> + </func> + + <func> + <name name="update_logger_config" arity="1"/> + <fsummary>Update configuration data for the logger part of + Logger.</fsummary> + <desc> + <p>Update configuration data for the logger part of + Logger. This function behaves as if it was implemented as + follows:</p> + <code type="erl"> +{ok,Old} = logger:get_logger_config(), +logger:set_logger_config(maps:merge(Old,Config)). + </code> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_logger_config-1"><c>set_logger_config/1</c> + </seealso>.</p> </desc> </func> @@ -584,13 +735,14 @@ Current logger configuration: <p>Set configuration data for the specified handler. This overwrites the current handler configuration.</p> <p>To modify the existing configuration, - use <seealso marker="#set_handler_config-3"><c>set_handler_config/3</c> - </seealso>, or read the current configuration + use <seealso marker="#update_handler_config-2"> + <c>update_handler_config/2</c></seealso>, or, if a more + complex merge is needed, read the current configuration with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c> - </seealso>, then merge in your added or updated - associations before writing it back.</p> + </seealso>, then do the merge before writing the new + configuration back with this function.</p> <p>If a key is removed compared to the current configuration, - and the key is know by Logger, the default value is used. If + and the key is known by Logger, the default value is used. If it is a custom key, then it is up to the handler implementation if the value is removed or a default value is inserted.</p> @@ -605,12 +757,53 @@ Current logger configuration: <p>Add or update configuration data for the specified handler. If the given <c><anno>Key</anno></c> already exists, its associated value will be changed - to <c><anno>Value</anno></c>. If it doesn't exist, it will + to <c><anno>Value</anno></c>. If it does not exist, it will be added.</p> </desc> </func> <func> + <name name="update_handler_config" arity="2"/> + <fsummary>Update configuration data for the specified handler.</fsummary> + <desc> + <p>Update configuration data for the specified handler. This function + behaves as if it was implemented as follows:</p> + <code type="erl"> +{ok,{_,Old}} = logger:get_handler_config(HandlerId), +logger:set_handler_config(HandlerId,maps:merge(Old,Config)). 
+ </code> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_handler_config-2"><c>set_handler_config/2</c> + </seealso>.</p> + </desc> + </func> + + <func> + <name name="update_formatter_config" arity="2"/> + <fsummary>Update the formatter configuration for the specified handler.</fsummary> + <desc> + <p>Update the formatter configuration for the specified handler.</p> + <p>The new configuration is merged with the existing formatter + configuration.</p> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_handler_config-3"> + <c>set_handler_config(HandlerId,formatter, + {FormatterModule,FormatterConfig})</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="update_formatter_config" arity="3"/> + <fsummary>Update the formatter configuration for the specified handler.</fsummary> + <desc> + <p>Update the formatter configuration for the specified handler.</p> + <p>This is equivalent + to <br/><seealso marker="#update_formatter_config-2"> + <c>update_formatter_config(<anno>HandlerId</anno>,#{<anno>Key</anno>=><anno>Value</anno>})</c></seealso></p> + </desc> + </func> + + <func> <name name="compare_levels" arity="2"/> <fsummary>Compare the severity of two log levels.</fsummary> <desc> @@ -650,7 +843,7 @@ Current logger configuration: <p>If process metadata exists for the current process, this function behaves as if it was implemented as follows:</p> <code type="erl"> -logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) +logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)). </code> <p>If no process metadata exists, the function behaves as <seealso marker="#set_process_metadata-1"> @@ -703,18 +896,17 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) </funcs> <section> - <title>Callback Functions</title> + <marker id="handler_callback_functions"/> + <title>Handler Callback Functions</title> <p>The following functions are to be exported from a handler callback module.</p> </section> <funcs> <func> - <name>Module:adding_handler(HandlerId,Config1) -> {ok,Config2} | {error,Reason}</name> + <name>HModule:adding_handler(Config1) -> {ok,Config2} | {error,Reason}</name> <fsummary>An instance of this handler is about to be added.</fsummary> <type> - <v>HandlerId = - <seealso marker="#type-handler_id">handler_id()</seealso></v> <v>Config1 = Config2 = <seealso marker="#type-config">config()</seealso></v> <v>Reason = term()</v> @@ -723,7 +915,9 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) <p>This callback function is optional.</p> <p>The function is called when an new handler is about to be added, and the purpose is to verify the configuration and - initiate all resourced needed by the handler.</p> + initiate all resources needed by the handler.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config1</c>.</p> <p>If everything succeeds, the callback function can add possible default values or internal state values to the configuration, and return the adjusted map @@ -734,28 +928,9 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) </func> <func> - <name>Module:removing_handler(HandlerId,Config) -> ok</name> - <fsummary>The given handler is about to be removed.</fsummary> - <type> - <v>HandlerId = - <seealso marker="#type-handler_id">handler_id()</seealso></v> - <v>Config = - <seealso marker="#type-config">config()</seealso></v> - </type> - <desc> - <p>This 
callback function is optional.</p> - <p>The function is called when a handler is about to be - removed, and the purpose is to release all resources used by - the handler. The return value is ignored by Logger.</p> - </desc> - </func> - - <func> - <name>Module:changing_config(HandlerId,Config1,Config2) -> {ok,Config3} | {error,Reason}</name> + <name>HModule:changing_config(Config1,Config2) -> {ok,Config3} | {error,Reason}</name> <fsummary>The configuration for this handler is about to change.</fsummary> <type> - <v>HandlerId = - <seealso marker="#type-handler_id">handler_id()</seealso></v> <v>Config1 = Config2 = Config3 = <seealso marker="#type-config">config()</seealso></v> <v>Reason = term()</v> @@ -767,12 +942,108 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) the new configuration.</p> <p><c>Config1</c> is the existing configuration and <c>Config2</c> is the new configuration.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config1</c>.</p> <p>If everything succeeds, the callback function must return a possibly adjusted configuration in <c>{ok,Config3}</c>.</p> <p>If the configuration is faulty, the callback function must return <c>{error,Reason}</c>.</p> </desc> </func> + + <func> + <name>HModule:log(LogEvent,Config) -> void()</name> + <fsummary>Log the given log event.</fsummary> + <type> + <v>LogEvent = + <seealso marker="#type-log_event">log_event()</seealso></v> + <v>Config = + <seealso marker="#type-config">config()</seealso></v> + </type> + <desc> + <p>This callback function is mandatory.</p> + <p>The function is called when all global filters and all + handler filters for the handler in question have passed for + the given log event.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config</c>.</p> + <p>The handler must log the event.</p> + <p>The return value from this function is ignored by + Logger.</p> + </desc> + </func> + + <func> + <name>HModule:removing_handler(Config) -> ok</name> + <fsummary>The given handler is about to be removed.</fsummary> + <type> + <v>Config = + <seealso marker="#type-config">config()</seealso></v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called when a handler is about to be + removed, and the purpose is to release all resources used by + the handler.</p> + <p>The handler identity is associated with the <c>id</c> key + in <c>Config</c>.</p> + <p>The return value is ignored by Logger.</p> + </desc> + </func> + + </funcs> + + <section> + <marker id="formatter_callback_functions"/> + <title>Formatter Callback Functions</title> + <p>The following functions are to be exported from a formatter + callback module.</p> + </section> + + <funcs> + <func> + <name>FModule:check_config(FConfig) -> ok | {error,term()}</name> + <fsummary>Validate the given formatter configuration.</fsummary> + <type> + <v>FConfig = + <seealso marker="#type-formatter_config">formatter_config()</seealso></v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called by a Logger when formatter + configuration is set or modified. The formatter must + validate the given configuration and return <c>ok</c> if it + is correct, and <c>{error,term()}</c> if it is faulty.</p> + <p>See <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + for an example implementation. 
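<p>As an illustration only (not how <c>logger_formatter</c> implements it), a minimal <c>check_config/1</c> could simply require the configuration to be a map:</p>
<code>
%% Sketch: accept any map as formatter configuration,
%% reject everything else.
check_config(FConfig) when is_map(FConfig) ->
    ok;
check_config(FConfig) ->
    {error,{invalid_formatter_config,?MODULE,FConfig}}.
</code>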
<c>logger_formatter</c> is the + default formatter used by Logger.</p> + </desc> + </func> + <func> + <name>FModule:format(LogEvent,FConfig) -> FormattedLogEntry</name> + <fsummary>Format the given log event.</fsummary> + <type> + <v>LogEvent = + <seealso marker="#type-log_event">log_event()</seealso></v> + <v>FConfig = + <seealso marker="#type-formatter_config">formatter_config()</seealso></v> + <v>FormattedLogEntry = + <seealso marker="unicode#type-chardata">unicode:chardata()</seealso></v> + </type> + <desc> + <p>This callback function is mandatory.</p> + <p>The function can be called by a log handler to convert a + log event term to a printable string. The returned value + can, for example, be printed as a log entry to the console + or a file using <seealso marker="stdlib:io#put_chars-1"> + <c>io:put_chars/1,2</c></seealso>.</p> + <p>See <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + for an example implementation. <c>logger_formatter</c> is the + default formatter used by Logger.</p> + </desc> + </func> </funcs> </erlref> diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png Binary files differ index 727609a6ef..901122193a 100644 --- a/lib/kernel/doc/src/logger_arch.png +++ b/lib/kernel/doc/src/logger_arch.png diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml index 519df2ba48..a3eec7bd4b 100644 --- a/lib/kernel/doc/src/logger_chapter.xml +++ b/lib/kernel/doc/src/logger_chapter.xml @@ -30,368 +30,673 @@ <file>logger_chapter.xml</file> </header> + <p>Erlang/OTP 21.0 provides a new standard API for logging + through <c>Logger</c>, which is part of the Kernel + application. Logger consists of the API for issuing log events, + and a customizable backend where log handlers, filters and + formatters can be plugged in.</p> + <p>By default, the Kernel application installs one log handler at + system start. This handler is named <c>default</c>. It receives + and processes standard log events produced by the Erlang runtime + system, standard behaviours and different Erlang/OTP + applications. The log events are by default written to the + terminal.</p> + <p>You can also configure the system so that the default handler + prints log events to a single file, or to a set of wrap logs + via <seealso marker="disk_log"><c>disk_log</c></seealso>.</p> + <p>By configuration, you can also modify or disable the default + handler, replace it by a custom handler, and install additional + handlers.</p> + <section> <title>Overview</title> - <p>Erlang/OTP provides a standard API for logging. The backend of - this API can be used as is, or it can be customized to suite - specific needs.</p> - <p>It consists of two parts - the <em>logger</em> part and the - <em>handler</em> part. The logger will forward log events to one - or more handler(s).</p> + <p>A <em>log event</em> consists of a <em>log level</em>, the + <em>message</em> to be logged, and <em>metadata</em>.</p> + <p>The Logger backend forwards log events from the API, first + through a set of <em>global filters</em>, then through a set + of <em>handler filters</em> for each log handler.</p> + <p>Each filter set consists of a <em>log level check</em>, + followed by zero or more <em>filter functions</em>.</p> + <p>The following figure shows a conceptual overview of Logger.
The + figure shows two log handlers, but any number of handlers can be + installed.</p> <image file="logger_arch.png"> - <icaption>Conceptual overview</icaption> + <icaption>Conceptual Overview</icaption> </image> - <p><em>Filters</em> can be added to the logger and to each - handler. The filters decide if an event is to be forwarded or - not, and they can also modify all parts of the log event.</p> - - <p>A <em>formatter</em> can be set for each handler. The formatter - does the final formatting of the log event, including the log - message itself, and possibly a timestamp, header and other - metadata.</p> - - <p>In accordance with the Syslog protocol, RFC-5424, eight - severity levels can be specified:</p> - - <table align="left"> - <row> - <cell><strong>Level</strong></cell> - <cell align="center"><strong>Integer</strong></cell> - <cell><strong>Description</strong></cell> - </row> - <row> - <cell>emergency</cell> - <cell align="center">0</cell> - <cell>system is unusable</cell> - </row> - <row> - <cell>alert</cell> - <cell align="center">1</cell> - <cell>action must be taken immediately</cell> - </row> - <row> - <cell>critical</cell> - <cell align="center">2</cell> - <cell>critical contidions</cell> - </row> - <row> - <cell>error</cell> - <cell align="center">3</cell> - <cell>error conditions</cell> - </row> - <row> - <cell>warning</cell> - <cell align="center">4</cell> - <cell>warning conditions</cell> - </row> - <row> - <cell>notice</cell> - <cell align="center">5</cell> - <cell>normal but significant conditions</cell> - </row> - <row> - <cell>info</cell> - <cell align="center">6</cell> - <cell>informational messages</cell> - </row> - <row> - <cell>debug</cell> - <cell align="center">7</cell> - <cell>debug-level messages</cell> - </row> - <tcaption>Severity levels</tcaption> - </table> - - <p>A log event is allowed by Logger if the integer value of - its <c>Level</c> is less than or equal to the currently - configured log level. The log level can be configured globally, - or to allow more verbose logging from a specific part of the - system, per module.</p> - + <p>Log levels are expressed as atoms. Internally in Logger, the + atoms are mapped to integer values, and a log event passes the + log level check if the integer value of its log level is less + than or equal to the currently configured log level. That is, + the check passes if the event is equally or more severe than the + configured level. See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of all log + levels.</p> + <p>The global log level can be overridden by a log level + configured per module. This is to, for instance, allow more + verbose logging from a specific part of the system.</p> + <p>Filter functions can be used for more sophisticated filtering + than the log level check provides. A filter function can stop or + pass a log event, based on any of the event's contents. It can + also modify all parts of the log event. See + section <seealso marker="#filters">Filters</seealso> for more + details.</p> + <p>If a log event passes through all global filters and all + handler filters for a specific handler, Logger forwards the event + to the handler callback. The handler formats and prints the + event to its destination. See + section <seealso marker="#handlers">Handlers</seealso> for + more details.</p> + <p>Everything up to and including the call to the handler + callbacks is executed on the client process, that is, the + process where the log event was issued.
It is up to the handler + implementation if other processes are involved or not.</p> + <p>The handlers are called in sequence, and the order is not + defined.</p> + </section> + <section> + <title>Logger API</title> + <p>The API for logging consists of a set + of <seealso marker="logger#macros">macros</seealso>, and a set + of functions on the form <c>logger:Level/1,2,3</c>, which are + all shortcuts + for <seealso marker="logger#log-2"> + <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p> + <p>The difference between using the macros and the exported + functions is that macros add location (originator) information + to the metadata, and perform lazy evaluation by wrapping the + logger call in a case statement, so it is only evaluated if the + log level of the event passes the global log level check.</p> + <section> - <title>Customizable parts</title> - + <marker id="log_level"/> + <title>Log Level</title> + <p>The log level indicates the severity of an event. In + accordance with the Syslog protocol, RFC-5424, eight log + levels can be specified. The following table lists all + possible log levels by name (atom), integer value, and + description:</p> + + <table align="left"> + <row> + <cell><strong>Level</strong></cell> + <cell align="center"><strong>Integer</strong></cell> + <cell><strong>Description</strong></cell> + </row> + <row> + <cell>emergency</cell> + <cell align="center">0</cell> + <cell>system is unusable</cell> + </row> + <row> + <cell>alert</cell> + <cell align="center">1</cell> + <cell>action must be taken immediately</cell> + </row> + <row> + <cell>critical</cell> + <cell align="center">2</cell> + <cell>critical conditions</cell> + </row> + <row> + <cell>error</cell> + <cell align="center">3</cell> + <cell>error conditions</cell> + </row> + <row> + <cell>warning</cell> + <cell align="center">4</cell> + <cell>warning conditions</cell> + </row> + <row> + <cell>notice</cell> + <cell align="center">5</cell> + <cell>normal but significant conditions</cell> + </row> + <row> + <cell>info</cell> + <cell align="center">6</cell> + <cell>informational messages</cell> + </row> + <row> + <cell>debug</cell> + <cell align="center">7</cell> + <cell>debug-level messages</cell> + </row> + <tcaption>Log Levels</tcaption> + </table> + <p>Notice that the integer value is only used internally in + Logger. In the API, you must always use the atom. To compare + the severity of two log levels, + use <seealso marker="logger#compare_levels-2"> + <c>logger:compare_levels/2</c></seealso>.</p> + </section> + <section> + <marker id="log_message"/> + <title>Log Message</title> + <p>The log message contains the information to be logged. The + message can consist of a format string and arguments (given as + two separate parameters in the Logger API), a string or a + report. The latter, which is either a map or a key-value list, + can be accompanied by a report callback specified in the log + event's <seealso marker="#metadata">metadata</seealso>. The + report callback is a convenience function that + the <seealso marker="#formatters">formatter</seealso> can use + to convert the report to a format string and arguments.
The + formatter can also use its own conversion function, if no + callback is provided, or if a customized formatting is + desired.</p> + <p>Example, format string and arguments:</p> + <code>logger:error("The file does not exist: ~ts",[Filename])</code> + <p>Example, string:</p> + <code>logger:notice("Something strange happened!")</code> + <p>Example, report, and metadata with report callback:</p> + <code> +logger:debug(#{got => connection_request, id => Id, state => State}, + #{report_cb => fun(R) -> {"~p",[R]} end})</code> + <p>The log message can also be provided through a fun for lazy + evaluation. The fun is only evaluated if the global log level + check passes, and is therefore recommended if it is expensive + to generate the message. The lazy fun must return a string, a + report, or a tuple with format string and arguments.</p> + </section> + <section> + <title>Metadata</title> + <p>Metadata contains additional data associated with a log + message. Logger inserts some metadata fields by default, and + the client can add custom metadata in two different ways:</p> + <taglist> - <tag><marker id="Handler"/>Handler</tag> + <tag>Set process metadata</tag> <item> - <p>A handler is defined as a module exporting the following - function:</p> - - <code>log(Log, Config) -> ok</code> - - <p>A handler is called by the logger backend after filtering on - logger level and on handler level for the handler which is - about to be called. The function call is done on the client - process, and it is up to the handler implementation if other - processes are to be involved or not.</p> - - <p>Multiple instances of the same handler can be - added. Configuration is per instance.</p> - + <p>Process metadata is set and updated + with <seealso marker="logger#set_process_metadata-1"> + <c>logger:set_process_metadata/1</c></seealso> + and <seealso marker="logger#update_process_metadata-1"> + <c>logger:update_process_metadata/1</c></seealso>, + respectively. This metadata applies to the process on + which these calls are made, and Logger adds the metadata + to all log events issued on that process.</p> </item> - - <tag><marker id="Filter"/>Filter</tag> - <item> - <p>Filters can be set on the logger or on a handler. Logger - filters are applied first, and if passed, the handler filters - for each handler are applied. The handler plugin is only - called if all handler filters for the handler in question also - pass.</p> - - <p>A filter is specified as:</p> - - <code>{fun((Log,Extra) -> Log | stop | ignore), Extra}</code> - - <p>The configuration parameter <c>filter_default</c> - specifies the behavior if all filters return <c>ignore</c>. - <c>filter_default</c> is by default set to <c>log</c>.</p> - - <p>The <c>Extra</c> parameter may contain any data that the - filter needs.</p> - </item> - - <tag><marker id="Formatter"/>Formatter</tag> + <tag>Add metadata to a specific log event</tag> <item> - <p>A formatter is defined as a module exporting the following - function:</p> - - <code>format(Log,Extra) -> unicode:chardata()</code> - - <p>The formatter plugin is called by each handler, and the - returned string can be printed to the handler's destination - (stdout, file, ...).</p> + <p>Metadata associated with one specific log event is given + as the last parameter to the log macro or Logger API + function when the event is issued.
For example:</p> + <code>?LOG_ERROR("Connection closed",#{context => server})</code> </item> - </taglist> + <p>See the description of + the <seealso marker="logger#type-metadata"> + <c>logger:metadata()</c></seealso> type for information + about which default keys Logger inserts, and how the different + metadata maps are merged.</p> </section> + </section> + <section> + <marker id="filter"/> + <title>Filters</title> + <p>Filters can be global, or attached to a specific + handler. Logger calls the global filters first, and if they all + pass, it calls the handler filters for each handler. Logger + calls the handler callback only if all filters attached to the + handler in question also pass.</p> + <p>A filter is defined as:</p> + <pre>{FilterFun, Extra}</pre> + <p>where <c>FilterFun</c> is a function of arity 2, + and <c>Extra</c> is any term. When applying the filter, Logger + calls the function with the log event as the first argument, + and the value of <c>Extra</c> as the second + argument. See <seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso> for type definitions.</p> + <p>The filter function can return <c>stop</c>, <c>ignore</c> or + the (possibly modified) log event.</p> + <p>If <c>stop</c> is returned, the log event is immediately + discarded. If the filter is global, no handler filters or + callbacks are called. If it is a handler filter, the + corresponding handler callback is not called, but the log event + is forwarded to filters attached to the next handler, if + any.</p> + <p>If the log event is returned, the next filter function is + called with the returned value as the first argument. That is, + if a filter function modifies the log event, the next filter + function receives the modified event. The value returned from + the last filter function is the value that the handler callback + receives.</p> + <p>If the filter function returns <c>ignore</c>, it means that it + did not recognize the log event, and thus leaves to other + filters to decide the event's destiny.</p> + <p>The configuration + option <seealso marker="#filter_default"><c>filter_default</c></seealso> + specifies the behaviour if all filter functions + return <c>ignore</c>, or if no filters + exist. <c>filter_default</c> is by default set to <c>log</c>, + meaning that if all existing filters ignore a log event, Logger + forwards the event to the handler + callback. If <c>filter_default</c> is set to <c>stop</c>, Logger + discards such events.</p> + <p>Global filters are added + with <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> + and removed + with <seealso marker="logger#remove_logger_filter-1"> + <c>logger:remove_logger_filter/1</c></seealso>. They can also + be added at system start via the Kernel configuration + parameter <seealso marker="#logger"><c>logger</c></seealso>.</p> + <p>Handler filters are added + with <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso> + and removed + with <seealso marker="logger#remove_handler_filter-2"> + <c>logger:remove_handler_filter/2</c></seealso>. 
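<p>As a sketch of a handler filter that modifies events rather than stopping them (the handler id <c>my_handler</c> and the metadata key <c>subsystem</c> are purely illustrative), the fun below tags every event that reaches the handler with an extra metadata key taken from the filter's <c>Extra</c> argument:</p>
<code>
%% Sketch only: add subsystem => Extra to the metadata of every
%% event that passes this filter.
AddTag = fun(#{meta := Meta} = LogEvent, Extra) ->
                 LogEvent#{meta => Meta#{subsystem => Extra}}
         end,
ok = logger:add_handler_filter(my_handler, add_subsystem_tag,
                               {AddTag, accounting}).
</code>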
They can also + be specified directly in the configuration when adding a handler + with <seealso marker="logger#add_handler/3"> + <c>logger:add_handler/3</c></seealso> + or via the Kernel configuration + parameter <seealso marker="#logger"><c>logger</c></seealso>.</p> + + <p>To see which filters are currently installed in the system, + use <seealso marker="logger#i-0"><c>logger:i/0</c></seealso>, + or <seealso marker="logger#get_logger_config-0"> + <c>logger:get_logger_config/0</c></seealso> + and <seealso marker="logger#get_handler_config-1"> + <c>logger:get_handler_config/1</c></seealso>. Filters are + listed in the order they are applied, that is, the first + filter in the list is applied first, and so on.</p> + + <p>For convenience, the following built-in filters exist:</p> - <section> - <title>Built-in handlers</title> - - <taglist> - <tag><c>logger_std_h</c></tag> + <taglist> + <tag><seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso></tag> <item> - <p>This is the default handler used by OTP. Multiple instances - can be started, and each instance will write log events to a - given destination, console or file. Filters can be used for - selecting which event to send to which handler instance.</p> + <p>Provides a way of filtering log events based on a + <c>domain</c> field in <c>Metadata</c>.</p> </item> - - <tag><c>logger_disk_log_h</c></tag> + <tag><seealso marker="logger_filters#level-2"> + <c>logger_filters:level/2</c></seealso></tag> <item> - <p>This handler behaves much like logger_std_h, except it uses - <seealso marker="disk_log"><c>disk_log</c></seealso> as its - destination.</p> + <p>Provides a way of filtering log events based on the log + level.</p> </item> - - <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag> + <tag><seealso marker="logger_filters#progress-2"> + <c>logger_filters:progress/2</c></seealso></tag> <item> - <p>This handler is to be used for backwards compatibility - only. It is not started by default, but will be automatically - started the first time an event handler is added - with <seealso marker="error_logger#add_report_handler-1"> - <c>error_logger:add_report_handler/1,2</c></seealso>.</p> - - <p>No built-in event handlers exist.</p> + <p>Stops or allows progress reports from <c>supervisor</c> + and <c>application_controller</c>.</p> </item> - </taglist> - </section> - - <section> - <title>Built-in filters</title> - - <taglist> - <tag><c>logger_filters:domain/2</c></tag> + <tag><seealso marker="logger_filters#remote_gl-2"> + <c>logger_filters:remote_gl/2</c></seealso></tag> <item> - <p>This filter provides a way of filtering log events based on a - <c>domain</c> field <c>Metadata</c>. See - <seealso marker="logger_filters#domain-2"> - <c>logger_filters:domain/2</c></seealso></p> + <p>Stops or allows log events originating from a process + that has its group leader on a remote node.</p> </item> + </taglist> + </section> - <tag><c>logger_filters:level/2</c></tag> - <item> - <p>This filter provides a way of filtering log events based - on the log level. 
See <seealso marker="logger_filters#domain-2"> - <c>logger_filters:domain/2</c></seealso></p> - </item> + <section> + <marker id="handlers"/> + <title>Handlers</title> + <p>A handler is defined as a module exporting at least the + following function:</p> + + <pre><seealso marker="logger#HModule:log-2">log(LogEvent, Config) -> void()</seealso></pre> + + <p>This function is called when a log event has passed through all + global filters, and all handler filters attached to the handler + in question. The function call is executed on the client + process, and it is up to the handler implementation if other + processes are involved or not.</p> + + <p>Logger allows adding multiple instances of a handler + callback. That is, if a callback module implementation allows + it, you can add multiple handler instances using the same + callback module. The different instances are identified by + unique handler identities.</p> + + <p>In addition to the mandatory callback function <c>log/2</c>, a + handler module can export the optional callback + functions <c>adding_handler/1</c>, <c>changing_config/2</c> + and <c>removing_handler/1</c>. See + section <seealso marker="logger#handler_callback_functions">Handler + Callback Functions</seealso> in the logger(3) manual for more + information about these function.</p> + + <p>The following built-in handlers exist:</p> - <tag><c>logger_filters:progress/2</c></tag> - <item> - <p>This filter matches all progress reports - from <c>supervisor</c> and <c>application_controller</c>. - See <seealso marker="logger_filters#progress/2"> - <c>logger_filters:progress/2</c></seealso></p> - </item> + <taglist> + <tag><c>logger_std_h</c></tag> + <item> + <p>This is the default handler used by OTP. Multiple instances + can be started, and each instance will write log events to a + given destination, terminal or file.</p> + </item> - <tag><c>logger_filters:remote_gl/2</c></tag> - <item> - <p>This filter matches all events originating from a process - that has its group leader on a remote node. - See <seealso marker="logger_filters#remote_gl/2"> - <c>logger_filters:remote_gl/2</c></seealso></p> - </item> - </taglist> - </section> + <tag><c>logger_disk_log_h</c></tag> + <item> + <p>This handler behaves much like <c>logger_std_h</c>, except it uses + <seealso marker="disk_log"><c>disk_log</c></seealso> as its + destination.</p> + </item> - <section> - <title>Default formatter</title> + <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag> + <item> + <p>This handler is provided for backwards compatibility + only. It is not started by default, but will be + automatically started the first time an <c>error_logger</c> + event handler is added + with <seealso marker="error_logger#add_report_handler-1"> + <c>error_logger:add_report_handler/1,2</c></seealso>.</p> + + <p>The old <c>error_logger</c> event handlers in STDLIB and + SASL still exist, but they are not added by Erlang/OTP 21.0 + or later.</p> + </item> + </taglist> + </section> - <p>The default formatter is <c>logger_formatter</c>. - See <seealso marker="logger_formatter#format-2"> - <c>logger_formatter:format/2</c></seealso>.</p> - </section> + <section> + <marker id="formatters"/> + <title>Formatters</title> + <p>A formatter can be used by the handler implementation to do the + final formatting of a log event, before printing to the + handler's destination. 
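<p>A minimal formatter callback module might look like the following sketch (the module name <c>my_formatter</c> is illustrative, and the formatter configuration is ignored):</p>
<code>
-module(my_formatter).
-export([format/2]).

%% Sketch only: prepend the level and render the message,
%% returning chardata as required from a formatter.
format(#{level := Level, msg := Msg, meta := _Meta}, _FConfig) ->
    Text = case Msg of
               {string, String} -> String;
               {report, Report} -> io_lib:format("~p", [Report]);
               {Format, Args}   -> io_lib:format(Format, Args)
           end,
    io_lib:format("[~p] ~ts~n", [Level, Text]).
</code>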
The handler callback receives the + formatter information as part of the handler configuration, + which is passed as the second argument + to <seealso marker="logger#HModule:log-2"> + <c>HModule:log/2</c></seealso>.</p> + <p>The formatter information consits of a formatter + module, <c>FModule</c> and its + configuration, <c>FConfig</c>. <c>FModule</c> must export the + following function, which can be called by the handler:</p> + <pre><seealso marker="logger#FModule:format-2">format(LogEvent,FConfig) + -> FormattedLogEntry</seealso></pre> + <p>The formatter information for a handler is set as a part of its + configuration when the handler is added. It can also be changed + during runtime + with <seealso marker="logger#set_handler_config-3"> + <c>logger:set_handler_config(HandlerId,formatter,{FModule,FConfig})</c> + </seealso>, which overwrites the current formatter information, + or with <seealso marker="logger#update_formatter_config-2"> + <c>logger:update_formatter_config/2,3</c></seealso>, which + only modifies the formatter configuration.</p> + <p>If the formatter module exports the optional callback + function <seealso marker="logger#FModule:check_config-1"> + <c>check_config(FConfig)</c></seealso>, Logger calls this + function when the formatter information is set or modified, to + verify the validity of the formatter configuration.</p> + <p>If no formatter information is specified for a handler, Logger + uses <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> as default.</p> </section> <section> <title>Configuration</title> + <p>Logger can be configured either when the system starts through + <seealso marker="config">configuration parameters</seealso>, + or at run-time by using the <seealso marker="logger">logger(3)</seealso> + API. The recommended approach is to do the initial configuration in + the <c>sys.config</c> file and then use the API when some configuration + has to be changed at runtime, such as the log level.</p> + <section> - <title>Application environment variables</title> - <p>See <seealso marker="kernel_app#configuration">Kernel(6)</seealso> for - information about the application environment variables that can - be used for configuring logger.</p> + <title>Kernel Configuration Parameters</title> + <p>Logger is best configured by using the configuration parameters + of Kernel. There are four possible configuration parameters: + <seealso marker="#logger"><c>logger</c></seealso>, + <seealso marker="kernel_app#logger_level"><c>logger_level</c></seealso>, + <seealso marker="kernel_app#logger_sasl_compatible"><c>logger_sasl_compatible</c></seealso> and + <seealso marker="kernel_app#logger_progress_reports"><c>logger_progress_reports</c></seealso>. + <c>logger_level</c>, <c>logger_sasl_compatible</c> and <c>logger_progress_reports</c> are described in the + <seealso marker="kernel_app#configuration">Kernel Configuration</seealso>, + while <c>logger</c> is described below.</p> + + <marker id="logger"/> + <p><em>logger</em></p> + <p>The application configuration parameter <c>logger</c> is used to configure + three different Logger aspects; handlers, logger filters and module levels. + The configuration is a list containing tagged tuples that look like this:</p> + <taglist> + <tag><c>DisableHandler = {handler,default,undefined}</c></tag> + <item> + <p>Disable the default handler. This allows another application + to add its own default handler. 
See <seealso marker="logger#add_handlers/1"> + <c>logger:add_handlers/1</c></seealso> for more details.</p> + <p>Only one entry of this option is allowed.</p></item> + <tag><c>AddHandler = {handler,HandlerId,Module,HandlerConfig}</c></tag> + <item> + <p>Add a handler as if <seealso marker="logger:add_handler/3"> + <c>logger:add_handler(HandlerId,Module,HandlerConfig)</c></seealso> is + called.</p> + <p>It is allowed to have multiple entries of this option.</p></item> + <tag><c>Filters = {filters, default, [Filter]}</c><br/> + <c>FilterDefault = log | stop</c><br/> + <c>Filter = {FilterId, {FilterFun, FilterConfig}}</c></tag> + <item> + <p>Add the specified <seealso marker="logger#add_logger_filter/2"> + logger filters</seealso>.</p> + <p>Only one entry of this option is allowed.</p></item> + <tag><c>ModuleLevel = {module_level, Level, [Module]}</c></tag> + <item> + <p>This option configures <seealso marker="logger#set_module_level/2"> + module log level</seealso>.</p> + <p>It is allowed to have multiple entries of this option.</p></item> + </taglist> + <p>Examples:</p> + <list> + <item> + <p>Output logs into the file "logs/erlang.log"</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ logger_std_h => #{ type => {file,"log/erlang.log"}}}}]}]}]. + </code> + </item> + <item> + <p>Output logs in single line format</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ formatter => { logger_formatter,#{ single_line => true}}}}]}]}]. + </code> + </item> + <item> + <p>Add the pid to each log event</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ formatter => { logger_formatter, + #{ template => [time," ",pid," ",msg,"\n"]}} + }}]}]}]. + </code> + </item> + <item> + <p>Use a different file for debug logging</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{ level => error, + logger_std_h => #{ type => {file, "log/erlang.log"}}}}, + {handler, info, logger_std_h, + #{ level => debug, + logger_std_h => #{ type => {file, "log/debug.log"}}}} + ]}]}]. + </code> + </item> + </list> </section> <section> - <title>Logger configuration</title> + <title>Global Logger Configuration</title> <taglist> - <tag><c>level</c></tag> + <tag><c>level = </c><seealso marker="logger#type-level"> + <c>logger:level()</c></seealso></tag> <item> - <p>Specifies the severity level to log.</p> + <p>Specifies the global log level to log.</p> + <p>See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of + possible log levels.</p> + <p>The initial value of this option is set by the Kernel + configuration + parameter <seealso marker="kernel_app#logger_level"> + <c>logger_level</c></seealso>. 
It can be changed during + runtime + with <seealso marker="logger#set_logger_config-2"> + <c>logger:set_logger_config(level,NewLevel)</c></seealso>.</p> </item> - <tag><c>filters</c></tag> + <tag><c>filters = [{</c><seealso marker="logger#type-filter_id"> + <c>logger:filter_id()</c></seealso><c>,</c> + <seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso><c>}]</c></tag> <item> - <p>Logger filters are added or removed with + <p>Global filters are added and removed with <seealso marker="logger#add_logger_filter-2"> <c>logger:add_logger_filter/2</c></seealso> and <seealso marker="logger#remove_logger_filter-1"> <c>logger:remove_logger_filter/1</c></seealso>, respectively.</p> - <p>See <seealso marker="#Filter">Filter</seealso> for more - information.</p> - <p>By default, no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information.</p> + <p>Default is <c>[]</c>, that is, no filters exist.</p> </item> - <tag><c>filter_default = log | stop</c></tag> + <tag><marker id="filter_default"/><c>filter_default = log | stop</c></tag> <item> <p>Specifies what to do with an event if all filters - return <c>ignore</c>.</p> + return <c>ignore</c>, or if no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information about how this option is used.</p> <p>Default is <c>log</c>.</p> </item> - <tag><c>handlers</c></tag> - <item> - <p>Handlers are added or removed with - <seealso marker="logger#add_handler-3"> - <c>logger:add_handler/3</c></seealso> and - <seealso marker="logger#remove_handler-1"> - <c>logger:remove_handler/1</c></seealso>, - respectively.</p> - <p>See <seealso marker="#Handler">Handler</seealso> for more - information.</p> - </item> </taglist> </section> <section> <marker id="handler_configuration"/> - <title>Handler configuration</title> + <title>Handler Configuration</title> <taglist> - <tag><c>level</c></tag> + <tag><c>level = </c><seealso marker="logger#type-level"> + <c>logger:level()</c></seealso></tag> <item> - <p>Specifies the severity level to log.</p> + <p>Specifies the log level which the handler logs.</p> + <p>See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of + possible log levels.</p> + <p>The log level can be specified when adding the handler, + or changed during runtime with, for + instance, <seealso marker="logger#set_handler_config/3"> + <c>logger:set_handler_config/3</c></seealso>.</p> + <p>Default is <c>info</c>.</p> </item> - <tag><c>filters</c></tag> + <tag><c>filters = [{</c><seealso marker="logger#type-filter_id"> + <c>logger:filter_id()</c></seealso><c>,</c> + <seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso><c>}]</c></tag> <item> <p>Handler filters can be specified when adding the handler, - or added or removed later with + or added or removed during runtime with <seealso marker="logger#add_handler_filter-3"> <c>logger:add_handler_filter/3</c></seealso> and <seealso marker="logger#remove_handler_filter-2"> <c>logger:remove_handler_filter/2</c></seealso>, respectively.</p> - <p>See <seealso marker="#Filter">Filter</seealso> for more + <p>See <seealso marker="#filters">Filters</seealso> for more information.</p> - <p>By default, no filters exist.</p> + <p>Default is <c>[]</c>, that is, no filters exist.</p> </item> - <tag><c>filter_default = log | stop</c></tag> + <tag><marker id="filter_default"/><c>filter_default = log | stop</c></tag> <item> <p>Specifies what to do with an event if all filters - 
return <c>ignore</c>.</p> + return <c>ignore</c>, or if no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information about how this option is used.</p> <p>Default is <c>log</c>.</p> </item> - <tag><c>formatter = {Module::module(),Extra::term()}</c></tag> + <tag><c>formatter = {module(),</c><seealso marker="logger#type-formatter_config"> + <c>logger:formatter_config()</c></seealso><c>}</c></tag> <item> - <p>See <seealso marker="#Formatter">Formatter</seealso> for more + <p>The formatter which the handler can use for converting + the log event term to a printable string.</p> + <p>See <seealso marker="#formatters">Formatters</seealso> for more information.</p> - <p>The default module is <seealso marker="logger_formatter"> - <c>logger_formatter</c></seealso>, and <c>Extra</c> is - it's configuration map.</p> + <p>Default + is <c>{logger_formatter,DefaultFormatterConfig}</c>, see + the <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + manual for information about this formatter and its + default configuration.</p> </item> + <tag><c>HandlerConfig, atom() = term()</c></tag> + <item> + <p>Any keys not listed above are considered to be handler + specific configuration. The configuration of the Kernel + handlers can be found in + the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso> + and + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c> + </seealso> manual pages.</p> + </item> </taglist> - <p>Note that <c>level</c> and <c>filters</c> are obeyed by + <p>Notice that <c>level</c> and <c>filters</c> are obeyed by Logger itself before forwarding the log events to each - handler, while <c>formatter</c> is left to the handler - implementation. All Logger's built-in handlers will call the - given formatter before printing.</p> + handler, while <c>formatter</c> and all handle specific + options are left to the handler implementation.</p> + <p>All Logger's built-in handlers will call the given formatter + before printing.</p> </section> </section> <section> <marker id="compatibility"/> - <title>Backwards compatibility with error_logger</title> - <p>Logger provides backwards compatibility with the old + <title>Backwards Compatibility with error_logger</title> + <p>Logger provides backwards compatibility with <c>error_logger</c> in the following ways:</p> <taglist> - <tag>Legacy event handlers</tag> + <tag>API for Logging</tag> <item> - <p>To use event handlers written for <c>error_logger</c>, just - add your event handler with</p> - <code> -error_logger:add_report_handler/1,2. - </code> - <p>This will automatically start the <c>error_logger</c> - event manager, and add <c>error_logger</c> as a - handler to <c>logger</c>, with configuration</p> -<code> -#{level=>info, - filter_default=>log, - filters=>[]}. -</code> - <p>Note that this handler will ignore events that do not - originate from the old <c>error_logger</c> API, or from - within OTP. This means that if your code uses the logger API - for logging, then your log events will be discarded by this - handler.</p> - <p>Also note that <c>error_logger</c> is not overload - protected.</p> - </item> - <tag>Logger API</tag> - <item> - <p>The old <c>error_logger</c> API still exists, but should - only be used by legacy code. It will be removed in a later + <p>The <c>error_logger</c> API still exists, but should only + be used by legacy code. 
It will be removed in a later release.</p> + <p>Calls + to <seealso marker="error_logger#error_report-1"> + <c>error_logger:error_report/1,2</c></seealso>, + <seealso marker="error_logger#error_msg-1"> + <c>error_logger:error_msg/1,2</c></seealso>, and + corresponding functions for warning and info messages, are + all forwarded to Logger as calls + to <seealso marker="logger#log-3"> + <c>logger:log(Level,Report,Metadata)</c></seealso>.</p> + <p><c>Level = error | warning | info</c> and is taken + from the function name. <c>Report</c> contains the actual + log message, and <c>Metadata</c> contains additional + information which can be used for creating backwards + compatible events for legacy <c>error_logger</c> event + handlers, see + section <seealso marker="#legacy_event_handlers">Legacy + Event Handlers</seealso>.</p> </item> - <tag>Output format</tag> + <tag>Output Format</tag> <item> <p>To get log events on the same format as produced by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>, use the default formatter, <c>logger_formatter</c>, with - configuration parameter <c>legacy_header=>true</c>. This is + configuration parameter <c>legacy_header => true</c>. This is also the default.</p> </item> - <tag>Default format of log events from OTP</tag> + <tag>Default Format of Log Events from OTP</tag> <item> <p>By default, all log events originating from within OTP, except the former so called "SASL reports", look the same as before.</p> </item> - <tag>SASL reports</tag> + <tag><marker id="sasl_reports"/>SASL Reports</tag> <item> <p>By SASL reports we mean supervisor reports, crash reports and progress reports.</p> @@ -401,96 +706,134 @@ error_logger:add_report_handler/1,2. named <c>sasl_report_tty_h</c> and <c>sasl_report_file_h</c>.</p> <p>The destination of these log events was configured by - environment variables for the SASL application.</p> + <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL + configuration parameters</seealso>.</p> <p>Due to the specific event handlers, the output format slightly differed from other log events.</p> - <p>As of OTP-21, the concept of SASL reports is removed, - meaning that the default behavior is as follows:</p> + <p>As of Erlang/OTP 21.0, the concept of SASL reports is + removed, meaning that the default behaviour is as + follows:</p> <list> - <item>Supervisor reports, crash reports and progress reports + <item>Supervisor reports, crash reports, and progress reports are no longer connected to the SASL application.</item> <item>Supervisor reports and crash reports are logged by default.</item> <item>Progress reports are not logged by default, but can be - enabled with the kernel environment - variable <c>logger_log_progress</c>.</item> + enabled with the Kernel configuration + parameter <seealso marker="kernel_app#logger_progress_reports"> + <c>logger_progress_reports</c></seealso>.</item> <item>The output format is the same for all log events.</item> </list> - <p>If the old behavior is preferred, the kernel environment - variable <c>logger_sasl_compatible</c> can be set - to <c>true</c>. The old SASL environment variables can then - be used as before, and the SASL reports will only be printed - if the SASL application is running - through a second log - handler named <c>sasl_h</c>.</p> + <p>If the old behaviour is preferred, the Kernel configuration + parameter <seealso marker="kernel_app#logger_sasl_compatible"> + <c>logger_sasl_compatible</c></seealso> can be set + to <c>true</c>.
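<p>For example, a minimal <c>sys.config</c> entry enabling the old behaviour could look like this (a sketch; only the <c>logger_sasl_compatible</c> parameter is set):</p>
<code>
[{kernel,
  [{logger_sasl_compatible, true}]}].
</code>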
The + <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL + configuration parameters</seealso> can then be used as + before, and the SASL reports will only be printed if the + SASL application is running, through a second log handler + named <c>sasl</c>.</p> <p>All SASL reports have a metadata - field <c>domain=>[beam,erlang,otp,sasl]</c>, which can be - used, for example, by filters to to stop or allow the - events.</p> + field <c>domain => [beam,erlang,otp,sasl]</c>, which can be + used, for example, by filters to stop or allow the + log events.</p> + <p>See the <seealso marker="sasl:error_logging">SASL User's + Guide</seealso> for more information about the old SASL + error logging functionality.</p> + </item> + <tag><marker id="legacy_event_handlers"/>Legacy Event Handlers</tag> + <item> + <p>To use event handlers written for <c>error_logger</c>, just + add your event handler with</p> + <code> +error_logger:add_report_handler/1,2. + </code> + <p>This will automatically start the <c>error_logger</c> + event manager, and add <c>error_logger</c> as a + handler to <c>logger</c>, with configuration</p> +<code> +#{level => info, + filter_default => log, + filters => []}. +</code> + <p>Notice that this handler will ignore events that do not + originate from the <c>error_logger</c> API, or from within + OTP. This means that if your code uses the Logger API for + logging, then your log events will be discarded by this + handler.</p> + <p>Also notice that <c>error_logger</c> is not overload + protected.</p> </item> </taglist> </section> <section> - <title>Error handling</title> + <title>Error Handling</title> <p>Log data is expected to be either a format string and - arguments, a string (unicode:chardata), or a report (map or + arguments, a string + (<seealso marker="stdlib:unicode#type-chardata"> + <c>unicode:chardata()</c></seealso>), or a report (map or key-value list) which can be converted to a format string and - arguments by the handler. A default report callback should be - included in the log event's metadata, which can be used for - converting the report to a format string and arguments. The - handler might also do a custom conversion if the default format - is not desired.</p> - <p><c>logger</c> does, to a certain extent, check its input data + arguments by the handler. If a report is given, a default report + callback can be included in the log event's metadata. The + handler can use this callback for converting the report to a + format string and arguments. If the format obtained by the + provided callback is not desired, or if there is no provided + callback, the handler must do a custom conversion.</p> + <p>Logger does, to a certain extent, check its input data before forwarding a log event to the handlers, but it does not evaluate conversion funs or check the validity of format strings and arguments. This means that any filter or handler must be careful when formatting the data of a log event, making sure that it does not crash due to bad input data or faulty callbacks.</p> - <p>If a filter or handler still crashes, logger will remove the - filter or handler in question from the configuration, and then - print a short error message on the console. 
A debug event - containing the crash reason and other details is also issued, - and can be seen if a handler is installed which logs on debug - level.</p> + <p>If a filter or handler still crashes, Logger will remove the + filter or handler in question from the configuration, and print + a short error message to the terminal. A debug event containing + the crash reason and other details is also issued, and can be + seen if a handler logging debug events is installed.</p> </section> <section> <title>Example: add a handler to log debug events to file</title> - <p>When starting an erlang node, the default behavior is that all + <p>When starting an Erlang node, the default behaviour is that all log events with level info and above are logged to the - console. In order to also log debug events, you can either + terminal. In order to also log debug events, you can either change the global log level to <c>debug</c> or add a separate handler to take care of this. In this example we will add a new handler which prints the debug events to a separate file.</p> - <p>First, we add an instance of logger_std_h with + <p>First, we add an instance of <c>logger_std_h</c> with type <c>{file,File}</c>, and we set the handler's level to <c>debug</c>:</p> <pre> -1> <input>Config = #{level=>debug,logger_std_h=>#{type=>{file,"./debug.log"}}}.</input> +1> <input>Config = #{level => debug, logger_std_h => #{type => {file,"./debug.log"}}}.</input> #{logger_std_h => #{type => {file,"./debug.log"}}, level => debug} 2> <input>logger:add_handler(debug_handler,logger_std_h,Config).</input> ok</pre> <p>By default, the handler receives all events - (<c>filter_defalt=log</c>), so we need to add a filter to stop - all non-debug events:</p> + (<c>filter_default=log</c>, see + section <seealso marker="#filters">Filters</seealso> for more + details), so we need to add a filter to stop all non-debug + events. The built-in + filter <seealso marker="logger_filters#level-2"> + <c>logger_filters:level/2</c></seealso> + is used for this:</p> <pre> -3> <input>Fun = fun(#{level:=debug}=Log,_) -> Log; (_,_) -> stop end.</input> -#Fun<erl_eval.12.98642416> -4> <input>logger:add_handler_filter(debug_handler,allow_debug,{Fun,[]}).</input> +3> <input>logger:add_handler_filter(debug_handler,stop_non_debug, + {fun logger_filters:level/2,{stop,neq,debug}}).</input> ok</pre> - <p>And finally, we need to make sure that the logger itself allows + <p>And finally, we need to make sure that Logger itself allows debug events. 
This can either be done by setting the global - logger level:</p> + log level:</p> <pre> -5> <input>logger:set_logger_config(level,debug).</input> +4> <input>logger:set_logger_config(level,debug).</input> ok</pre> <p>Or by allowing debug events from one or a few modules only:</p> <pre> -6> <input>logger:set_module_level(mymodule,debug).</input> +5> <input>logger:set_module_level(mymodule,debug).</input> ok</pre> </section> @@ -499,107 +842,112 @@ ok</pre> <title>Example: implement a handler</title> <p>The only requirement that a handler MUST fulfill is to export the following function:</p> - <code>log(logger:log(),logger:config()) ->ok</code> - <p>It may also implement the following callbacks:</p> + <code>log(logger:log_event(),logger:config()) -> ok</code> + <p>It can optionally also implement the following callbacks:</p> <code> -adding_handler(logger:handler_id(),logger:config()) -> {ok,logger:config()} | {error,term()} -removing_handler(logger:handler_id(),logger:config()) -> ok -changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()} +adding_handler(logger:config()) -> {ok,logger:config()} | {error,term()} +removing_handler(logger:config()) -> ok +changing_config(logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()} </code> - <p>When logger:add_handler(Id,Module,Config) is called, logger - will first call Module:adding_handler(Id,Config), and if it - returns {ok,NewConfig} the NewConfig is written to the - configuration database. After this, the handler may receive log - events as calls to Module:log/2.</p> + <p>When <c>logger:add_handler(Id,Module,Config)</c> is called, + Logger first calls <c>HModule:adding_handler(Config)</c>. If + this function returns <c>{ok,NewConfig}</c>, Logger + writes <c>NewConfig</c> to the configuration database, and + the <c>logger:add_handler/3</c> call returns. After this, the + handler is installed and must be ready to receive log events as + calls to <c>HModule:log/2</c>.</p> <p>A handler can be removed by calling - logger:remove_handler(Id). logger will call - Module:removing_handler(Id,Config), and then remove the handler's - configuration from the configuration database.</p> - <p>When logger:set_handler_config is called, logger calls - Module:changing_config(Id,OldConfig,NewConfig). If this function - returns ok, the NewConfig is written to the configuration - database.</p> - - <p>A simple handler which prints to the console could be - implemented as follows:</p> + <c>logger:remove_handler(Id)</c>. Logger calls + <c>HModule:removing_handler(Config)</c>, and removes the + handler's configuration from the configuration database.</p> + <p>When <c>logger:set_handler_config/2,3</c> + or <c>logger:update_handler_config/2</c> is called, Logger + calls <c>HModule:changing_config(OldConfig,NewConfig)</c>. If + this function returns <c>{ok,NewConfig}</c>, Logger + writes <c>NewConfig</c> to the configuration database.</p> + + <p>A simple handler that prints to the terminal can be implemented + as follows:</p> <code> -module(myhandler). -export([log/2]). -log(Log,#{formatter:={FModule,FConfig}) -> - io:put_chars(FModule:format(Log,FConfig)). +log(LogEvent,#{formatter:={FModule,FConfig}) -> + io:put_chars(FModule:format(LogEvent,FConfig)). </code> <p>A simple handler which prints to file could be implemented like this:</p> <code> -module(myhandler). --export([adding_handler/2, removing_handler/2, log/2]). +-export([adding_handler/1, removing_handler/1, log/2]). 
-export([init/1, handle_call/3, handle_cast/2, terminate/2]). -adding_handler(Id,Config) -> +adding_handler(Config) -> {ok,Fd} = file:open(File,[append,{encoding,utf8}]), - {ok,Config#{myhandler_fd=>Fd}}. + {ok,Config#{myhandler_fd => Fd}}. -removing_handler(Id,#{myhandler_fd:=Fd}) -> +removing_handler(#{myhandler_fd:=Fd}) -> _ = file:close(Fd), ok. -log(Log,#{myhandler_fd:=Fd,formatter:={FModule,FConfig}}) -> - io:put_chars(Fd,FModule:format(Log,FConfig)). +log(LogEvent,#{myhandler_fd:=Fd,formatter:={FModule,FConfig}}) -> + io:put_chars(Fd,FModule:format(LogEvent,FConfig)). </code> <note><p>The above handlers do not have any overload protection, and all log events are printed directly from the client process.</p></note> - <p>For examples of overload protection, please refer to the - implementation - of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso> - and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c> + <p>For information and examples of overload protection, please + refer to + section <seealso marker="#overload_protection">Protecting the + Handler from Overload</seealso>, and the implementation + of <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso> + and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c> </seealso>.</p> <p>Below is a simpler example of a handler which logs through one single process.</p> <code> -module(myhandler). --export([adding_handler/2, removing_handler/2, log/2]). +-export([adding_handler/1, removing_handler/1, log/2]). -export([init/1, handle_call/3, handle_cast/2, terminate/2]). -adding_handler(Id,Config) -> +adding_handler(Config) -> {ok,Pid} = gen_server:start(?MODULE,Config), - {ok,Config#{myhandler_pid=>Pid}}. + {ok,Config#{myhandler_pid => Pid}}. -removing_handler(Id,#{myhandler_pid:=Pid}) -> +removing_handler(#{myhandler_pid:=Pid}) -> gen_server:stop(Pid). -log(Log,#{myhandler_pid:=Pid} = Config) -> - gen_server:cast(Pid,{log,Log,Config}). +log(LogEvent,#{myhandler_pid:=Pid} = Config) -> + gen_server:cast(Pid,{log,LogEvent,Config}). init(#{myhandler_file:=File}) -> {ok,Fd} = file:open(File,[append,{encoding,utf8}]), - {ok,#{file=>File,fd=>Fd}}. + {ok,#{file => File, fd => Fd}}. handle_call(_,_,State) -> {reply,{error,bad_request},State}. -handle_cast({log,Log,Config},#{fd:=Fd} = State) -> - do_log(Fd,Log,Config), +handle_cast({log,LogEvent,Config},#{fd:=Fd} = State) -> + do_log(Fd,LogEvent,Config), {noreply,State}. terminate(Reason,#{fd:=Fd}) -> _ = file:close(Fd), ok. -do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> - String = FModule:format(Log,FConfig), +do_log(Fd,LogEvent,#{formatter:={FModule,FConfig}}) -> + String = FModule:format(LogEvent,FConfig), io:put_chars(Fd,String). </code> </section> <section> <marker id="overload_protection"/> - <title>Protecting the handler from overload</title> + <title>Protecting the Handler from Overload</title> <p>In order for the built-in handlers to survive, and stay responsive, during periods of high load (i.e. when huge numbers of incoming log requests must be handled), a mechanism for overload protection @@ -610,7 +958,7 @@ do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> as follows:</p> <section> - <title>Message queue length</title> + <title>Message Queue Length</title> <p>The handler process keeps track of the length of its message queue and reacts in different ways depending on the current status. 
The purpose is to keep the handler in, or (as quickly as possible), @@ -627,7 +975,7 @@ do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> and as long as the length of the message queue is lower, all log requests are handled asynchronously. This simply means that the process sending the log request (by calling a log function in the - logger API) does not wait for a response from the handler but + Logger API) does not wait for a response from the handler but continues executing immediately after the request (i.e. it will not be affected by the time it takes the handler to print to the log device). If the message queue grows larger than this value, however, @@ -661,10 +1009,20 @@ do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> </item> </taglist> - <p>For the overload protection algorithm to work properly, it is a - requirement that:</p> + <p>For the overload protection algorithm to work properly, it is + required that:</p> + + <p><c>toggle_sync_qlen =< drop_new_reqs_qlen =< flush_reqs_qlen</c></p> + + <p>and that:</p> + + <p><c>drop_new_reqs_qlen > 1</c></p> - <p><c>toggle_sync_qlen < drop_new_reqs_qlen < flush_reqs_qlen</c></p> + <p>If <c>toggle_sync_qlen</c> is set to <c>0</c>, the handler will handle all + requests synchronously. Setting the value of <c>toggle_sync_qlen</c> to the same + as <c>drop_new_reqs_qlen</c>, disables the synchronous mode. Likewise, setting + the value of <c>drop_new_reqs_qlen</c> to the same as <c>flush_reqs_qlen</c>, + disables the drop mode.</p> <p>During high load scenarios, the length of the handler message queue rarely grows in a linear and predictable way. Instead, whenever the @@ -695,14 +1053,14 @@ logger:add_handler(my_standard_h, logger_std_h, </section> <section> - <title>Controlling bursts of log requests</title> + <title>Controlling Bursts of Log Requests</title> <p>A potential problem with large bursts of log requests, is that log files may get full or wrapped too quickly (in the latter case overwriting previously logged data that could be of great importance). For this reason, both built-in handlers offer the possibility to set a maximum level of how many requests to process with a certain time frame. With this burst control feature enabled, the handler will take care of bursts of log requests - without choking log files, or the console, with massive amounts of + without choking log files, or the terminal, with massive amounts of printouts. These are the configuration parameters:</p> <taglist> @@ -736,7 +1094,7 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h, </section> <section> - <title>Terminating a large handler</title> + <title>Terminating a Large Handler</title> <p>A handler process may grow large even if it can manage peaks of high load without crashing. 
The overload protection mechanism includes user configurable levels for a maximum allowed message queue length and maximum allowed memory @@ -773,7 +1131,14 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h, <section> <title>See Also</title> - <p><seealso marker="error_logger"><c>error_logger(3)</c></seealso>, - <seealso marker="sasl:sasl_app"><c>SASL(6)</c></seealso></p> + <p> + <seealso marker="disk_log"><c>disk_log(3)</c></seealso>, + <seealso marker="error_logger"><c>error_logger(3)</c></seealso>, + <seealso marker="logger"><c>logger(3)</c></seealso>, + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>, + <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>, + <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>, + <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>, + <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p> </section> </chapter> diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml index 90cc4fec30..20b49b8ca0 100644 --- a/lib/kernel/doc/src/logger_disk_log_h.xml +++ b/lib/kernel/doc/src/logger_disk_log_h.xml @@ -33,21 +33,21 @@ <file>logger_disk_log_h.xml</file> </header> <module>logger_disk_log_h</module> - <modulesummary>A disk_log based handler for the Logger - application.</modulesummary> + <modulesummary>A disk_log based handler for the Logger.</modulesummary> <description> - <p>This is a handler for the Logger application that offers circular - (wrapped) logs by using the disk_log application. Multiple instances - of this handler can be added to logger, and each instance will print to + <p>This is a handler for Logger that offers circular + (wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>. + Multiple instances + of this handler can be added to Logger, and each instance prints to its own disk_log file, created with the name and settings specified in the handler configuration.</p> <p>The default standard handler, <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be - replaced by a disk_log handler at startup of the kernel application. + replaced by a disk_log handler at startup of the Kernel application. See an example of this below.</p> <p>The handler has an overload protection mechanism that will keep the handler - process and the kernel application alive during a high load of log + process and the Kernel application alive during a high load of log requests. How this feature works, and how to modify the configuration, is described in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> @@ -121,11 +121,11 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h, #{filesync_repeat_interval => 1000}}). </code> <p>In order to use the disk_log handler instead of the default standard - handler when starting en Erlang node, use the kernel configuration parameter - <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with - value <c>{disk_log,FileName}</c>. Example:</p> + handler when starting an Erlang node, change the Kernel default logger to + use disk_log. 
Example:</p> <code type="none"> -erl -kernel logger_dest '{disk_log,"./system_disk_log"}' +erl -kernel logger '[{handler,default,logger_disk_log_h, + #{ disk_log_opts => #{ file => "./system_disk_log"}}}]' </code> </description> @@ -141,6 +141,12 @@ erl -kernel logger_dest '{disk_log,"./system_disk_log"}' </funcs> + <section> + <title>See Also</title> + <p><seealso marker="logger"><c>logger(3)</c></seealso></p> + <p><seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso></p> + <p><seealso marker="disk_log"><c>disk_log(3)</c></seealso></p> + </section> </erlref> diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml index c34ec7d14c..f92181ea3f 100644 --- a/lib/kernel/doc/src/logger_filters.xml +++ b/lib/kernel/doc/src/logger_filters.xml @@ -52,57 +52,100 @@ <funcs> <func> <name name="domain" arity="2"/> - <fsummary>Filter log events based on the domain field in metadata.</fsummary> + <fsummary>Filter log events based on the domain field in + metadata.</fsummary> <desc> - <p>This filter provides a way of filtering log events based on a - <c>domain</c> field <c>Metadata</c>.</p> - - <p>The <c><anno>Extra</anno></c> parameter is specified when - adding the filter - via <seealso marker="logger#add_logger_filter-2"> - <c>logger:add_logger_filter/2</c></seealso> - or <seealso marker="logger#add_handler_filter-3"> - <c>logger:add_handler_filter/3</c></seealso>.</p> - - <p>The filter compares the value of the <c>domain</c> field - in the log event's metadata (<c>Domain</c>) - to <c><anno>MatchDomain</anno></c> as follows:</p> - - <taglist> - <tag><c><anno>Compare</anno> = starts_with</c></tag> - <item><p>The filter matches if <c>MatchDomain</c> is a prefix - of <c>Domain</c>.</p></item> - <tag><c><anno>Compare</anno> = prefix_of</c></tag> - <item><p>The filter matches if <c>Domain</c> is a prefix - of <c>MatchDomain</c>.</p></item> - <tag><c><anno>Compare</anno> = equals</c></tag> - <item><p>The filter matches if <c>Domain</c> is equal - to <c>MatchDomain</c>.</p></item> - <tag><c><anno>Compare</anno> = no_domain</c></tag> - <item><p>The filter matches if there is no domain field in - metadata. In this case <c><anno>MatchDomain</anno></c> shall - be <c>[]</c>.</p></item> - </taglist> - - <p>If the filter matches and <c><anno>Action</anno> = - log</c>, the log event is allowed. If the filter matches - and <c><anno>Action</anno> = stop</c>, the log event is - stopped.</p> - - <p>If the filter does not match, it returns <c>ignore</c>, - meaning that other filters, or the value of the - configuration parameter <c>filter_default</c>, will decide - if the event is allowed or not.</p> - - <p>Log events that do not contain any domain field, will - only match when <c><anno>Compare</anno> = no_domain</c>.</p> - - <p>Example: stop all events with - domain <c>[beam,erlang,otp,sasl|_]</c></p> - - <code> + <p>This filter provides a way of filtering log events based on a + <c>domain</c> field in <c>Metadata</c>. This field is + optional, and the purpose of using it is to group log events + from, for example, a specific functional area. This allows + filtering or other specialized treatment in a Logger + handler.</p> + + <p>A domain field must be a list of atoms, creating smaller + and more specialized domains as the list grows longer. 
The + biggest domain is <c>[]</c>, which comprices all + possible domains.</p> + + <p>For example, consider the following domains:</p> + <pre> +D1 = [beam,erlang,otp] +D2 = [beam,erlang,otp,sasl]</pre> + + <p><c>D1</c> is the biggest of the two, and is said to be a + super-domain of <c>D2</c>. <c>D2</c> is a + sub-domain <c>D1</c>. Both <c>D1</c> and <c>D2</c> are + sub-domains of <c>[]</c></p> + + <p>The above domains are used for logs originating from + Erlang/OTP. D1 specifies that the log event comes from + Erlang/OTP in general, and D2 indicates that the log event + is a so + called <seealso marker="logger_chapter#sasl_reports">SASL + report</seealso>.</p> + + <p>The <c><anno>Extra</anno></c> parameter to + the <c>domain/2</c> function is specified when adding the + filter via <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> + or <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso>.</p> + + <p>The filter compares the value of the <c>domain</c> field + in the log event's metadata (<c>Domain</c>) + to <c><anno>MatchDomain</anno></c> as follows:</p> + + <taglist> + <tag><c><anno>Compare</anno> = sub</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is equal to or + a sub-domain of <c>MatchDomain</c>, that is, + if <c>MatchDomain</c> is a prefix of <c>Domain</c>.</p> + </item> + <tag><c><anno>Compare</anno> = super</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is equal to or a + super-domain of <c>MatchDomain</c>, that is, + if <c>Domain</c> is a prefix of <c>MatchDomain</c>.</p> + </item> + <tag><c><anno>Compare</anno> = equal</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is equal + to <c>MatchDomain</c>.</p> + </item> + <tag><c><anno>Compare</anno> = not_equal</c></tag> + <item> + <p>The filter matches if <c>Domain</c> is not equal + to <c>MatchDomain</c>, or if there is no domain field in + metadata.</p> + </item> + <tag><c><anno>Compare</anno> = undefined</c></tag> + <item><p>The filter matches if there is no domain field in + metadata. In this case <c><anno>MatchDomain</anno></c> + must be set to <c>[]</c>.</p> + </item> + </taglist> + + <p>If the filter matches and <c><anno>Action</anno> = log</c>, + the log event is allowed. If the filter matches + and <c><anno>Action</anno> = stop</c>, the log event is + stopped.</p> + + <p>If the filter does not match, it returns <c>ignore</c>, + meaning that other filters, or the value of the + configuration parameter <c>filter_default</c>, decide if the + event is allowed or not.</p> + + <p>Log events that do not contain any domain field, match only + when <c><anno>Compare</anno> = undefined</c> + or <c><anno>Compare</anno> = not_equal</c>.</p> + + <p>Example: stop all events with + domain <c>[beam,erlang,otp,sasl|_]</c></p> + + <code> logger:set_handler_config(h1,filter_default,log). % this is the default -Filter = {fun logger_filters:domain/2,{stop,starts_with,[beam,erlang,otp,sasl]}}. +Filter = {fun logger_filters:domain/2,{stop,sub,[beam,erlang,otp,sasl]}}. logger:add_handler_filter(h1,no_sasl,Filter). ok</code> </desc> diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml index 7df4c88f40..02f89b26be 100644 --- a/lib/kernel/doc/src/logger_formatter.xml +++ b/lib/kernel/doc/src/logger_formatter.xml @@ -39,102 +39,157 @@ <p>Each log handler has a configured formatter specified as a module and a configuration term. 
The purpose of the formatter is to translate the log events to a final printable string - (<c>unicode:chardata()</c>) which can be written to the output + (<seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c> + </seealso>) which can be written to the output device of the handler.</p> <p><c>logger_formatter</c> is the default formatter used by Logger.</p> </description> - <section> - <title>Configuration</title> - <p>The configuration term for <c>logger_formatter</c> is a map, - and the following keys can be set as configuration - parameters:</p> - <taglist> - <tag><c>chars_limit = pos_integer() | unlimited</c></tag> - <item> - <p>A positive integer representing the value of the option - with the same name to be used when calling - <seealso marker="stdlib:io_lib#format-3">io_lib:format/3</seealso>. - This value limits the total number of characters printed - for each log event. Notice that this is a soft limit. For a - hard truncation limit, see option <c>max_size</c>.</p> - <p>Default is <c>unlimited</c>.</p> - <note> - <p><c>chars_limit</c> has no effect on log messages on - string form. These are expected to be short, but can still - be truncated by the <c>max_size</c> parameter.</p> - </note> - </item> - <tag><c>depth = pos_integer() | unlimited</c></tag> - <item> - <p>A positive integer representing the maximum depth to - which terms shall be printed by this formatter. Format - strings passed to this formatter are rewritten. The format - controls ~p and ~w are replaced with ~P and ~W, - respectively, and the value is used as the depth - parameter. For details, see - <seealso marker="stdlib:io#format-2">io:format/2,3</seealso> - in STDLIB.</p> - <p>Default is <c>unlimited</c>.</p> - <note> - <p><c>depth</c> has no effect on log messages on string - form. These are expected to be short, but can still be - truncated by the <c>max_size</c> parameter.</p> - </note> - </item> - <tag><c>max_size = pos_integer() | unlimited</c></tag> - <item> - <p>A positive integer representing the absolute maximum size a - string returned from this formatter can have. If the - formatted string is longer, after possibly being limited - by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p> - <p>Default is <c>unlimited</c>.</p> - </item> - <tag><c>single_line = boolean()</c></tag> - <item> - <p>If set to <c>true</c>, all newlines in the message are - replaced with <c>", "</c>, and whitespaces following - directly after newlines are removed. Note that newlines - added by the <c>template</c> parameter are not replaced.</p> - <p>Default is <c>true</c>.</p> - </item> - <tag><c>legacy_header = boolen()</c></tag> - <item> - <p>If set to <c>true</c> a header field is added to - logger_formatter's part of <c>Metadata</c>. The value of - this field is a string similar to the header created by the - old <c>error_logger</c> event handlers. It can be included - in the log event by adding the - tuple <c>{logger_formatter,header}</c> to the template. See - section <seealso marker="#default_templates">Default - Templates</seealso> for more information.</p> - <p>Default is <c>false</c>.</p> - </item> - <tag><c>report_cb = fun((</c><seealso marker="logger#type-report"><c>logger:report()</c></seealso><c>) -> {</c><seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso><c>,[term()]})</c></tag> - <item> - <p>A report callback is used by the formatter to transform log - messages on report form to a format string and - arguments. 
The report callback can be specified in the - metadata for the log event. If no report callback exist in - metadata, <c>logger_formatter</c> will - use <seealso marker="logger#format_report-1"> - <c>logger:format_report/1</c></seealso> as default - callback.</p> - <p>If this configuration parameter is set, it replaces both - the default report callback, and any report callback found - in metadata. That is, all reports are converted by this - configured function.</p> - <p>The value must be a function with arity 1, - returning <c>{Format,Args}</c>, and it will be called with a - report as only argument.</p> - </item> - <tag><c>template = </c><seealso marker="#type-template"><c>template()</c></seealso></tag> - <item> + + <datatypes> + <datatype> + <name name="config"/> + <desc> + <p>The configuration term for <c>logger_formatter</c> is a + <seealso marker="stdlib:maps">map</seealso>, and the + following keys can be set as configuration parameters:</p> + <taglist> + <tag><c>chars_limit = pos_integer() | unlimited</c></tag> + <item> + <p>A positive integer representing the value of the option + with the same name to be used when calling + <seealso marker="stdlib:io_lib#format-3"> + <c>io_lib:format/3</c></seealso>. + This value limits the total number of characters printed + for each log event. Notice that this is a soft limit. For a + hard truncation limit, see option <c>max_size</c>.</p> + <p>Defaults to <c>unlimited</c>.</p> + <note> + <p><c>chars_limit</c> has no effect on log messages on + string form. These are expected to be short, but can + still be truncated by the <c>max_size</c> + parameter.</p> + </note> + </item> + <tag><marker id="depth"/><c>depth = pos_integer() | unlimited</c></tag> + <item> + <p>A positive integer representing the maximum depth to + which terms shall be printed by this formatter. Format + strings passed to this formatter are rewritten. The + format controls ~p and ~w are replaced with ~P and ~W, + respectively, and the value is used as the depth + parameter. For details, see + <seealso marker="stdlib:io#format-2">io:format/2,3</seealso> + in STDLIB.</p> + <p>Defaults to <c>unlimited</c>.</p> + <note> + <p><c>depth</c> has no effect on log messages on string + form. These are expected to be short, but can still be + truncated by the <c>max_size</c> parameter.</p> + </note> + </item> + <tag><c>legacy_header = boolean()</c></tag> + <item> + <p>If set to <c>true</c> a header field is added to + logger_formatter's part of <c>Metadata</c>. The value of + this field is a string similar to the header created by + the old <c>error_logger</c> event handlers. It can be + included in the log event by adding the + tuple <c>{logger_formatter,header}</c> to the + template. See the description of + the <seealso marker="#type-template"><c>template()</c></seealso> + type for more information.</p> + <p>Defaults to <c>false</c>.</p> + </item> + <tag><c>max_size = pos_integer() | unlimited</c></tag> + <item> + <p>A positive integer representing the absolute maximum size a + string returned from this formatter can have. 
If the + formatted string is longer, after possibly being limited + by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p> + <p>Defaults to <c>unlimited</c>.</p> + </item> + <tag><c>report_cb = fun((</c><seealso marker="logger#type-report"><c>logger:report()</c></seealso><c>) -> {</c><seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso><c>,[term()]})</c></tag> + <item> + <p>A report callback is used by the formatter to transform + log messages on report form to a format string and + arguments. The report callback can be specified in the + metadata for the log event. If no report callback exist + in metadata, <c>logger_formatter</c> will + use <seealso marker="logger#format_report-1"> + <c>logger:format_report/1</c></seealso> as default + callback.</p> + <p>If this configuration parameter is set, it replaces + both the default report callback, and any report + callback found in metadata. That is, all reports are + converted by this configured function.</p> + <p>The value must be a function with arity 1, + returning <c>{Format,Args}</c>, and it will be called + with a report as only argument.</p> + </item> + <tag><c>single_line = boolean()</c></tag> + <item> + <p>If set to <c>true</c>, all newlines in the message are + replaced with <c>", "</c>, and whitespaces following + directly after newlines are removed. Note that newlines + added by the <c>template</c> parameter are not replaced.</p> + <p>Defaults to <c>true</c>.</p> + </item> + <tag><c>template = </c><seealso marker="#type-template"><c>template()</c></seealso></tag> + <item> + <p>The template describes how the formatted string is + composed by combining different data values from the log + event. See the description of + the <seealso marker="#type-template"><c>template()</c></seealso> + type for more information about this.</p> + </item> + <tag><c>time_designator = byte()</c></tag> + <item> + <p>Timestamps are formatted according to RFC3339, and the + time designator is the character used as date and time + separator.</p> + <p>Defaults to <c>$T</c>.</p> + <p>The value of this parameter is used as + the <c>time_designator</c> option + to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2"> + <c>calendar:system_time_to_rcf3339/2</c></seealso>.</p> + </item> + <tag><c>time_offset = integer() | [byte()]</c></tag> + <item> + <p>The time offset, either a string or an integer, to be + used when formatting the timestamp.</p> + <p>An empty string is interpreted as local time. The + values <c>"Z"</c>, <c>"z"</c> or <c>0</c> are + interpreted as Universal Coordinated Time (UTC).</p> + <p>Strings, other than <c>"Z"</c>, <c>"z"</c>, + or <c>""</c>, must be on the form <c>±[hh]:[mm]</c>, for + example <c>"-02:00"</c> or <c>"+00:00"</c>.</p> + <p>Integers must be in microseconds, meaning that the + offset <c>7200000000</c> is equivalent + to <c>"+02:00"</c>.</p> + <p>Defaults to an empty string, meaning that timestamps + are displayed in local time. 
However, for backwards + compatibility, if the SASL configuration + parameter <seealso marker="sasl:sasl_app#utc_log"> + <c>utc_log</c></seealso><c>=true</c>, the default is + changed to <c>"Z"</c>, meaning that timestamps are displayed + in UTC.</p> + <p>The value of this parameter is used as + the <c>offset</c> option + to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2"> + <c>calendar:system_time_to_rcf3339/2</c></seealso>.</p> + </item> + </taglist> + </desc> + </datatype> + <datatype> + <name name="template"/> + <desc> <p>The template is a list of atoms, tuples and strings. The atoms <c>level</c> or <c>msg</c>, are treated as placeholders for the severity level and the log message, - repectively. Other atoms or tuples are interpreted as + respectively. Other atoms or tuples are interpreted as placeholders for metadata, where atoms are expected to match top level keys, and tuples represent paths to sub keys when the metadata is a nested map. For example the @@ -152,78 +207,85 @@ <p>Strings in the template are printed literally.</p> <p>The default template differs depending on the values of <c>legacy_header</c> - and <c>single_line</c>. See <seealso marker="#default_templates">Default - Templates</seealso> for more information</p> - </item> - <tag><c>utc = boolean()</c></tag> - <item> - <p>If set to <c>true</c>, all dates are displayed in Universal - Coordinated Time.</p> - <p>Default is <c>false</c>.</p> - </item> - </taglist> - </section> - - <section> - <marker id="default_templates"/> - <title>Default templates</title> - - <p>The default value for the <c>template</c> configuration - parameter depends on the value of <c>single_line</c> - and <c>legacy_header</c> as follows.</p> - - <p>The log event used in the examples is:</p> - <code> -?LOG_ERROR("name: ~p~nexit_reason: ~p",[my_reg_name,"It crashed"])</code> - - <taglist> - <tag><c>legacy_header=true</c></tag> - <item> - <p>Default template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p> - - <p>Example log entry:</p> + and <c>single_line</c>:</p> + + <p>The default value for the <c>template</c> configuration + parameter depends on the value of the <c>single_line</c> + and <c>legacy_header</c> configuration parameters as + follows.</p> + + <p>The log event used in the examples is:</p> <code> -=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 === -name: my_reg_name +?LOG_ERROR("name: ~p~nexit_reason: ~p",[my_name,"It crashed"])</code> + + <taglist> + <tag><c>legacy_header=true, single_line=false</c></tag> + <item> + <p>Default + template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p> + + <p>Example log entry:</p> + <code type="none"> +=ERROR REPORT==== 17-May-2018::18:30:19.453447 === +name: my_name exit_reason: "It crashed"</code> - <p>Notice that all eight levels might occur in the heading, - not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as the - old <c>error_logger</c> produced. And microseconds are - added at the end of the timestamp.</p> - </item> + <p>Notice that all eight levels can occur in the heading, + not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as the + old <c>error_logger</c> produced. 
And microseconds are + added at the end of the timestamp.</p> + </item> - <tag><c>single_line=true</c></tag> - <item> - <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p> + <tag><c>legacy_header=true, single_line=true</c></tag> + <item> + <p>Default + template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p> - <p>Example log entry:</p> - <code>2017-12-29 13:31:49.640317 error: name: my_reg_name, exit_reason: "It crashed"</code> - </item> + <p>Notice that the template is here the same as + for <c>single_line=false</c>, but the resulting log entry + differs in that there is only one line after the + heading:</p> + <code type="none"> +=ERROR REPORT==== 17-May-2018::18:31:06.952665 === +name: my_name, exit_reason: "It crashed"</code> + </item> - <tag><c>legacy_header=false, single_line=false</c></tag> - <item> - <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p> + <tag><c>legacy_header=false, single_line=true</c></tag> + <item> + <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p> - <p>Example log entry:</p> - <code> -2017-12-29 13:32:25.191925 error: -name: my_reg_name -exit_reason: "It crashed"</code> - </item> - </taglist> - </section> + <p>Example log entry:</p> + <code type="none"> +2018-05-17T18:31:31.152864+02:00 error: name: my_name, exit_reason: "It crashed"</code> + </item> - <datatypes> - <datatype> - <name name="template"/> - <desc> + <tag><c>legacy_header=false, single_line=false</c></tag> + <item> + <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p> + + <p>Example log entry:</p> + <code type="none"> +2018-05-17T18:32:20.105422+02:00 error: +name: my_name +exit_reason: "It crashed"</code> + </item> + </taglist> </desc> </datatype> </datatypes> <funcs> <func> + <name name="check_config" arity="1"/> + <fsummary>Validates the given formatter configuration.</fsummary> + <desc> + <p>This callback function is called by Logger when the + formatter configuration for a handler is set or modified. It + returns <c>ok</c> if the configuration is valid, + and <c>{error,term()}</c> if it is faulty.</p> + </desc> + </func> + <func> <name name="format" arity="2"/> <fsummary>Formats the given message.</fsummary> <desc> @@ -245,7 +307,6 @@ exit_reason: "It crashed"</code> </list> </desc> </func> - </funcs> </erlref> diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml index fe9b9ca5a9..a4f2848037 100644 --- a/lib/kernel/doc/src/logger_std_h.xml +++ b/lib/kernel/doc/src/logger_std_h.xml @@ -33,17 +33,17 @@ <file>logger_std_h.xml</file> </header> <module>logger_std_h</module> - <modulesummary>Default handler for the Logger application.</modulesummary> + <modulesummary>Default handler for Logger.</modulesummary> <description> - <p>This is the default handler for the Logger - application. Multiple instances of this handler can be added to - logger, and each instance will print logs to <c>standard_io</c>, + <p>This is the default handler for Logger. + Multiple instances of this handler can be added to + Logger, and each instance will print logs to <c>standard_io</c>, <c>standard_error</c> or to file. 
The default instance that starts - with kernel is named <c>logger_std_h</c> - which is the name to be used + with Kernel is named <c>default</c> - which is the name to be used for reconfiguration.</p> <p>The handler has an overload protection mechanism that will keep the handler - process and the kernel application alive during a high load of log + process and the Kernel application alive during a high load of log requests. How this feature works, and how to modify the configuration, is described in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> @@ -57,7 +57,7 @@ are stored in a sub map with the key <c>logger_std_h</c>. The following keys and values may be specified:</p> <taglist> - <tag><c>type</c></tag> + <tag><marker id="type"/><c>type</c></tag> <item> <p>This will have the value <c>standard_io</c>, <c>standard_error</c>, <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>, @@ -104,12 +104,11 @@ logger:add_handler(my_standard_h, logger_std_h, filesync_repeat_interval => 1000}}). </code> <p>In order to configure the default handler (that starts initially with - the kernel application) to log to file instead of <c>standard_io</c>, - use the kernel configuration parameter - <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with - value <c>{file,FileName}</c>. Example:</p> + the Kernel application) to log to file instead of <c>standard_io</c>, + change the Kernel default logger to use a file. Example:</p> <code type="none"> -erl -kernel logger_dest '{file,"./erl.log"}' +erl -kernel logger '[{handler,default,logger_std_h, + #{ logger_std_h => #{ type => {file,"./log.log"}}}}]' </code> <p>An example of how to replace the standard handler with a disk_log handler at startup can be found in the manual of @@ -128,6 +127,11 @@ erl -kernel logger_dest '{file,"./erl.log"}' </funcs> + <section> + <title>See Also</title> + <p><seealso marker="logger"><c>logger(3)</c></seealso></p> + <p><seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso></p> + </section> </erlref> diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml index a633ae4832..b6c2714664 100644 --- a/lib/kernel/doc/src/ref_man.xml +++ b/lib/kernel/doc/src/ref_man.xml @@ -32,9 +32,11 @@ </description> <xi:include href="kernel_app.xml"/> + <xi:include href="app.xml"/> <xi:include href="application.xml"/> <xi:include href="auth.xml"/> <xi:include href="code.xml"/> + <xi:include href="config.xml"/> <xi:include href="disk_log.xml"/> <xi:include href="erl_boot_server.xml"/> <xi:include href="erl_ddll.xml"/> @@ -67,6 +69,4 @@ <xi:include href="user.xml"/> <xi:include href="wrap_log_reader.xml"/> <xi:include href="zlib_stub.xml"/> - <xi:include href="app.xml"/> - <xi:include href="config.xml"/> </application> diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile index eeb8c6ab2f..f265fdd272 100644 --- a/lib/kernel/src/Makefile +++ b/lib/kernel/src/Makefile @@ -118,7 +118,7 @@ MODULES = \ logger_filters \ logger_formatter \ logger_server \ - logger_simple \ + logger_simple_h \ logger_sup \ net \ net_adm \ @@ -279,7 +279,7 @@ $(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../inclu $(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl $(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl $(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl -$(EBIN)/logger_simple.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_simple_h.beam: logger_internal.hrl 
../include/logger.hrl $(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl $(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl $(EBIN)/net_kernel.beam: ../include/net_address.hrl diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl index bbfa2a995d..1a7677295b 100644 --- a/lib/kernel/src/code_server.erl +++ b/lib/kernel/src/code_server.erl @@ -1437,7 +1437,7 @@ error_msg(Format, Args) -> logger ! {log,error,Format,Args, #{pid=>self(), gl=>group_leader(), - time=>erlang:monotonic_time(microsecond), + time=>erlang:system_time(microsecond), error_logger=>#{tag=>error}}}, ok. @@ -1446,7 +1446,7 @@ info_msg(Format, Args) -> logger ! {log,info,Format,Args, #{pid=>self(), gl=>group_leader(), - time=>erlang:monotonic_time(microsecond), + time=>erlang:system_time(microsecond), error_logger=>#{tag=>info_msg}}}, ok. diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl index 47d0ca5ea3..b3957d0c7e 100644 --- a/lib/kernel/src/error_logger.erl +++ b/lib/kernel/src/error_logger.erl @@ -32,7 +32,7 @@ which_report_handlers/0]). %% logger callbacks --export([adding_handler/2, removing_handler/2, log/2]). +-export([adding_handler/1, removing_handler/1, log/2]). -export([get_format_depth/0, limit_term/1]). @@ -101,9 +101,9 @@ stop() -> %%%----------------------------------------------------------------- %%% Callbacks for logger --spec adding_handler(logger:handler_id(),logger:config()) -> +-spec adding_handler(logger:config()) -> {ok,logger:config()} | {error,term()}. -adding_handler(?MODULE,Config) -> +adding_handler(#{id:=?MODULE}=Config) -> case start() of ok -> {ok,Config}; @@ -111,12 +111,12 @@ adding_handler(?MODULE,Config) -> Error end. --spec removing_handler(logger:handler_id(),logger:config()) -> ok. -removing_handler(?MODULE,_Config) -> +-spec removing_handler(logger:config()) -> ok. +removing_handler(#{id:=?MODULE}) -> stop(), ok. --spec log(logger:log(),logger:config()) -> ok. +-spec log(logger:log_event(),logger:config()) -> ok. log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) -> do_log(Level,Msg,Meta). @@ -529,18 +529,38 @@ logfile(filename) -> Flag :: boolean(). tty(true) -> - case lists:member(error_logger_tty_h, which_report_handlers()) of - false -> - add_report_handler(error_logger_tty_h, []); - true -> - ignore - end, + _ = case lists:member(error_logger_tty_h, which_report_handlers()) of + false -> + case logger:get_handler_config(default) of + {ok,{logger_std_h,#{logger_std_h:=#{type:=standard_io}}}} -> + logger:remove_handler_filter(default, + error_logger_tty_false); + _ -> + logger:add_handler(error_logger_tty_true,logger_std_h, + #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS( + [beam,erlang,otp]), + formatter=>{?DEFAULT_FORMATTER, + ?DEFAULT_FORMAT_CONFIG}, + logger_std_h=>#{type=>standard_io}}) + end; + true -> + ok + end, ok; tty(false) -> - delete_report_handler(error_logger_tty_h). + delete_report_handler(error_logger_tty_h), + _ = logger:remove_handler(error_logger_tty_true), + _ = case logger:get_handler_config(default) of + {ok,{logger_std_h,#{logger_std_h:=#{type:=standard_io}}}} -> + logger:add_handler_filter(default,error_logger_tty_false, + {fun(_,_) -> stop end, ok}); + _ -> + ok + end, + ok. %%%----------------------------------------------------------------- - -spec limit_term(term()) -> term(). limit_term(Term) -> @@ -552,4 +572,9 @@ limit_term(Term) -> -spec get_format_depth() -> 'unlimited' | pos_integer(). 
get_format_depth() -> - logger:get_format_depth(). + case application:get_env(kernel, error_logger_format_depth) of + {ok, Depth} when is_integer(Depth) -> + max(10, Depth); + undefined -> + unlimited + end. diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl index 57d8fc7a15..1d4e37196c 100644 --- a/lib/kernel/src/file.erl +++ b/lib/kernel/src/file.erl @@ -69,7 +69,7 @@ %% Types that can be used from other modules -- alphabetically ordered. -export_type([date_time/0, fd/0, file_info/0, filename/0, filename_all/0, - io_device/0, name/0, name_all/0, posix/0]). + io_device/0, mode/0, name/0, name_all/0, posix/0]). %%% Includes and defines -include("file_int.hrl"). diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl index a6aa0edd15..3526df3600 100644 --- a/lib/kernel/src/gen_sctp.erl +++ b/lib/kernel/src/gen_sctp.erl @@ -118,6 +118,8 @@ open() -> | inet:address_family() | {port,Port} | {type,SockType} + | {netns, file:filename_all()} + | {bind_to_device, binary()} | option(), IP :: inet:ip_address() | any | loopback, Port :: inet:port_number(), diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl index ac61dbc792..253c63528f 100644 --- a/lib/kernel/src/gen_tcp.erl +++ b/lib/kernel/src/gen_tcp.erl @@ -102,6 +102,8 @@ inet:address_family() | {port, inet:port_number()} | {tcp_module, module()} | + {netns, file:filename_all()} | + {bind_to_device, binary()} | option(). -type listen_option() :: {ip, inet:socket_address()} | @@ -111,6 +113,8 @@ {port, inet:port_number()} | {backlog, B :: non_neg_integer()} | {tcp_module, module()} | + {netns, file:filename_all()} | + {bind_to_device, binary()} | option(). -type socket() :: port(). diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl index 3121544719..9ab58011ec 100644 --- a/lib/kernel/src/gen_udp.erl +++ b/lib/kernel/src/gen_udp.erl @@ -97,6 +97,8 @@ open(Port) -> | {ifaddr, inet:socket_address()} | inet:address_family() | {port, inet:port_number()} + | {netns, file:filename_all()} + | {bind_to_device, binary()} | option(), Socket :: socket(), Reason :: inet:posix(). diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src index afffcd156e..d873178f55 100644 --- a/lib/kernel/src/kernel.app.src +++ b/lib/kernel/src/kernel.app.src @@ -68,7 +68,7 @@ logger_formatter, logger_h_common, logger_server, - logger_simple, + logger_simple_h, logger_std_h, logger_sup, net, @@ -140,7 +140,10 @@ inet_db, pg2]}, {applications, []}, - {env, []}, + {env, [{logger_level, info}, + {logger_sasl_compatible, false}, + {logger_progress_reports, stop} + ]}, {mod, {kernel, []}}, {runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]} ] diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl index ae982c1741..b0e8c00bbf 100644 --- a/lib/kernel/src/kernel.erl +++ b/lib/kernel/src/kernel.erl @@ -30,17 +30,13 @@ %%% Callback functions for the kernel application. %%%----------------------------------------------------------------- start(_, []) -> + %% Setup the logger and configure the kernel logger environment + ok = logger:internal_init_logger(), case supervisor:start_link({local, kernel_sup}, kernel, []) of {ok, Pid} -> ok = erl_signal_handler:start(), - %% add error handler - case logger:setup_standard_handler() of - ok -> {ok, Pid, []}; - Error -> - %% Not necessary since the node will crash anyway: - exit(Pid, shutdown), - Error - end; + ok = logger:add_handlers(kernel), + {ok, Pid, []}; Error -> Error end. 
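As a reading aid for the kernel.app.src and kernel.erl changes above: the new Kernel environment keys (logger_level, logger_sasl_compatible, logger_progress_reports, and the logger handler list) are consumed by logger:internal_init_logger/0 and logger:add_handlers(kernel) at Kernel start. A minimal sys.config sketch combining these keys could look like the following; the file name and the chosen values are illustrative only and are not defaults taken from this patch.

%% sys.config sketch (illustrative values, not defaults)
[{kernel,
  [{logger_level, info},
   {logger_sasl_compatible, false},
   {logger_progress_reports, stop},
   {logger,
    [{handler, default, logger_std_h,
      #{logger_std_h => #{type => {file, "./log/erlang.log"}}}}]}
  ]}].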
@@ -147,7 +143,7 @@ init([]) -> case init:get_argument(mode) of {ok, [["minimal"]]} -> {ok, {SupFlags, - [Code, File, StdError, User, Config, RefC, SafeSup, LoggerSup]}}; + [Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}}; _ -> Rpc = #{id => rex, start => {rpc, start_link, []}, diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl index 98a9937111..3beb3102fa 100644 --- a/lib/kernel/src/logger.erl +++ b/lib/kernel/src/logger.erl @@ -37,18 +37,22 @@ -export([add_handler/3, remove_handler/1, add_logger_filter/2, add_handler_filter/3, remove_logger_filter/1, remove_handler_filter/2, - set_module_level/2, reset_module_level/1, + set_module_level/2, unset_module_level/1, set_logger_config/1, set_logger_config/2, set_handler_config/2, set_handler_config/3, - get_logger_config/0, get_handler_config/1]). + update_logger_config/1, update_handler_config/2, + update_formatter_config/2, update_formatter_config/3, + get_logger_config/0, get_handler_config/1, + add_handlers/1]). + +%% Private configuration +-export([internal_init_logger/0]). %% Misc -export([compare_levels/2]). -export([set_process_metadata/1, update_process_metadata/1, unset_process_metadata/0, get_process_metadata/0]). -export([i/0, i/1]). --export([setup_standard_handler/0, replace_simple_handler/3]). --export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]). %% Basic report formatting -export([format_report/1, format_otp_report/1]). @@ -60,11 +64,11 @@ %%%----------------------------------------------------------------- %%% Types --type log() :: #{level:=level(), - msg:={io:format(),[term()]} | - {report,report()} | - {string,unicode:chardata()}, - meta:=metadata()}. +-type log_event() :: #{level:=level(), + msg:={io:format(),[term()]} | + {report,report()} | + {string,unicode:chardata()}, + meta:=metadata()}. -type level() :: emergency | alert | critical | error | warning | notice | info | debug. -type report() :: map() | [{atom(),term()}]. @@ -77,24 +81,32 @@ mfa => {module(),atom(),non_neg_integer()}, file => file:filename(), line => non_neg_integer(), - term() => term()}. + domain => [atom()], + report_cb => fun((report()) -> {io:format(),[term()]}), + atom() => term()}. -type location() :: #{mfa := {module(),atom(),non_neg_integer()}, file := file:filename(), line := non_neg_integer()}. -type handler_id() :: atom(). -type filter_id() :: atom(). --type filter() :: {fun((log(),filter_arg()) -> filter_return()),filter_arg()}. +-type filter() :: {fun((log_event(),filter_arg()) -> + filter_return()),filter_arg()}. -type filter_arg() :: term(). --type filter_return() :: stop | ignore | log(). --type config() :: #{level => level(), +-type filter_return() :: stop | ignore | log_event(). +-type config() :: #{id => handler_id(), + level => level(), filter_default => log | stop, filters => [{filter_id(),filter()}], - formatter => {module(),term()}, - term() => term()}. + formatter => {module(),formatter_config()}, + atom() => term()}. -type timestamp() :: integer(). +-type formatter_config() :: #{atom() => term()}. + +-type config_handler() :: {handler, handler_id(), module(), config()}. --export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0, - filter_id/0,filter/0,filter_arg/0,filter_return/0]). +-export_type([log_event/0,level/0,report/0,msg_fun/0,metadata/0,config/0, + handler_id/0,filter_id/0,filter/0,filter_arg/0,filter_return/0, + config_handler/0,formatter_config/0]). 
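The renamed types above pin down what Logger accepts as a filter: a pair of a fun taking a log_event() and a filter_arg(), returning stop, ignore, or a possibly modified log_event(). Below is a sketch of a hand-written filter with exactly that shape, attached to a handler; the filter id and the choice to stop the [beam,erlang,otp,sasl] domain are invented for the example (the built-in logger_filters:domain/2 covers the same case).

%% Hand-written filter matching filter(): stop events whose domain begins
%% with [beam,erlang,otp,sasl], take no stand on anything else.
DropSasl = {fun(#{meta := #{domain := [beam,erlang,otp,sasl|_]}}, _Extra) ->
                    stop;
               (_LogEvent, _Extra) ->
                    ignore
            end,
            undefined},
ok = logger:add_handler_filter(default, drop_sasl_sketch, DropSasl).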
%%%----------------------------------------------------------------- %%% API @@ -357,10 +369,22 @@ set_handler_config(HandlerId,Key,Value) -> set_handler_config(HandlerId,Config) -> logger_server:set_config(HandlerId,Config). +-spec update_logger_config(Config) -> ok | {error,term()} when + Config :: config(). +update_logger_config(Config) -> + logger_server:update_config(logger,Config). + +-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when + HandlerId :: handler_id(), + Config :: config(). +update_handler_config(HandlerId,Config) -> + logger_server:update_config(HandlerId,Config). + -spec get_logger_config() -> {ok,Config} when Config :: config(). get_logger_config() -> - logger_config:get(?LOGGER_TABLE,logger). + {ok,Config} = logger_config:get(?LOGGER_TABLE,logger), + {ok,maps:remove(handlers,Config)}. -spec get_handler_config(HandlerId) -> {ok,{Module,Config}} | {error,term()} when HandlerId :: handler_id(), @@ -369,16 +393,31 @@ get_logger_config() -> get_handler_config(HandlerId) -> logger_config:get(?LOGGER_TABLE,HandlerId). +-spec update_formatter_config(HandlerId,FormatterConfig) -> + ok | {error,term()} when + HandlerId :: config(), + FormatterConfig :: formatter_config(). +update_formatter_config(HandlerId,FormatterConfig) -> + logger_server:update_formatter_config(HandlerId,FormatterConfig). + +-spec update_formatter_config(HandlerId,Key,Value) -> + ok | {error,term()} when + HandlerId :: config(), + Key :: atom(), + Value :: term(). +update_formatter_config(HandlerId,Key,Value) -> + logger_server:update_formatter_config(HandlerId,#{Key=>Value}). + -spec set_module_level(Module,Level) -> ok | {error,term()} when Module :: module(), Level :: level(). set_module_level(Module,Level) -> logger_server:set_module_level(Module,Level). --spec reset_module_level(Module) -> ok | {error,term()} when +-spec unset_module_level(Module) -> ok | {error,term()} when Module :: module(). -reset_module_level(Module) -> - logger_server:reset_module_level(Module). +unset_module_level(Module) -> + logger_server:unset_module_level(Module). %%%----------------------------------------------------------------- %%% Misc @@ -441,8 +480,9 @@ i() -> i(_Action = print) -> io:put_chars(i(string)); i(_Action = string) -> - #{logger := #{level := Level, handlers := Handlers, - filters := Filters, filter_default := FilterDefault}, + #{logger := #{level := Level, + filters := Filters, + filter_default := FilterDefault}, handlers := HandlerConfigs, module_levels := Modules} = i(term), [io_lib:format("Current logger configuration:~n", []), @@ -451,16 +491,15 @@ i(_Action = string) -> io_lib:format(" Filters: ~n", []), print_filters(4, Filters), io_lib:format(" Handlers: ~n", []), - print_handlers([C || {Id, _, _} = C <- HandlerConfigs, - lists:member(Id, Handlers)]), + print_handlers(HandlerConfigs), io_lib:format(" Level set per module: ~n", []), print_module_levels(Modules) ]; i(_Action = term) -> {Logger, Handlers, Modules} = logger_config:get(tid()), - #{logger=>Logger, - handlers=>Handlers, - module_levels=>Modules}. + #{logger=>maps:remove(handlers,Logger), + handlers=>lists:keysort(1,Handlers), + module_levels=>lists:keysort(1,Modules)}. print_filters(Indent, {Id, {Fun, Config}}) -> io_lib:format("~sId: ~p~n" @@ -504,204 +543,209 @@ print_module_levels({Module,Level}) -> print_module_levels(ModuleLevels) -> lists:map(fun print_module_levels/1, ModuleLevels). --spec setup_standard_handler() -> ok | {error,term()}. 
-setup_standard_handler() -> - case get_logger_type() of - {ok,silent} -> - Level = get_logger_level(), - ok = set_logger_config(level,Level), - remove_handler(logger_simple); - {ok,Type} -> - Level = get_logger_level(), - ok = set_logger_config(level,Level), - Filters = get_logger_filters(), - setup_standard_handler(Type,#{level=>Level, - filter_default=>stop, - filters=>Filters}); - Error -> - Error +-spec internal_init_logger() -> ok | {error,term()}. +%% This function is responsible for config of the logger +%% This is done before add_handlers because we want the +%% logger settings to take effect before the kernel supervisor +%% tree is started. +internal_init_logger() -> + try + ok = logger:set_logger_config(level, get_logger_level()), + ok = logger:set_logger_config(filter_default, get_logger_filter_default()), + + [case logger:add_logger_filter(Id, Filter) of + ok -> ok; + {error, Reason} -> throw(Reason) + end || {Id, Filter} <- get_logger_filters()], + + _ = [[case logger:set_module_level(Module, Level) of + ok -> ok; + {error, Reason} -> throw(Reason) + end || Module <- Modules] + || {module_level, Level, Modules} <- get_logger_env()], + + case logger:set_handler_config(simple,filters, + get_default_handler_filters()) of + ok -> ok; + {error,{not_found,simple}} -> ok + end, + + init_kernel_handlers() + catch throw:Reason -> + ?LOG_ERROR("Invalid logger config: ~p", [Reason]), + {error, {bad_config, {kernel, Reason}}} end. --spec setup_standard_handler(Type,Config) -> ok | {error,term()} when - Type :: tty | standard_io | standard_error | {file,File} | - {file,File,Modes} | {disk_log,LogOpts} | false, - File :: file:filename(), - Modes :: [term()], % [file:mode()], or more specific? - Config :: config(), - LogOpts :: map(). -setup_standard_handler(false,#{level:=Level,filters:=Filters}) -> - case set_handler_config(logger_simple,level,Level) of - ok -> - set_handler_config(logger_simple,filters,Filters); - Error -> - Error - end; -setup_standard_handler(Type,Config) -> - {Module,TypeConfig} = get_type_config(Type), - replace_simple_handler(?STANDARD_HANDLER, - Module, - maps:merge(Config,TypeConfig)). - --spec replace_simple_handler(Id,Module,Config) -> ok | {error,term()} when - Id :: handler_id(), - Module :: module(), - Config :: config(). -replace_simple_handler(Id,Module,Config) -> - _ = code:ensure_loaded(Module), - DoBuffer = erlang:function_exported(Module,swap_buffer,2), - case add_handler(Id,Module,Config#{wait_for_buffer=>DoBuffer}) of - ok -> - if DoBuffer -> - {ok,Buffered} = logger_simple:get_buffer(), - _ = remove_handler(logger_simple), - Module:swap_buffer(?STANDARD_HANDLER,Buffered); - true -> - _ = remove_handler(logger_simple), - ok - end, - ok; - Error -> - Error +-spec init_kernel_handlers() -> ok | {error,term()}. +%% Setup the kernel environment variables to be correct +%% The actual handlers are started by a call to add_handlers. +init_kernel_handlers() -> + try + case get_logger_type() of + {ok,silent} -> + ok = logger:remove_handler(simple); + {ok,false} -> + ok; + {ok,Type} -> + init_default_config(Type) + end + catch throw:Reason -> + ?LOG_ERROR("Invalid default handler config: ~p", [Reason]), + {error, {bad_config, {kernel, Reason}}} + end. + +-spec add_handlers(Application) -> ok | {error,term()} when + Application :: atom(); + (HandlerConfig) -> ok | {error,term()} when + HandlerConfig :: [config_handler()]. +%% This function is responsible for resolving the handler config +%% and then starting the correct handlers. 
This is done after the +%% kernel supervisor tree has been started as it needs the logger_sup. +add_handlers(App) when is_atom(App) -> + add_handlers(application:get_env(App, logger, [])); +add_handlers(HandlerConfig) -> + try + check_logger_config(HandlerConfig), + DefaultAdded = + lists:foldl( + fun({handler, default = Id, Module, Config}, _) + when not is_map_key(filters, Config) -> + %% The default handler should have a couple of extra filters + %% set on it by default. + DefConfig = #{ filter_default => stop, + filters => get_default_handler_filters()}, + setup_handler(Id, Module, maps:merge(DefConfig,Config)), + true; + ({handler, Id, Module, Config}, Default) -> + setup_handler(Id, Module, Config), + Default orelse Id == default; + (_, Default) -> Default + end, false, HandlerConfig), + %% If a default handler was added we try to remove the simple_logger + %% If the simple logger exists it will replay its log events + %% to the handler(s) added in the fold above. + _ = [case logger:remove_handler(simple) of + ok -> ok; + {error,{not_found,simple}} -> ok + end || DefaultAdded], + ok + catch throw:Reason -> + ?LOG_ERROR("Invalid logger handler config: ~p", [Reason]), + {error, {bad_config, {handler, Reason}}} end. +setup_handler(Id, Module, Config) -> + case logger:add_handler(Id, Module, Config) of + ok -> ok; + {error, Reason} -> throw(Reason) + end. + +check_logger_config(_) -> + ok. + +-spec get_logger_type() -> {ok, standard_io | false | silent | + {file, file:name_all()} | + {file, file:name_all(), [file:mode()]}}. get_logger_type() -> - Type0 = - case application:get_env(kernel, logger_dest) of - undefined -> - application:get_env(kernel, error_logger); - T -> - T - end, - case Type0 of + case application:get_env(kernel, error_logger) of {ok, tty} -> - {ok, tty}; + {ok, standard_io}; {ok, {file, File}} when is_list(File) -> {ok, {file, File}}; {ok, {file, File, Modes}} when is_list(File), is_list(Modes) -> {ok, {file, File, Modes}}; - {ok, {disk_log, File}} when is_list(File) -> - {ok, {disk_log, get_disk_log_config(File)}}; {ok, false} -> {ok, false}; {ok, silent} -> {ok, silent}; undefined -> - {ok, tty}; % default value + case lists:member({handler,default,undefined}, get_logger_env()) of + true -> + {ok, false}; + false -> + {ok, standard_io} % default value + end; {ok, Bad} -> - {error,{bad_config, {kernel, {logger_dest, Bad}}}} + throw({error_logger, Bad}) end. -get_disk_log_config(File) -> - Config1 = - case application:get_env(kernel,logger_disk_log_maxfiles) of - undefined -> #{}; - {ok,MF} -> #{max_no_files=>MF} - end, - Config2 = - case application:get_env(kernel,logger_disk_log_maxbytes) of - undefined -> Config1; - {ok,MB} -> Config1#{max_no_bytes=>MB} - end, - Config3 = - case application:get_env(kernel,logger_disk_log_type) of - undefined -> Config2; - {ok,T} -> Config1#{type=>T} - end, - Config3#{file=>File}. - get_logger_level() -> - case application:get_env(kernel,logger_level) of - undefined -> info; - {ok,Level} when ?IS_LEVEL(Level) -> Level + case application:get_env(kernel,logger_level,info) of + Level when ?IS_LEVEL(Level) -> + Level; + Level -> + throw({logger_level, Level}) + end. + +get_logger_filter_default() -> + case lists:keyfind(filters,1,get_logger_env()) of + {filters,Default,_} -> + Default; + false -> + log end. get_logger_filters() -> + lists:foldl( + fun({filters, _, Filters}, _Acc) -> + Filters; + (_, Acc) -> + Acc + end, [], get_logger_env()). 
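For orientation, add_handlers/1 above also accepts the handler list directly, iterating over the same {handler, Id, Module, Config} entries found under the Kernel logger parameter ({filters, ...} and {module_level, ...} entries are instead picked up by internal_init_logger/0). A sketch of a direct call is shown below; the handler id and file name are made up for the example.

%% Sketch: adding a handler programmatically with the list format that
%% add_handlers/1 iterates over (illustrative id and path).
ok = logger:add_handlers(
       [{handler, debug_file_h, logger_std_h,
         #{level => debug,
           logger_std_h => #{type => {file, "./debug.log"}}}}]).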
+ +%% This function looks at the kernel logger environment +%% and updates it so that the correct logger is configured +init_default_config(Type) when Type==standard_io; + Type==standard_error; + element(1,Type)==file -> + Env = get_logger_env(), + DefaultFormatter = #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}, + DefaultConfig = DefaultFormatter#{logger_std_h=>#{type=>Type}}, + NewLoggerEnv = + case lists:keyfind(default, 2, Env) of + {handler, default, Module, Config} -> + lists:map( + fun({handler, default, logger_std_h, _}) -> + %% Only want to add the logger_std_h config + %% if not configured by user AND the default + %% handler is still the logger_std_h. + {handler, default, Module, maps:merge(DefaultConfig,Config)}; + ({handler, default, logger_disk_log_h, _}) -> + %% Add default formatter. The point of this + %% is to get the expected formatter config + %% for the default handler, since this + %% differs from the default values that + %% logger_formatter itself adds. + {handler, default, logger_disk_log_h, maps:merge(DefaultFormatter,Config)}; + (Other) -> + Other + end, Env); + _ -> + %% Nothing has been configured, use default + [{handler, default, logger_std_h, DefaultConfig} | Env] + end, + application:set_env(kernel, logger, NewLoggerEnv, [{timeout,infinity}]); +init_default_config(Type) -> + throw({illegal_logger_type,Type}). + +get_default_handler_filters() -> case application:get_env(kernel, logger_sasl_compatible, false) of true -> ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp]); false -> Extra = - case application:get_env(kernel, logger_log_progress, false) of - true -> + case application:get_env(kernel, logger_progress_reports, stop) of + log -> []; - false -> + stop -> [{stop_progress, {fun logger_filters:progress/2,stop}}] end, Extra ++ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp,sasl]) end. -get_type_config({disk_log,LogOpts}) -> - {logger_disk_log_h,#{disk_log_opts=>LogOpts}}; -get_type_config(tty) -> - %% This is only for backwards compatibility with error_logger and - %% old kernel and sasl environment variables - get_type_config(standard_io); -get_type_config(Type) when Type==standard_io; - Type==standard_error; - element(1,Type)==file -> - {logger_std_h,#{logger_std_h=>#{type=>Type}}}; -get_type_config(Type) -> - {error,{illegal_logger_type,Type}}. - -%%%----------------------------------------------------------------- --spec limit_term(term()) -> term(). - -limit_term(Term) -> - try get_format_depth() of - unlimited -> Term; - D -> io_lib:limit_term(Term, D) - catch error:badarg -> - %% This could happen during system termination, after - %% application_controller process is dead. - unlimited - end. - --spec get_format_depth() -> 'unlimited' | pos_integer(). - -get_format_depth() -> - Depth = - case application:get_env(kernel, logger_format_depth) of - {ok, D} when is_integer(D) -> - D; - undefined -> - case application:get_env(kernel, error_logger_format_depth) of - {ok, D} when is_integer(D) -> - D; - undefined -> - unlimited - end - end, - max(10, Depth). - --spec get_max_size() -> 'unlimited' | pos_integer(). - -get_max_size() -> - case application:get_env(kernel, logger_max_size) of - {ok, Size} when is_integer(Size) -> - max(50, Size); - undefined -> - unlimited - end. - --spec get_utc_config() -> boolean(). 
- -get_utc_config() -> - %% Kernel's logger_utc configuration overrides SASL utc_log, which - %% in turn overrides stdlib config - in order to have uniform - %% timestamps in log messages - case application:get_env(kernel, logger_utc) of - {ok, Val} -> Val; - undefined -> - case application:get_env(sasl, utc_log) of - {ok, Val} -> Val; - undefined -> - case application:get_env(stdlib, utc_log) of - {ok, Val} -> Val; - undefined -> false - end - end - end. +get_logger_env() -> + application:get_env(kernel, logger, []). %%%----------------------------------------------------------------- %%% Internal @@ -819,7 +863,7 @@ proc_meta() -> default(pid) -> self(); default(gl) -> group_leader(); -default(time) -> erlang:monotonic_time(microsecond). +default(time) -> erlang:system_time(microsecond). %% Remove everything upto and including this module from the stacktrace filter_stacktrace(Module,[{Module,_,_,_}|_]) -> diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl index d9f5aa6faf..b3cf7d67dd 100644 --- a/lib/kernel/src/logger_backend.erl +++ b/lib/kernel/src/logger_backend.erl @@ -58,7 +58,7 @@ call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) -> debug, [{logger,removed_failing_handler}, {handler,{Id,Module}}, - {log,Log1}, + {log_event,Log1}, {config,Config1}, {reason,{C,R,filter_stacktrace(S)}}]); {error,{not_found,_}} -> @@ -122,7 +122,7 @@ handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) -> [{logger,removed_failing_filter}, {filter,Filter}, {owner,Owner}, - {log,Log}, + {log_event,Log}, {reason,Reason}]); _ -> ok diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl index 799aea9617..1d35c2e068 100644 --- a/lib/kernel/src/logger_config.erl +++ b/lib/kernel/src/logger_config.erl @@ -24,14 +24,14 @@ allow/2,allow/3, get/2, get/3, get/1, create/3, create/4, set/3, - set_module_level/3,reset_module_level/2, + set_module_level/3,unset_module_level/2, cache_module_level/2, level_to_int/1]). -include("logger_internal.hrl"). new(Name) -> - _ = ets:new(Name,[set,protected,named_table]), + _ = ets:new(Name,[set,protected,named_table,{write_concurrency,true}]), ets:whereis(Name). delete(Tid,Id) -> @@ -109,7 +109,7 @@ set_module_level(Tid,Module,Level) -> ets:insert(Tid,{Module,level_to_int(Level)}), ok. -reset_module_level(Tid,Module) -> +unset_module_level(Tid,Module) -> ets:delete(Tid,Module), % should possibley overwrite instead of delete? ok. diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl index 0150fa781a..773aa75bc6 100644 --- a/lib/kernel/src/logger_disk_log_h.erl +++ b/lib/kernel/src/logger_disk_log_h.erl @@ -34,8 +34,8 @@ %% logger callbacks -export([log/2, - adding_handler/2, removing_handler/2, - changing_config/3, swap_buffer/2]). + adding_handler/1, removing_handler/1, + changing_config/2, swap_buffer/2]). 
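Note: the handler callbacks drop the separate Name argument; the handler id now arrives inside the config map under the id key. A minimal hypothetical handler module under the new arities (module name invented), mirroring the adding_handler/1 and removing_handler/1 clauses that follow:

%% Sketch only - a pass-through handler written against the new callback shape.
-module(my_noop_h).
-export([adding_handler/1, removing_handler/1, changing_config/2, log/2]).

adding_handler(#{id := _Id} = Config)  -> {ok, Config}.
removing_handler(#{id := _Id})         -> ok.
changing_config(_OldConfig, NewConfig) -> {ok, NewConfig}.
log(LogEvent, #{formatter := {FMod, FConfig}}) ->
    io:put_chars(FMod:format(LogEvent, FConfig)).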
%%%=================================================================== %%% API @@ -108,8 +108,8 @@ reset(Name) -> %%%----------------------------------------------------------------- %%% Handler being added -adding_handler(Name, Config) -> - case check_config(adding, Name, Config) of +adding_handler(#{id:=Name}=Config) -> + case check_config(adding, Config) of {ok, Config1} -> %% create initial handler state by merging defaults with config HConfig = maps:get(?MODULE, Config1, #{}), @@ -136,10 +136,9 @@ adding_handler(Name, Config) -> %%%----------------------------------------------------------------- %%% Updating handler config -changing_config(Name, - OldConfig=#{id:=Id, disk_log_opts:=DLOpts}, - NewConfig=#{id:=Id, disk_log_opts:=DLOpts}) -> - case check_config(changing, Name, NewConfig) of +changing_config(OldConfig=#{id:=Name, disk_log_opts:=DLOpts}, + NewConfig=#{id:=Name, disk_log_opts:=DLOpts}) -> + case check_config(changing, NewConfig) of Result = {ok,NewConfig1} -> try gen_server:call(Name, {change_config,OldConfig,NewConfig1}, ?DEFAULT_CALL_TIMEOUT) of @@ -151,12 +150,10 @@ changing_config(Name, Error -> Error end; -changing_config(_Name, OldConfig, NewConfig) -> +changing_config(OldConfig, NewConfig) -> {error,{illegal_config_change,OldConfig,NewConfig}}. -check_config(adding, Name, Config0) -> - %% Merge in defaults on top level - Config = maps:merge(#{id => Name}, Config0), +check_config(adding, #{id:=Name}=Config) -> %% Merge in defaults on handler level LogOpts0 = maps:get(disk_log_opts, Config, #{}), LogOpts = merge_default_logopts(Name, LogOpts0), @@ -173,7 +170,7 @@ check_config(adding, Name, Config0) -> Error -> Error end; -check_config(changing, _Name, Config) -> +check_config(changing, Config) -> MyConfig = maps:get(?MODULE, Config, #{}), case check_my_config(maps:to_list(MyConfig)) of ok -> {ok,Config}; @@ -223,7 +220,7 @@ check_my_config([]) -> %%%----------------------------------------------------------------- %%% Handler being removed -removing_handler(Name, _Config) -> +removing_handler(#{id:=Name}) -> stop(Name). %%%----------------------------------------------------------------- @@ -238,15 +235,15 @@ swap_buffer(Name,Buffer) -> %%%----------------------------------------------------------------- %%% Log a string or report --spec log(Log, Config) -> ok | dropped when - Log :: logger:log(), +-spec log(LogEvent, Config) -> ok | dropped when + LogEvent :: logger:log_event(), Config :: logger:config(). -log(Log,Config=#{id:=Name}) -> +log(LogEvent,Config=#{id:=Name}) -> %% if the handler has crashed, we must drop this request %% and hope the handler restarts so we can try again true = is_pid(whereis(Name)), - Bin = logger_h_common:log_to_binary(Log,Config), + Bin = logger_h_common:log_to_binary(LogEvent,Config), logger_h_common:call_cast_or_drop(Name, Bin). 
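Note: a hedged usage sketch for the handler as changed above; the handler id, file name and sizes are invented. check_config(adding, ...) merges these disk_log_opts with the defaults from merge_default_logopts/2.

%% Hypothetical installation of a wrap-log based handler:
ok = logger:add_handler(my_disk_log, logger_disk_log_h,
                        #{disk_log_opts => #{file         => "./my_disk_log",
                                             type         => wrap,
                                             max_no_files => 4,
                                             max_no_bytes => 10000}}).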
@@ -278,10 +275,11 @@ init([Name, Config = #{disk_log_opts := LogOpts}, last_log_ts => T0, burst_win_ts => T0, burst_msg_count => 0, + last_op => sync, prev_log_result => ok, prev_sync_result => ok, prev_disk_log_info => undefined}), - gen_server:cast(self(), {repeated_disk_log_sync,T0}), + gen_server:cast(self(), repeated_disk_log_sync), enter_loop(Config, State1); Error -> logger_h_common:error_notify({open_disk_log,Name,Error}), @@ -316,8 +314,7 @@ handle_call(disk_log_sync, _From, State = #{id := Name}) -> {reply, Result, State1}; handle_call({change_config,_OldConfig,NewConfig}, _From, - State = #{filesync_repeat_interval := FSyncInt0, - last_log_ts := LastLogTS}) -> + State = #{filesync_repeat_interval := FSyncInt0}) -> HConfig = maps:get(?MODULE, NewConfig, #{}), State1 = #{toggle_sync_qlen := TSQL, drop_new_reqs_qlen := DNRQL, @@ -338,9 +335,8 @@ handle_call({change_config,_OldConfig,NewConfig}, _From, _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, undefined)), - _ = gen_server:cast(self(), {repeated_disk_log_sync, - LastLogTS}) - end, + _ = gen_server:cast(self(), repeated_disk_log_sync) + end, {reply, ok, State1}; false -> {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State} @@ -370,24 +366,23 @@ handle_cast({log, Bin}, State) -> %% clause gets called repeatedly by the handler. In order to %% guarantee that a filesync *always* happens after the last log %% request, the repeat operation must be active! -handle_cast({repeated_disk_log_sync,LastLogTS0}, +handle_cast(repeated_disk_log_sync, State = #{id := Name, filesync_repeat_interval := FSyncInt, - last_log_ts := LastLogTS1}) -> + last_op := LastOp}) -> State1 = if is_integer(FSyncInt) -> %% only do filesync if something has been %% written since last time we checked - NewState = if LastLogTS1 == LastLogTS0 -> + NewState = if LastOp == sync -> State; true -> disk_log_sync(Name, State) end, {ok,TRef} = timer:apply_after(FSyncInt, gen_server,cast, - [self(), - {repeated_disk_log_sync,LastLogTS1}]), - NewState#{rep_sync_tref => TRef}; + [self(),repeated_disk_log_sync]), + NewState#{rep_sync_tref => TRef, last_op => sync}; true -> State end, @@ -649,10 +644,9 @@ close_disk_log(Name, _) -> ok. disk_log_write(Name, Bin, State) -> - Result = case ?disk_log_blog(Name, Bin) of ok -> - ok; + State#{prev_log_result => ok, last_op => write}; LogError -> _ = case maps:get(prev_log_result, State) of LogError -> @@ -664,29 +658,26 @@ disk_log_write(Name, Bin, State) -> LogOpts, LogError}) end, - LogError - end, - State#{prev_log_result => Result}. + State#{prev_log_result => LogError} + end. disk_log_sync(Name, State) -> - Result = - case ?disk_log_sync(Name) of - ok -> - ok; - SyncError -> - _ = case maps:get(prev_sync_result, State) of - SyncError -> - %% don't report same error twice - ok; - _ -> - LogOpts = maps:get(log_opts, State), - logger_h_common:error_notify({Name,sync, - LogOpts, - SyncError}) - end, - SyncError - end, - State#{prev_sync_result => Result}. + case ?disk_log_sync(Name) of + ok -> + State#{prev_sync_result => ok, last_op => sync}; + SyncError -> + _ = case maps:get(prev_sync_result, State) of + SyncError -> + %% don't report same error twice + ok; + _ -> + LogOpts = maps:get(log_opts, State), + logger_h_common:error_notify({Name,sync, + LogOpts, + SyncError}) + end, + State#{prev_sync_result => SyncError} + end. 
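Note: the repeated sync no longer compares log timestamps; writes set last_op to write and syncs reset it to sync, so an idle handler skips redundant filesyncs. An illustrative restatement of that decision (helper name invented):

maybe_repeated_sync(Name, #{last_op := write} = State) ->
    disk_log_sync(Name, State);   % something was written since the last sync
maybe_repeated_sync(_Name, State) ->
    State.                        % last operation was already a sync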
error_notify_new(Info,Info, _Term) -> ok; diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl index 85928f0fd6..7359b3b4b7 100644 --- a/lib/kernel/src/logger_filters.erl +++ b/lib/kernel/src/logger_filters.erl @@ -27,30 +27,31 @@ -include("logger_internal.hrl"). -define(IS_ACTION(A), (A==log orelse A==stop)). --spec domain(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec domain(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: {Action,Compare,MatchDomain}, Action :: log | stop, - Compare :: prefix_of | starts_with | equals | no_domain, + Compare :: super | sub | equal | not_equal | undefined, MatchDomain :: list(atom()). -domain(#{meta:=Meta}=Log,{Action,Compare,MatchDomain}) +domain(#{meta:=Meta}=LogEvent,{Action,Compare,MatchDomain}) when ?IS_ACTION(Action) andalso - (Compare==prefix_of orelse - Compare==starts_with orelse - Compare==equals orelse - Compare==no_domain) andalso + (Compare==super orelse + Compare==sub orelse + Compare==equal orelse + Compare==not_equal orelse + Compare==undefined) andalso is_list(MatchDomain) -> - filter_domain(Compare,Meta,MatchDomain,on_match(Action,Log)); -domain(Log,Extra) -> - erlang:error(badarg,[Log,Extra]). + filter_domain(Compare,Meta,MatchDomain,on_match(Action,LogEvent)); +domain(LogEvent,Extra) -> + erlang:error(badarg,[LogEvent,Extra]). --spec level(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec level(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: {Action,Operator,MatchLevel}, Action :: log | stop, Operator :: neq | eq | lt | gt | lteq | gteq, MatchLevel :: logger:level(). -level(#{level:=L1}=Log,{Action,Op,L2}) +level(#{level:=L1}=LogEvent,{Action,Op,L2}) when ?IS_ACTION(Action) andalso (Op==neq orelse Op==eq orelse @@ -59,37 +60,40 @@ level(#{level:=L1}=Log,{Action,Op,L2}) Op==lteq orelse Op==gteq) andalso ?IS_LEVEL(L2) -> - filter_level(Op,L1,L2,on_match(Action,Log)); -level(Log,Extra) -> - erlang:error(badarg,[Log,Extra]). + filter_level(Op,L1,L2,on_match(Action,LogEvent)); +level(LogEvent,Extra) -> + erlang:error(badarg,[LogEvent,Extra]). --spec progress(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec progress(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: log | stop. -progress(Log,Action) when ?IS_ACTION(Action) -> - filter_progress(Log,on_match(Action,Log)); -progress(Log,Action) -> - erlang:error(badarg,[Log,Action]). +progress(LogEvent,Action) when ?IS_ACTION(Action) -> + filter_progress(LogEvent,on_match(Action,LogEvent)); +progress(LogEvent,Action) -> + erlang:error(badarg,[LogEvent,Action]). --spec remote_gl(Log,Extra) -> logger:filter_return() when - Log :: logger:log(), +-spec remote_gl(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), Extra :: log | stop. -remote_gl(Log,Action) when ?IS_ACTION(Action) -> - filter_remote_gl(Log,on_match(Action,Log)); -remote_gl(Log,Action) -> - erlang:error(badarg,[Log,Action]). +remote_gl(LogEvent,Action) when ?IS_ACTION(Action) -> + filter_remote_gl(LogEvent,on_match(Action,LogEvent)); +remote_gl(LogEvent,Action) -> + erlang:error(badarg,[LogEvent,Action]). 
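Note: a hedged usage sketch for the renamed comparison atoms (filter ids invented); the semantics follow the filter_domain/4 and filter_level/4 clauses below.

%% Hypothetical logger filters using the new atoms:
ok = logger:add_logger_filter(stop_sasl_domain,
        {fun logger_filters:domain/2, {stop, sub, [beam,erlang,otp,sasl]}}),
ok = logger:add_logger_filter(stop_below_error,
        {fun logger_filters:level/2, {stop, lt, error}}).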
%%%----------------------------------------------------------------- %%% Internal -filter_domain(prefix_of,#{domain:=Domain},MatchDomain,OnMatch) -> +filter_domain(super,#{domain:=Domain},MatchDomain,OnMatch) -> is_prefix(Domain,MatchDomain,OnMatch); -filter_domain(starts_with,#{domain:=Domain},MatchDomain,OnMatch) -> +filter_domain(sub,#{domain:=Domain},MatchDomain,OnMatch) -> is_prefix(MatchDomain,Domain,OnMatch); -filter_domain(equals,#{domain:=Domain},Domain,OnMatch) -> +filter_domain(equal,#{domain:=Domain},Domain,OnMatch) -> OnMatch; -filter_domain(Action,Meta,_,OnMatch) -> +filter_domain(not_equal,#{domain:=Domain},MatchDomain,OnMatch) + when Domain=/=MatchDomain -> + OnMatch; +filter_domain(Compare,Meta,_,OnMatch) -> case maps:is_key(domain,Meta) of - false when Action==no_domain -> OnMatch; + false when Compare==undefined; Compare==not_equal -> OnMatch; _ -> ignore end. @@ -119,5 +123,5 @@ filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() -> filter_remote_gl(_,_) -> ignore. -on_match(log,Log) -> Log; +on_match(log,LogEvent) -> LogEvent; on_match(stop,_) -> stop. diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl index 8e954f8d98..4d727b3da0 100644 --- a/lib/kernel/src/logger_formatter.erl +++ b/lib/kernel/src/logger_formatter.erl @@ -20,25 +20,28 @@ -module(logger_formatter). -export([format/2]). +-export([check_config/1]). -include("logger_internal.hrl"). %%%----------------------------------------------------------------- %%% Types +-type config() :: #{chars_limit=>pos_integer()| unlimited, + depth=>pos_integer() | unlimited, + legacy_header=>boolean(), + max_size=>pos_integer() | unlimited, + report_cb=>fun((logger:report()) -> {io:format(),[term()]}), + single_line=>boolean(), + template=>template(), + time_designator=>byte(), + time_offset=>integer()|[byte()]}. -type template() :: [atom()|tuple()|string()]. %%%----------------------------------------------------------------- %%% API --spec format(Log,Config) -> unicode:chardata() when - Log :: logger:log(), - Config :: #{single_line=>boolean(), - legacy_header=>boolean(), - report_cb=>fun((logger:report()) -> {io:format(),[term()]}), - chars_limit=>pos_integer()| unlimited, - max_size=>pos_integer() | unlimited, - depth=>pos_integer() | unlimited, - template=>template(), - utc=>boolean()}. +-spec format(LogEvent,Config) -> unicode:chardata() when + LogEvent :: logger:log_event(), + Config :: config(). format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0) when is_map(Config0) -> Config = add_default_config(Config0), @@ -83,8 +86,6 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0) do_format(Level,Msg,Data,[level|Format],Config) -> [to_string(level,Level,Config)|do_format(Level,Msg,Data,Format,Config)]; -do_format(Level,Msg,Data,[msg|Format],Config) -> - [Msg|do_format(Level,Msg,Data,Format,Config)]; do_format(Level,Msg,Data,[Key|Format],Config) when is_atom(Key); is_tuple(Key) -> Value = value(Key,Data), [to_string(Key,Value,Config)|do_format(Level,Msg,Data,Format,Config)]; @@ -128,9 +129,7 @@ to_string(X) -> io_lib:format("~tp",[X]). 
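Note: with the formatter configuration now a documented map, a handler's formatter can be tuned at run time. A hedged example, assuming the logger:update_formatter_config/2 front end for the logger_server call added later in this patch; the template is an example, not a default:

ok = logger:update_formatter_config(default,
        #{single_line => true,
          time_offset => "Z",
          template    => [time, " ", level, ": ", msg, "\n"]}).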
format_msg({string,Chardata},Meta,Config) -> - try unicode:characters_to_list(Chardata) - catch _:_ -> format_msg({"INVALID STRING: ~tp",[Chardata]},Meta,Config) - end; + format_msg({"~ts",[Chardata]},Meta,Config); format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) when is_function(Fun,1) -> format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config)); format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) -> @@ -195,29 +194,24 @@ truncate(String,Size) -> String end. -format_time(Timestamp,Config) when is_integer(Timestamp) -> - {Date,Time,Micro} = timestamp_to_datetimemicro(Timestamp,Config), - format_time(Date,Time,Micro); -format_time(Other,_Config) -> - %% E.g. a string - to_string(Other). - -format_time({Y,M,D},{H,Min,S},Micro) -> - io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w", - [Y,M,D,H,Min,S,Micro]). +%% SysTime is the system time in microseconds +format_time(SysTime,#{time_offset:=Offset,time_designator:=Des}) + when is_integer(SysTime) -> + calendar:system_time_to_rfc3339(SysTime,[{unit,microsecond}, + {offset,Offset}, + {time_designator,Des}]). -%% Assuming this is monotonic time in microseconds -timestamp_to_datetimemicro(Timestamp,Config) when is_integer(Timestamp) -> - SysTime = Timestamp + erlang:time_offset(microsecond), +%% SysTime is the system time in microseconds +timestamp_to_datetimemicro(SysTime,Config) when is_integer(SysTime) -> Micro = SysTime rem 1000000, Sec = SysTime div 1000000, UniversalTime = erlang:posixtime_to_universaltime(Sec), - {Date,Time} = - case Config of - #{utc:=true} -> UniversalTime; - _ -> erlang:universaltime_to_localtime(UniversalTime) + {{Date,Time},UtcStr} = + case offset_to_utc(maps:get(time_offset,Config)) of + true -> {UniversalTime,"UTC "}; + _ -> {erlang:universaltime_to_localtime(UniversalTime),""} end, - {Date,Time,Micro}. + {Date,Time,Micro,UtcStr}. format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_integer(A) -> atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A); @@ -230,9 +224,11 @@ maybe_add_legacy_header(Level, #{time:=Timestamp}=Meta, #{legacy_header:=true}=Config) -> #{title:=Title}=MyMeta = add_legacy_title(Level,maps:get(?MODULE,Meta,#{})), - {{Y,Mo,D},{H,Mi,S},Micro} = timestamp_to_datetimemicro(Timestamp,Config), - Header = io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===", - [Title,D,month(Mo),Y,H,Mi,S,Micro,utcstr(Config)]), + {{Y,Mo,D},{H,Mi,S},Micro,UtcStr} = + timestamp_to_datetimemicro(Timestamp,Config), + Header = + io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===", + [Title,D,month(Mo),Y,H,Mi,S,Micro,UtcStr]), Meta#{?MODULE=>MyMeta#{header=>Header}}; maybe_add_legacy_header(_,Meta,_) -> Meta. @@ -256,20 +252,20 @@ month(10) -> "Oct"; month(11) -> "Nov"; month(12) -> "Dec". -utcstr(#{utc:=true}) -> "UTC "; -utcstr(_) -> "". 
- -add_default_config(#{utc:=_}=Config0) -> +%% Ensure that all valid configuration parameters exist in the final +%% configuration map +add_default_config(Config0) -> Default = #{legacy_header=>false, single_line=>true, - chars_limit=>unlimited}, - MaxSize = get_max_size(maps:get(max_size,Config0,false)), - Depth = get_depth(maps:get(depth,Config0,false)), + chars_limit=>unlimited, + time_designator=>$T}, + MaxSize = get_max_size(maps:get(max_size,Config0,undefined)), + Depth = get_depth(maps:get(depth,Config0,undefined)), + Offset = get_offset(maps:get(time_offset,Config0,undefined)), add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize, - depth=>Depth})); -add_default_config(Config) -> - add_default_config(Config#{utc=>logger:get_utc_config()}). + depth=>Depth, + time_offset=>Offset})). add_default_template(#{template:=_}=Config) -> Config; @@ -283,12 +279,123 @@ default_template(#{single_line:=true}) -> default_template(_) -> ?DEFAULT_FORMAT_TEMPLATE. -get_max_size(false) -> - logger:get_max_size(); +get_max_size(undefined) -> + unlimited; get_max_size(S) -> max(10,S). -get_depth(false) -> - logger:get_format_depth(); +get_depth(undefined) -> + error_logger:get_format_depth(); get_depth(S) -> max(5,S). + +get_offset(undefined) -> + utc_to_offset(get_utc_config()); +get_offset(Offset) -> + Offset. + +utc_to_offset(true) -> + "Z"; +utc_to_offset(false) -> + "". + +get_utc_config() -> + %% SASL utc_log overrides stdlib config - in order to have uniform + %% timestamps in log messages + case application:get_env(sasl, utc_log) of + {ok, Val} when is_boolean(Val) -> Val; + _ -> + case application:get_env(stdlib, utc_log) of + {ok, Val} when is_boolean(Val) -> Val; + _ -> false + end + end. + +offset_to_utc(Z) when Z=:=0; Z=:="z"; Z=:="Z" -> + true; +offset_to_utc([$+|Tz]) -> + case io_lib:fread("~d:~d", Tz) of + {ok, [0, 0], []} -> + true; + _ -> + false + end; +offset_to_utc(_) -> + false. + +-spec check_config(Config) -> ok | {error,term()} when + Config :: config(). +check_config(Config) when is_map(Config) -> + do_check_config(maps:to_list(Config)); +check_config(Config) -> + {error,{invalid_formatter_config,?MODULE,Config}}. 
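Note: illustrative calls against the validation introduced here; the rejected value is made up.

ok = logger_formatter:check_config(#{time_offset     => "+02:00",
                                     time_designator => $\s,
                                     single_line     => true}),
{error, {invalid_formatter_config, logger_formatter, {time_offset, foo}}} =
    logger_formatter:check_config(#{time_offset => foo}).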
+ +do_check_config([{Type,L}|Config]) when Type == chars_limit; + Type == depth; + Type == max_size -> + case check_limit(L) of + ok -> do_check_config(Config); + error -> {error,{invalid_formatter_config,?MODULE,{Type,L}}} + end; +do_check_config([{single_line,SL}|Config]) when is_boolean(SL) -> + do_check_config(Config); +do_check_config([{legacy_header,LH}|Config]) when is_boolean(LH) -> + do_check_config(Config); +do_check_config([{report_cb,RCB}|Config]) when is_function(RCB,1) -> + do_check_config(Config); +do_check_config([{template,T}|Config]) when is_list(T) -> + case lists:all(fun(X) when is_atom(X) -> true; + (X) when is_tuple(X), is_atom(element(1,X)) -> true; + (X) when is_list(X) -> io_lib:printable_unicode_list(X); + (_) -> false + end, + T) of + true -> + do_check_config(Config); + false -> + {error,{invalid_formatter_template,?MODULE,T}} + end; +do_check_config([{time_offset,Offset}|Config]) -> + case check_offset(Offset) of + ok -> + do_check_config(Config); + error -> + {error,{invalid_formatter_config,?MODULE,{time_offset,Offset}}} + end; +do_check_config([{time_designator,Char}|Config]) when Char>=0, Char=<255 -> + case io_lib:printable_latin1_list([Char]) of + true -> + do_check_config(Config); + false -> + {error,{invalid_formatter_config,?MODULE,{time_designator,Char}}} + end; +do_check_config([C|_]) -> + {error,{invalid_formatter_config,?MODULE,C}}; +do_check_config([]) -> + ok. + +check_limit(L) when is_integer(L), L>0 -> + ok; +check_limit(unlimited) -> + ok; +check_limit(_) -> + error. + +check_offset(I) when is_integer(I) -> + ok; +check_offset(Tz) when Tz=:=""; Tz=:="Z"; Tz=:="z" -> + ok; +check_offset([Sign|Tz]) when Sign=:=$+; Sign=:=$- -> + check_timezone(Tz); +check_offset(_) -> + error. + +check_timezone(Tz) -> + try io_lib:fread("~d:~d", Tz) of + {ok, [_, _], []} -> + ok; + _ -> + error + catch _:_ -> + error + end. diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl index 7caad366ae..336398cd4a 100644 --- a/lib/kernel/src/logger_h_common.erl +++ b/lib/kernel/src/logger_h_common.erl @@ -39,8 +39,8 @@ %%%----------------------------------------------------------------- %%% Covert log data on any form to binary --spec log_to_binary(Log,Config) -> LogString when - Log :: logger:log(), +-spec log_to_binary(LogEvent,Config) -> LogString when + LogEvent :: logger:log_event(), Config :: logger:config(), LogString :: binary(). log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) -> @@ -58,7 +58,7 @@ do_log_to_binary(Log,Config) -> catch _:_ -> ?LOG_INTERNAL(debug,[{formatter_error,Formatter}, {config,FormatterConfig}, - {log,Log}, + {log_event,Log}, {bad_return_value,String}]), <<"FORMATTER ERROR: bad_return_value">> end. 
@@ -69,10 +69,10 @@ try_format(Log,Formatter,FormatterConfig) -> C:R:S -> ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter}, {config,FormatterConfig}, - {log,Log}, + {log_event,Log}, {reason, {C,R,logger:filter_stacktrace(?MODULE,S)}}]), - case {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG} of + case {?DEFAULT_FORMATTER,#{}} of {Formatter,FormatterConfig} -> "DEFAULT FORMATTER CRASHED"; {DefaultFormatter,DefaultConfig} -> @@ -135,7 +135,8 @@ call_cast_or_drop(Name, Bin) -> _:{timeout,_} -> ?observe(Name,{dropped,1}) end; - drop -> ?observe(Name,{dropped,1}) + drop -> + ?observe(Name,{dropped,1}) catch %% if the ETS table doesn't exist (maybe because of a %% handler restart), we can only drop the request @@ -152,12 +153,15 @@ check_load(State = #{id:=Name, mode := Mode, flush_reqs_qlen := FlushQLen}) -> {_,Mem} = process_info(self(), memory), ?observe(Name,{max_mem,Mem}), - %% make sure the handler process doesn't get scheduled - %% out between the message_queue_len check below and the - %% action that follows (flush or write). {_,QLen} = process_info(self(), message_queue_len), ?observe(Name,{max_qlen,QLen}), - + %% When the handler process gets scheduled in, it's impossible + %% to predict the QLen. We could jump "up" arbitrarily from say + %% async to sync, async to drop, sync to flush, etc. However, when + %% the handler process manages the log requests (without flushing), + %% one after the other, we will move "down" from drop to sync and + %% from sync to async. This way we don't risk getting stuck in + %% drop or sync mode with an empty mailbox. {Mode1,_NewDrops,_NewFlushes} = if QLen >= FlushQLen -> @@ -292,7 +296,7 @@ overload_levels_ok(HandlerConfig) -> TSQL = maps:get(toggle_sync_qlen, HandlerConfig, ?TOGGLE_SYNC_QLEN), DNRQL = maps:get(drop_new_reqs_qlen, HandlerConfig, ?DROP_NEW_REQS_QLEN), FRQL = maps:get(flush_reqs_qlen, HandlerConfig, ?FLUSH_REQS_QLEN), - (TSQL < DNRQL) andalso (DNRQL < FRQL). + (DNRQL > 1) andalso (TSQL =< DNRQL) andalso (DNRQL =< FRQL). error_notify(Term) -> ?internal_log(error, Term). diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl index 89378dbb10..ed365ce6eb 100644 --- a/lib/kernel/src/logger_h_common.hrl +++ b/lib/kernel/src/logger_h_common.hrl @@ -124,7 +124,7 @@ %%% slow down execution and therefore should not be include in code %%% to be officially released. -%% -define(TEST_HOOKS, true). +-define(TEST_HOOKS, true). -ifdef(TEST_HOOKS). -define(TEST_HOOKS_TAB, logger_h_test_hooks). diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl index 8c0fc2725d..fedd6db370 100644 --- a/lib/kernel/src/logger_internal.hrl +++ b/lib/kernel/src/logger_internal.hrl @@ -22,17 +22,16 @@ -define(LOGGER_KEY,'$logger_config$'). -define(HANDLER_KEY,'$handler_config$'). -define(LOGGER_META_KEY,'$logger_metadata$'). --define(STANDARD_HANDLER, logger_std_h). +-define(STANDARD_HANDLER, default). -define(DEFAULT_HANDLER_FILTERS, ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp])). -define(DEFAULT_HANDLER_FILTERS(Domain), [{remote_gl,{fun logger_filters:remote_gl/2,stop}}, - {domain,{fun logger_filters:domain/2,{log,prefix_of,Domain}}}, - {no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]). + {domain,{fun logger_filters:domain/2,{log,super,Domain}}}, + {no_domain,{fun logger_filters:domain/2,{log,undefined,[]}}}]). -define(DEFAULT_FORMATTER,logger_formatter). -define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true, - single_line=>false, - template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}). + single_line=>false}). 
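Note on the relaxed overload_levels_ok/1 guard in logger_h_common above: equal thresholds are now accepted as long as drop_new_reqs_qlen is greater than 1. A hypothetical handler configuration that the new guard admits (values invented):

%% 100 > 1, 100 =< 100 and 100 =< 1000, so the guard passes:
ok = logger:add_handler(my_std_h, logger_std_h,
        #{logger_std_h => #{type               => standard_io,
                            toggle_sync_qlen   => 100,
                            drop_new_reqs_qlen => 100,
                            flush_reqs_qlen    => 1000}}).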
-define(DEFAULT_FORMAT_TEMPLATE_HEADER, [{logger_formatter,header},"\n",msg,"\n"]). -define(DEFAULT_FORMAT_TEMPLATE_SINGLE, diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl index a7f302ac8f..47010c9fa5 100644 --- a/lib/kernel/src/logger_server.erl +++ b/lib/kernel/src/logger_server.erl @@ -25,9 +25,10 @@ -export([start_link/0, add_handler/3, remove_handler/1, add_filter/2, remove_filter/2, - set_module_level/2, reset_module_level/1, + set_module_level/2, unset_module_level/1, cache_module_level/1, - set_config/2, set_config/3]). + set_config/2, set_config/3, update_config/2, + update_formatter_config/2]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -36,8 +37,9 @@ -include("logger_internal.hrl"). -define(SERVER, logger). +-define(LOGGER_SERVER_TAG, '$logger_cb_process'). --record(state, {tid}). +-record(state, {tid, async_req, async_req_queue}). %%%=================================================================== %%% API @@ -47,23 +49,18 @@ start_link() -> gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). add_handler(Id,Module,Config0) -> - case sanity_check(logger,handlers,[Id]) of - ok -> - try check_mod(Module) of + try {check_id(Id),check_mod(Module)} of + {ok,ok} -> + case sanity_check(Id,Config0) of ok -> - case sanity_check(Id,Config0) of - ok -> - Default = default_config(Id), - Config = maps:merge(Default,Config0), - call({add_handler,Id,Module,Config}); - Error -> - Error - end - catch throw:Error -> - {error,Error} - end; - Error -> - Error + Default = default_config(Id), + Config = maps:merge(Default,Config0), + call({add_handler,Id,Module,Config}); + Error -> + Error + end + catch throw:Error -> + {error,Error} end. remove_handler(HandlerId) -> @@ -86,9 +83,9 @@ set_module_level(Module,Level) when is_atom(Module) -> set_module_level(Module,_) -> {error,{not_a_module,Module}}. -reset_module_level(Module) when is_atom(Module) -> - call({reset_module_level,Module}); -reset_module_level(Module) -> +unset_module_level(Module) when is_atom(Module) -> + call({unset_module_level,Module}); +unset_module_level(Module) -> {error,{not_a_module,Module}}. cache_module_level(Module) -> @@ -96,10 +93,7 @@ cache_module_level(Module) -> set_config(Owner,Key,Value) -> - case sanity_check(Owner,Key,Value) of - ok -> call({update_config,Owner,#{Key=>Value}}); - Error -> Error - end. + update_config(Owner,#{Key=>Value}). set_config(Owner,Config0) -> case sanity_check(Owner,Config0) of @@ -110,6 +104,21 @@ set_config(Owner,Config0) -> Error end. +update_config(Owner, Config) -> + case sanity_check(Owner,Config) of + ok -> + call({update_config,Owner,Config}); + Error -> + Error + end. + +update_formatter_config(HandlerId, FormatterConfig) + when is_map(FormatterConfig) -> + call({update_formatter_config,HandlerId,FormatterConfig}); +update_formatter_config(_HandlerId, FormatterConfig) -> + {error,{invalid_formatter_config,FormatterConfig}}. 
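Note: the new update_config/2 entry point merges into the existing configuration, while set_config/2 replaces it and re-adds defaults. Illustrated through the corresponding logger front-end calls exercised by logger_SUITE later in this patch:

%% Merge a single change into the current logger config:
ok = logger:update_logger_config(#{level => warning}),
%% Replace the logger config outright; unspecified keys fall back to defaults:
ok = logger:set_logger_config(#{filter_default => stop}).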
+ + %%%=================================================================== %%% gen_server callbacks %%%=================================================================== @@ -118,104 +127,127 @@ init([]) -> process_flag(trap_exit, true), Tid = logger_config:new(?LOGGER_TABLE), LoggerConfig = maps:merge(default_config(logger), - #{handlers=>[logger_simple]}), + #{handlers=>[simple]}), logger_config:create(Tid,logger,LoggerConfig), - SimpleConfig0 = maps:merge(default_config(logger_simple), + SimpleConfig0 = maps:merge(default_config(simple), #{filter_default=>stop, - filters=>?DEFAULT_HANDLER_FILTERS, - logger_simple=>#{buffer=>true}}), + filters=>?DEFAULT_HANDLER_FILTERS}), %% If this fails, then the node should crash - {ok,SimpleConfig} = - logger_simple:adding_handler(logger_simple,SimpleConfig0), - logger_config:create(Tid,logger_simple,logger_simple,SimpleConfig), - {ok, #state{tid=Tid}}. + {ok,SimpleConfig} = logger_simple_h:adding_handler(SimpleConfig0), + logger_config:create(Tid,simple,logger_simple_h,SimpleConfig), + {ok, #state{tid=Tid, async_req_queue = queue:new()}}. -handle_call({add_handler,Id,Module,HConfig}, _From, #state{tid=Tid}=State) -> - Reply = - case logger_config:exist(Tid,Id) of - true -> - {error,{already_exist,Id}}; - false -> - %% inform the handler - case call_h(Module,adding_handler,[Id,HConfig],{ok,HConfig}) of - {ok,HConfig1} -> - logger_config:create(Tid,Id,Module,HConfig1), - {ok,Config} = do_get_config(Tid,logger), - Handlers = maps:get(handlers,Config,[]), - do_set_config(Tid,logger, - Config#{handlers=>[Id|Handlers]}), - ok; - {error,HReason} -> - {error,{handler_not_added,HReason}} - end - end, - {reply,Reply,State}; -handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) -> - Reply = - case logger_config:get(Tid,HandlerId) of - {ok,{Module,_}} -> - {ok,Config} = do_get_config(Tid,logger), - Handlers0 = maps:get(handlers,Config,[]), - Handlers = lists:delete(HandlerId,Handlers0), - %% inform the handler - _ = call_h(Module,removing_handler,[HandlerId,Config],ok), - do_set_config(Tid,logger,Config#{handlers=>Handlers}), - logger_config:delete(Tid,HandlerId), - ok; - _ -> - {error,{not_found,HandlerId}} - end, - {reply,Reply,State}; +handle_call({add_handler,Id,Module,HConfig}, From, #state{tid=Tid}=State) -> + case logger_config:exist(Tid,Id) of + true -> + {reply,{error,{already_exist,Id}},State}; + false -> + call_h_async( + fun() -> + %% inform the handler + call_h(Module,adding_handler,[HConfig],{ok,HConfig}) + end, + fun({ok,HConfig1}) -> + %% We know that the call_h would have loaded the module + %% if it existed, so it is safe here to call function_exported + %% to find out if this is a valid handler + case erlang:function_exported(Module, log, 2) of + true -> + logger_config:create(Tid,Id,Module,HConfig1), + {ok,Config} = do_get_config(Tid,logger), + Handlers = maps:get(handlers,Config,[]), + do_set_config(Tid,logger, + Config#{handlers=>[Id|Handlers]}); + false -> + {error,{invalid_handler, + {function_not_exported, + {Module,log,2}}}} + end; + ({error,HReason}) -> + {error,{handler_not_added,HReason}} + end,From,State) + end; +handle_call({remove_handler,HandlerId}, From, #state{tid=Tid}=State) -> + case logger_config:get(Tid,HandlerId) of + {ok,{Module,HConfig}} -> + {ok,Config} = do_get_config(Tid,logger), + Handlers0 = maps:get(handlers,Config,[]), + Handlers = lists:delete(HandlerId,Handlers0), + call_h_async( + fun() -> + %% inform the handler + call_h(Module,removing_handler,[HConfig],ok) + end, + fun(_Res) -> + 
do_set_config(Tid,logger,Config#{handlers=>Handlers}), + logger_config:delete(Tid,HandlerId), + ok + end,From,State); + _ -> + {reply,{error,{not_found,HandlerId}},State} + end; handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) -> Reply = do_add_filter(Tid,Id,Filter), {reply,Reply,State}; handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) -> Reply = do_remove_filter(Tid,Id,FilterId), {reply,Reply,State}; -handle_call({update_config,Id,NewConfig}, _From, #state{tid=Tid}=State) -> - Reply = - case logger_config:get(Tid,Id) of - {ok,{Module,OldConfig}} -> - Config = maps:merge(OldConfig,NewConfig), - case call_h(Module,changing_config,[Id,OldConfig,Config], - {ok,Config}) of - {ok,Config1} -> - do_set_config(Tid,Id,Config1); - Error -> - Error - end; - {ok,OldConfig} -> - Config = maps:merge(OldConfig,NewConfig), - do_set_config(Tid,Id,Config); - Error -> - Error - end, - {reply,Reply,State}; +handle_call({update_config,Id,NewConfig}, From, #state{tid=Tid}=State) -> + case logger_config:get(Tid,Id) of + {ok,{_Module,OldConfig}} -> + Config = maps:merge(OldConfig,NewConfig), + handle_call({set_config,Id,Config}, From, State); + {ok,OldConfig} -> + Config = maps:merge(OldConfig,NewConfig), + {reply,do_set_config(Tid,Id,Config),State}; + Error -> + {reply,Error,State} + end; handle_call({set_config,logger,Config}, _From, #state{tid=Tid}=State) -> - Reply = do_set_config(Tid,logger,Config), + {ok,#{handlers:=Handlers}} = logger_config:get(Tid,logger), + Reply = do_set_config(Tid,logger,Config#{handlers=>Handlers}), {reply,Reply,State}; -handle_call({set_config,HandlerId,Config}, _From, #state{tid=Tid}=State) -> +handle_call({set_config,HandlerId,Config}, From, #state{tid=Tid}=State) -> + case logger_config:get(Tid,HandlerId) of + {ok,{Module,OldConfig}} -> + call_h_async( + fun() -> + call_h(Module,changing_config,[OldConfig,Config], + {ok,Config}) + end, + fun({ok,Config1}) -> + do_set_config(Tid,HandlerId,Config1); + (Error) -> + Error + end,From,State); + _ -> + {reply,{error,{not_found,HandlerId}},State} + end; +handle_call({update_formatter_config,HandlerId,NewFConfig},_From, + #state{tid=Tid}=State) -> Reply = case logger_config:get(Tid,HandlerId) of - {ok,{Module,OldConfig}} -> - case call_h(Module,changing_config,[HandlerId,OldConfig,Config], - {ok,Config}) of - {ok,Config1} -> - do_set_config(Tid,HandlerId,Config1); - Error -> - Error + {ok,{_Mod,#{formatter:={FMod,OldFConfig}}=Config}} -> + try + FConfig = maps:merge(OldFConfig,NewFConfig), + check_formatter({FMod,FConfig}), + do_set_config(Tid,HandlerId, + Config#{formatter=>{FMod,FConfig}}) + catch throw:Reason -> {error,Reason} end; _ -> - {error,{not_found,HandlerId}} + {error,{not_found,HandlerId}} end, {reply,Reply,State}; handle_call({set_module_level,Module,Level}, _From, #state{tid=Tid}=State) -> Reply = logger_config:set_module_level(Tid,Module,Level), {reply,Reply,State}; -handle_call({reset_module_level,Module}, _From, #state{tid=Tid}=State) -> - Reply = logger_config:reset_module_level(Tid,Module), +handle_call({unset_module_level,Module}, _From, #state{tid=Tid}=State) -> + Reply = logger_config:unset_module_level(Tid,Module), {reply,Reply,State}. +handle_cast({async_req_reply,_Ref,_Reply} = Reply,State) -> + call_h_reply(Reply,State); handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) -> logger_config:cache_module_level(Tid,Module), {noreply, State}. 
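Note: the per-module level API after the rename, as exercised by logger_SUITE further down; the module name is illustrative.

ok = logger:set_module_level(my_module, debug),
ok = logger:unset_module_level(my_module).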
@@ -235,11 +267,21 @@ handle_info({log,Level,Report,Meta}, State) -> handle_info({Ref,_Reply},State) when is_reference(Ref) -> %% Assuming this is a timed-out gen_server reply - ignoring {noreply, State}; -handle_info(Unexpected,State) -> +handle_info({'DOWN',_Ref,_Proc,_Pid,_Reason} = Down,State) -> + call_h_reply(Down,State); +handle_info(Unexpected,State) when element(1,Unexpected) == 'EXIT' -> + %% The simple logger will send an 'EXIT' message when it is replaced + %% We may as well ignore all 'EXIT' messages that we get ?LOG_INTERNAL(debug, [{logger,got_unexpected_message}, {process,?SERVER}, {message,Unexpected}]), + {noreply,State}; +handle_info(Unexpected,State) -> + ?LOG_INTERNAL(info, + [{logger,got_unexpected_message}, + {process,?SERVER}, + {message,Unexpected}]), {noreply,State}. terminate(_Reason, _State) -> @@ -249,8 +291,11 @@ terminate(_Reason, _State) -> %%% Internal functions %%%=================================================================== call(Request) -> - case whereis(?SERVER) of - Pid when Pid==self() -> + Action = element(1,Request), + case get(?LOGGER_SERVER_TAG) of + true when + Action == add_handler; Action == remove_handler; + Action == update_config; Action == set_config -> {error,{attempting_syncronous_call_to_self,Request}}; _ -> gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT) @@ -301,13 +346,13 @@ do_set_config(Tid,Id,Config) -> default_config(logger) -> #{level=>info, filters=>[], - filter_default=>log, - handlers=>[]}; -default_config(_) -> - #{level=>info, + filter_default=>log}; +default_config(Id) -> + #{id=>Id, + level=>info, filters=>[], filter_default=>log, - formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}. + formatter=>{?DEFAULT_FORMATTER,#{}}}. sanity_check(Owner,Key,Value) -> sanity_check_1(Owner,[{Key,Value}]). @@ -333,9 +378,6 @@ get_type(Id) -> check_config(Owner,[{level,Level}|Config]) -> check_level(Level), check_config(Owner,Config); -check_config(logger,[{handlers,Handlers}|Config]) -> - check_handlers(Handlers), - check_config(logger,Config); check_config(Owner,[{filters,Filters}|Config]) -> check_filters(Filters), check_config(Owner,Config); @@ -373,14 +415,6 @@ check_level(Level) -> throw({invalid_level,Level}) end. -check_handlers([Id|Handlers]) -> - check_id(Id), - check_handlers(Handlers); -check_handlers([]) -> - ok; -check_handlers(Handlers) -> - throw({invalid_handlers,Handlers}). - check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) -> check_filters(Filters); check_filters([Filter|_]) -> @@ -395,40 +429,24 @@ check_filter_default(FD) when FD==stop; FD==log -> check_filter_default(FD) -> throw({invalid_filter_default,FD}). -check_formatter({logger_formatter,Config}) when is_map(Config) -> - check_logger_formatter_config(maps:to_list(Config)); -check_formatter({logger_formatter,Config}) -> - throw({invalid_formatter_config,Config}); -check_formatter({Mod,_}) -> - %% no knowledge of other formatters - check_mod(Mod); +check_formatter({Mod,Config}) -> + check_mod(Mod), + try Mod:check_config(Config) of + ok -> ok; + {error,Error} -> throw(Error) + catch + C:R:S -> + case {C,R,S} of + {error,undef,[{Mod,check_config,[Config],_}|_]} -> + ok; + _ -> + throw({callback_crashed, + {C,R,logger:filter_stacktrace(?MODULE,S)}}) + end + end; check_formatter(Formatter) -> throw({invalid_formatter,Formatter}). 
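Note: check_formatter/1 now delegates validation to the formatter module itself and tolerates an undef when check_config/1 is not exported. A minimal sketch of a formatter that opts in (module name and error term invented):

-module(my_formatter).
-export([format/2, check_config/1]).

format(#{level := Level, msg := {string, String}}, _Config) ->
    [atom_to_list(Level), ": ", String, "\n"];
format(LogEvent, _Config) ->
    io_lib:format("~p~n", [LogEvent]).

check_config(Config) when is_map(Config) -> ok;
check_config(Config) -> {error, {invalid_config, ?MODULE, Config}}.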
- -check_logger_formatter_config([{template,T}|Config]) when is_list(T) -> - case lists:all(fun(X) when is_atom(X) -> true; - (X) when is_tuple(X), is_atom(element(1,X)) -> true; - (X) when is_list(X) -> io_lib:printable_unicode_list(X); - (_) -> false - end, - T) of - true -> - check_logger_formatter_config(Config); - false -> - throw({invalid_formatter_template,T}) - end; -check_logger_formatter_config([{legacy_header,LH}|Config]) when is_boolean(LH) -> - check_logger_formatter_config(Config); -check_logger_formatter_config([{single_line,SL}|Config]) when is_boolean(SL) -> - check_logger_formatter_config(Config); -check_logger_formatter_config([{utc,Utc}|Config]) when is_boolean(Utc) -> - check_logger_formatter_config(Config); -check_logger_formatter_config([C|_]) -> - throw({invalid_formatter_config,C}); -check_logger_formatter_config([]) -> - ok. - call_h(Module, Function, Args, DefRet) -> %% Not calling code:ensure_loaded + erlang:function_exported here, %% since in some rare terminal cases, the code_server might not @@ -440,7 +458,59 @@ call_h(Module, Function, Args, DefRet) -> {error,undef,[{Module,Function,Args,_}|_]} -> DefRet; _ -> - {error,{callback_crashed, - {C,R,logger:filter_stacktrace(?MODULE,S)}}} + ST = logger:filter_stacktrace(?MODULE,S), + ?LOG_INTERNAL(error, + [{logger,callback_crashed}, + {process,?SERVER}, + {reason,{C,R,ST}}]), + {error,{callback_crashed,{C,R,ST}}} end end. + +%% There are all sort of API functions that can cause deadlocks if called +%% from the handler callbacks. So we spawn a process that does the request +%% for the logger_server. There are still APIs that will cause problems, +%% namely logger:add_handler +call_h_async(AsyncFun,PostFun,From,#state{ async_req = undefined } = State) -> + Parent = self(), + {Pid, Ref} = spawn_monitor( + fun() -> + put(?LOGGER_SERVER_TAG,true), + receive Ref -> Ref end, + gen_server:cast(Parent, {async_req_reply, Ref, AsyncFun()}) + end), + Pid ! Ref, + {noreply,State#state{ async_req = {Ref,PostFun,From} }}; +call_h_async(AsyncFun,PostFun,From,#state{ async_req_queue = Q } = State) -> + {noreply,State#state{ async_req_queue = queue:in({AsyncFun,PostFun,From},Q) }}. + +call_h_reply({async_req_reply,Ref,Reply}, + #state{ async_req = {Ref,PostFun,From}, async_req_queue = Q} = State) -> + erlang:demonitor(Ref,[flush]), + _ = gen_server:reply(From, PostFun(Reply)), + {Value,NewQ} = queue:out(Q), + NewState = State#state{ async_req = undefined, + async_req_queue = NewQ }, + case Value of + {value,{AsyncFun,NPostFun,NFrom}} -> + call_h_async(AsyncFun,NPostFun,NFrom,NewState); + empty -> + {noreply,NewState} + end; +call_h_reply({'DOWN',Ref,_Proc,Pid,Reason}, #state{ async_req = {Ref,_PostFun,_From}} = State) -> + %% This clause should only be triggered if someone explicitly sends an exit signal + %% to the spawned process. It is only here to make sure that the logger_server does + %% not deadlock if that happens. + ?LOG_INTERNAL(error, + [{logger,process_exited}, + {process,Pid}, + {reason,Reason}]), + call_h_reply( + {async_req_reply,Ref,{error,{logger_process_exited,Pid,Reason}}}, + State); +call_h_reply(Unexpected,State) -> + ?LOG_INTERNAL(info, + [{logger,got_unexpected_message}, + {process,?SERVER}, + {message,Unexpected}]), + {noreply,State}. diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple_h.erl index a1b427b96c..19fb3b54ba 100644 --- a/lib/kernel/src/logger_simple.erl +++ b/lib/kernel/src/logger_simple_h.erl @@ -17,40 +17,21 @@ %% %% %CopyrightEnd% %% --module(logger_simple). 
+-module(logger_simple_h). --export([adding_handler/2, removing_handler/2, log/2]). --export([get_buffer/0]). +-export([adding_handler/1, removing_handler/1, log/2]). %% This module implements a simple handler for logger. It is the %% default used during system start. %%%----------------------------------------------------------------- -%%% API -get_buffer() -> - case whereis(?MODULE) of - undefined -> - {error,noproc}; - Pid -> - Ref = erlang:monitor(process,Pid), - Pid ! {get_buffer,self()}, - receive - {buffer,Buffer} -> - erlang:demonitor(Ref,[flush]), - {ok,Buffer}; - {'DOWN',Ref,process,Pid,Reason} -> - {error,Reason} - end - end. - -%%%----------------------------------------------------------------- %%% Logger callback -adding_handler(?MODULE,Config) -> +adding_handler(#{id:=simple}=Config) -> Me = self(), case whereis(?MODULE) of undefined -> - {Pid,Ref} = spawn_opt(fun() -> init(Me,Config) end, + {Pid,Ref} = spawn_opt(fun() -> init(Me) end, [link,monitor,{message_queue_data,off_heap}]), receive {'DOWN',Ref,process,Pid,Reason} -> @@ -63,7 +44,7 @@ adding_handler(?MODULE,Config) -> {error,{handler_process_name_already_exists,?MODULE}} end. -removing_handler(?MODULE,_Config) -> +removing_handler(#{id:=simple}) -> case whereis(?MODULE) of undefined -> ok; @@ -89,7 +70,7 @@ log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) -> do_log( #{level=>error, msg=>{report,{error,simple_handler_process_dead}}, - meta=>#{time=>erlang:monotonic_time(microsecond)}}), + meta=>#{time=>erlang:system_time(microsecond)}}), do_log(Log); _ -> ?MODULE ! {log,Log} @@ -102,54 +83,50 @@ log(_,_) -> %%%----------------------------------------------------------------- %%% Process -init(Starter,Config) -> +init(Starter) -> register(?MODULE,self()), Starter ! {self(),started}, - BufferSize = - case Config of - #{?MODULE:=#{buffer:=true}} -> - 10; - _ -> - infinity - end, - loop(#{buffer_size=>BufferSize,dropped=>0,buffer=>[]},infinity). + loop(#{buffer_size=>10,dropped=>0,buffer=>[]}). -loop(Buffer,Timeout) -> +loop(Buffer) -> receive stop -> - ok; - {get_buffer,From} -> - loop(Buffer#{send_to=>From},0); + %% We replay the logger messages of there is + %% a default handler when the simple handler + %% is removed. + case logger:get_handler_config(default) of + {ok, _} -> + replay_buffer(Buffer); + _ -> + ok + end; {log,#{msg:=_,meta:=#{time:=_}}=Log} -> do_log(Log), - loop(update_buffer(Buffer,Log),Timeout); + loop(update_buffer(Buffer,Log)); _ -> %% Unexpected message - flush it! - loop(Buffer,Timeout) - after Timeout -> - #{dropped:=D,buffer:=B,send_to:=Pid} = Buffer, - LogList = lists:reverse(B) ++ drop_msg(D), - Pid ! {buffer,LogList}, - loop(Buffer#{buffer_size=>infinity, - dropped=>0, - buffer=>[], - send_to=>false}, - infinity) + loop(Buffer) end. -update_buffer(#{buffer_size:=infinity}=Buffer,_Log) -> - Buffer; update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) -> Buffer#{dropped=>D+1}; update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) -> Buffer#{buffer_size=>S-1,buffer=>[Log|B]}. +replay_buffer(#{ dropped := D, buffer := Buffer }) -> + lists:foreach( + fun F(#{msg := {Tag, Msg}} = L) when Tag =:= string; Tag =:= report -> + F(L#{ msg := Msg }); + F(#{ level := Level, msg := Msg, meta := MD}) -> + logger:log(Level, Msg, MD) + end, lists:reverse(Buffer, drop_msg(D))). + drop_msg(0) -> []; drop_msg(N) -> [#{level=>info, msg=>{"Simple handler buffer full, dropped ~w messages",[N]}, - meta=>#{time=>erlang:monotonic_time(microsecond)}}]. + meta=>#{time=>erlang:system_time(microsecond)}}]. 
%%%----------------------------------------------------------------- %%% Internal @@ -164,8 +141,7 @@ do_log(#{msg:=Msg,meta:=#{time:=T}}) -> display_date(T), display(Msg). -display_date(Timestamp0) when is_integer(Timestamp0) -> - Timestamp = Timestamp0 + erlang:time_offset(microsecond), +display_date(Timestamp) when is_integer(Timestamp) -> Micro = Timestamp rem 1000000, Sec = Timestamp div 1000000, {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime( diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl index 31edcfea8b..63c3ab2dac 100644 --- a/lib/kernel/src/logger_std_h.erl +++ b/lib/kernel/src/logger_std_h.erl @@ -35,8 +35,8 @@ terminate/2, code_change/3]). %% logger callbacks --export([log/2, adding_handler/2, removing_handler/2, - changing_config/3, swap_buffer/2]). +-export([log/2, adding_handler/1, removing_handler/1, + changing_config/2, swap_buffer/2]). %%%=================================================================== %%% API @@ -109,8 +109,8 @@ reset(Name) -> %%%----------------------------------------------------------------- %%% Handler being added -adding_handler(Name, Config) -> - case check_config(adding, Name, Config) of +adding_handler(#{id:=Name}=Config) -> + case check_config(adding, Config) of {ok, Config1} -> %% create initial handler state by merging defaults with config HConfig = maps:get(?MODULE, Config1, #{}), @@ -137,9 +137,8 @@ adding_handler(Name, Config) -> %%%----------------------------------------------------------------- %%% Updating handler config -changing_config(Name, - OldConfig=#{id:=Id, ?MODULE:=#{type:=Type}}, - NewConfig=#{id:=Id}) -> +changing_config(OldConfig=#{id:=Name, ?MODULE:=#{type:=Type}}, + NewConfig=#{id:=Name}) -> MyConfig = maps:get(?MODULE, NewConfig, #{}), case maps:get(type, MyConfig, Type) of Type -> @@ -149,11 +148,11 @@ changing_config(Name, _ -> {error,{illegal_config_change,OldConfig,NewConfig}} end; -changing_config(_Name, OldConfig, NewConfig) -> +changing_config(OldConfig, NewConfig) -> {error,{illegal_config_change,OldConfig,NewConfig}}. changing_config1(Name, OldConfig, NewConfig) -> - case check_config(changing, Name, NewConfig) of + case check_config(changing, NewConfig) of Result = {ok,NewConfig1} -> try gen_server:call(Name, {change_config,OldConfig,NewConfig1}, ?DEFAULT_CALL_TIMEOUT) of @@ -166,9 +165,7 @@ changing_config1(Name, OldConfig, NewConfig) -> Error end. -check_config(adding, Name, Config0) -> - %% Merge in defaults on top level - Config = maps:merge(#{id => Name}, Config0), +check_config(adding, Config) -> %% Merge in defaults on handler level MyConfig0 = maps:get(?MODULE, Config, #{}), MyConfig = maps:merge(#{type => standard_io}, @@ -179,7 +176,7 @@ check_config(adding, Name, Config0) -> Error -> Error end; -check_config(changing, _Name, Config) -> +check_config(changing, Config) -> MyConfig = maps:get(?MODULE, Config, #{}), case check_my_config(maps:to_list(MyConfig)) of ok -> {ok,Config}; @@ -207,7 +204,7 @@ check_my_config([]) -> %%%----------------------------------------------------------------- %%% Handler being removed -removing_handler(Name,_Config) -> +removing_handler(#{id:=Name}) -> stop(Name). 
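Note: hypothetical installations of additional standard handlers under the new callback shape; the handler ids and file name are invented.

ok = logger:add_handler(my_stderr_h, logger_std_h,
        #{logger_std_h => #{type => standard_error}}),
ok = logger:add_handler(my_file_h, logger_std_h,
        #{logger_std_h => #{type => {file, "./info.log"}}}).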
%%%----------------------------------------------------------------- @@ -222,15 +219,15 @@ swap_buffer(Name,Buffer) -> %%%----------------------------------------------------------------- %%% Log a string or report --spec log(Log, Config) -> ok | dropped when - Log :: logger:log(), +-spec log(LogEvent, Config) -> ok | dropped when + LogEvent :: logger:log_event(), Config :: logger:config(). -log(Log,Config=#{id:=Name}) -> +log(LogEvent,Config=#{id:=Name}) -> %% if the handler has crashed, we must drop this request %% and hope the handler restarts so we can try again true = is_pid(whereis(Name)), - Bin = logger_h_common:log_to_binary(Log,Config), + Bin = logger_h_common:log_to_binary(LogEvent,Config), logger_h_common:call_cast_or_drop(Name, Bin). %%%=================================================================== @@ -257,10 +254,11 @@ init([Name, Config, file_ctrl_sync => FileCtrlSyncInt, last_qlen => 0, last_log_ts => T0, + last_op => sync, burst_win_ts => T0, burst_msg_count => 0}), proc_lib:init_ack({ok,self()}), - gen_server:cast(self(), {repeated_filesync,T0}), + gen_server:cast(self(), repeated_filesync), enter_loop(Config, State1); Error -> logger_h_common:error_notify({init_handler,Name,Error}), @@ -310,12 +308,11 @@ handle_call(filesync, _From, State = #{type := Type, if is_atom(Type) -> {reply, ok, State}; true -> - {reply, file_ctrl_filesync_sync(FileCtrlPid), State} + {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}} end; handle_call({change_config,_OldConfig,NewConfig}, _From, - State = #{filesync_repeat_interval := FSyncInt0, - last_log_ts := LastLogTS}) -> + State = #{filesync_repeat_interval := FSyncInt0}) -> HConfig = maps:get(?MODULE, NewConfig, #{}), State1 = maps:merge(State, HConfig), case logger_h_common:overload_levels_ok(State1) of @@ -334,8 +331,7 @@ handle_call({change_config,_OldConfig,NewConfig}, _From, _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, undefined)), - gen_server:cast(self(), {repeated_filesync, - LastLogTS}) + gen_server:cast(self(), repeated_filesync) end, {reply, ok, State1}; false -> @@ -365,24 +361,24 @@ handle_cast({log, Bin}, State) -> %% clause gets called repeatedly by the handler. In order to %% guarantee that a filesync *always* happens after the last log %% request, the repeat operation must be active! 
-handle_cast({repeated_filesync,LastLogTS0}, +handle_cast(repeated_filesync, State = #{type := Type, file_ctrl_pid := FileCtrlPid, filesync_repeat_interval := FSyncInt, - last_log_ts := LastLogTS1}) -> + last_op := LastOp}) -> State1 = if not is_atom(Type), is_integer(FSyncInt) -> %% only do filesync if something has been %% written since last time we checked - if LastLogTS1 == LastLogTS0 -> + if LastOp == sync -> ok; true -> file_ctrl_filesync_async(FileCtrlPid) end, {ok,TRef} = timer:apply_after(FSyncInt, gen_server,cast, - [self(),{repeated_filesync,LastLogTS1}]), - State#{rep_sync_tref => TRef}; + [self(),repeated_filesync]), + State#{rep_sync_tref => TRef, last_op => sync}; true -> State end, @@ -600,6 +596,7 @@ write(Name, Mode, T1, Bin, _CallOrCast, State1#{mode => Mode1, last_qlen := LastQLen1, last_log_ts => T1, + last_op => write, burst_win_ts => BurstWinT, burst_msg_count => BurstMsgCount1, file_ctrl_sync => diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile index 8599a3d814..2ad1e3107c 100644 --- a/lib/kernel/test/Makefile +++ b/lib/kernel/test/Makefile @@ -77,8 +77,9 @@ MODULES= \ logger_filters_SUITE \ logger_formatter_SUITE \ logger_legacy_SUITE \ - logger_simple_SUITE \ + logger_simple_h_SUITE \ logger_std_h_SUITE \ + logger_test_lib \ os_SUITE \ pg2_SUITE \ seq_trace_SUITE \ diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl index a8087e11f9..ef55a2d339 100644 --- a/lib/kernel/test/error_logger_warn_SUITE.erl +++ b/lib/kernel/test/error_logger_warn_SUITE.erl @@ -480,9 +480,12 @@ rb_utc() -> UtcLog=case application:get_env(sasl,utc_log) of {ok,true} -> true; - _AllOthers -> + {ok,false} -> application:set_env(sasl,utc_log,true), - false + false; + undefined -> + application:set_env(sasl,utc_log,true), + undefined end, application:start(sasl), rb:start([{report_dir, rd()}]), @@ -494,7 +497,12 @@ rb_utc() -> Sum=one_rb_findstr([],"UTC"), rb:stop(), application:stop(sasl), - application:set_env(sasl,utc_log,UtcLog), + case UtcLog of + undefined -> + application:unset_env(sasl,utc_log); + _ -> + application:set_env(sasl,utc_log,UtcLog) + end, stop_node(Node), ok. diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover index b30bcfe920..960bc0abff 100644 --- a/lib/kernel/test/logger.cover +++ b/lib/kernel/test/logger.cover @@ -8,7 +8,7 @@ logger_filters, logger_formatter, logger_server, - logger_simple, + logger_simple_h, logger_std_h, logger_sup]}. diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec index cd76a754a4..1ab90b3e93 100644 --- a/lib/kernel/test/logger.spec +++ b/lib/kernel/test/logger.spec @@ -7,5 +7,5 @@ logger_filters_SUITE, logger_formatter_SUITE, logger_legacy_SUITE, - logger_simple_SUITE, + logger_simple_h_SUITE, logger_std_h_SUITE]}. diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl index f311a9c7ed..f7ec59a7b7 100644 --- a/lib/kernel/test/logger_SUITE.erl +++ b/lib/kernel/test/logger_SUITE.erl @@ -40,18 +40,18 @@ suite() -> [{timetrap,{seconds,30}}]. init_per_suite(Config) -> - case logger:get_handler_config(logger_std_h) of + case logger:get_handler_config(?STANDARD_HANDLER) of {ok,StdH} -> - ok = logger:remove_handler(logger_std_h), - [{logger_std_h,StdH}|Config]; + ok = logger:remove_handler(?STANDARD_HANDLER), + [{default_handler,StdH}|Config]; _ -> Config end. 
end_per_suite(Config) -> - case ?config(logger_std_h,Config) of + case ?config(default_handler,Config) of {HMod,HConfig} -> - ok = logger:add_handler(logger_std_h,HMod,HConfig); + ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig); _ -> ok end. @@ -105,12 +105,12 @@ start_stop(_Config) -> add_remove_handler(_Config) -> register(callback_receiver,self()), - {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + #{handlers:=Hs0} = logger:i(), {error,{not_found,h1}} = logger:get_handler_config(h1), ok = logger:add_handler(h1,?MODULE,#{}), [add] = test_server:messages_get(), - {ok,#{handlers:=Hs}} = logger:get_logger_config(), - [h1|Hs0] = Hs, + #{handlers:=Hs} = logger:i(), + {value,_,Hs0} = lists:keytake(h1,1,Hs), {ok,{?MODULE,#{level:=info,filters:=[],filter_default:=log}}} = % defaults logger:get_handler_config(h1), ok = logger:set_handler_config(h1,filter_default,stop), @@ -124,7 +124,7 @@ add_remove_handler(_Config) -> ok = check_logged(info,"hello",[],?MY_LOC(1)), ok = logger:remove_handler(h1), [remove] = test_server:messages_get(), - {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + #{handlers:=Hs0} = logger:i(), {error,{not_found,h1}} = logger:get_handler_config(h1), {error,{not_found,h1}} = logger:remove_handler(h1), logger:info("hello",[]), @@ -218,33 +218,52 @@ change_config(_Config) -> {ok,{?MODULE,#{level:=info,filter_default:=stop}=C2}} = logger:get_handler_config(h1), false = maps:is_key(custom,C2), - {error,fail} = logger:set_handler_config(h1,#{fail=>true}), + {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}), {error,{attempting_syncronous_call_to_self,_}} = logger:set_handler_config( - h1,#{call=>fun() -> logger:set_module_level(?MODULE,debug) end}), + h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}), + ok = + logger:set_handler_config( + h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}), {ok,{?MODULE,C2}} = logger:get_handler_config(h1), - %% Change one key only - {error,fail} = logger:set_handler_config(h1,fail,true), + %% Change handler config: Single key + {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end), ok = logger:set_handler_config(h1,custom,custom), [changing_config] = test_server:messages_get(), {ok,{?MODULE,#{custom:=custom}=C3}} = logger:get_handler_config(h1), C2 = maps:remove(custom,C3), + %% Change handler config: Map + ok = logger:update_handler_config(h1,#{custom=>new_custom}), + [changing_config] = test_server:messages_get(), + {ok,{_,C4}} = logger:get_handler_config(h1), + C4 = C3#{custom:=new_custom}, + + %% Change logger config: Single key + {ok,LConfig0} = logger:get_logger_config(), + ok = logger:set_logger_config(level,warning), + {ok,LConfig1} = logger:get_logger_config(), + LConfig1 = LConfig0#{level:=warning}, + + %% Change logger config: Map + ok = logger:update_logger_config(#{level=>error}), + {ok,LConfig2} = logger:get_logger_config(), + LConfig2 = LConfig1#{level:=error}, + %% Overwrite logger config - check that defaults are added - {ok,LConfig} = logger:get_logger_config(), ok = logger:set_logger_config(#{filter_default=>stop}), - {ok,#{level:=info,filters:=[],handlers:=[],filter_default:=stop}=LC1} = - logger:get_logger_config(), - 4 = maps:size(LC1), - - %% Change one key only - ok = logger:set_logger_config(handlers,[h1]), - {ok,#{level:=info,filters:=[],handlers:=[h1],filter_default:=stop}} = + {ok,#{level:=info,filters:=[],filter_default:=stop}=LC1} = logger:get_logger_config(), + 3 = maps:size(LC1), + %% Check that 
internal 'handlers' field has not been changed + #{handlers:=HCs} = logger:i(), + HIds1 = [Id || {Id,_,_} <- HCs], + {ok,#{handlers:=HIds2}} = logger_config:get(?LOGGER_TABLE,logger), + HIds1 = lists:sort(HIds2), %% Cleanup - ok = logger:set_logger_config(LConfig), + ok = logger:set_logger_config(LConfig0), [] = test_server:messages_get(), ok. @@ -299,7 +318,7 @@ macros(_Config) -> macros(cleanup,_Config) -> logger:remove_handler(h1), - logger:reset_module_level(?MODULE), + logger:unset_module_level(?MODULE), ok. set_level(_Config) -> @@ -331,29 +350,29 @@ set_level_module(_Config) -> logger:info(M2=?map_rep,?MY_LOC(0)), ok = check_logged(info,M2,?MY_LOC(1)), - {error,{not_a_module,{bad}}} = logger:reset_module_level({bad}), - ok = logger:reset_module_level(?MODULE), + {error,{not_a_module,{bad}}} = logger:unset_module_level({bad}), + ok = logger:unset_module_level(?MODULE), ok. set_level_module(cleanup,_Config) -> logger:remove_handler(h1), - logger:reset_module_level(?MODULE), + logger:unset_module_level(?MODULE), ok. cache_level_module(_Config) -> - ok = logger:reset_module_level(?MODULE), + ok = logger:unset_module_level(?MODULE), [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? ?LOG_INFO(?map_rep), %% Caching is done asynchronously, so wait a bit for the update timer:sleep(100), [_] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? - ok = logger:reset_module_level(?MODULE), + ok = logger:unset_module_level(?MODULE), [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? ok. cache_level_module(cleanup,_Config) -> - logger:reset_module_level(?MODULE), + logger:unset_module_level(?MODULE), ok. format_report(_Config) -> @@ -425,6 +444,7 @@ filter_failed(cleanup,_Config) -> ok. handler_failed(_Config) -> + register(callback_receiver,self()), {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}), {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}), {error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad), @@ -434,26 +454,62 @@ handler_failed(_Config) -> logger:add_handler(h1,?MODULE,#{filter_default=>true}), {error,{invalid_formatter,[]}} = logger:add_handler(h1,?MODULE,#{formatter=>[]}), - ok = logger:add_handler(h1,nomodule,#{filter_default=>log}), + {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}), logger:info(?map_rep), check_no_log(), - #{logger:=#{handlers:=Ids1}, - handlers:=H1} = logger:i(), - false = lists:member(h1,Ids1), + #{handlers:=H1} = logger:i(), false = lists:keymember(h1,1,H1), {error,{not_found,h1}} = logger:remove_handler(h1), - ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,crash=>true}), + ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,log_call=>fun() -> a = b end}), {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}), + [add] = test_server:messages_get(), logger:info(?map_rep), - check_no_log(), - #{logger:=#{handlers:=Ids2}, - handlers:=H2} = logger:i(), - false = lists:member(h2,Ids2), + [remove] = test_server:messages_get(), + #{handlers:=H2} = logger:i(), false = lists:keymember(h2,1,H2), {error,{not_found,h2}} = logger:remove_handler(h2), + CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end, + CrashHandler = fun() -> a = b end, + KillHandler = fun() -> exit(self(), die) end, + + {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} = + logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}), + {error,{handler_not_added,{callback_crashed,_}}} = + 
logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}), + {error,{handler_not_added,{logger_process_exited,_,die}}} = + logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,#{conf_call=>CallAddHandler}), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,#{conf_call=>CrashHandler}), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,#{conf_call=>KillHandler}), + + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,conf_call,CallAddHandler), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,conf_call,CrashHandler), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,conf_call,KillHandler), + + ok = logger:remove_handler(h1), + [add,remove] = test_server:messages_get(), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}), + ok = logger:remove_handler(h1), + [add,add,add] = test_server:messages_get(), + ok. handler_failed(cleanup,_Config) -> @@ -466,10 +522,6 @@ config_sanity_check(_Config) -> {error,{invalid_filter_default,bad}} = logger:set_logger_config(filter_default,bad), {error,{invalid_level,bad}} = logger:set_logger_config(level,bad), - {error,{invalid_handlers,bad}} = logger:set_logger_config(handlers,bad), - {error,{invalid_id,{bad,bad}}} = - logger:set_logger_config(handlers,[{bad,bad}]), - {error,{invalid_id,"bad"}} = logger:set_logger_config(handlers,["bad"]), {error,{invalid_filters,bad}} = logger:set_logger_config(filters,bad), {error,{invalid_filter,bad}} = logger:set_logger_config(filters,[bad]), {error,{invalid_filter,{_,_}}} = @@ -499,29 +551,96 @@ config_sanity_check(_Config) -> logger:set_handler_config(h1,formatter,bad), {error,{invalid_module,{bad}}} = logger:set_handler_config(h1,formatter,{{bad},cfg}), - {error,{invalid_formatter_config,bad}} = + {error,{invalid_formatter_config,logger_formatter,bad}} = logger:set_handler_config(h1,formatter,{logger_formatter,bad}), - {error,{invalid_formatter_config,{bad,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}), - {error,{invalid_formatter_config,{template,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{template,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{template=>bad}}), - {error,{invalid_formatter_template,[1]}} = + {error,{invalid_formatter_template,logger_formatter,[1]}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{template=>[1]}}), ok = logger:set_handler_config(h1,formatter,{logger_formatter, #{template=>[]}}), - {error,{invalid_formatter_config,{single_line,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{single_line=>bad}}), ok = logger:set_handler_config(h1,formatter,{logger_formatter, #{single_line=>true}}), - {error,{invalid_formatter_config,{legacy_header,bad}}} = + {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} = logger:set_handler_config(h1,formatter,{logger_formatter, #{legacy_header=>bad}}), ok = logger:set_handler_config(h1,formatter,{logger_formatter, 
#{legacy_header=>true}}), + {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>fun(R) -> + {"~p",[R]} + end}}), + {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>4}}), + {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>4}}), + {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>4}}), + ok = logger:set_handler_config(h1,formatter,{module,config}), + {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} = + logger:set_handler_config(h1,formatter,{?MODULE,crash}), ok = logger:set_handler_config(h1,custom,custom), + + %% Old utc parameter is no longer allowed (replaced by time_offset) + {error,{invalid_formatter_config,logger_formatter,{utc,true}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{utc=>true}}), + {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>""}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"Z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"-0:0"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+10:13"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+0"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>bad}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>"s"}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>$\s}}), ok. 
config_sanity_check(cleanup,_Config) -> @@ -650,14 +769,14 @@ process_metadata(_Config) -> undefined = logger:get_process_metadata(), {error,badarg} = ?TRY(logger:set_process_metadata(bad)), ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), - Time = erlang:monotonic_time(microsecond), + Time = erlang:system_time(microsecond), ProcMeta = #{time=>Time,line=>0,custom=>proc}, ok = logger:set_process_metadata(ProcMeta), S1 = ?str, ?LOG_INFO(S1,#{custom=>macro}), check_logged(info,S1,#{time=>Time,line=>0,custom=>macro}), - Time2 = erlang:monotonic_time(microsecond), + Time2 = erlang:system_time(microsecond), S2 = ?str, ?LOG_INFO(S2,#{time=>Time2,line=>1,custom=>macro}), check_logged(info,S2,#{time=>Time2,line=>1,custom=>macro}), @@ -720,17 +839,20 @@ check_maps(Expected,Got,What) -> end. %% Handler -adding_handler(_Id,Config) -> +adding_handler(#{add_call:=Fun}) -> + Fun(); +adding_handler(Config) -> maybe_send(add), {ok,Config}. -removing_handler(_Id,_Config) -> + +removing_handler(#{rem_call:=Fun}) -> + Fun(); +removing_handler(_Config) -> maybe_send(remove), ok. -changing_config(_Id,_Old,#{call:=Fun}) -> +changing_config(_Old,#{conf_call:=Fun}) -> Fun(); -changing_config(_Id,_Old,#{fail:=true}) -> - {error,fail}; -changing_config(_Id,_Old,Config) -> +changing_config(_Old,Config) -> maybe_send(changing_config), {ok,Config}. @@ -740,8 +862,8 @@ maybe_send(Msg) -> Pid -> Pid ! Msg end. -log(_Log,#{crash:=true}) -> - a=b; +log(_Log,#{log_call:=Fun}) -> + Fun(); log(Log,Config) -> TcProc = maps:get(tc_proc,Config,self()), TcProc ! {Log,Config}, @@ -829,3 +951,8 @@ test_macros(emergency=Level) -> %%% Called by macro ?TRY(X) my_try(Fun) -> try Fun() catch C:R -> {C,R} end. + +check_config(crash) -> + erlang:error({badmatch,3}); +check_config(_) -> + ok. diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl index 63e5b56021..7a1736c814 100644 --- a/lib/kernel/test/logger_disk_log_h_SUITE.erl +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -31,7 +31,8 @@ end). suite() -> - [{timetrap,{seconds,30}}]. + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. 
init_per_suite(Config) -> timer:start(), % to avoid progress report @@ -327,36 +328,38 @@ formatter_fail(Config) -> logger:add_handler(Name, logger_disk_log_h, HConfig), Pid = whereis(Name), true = is_pid(Pid), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC1} = logger:i(), + H = [Id || {Id,_,_} <- HC1], true = lists:member(Name,H), %% Formatter is added automatically {ok,{_,#{formatter:={logger_formatter,_}}}} = logger:get_handler_config(Name), logger:info(M1=?msg,?domain), - Got1 = try_match_file(?log_no(LogFile,1),"=INFO REPORT====.*\n"++M1,5000), + Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* info: "++M1,5000), ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}), logger:info(M2=?msg,?domain), Got2 = try_match_file(?log_no(LogFile,1), - Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + escape(Got1)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M2, 5000), ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}), logger:info(M3=?msg,?domain), Got3 = try_match_file(?log_no(LogFile,1), - Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + escape(Got2)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M3, 5000), ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}), logger:info(?msg,?domain), try_match_file(?log_no(LogFile,1), - Got3++"FORMATTER ERROR: bad_return_value", + escape(Got3)++"FORMATTER ERROR: bad_return_value", 5000), %% Check that handler is still alive and was never dead Pid = whereis(Name), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC2} = logger:i(), + H = [Id || {Id,_,_} <- HC2], ok. formatter_fail(cleanup,_Config) -> @@ -369,10 +372,18 @@ config_fail(_Config) -> #{logger_disk_log_h => #{bad => bad}, filter_default=>log, formatter=>{?MODULE,self()}}), - {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + + {error,{handler_not_added,{invalid_levels,{_,1,_}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{drop_new_reqs_qlen=>1}}), + {error,{handler_not_added,{invalid_levels,{43,42,_}}}} = logger:add_handler(?MODULE,logger_disk_log_h, - #{logger_disk_log_h => #{toggle_sync_qlen=>42, + #{logger_disk_log_h => #{toggle_sync_qlen=>43, drop_new_reqs_qlen=>42}}), + {error,{handler_not_added,{invalid_levels,{_,43,42}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{drop_new_reqs_qlen=>43, + flush_reqs_qlen=>42}}), ok = logger:add_handler(?MODULE,logger_disk_log_h, #{filter_default=>log, @@ -717,7 +728,7 @@ write_failure(Config) -> Log = lists:concat([File,".1"]), ct:pal("Log = ~p", [Log]), - Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + Node = start_h_on_new_node(Config, File), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -761,7 +772,7 @@ sync_failure(Config) -> File = filename:join(Dir, FileName), - Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + Node = start_h_on_new_node(Config, File), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -801,21 +812,12 @@ sync_failure(cleanup, _Config) -> Nodes = nodes(), [test_server:stop_node(Node) || Node <- Nodes]. 
-start_h_on_new_node(_Config, Func, File) -> - Pa = filename:dirname(code:which(?MODULE)), - Dest = - case os:type() of - {win32,_} -> - lists:concat([" {disk_log,\\\"",File,"\\\"}"]); - _ -> - lists:concat([" \'{disk_log,\"",File,"\"}\'"]) - end, - Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), - NodeName = lists:concat([?MODULE,"_",Func]), - ct:pal("Starting ~s with ~tp", [NodeName,Args]), - {ok,Node} = test_server:start_node(NodeName, peer, [{args, Args}]), - Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), - true = is_pid(Pid), +start_h_on_new_node(Config, File) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_disk_log_h, + #{ disk_log_opts => #{ file => File }}}]}]), ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, {?MODULE,nl}]), Node. @@ -848,62 +850,115 @@ internal_log(Type, Term) -> op_switch_to_sync(Config) -> {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 500, NewHConfig = - HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 3, - drop_new_reqs_qlen => 501, - flush_reqs_qlen => 2000, + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => NumOfReqs+1, + flush_reqs_qlen => 2*NumOfReqs, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, send_burst({n,NumOfReqs}, seq, {chars,79}, info), - NumOfReqs = count_lines(Log), - ok = file:delete(Log). + Lines = count_lines(Log), + ok = file:delete(Log), + NumOfReqs = Lines, + ok. op_switch_to_sync(cleanup, _Config) -> ok = stop_handler(?MODULE). +op_switch_to_drop() -> + [{timetrap,{seconds,180}}]. op_switch_to_drop(Config) -> - {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - NewHConfig = - HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 3, - flush_reqs_qlen => 600, - enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, - send_burst({n,NumOfReqs}, seq, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages dropped = ~w (~w)", - [NumOfReqs-Logged,NumOfReqs]), - true = (Logged < NumOfReqs), - ok = file:delete(Log). + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 300, + Procs = 2, + Bursts = 10, + NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => Procs*NumOfReqs*Bursts, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that dropping + %% never occurs. Therefore, lets generate a number of + %% bursts to increase the chance of message buildup. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1, Bursts)], + Logged = count_lines(Log), + ok= stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), + true = (Logged < (Procs*NumOfReqs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. 
+ case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_drop(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). op_switch_to_flush() -> [{timetrap,{minutes,3}}]. op_switch_to_flush(Config) -> - {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - %% it's important that both async and sync requests have been queued - %% when the flush happens (verify with coverage of flush_log_requests/2) + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% NOTE: it's important that both async and sync + %% requests have been queued when the flush happens + %% (verify with coverage of flush_log_requests/2) - NewHConfig = - HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 99, - flush_reqs_qlen => 100, - enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 1000, - Procs = 500, - send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages flushed/dropped = ~w (~w)", - [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), - true = (Logged < (NumOfReqs*Procs)), - ok = file:delete(Log). + NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{toggle_sync_qlen => 2, + %% disable drop mode + drop_new_reqs_qlen => 300, + flush_reqs_qlen => 300, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1500, + Procs = 10, + Bursts = 10, + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that flushing + %% never occurs, or it gets all messages at once, + %% causing all messages to get flushed (no dropping of + %% sync messages gets tested). Therefore, lets + %% generate a number of bursts to increase the chance + %% of message buildup in some random fashion. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1,Bursts)], + Logged = count_lines(Log), + ok= stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), + true = (Logged < (NumOfReqs*Procs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_flush(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). 
limit_burst_disabled(Config) -> @@ -987,7 +1042,7 @@ qlen_kill_new(Config) -> {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_disk_log_h => DLHConfig#{enable_kill_overloaded=>true, @@ -1008,7 +1063,7 @@ qlen_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), true = is_pid(whereis(?MODULE)), ok after @@ -1024,7 +1079,7 @@ mem_kill_new(Config) -> {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_disk_log_h => DLHConfig#{enable_kill_overloaded=>true, @@ -1045,7 +1100,7 @@ mem_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter * 2), + timer:sleep(RestartAfter + 2000), true = is_pid(whereis(?MODULE)), ok after @@ -1078,7 +1133,7 @@ restart_after(Config) -> end, {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig2 = HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>10, @@ -1090,7 +1145,7 @@ restart_after(Config) -> send_burst({n,100}, {spawn,2,0}, {chars,79}, info), receive {'DOWN', MRef2, _, _, _Info2} -> - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), Pid1 = whereis(?MODULE), true = is_pid(Pid1), false = (Pid1 == Pid0), @@ -1361,6 +1416,29 @@ count_lines1(File) -> file:close(Dev), Lines. +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). + +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + start_tracer(Trace,Expected) -> Pid = self(), dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), @@ -1382,7 +1460,8 @@ tpl([{M,F,A}|Trace]) -> tpl([]) -> ok. -tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) -> +tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]}}, + {Pid,[{Mod,Func,Op}|Expected]}) -> maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]}}, {Pid,[{Mod,Func,Data}|Expected]}) -> maybe_tracer_done(Pid,Expected,{Mod,Func,Data}); @@ -1412,3 +1491,10 @@ check_tracer(T) -> dbg:stop_clear(), ct:fail({timeout,tracer}) end. + +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl index c2d3364701..601d331fb0 100644 --- a/lib/kernel/test/logger_env_var_SUITE.erl +++ b/lib/kernel/test/logger_env_var_SUITE.erl @@ -1,4 +1,4 @@ -% +%% %% %CopyrightBegin% %% %% Copyright Ericsson AB 2018. All Rights Reserved. @@ -21,431 +21,636 @@ -compile(export_all). --include_lib("common_test/include/ct.hrl"). -include_lib("kernel/include/logger.hrl"). 
-include_lib("kernel/src/logger_internal.hrl"). --define(all_vars,[{kernel,logger_dest}, - {kernel,logger_level}, - {kernel,logger_log_progress}, - {kernel,logger_sasl_compatible}, - {kernel,error_logger}]). +-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]). suite() -> - [{timetrap,{seconds,30}}]. + [{timetrap,{seconds,60}}, + {ct_hooks,[logger_test_lib]}]. init_per_suite(Config) -> - Env = [{App,Key,application:get_env(App,Key)} || {App,Key} <- ?all_vars], - Removed = cleanup(), - [{env,Env},{logger,Removed}|Config]. - -end_per_suite(Config) -> - [application:set_env(App,Key,Val) || - {App,Key,Val} <- ?config(env,Config), - Val =/= undefined], - Hs = ?config(logger,Config), - [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], - ok. - -init_per_group(_Group, Config) -> - Config. - -end_per_group(_Group, _Config) -> - ok. - -init_per_testcase(_TestCase, Config) -> Config. -end_per_testcase(Case, Config) -> - try apply(?MODULE,Case,[cleanup,Config]) - catch error:undef -> ok - end, - cleanup(), +end_per_suite(_Config) -> ok. groups() -> - []. - -all() -> + [{error_logger,[],[error_logger_tty, + error_logger_tty_sasl_compatible, + error_logger_false, + error_logger_false_progress, + error_logger_false_sasl_compatible, + error_logger_silent, + error_logger_silent_sasl_compatible, + error_logger_file]}, + {logger,[],[logger_file, + logger_file_sasl_compatible, + logger_file_log_progress, + logger_file_no_filter, + logger_file_no_filter_level, + logger_file_formatter, + logger_filters, + logger_filters_stop, + logger_module_level, + logger_disk_log, + logger_disk_log_formatter, + logger_undefined, + logger_many_handlers_default_first, + logger_many_handlers_default_last, + logger_many_handlers_default_last_broken_filter + ]}, + {bad,[],[bad_error_logger, + bad_level, + bad_sasl_compatibility, + bad_progress]}]. + +all() -> [default, default_sasl_compatible, - dest_tty, - dest_tty_sasl_compatible, - dest_false, - dest_false_progress, - dest_false_sasl_compatible, - dest_silent, - dest_silent_sasl_compatible, - dest_file_old, - dest_file, - dest_disk_log, - %% disk_log_vars, % or test this in logger_disk_log_SUITE? sasl_compatible_false, sasl_compatible_false_no_progress, sasl_compatible, - bad_dest%% , - %% bad_level, - %% bad_sasl_compatibility, - %% bad_progress + {group,bad}, + {group,error_logger}, + {group,logger} ]. default(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - undefined, - undefined, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), + {ok,#{handlers:=Hs},_Node} = setup(Config,[]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), true = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - false = lists:keymember(sasl_h,1,Hs), - false = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. 
default_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - undefined, - undefined, % dest - undefined, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{logger_sasl_compatible,true}]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + {domain,{_,{log,super,[beam,erlang,otp]}}} = lists:keyfind(domain,1,StdFilters), false = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), ok. -dest_tty(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - tty, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), +error_logger_tty(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config,[{error_logger,tty}]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), true = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - false = lists:keymember(sasl_h,1,Hs), - false = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. -dest_tty_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - tty, % dest - undefined, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), - true = is_pid(whereis(logger_std_h)), +error_logger_tty_sasl_compatible(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,tty}, + {logger_sasl_compatible,true}]), + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), info = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), - {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + {domain,{_,{log,super,[beam,erlang,otp]}}} = lists:keyfind(domain,1,StdFilters), false = lists:keymember(stop_progress,1,StdFilters), - false = lists:keymember(logger_simple,1,Hs), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), ok. 
-dest_false(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - false, % dest - notice, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), - notice = maps:get(level,SimpleC), +error_logger_false(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + notice = maps:get(level,L), SimpleFilters = maps:get(filters,SimpleC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), true = lists:keymember(stop_progress,1,SimpleFilters), - false = lists:keymember(sasl_h,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. -dest_false_progress(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - false, % dest - notice, % level - undefined, % sasl comp (default=false) - true), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), - notice = maps:get(level,SimpleC), +error_logger_false_progress(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}, + {logger_progress_reports,log}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + notice = maps:get(level,L), SimpleFilters = maps:get(filters,SimpleC), - {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), false = lists:keymember(stop_progress,1,SimpleFilters), - false = lists:keymember(sasl_h,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. -dest_false_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - false, % dest - notice, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), - notice = maps:get(level,SimpleC), +error_logger_false_sasl_compatible(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}, + {logger_sasl_compatible,true}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + notice = maps:get(level,L), SimpleFilters = maps:get(filters,SimpleC), - {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + {domain,{_,{log,super,[beam,erlang,otp]}}} = lists:keyfind(domain,1,SimpleFilters), false = lists:keymember(stop_progress,1,SimpleFilters), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), + true = lists:keymember(sasl,1,Hs), ok. 
-dest_silent(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - silent, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - false = lists:keymember(logger_simple,1,Hs), - false = lists:keymember(sasl_h,1,Hs), +error_logger_silent(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), ok. -dest_silent_sasl_compatible(Config) -> - {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - silent, % dest - undefined, % level - true, % sasl comp (default=false) - undefined), % progress (default=false) - false = lists:keymember(logger_std_h,1,Hs), - false = lists:keymember(logger_simple,1,Hs), - true = lists:keymember(sasl_h,1,Hs), - true = is_pid(whereis(sasl_h)), +error_logger_silent_sasl_compatible(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}, + {logger_sasl_compatible,true}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), ok. -dest_file_old(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - error_logger, - file, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - check_log(Log, - file, % dest - 0), % progress in std logger +error_logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger ok. - - -dest_file(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - check_log(Log, - file, % dest - 0), % progress in std logger + + +logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + ok. 
- - -dest_disk_log(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - disk_log, % dest - undefined, % level - undefined, % sasl comp (default=false) - undefined), % progress (default=false) - check_log(Log, - disk_log, % dest - 0), % progress in std logger + +logger_file_sasl_compatible(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger_sasl_compatible,true}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + true = lists:keymember(sasl,1,Hs), + + ok. + +logger_file_log_progress(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_file_no_filter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filter_default=>log,filters=>[], + logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_file_no_filter_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[],level=>error, + logger_std_h=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + error),% level + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + error = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_file_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,Log}}}}]}]), + check_single_log(Node,Log, + file,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. 
+ +logger_filters(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,logger:=Logger},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}, + {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + LoggerFilters = maps:get(filters,Logger), + true = lists:keymember(stop_progress,1,LoggerFilters), + + ok. + +logger_filters_stop(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,logger:=Logger},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + logger_std_h=>#{type=>{file,Log}}}}, + {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0, + notice),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + LoggerFilters = maps:get(filters,Logger), + true = lists:keymember(log_error,1,LoggerFilters), + + ok. + +logger_module_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node} + = setup(Config, + [{logger_progress_reports,log}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{logger_std_h=>#{type=>{file,Log}}}}, + {module_level,error,[supervisor]} + ]}]), + check_default_log(Node,Log, + file,% dest + 3),% progress in std logger + + {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + [{supervisor,error}] = ModuleLevels, + ok. + +logger_disk_log(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{disk_log_opts=>#{file=>Log}}}]}]), + check_default_log(Node,Log, + disk_log,% dest + 0),% progress in std logger + + {?STANDARD_HANDLER,logger_disk_log_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. 
+ +logger_disk_log_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + disk_log_opts=>#{file=>Log}}}]}]), + check_single_log(Node,Log, + disk_log,% dest + 6),% progress in std logger + + {?STANDARD_HANDLER,logger_disk_log_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs), + info = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = lists:keymember(simple,1,Hs), + false = lists:keymember(sasl,1,Hs), + + ok. + +logger_undefined(Config) -> + {ok,#{handlers:=Hs,logger:=L},_Node} = + setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]), + false = lists:keymember(?STANDARD_HANDLER,1,Hs), + {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs), + info = maps:get(level,SimpleC), + info = maps:get(level,L), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,SimpleFilters), + true = lists:keymember(stop_progress,1,SimpleFilters), + false = lists:keymember(sasl,1,Hs), + ok. + + +%% Test that we can add multiple handlers with the default first +logger_many_handlers_default_first(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_error), + LogInfo = file(Config,logger_many_handlers_default_first_info), + + logger_many_handlers( + Config,[{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,LogErr}}} + }, + {handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + logger_std_h=>#{type=>{file,LogInfo}}} + } + ]}], LogErr, LogInfo, 6). + +%% Test that we can add multiple handlers with the default last +logger_many_handlers_default_last(Config) -> + LogErr = file(Config,logger_many_handlers_default_last_error), + LogInfo = file(Config,logger_many_handlers_default_last_info), + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + logger_std_h=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,LogErr}}} + } + ]}], LogErr, LogInfo, 7). + +%% Check that we can handle that an added logger has a broken filter +%% This used to cause a deadlock. +logger_many_handlers_default_last_broken_filter(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_broken_filter_error), + LogInfo = file(Config,logger_many_handlers_default_first_broken_filter_info), + + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{broken,{fun logger_filters:level/2,broken_state}}, + {level,{fun logger_filters:level/2,{stop,gteq,error}}}], + logger_std_h=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + logger_std_h=>#{type=>{file,LogErr}}} + } + ]}], LogErr, LogInfo, 7). 
+ +logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) -> + {ok,#{handlers:=Hs},Node} = setup(Config,Env), + check_single_log(Node,LogErr, + file,% dest + 0,% progress in std logger + error), % level + ok = rpc:call(Node,logger_std_h,filesync,[info]), + {ok, Bin} = file:read_file(LogInfo), + ct:log("Log content:~n~s",[Bin]), + match(Bin,<<"info:">>,NumProgress+1,info,info), + match(Bin,<<"alert:">>,0,alert,info), + ok. - sasl_compatible_false(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - false, % sasl comp - true), % progress - check_log(Log, - file, % dest - 4), % progress in std logger + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}, + {logger_progress_reports,log}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger ok. sasl_compatible_false_no_progress(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - false, % sasl comp - false), % progress - check_log(Log, - file, % dest - 0), % progress in std logger + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}, + {logger_progress_reports,stop}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger ok. sasl_compatible(Config) -> - {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, - logger_dest, - file, % dest - undefined, % level - true, % sasl comp - undefined), % progress - check_log(Log, - file, % dest - 0), % progress in std logger + Log = file(Config,?FUNCTION_NAME), + {ok,_Hs,Node} = setup(Config, + [{error_logger,{file,Log}}, + {sasl_compatible,true}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger ok. -bad_dest(Config) -> - {error,{bad_config,{kernel,{logger_dest,baddest}}}} = - setup(Config,?FUNCTION_NAME, - logger_dest, - baddest, - undefined, - undefined, - undefined). +bad_error_logger(Config) -> + error = setup(Config,[{error_logger,baddest}]). bad_level(Config) -> - error = - setup(Config,?FUNCTION_NAME, - logger_dest, - tty, - badlevel, - undefined, - undefined). + error = setup(Config,[{logger_level,badlevel}]). bad_sasl_compatibility(Config) -> - error = - setup(Config,?FUNCTION_NAME, - logger_dest, - tty, - info, - badcomp, - undefined). + error = setup(Config,[{logger_sasl_compatible,badcomp}]). bad_progress(Config) -> - error = - setup(Config,?FUNCTION_NAME, - logger_dest, - tty, - info, - undefined, - badprogress). + error = setup(Config,[{logger_progress_reports,badprogress}]). 
%%%----------------------------------------------------------------- %%% Internal -setup(Config,Func,DestVar,Dest,Level,SaslComp,Progress) -> - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - Dir = ?config(priv_dir,Config), - File = lists:concat([?MODULE,"_",Func,".log"]), - Log = filename:join(Dir,File), - case Dest of - undefined -> - ok; - F when F==file; F==disk_log -> - application:set_env(kernel,DestVar,{Dest,Log}); - _ -> - application:set_env(kernel,DestVar,Dest) - end, - case Level of - undefined -> - ok; - _ -> - application:set_env(kernel,logger_level,Level) - end, - case SaslComp of - undefined -> - ok; - _ -> - application:set_env(kernel,logger_sasl_compatible,SaslComp) - end, - case Progress of - undefined -> - ok; - _ -> - application:set_env(kernel,logger_log_progress,Progress) - end, - case logger:setup_standard_handler() of - ok -> - application:start(sasl), - StdH = case Dest of - NoH when NoH==false; NoH==silent -> false; - _ -> true - end, - StdH = is_pid(whereis(?STANDARD_HANDLER)), - SaslH = if SaslComp -> true; - true -> false - end, - SaslH = is_pid(whereis(sasl_h)), - {ok,{Log,maps:get(handlers,logger:i())}}; - Error -> - Error - end. +file(Config,Func) -> + filename:join(proplists:get_value(priv_dir,Config), + lists:concat([Func,".log"])). + +check_default_log(Node,Log,Dest,NumProgress) -> + check_default_log(Node,Log,Dest,NumProgress,info). +check_default_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level), + match(Bin1,<<"ALERT REPORT">>,1,alert,Level), + match(Bin1,<<"INFO REPORT">>,0,info,Level), + match(Bin1,<<"DEBUG REPORT">>,0,debug,Level), -check_log(Log,Dest,NumProgress) -> - ok = logger:alert("dummy1"), - ok = logger:debug("dummy1"), + match(Bin2,<<"INFO REPORT">>,1,info,Level), + match(Bin2,<<"DEBUG REPORT">>,0,debug,Level), + ok. + +check_single_log(Node,Log,Dest,NumProgress) -> + check_single_log(Node,Log,Dest,NumProgress,info). +check_single_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"info:">>,NumProgress,info,Level), + match(Bin1,<<"alert:">>,1,alert,Level), + match(Bin1,<<"debug:">>,0,debug,Level), + + match(Bin2,<<"info:">>,NumProgress+1,info,Level), + match(Bin2,<<"debug:">>,0,debug,Level), + + ok. + +check_log(Node,Log,Dest) -> + + ok = log(Node,alert,["dummy1"]), + ok = log(Node,debug,["dummy1"]), %% Check that there are progress reports (supervisor and %% application_controller) and an error report (the call above) in %% the log. There should not be any info reports yet. - {ok,Bin1} = sync_and_read(Dest,Log), + {ok,Bin1} = sync_and_read(Node,Dest,Log), ct:log("Log content:~n~s",[Bin1]), - match(Bin1,<<"PROGRESS REPORT">>,NumProgress), - match(Bin1,<<"ALERT REPORT">>,1), - match(Bin1,<<"INFO REPORT">>,0), - match(Bin1,<<"DEBUG REPORT">>,0), %% Then stop sasl and see that the info report from %% application_controller is there - ok = application:stop(sasl), - {ok,Bin2} = sync_and_read(Dest,Log), + ok = rpc:call(Node,application,stop,[sasl]), + {ok,Bin2} = sync_and_read(Node,Dest,Log), ct:log("Log content:~n~s",[Bin2]), - match(Bin2,<<"INFO REPORT">>,1), - match(Bin1,<<"DEBUG REPORT">>,0), - ok. + {ok,Bin1,Bin2}. -match(Bin,Pattern,0) -> +match(Bin,Pattern,0,_,_) -> nomatch = re:run(Bin,Pattern,[{capture,none}]); -match(Bin,Pattern,N) -> - {match,M} = re:run(Bin,Pattern,[{capture,all},global]), - N = length(M). 
- -sync_and_read(disk_log,Log) -> - logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), - file:read_file(Log ++ ".1"); -sync_and_read(file,Log) -> - logger_std_h:filesync(?STANDARD_HANDLER), - file:read_file(Log). - -cleanup() -> - application:stop(sasl), - [application:unset_env(App,Key) || {App,Key} <- ?all_vars], - #{handlers:=Hs0} = logger:i(), - Hs = lists:keydelete(cth_log_redirect,1,Hs0), - [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], - Hs. +match(Bin,Pattern,N,LogLevel,ConfLevel) -> + case logger:compare_levels(LogLevel,ConfLevel) of + lt -> match(Bin,Pattern,0,LogLevel,ConfLevel); + _ -> + {match,M} = re:run(Bin,Pattern,[{capture,all},global]), + N = length(M) + end. diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl index 21f14bbc02..11cce8fd20 100644 --- a/lib/kernel/test/logger_filters_SUITE.erl +++ b/lib/kernel/test/logger_filters_SUITE.erl @@ -75,66 +75,79 @@ all() -> remote_gl]. domain(_Config) -> - L1 = logger_filters:domain(L1=?dlog([]),{log,prefix_of,[]}), - stop = logger_filters:domain(?dlog([]),{stop,prefix_of,[]}), - L2 = logger_filters:domain(L2=?dlog([]),{log,starts_with,[]}), - stop = logger_filters:domain(?dlog([]),{stop,starts_with,[]}), - L3 = logger_filters:domain(L3=?dlog([]),{log,equals,[]}), - stop = logger_filters:domain(?dlog([]),{stop,equals,[]}), - ignore = logger_filters:domain(?dlog([]),{log,no_domain,[]}), - ignore = logger_filters:domain(?dlog([]),{stop,no_domain,[]}), - - L4 = logger_filters:domain(L4=?dlog([a]),{log,prefix_of,[a,b]}), - stop = logger_filters:domain(?dlog([a]),{stop,prefix_of,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{log,starts_with,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{stop,starts_with,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{log,equals,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{stop,equals,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{log,no_domain,[a,b]}), - ignore = logger_filters:domain(?dlog([a]),{stop,no_domain,[a,b]}), - - ignore = logger_filters:domain(?dlog([a,b]),{log,prefix_of,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{stop,prefix_of,[a]}), - L5 = logger_filters:domain(L5=?dlog([a,b]),{log,starts_with,[a]}), - stop = logger_filters:domain(?dlog([a,b]),{stop,starts_with,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{log,equals,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{stop,equals,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{log,no_domain,[a]}), - ignore = logger_filters:domain(?dlog([a,b]),{stop,no_domain,[a]}), - - ignore = logger_filters:domain(?ndlog,{log,prefix_of,[a]}), - ignore = logger_filters:domain(?ndlog,{stop,prefix_of,[a]}), - ignore = logger_filters:domain(?ndlog,{log,starts_with,[a]}), - ignore = logger_filters:domain(?ndlog,{stop,starts_with,[a]}), - ignore = logger_filters:domain(?ndlog,{log,equals,[a]}), - ignore = logger_filters:domain(?ndlog,{stop,equals,[a]}), - L6 = logger_filters:domain(L6=?ndlog,{log,no_domain,[a]}), - stop = logger_filters:domain(?ndlog,{stop,no_domain,[a]}), - - L7 = logger_filters:domain(L7=?dlog([a,b,c,d]),{log,prefix_of,[a,b,c,d]}), - stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,prefix_of,[a,b,c,d]}), - L8 = logger_filters:domain(L8=?dlog([a,b,c,d]),{log,starts_with,[a,b,c,d]}), - stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,starts_with,[a,b,c,d]}), - L9 = logger_filters:domain(L9=?dlog([a,b,c,d]),{log,equals,[a,b,c,d]}), - stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equals,[a,b,c,d]}), - ignore = 
logger_filters:domain(?dlog([a,b,c,d]),{log,no_domain,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,no_domain,[a,b,c,d]}), + L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}), + stop = logger_filters:domain(?dlog([]),{stop,super,[]}), + L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}), + stop = logger_filters:domain(?dlog([]),{stop,sub,[]}), + L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}), + stop = logger_filters:domain(?dlog([]),{stop,equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}), + + L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}), + L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}), + + ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}), + L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}), + L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}), + + ignore = logger_filters:domain(?ndlog,{log,super,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,super,[a]}), + ignore = logger_filters:domain(?ndlog,{log,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{log,equal,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}), + L8 = logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}), + stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}), + L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}), + stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}), + + L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}), + L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}), + L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}), %% A domain field in meta which is not a list is allowed by the - %% filter, but it will never match. 
- ignore = logger_filters:domain(?dlog(dummy),{log,prefix_of,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,prefix_of,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{log,starts_with,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,starts_with,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{log,equals,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,equals,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{log,no_domain,[a,b,c,d]}), - ignore = logger_filters:domain(?dlog(dummy),{stop,no_domain,[a,b,c,d]}), + %% filter, but since MatchDomain is always a list of atoms, only + %% Action=not_equal can ever match. + ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}), + L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}), {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)), - {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,prefix_of,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})), {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})), - {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,prefix_of,bad})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})), ok. diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl index 7d1f33746d..7a93f2ca79 100644 --- a/lib/kernel/test/logger_formatter_SUITE.erl +++ b/lib/kernel/test/logger_formatter_SUITE.erl @@ -68,12 +68,13 @@ all() -> level_or_msg_in_meta, faulty_log, faulty_config, - faulty_msg]. + faulty_msg, + update_config]. default(_Config) -> String1 = format(info,{"~p",[term]},#{},#{}), ct:log(String1), - [_Date,_Time,"info:","term\n"] = string:lexemes(String1," "), + [_DateTime,"info:","term\n"] = string:lexemes(String1," "), Time = timestamp(), ExpectedTimestamp = default_time_format(Time), @@ -268,8 +269,8 @@ format_msg(_Config) -> String8 = format(info,{string,['not',printable,list]}, #{report_cb=>fun(_)-> {"formatted",[]} end}, #{template=>Template}), - ct:log(String8), - "INVALID STRING: ['not',printable,list]" = String8, + ct:log("~ts",[String8]), % avoiding ct_log crash + "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String8, String9 = format(info,{string,"string"},#{},#{template=>Template}), ct:log(String9), @@ -297,22 +298,22 @@ max_size(_Config) -> single_line=>false}, "12345678901234567890" = format(info,{"12345678901234567890",[]},#{},Cfg), - application:set_env(kernel,logger_max_size,11), - "12345678901234567890" = % min value is 50, so this is not limited - format(info,{"12345678901234567890",[]},#{},Cfg), - "12345678901234567890123456789012345678901234567..." = % 50 - format(info, - {"123456789012345678901234567890123456789012345678901234567890", - []}, - #{}, - Cfg), - application:set_env(kernel,logger_max_size,53), - "12345678901234567890123456789012345678901234567890..." 
= %53 - format(info, - {"123456789012345678901234567890123456789012345678901234567890", - []}, - #{}, - Cfg), + %% application:set_env(kernel,logger_max_size,11), + %% "12345678901234567890" = % min value is 50, so this is not limited + %% format(info,{"12345678901234567890",[]},#{},Cfg), + %% "12345678901234567890123456789012345678901234567..." = % 50 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), + %% application:set_env(kernel,logger_max_size,53), + %% "12345678901234567890123456789012345678901234567890..." = %53 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), "123456789012..." = format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}), "12345678901234567890" = @@ -341,12 +342,6 @@ depth(_Config) -> {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, #{}, #{template=>Template}), - application:set_env(kernel,logger_format_depth,12), - "[1,2,3,4,5,6,7,8,9,0,1|...]" = - format(info, - {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, - #{}, - #{template=>Template}), "[1,2,3,4,5,6,7,8,9,0,1,2|...]" = format(info, {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, @@ -361,7 +356,7 @@ depth(_Config) -> depth=>unlimited}), ok. depth(cleanup,_Config) -> - application:unset_env(kernel,logger_format_depth), + application:unset_env(kernel,error_logger_format_depth), ok. chars_limit(_Config) -> @@ -370,7 +365,7 @@ chars_limit(_Config) -> lists:seq(1,100), maps:from_list(lists:zip(lists:seq(1,100), lists:duplicate(100,value)))]}, - Meta = #{time=>"2018-04-26 9:15:40.449879"}, + Meta = #{time=>timestamp()}, Template = [time," - ", msg, "\n"], FC = #{template=>Template, depth=>unlimited, @@ -382,7 +377,7 @@ chars_limit(_Config) -> L1 = string:length(String1), ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]), true = L1 > CL1, - true = L1 < CL1 + 10, + true = L1 < CL1 + 15, String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}), L2 = string:length(String2), @@ -394,13 +389,13 @@ chars_limit(_Config) -> L3 = string:length(String3), ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]), true = L3 > CL3, - true = L3 < CL3 + 10, + true = L3 < CL3 + 15, String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}), L4 = string:length(String4), ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]), true = L4 > CL3, - true = L4 < CL3 + 10, + true = L4 < CL3 + 15, %% Test that max_size truncates the string which is limited by %% depth and chars_limit @@ -411,6 +406,14 @@ chars_limit(_Config) -> L5 = MS5, true = lists:prefix(lists:sublist(String5,L5-4),String4), + %% Test that chars_limit limits string also + Str = "123456789012345678901234567890123456789012345678901234567890123456789", + CL6 = 80, + String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}), + L6 = string:length(String6), + ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]), + L6 = CL6, + ok. format_mfa(_Config) -> @@ -439,29 +442,58 @@ format_mfa(_Config) -> ok. 
format_time(_Config) -> - Time1 = timestamp(), - ExpectedTimestamp1 = default_time_format(Time1), - String1 = format(info,{"~p",[term]},#{time=>Time1},#{}), - ct:log(String1), - " info: term\n" = string:prefix(String1,ExpectedTimestamp1), - - Time2 = timestamp(), - ExpectedTimestamp2 = default_time_format(Time2,true), - String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}), - ct:log(String2), - " info: term\n" = string:prefix(String2,ExpectedTimestamp2), - - application:set_env(kernel,logger_utc,true), - Time3 = timestamp(), - ExpectedTimestamp3 = default_time_format(Time3,true), - String3 = format(info,{"~p",[term]},#{time=>Time3},#{}), - ct:log(String3), - " info: term\n" = string:prefix(String3,ExpectedTimestamp3), + Time = timestamp(), + Meta = #{time=>Time}, + FC = #{template=>[time]}, + Msg = {string,""}, + ExpectedLocal = default_time_format(Time,false), + ExpectedUtc = default_time_format(Time,true), + + %% default - local time + ExpectedLocal = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% stdlib utc_log works when time_offset parameter is not set + application:set_env(stdlib,utc_log,true), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedLocal = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,true), + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + + %% time_designator config parameter to formatter + ExpectedLocalS = default_time_format(Time,false,$\s), + ExpectedUtcS = default_time_format(Time,true,$\s), + + ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"", + time_designator=>$\s}), + ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z", + time_designator=>$\s}), ok. format_time(cleanup,_Config) -> - application:unset_env(kernel,logger_utc), + application:unset_env(sasl,utc_log), + application:unset_env(stdlib,utc_log), ok. level_or_msg_in_meta(_Config) -> @@ -482,7 +514,7 @@ level_or_msg_in_meta(_Config) -> ok. faulty_log(_Config) -> - %% Unexpected log (should be type logger:log()) - print error + %% Unexpected log (should be type logger:log_event()) - print error {error, function_clause, {logger_formatter,format,[_,_],_}} = @@ -509,6 +541,54 @@ faulty_msg(_Config) -> #{})), ok. 
+%% Test that formatter config can be changed, and that the default +%% template is updated accordingly +update_config(_Config) -> + logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}), + ok = logger:add_handler(?MODULE,?MODULE,#{}), + D = lists:seq(1,1000), + logger:info("~p~n",[D]), + {Lines1,C1} = check_log(), + [ct:log(L) || L <- Lines1], + ct:log("~p",[C1]), + [Line1] = Lines1, + [_Time,"info: "++D1] = string:split(Line1," "), + true = length(D1)>3000, + true = #{}==C1, + + ok = logger:update_formatter_config(?MODULE,single_line,false), + logger:info("~p~n",[D]), + {Lines2,C2} = check_log(), + [ct:log(L) || L <- Lines2], + ct:log("~p",[C2]), + true = length(Lines2)>50, + true = #{single_line=>false}==C2, + + ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}), + logger:info("~p~n",[D]), + {Lines3,C3} = check_log(), + [ct:log(L) || L <- Lines3], + ct:log("~p",[C3]), + ["=INFO REPORT==== "++_|D3] = Lines3, + true = length(D3)>50, + true = #{legacy_header=>true,single_line=>false}==C3, + + ok = logger:update_formatter_config(?MODULE,single_line,true), + logger:info("~p~n",[D]), + {Lines4,C4} = check_log(), + [ct:log(L) || L <- Lines4], + ct:log("~p",[C4]), + ["=INFO REPORT==== "++_,D4] = Lines4, + true = length(D4)>3000, + true = #{legacy_header=>true,single_line=>true}==C4, + + ok. + +update_config(cleanup,_Config) -> + _ = logger:remove_handler(?MODULE), + _ = logger:remove_handler_filter(default,silence), + ok. + %%%----------------------------------------------------------------- %%% Internal format(Level,Msg,Meta,Config) -> @@ -520,22 +600,16 @@ format(Log,Config) -> default_time_format(Timestamp) -> default_time_format(Timestamp,false). -default_time_format(Timestamp0,Utc) when is_integer(Timestamp0) -> - Timestamp=Timestamp0+erlang:time_offset(microsecond), - %% calendar:system_time_to_rfc3339(Time,[{unit,microsecond}]). - Micro = Timestamp rem 1000000, - Sec = Timestamp div 1000000, - UniversalTime = erlang:posixtime_to_universaltime(Sec), - {Date,Time} = - if Utc -> UniversalTime; - true -> erlang:universaltime_to_localtime(UniversalTime) - end, - default_time_format(Date,Time,Micro). - -default_time_format({Y,M,D},{H,Min,S},Micro) -> - lists:flatten( - io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w", - [Y,M,D,H,Min,S,Micro])). +default_time_format(Timestamp,Utc) -> + default_time_format(Timestamp,Utc,$T). + +default_time_format(Timestamp,Utc,Sep) -> + Offset = if Utc -> "Z"; + true -> "" + end, + calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond}, + {time_designator,Sep}, + {offset,Offset}]). integer(Str) -> is_integer(list_to_integer(Str)). @@ -551,10 +625,20 @@ my_try(Fun) -> try Fun() catch C:R:S -> {C,R,hd(S)} end. timestamp() -> - erlang:monotonic_time(microsecond). + erlang:system_time(microsecond). %% necessary? add_time(#{time:=_}=Meta) -> Meta; add_time(Meta) -> Meta#{time=>timestamp()}. + +%%%----------------------------------------------------------------- +%%% handler callback +log(Log,#{formatter:={M,C}}) -> + put(log,{M:format(Log,C),C}), + ok. + +check_log() -> + {S,C} = erase(log), + {string:lexemes(S,"\n"),C}. 
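Note: the rewritten default_time_format/3 above delegates timestamp formatting to calendar:system_time_to_rfc3339/2. Below is a minimal standalone sketch of how those options relate to the formatter's time_offset and time_designator settings; the module and function names are illustrative and not part of the suite.

-module(rfc3339_demo).
-export([demo/0]).

%% Timestamps are microseconds since the epoch, as produced by
%% erlang:system_time(microsecond) in timestamp/0 above.
demo() ->
    T = erlang:system_time(microsecond),
    Local = calendar:system_time_to_rfc3339(T, [{unit,microsecond},
                                                {offset,""}]),    % local time
    Utc   = calendar:system_time_to_rfc3339(T, [{unit,microsecond},
                                                {offset,"Z"}]),   % UTC, "Z" suffix
    Space = calendar:system_time_to_rfc3339(T, [{unit,microsecond},
                                                {offset,"Z"},
                                                {time_designator,$\s}]), % space instead of "T"
    {Local, Utc, Space}.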
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl index b59f5f7758..cfba35e43f 100644 --- a/lib/kernel/test/logger_legacy_SUITE.erl +++ b/lib/kernel/test/logger_legacy_SUITE.erl @@ -68,13 +68,13 @@ init_per_group(std, Config) -> ok = logger:set_handler_config( error_logger,filters, [{domain,{fun logger_filters:domain/2, - {log,prefix_of,[beam,erlang,otp]}}}]), + {log,super,[beam,erlang,otp]}}}]), Config; init_per_group(sasl, Config) -> ok = logger:set_handler_config( error_logger,filters, [{domain,{fun logger_filters:domain/2, - {log,prefix_of,[beam,erlang,otp,sasl]}}}]), + {log,super,[beam,erlang,otp,sasl]}}}]), %% cth_log_redirect checks if sasl is started before displaying %% any sasl reports - so just to see the real sasl reports in tc diff --git a/lib/kernel/test/logger_simple_SUITE.erl b/lib/kernel/test/logger_simple_SUITE.erl deleted file mode 100644 index 5d8d32492d..0000000000 --- a/lib/kernel/test/logger_simple_SUITE.erl +++ /dev/null @@ -1,247 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2018. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(logger_simple_SUITE). - --compile(export_all). - --include_lib("common_test/include/ct.hrl"). --include_lib("kernel/include/logger.hrl"). --include_lib("kernel/src/logger_internal.hrl"). - --define(check_no_log,[] = test_server:messages_get()). --define(check(Expected), - receive {log,Expected} -> - [] = test_server:messages_get() - after 1000 -> - ct:fail({report_not_received, - {line,?LINE}, - {expected,Expected}, - {got,test_server:messages_get()}}) - end). - --define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ - ":"++integer_to_list(?LINE)). --define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). --define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). - -suite() -> - [{timetrap,{seconds,30}}]. - -init_per_suite(Config) -> - #{handlers:=Hs0} = logger:i(), - Hs = lists:keydelete(cth_log_redirect,1,Hs0), - [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], - Env = [{App,Key,application:get_env(App,Key)} || - {App,Key} <- [{kernel,logger_dest}, - {kernel,logger_level}]], - [{env,Env},{logger,Hs}|Config]. - -end_per_suite(Config) -> - [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], - Hs = ?config(logger,Config), - [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], - ok. - -init_per_group(_Group, Config) -> - Config. - -end_per_group(_Group, _Config) -> - ok. - -init_per_testcase(_TestCase, Config) -> - Config. - -end_per_testcase(Case, Config) -> - try apply(?MODULE,Case,[cleanup,Config]) - catch error:undef -> ok - end, - ok. - -groups() -> - []. - -all() -> - [start_stop, - get_buffer, - replace_file, - replace_disk_log - ]. 
- -start_stop(_Config) -> - undefined = whereis(logger_simple), - register(logger_simple,self()), - {error,_} = logger:add_handler(logger_simple, - logger_simple, - #{filter_default=>log}), - unregister(logger_simple), - ok = logger:add_handler(logger_simple,logger_simple,#{filter_default=>log}), - Pid = whereis(logger_simple), - true = is_pid(Pid), - ok = logger:remove_handler(logger_simple), - false = is_pid(whereis(logger_simple)), - ok. -start_stop(cleanup,_Config) -> - logger:remove_handler(logger_simple). - -get_buffer(_Config) -> - %% Start simple without buffer - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log}), - logger:emergency(?str), - logger:alert(?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - {ok,[]} = logger_simple:get_buffer(), % no buffer - ok = logger:remove_handler(logger_simple), - - %% Start with buffer - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M1=?str), - logger:alert(M2=?str,[]), - logger:error(M3=?map_rep), - logger:info(M4=?keyval_rep), - logger:info(M41=?keyval_rep++[not_key_val]), - error_logger:error_report(some_type,M5=?map_rep), - error_logger:warning_report("some_type",M6=?map_rep), - logger:critical(M7=?str,[A7=?keyval_rep]), - logger:notice(M8=["fake",string,"line:",?LINE]), - {ok,Buffered1} = logger_simple:get_buffer(), - [#{level:=emergency,msg:={string,M1}}, - #{level:=alert,msg:={M2,[]}}, - #{level:=error,msg:={report,M3}}, - #{level:=info,msg:={report,M4}}, - #{level:=info,msg:={report,M41}}, - #{level:=error,msg:={report,#{label:={error_logger,error_report}, - report:=M5}}}, - #{level:=warning,msg:={report,#{label:={error_logger,warning_report}, - report:=M6}}}, - #{level:=critical,msg:={M7,[A7]}}, - #{level:=notice,msg:={string,M8}}] = Buffered1, - - %% Keep logging - should not buffer any more - logger:emergency(?str), - logger:alert(?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - {ok,[]} = logger_simple:get_buffer(), - ok = logger:remove_handler(logger_simple), - - %% Fill buffer and drop - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M9=?str), - M10=?str, - [logger:info(M10) || _ <- lists:seq(1,8)], - logger:error(M11=?str), - logger:error(?str), - logger:error(?str), - {ok,Buffered3} = logger_simple:get_buffer(), - 11 = length(Buffered3), - [#{level:=emergency,msg:={string,M9}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=info,msg:={string,M10}}, - #{level:=error,msg:={string,M11}}, - #{level:=info,msg:={"Simple handler buffer full, dropped ~w messages",[2]}}] - = Buffered3, - ok. -get_buffer(cleanup,_Config) -> - logger:remove_handler(logger_simple). 
- -replace_file(Config) -> - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M1=?str), - logger:alert(M2=?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - undefined = whereis(?STANDARD_HANDLER), - PrivDir = ?config(priv_dir,Config), - File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)++".log"), - - application:set_env(kernel,logger_dest,{file,File}), - application:set_env(kernel,logger_level,info), - - ok = logger:setup_standard_handler(), - true = is_pid(whereis(?STANDARD_HANDLER)), - ok = logger_std_h:filesync(?STANDARD_HANDLER), - {ok,Bin} = file:read_file(File), - Lines = [unicode:characters_to_list(L) || - L <- binary:split(Bin,<<"\n">>,[global,trim])], - ["=EMERGENCY REPORT===="++_, - M1, - "=ALERT REPORT===="++_, - M2, - "=ERROR REPORT===="++_, - _, - _, - "=INFO REPORT===="++_, - _, - _] = Lines, - ok. -replace_file(cleanup,_Config) -> - logger:remove_handler(?STANDARD_HANDLER), - logger:remove_handler(logger_simple). - -replace_disk_log(Config) -> - ok = logger:add_handler(logger_simple,logger_simple, - #{filter_default=>log, - logger_simple=>#{buffer=>true}}), - logger:emergency(M1=?str), - logger:alert(M2=?str,[]), - logger:error(?map_rep), - logger:info(?keyval_rep), - undefined = whereis(?STANDARD_HANDLER), - PrivDir = ?config(priv_dir,Config), - File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)), - - application:set_env(kernel,logger_dest,{disk_log,File}), - application:set_env(kernel,logger_level,info), - - ok = logger:setup_standard_handler(), - true = is_pid(whereis(?STANDARD_HANDLER)), - ok = logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), - {ok,Bin} = file:read_file(File++".1"), - Lines = [unicode:characters_to_list(L) || - L <- binary:split(Bin,<<"\n">>,[global,trim])], - ["=EMERGENCY REPORT===="++_, - M1, - "=ALERT REPORT===="++_, - M2, - "=ERROR REPORT===="++_, - _, - _, - "=INFO REPORT===="++_, - _, - _|_] = Lines, % the tail might be an info report about opening the disk log - ok. -replace_disk_log(cleanup,_Config) -> - logger:remove_handler(?STANDARD_HANDLER), - logger:remove_handler(logger_simple). - diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl new file mode 100644 index 0000000000..271a2126de --- /dev/null +++ b/lib/kernel/test/logger_simple_h_SUITE.erl @@ -0,0 +1,210 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]). + +-define(check_no_log,[] = test_server:messages_get()). 
+-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks, [logger_test_lib]}]. + +init_per_suite(Config) -> + #{handlers:=Hs0} = logger:i(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Env = [{App,Key,application:get_env(App,Key)} || + {App,Key} <- [{kernel,logger_level}]], + [{env,Env},{logger,Hs}|Config]. + +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + replace_default, + replace_file, + replace_disk_log + ]. + +start_stop(_Config) -> + undefined = whereis(logger_simple_h), + register(logger_simple_h,self()), + {error,_} = logger:add_handler(simple, + logger_simple_h, + #{filter_default=>log}), + unregister(logger_simple_h), + ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}), + Pid = whereis(logger_simple_h), + true = is_pid(Pid), + ok = logger:remove_handler(simple), + false = is_pid(whereis(logger_simple_h)), + ok. +start_stop(cleanup,_Config) -> + logger:remove_handler(simple). + +%% This testcase just tests that it does not crash, the default handler prints +%% to stdout which we cannot read from in a detached slave. +replace_default(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [M3=?map_rep]), + log(Node, info, [M4=?keyval_rep]), + log(Node, info, [M41=?keyval_rep++[not_key_val]]), + rpc:call(Node, error_logger, error_report, [some_type,M5=?map_rep]), + rpc:call(Node, error_logger, warning_report, ["some_type",M6=?map_rep]), + log(Node, critical, [M7=?str,[A7=?keyval_rep]]), + log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + + Env = rpc:call(Node, application, get_env, [kernel, logger, []]), + ok = rpc:call(Node, logger, add_handlers, [Env]), + + ok. 
+ +replace_file(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [M3=?map_rep]), + log(Node, info, [M4=?keyval_rep]), + log(Node, info, [M41=?keyval_rep++[not_key_val]]), + log(Node, critical, [M7=?str,[A7=?keyval_rep]]), + log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_std_h, + #{ logger_std_h => #{ type => {file, File} }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + + {ok,Bin} = sync_and_read(Node, file, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. + +replace_disk_log(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [M3=?map_rep]), + log(Node, info, [M4=?keyval_rep]), + log(Node, info, [M41=?keyval_rep++[not_key_val]]), + log(Node, critical, [M7=?str,[A7=?keyval_rep]]), + log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_disk_log_h, + #{ disk_log_opts => #{ file => File }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + {ok,Bin} = sync_and_read(Node, disk_log, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl index 7c8d63cbbd..5764abd063 100644 --- a/lib/kernel/test/logger_std_h_SUITE.erl +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -50,11 +50,12 @@ end). suite() -> - [{timetrap,{seconds,30}}]. + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. init_per_suite(Config) -> timer:start(), % to avoid progress report - {ok,{?STANDARD_HANDLER,#{formatter:=OrigFormatter}}} = + {ok,{logger_std_h,#{formatter:=OrigFormatter}}} = logger:get_handler_config(?STANDARD_HANDLER), [{formatter,OrigFormatter}|Config]. 
@@ -241,36 +242,38 @@ formatter_fail(Config) -> filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}), Pid = whereis(?MODULE), true = is_pid(Pid), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC1} = logger:i(), + H = [Id || {Id,_,_} <- HC1], true = lists:member(?MODULE,H), %% Formatter is added automatically {ok,{_,#{formatter:={logger_formatter,_}}}} = logger:get_handler_config(?MODULE), logger:info(M1=?msg,?domain), - Got1 = try_match_file(Log,"=INFO REPORT====.*\n"++M1,5000), + Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* info: "++M1,5000), ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}), logger:info(M2=?msg,?domain), Got2 = try_match_file(Log, - Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + escape(Got1)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M2, 5000), ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}), logger:info(M3=?msg,?domain), Got3 = try_match_file(Log, - Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + escape(Got2)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M3, 5000), ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}), logger:info(?msg,?domain), try_match_file(Log, - Got3++"FORMATTER ERROR: bad_return_value", + escape(Got3)++"FORMATTER ERROR: bad_return_value", 5000), %% Check that handler is still alive and was never dead Pid = whereis(?MODULE), - {ok,#{handlers:=H}} = logger:get_logger_config(), + #{handlers:=HC2} = logger:i(), + H = [Id || {Id,_,_} <- HC2], ok. @@ -289,10 +292,17 @@ config_fail(_Config) -> #{logger_std_h => #{restart_type => bad}, filter_default=>log, formatter=>{?MODULE,self()}}), - {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + {error,{handler_not_added,{invalid_levels,{_,1,_}}}} = logger:add_handler(?MODULE,logger_std_h, - #{logger_std_h => #{toggle_sync_qlen=>42, + #{logger_std_h => #{drop_new_reqs_qlen=>1}}), + {error,{handler_not_added,{invalid_levels,{43,42,_}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{toggle_sync_qlen=>43, drop_new_reqs_qlen=>42}}), + {error,{handler_not_added,{invalid_levels,{_,43,42}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{drop_new_reqs_qlen=>43, + flush_reqs_qlen=>42}}), ok = logger:add_handler(?MODULE,logger_std_h, #{filter_default=>log, @@ -315,29 +325,32 @@ config_fail(cleanup,_Config) -> logger:remove_handler(?MODULE). crash_std_h_to_file(Config) -> - crash_std_h(Config,?FUNCTION_NAME,logger_dest,file). + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_std_h, + #{ logger_std_h => #{ type => {file, Log} }}}], + file, Log). crash_std_h_to_file(cleanup,_Config) -> crash_std_h(cleanup). crash_std_h_to_disk_log(Config) -> - crash_std_h(Config,?FUNCTION_NAME,logger_dest,disk_log). + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_disk_log_h, + #{ disk_log_opts => #{ file => Log }}}], + disk_log,Log). crash_std_h_to_disk_log(cleanup,_Config) -> crash_std_h(cleanup). 
-crash_std_h(Config,Func,Var,Type) -> +crash_std_h(Config,Func,Var,Type,Log) -> Dir = ?config(priv_dir,Config), - File = lists:concat([?MODULE,"_",Func,".log"]), - Log = filename:join(Dir,File), + SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])), + ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])), Pa = filename:dirname(code:which(?MODULE)), - TypeAndLog = - case os:type() of - {win32,_} -> - lists:concat([" {",Type,",\\\"",Log,"\\\"}"]); - _ -> - lists:concat([" \'{",Type,",\"",Log,"\"}\'"]) - end, - Args = lists:concat([" -kernel ",Var,TypeAndLog," -pa ",Pa]), Name = lists:concat([?MODULE,"_",Func]), + Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]), ct:pal("Starting ~p with ~tp", [Name,Args]), %% Start a node which prints kernel logs to the destination specified by Type {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), @@ -578,7 +591,7 @@ write_failure(Config) -> Dir = ?config(priv_dir, Config), File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), Log = filename:join(Dir, File), - Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + Node = start_std_h_on_new_node(Config, Log), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -615,7 +628,7 @@ sync_failure(Config) -> Dir = ?config(priv_dir, Config), File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), Log = filename:join(Dir, File), - Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + Node = start_std_h_on_new_node(Config, Log), false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), @@ -651,21 +664,12 @@ sync_failure(cleanup, _Config) -> Nodes = nodes(), [test_server:stop_node(Node) || Node <- Nodes]. -start_std_h_on_new_node(_Config, Func, Log) -> - Pa = filename:dirname(code:which(?MODULE)), - Dest = - case os:type() of - {win32,_} -> - lists:concat([" {file,\\\"",Log,"\\\"}"]); - _ -> - lists:concat([" \'{file,\"",Log,"\"}\'"]) - end, - Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), - Name = lists:concat([?MODULE,"_",Func]), - ct:pal("Starting ~s with ~tp", [Name,Args]), - {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), - Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), - true = is_pid(Pid), +start_std_h_on_new_node(Config, Log) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_std_h, + #{ logger_std_h => #{ type => {file,Log}}}}]}]), ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, {?MODULE,nl}]), Node. 
@@ -691,16 +695,17 @@ internal_log(Type, Term) -> op_switch_to_sync_file(Config) -> {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 500, NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, - drop_new_reqs_qlen => 501, - flush_reqs_qlen => 2000, + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => NumOfReqs+1, + flush_reqs_qlen => 2*NumOfReqs, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), %% TRecvPid = start_op_trace(), - NumOfReqs = 500, send_burst({n,NumOfReqs}, seq, {chars,79}, info), - NumOfReqs = count_lines(Log), + Lines = count_lines(Log), + ok = file:delete(Log), %% true = analyse_trace(TRecvPid, %% fun(Events) -> find_mode(async,Events) end), %% true = analyse_trace(TRecvPid, @@ -711,68 +716,82 @@ op_switch_to_sync_file(Config) -> %% fun(Events) -> find_mode(drop,Events) end), %% false = analyse_trace(TRecvPid, %% fun(Events) -> find_mode(flush,Events) end), - ok = file:delete(Log), %% stop_op_trace(TRecvPid), + NumOfReqs = Lines, ok. op_switch_to_sync_file(cleanup, _Config) -> ok = stop_handler(?MODULE). op_switch_to_sync_tty(Config) -> {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NumOfReqs = 500, NewHConfig = HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, - drop_new_reqs_qlen => 501, - flush_reqs_qlen => 2000, + drop_new_reqs_qlen => NumOfReqs+1, + flush_reqs_qlen => 2*NumOfReqs, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, send_burst({n,NumOfReqs}, seq, {chars,79}, info), ok. op_switch_to_sync_tty(cleanup, _Config) -> ok = stop_handler(?MODULE). +op_switch_to_drop_file() -> + [{timetrap,{seconds,180}}]. op_switch_to_drop_file(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 3, - flush_reqs_qlen => 600, + Test = + fun() -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 300, + Procs = 2, + Bursts = 10, + NewHConfig = + HConfig#{logger_std_h => + StdHConfig#{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => + Procs*NumOfReqs*Bursts, enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - %% TRecvPid = start_op_trace(), - NumOfReqs = 500, - send_burst({n,NumOfReqs}, seq, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages dropped = ~w (~w)", - [NumOfReqs-Logged,NumOfReqs]), - true = (Logged < NumOfReqs), - %% true = analyse_trace(TRecvPid, - %% fun(Events) -> find_mode(async,Events) end), - %% true = analyse_trace(TRecvPid, - %% fun(Events) -> find_mode(drop,Events) end), - %% false = analyse_trace(TRecvPid, - %% fun(Events) -> find_mode(flush,Events) end), - %% true = analyse_trace(TRecvPid, - %% fun(Events) -> find_switch(async,drop,Events) - %% orelse find_switch(sync,drop,Events) - %% end), - ok = file:delete(Log), - %% stop_op_trace(TRecvPid), - ok. + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% It sometimes happens that the handler gets the + %% requests in a slow enough pace so that dropping + %% never occurs. Therefore, lets generate a number of + %% bursts to increase the chance of message buildup. 
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1, Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), + true = (Logged < (Procs*NumOfReqs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_drop_file(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). op_switch_to_drop_tty(Config) -> {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NumOfReqs = 300, + Procs = 2, NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 3, - flush_reqs_qlen => 600, + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => + Procs*NumOfReqs+1, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 500, - send_burst({n,NumOfReqs}, seq, {chars,79}, info), + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), ok. op_switch_to_drop_tty(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -780,32 +799,54 @@ op_switch_to_drop_tty(cleanup, _Config) -> op_switch_to_flush_file() -> [{timetrap,{minutes,3}}]. op_switch_to_flush_file(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - - %% it's important that both async and sync requests have been queued - %% when the flush happens (verify with coverage of flush_log_requests/2) - - NewHConfig = - HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 99, - flush_reqs_qlen => 100, + Test = + fun() -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% NOTE: it's important that both async and sync + %% requests have been queued when the flush happens + %% (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_std_h => + StdHConfig#{toggle_sync_qlen => 2, + %% disable drop mode + drop_new_reqs_qlen => 300, + flush_reqs_qlen => 300, enable_burst_limit => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 10000, - Procs = 100, - send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), - Logged = count_lines(Log), - ct:pal("Number of messages flushed/dropped = ~w (~w)", - [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), - true = (Logged < (NumOfReqs*Procs)), - - %%! --- Thu Apr 12 13:46:00 2018 --- peppe was here! - %%! TODO: Verify that handler has switched to flush mode - - ok = file:delete(Log), - ok. + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1500, + Procs = 10, + Bursts = 10, + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that flushing + %% never occurs, or it gets all messages at once, + %% causing all messages to get flushed (no dropping of + %% sync messages gets tested). Therefore, lets + %% generate a number of bursts to increase the chance + %% of message buildup in some random fashion. 
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) || + _ <- lists:seq(1,Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + _ = file:delete(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), + true = (Logged < (NumOfReqs*Procs*Bursts)), + true = (Logged > 0), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. op_switch_to_flush_file(cleanup, _Config) -> - ok = stop_handler(?MODULE). + _ = stop_handler(?MODULE). op_switch_to_flush_tty(Config) -> {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), @@ -815,12 +856,13 @@ op_switch_to_flush_tty(Config) -> NewHConfig = HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, - drop_new_reqs_qlen => 99, + %% disable drop mode + drop_new_reqs_qlen => 100, flush_reqs_qlen => 100, enable_burst_limit => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - NumOfReqs = 10000, - Procs = 10, + NumOfReqs = 1000, + Procs = 100, send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), ok. op_switch_to_flush_tty(cleanup, _Config) -> @@ -904,10 +946,10 @@ kill_disabled(cleanup, _Config) -> ok = stop_handler(?MODULE). qlen_kill_new(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>10, @@ -927,7 +969,7 @@ qlen_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), true = is_pid(whereis(?MODULE)), ok after @@ -941,7 +983,7 @@ qlen_kill_new(cleanup, _Config) -> %% choke the standard handler on remote node to verify the termination %% works as expected -qlen_kill_std(Config) -> +qlen_kill_std(_Config) -> %%! HERE %% Dir = ?config(priv_dir, Config), %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), @@ -955,10 +997,10 @@ qlen_kill_std(Config) -> {skip,"Not done yet"}. mem_kill_new(Config) -> - {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(?MODULE), {_,Mem0} = process_info(Pid0, memory), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig = HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>50000, @@ -978,7 +1020,7 @@ mem_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter * 2), + timer:sleep(RestartAfter * 3), true = is_pid(whereis(?MODULE)), ok after @@ -992,7 +1034,7 @@ mem_kill_new(cleanup, _Config) -> %% choke the standard handler on remote node to verify the termination %% works as expected -mem_kill_std(Config) -> +mem_kill_std(_Config) -> {skip,"Not done yet"}. 
restart_after(Config) -> @@ -1016,7 +1058,7 @@ restart_after(Config) -> end, {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), - RestartAfter = 2000, + RestartAfter = ?HANDLER_RESTART_AFTER, NewHConfig2 = HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, handler_overloaded_qlen=>10, @@ -1028,7 +1070,7 @@ restart_after(Config) -> send_burst({n,100}, {spawn,2,0}, {chars,79}, info), receive {'DOWN', MRef2, _, _, _Info2} -> - timer:sleep(RestartAfter + 1000), + timer:sleep(RestartAfter + 2000), Pid1 = whereis(?MODULE), true = is_pid(Pid1), false = (Pid1 == Pid0), @@ -1074,7 +1116,7 @@ handler_requests_under_load(Config) -> NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), ok = file:delete(Log). -handler_requests_under_load(cleanup, Config) -> +handler_requests_under_load(cleanup, _Config) -> ok = stop_handler(?MODULE). send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> @@ -1126,8 +1168,9 @@ start_handler(Name, FuncName, Config) -> {Log,HConfig,StdHConfig}. stop_handler(Name) -> - ok = logger:remove_handler(Name), - ct:pal("Handler ~p stopped!", [Name]). + R = logger:remove_handler(Name), + ct:pal("Handler ~p stopped! Result: ~p", [Name,R]), + R. count_lines(File) -> wait_until_written(File, -1), @@ -1299,6 +1342,30 @@ try_match_file(_,Pattern,_,Incorrect) -> [Pattern,Incorrect]), erlang:error({error,not_matching_pattern,Pattern,Incorrect}). +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). + +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + + %%%----------------------------------------------------------------- %%% start_op_trace() -> @@ -1339,17 +1406,17 @@ find_mode(flush, Events) -> find_mode(Mode, Events) -> lists:keymember([{mode,Mode}], 3, Events). -find_switch(From, To, Events) -> - try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, - {trace_call,check_load,[#{mode := From}]}) -> - throw(match); - (Event, _) -> - Event - end, undefined, Events) of - _ -> false - catch - throw:match -> true - end. +%% find_switch(_From, To, Events) -> +%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, +%% {trace_call,check_load,[#{mode := From}]}) -> +%% throw(match); +%% (Event, _) -> +%% Event +%% end, undefined, Events) of +%% _ -> false +%% catch +%% throw:match -> true +%% end. analyse_trace(TRecvPid, TestFun) -> TRecvPid ! {test,self(),TestFun}, @@ -1411,7 +1478,7 @@ tpl([{M,F,A}|Trace]) -> tpl([]) -> ok. -tracer({trace,_,call,{logger_std_h,handle_cast,[{Op,_}|_]}}, +tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) -> maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}}, @@ -1451,3 +1518,10 @@ check_tracer(T,TimeoutFun) -> dbg:stop_clear(), TimeoutFun() end. + +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. 
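Note: the config_fail/1 and op_switch_* cases above exercise the overload-protection thresholds of logger_std_h. As a hedged sketch of the constraint they rely on (handler id and file path below are made up; the configuration keys are the ones used in the tests), the three queue-length limits are expected to be ordered toggle_sync_qlen =< drop_new_reqs_qlen =< flush_reqs_qlen, otherwise add_handler returns {error,{handler_not_added,{invalid_levels,...}}}.

%% Illustrative only; assumes the same logger_std_h keys as the tests above.
ok = logger:add_handler(my_handler, logger_std_h,
                        #{logger_std_h => #{type => {file,"/tmp/my_handler.log"},
                                            toggle_sync_qlen   => 10,      % switch to sync mode
                                            drop_new_reqs_qlen => 200,     % start dropping requests
                                            flush_reqs_qlen    => 1000}}). % flush the whole queue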
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl new file mode 100644 index 0000000000..4ac05e6480 --- /dev/null +++ b/lib/kernel/test/logger_test_lib.erl @@ -0,0 +1,82 @@ +% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_test_lib). + +-include_lib("kernel/src/logger_internal.hrl"). + +-export([setup/2, log/3, sync_and_read/3]). + +-export([init/2, + pre_init_per_suite/3, pre_init_per_testcase/4, + post_end_per_testcase/5, post_end_per_suite/3]). + +setup(Config,Vars) -> + FuncStr = lists:concat([proplists:get_value(suite, Config), "_", + proplists:get_value(tc, Config)]), + ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr), + file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])), + case test_server:start_node(proplists:get_value(tc, Config), slave, + [{args, ["-pa ",filename:dirname(code:which(?MODULE)), + " -boot start_sasl -kernel start_timer true " + "-config ",ConfigFileName]}]) of + {ok, Node} -> + L = rpc:call(Node, logger, i, []), + ct:log("~p",[L]), + {ok, L, Node}; + {error, Reason} -> + ct:log("Failed to start node: ~p",[Reason]), + error + end. + +log(Node, F, A) -> + log(Node, logger, F, A). +log(Node, M, F, A) -> + MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) }, + rpc:call(Node, M, F, A ++ [MD]). + +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,disk_log_sync,[?STANDARD_HANDLER]), + file:read_file(Log ++ ".1"); +sync_and_read(Node, file,Log) -> + ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + file:read_file(Log). + + +init(_, _) -> + {ok, []}. + +pre_init_per_suite(_Suite, Config, State) -> + {[{nodes, nodes()} | Config], State}. + +pre_init_per_testcase(Suite, TC, Config, State) -> + cleanup(Config), + {[{suite, Suite}, {tc, TC} | Config], State}. + +post_end_per_testcase(_, _TC, Config, Res, State) -> + cleanup(Config), + {Res, State}. + +post_end_per_suite(_, Config, State) -> + cleanup(Config), + {Config, State}. + +cleanup(Config) -> + [test_server:stop_node(N) || N <- nodes(), + not lists:member(N, proplists:get_value(nodes, Config))]. 
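Note: logger_test_lib doubles as a CT hook (for slave-node cleanup) and a helper module. A sketch of the intended usage, mirroring logger_simple_h_SUITE and logger_std_h_SUITE above (the test case name and log file name are illustrative):

suite() ->
    [{timetrap,{seconds,30}},
     {ct_hooks,[logger_test_lib]}].   % the hook stops any slave nodes left behind

some_case(Config) ->
    Log = filename:join(proplists:get_value(priv_dir,Config), "some_case.log"),
    {ok,_HandlerInfo,Node} =
        logger_test_lib:setup(Config,
                              [{logger,[{handler,default,logger_std_h,
                                         #{logger_std_h => #{type => {file,Log}}}}]}]),
    ok = logger_test_lib:log(Node, error, ["example message"]),
    {ok,Bin} = logger_test_lib:sync_and_read(Node, file, Log),
    true = byte_size(Bin) > 0,
    ok.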
diff --git a/lib/public_key/src/pubkey_pem.erl b/lib/public_key/src/pubkey_pem.erl index 06a4455b3f..bacc9ec600 100644 --- a/lib/public_key/src/pubkey_pem.erl +++ b/lib/public_key/src/pubkey_pem.erl @@ -209,6 +209,8 @@ pem_start('DSAPrivateKey') -> <<"-----BEGIN DSA PRIVATE KEY-----">>; pem_start('DHParameter') -> <<"-----BEGIN DH PARAMETERS-----">>; +pem_start('PrivateKeyInfo') -> + <<"-----BEGIN PRIVATE KEY-----">>; pem_start('EncryptedPrivateKeyInfo') -> <<"-----BEGIN ENCRYPTED PRIVATE KEY-----">>; pem_start('CertificationRequest') -> diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl index 931901640a..1c4acc9e1a 100644 --- a/lib/public_key/src/public_key.erl +++ b/lib/public_key/src/public_key.erl @@ -237,7 +237,7 @@ der_decode(Asn1Type, Der) when (Asn1Type == 'PrivateKeyInfo') or andalso is_binary(Der) -> try {ok, Decoded} = 'PKCS-FRAME':decode(Asn1Type, Der), - Decoded + der_priv_key_decode(Decoded) catch error:{badmatch, {error, _}} = Error -> erlang:error(Error) @@ -252,12 +252,45 @@ der_decode(Asn1Type, Der) when is_atom(Asn1Type), is_binary(Der) -> erlang:error(Error) end. +der_priv_key_decode({'PrivateKeyInfo', v1, + {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-ecPublicKey', {asn1_OPENTYPE, Parameters}}, PrivKey, _}) -> + EcPrivKey = der_decode('ECPrivateKey', PrivKey), + EcPrivKey#'ECPrivateKey'{parameters = der_decode('EcpkParameters', Parameters)}; +der_priv_key_decode({'PrivateKeyInfo', v1, + {'PrivateKeyInfo_privateKeyAlgorithm', ?'rsaEncryption', _}, PrivKey, _}) -> + der_decode('RSAPrivateKey', PrivKey); +der_priv_key_decode({'PrivateKeyInfo', v1, + {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-dsa', {asn1_OPENTYPE, Parameters}}, PrivKey, _}) -> + {params, #'Dss-Parms'{p=P, q=Q, g=G}} = der_decode('DSAParams', Parameters), + X = der_decode('Prime-p', PrivKey), + #'DSAPrivateKey'{p=P, q=Q, g=G, x=X}; +der_priv_key_decode(PKCS8Key) -> + PKCS8Key. + %%-------------------------------------------------------------------- -spec der_encode(asn1_type(), term()) -> Der::binary(). %% %% Description: Encodes a public key entity with asn1 DER encoding. 
%%-------------------------------------------------------------------- -der_encode(Asn1Type, Entity) when (Asn1Type == 'PrivateKeyInfo') or + +der_encode('PrivateKeyInfo', #'DSAPrivateKey'{p=P, q=Q, g=G, x=X}) -> + der_encode('PrivateKeyInfo', + {'PrivateKeyInfo', v1, + {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-dsa', + {asn1_OPENTYPE, der_encode('Dss-Parms', #'Dss-Parms'{p=P, q=Q, g=G})}}, + der_encode('Prime-p', X), asn1_NOVALUE}); +der_encode('PrivateKeyInfo', #'RSAPrivateKey'{} = PrivKey) -> + der_encode('PrivateKeyInfo', + {'PrivateKeyInfo', v1, + {'PrivateKeyInfo_privateKeyAlgorithm', ?'rsaEncryption', {asn1_OPENTYPE, ?DER_NULL}}, + der_encode('RSAPrivateKey', PrivKey), asn1_NOVALUE}); +der_encode('PrivateKeyInfo', #'ECPrivateKey'{parameters = Parameters} = PrivKey) -> + der_encode('PrivateKeyInfo', + {'PrivateKeyInfo', v1, + {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-ecPublicKey', + {asn1_OPENTYPE, der_encode('EcpkParameters', Parameters)}}, + der_encode('ECPrivateKey', PrivKey#'ECPrivateKey'{parameters = asn1_NOVALUE}), asn1_NOVALUE}); +der_encode(Asn1Type, Entity) when (Asn1Type == 'PrivateKeyInfo') or (Asn1Type == 'EncryptedPrivateKeyInfo') -> try {ok, Encoded} = 'PKCS-FRAME':encode(Asn1Type, Entity), diff --git a/lib/public_key/test/pbe_SUITE.erl b/lib/public_key/test/pbe_SUITE.erl index 44caf479e5..8a5db4efec 100644 --- a/lib/public_key/test/pbe_SUITE.erl +++ b/lib/public_key/test/pbe_SUITE.erl @@ -226,11 +226,6 @@ pbes2(Config) when is_list(Config) -> ok end. -check_key_info(#'PrivateKeyInfo'{privateKeyAlgorithm = - #'PrivateKeyInfo_privateKeyAlgorithm'{algorithm = ?rsaEncryption}, - privateKey = Key}) -> - #'RSAPrivateKey'{} = public_key:der_decode('RSAPrivateKey', iolist_to_binary(Key)). - decode_encode_key_file(File, Password, Cipher, Config) -> Datadir = proplists:get_value(data_dir, Config), {ok, PemKey} = file:read_file(filename:join(Datadir, File)), @@ -238,11 +233,10 @@ decode_encode_key_file(File, Password, Cipher, Config) -> PemEntry = public_key:pem_decode(PemKey), ct:print("Pem entry: ~p" , [PemEntry]), [{Asn1Type, _, {Cipher,_} = CipherInfo} = PubEntry] = PemEntry, - KeyInfo = public_key:pem_entry_decode(PubEntry, Password), + #'RSAPrivateKey'{} = KeyInfo = public_key:pem_entry_decode(PubEntry, Password), PemKey1 = public_key:pem_encode([public_key:pem_entry_encode(Asn1Type, KeyInfo, {CipherInfo, Password})]), Pem = strip_ending_newlines(PemKey), - Pem = strip_ending_newlines(PemKey1), - check_key_info(KeyInfo). + Pem = strip_ending_newlines(PemKey1). strip_ending_newlines(Bin) -> string:strip(binary_to_list(Bin), right, 10). diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl index 449d1fc040..572748edc9 100644 --- a/lib/public_key/test/public_key_SUITE.erl +++ b/lib/public_key/test/public_key_SUITE.erl @@ -64,6 +64,7 @@ all() -> groups() -> [{pem_decode_encode, [], [dsa_pem, rsa_pem, ec_pem, encrypted_pem, dh_pem, cert_pem, pkcs7_pem, pkcs10_pem, ec_pem2, + rsa_priv_pkcs8, dsa_priv_pkcs8, ec_priv_pkcs8, ec_pem_encode_generated, gen_ec_param_prime_field, gen_ec_param_char_2_field ]}, @@ -181,6 +182,19 @@ dsa_pem(Config) when is_list(Config) -> DSAPubPemNoEndNewLines = strip_superfluous_newlines(DSAPubPem), DSAPubPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PubEntry0])). +dsa_priv_pkcs8() -> + [{doc, "DSA PKCS8 private key decode/encode"}]. 
+dsa_priv_pkcs8(Config) when is_list(Config) -> + Datadir = proplists:get_value(data_dir, Config), + {ok, DsaPem} = file:read_file(filename:join(Datadir, "dsa_key_pkcs8.pem")), + [{'PrivateKeyInfo', DerDSAKey, not_encrypted} = Entry0 ] = public_key:pem_decode(DsaPem), + DSAKey = public_key:der_decode('PrivateKeyInfo', DerDSAKey), + DSAKey = public_key:pem_entry_decode(Entry0), + true = check_entry_type(DSAKey, 'DSAPrivateKey'), + PrivEntry0 = public_key:pem_entry_encode('PrivateKeyInfo', DSAKey), + DSAPemNoEndNewLines = strip_superfluous_newlines(DsaPem), + DSAPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PrivEntry0])). + %%-------------------------------------------------------------------- rsa_pem() -> @@ -216,6 +230,19 @@ rsa_pem(Config) when is_list(Config) -> RSARawPemNoEndNewLines = strip_superfluous_newlines(RSARawPem), RSARawPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PubEntry1])). +rsa_priv_pkcs8() -> + [{doc, "RSA PKCS8 private key decode/encode"}]. +rsa_priv_pkcs8(Config) when is_list(Config) -> + Datadir = proplists:get_value(data_dir, Config), + {ok, RsaPem} = file:read_file(filename:join(Datadir, "rsa_key_pkcs8.pem")), + [{'PrivateKeyInfo', DerRSAKey, not_encrypted} = Entry0 ] = public_key:pem_decode(RsaPem), + RSAKey = public_key:der_decode('PrivateKeyInfo', DerRSAKey), + RSAKey = public_key:pem_entry_decode(Entry0), + true = check_entry_type(RSAKey, 'RSAPrivateKey'), + PrivEntry0 = public_key:pem_entry_encode('PrivateKeyInfo', RSAKey), + RSAPemNoEndNewLines = strip_superfluous_newlines(RsaPem), + RSAPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PrivEntry0])). + %%-------------------------------------------------------------------- ec_pem() -> @@ -262,6 +289,18 @@ ec_pem2(Config) when is_list(Config) -> ECPemNoEndNewLines = strip_superfluous_newlines(ECPrivPem), ECPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([Entry1, Entry2])). +ec_priv_pkcs8() -> + [{doc, "EC PKCS8 private key decode/encode"}]. +ec_priv_pkcs8(Config) when is_list(Config) -> + Datadir = proplists:get_value(data_dir, Config), + {ok, ECPrivPem} = file:read_file(filename:join(Datadir, "ec_key_pkcs8.pem")), + [{'PrivateKeyInfo', _, not_encrypted} = PKCS8Key] = public_key:pem_decode(ECPrivPem), + ECPrivKey = public_key:pem_entry_decode(PKCS8Key), + true = check_entry_type(ECPrivKey, 'ECPrivateKey'), + true = check_entry_type(ECPrivKey#'ECPrivateKey'.parameters, 'EcpkParameters'), + PrivEntry0 = public_key:pem_entry_encode('PrivateKeyInfo', ECPrivKey), + ECPemNoEndNewLines = strip_superfluous_newlines(ECPrivPem), + ECPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PrivEntry0])). 
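The 'PrivateKeyInfo' support exercised by the new test cases above means that an unencrypted PKCS#8 "BEGIN PRIVATE KEY" entry now decodes straight to the underlying key record and can be re-encoded the same way. A minimal usage sketch mirroring rsa_priv_pkcs8/1 (assumes the public_key include file for the #'RSAPrivateKey'{} record and the rsa_key_pkcs8.pem test data added later in this patch):

    {ok, Pem} = file:read_file("rsa_key_pkcs8.pem"),
    [{'PrivateKeyInfo', _Der, not_encrypted} = Entry] = public_key:pem_decode(Pem),
    %% pem_entry_decode/1 (and der_decode('PrivateKeyInfo', ...)) now return the key record
    #'RSAPrivateKey'{} = Key = public_key:pem_entry_decode(Entry),
    %% pem_entry_encode('PrivateKeyInfo', Key) wraps the record back into PKCS#8
    Entry1 = public_key:pem_entry_encode('PrivateKeyInfo', Key),
    NewPem = public_key:pem_encode([Entry1]).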
 init_ec_pem_encode_generated(Config) ->
     case catch true = lists:member('secp384r1', crypto:ec_curves()) of
diff --git a/lib/public_key/test/public_key_SUITE_data/dsa_key_pkcs8.pem b/lib/public_key/test/public_key_SUITE_data/dsa_key_pkcs8.pem
new file mode 100644
index 0000000000..86e38e2c76
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/dsa_key_pkcs8.pem
@@ -0,0 +1,9 @@
+-----BEGIN PRIVATE KEY-----
+MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBALez5tklY5CdFeTMos899pA6i4u4
+uCtszgBzrdBk6cl5FVqzdzWMGTQiynnTpGsrOESinzP06Ip+pG15We2OORwgvCxD
+/W95aCiN0/+MdiXqlsmboBARMzsa+SmBENN3gF/+tuuEAFzOXU1q2cmEywRLyfbM
+2KIBVE/TChWYw2eRAhUA1R64VvcQ90XA8SOKVDmMA0dBzukCgYEAlLMYP0pbgBlg
+HQVO3/avAHlWNrIq52Lxk7SdPJWgMvPjTK9Z6sv88kxsCcydtjvO439j1yqcwk50
+GQc+86ktBWWz93/HkIdnFyqafef4mmWvm2Uq6ClQKS+A0Asfaj8Mys+HUMiI+qsf
+djRbyIpwb7MX1nsVdsKzALnZNMW27A0EFgIUWYCfDrv5tqwPWKJu00ez0R192SY=
+-----END PRIVATE KEY-----
diff --git a/lib/public_key/test/public_key_SUITE_data/ec_key_pkcs8.pem b/lib/public_key/test/public_key_SUITE_data/ec_key_pkcs8.pem
new file mode 100644
index 0000000000..8280a3671a
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/ec_key_pkcs8.pem
@@ -0,0 +1,5 @@
+-----BEGIN PRIVATE KEY-----
+MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgB349XXSmba5BbJT5UuCK
+OoyoPHsygy6n+WzP1J+8eYShRANCAATTJdDtiqV9Hs7q+Y/yak1z3uJpukFQGYmr
+lJ2iztxfv7bz10eJ5yM/GNqG8kK0w7SIzjedsIkfjRK7bX6mP7h4
+-----END PRIVATE KEY-----
diff --git a/lib/public_key/test/public_key_SUITE_data/rsa_key_pkcs8.pem b/lib/public_key/test/public_key_SUITE_data/rsa_key_pkcs8.pem
new file mode 100644
index 0000000000..9ef5b3353f
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/rsa_key_pkcs8.pem
@@ -0,0 +1,10 @@
+-----BEGIN PRIVATE KEY-----
+MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEA1GLJmDS5yLvg1zqa
+epnwCgOXzxpPvHokDQx+AcgfO14SPtCD6UTlDEwYBp+6tUTm+qgeQN/CTi7POwIA
+m7P3UwIDAQABAkALFiEJ1e7AwLXq5j88GR8Dls5s3CW/Y+zP1ZAaTbT7p0QUMxG+
+0ko7h8NoxcQJHZU27sZXCjog/IBqn577Xv4RAiEA8/aQ09kz0jxi4aNvlix4B+bW
+gX0sYtcCDkBzx8Y6iMkCIQDe3WCxV9PuiDjpuC8cAy3UMC5PBygZG4iK3arpgzxp
+OwIhAKxKJg+mpgVEJiTpsiVhNEeIS1bZWp5W75m3BM1B/haZAiBQOhEcxikcrR0P
+xaXvx5Uv1UhWWpUstKSqmLF17jBJEQIhAMx4HMLqwaGeYwOcxfzxz6Al8fnPmfAR
+hqFR28fVJrWX
+-----END PRIVATE KEY-----
diff --git a/lib/runtime_tools/doc/src/LTTng.xml b/lib/runtime_tools/doc/src/LTTng.xml
index 93937b3fdc..4114542c74 100644
--- a/lib/runtime_tools/doc/src/LTTng.xml
+++ b/lib/runtime_tools/doc/src/LTTng.xml
@@ -111,7 +111,7 @@ $ make </code>
 <p><em>process_register</em></p>
 <list type="bulleted">
     <item><c>pid : string</c> :: Process ID. Ex. <c>"<0.131.0>"</c></item>
-    <item><c>name : string</c> :: Registered name. Ex. <c>"error_logger"</c></item>
+    <item><c>name : string</c> :: Registered name. Ex. <c>"logger"</c></item>
     <item><c>type : string</c> :: <c>"register" | "unregister"</c></item>
 </list>
 <p>Example:</p>
diff --git a/lib/sasl/doc/src/error_logging.xml b/lib/sasl/doc/src/error_logging.xml
index 4b2c960bbb..8731b73599 100644
--- a/lib/sasl/doc/src/error_logging.xml
+++ b/lib/sasl/doc/src/error_logging.xml
@@ -32,22 +32,51 @@
     <rev>B</rev>
     <file>error_logging.xml</file>
   </header>
+  <note>
+    <p>The SASL error logging concept described in this section is
+      deprecated since OTP-21, when the
+      new <seealso marker="kernel:logger_chapter">logging
+      API</seealso> was introduced.</p>
+    <p>The new default behaviour is that the SASL application no
+      longer affects which log events are logged.
+ <seealso marker="#supervisor_report">Supervisor + reports</seealso> and <seealso marker="#crash_report">crash + reports</seealso> are logged via the default logger handler + which is setup by + Kernel. <seealso marker="#progress_report">Progress + reports</seealso> are by default not logged, but can be enabled + by setting the Kernel configuration + parameter <seealso marker="kernel:kernel_app#logger_progress_reports"> + <c>logger_progress_reports</c></seealso> to <c>log</c>.</p> + <p>The old SASL error logging behaviour can be re-enabled by setting the + Kernel configuration + parameter <seealso marker="kernel:kernel_app#logger_sasl_compatible"> + <c>logger_sasl_compatible</c></seealso> to <c>true</c>.</p> + <p>The mechanism + for <seealso marker="#multi_file_logging">multi-file error report + logging</seealso> as described in this section is also kept for + backwards compatibility. However, the new logging API also + introduces <seealso marker="kernel:logger_disk_log_h"> + <c>logger_disk_log_h(3)</c></seealso>, which is a logger + handler that can print to multiple files + using <seealso marker="kernel:disk_log"><c>disk_log(3)</c></seealso>.</p> + </note> + + <section> + <title>SASL reports</title> <p>The SASL application introduces three types of reports:</p> <list type="bulleted"> <item>Supervisor report</item> <item>Progress report</item> <item>Crash report</item> </list> - <p>When the SASL application is started, it adds a handler that - formats and writes these reports, as specified in the configuration - parameters for SASL, that is, the environment variables - in the SASL application specification, which is found in the - <c>.app</c> file of SASL. For details, see the - <seealso marker="sasl_app"><c>sasl(6)</c></seealso> application in the - Reference Manual and the <seealso marker="kernel:app"><c>app(4)</c></seealso> - file in the Kernel Reference Manual.</p> + <p>When the SASL application is started, it adds a logger handler + that formats and writes these reports, as specified in + the <seealso marker="sasl_app#deprecated_error_logger_config">configuration + parameters for SASL</seealso></p> <section> + <marker id="supervisor_report"/> <title>Supervisor Report</title> <p>A supervisor report is issued when a supervised child terminates unexpectedly. A supervisor report contains the following @@ -68,6 +97,7 @@ </section> <section> + <marker id="progress_report"/> <title>Progress Report</title> <p>A progress report is issued when a supervisor starts or restarts a child. A progress report contains the following items:</p> @@ -82,6 +112,7 @@ </section> <section> + <marker id="crash_report"/> <title>Crash Report</title> <p>Processes started with functions <seealso marker="stdlib:proc_lib#spawn/1"><c>proc_lib:spawn</c></seealso> or @@ -105,6 +136,7 @@ crash. The information gathered is the same as the information for Crasher, described in the previous item.</p></item> </taglist> + </section> <section> <title>Example</title> @@ -163,6 +195,7 @@ </section> <section> + <marker id="multi_file_logging"/> <title>Multi-File Error Report Logging</title> <p>Multi-file error report logging is used to store error messages received by <c>error_logger</c>. The error messages @@ -171,7 +204,8 @@ of files exist at the same time. 
The logging is very fast, as each error message is written as a binary term.</p> <p>For more details, see the - <seealso marker="sasl_app"><c>sasl(6)</c></seealso> + <seealso marker="sasl_app#deprecated_error_logger_config"> + <c>sasl(6)</c></seealso> application in the Reference Manual.</p> </section> diff --git a/lib/sasl/doc/src/sasl_app.xml b/lib/sasl/doc/src/sasl_app.xml index 48b0b8eafb..be275879ee 100644 --- a/lib/sasl/doc/src/sasl_app.xml +++ b/lib/sasl/doc/src/sasl_app.xml @@ -86,30 +86,26 @@ <c>RELDIR</c> is used. By default, this is <c>$OTP_ROOT/releases</c>.</p> </item> - <tag><c><![CDATA[utc_log = true | false ]]></c></tag> - <item> - <p>If set to <c>true</c>, all dates in textual log outputs are - displayed in Universal Coordinated Time with the string - <c>UTC</c> appended.</p> - </item> </taglist> </section> <section> + <marker id="deprecated_error_logger_config"/> <title>Deprecated Error Logger Event Handlers and Configuration</title> - <p>In OTP-21, a new API for logging was added to Erlang/OTP. The + <p>In Erlang/OTP 21.0, a new API for logging was added. The old <c>error_logger</c> event manager, and event handlers - running on this manager, will still work, but they are not used + running on this manager, still work, but they are not used by default.</p> <p>The error logger event handlers <c>sasl_report_tty_h</c> - and <c>sasl_report_file_h</c>, were earliger used for printing + and <c>sasl_report_file_h</c>, were earlier used for printing the so called SASL reports, i.e. <em>supervisor reports</em>, <em>crash reports</em>, and <em>progress - reports</em>. These reports are now also printed by the standard + reports</em>. These reports are now also printed by the default logger handler started by the Kernel application. Progress - reports are by default stopped by a filter, but can easily be + reports are by default stopped by a filter, but can be added by setting the Kernel configuration - parameter <seealso marker="kernel:kernel_app#logger_log_progress"><c>logger_log_progress=true</c></seealso>.</p> + parameter <seealso marker="kernel:kernel_app#logger_progress_reports"> + <c>logger_progress_reports</c></seealso> to <c>log</c>.</p> <p>If the old error logger event handlers are still desired, they must be added by calling <c>error_logger:add_report_handler/1,2</c>.</p> @@ -119,32 +115,28 @@ <p>Formats and writes <em>supervisor reports</em>, <em>crash reports</em>, and <em>progress reports</em> to <c>stdio</c>. This error logger event handler uses - <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso> + <seealso marker="kernel:kernel_app#deprecated-configuration-parameters"><c>error_logger_format_depth</c></seealso> in the Kernel application to limit how much detail is printed - in crash and supervisor reports. If <c>logger_format_depth</c> - is not set, it uses the old <c>error_logger_format_depth</c> - instead.</p> + in crash and supervisor reports.</p> </item> <tag><c>sasl_report_file_h</c></tag> <item> <p>Formats and writes <em>supervisor reports</em>, <em>crash report</em>, and <em>progress report</em> to a single file. This error logger event handler uses - <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso> + <seealso marker="kernel:kernel_app#deprecated-configuration-parameters"><c>error_logger_format_depth</c></seealso> in the Kernel application to limit the details printed in - crash and supervisor reports. 
If <c>logger_format_depth</c> is - not set, it uses the old <c>error_logger_format_depth</c> - instead.</p> + crash and supervisor reports.</p> </item> </taglist> <p>A similar behaviour, but still using the new logger API, can be obtained by setting the Kernel application environment variable <seealso marker="kernel:kernel_app#logger_sasl_compatible"><c>logger_sasl_compatible=true</c></seealso>. This will add a second instance of the standard logger handler - named <c>sasl_h</c>, which will only print the SASL reports. No + named <c>sasl</c>, which will only print the SASL reports. No SASL reports will then be printed by the Kernel logger handler.</p> - <p>The <c>sasl_h</c> handler will be configured according to the + <p>The <c>sasl</c> handler will be configured according to the values of the following SASL application environment variables.</p> <taglist> @@ -179,6 +171,12 @@ <c>sasl_error_logger</c> to error reports or progress reports, or both. Default is <c>all</c>.</p> </item> + <tag><marker id="utc_log"/><c><![CDATA[utc_log = true | false ]]></c></tag> + <item> + <p>If set to <c>true</c>, all dates in textual log outputs are + displayed in Universal Coordinated Time with the string + <c>UTC</c> appended.</p> + </item> </taglist> <p>The error logger event handler <c>log_mf_h</c> can also still @@ -222,6 +220,7 @@ <title>See Also</title> <p><seealso marker="alarm_handler"><c>alarm_handler(3)</c></seealso>, <seealso marker="kernel:error_logger"><c>error_logger(3)</c></seealso>, + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>, <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>, <seealso marker="rb"><c>rb(3)</c></seealso>, <seealso marker="release_handler"><c>release_handler(3)</c></seealso>, diff --git a/lib/sasl/src/sasl.erl b/lib/sasl/src/sasl.erl index 2bf11bdcdf..92b80694d7 100644 --- a/lib/sasl/src/sasl.erl +++ b/lib/sasl/src/sasl.erl @@ -129,29 +129,24 @@ get_mf_maxf() -> add_sasl_logger(undefined, _Level) -> ok; add_sasl_logger(std, undefined) -> ok; add_sasl_logger(Dest, Level) -> - FC0 = #{legacy_header=>true, - single_line=>false, - template=>[{logger_formatter,header},"\n",msg,"\n"]}, - FC = case application:get_env(sasl,utc_log) of - {ok,Bool} when is_boolean(Bool) -> - FC0#{utc=>Bool}; - _ -> - FC0 - end, - ok = logger:add_handler(sasl_h,logger_std_h, + FC = #{legacy_header=>true, + single_line=>false}, + ok = logger:add_handler(sasl,logger_std_h, #{level=>Level, filter_default=>stop, filters=> - [{sasl_domain, + [{remote_gl, + {fun logger_filters:remote_gl/2,stop}}, + {sasl_domain, {fun logger_filters:domain/2, - {log,equals,[beam,erlang,otp,sasl]}}}], + {log,equal,[beam,erlang,otp,sasl]}}}], logger_std_h=>#{type=>Dest}, formatter=>{logger_formatter,FC}}). delete_sasl_logger(undefined) -> ok; delete_sasl_logger(std) -> ok; delete_sasl_logger(_Type) -> - _ = logger:remove_handler(sasl_h), + _ = logger:remove_handler(sasl), ok. 
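As a concrete illustration of the two Kernel parameters referred to in the documentation changes above, either of the following sys.config contents switches the behaviour (a sketch only; the parameter names are taken from the text, the file itself is hypothetical):

    %% Re-enable the old SASL report handling through a separate "sasl" handler:
    [{kernel, [{logger_sasl_compatible, true}]}].

    %% Or keep the default handler and only turn progress reports back on:
    [{kernel, [{logger_progress_reports, log}]}].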
add_error_logger_mf(undefined) -> ok; diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl index f4b1b54fd1..6916107623 100644 --- a/lib/sasl/src/systools_make.erl +++ b/lib/sasl/src/systools_make.erl @@ -1551,7 +1551,7 @@ mandatory_modules() -> logger_server, logger_backend, logger_config, - logger_simple, + logger_simple_h, lists, proc_lib, supervisor diff --git a/lib/sasl/test/sasl_report_SUITE.erl b/lib/sasl/test/sasl_report_SUITE.erl index 96975aaf69..72ee2f0a10 100644 --- a/lib/sasl/test/sasl_report_SUITE.erl +++ b/lib/sasl/test/sasl_report_SUITE.erl @@ -54,7 +54,7 @@ gen_server_crash_unicode(Config) -> gen_server_crash(Config, Encoding) -> StopFilter = {fun(_,_) -> stop end, ok}, - logger:add_handler_filter(logger_std_h,stop_all,StopFilter), + logger:add_handler_filter(default,stop_all,StopFilter), logger:add_handler_filter(cth_log_redirect,stop_all,StopFilter), try do_gen_server_crash(Config, Encoding) @@ -62,7 +62,7 @@ gen_server_crash(Config, Encoding) -> ok = application:unset_env(kernel, logger_sasl_compatible), ok = application:unset_env(sasl, sasl_error_logger), ok = application:unset_env(kernel, error_logger_format_depth), - logger:remove_handler_filter(logger_std_h,stop_all), + logger:remove_handler_filter(default,stop_all), logger:remove_handler_filter(cth_log_redirect,stop_all) end, ok. @@ -83,9 +83,11 @@ do_gen_server_crash(Config, Encoding) -> error_logger:logfile({open,KernelLog}), application:start(sasl), logger:i(print), + ct:log("error_logger handlers: ~p",[error_logger:which_report_handlers()]), crash_me(), + error_logger:logfile(close), application:stop(sasl), diff --git a/lib/ssh/src/ssh_client_channel.erl b/lib/ssh/src/ssh_client_channel.erl index f20007baaf..8b5e196412 100644 --- a/lib/ssh/src/ssh_client_channel.erl +++ b/lib/ssh/src/ssh_client_channel.erl @@ -180,6 +180,8 @@ init([Options]) -> {stop, Why} -> {stop, Why} catch + _:undef -> + {stop, {bad_channel_callback_module,Cb}}; _:Reason -> {stop, Reason} end. @@ -305,8 +307,8 @@ terminate(Reason, #state{cm = ConnectionManager, close_sent = false} = State) -> catch ssh_connection:close(ConnectionManager, ChannelId), terminate(Reason, State#state{close_sent = true}); -terminate(_, #state{channel_cb = Cb, channel_state = ChannelState}) -> - catch Cb:terminate(Cb, ChannelState), +terminate(Reason, #state{channel_cb = Cb, channel_state = ChannelState}) -> + catch Cb:terminate(Reason, ChannelState), ok. 
%%-------------------------------------------------------------------- diff --git a/lib/ssh/src/ssh_connection.erl b/lib/ssh/src/ssh_connection.erl index ed03b4e2ed..dad7636e3f 100644 --- a/lib/ssh/src/ssh_connection.erl +++ b/lib/ssh/src/ssh_connection.erl @@ -498,25 +498,24 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId, data = Data}, #connection{channel_cache = Cache} = Connection, server) -> <<?DEC_BIN(SsName,_SsLen)>> = Data, - - #channel{remote_id = RemoteId} = Channel0 = + #channel{remote_id=RemoteId} = Channel = ssh_client_channel:cache_lookup(Cache, ChannelId), - - ReplyMsg = {subsystem, ChannelId, WantReply, binary_to_list(SsName)}, - - try - {ok, Pid} = start_subsystem(SsName, Connection, Channel0, ReplyMsg), - erlang:monitor(process, Pid), - Channel = Channel0#channel{user = Pid}, - ssh_client_channel:cache_update(Cache, Channel), - Reply = {connection_reply, - channel_success_msg(RemoteId)}, - {[Reply], Connection} - catch - _:_ -> - ErrorReply = {connection_reply, channel_failure_msg(RemoteId)}, - {[ErrorReply], Connection} - end; + Reply = + try + start_subsystem(SsName, Connection, Channel, + {subsystem, ChannelId, WantReply, binary_to_list(SsName)}) + of + {ok, Pid} -> + erlang:monitor(process, Pid), + ssh_client_channel:cache_update(Cache, Channel#channel{user=Pid}), + channel_success_msg(RemoteId); + {error,_Error} -> + channel_failure_msg(RemoteId) + catch + _:_ -> + channel_failure_msg(RemoteId) + end, + {[{connection_reply,Reply}], Connection}; handle_msg(#ssh_msg_channel_request{request_type = "subsystem"}, Connection, client) -> @@ -822,7 +821,12 @@ start_channel(Cb, Id, Args, SubSysSup, Exec, Opts) -> ChannelSup = ssh_subsystem_sup:channel_supervisor(SubSysSup), case max_num_channels_not_exceeded(ChannelSup, Opts) of true -> - ssh_server_channel_sup:start_child(ChannelSup, Cb, Id, Args, Exec); + case ssh_server_channel_sup:start_child(ChannelSup, Cb, Id, Args, Exec) of + {error,{Error,_Info}} -> + throw(Error); + Others -> + Others + end; false -> throw(max_num_channels_exceeded) end. 
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl index f1ff3a70e2..3e224fe13f 100644 --- a/lib/ssh/src/ssh_connection_handler.erl +++ b/lib/ssh/src/ssh_connection_handler.erl @@ -1345,11 +1345,11 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock, {next_event, internal, Msg} ]} catch - C:E -> + C:E:ST -> {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_PROTOCOL_ERROR, io_lib:format("Bad packet: Decrypted, but can't decode~n~p:~p~n~p", - [C,E,erlang:get_stacktrace()]), + [C,E,ST]), StateName, D1), {stop, Shutdown, D} end; @@ -1378,10 +1378,10 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock, StateName, D0), {stop, Shutdown, D} catch - C:E -> + C:E:ST -> {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_PROTOCOL_ERROR, - io_lib:format("Bad packet: Couldn't decrypt~n~p:~p~n~p",[C,E,erlang:get_stacktrace()]), + io_lib:format("Bad packet: Couldn't decrypt~n~p:~p~n~p",[C,E,ST]), StateName, D0), {stop, Shutdown, D} end; diff --git a/lib/ssh/src/ssh_sftp.erl b/lib/ssh/src/ssh_sftp.erl index 5984713ec9..9c391abc43 100644 --- a/lib/ssh/src/ssh_sftp.erl +++ b/lib/ssh/src/ssh_sftp.erl @@ -171,21 +171,16 @@ start_channel(Host, Port, UserOptions) -> stop_channel(Pid) -> case is_process_alive(Pid) of true -> - OldValue = process_flag(trap_exit, true), - link(Pid), - exit(Pid, ssh_sftp_stop_channel), - receive - {'EXIT', Pid, normal} -> - ok - after 5000 -> - exit(Pid, kill), - receive - {'EXIT', Pid, killed} -> - ok - end - end, - process_flag(trap_exit, OldValue), - ok; + MonRef = erlang:monitor(process, Pid), + unlink(Pid), + exit(Pid, ssh_sftp_stop_channel), + receive {'DOWN',MonRef,_,_,_} -> ok + after + 1000 -> + exit(Pid, kill), + erlang:demonitor(MonRef, [flush]), + ok + end; false -> ok end. diff --git a/lib/ssh/test/Makefile b/lib/ssh/test/Makefile index 0a99d31a63..9832a9b210 100644 --- a/lib/ssh/test/Makefile +++ b/lib/ssh/test/Makefile @@ -36,6 +36,7 @@ MODULES= \ ssh_options_SUITE \ ssh_basic_SUITE \ ssh_bench_SUITE \ + ssh_chan_behaviours_SUITE \ ssh_compat_SUITE \ ssh_connection_SUITE \ ssh_dbg_SUITE \ @@ -53,6 +54,8 @@ MODULES= \ ssh_key_cb_options \ ssh_key_cb_engine_keys \ ssh_trpt_test_lib \ + ssh_chan_behaviours_client \ + ssh_chan_behaviours_server \ ssh_echo_server \ ssh_bench_dev_null \ ssh_peername_sockname_server \ diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE.erl b/lib/ssh/test/ssh_chan_behaviours_SUITE.erl new file mode 100644 index 0000000000..16ed152bcd --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_SUITE.erl @@ -0,0 +1,152 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +%% + +-module(ssh_chan_behaviours_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("ssh/src/ssh.hrl"). +-include("ssh_test_lib.hrl"). + +%% Note: This directive should only be used in test suites. +-compile(export_all). 
+
+%%--------------------------------------------------------------------
+%% Common Test interface functions -----------------------------------
+%%--------------------------------------------------------------------
+
+suite() ->
+    [{ct_hooks,[ts_install_cth]},
+     {timetrap,{seconds,60}}].
+
+all() ->
+    [
+     noexist_subsystem,
+     undefined_subsystem,
+     defined_subsystem,
+     subsystem_client
+    ].
+
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+    ?CHECK_CRYPTO(
+       begin
+           ssh:start(),
+           Config
+       end).
+
+end_per_suite(_Config) ->
+    {Time,R} = timer:tc(ssh, stop, []),
+    ct:log("Stop ssh: ~p ms",[(100*(Time div 1000)) / 100]),
+    R.
+
+init_per_testcase(_TC, Config) ->
+    SubSystems = [
+                  {"bad_cb", {ssh_chan_behaviours_undefined, []}}, % A non-existing file
+                  {"ch1",    {ssh_chan_behaviours_server, [self(),true]}}
+                 ],
+    {Pid, Host, Port} = ssh_test_lib:std_daemon(Config, [{subsystems,SubSystems}]),
+    C = ssh_test_lib:std_connect(Config, Host, Port, []),
+    [{connref,C}, {daemon_pid,Pid}| Config].
+
+end_per_testcase(_TC, Config) ->
+    {Time,_} = timer:tc(ssh, stop_daemon, [proplists:get_value(daemon_pid,Config)]),
+    ct:log("Stop daemon: ~p ms",[(100*(Time div 1000)) / 100]),
+    case flush() of
+        [] -> ok;
+        Msgs -> ct:pal("Unhandled messages:~n~p", [Msgs])
+    end.
+
+
+-define(EXPECT(What, Bind),
+        Bind =
+            (fun() ->
+                     receive What ->
+                             ct:log("~p:~p ~p got ~p",[?MODULE,?LINE,self(),What]),
+                             Bind
+                     after 5000 ->
+                             ct:log("~p:~p ~p Flushed:~n~p",[?MODULE,?LINE,self(),flush()]),
+                             ct:fail("Timeout!",[])
+                     end
+             end)()
+       ).
+
+%%--------------------------------------------------------------------
+%% Test Cases --------------------------------------------------------
+%%--------------------------------------------------------------------
+%% Try to start a subsystem whose name is not known by the server
+noexist_subsystem(Config) ->
+    C = proplists:get_value(connref, Config),
+    {ok, Ch} = ssh_connection:session_channel(C, infinity),
+    failure = ssh_connection:subsystem(C, Ch, "noexist", infinity),
+    ok = ssh_connection:close(C, Ch),
+    ?EXPECT({ssh_cm,C,{closed,Ch}},[]),
+    ok.
+
+%% Try to start a subsystem with a known name, but without any callback file
+undefined_subsystem(Config) ->
+    C = proplists:get_value(connref, Config),
+    {ok, Ch} = ssh_connection:session_channel(C, infinity),
+    failure = ssh_connection:subsystem(C, Ch, "bad_cb", infinity),
+    ok = ssh_connection:close(C, Ch),
+    ?EXPECT({ssh_cm,C,{closed,Ch}},[]), % self() is used instead of a proper channel handler
+    ok.
+
+%% Try to start and stop a subsystem with a known name and a defined callback file
+defined_subsystem(Config) ->
+    C = proplists:get_value(connref, Config),
+    {ok, Ch1} = ssh_connection:session_channel(C, infinity),
+
+    success = ssh_connection:subsystem(C, Ch1, "ch1", infinity),
+    IDsrv = ?EXPECT({{_Csrv,_Ch1srv}, {ssh_channel_up,_Ch1srv,_Csrv}}, {_Csrv,_Ch1srv}),
+
+    ok = ssh_connection:close(C, Ch1),
+    ?EXPECT({IDsrv, {terminate,normal}}, []),
+    ?EXPECT({ssh_cm, C, {closed,Ch1}}, []), % self() is used instead of a proper channel handler
+    ok.
+ +%% Try to start and stop a subsystem from a ssh_client_channel behviour +subsystem_client(Config) -> + C = proplists:get_value(connref, Config), + + {ok,ChRef} = ssh_chan_behaviours_client:start_link(C), + IDclt = ?EXPECT({{C,Ch1clt}, {ssh_channel_up,Ch1clt,C}}, {C,Ch1clt}), + IDsrv = ?EXPECT({{_Csrv,Ch1srv}, {ssh_channel_up,Ch1srv,_Csrv}}, {_Csrv,Ch1srv}), + + ok = ssh_chan_behaviours_client:stop(ChRef), + ?EXPECT({IDclt, {terminate,normal}}, []), % From the proper channel handler + ?EXPECT({IDsrv, {terminate,normal}}, []), + ok. + +%%%================================================================ +%%% +%%% + +flush() -> lists:reverse(flush([])). + +flush(Acc) -> + receive + M -> + flush([M|Acc]) + after 0 -> + Acc + end. + diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key new file mode 100644 index 0000000000..51ab6fbd88 --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key @@ -0,0 +1,13 @@ +-----BEGIN DSA PRIVATE KEY----- +MIIBuwIBAAKBgQCClaHzE2ul0gKSUxah5W0W8UiJLy4hXngKEqpaUq9SSdVdY2LK +wVfKH1gt5iuaf1FfzOhsIC9G/GLnjYttXZc92cv/Gfe3gR+s0ni2++MX+T++mE/Q +diltXv/Hp27PybS67SmiFW7I+RWnT2OKlMPtw2oUuKeztCe5UWjaj/y5FQIVAPLA +l9RpiU30Z87NRAHY3NTRaqtrAoGANMRxw8UfdtNVR0CrQj3AgPaXOGE4d+G4Gp4X +skvnCHycSVAjtYxebUkzUzt5Q6f/IabuLUdge3gXrc8BetvrcKbp+XZgM0/Vj2CF +Ymmy3in6kzGZq7Fw1sZaku6AOU8vLa5woBT2vAcHLLT1bLAzj7viL048T6MfjrOP +ef8nHvACgYBhDWFQJ1mf99sg92LalVq1dHLmVXb3PTJDfCO/Gz5NFmj9EZbAtdah +/XcF3DeRF+eEoz48wQF/ExVxSMIhLdL+o+ElpVhlM7Yii+T7dPhkQfEul6zZXu+U +ykSTXYUbtsfTNRFQGBW2/GfnEc0mnIxfn9v10NEWMzlq5z9wT9P0CgIVAN4wtL5W +Lv62jKcdskxNyz2NQoBx +-----END DSA PRIVATE KEY----- + diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key.pub b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key.pub new file mode 100644 index 0000000000..4dbb1305b0 --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key.pub @@ -0,0 +1,11 @@ +---- BEGIN SSH2 PUBLIC KEY ---- +AAAAB3NzaC1kc3MAAACBAIKVofMTa6XSApJTFqHlbRbxSIkvLiFeeAoSqlpSr1JJ1V1j +YsrBV8ofWC3mK5p/UV/M6GwgL0b8YueNi21dlz3Zy/8Z97eBH6zSeLb74xf5P76YT9B2 +KW1e/8enbs/JtLrtKaIVbsj5FadPY4qUw+3DahS4p7O0J7lRaNqP/LkVAAAAFQDywJfU +aYlN9GfOzUQB2NzU0WqrawAAAIA0xHHDxR9201VHQKtCPcCA9pc4YTh34bganheyS+cI +fJxJUCO1jF5tSTNTO3lDp/8hpu4tR2B7eBetzwF62+twpun5dmAzT9WPYIViabLeKfqT +MZmrsXDWxlqS7oA5Ty8trnCgFPa8BwcstPVssDOPu+IvTjxPox+Os495/yce8AAAAIBh +DWFQJ1mf99sg92LalVq1dHLmVXb3PTJDfCO/Gz5NFmj9EZbAtdah/XcF3DeRF+eEoz48 +wQF/ExVxSMIhLdL+o+ElpVhlM7Yii+T7dPhkQfEul6zZXu+UykSTXYUbtsfTNRFQGBW2 +/GfnEc0mnIxfn9v10NEWMzlq5z9wT9P0Cg== +---- END SSH2 PUBLIC KEY ---- diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key new file mode 100644 index 0000000000..fb1a862ded --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key @@ -0,0 +1,6 @@ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDArxbDfh3p1okrD9wQw6jJ4d4DdlBPD5GqXE8bIeRJiK41Sh40LgvPw +mkqEDSXK++CgBwYFK4EEACKhZANiAAScl43Ih2lWTDKrSox5ve5uiTXil4smsup3 +CfS1XPjKxgBAmlfBim8izbdrT0BFdQzz2joduNMtpt61wO4rGs6jm0UP7Kim9PC7 +Hneb/99fIYopdMH5NMnk60zGO1uZ2vc= +-----END EC PRIVATE KEY----- diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key.pub b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key.pub new file mode 100644 index 0000000000..428d5fb7d7 --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key.pub @@ -0,0 +1 @@ 
+ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBJyXjciHaVZMMqtKjHm97m6JNeKXiyay6ncJ9LVc+MrGAECaV8GKbyLNt2tPQEV1DPPaOh240y2m3rXA7isazqObRQ/sqKb08Lsed5v/318hiil0wfk0yeTrTMY7W5na9w== uabhnil@elxadlj3q32 diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key new file mode 100644 index 0000000000..79968bdd7d --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key @@ -0,0 +1,16 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDCZX+4FBDwZIh9y/Uxee1VJnEXlowpz2yDKwj8semM4q843337 +zbNfxHmladB1lpz2NqyxI175xMIJuDxogyZdsOxGnFAzAnthR4dqL/RWRWzjaxSB +6IAO9SPYVVlrpZ+1hsjLW79fwXK/yc8VdhRuWTeQiRgYY2ek8+OKbOqz4QIDAQAB +AoGANmvJzJO5hkLuvyDZHKfAnGTtpifcR1wtSa9DjdKUyn8vhKF0mIimnbnYQEmW +NUUb3gXCZLi9PvkpRSVRrASDOZwcjoU/Kvww163vBUVb2cOZfFhyn6o2Sk88Tt++ +udH3hdjpf9i7jTtUkUe+QYPsia+wgvvrmn4QrahLAH86+kECQQDx5gFeXTME3cnW +WMpFz3PPumduzjqgqMMWEccX4FtQkMX/gyGa5UC7OHFyh0N/gSWvPbRHa8A6YgIt +n8DO+fh5AkEAzbqX4DOn8NY6xJIi42q7l/2jIA0RkB6P7YugW5NblhqBZ0XDnpA5 +sMt+rz+K07u9XZtxgh1xi7mNfwY6lEAMqQJBAJBEauCKmRj35Z6OyeQku59SPsnY ++SJEREVvSNw2lH9SOKQQ4wPsYlTGbvKtNVZgAcen91L5MmYfeckYE/fdIZECQQCt +64zxsTnM1I8iFxj/gP/OYlJBikrKt8udWmjaghzvLMEw+T2DExJyb9ZNeT53+UMB +m6O+B/4xzU/djvp+0hbhAkAemIt+rA5kTmYlFndhpvzkSSM8a2EXsO4XIPgGWCTT +tQKS/tTly0ADMjN/TVy11+9d6zcqadNVuHXHGtR4W0GR +-----END RSA PRIVATE KEY----- + diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key.pub b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key.pub new file mode 100644 index 0000000000..75d2025c71 --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key.pub @@ -0,0 +1,5 @@ +---- BEGIN SSH2 PUBLIC KEY ---- +AAAAB3NzaC1yc2EAAAADAQABAAAAgQDCZX+4FBDwZIh9y/Uxee1VJnEXlowpz2yDKwj8 +semM4q843337zbNfxHmladB1lpz2NqyxI175xMIJuDxogyZdsOxGnFAzAnthR4dqL/RW +RWzjaxSB6IAO9SPYVVlrpZ+1hsjLW79fwXK/yc8VdhRuWTeQiRgYY2ek8+OKbOqz4Q== +---- END SSH2 PUBLIC KEY ---- diff --git a/lib/ssh/test/ssh_chan_behaviours_client.erl b/lib/ssh/test/ssh_chan_behaviours_client.erl new file mode 100644 index 0000000000..07ac21ba97 --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_client.erl @@ -0,0 +1,143 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2005-2016. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +%% + +%%% Description: Example ssh client +-module(ssh_chan_behaviours_client). +-behaviour(ssh_client_channel). +-record(state, { + parent, + cm, + ch, + dbg + }). +-export([start_link/1, start/1, + stop/1, send_eof/1, + init/1, handle_msg/2, handle_ssh_msg/2, terminate/2, + code_change/3, handle_call/3, handle_cast/2 + ]). + +-define(DBG(State,Fmt,Args), + case State#state.dbg of + true -> ct:log("~p:~p ~p C=~p Ch=~p "++Fmt, + [?MODULE,?LINE,self(),State#state.cm,State#state.ch|Args]); + false -> ok + end). 
+ + +start_link(C) -> + {ok, Ch} = ssh_connection:session_channel(C, infinity), + ssh_client_channel:start_link(C, Ch, ssh_chan_behaviours_client, [C, Ch, self(), true]). + +start(C) -> + {ok, Ch} = ssh_connection:session_channel(C, infinity), + ssh_client_channel:start(C, Ch, ssh_chan_behaviours_client, [C, Ch, self(), true]). + +send_eof(ChRef) -> + ssh_client_channel:call(ChRef, send_eof). + +stop(ChRef) -> + ssh_client_channel:call(ChRef, stop). + + +init([C, Ch, Parent, Dbg|_Exec]) -> + case ssh_connection:subsystem(C, Ch, "ch1", infinity) of + success -> + State = #state{cm = C, + ch = Ch, + parent=Parent, + dbg=Dbg}, + ?DBG(State, "callback spawned, parent = ~p", [Parent]), + {ok, State}; + + Other -> + {stop, Other} + end. + +handle_msg({ssh_channel_up, ChannelId, ConnectionManager}=M, State0) -> + State = State0#state{cm = ConnectionManager, + ch = ChannelId}, + tell_parent(M, State), + ?DBG(State, "ssh_channel_up",[]), + {ok, State}. + +handle_ssh_msg({ssh_cm, C, {data, Ch, 0, Data}}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "ssh_cm data size(Data)=~p",[size(Data)]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {data, Ch, Type, Data}}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "ssh_cm data Type=~p : ~p",[Type,Data]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {eof, Ch}}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "eof",[]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {signal, _Ch, _SigNameStr}=Sig} = M, #state{ch=Ch,cm=C} = State) -> + %% Ignore signals according to RFC 4254 section 6.9. + tell_parent(M, State), + ?DBG(State, "~p",[Sig]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {exit_signal, Ch, _, _Error, _}=Sig}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "~p",[Sig]), + {stop, Ch, State}; + +handle_ssh_msg({ssh_cm, C, {exit_status, Ch, _Status}=Sig}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "~p",[Sig]), + {stop, Ch, State}. + + +handle_call(send_eof, _From,#state{ch=Ch,cm=C} = State) -> + {reply, ssh_connection:send_eof(C,Ch), State}; + +handle_call(stop, _From, State) -> + {stop, normal, ok, State}; + +handle_call(Msg, _From, State) -> + ?DBG(State, "Unknown call ~p", [Msg]), + {reply, {unknown_call,Msg}, State}. + + +terminate(Reason, State) -> + tell_parent({terminate,Reason}, State), + ?DBG(State, "terminate Reason = ~p",[Reason]). + + +handle_cast(Msg, State) -> + ?DBG(State, "Unknown cast ~p", [Msg]), + {noreply, State}. + +code_change(_OldVsn, State, _Extra) -> {ok, State}. + +%%%================================================================ +%%% +%%% + +tell_parent(Msg, #state{parent = Parent, + cm = C, + ch = Ch}) -> Parent ! {{C,Ch}, Msg}. + diff --git a/lib/ssh/test/ssh_chan_behaviours_server.erl b/lib/ssh/test/ssh_chan_behaviours_server.erl new file mode 100644 index 0000000000..a5ec19e0cf --- /dev/null +++ b/lib/ssh/test/ssh_chan_behaviours_server.erl @@ -0,0 +1,96 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2005-2016. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +%% + +%%% Description: Example ssh server +-module(ssh_chan_behaviours_server). +-behaviour(ssh_server_channel). +-record(state, { + parent, + cm, + ch, + dbg + }). +-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]). + +-define(DBG(State,Fmt,Args), + case State#state.dbg of + true -> ct:log("~p:~p ~p C=~p Ch=~p "++Fmt, + [?MODULE,?LINE,self(),State#state.cm,State#state.ch|Args]); + false -> ok + end). + + +init([Pid,Dbg|_Exec]) -> + {ok, #state{parent=Pid, + dbg=Dbg}}. + +handle_msg({ssh_channel_up, ChannelId, ConnectionManager}=M, State0) -> + State = State0#state{cm = ConnectionManager, + ch = ChannelId}, + tell_parent(M, State), + ?DBG(State, "ssh_channel_up",[]), + {ok, State}. + +handle_ssh_msg({ssh_cm, C, {data, Ch, 0, Data}}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "ssh_cm data size(Data)=~p",[size(Data)]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {data, Ch, Type, Data}}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "ssh_cm data Type=~p : ~p",[Type,Data]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {eof, Ch}}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "eof",[]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {signal, _Ch, _SigNameStr}=Sig} = M, #state{ch=Ch,cm=C} = State) -> + %% Ignore signals according to RFC 4254 section 6.9. + tell_parent(M, State), + ?DBG(State, "~p",[Sig]), + {ok, State}; + +handle_ssh_msg({ssh_cm, C, {exit_signal, Ch, _, _Error, _}=Sig}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "~p",[Sig]), + {stop, Ch, State}; + +handle_ssh_msg({ssh_cm, C, {exit_status, Ch, _Status}=Sig}=M, #state{ch=Ch,cm=C} = State) -> + tell_parent(M, State), + ?DBG(State, "~p",[Sig]), + {stop, Ch, State}. + +terminate(Reason, State) -> + tell_parent({terminate,Reason}, State), + ?DBG(State, "terminate Reason = ~p",[Reason]), + ok. + +%%%================================================================ +%%% +%%% + +tell_parent(Msg, #state{parent = Parent, + cm = C, + ch = Ch}) -> Parent ! {{C,Ch}, Msg}. 
+ diff --git a/lib/ssl/src/Makefile b/lib/ssl/src/Makefile index 11b3e65912..c389aa8cfe 100644 --- a/lib/ssl/src/Makefile +++ b/lib/ssl/src/Makefile @@ -54,8 +54,8 @@ MODULES= \ ssl_connection_sup \ ssl_listen_tracker_sup\ dtls_connection_sup \ - dtls_udp_listener\ - dtls_udp_sup \ + dtls_packet_demux \ + dtls_listener_sup \ ssl_dist_sup\ ssl_dist_admin_sup\ ssl_dist_connection_sup\ diff --git a/lib/ssl/src/dtls_connection.erl b/lib/ssl/src/dtls_connection.erl index 0fe568759d..4e3f65d9c6 100644 --- a/lib/ssl/src/dtls_connection.erl +++ b/lib/ssl/src/dtls_connection.erl @@ -137,9 +137,8 @@ next_record(#state{protocol_buffers = Buffers#protocol_buffers{dtls_cipher_texts = Rest}, connection_states = ConnectionStates}); next_record(#state{role = server, - socket = {Listener, {Client, _}}, - transport_cb = gen_udp} = State) -> - dtls_udp_listener:active_once(Listener, Client, self()), + socket = {Listener, {Client, _}}} = State) -> + dtls_packet_demux:active_once(Listener, Client, self()), {no_record, State}; next_record(#state{role = client, socket = {_Server, Socket} = DTLSSocket, @@ -448,7 +447,7 @@ init({call, From}, {start, Timeout}, }, {Record, State} = next_record(State3), next_event(hello, Record, State, Actions); -init({call, _} = Type, Event, #state{role = server, transport_cb = gen_udp} = State) -> +init({call, _} = Type, Event, #state{role = server, data_tag = udp} = State) -> Result = gen_handshake(?FUNCTION_NAME, Type, Event, State#state{flight_state = {retransmit, ?INITIAL_RETRANSMIT_TIMEOUT}, protocol_specific = #{current_cookie_secret => dtls_v1:cookie_secret(), @@ -922,7 +921,7 @@ handle_alerts([Alert | Alerts], {next_state, StateName, State}) -> handle_alerts([Alert | Alerts], {next_state, StateName, State, _Actions}) -> handle_alerts(Alerts, ssl_connection:handle_alert(Alert, StateName, State)). -handle_own_alert(Alert, Version, StateName, #state{transport_cb = gen_udp, +handle_own_alert(Alert, Version, StateName, #state{data_tag = udp, role = Role, ssl_options = Options} = State0) -> case ignore_alert(Alert, State0) of @@ -1013,10 +1012,10 @@ next_flight(Flight) -> change_cipher_spec => undefined, handshakes_after_change_cipher_spec => []}. -handle_flight_timer(#state{transport_cb = gen_udp, +handle_flight_timer(#state{data_tag = udp, flight_state = {retransmit, Timeout}} = State) -> start_retransmision_timer(Timeout, State); -handle_flight_timer(#state{transport_cb = gen_udp, +handle_flight_timer(#state{data_tag = udp, flight_state = connection} = State) -> {State, []}; handle_flight_timer(State) -> diff --git a/lib/ssl/src/dtls_udp_sup.erl b/lib/ssl/src/dtls_listener_sup.erl index 197882e92f..6939f1ef3b 100644 --- a/lib/ssl/src/dtls_udp_sup.erl +++ b/lib/ssl/src/dtls_listener_sup.erl @@ -23,7 +23,7 @@ %% Purpose: Supervisor for a procsses dispatching upd datagrams to %% correct DTLS handler %%---------------------------------------------------------------------- --module(dtls_udp_sup). +-module(dtls_listener_sup). -behaviour(supervisor). @@ -52,10 +52,10 @@ init(_O) -> MaxT = 3600, Name = undefined, % As simple_one_for_one is used. - StartFunc = {dtls_udp_listener, start_link, []}, + StartFunc = {dtls_packet_demux, start_link, []}, Restart = temporary, % E.g. 
should not be restarted Shutdown = 4000, - Modules = [dtls_udp_listener], + Modules = [dtls_packet_demux], Type = worker, ChildSpec = {Name, StartFunc, Restart, Shutdown, Type, Modules}, diff --git a/lib/ssl/src/dtls_udp_listener.erl b/lib/ssl/src/dtls_packet_demux.erl index 0608c6bd2b..1672626165 100644 --- a/lib/ssl/src/dtls_udp_listener.erl +++ b/lib/ssl/src/dtls_packet_demux.erl @@ -19,15 +19,15 @@ %% --module(dtls_udp_listener). +-module(dtls_packet_demux). -behaviour(gen_server). -include("ssl_internal.hrl"). %% API --export([start_link/4, active_once/3, accept/2, sockname/1, close/1, - get_all_opts/1, get_sock_opts/2, set_sock_opts/2]). +-export([start_link/5, active_once/3, accept/2, sockname/1, close/1, + get_all_opts/1, get_sock_opts/2, set_sock_opts/2]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -36,6 +36,7 @@ -record(state, {port, listener, + transport, dtls_options, emulated_options, dtls_msq_queues = kv_new(), @@ -50,35 +51,36 @@ %%% API %%%=================================================================== -start_link(Port, EmOpts, InetOptions, DTLSOptions) -> - gen_server:start_link(?MODULE, [Port, EmOpts, InetOptions, DTLSOptions], []). +start_link(Port, TransportInfo, EmOpts, InetOptions, DTLSOptions) -> + gen_server:start_link(?MODULE, [Port, TransportInfo, EmOpts, InetOptions, DTLSOptions], []). -active_once(UDPConnection, Client, Pid) -> - gen_server:cast(UDPConnection, {active_once, Client, Pid}). +active_once(PacketSocket, Client, Pid) -> + gen_server:cast(PacketSocket, {active_once, Client, Pid}). -accept(UDPConnection, Accepter) -> - call(UDPConnection, {accept, Accepter}). +accept(PacketSocket, Accepter) -> + call(PacketSocket, {accept, Accepter}). -sockname(UDPConnection) -> - call(UDPConnection, sockname). -close(UDPConnection) -> - call(UDPConnection, close). -get_sock_opts(UDPConnection, SplitSockOpts) -> - call(UDPConnection, {get_sock_opts, SplitSockOpts}). -get_all_opts(UDPConnection) -> - call(UDPConnection, get_all_opts). -set_sock_opts(UDPConnection, Opts) -> - call(UDPConnection, {set_sock_opts, Opts}). +sockname(PacketSocket) -> + call(PacketSocket, sockname). +close(PacketSocket) -> + call(PacketSocket, close). +get_sock_opts(PacketSocket, SplitSockOpts) -> + call(PacketSocket, {get_sock_opts, SplitSockOpts}). +get_all_opts(PacketSocket) -> + call(PacketSocket, get_all_opts). +set_sock_opts(PacketSocket, Opts) -> + call(PacketSocket, {set_sock_opts, Opts}). %%%=================================================================== %%% gen_server callbacks %%%=================================================================== -init([Port, EmOpts, InetOptions, DTLSOptions]) -> +init([Port, {TransportModule, _,_,_} = TransportInfo, EmOpts, InetOptions, DTLSOptions]) -> try - {ok, Socket} = gen_udp:open(Port, InetOptions), + {ok, Socket} = TransportModule:open(Port, InetOptions), {ok, #state{port = Port, first = true, + transport = TransportInfo, dtls_options = DTLSOptions, emulated_options = EmOpts, listener = Socket, @@ -134,20 +136,20 @@ handle_cast({active_once, Client, Pid}, State0) -> State = handle_active_once(Client, Pid, State0), {noreply, State}. 
-handle_info({udp, Socket, IP, InPortNo, _} = Msg, #state{listener = Socket} = State0) ->
+handle_info({Transport, Socket, IP, InPortNo, _} = Msg, #state{listener = Socket, transport = {_,Transport,_,_}} = State0) ->
     State = handle_datagram({IP, InPortNo}, Msg, State0),
     next_datagram(Socket),
     {noreply, State};
 
 %% UDP socket does not have a connection and should not receive an econnreset
-%% This does however happens on on some windows versions. Just ignoring it
+%% This does, however, happen on some windows versions. Just ignoring it
 %% appears to make things work as expected!
-handle_info({udp_error, Socket, econnreset = Error}, #state{listener = Socket} = State) ->
+handle_info({Error, Socket, econnreset = Error}, #state{listener = Socket, transport = {_,_,_, udp_error}} = State) ->
     Report = io_lib:format("Ignore SSL UDP Listener: Socket error: ~p ~n", [Error]),
     error_logger:info_report(Report),
     {noreply, State};
 
-handle_info({udp_error, Socket, Error}, #state{listener = Socket} = State) ->
-    Report = io_lib:format("SSL UDP Listener shutdown: Socket error: ~p ~n", [Error]),
+handle_info({Error, Socket, Error}, #state{listener = Socket, transport = {_,_,_, Error}} = State) ->
+    Report = io_lib:format("SSL packet multiplexer shutdown: Socket error: ~p ~n", [Error]),
     error_logger:info_report(Report),
     {noreply, State#state{close=true}};
 
@@ -231,7 +233,7 @@ setup_new_connection(User, From, Client, Msg, #state{dtls_processes = Processes,
                                                       listener = Socket,
                                                       emulated_options = EmOpts} = State) ->
     ConnArgs = [server, "localhost", Port, {self(), {Client, Socket}},
-                {DTLSOpts, EmOpts, udp_listener}, User, dtls_socket:default_cb_info()],
+                {DTLSOpts, EmOpts, dtls_listener}, User, dtls_socket:default_cb_info()],
     case dtls_connection_sup:start_child(ConnArgs) of
         {ok, Pid} ->
             erlang:monitor(process, Pid),
diff --git a/lib/ssl/src/dtls_socket.erl b/lib/ssl/src/dtls_socket.erl
index 0e4ab089dc..8dd62bc352 100644
--- a/lib/ssl/src/dtls_socket.erl
+++ b/lib/ssl/src/dtls_socket.erl
@@ -22,31 +22,31 @@
 -include("ssl_internal.hrl").
 -include("ssl_api.hrl").
 
--export([send/3, listen/3, accept/3, connect/4, socket/4, setopts/3, getopts/3, getstat/3,
+-export([send/3, listen/2, accept/3, connect/4, socket/4, setopts/3, getopts/3, getstat/3,
         peername/2, sockname/2, port/2, close/2]).
 -export([emulated_options/0, emulated_options/1, internal_inet_values/0, default_inet_values/0, default_cb_info/0]).
 
 send(Transport, {{IP,Port},Socket}, Data) ->
     Transport:send(Socket, IP, Port, Data).
 
-listen(gen_udp = Transport, Port, #config{transport_info = {Transport, _, _, _},
-                                          ssl = SslOpts,
-                                          emulated = EmOpts,
-                                          inet_user = Options} = Config) ->
+listen(Port, #config{transport_info = TransportInfo,
+                     ssl = SslOpts,
+                     emulated = EmOpts,
+                     inet_user = Options} = Config) ->
 
-    case dtls_udp_sup:start_child([Port, emulated_socket_options(EmOpts, #socket_options{}),
+    case dtls_listener_sup:start_child([Port, TransportInfo, emulated_socket_options(EmOpts, #socket_options{}),
                                    Options ++ internal_inet_values(), SslOpts]) of
        {ok, Pid} ->
-           {ok, #sslsocket{pid = {udp, Config#config{udp_handler = {Pid, Port}}}}};
+           {ok, #sslsocket{pid = {dtls, Config#config{dtls_handler = {Pid, Port}}}}};
        Err = {error, _} ->
           Err
     end.
-accept(udp, #config{transport_info = {Transport = gen_udp,_,_,_}, +accept(dtls, #config{transport_info = {Transport,_,_,_}, connection_cb = ConnectionCb, - udp_handler = {Listner, _}}, _Timeout) -> - case dtls_udp_listener:accept(Listner, self()) of + dtls_handler = {Listner, _}}, _Timeout) -> + case dtls_packet_demux:accept(Listner, self()) of {ok, Pid, Socket} -> {ok, socket(Pid, Transport, {Listner, Socket}, ConnectionCb)}; {error, Reason} -> @@ -69,7 +69,9 @@ connect(Address, Port, #config{transport_info = {Transport, _, _, _} = CbInfo, end. close(gen_udp, {_Client, _Socket}) -> - ok. + ok; +close(Transport, {_Client, Socket}) -> + Transport:close(Socket). socket(Pid, gen_udp = Transport, {{_, _}, Socket}, ConnectionCb) -> #sslsocket{pid = Pid, @@ -79,18 +81,18 @@ socket(Pid, Transport, Socket, ConnectionCb) -> #sslsocket{pid = Pid, %% "The name "fd" is keept for backwards compatibility fd = {Transport, Socket, ConnectionCb}}. -setopts(_, #sslsocket{pid = {udp, #config{udp_handler = {ListenPid, _}}}}, Options) -> +setopts(_, #sslsocket{pid = {dtls, #config{dtls_handler = {ListenPid, _}}}}, Options) -> SplitOpts = tls_socket:split_options(Options), - dtls_udp_listener:set_sock_opts(ListenPid, SplitOpts); + dtls_packet_demux:set_sock_opts(ListenPid, SplitOpts); %%% Following clauses will not be called for emulated options, they are handled in the connection process setopts(gen_udp, Socket, Options) -> inet:setopts(Socket, Options); setopts(Transport, Socket, Options) -> Transport:setopts(Socket, Options). -getopts(_, #sslsocket{pid = {udp, #config{udp_handler = {ListenPid, _}}}}, Options) -> +getopts(_, #sslsocket{pid = {dtls, #config{dtls_handler = {ListenPid, _}}}}, Options) -> SplitOpts = tls_socket:split_options(Options), - dtls_udp_listener:get_sock_opts(ListenPid, SplitOpts); + dtls_packet_demux:get_sock_opts(ListenPid, SplitOpts); getopts(gen_udp, #sslsocket{pid = {Socket, #config{emulated = EmOpts}}}, Options) -> {SockOptNames, EmulatedOptNames} = tls_socket:split_options(Options), EmulatedOpts = get_emulated_opts(EmOpts, EmulatedOptNames), @@ -112,7 +114,7 @@ getstat(gen_udp, {_,Socket}, Options) -> inet:getstat(Socket, Options); getstat(Transport, Socket, Options) -> Transport:getstat(Socket, Options). 
-peername(udp, _) -> +peername(_, undefined) -> {error, enotconn}; peername(gen_udp, {_, {Client, _Socket}}) -> {ok, Client}; diff --git a/lib/ssl/src/inet_tls_dist.erl b/lib/ssl/src/inet_tls_dist.erl index d45f209838..a6ceff25cb 100644 --- a/lib/ssl/src/inet_tls_dist.erl +++ b/lib/ssl/src/inet_tls_dist.erl @@ -521,7 +521,7 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) -> ErlEpmd = net_kernel:epmd_module(), {ARMod, ARFun} = get_address_resolver(ErlEpmd, Driver), Timer = trace(dist_util:start_timer(SetupTime)), - case ARMod:ARFun(Address) of + case ARMod:ARFun(Name,Address,Driver:family()) of {ok, Ip, TcpPort, Version} -> do_setup_connect(Driver, Kernel, Node, Address, Ip, TcpPort, Version, Type, MyNode, Timer); {ok, Ip} -> diff --git a/lib/ssl/src/ssl.app.src b/lib/ssl/src/ssl.app.src index 2aecb6836e..da281829cb 100644 --- a/lib/ssl/src/ssl.app.src +++ b/lib/ssl/src/ssl.app.src @@ -17,8 +17,8 @@ dtls_socket, dtls_v1, dtls_connection_sup, - dtls_udp_listener, - dtls_udp_sup, + dtls_packet_demux, + dtls_listener_sup, %% API ssl, %% Main API tls, %% TLS specific diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl index 5b6d92ebf4..a7b4ec2bf7 100644 --- a/lib/ssl/src/ssl.erl +++ b/lib/ssl/src/ssl.erl @@ -23,9 +23,17 @@ %%% Purpose : Main API module for SSL see also tls.erl and dtls.erl -module(ssl). --include("ssl_internal.hrl"). + -include_lib("public_key/include/public_key.hrl"). +-include("ssl_internal.hrl"). +-include("ssl_api.hrl"). +-include("ssl_internal.hrl"). +-include("ssl_record.hrl"). +-include("ssl_cipher.hrl"). +-include("ssl_handshake.hrl"). +-include("ssl_srp.hrl"). + %% Application handling -export([start/0, start/1, stop/0, clear_pem_cache/0]). @@ -39,8 +47,8 @@ close/1, close/2, shutdown/2, recv/2, recv/3, send/2, getopts/2, setopts/2, getstat/1, getstat/2 ]). -%% SSL/TLS protocol handling +%% SSL/TLS protocol handling -export([cipher_suites/0, cipher_suites/1, cipher_suites/2, filter_cipher_suites/2, prepend_cipher_suites/2, append_cipher_suites/2, eccs/0, eccs/1, versions/0, @@ -49,14 +57,9 @@ %% Misc -export([handle_options/2, tls_version/1, new_ssl_options/3]). --include("ssl_api.hrl"). --include("ssl_internal.hrl"). --include("ssl_record.hrl"). --include("ssl_cipher.hrl"). --include("ssl_handshake.hrl"). --include("ssl_srp.hrl"). - --include_lib("public_key/include/public_key.hrl"). +-deprecated({ssl_accept, 1, eventually}). +-deprecated({ssl_accept, 2, eventually}). +-deprecated({ssl_accept, 3, eventually}). %%-------------------------------------------------------------------- -spec start() -> ok | {error, reason()}. 
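The -deprecated attributes added above mark ssl:ssl_accept/1,2,3 for eventual removal in favour of ssl:handshake/1,2,3 (matching the otp_internal entries further down). A hedged migration sketch (hypothetical variable names):

    %% Before (now deprecated):
    %%   ok = ssl:ssl_accept(TlsTransportSocket),
    %% After; note that handshake/1 returns the negotiated socket:
    {ok, SslSocket} = ssl:handshake(TlsTransportSocket).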
@@ -231,7 +234,7 @@ handshake(#sslsocket{fd = {_, _, _, Tracker}} = Socket, SslOpts, Timeout) when handshake(#sslsocket{pid = Pid, fd = {_, _, _}} = Socket, SslOpts, Timeout) when (is_integer(Timeout) andalso Timeout >= 0) or (Timeout == infinity)-> try - {ok, EmOpts, _} = dtls_udp_listener:get_all_opts(Pid), + {ok, EmOpts, _} = dtls_packet_demux:get_all_opts(Pid), ssl_connection:handshake(Socket, {SslOpts, tls_socket:emulated_socket_options(EmOpts, #socket_options{})}, Timeout) catch @@ -280,8 +283,8 @@ handshake_cancel(Socket) -> %%-------------------------------------------------------------------- close(#sslsocket{pid = Pid}) when is_pid(Pid) -> ssl_connection:close(Pid, {close, ?DEFAULT_TIMEOUT}); -close(#sslsocket{pid = {udp, #config{udp_handler = {Pid, _}}}}) -> - dtls_udp_listener:close(Pid); +close(#sslsocket{pid = {dtls, #config{dtls_handler = {Pid, _}}}}) -> + dtls_packet_demux:close(Pid); close(#sslsocket{pid = {ListenSocket, #config{transport_info={Transport,_, _, _}}}}) -> Transport:close(ListenSocket). @@ -308,10 +311,10 @@ close(#sslsocket{pid = {ListenSocket, #config{transport_info={Transport,_, _, _} %%-------------------------------------------------------------------- send(#sslsocket{pid = Pid}, Data) when is_pid(Pid) -> ssl_connection:send(Pid, Data); -send(#sslsocket{pid = {_, #config{transport_info={gen_udp, _, _, _}}}}, _) -> +send(#sslsocket{pid = {_, #config{transport_info={_, udp, _, _}}}}, _) -> {error,enotconn}; %% Emulate connection behaviour -send(#sslsocket{pid = {udp,_}}, _) -> - {error,enotconn}; +send(#sslsocket{pid = {dtls,_}}, _) -> + {error,enotconn}; %% Emulate connection behaviour send(#sslsocket{pid = {ListenSocket, #config{transport_info={Transport, _, _, _}}}}, Data) -> Transport:send(ListenSocket, Data). %% {error,enotconn} @@ -326,7 +329,7 @@ recv(Socket, Length) -> recv(#sslsocket{pid = Pid}, Length, Timeout) when is_pid(Pid), (is_integer(Timeout) andalso Timeout >= 0) or (Timeout == infinity)-> ssl_connection:recv(Pid, Length, Timeout); -recv(#sslsocket{pid = {udp,_}}, _, _) -> +recv(#sslsocket{pid = {dtls,_}}, _, _) -> {error,enotconn}; recv(#sslsocket{pid = {Listen, #config{transport_info = {Transport, _, _, _}}}}, _,_) when is_port(Listen)-> @@ -340,7 +343,7 @@ recv(#sslsocket{pid = {Listen, %%-------------------------------------------------------------------- controlling_process(#sslsocket{pid = Pid}, NewOwner) when is_pid(Pid), is_pid(NewOwner) -> ssl_connection:new_user(Pid, NewOwner); -controlling_process(#sslsocket{pid = {udp, _}}, +controlling_process(#sslsocket{pid = {dtls, _}}, NewOwner) when is_pid(NewOwner) -> ok; %% Meaningless but let it be allowed to conform with TLS controlling_process(#sslsocket{pid = {Listen, @@ -365,7 +368,7 @@ connection_information(#sslsocket{pid = Pid}) when is_pid(Pid) -> end; connection_information(#sslsocket{pid = {Listen, _}}) when is_port(Listen) -> {error, enotconn}; -connection_information(#sslsocket{pid = {udp,_}}) -> +connection_information(#sslsocket{pid = {dtls,_}}) -> {error,enotconn}. 
%%-------------------------------------------------------------------- @@ -391,13 +394,11 @@ peername(#sslsocket{pid = Pid, fd = {Transport, Socket, _}}) when is_pid(Pid)-> dtls_socket:peername(Transport, Socket); peername(#sslsocket{pid = Pid, fd = {Transport, Socket, _, _}}) when is_pid(Pid)-> tls_socket:peername(Transport, Socket); -peername(#sslsocket{pid = {udp = Transport, #config{udp_handler = {_Pid, _}}}}) -> - dtls_socket:peername(Transport, undefined); -peername(#sslsocket{pid = Pid, fd = {gen_udp= Transport, Socket, _, _}}) when is_pid(Pid) -> - dtls_socket:peername(Transport, Socket); +peername(#sslsocket{pid = {dtls, #config{dtls_handler = {_Pid, _}}}}) -> + dtls_socket:peername(dtls, undefined); peername(#sslsocket{pid = {ListenSocket, #config{transport_info = {Transport,_,_,_}}}}) -> tls_socket:peername(Transport, ListenSocket); %% Will return {error, enotconn} -peername(#sslsocket{pid = {udp,_}}) -> +peername(#sslsocket{pid = {dtls,_}}) -> {error,enotconn}. %%-------------------------------------------------------------------- @@ -412,7 +413,7 @@ peercert(#sslsocket{pid = Pid}) when is_pid(Pid) -> Result -> Result end; -peercert(#sslsocket{pid = {udp, _}}) -> +peercert(#sslsocket{pid = {dtls, _}}) -> {error, enotconn}; peercert(#sslsocket{pid = {Listen, _}}) when is_port(Listen) -> {error, enotconn}. @@ -562,7 +563,7 @@ eccs_filter_supported(Curves) -> %%-------------------------------------------------------------------- getopts(#sslsocket{pid = Pid}, OptionTags) when is_pid(Pid), is_list(OptionTags) -> ssl_connection:get_opts(Pid, OptionTags); -getopts(#sslsocket{pid = {udp, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, OptionTags) when is_list(OptionTags) -> +getopts(#sslsocket{pid = {dtls, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, OptionTags) when is_list(OptionTags) -> try dtls_socket:getopts(Transport, ListenSocket, OptionTags) of {ok, _} = Result -> Result; @@ -600,7 +601,7 @@ setopts(#sslsocket{pid = Pid}, Options0) when is_pid(Pid), is_list(Options0) -> _:_ -> {error, {options, {not_a_proplist, Options0}}} end; -setopts(#sslsocket{pid = {udp, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, Options) when is_list(Options) -> +setopts(#sslsocket{pid = {dtls, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, Options) when is_list(Options) -> try dtls_socket:setopts(Transport, ListenSocket, Options) of ok -> ok; @@ -657,7 +658,7 @@ getstat(#sslsocket{pid = Pid, fd = {Transport, Socket, _, _}}, Options) when is_ shutdown(#sslsocket{pid = {Listen, #config{transport_info = {Transport,_, _, _}}}}, How) when is_port(Listen) -> Transport:shutdown(Listen, How); -shutdown(#sslsocket{pid = {udp,_}},_) -> +shutdown(#sslsocket{pid = {dtls,_}},_) -> {error, enotconn}; shutdown(#sslsocket{pid = Pid}, How) -> ssl_connection:shutdown(Pid, How). 
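For the {dtls, #config{}} listen-socket representation used above, connection-oriented calls deliberately mirror the TLS listen-socket behaviour and report enotconn, for example (hypothetical variable name):

    %% A DTLS listen socket has no peer or connection state of its own:
    {error, enotconn} = ssl:peername(DtlsListenSocket),
    {error, enotconn} = ssl:send(DtlsListenSocket, <<"data">>),
    {error, enotconn} = ssl:connection_information(DtlsListenSocket).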
@@ -669,8 +670,8 @@ shutdown(#sslsocket{pid = Pid}, How) -> %%-------------------------------------------------------------------- sockname(#sslsocket{pid = {Listen, #config{transport_info = {Transport, _, _, _}}}}) when is_port(Listen) -> tls_socket:sockname(Transport, Listen); -sockname(#sslsocket{pid = {udp, #config{udp_handler = {Pid, _}}}}) -> - dtls_udp_listener:sockname(Pid); +sockname(#sslsocket{pid = {dtls, #config{dtls_handler = {Pid, _}}}}) -> + dtls_packet_demux:sockname(Pid); sockname(#sslsocket{pid = Pid, fd = {Transport, Socket, _}}) when is_pid(Pid) -> dtls_socket:sockname(Transport, Socket); sockname(#sslsocket{pid = Pid, fd = {Transport, Socket, _, _}}) when is_pid(Pid) -> @@ -704,7 +705,7 @@ versions() -> %%-------------------------------------------------------------------- renegotiate(#sslsocket{pid = Pid}) when is_pid(Pid) -> ssl_connection:renegotiation(Pid); -renegotiate(#sslsocket{pid = {udp,_}}) -> +renegotiate(#sslsocket{pid = {dtls,_}}) -> {error, enotconn}; renegotiate(#sslsocket{pid = {Listen,_}}) when is_port(Listen) -> {error, enotconn}. @@ -719,7 +720,7 @@ renegotiate(#sslsocket{pid = {Listen,_}}) when is_port(Listen) -> prf(#sslsocket{pid = Pid}, Secret, Label, Seed, WantedLength) when is_pid(Pid) -> ssl_connection:prf(Pid, Secret, Label, Seed, WantedLength); -prf(#sslsocket{pid = {udp,_}}, _,_,_,_) -> +prf(#sslsocket{pid = {dtls,_}}, _,_,_,_) -> {error, enotconn}; prf(#sslsocket{pid = {Listen,_}}, _,_,_,_) when is_port(Listen) -> {error, enotconn}. @@ -792,8 +793,8 @@ supported_suites(anonymous, Version) -> do_listen(Port, #config{transport_info = {Transport, _, _, _}} = Config, tls_connection) -> tls_socket:listen(Transport, Port, Config); -do_listen(Port, #config{transport_info = {Transport, _, _, _}} = Config, dtls_connection) -> - dtls_socket:listen(Transport, Port, Config). +do_listen(Port, Config, dtls_connection) -> + dtls_socket:listen(Port, Config). %% Handle extra ssl options given to ssl_accept -spec handle_options([any()], #ssl_options{}) -> #ssl_options{} diff --git a/lib/ssl/src/ssl_connection_sup.erl b/lib/ssl/src/ssl_connection_sup.erl index 1a1f43e683..1aa7c5844f 100644 --- a/lib/ssl/src/ssl_connection_sup.erl +++ b/lib/ssl/src/ssl_connection_sup.erl @@ -51,12 +51,12 @@ init([]) -> ListenOptionsTracker = listen_options_tracker_child_spec(), DTLSConnetionManager = dtls_connection_manager_child_spec(), - DTLSUdpListeners = dtls_udp_listeners_spec(), + DTLSListeners = dtls_listeners_spec(), {ok, {{one_for_one, 10, 3600}, [TLSConnetionManager, ListenOptionsTracker, DTLSConnetionManager, - DTLSUdpListeners + DTLSListeners ]}}. @@ -91,9 +91,9 @@ listen_options_tracker_child_spec() -> Type = supervisor, {Name, StartFunc, Restart, Shutdown, Type, Modules}. 
-dtls_udp_listeners_spec() -> - Name = dtls_udp_listener, - StartFunc = {dtls_udp_sup, start_link, []}, +dtls_listeners_spec() -> + Name = dtls_listener, + StartFunc = {dtls_listener_sup, start_link, []}, Restart = permanent, Shutdown = 4000, Modules = [], diff --git a/lib/ssl/src/ssl_internal.hrl b/lib/ssl/src/ssl_internal.hrl index 5df00de0e5..977d012fa7 100644 --- a/lib/ssl/src/ssl_internal.hrl +++ b/lib/ssl/src/ssl_internal.hrl @@ -160,7 +160,7 @@ -record(config, {ssl, %% SSL parameters inet_user, %% User set inet options emulated, %% Emulated option list or "inherit_tracker" pid - udp_handler, + dtls_handler, inet_ssl, %% inet options for internal ssl socket transport_info, %% Callback info connection_cb diff --git a/lib/ssl/test/ssl_ECC_openssl_SUITE.erl b/lib/ssl/test/ssl_ECC_openssl_SUITE.erl index 280fa94ecb..5a08b152a6 100644 --- a/lib/ssl/test/ssl_ECC_openssl_SUITE.erl +++ b/lib/ssl/test/ssl_ECC_openssl_SUITE.erl @@ -157,7 +157,7 @@ init_per_testcase(TestCase, Config) -> ct:log("Ciphers: ~p~n ", [ssl:cipher_suites(default, Version)]), end_per_testcase(TestCase, Config), ssl:start(), - ct:timetrap({seconds, 15}), + ct:timetrap({seconds, 30}), Config. end_per_testcase(_TestCase, Config) -> diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl index d3b13050e3..162c63850f 100644 --- a/lib/ssl/test/ssl_basic_SUITE.erl +++ b/lib/ssl/test/ssl_basic_SUITE.erl @@ -686,11 +686,16 @@ hello_client_cancel(Config) when is_list(Config) -> {host, Hostname}, {from, self()}, {options, ssl_test_lib:ssl_options([{handshake, hello}], Config)}, - {continue_options, cancel}]), - - ssl_test_lib:check_result(Server, {error, {tls_alert, "user canceled"}}). -%%-------------------------------------------------------------------- + {continue_options, cancel}]), + receive + {Server, {error, {tls_alert, "user canceled"}}} -> + ok; + {Server, {error, closed}} -> + ct:pal("Did not receive the ALERT"), + ok + end. +%%-------------------------------------------------------------------- hello_server_cancel() -> [{doc, "Test API function ssl:handshake_cancel/1 on the server side"}]. hello_server_cancel(Config) when is_list(Config) -> @@ -2539,7 +2544,7 @@ anonymous_cipher_suites()-> [{doc,"Test the anonymous ciphersuites"}]. anonymous_cipher_suites(Config) when is_list(Config) -> NVersion = ssl_test_lib:protocol_version(Config, tuple), - Ciphers = ssl_test_lib:anonymous_suites(NVersion), + Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(NVersion), run_suites(Ciphers, Config, anonymous). %%------------------------------------------------------------------- psk_cipher_suites() -> @@ -2635,7 +2640,7 @@ default_reject_anonymous(Config) when is_list(Config) -> Version = ssl_test_lib:protocol_version(Config), TLSVersion = ssl_test_lib:tls_version(Version), - [CipherSuite | _] = ssl_test_lib:anonymous_suites(TLSVersion), + [CipherSuite | _] = ssl_test_lib:ecdh_dh_anonymous_suites(TLSVersion), Server = ssl_test_lib:start_server_error([{node, ServerNode}, {port, 0}, {from, self()}, @@ -5046,8 +5051,14 @@ tls_downgrade_result(Socket) -> tls_close(Socket) -> ok = ssl_test_lib:send_recv_result(Socket), - ok = ssl:close(Socket, 5000). - + case ssl:close(Socket, 5000) of + ok -> + ok; + {error, closed} -> + ok; + Other -> + ct:fail(Other) + end. 
%% First two clauses handles 1/n-1 splitting countermeasure Rizzo/Duong-Beast treashold(N, {3,0}) -> diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl index 8c27571d64..1e88ca15de 100644 --- a/lib/ssl/test/ssl_test_lib.erl +++ b/lib/ssl/test/ssl_test_lib.erl @@ -1264,8 +1264,16 @@ string_regex_filter(Str, Search) when is_list(Str) -> string_regex_filter(_Str, _Search) -> false. -anonymous_suites(Version) -> - ssl:filter_cipher_suites([ssl_cipher:suite_definition(S) || S <- ssl_cipher:anonymous_suites(Version)],[]). +ecdh_dh_anonymous_suites(Version) -> + ssl:filter_cipher_suites([ssl_cipher:suite_definition(S) || S <- ssl_cipher:anonymous_suites(Version)], + [{key_exchange, + fun(dh_anon) -> + true; + (ecdh_anon) -> + true; + (_) -> + false + end}]). psk_suites(Version) -> ssl:filter_cipher_suites([ssl_cipher:suite_definition(S) || S <- ssl_cipher:psk_suites(Version)], []). diff --git a/lib/ssl/test/ssl_to_openssl_SUITE.erl b/lib/ssl/test/ssl_to_openssl_SUITE.erl index a2e8ef8be0..4f02d8d15d 100644 --- a/lib/ssl/test/ssl_to_openssl_SUITE.erl +++ b/lib/ssl/test/ssl_to_openssl_SUITE.erl @@ -412,8 +412,16 @@ basic_erlang_server_openssl_client(Config) when is_list(Config) -> Port = ssl_test_lib:inet_port(Server), Exe = "openssl", - Args = ["s_client", "-connect", hostname_format(Hostname) ++ - ":" ++ integer_to_list(Port) ++ no_low_flag() | workaround_openssl_s_clinent()], + Args = case no_low_flag("-no_ssl2") of + [] -> + ["s_client", "-connect", hostname_format(Hostname) ++ + ":" ++ integer_to_list(Port), no_low_flag("-no_ssl3") + | workaround_openssl_s_clinent()]; + Flag -> + ["s_client", "-connect", hostname_format(Hostname) ++ + ":" ++ integer_to_list(Port), no_low_flag("-no_ssl3"), Flag + | workaround_openssl_s_clinent()] + end, OpenSslPort = ssl_test_lib:portable_open_port(Exe, Args), true = port_command(OpenSslPort, Data), @@ -588,7 +596,7 @@ erlang_client_openssl_server_anon(Config) when is_list(Config) -> ServerOpts = ssl_test_lib:ssl_options(server_rsa_opts, Config), ClientOpts = ssl_test_lib:ssl_options(client_anon_opts, Config), VersionTuple = ssl_test_lib:protocol_version(Config, tuple), - Ciphers = ssl_test_lib:anonymous_suites(VersionTuple), + Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(VersionTuple), {ClientNode, _, Hostname} = ssl_test_lib:run_where(Config), @@ -631,7 +639,7 @@ erlang_server_openssl_client_anon(Config) when is_list(Config) -> process_flag(trap_exit, true), ServerOpts = ssl_test_lib:ssl_options(server_anon_opts, Config), VersionTuple = ssl_test_lib:protocol_version(Config, tuple), - Ciphers = ssl_test_lib:anonymous_suites(VersionTuple), + Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(VersionTuple), {_, ServerNode, Hostname} = ssl_test_lib:run_where(Config), @@ -665,7 +673,7 @@ erlang_server_openssl_client_anon_with_cert(Config) when is_list(Config) -> process_flag(trap_exit, true), ServerOpts = ssl_test_lib:ssl_options(server_rsa_opts, Config), VersionTuple = ssl_test_lib:protocol_version(Config, tuple), - Ciphers = ssl_test_lib:anonymous_suites(VersionTuple), + Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(VersionTuple), {_, ServerNode, Hostname} = ssl_test_lib:run_where(Config), @@ -1995,10 +2003,12 @@ hostname_format(Hostname) -> "localhost" end. -no_low_flag() -> +no_low_flag("-no_ssl2" = Flag) -> case ssl_test_lib:supports_ssl_tls_version(sslv2) of true -> - " -no_ssl2 -no_ssl3"; + Flag; false -> - " -no_ssl3" - end. + "" + end; +no_low_flag(Flag) -> + Flag. 
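The ecdh_dh_anonymous_suites/1 helper above illustrates the general shape of ssl:filter_cipher_suites/2: the second argument is a list of {Field, Fun} filters, each fun being applied to that field of every suite. A small sketch outside the test suite (the version atom is only an example):

    %% Keep only ECDHE key exchanges from the default suite list for TLS 1.2.
    Suites = ssl:filter_cipher_suites(
               ssl:cipher_suites(default, 'tlsv1.2'),
               [{key_exchange, fun(ecdhe_rsa)   -> true;
                                  (ecdhe_ecdsa) -> true;
                                  (_)           -> false
                               end}]).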
diff --git a/lib/stdlib/doc/src/calendar.xml b/lib/stdlib/doc/src/calendar.xml index 8f2b6b747a..6b4fa7f98a 100644 --- a/lib/stdlib/doc/src/calendar.xml +++ b/lib/stdlib/doc/src/calendar.xml @@ -323,7 +323,9 @@ <type name="rfc3339_string"/> <type name="rfc3339_time_unit"/> <desc> - <p>Converts an RFC 3339 timestamp into system time.</p> + <p>Converts an RFC 3339 timestamp into system time. The data format + of RFC 3339 timestamps is described by + <url href="https://www.ietf.org/rfc/rfc3339.txt">RFC 3339</url>.</p> <p>Valid option:</p> <taglist> <tag><c>{unit, Unit}</c></tag> @@ -378,7 +380,10 @@ <type name="rfc3339_string"/> <type name="rfc3339_time_unit"/> <desc> - <p>Converts a system time into RFC 3339 timestamp.</p> + <p>Converts a system time into an RFC 3339 timestamp. The data format + of RFC 3339 timestamps is described by + <url href="https://www.ietf.org/rfc/rfc3339.txt">RFC 3339</url>. + The data format of offsets is also described by RFC 3339.</p> <p>Valid options:</p> <taglist> <tag><c>{offset, Offset}</c></tag> diff --git a/lib/stdlib/doc/src/gen_event.xml b/lib/stdlib/doc/src/gen_event.xml index 012737c390..51378a6b73 100644 --- a/lib/stdlib/doc/src/gen_event.xml +++ b/lib/stdlib/doc/src/gen_event.xml @@ -458,8 +458,7 @@ gen_event:stop -----> Module:terminate/2 with the expected reason. Any other reason than <c>normal</c>, <c>shutdown</c>, or <c>{shutdown,Term}</c> causes an error report to be issued using - <seealso marker="kernel:error_logger#format/2"> - <c>error_logger:format/2</c></seealso>. + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>. The default <c>Reason</c> is <c>normal</c>.</p> <p><c>Timeout</c> is an integer greater than zero that specifies how many milliseconds to wait for the event manager to diff --git a/lib/stdlib/doc/src/gen_server.xml b/lib/stdlib/doc/src/gen_server.xml index da74e793e6..27edbc8de7 100644 --- a/lib/stdlib/doc/src/gen_server.xml +++ b/lib/stdlib/doc/src/gen_server.xml @@ -486,8 +486,7 @@ gen_server:abcast -----> Module:handle_cast/2 with the expected reason. Any other reason than <c>normal</c>, <c>shutdown</c>, or <c>{shutdown,Term}</c> causes an error report to be issued using - <seealso marker="kernel:error_logger#format/2"> - <c>error_logger:format/2</c></seealso>. + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>. The default <c>Reason</c> is <c>normal</c>.</p> <p><c>Timeout</c> is an integer greater than zero that specifies how many milliseconds to wait for the server to @@ -861,8 +860,7 @@ gen_server:abcast -----> Module:handle_cast/2 <c>shutdown</c>, or <c>{shutdown,Term}</c>, the <c>gen_server</c> process is assumed to terminate because of an error and an error report is issued using - <seealso marker="kernel:error_logger#format/2"> - <c>error_logger:format/2</c></seealso>.</p> + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>.</p> </desc> </func> </funcs> diff --git a/lib/stdlib/doc/src/gen_statem.xml b/lib/stdlib/doc/src/gen_statem.xml index e918e83df7..a808d3af55 100644 --- a/lib/stdlib/doc/src/gen_statem.xml +++ b/lib/stdlib/doc/src/gen_statem.xml @@ -1778,7 +1778,7 @@ handle_event(_, _, State, Data) -> with the expected reason. Any other reason than <c>normal</c>, <c>shutdown</c>, or <c>{shutdown,Term}</c> causes an error report to be issued through - <seealso marker="kernel:error_logger#format/2"><c>error_logger:format/2</c></seealso>. + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>. The default <c><anno>Reason</anno></c> is <c>normal</c>. 
</p> <p> @@ -2286,7 +2286,7 @@ init(Args) -> erlang:error(not_implemented, [Args]).</pre> <c>shutdown</c>, or <c>{shutdown,Term}</c>, the <c>gen_statem</c> is assumed to terminate because of an error and an error report is issued using - <seealso marker="kernel:error_logger#format/2"><c>error_logger:format/2</c></seealso>. + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>. </p> </desc> </func> diff --git a/lib/stdlib/doc/src/io_lib.xml b/lib/stdlib/doc/src/io_lib.xml index 4a2b425e8e..a3df2897ac 100644 --- a/lib/stdlib/doc/src/io_lib.xml +++ b/lib/stdlib/doc/src/io_lib.xml @@ -163,16 +163,20 @@ <p>Returns a character list that represents <c><anno>Data</anno></c> formatted in accordance with <c><anno>Format</anno></c> in the same way as - <seealso marker="#fwrite/2"><c>fwrite/2</c></seealso> and - <seealso marker="#format/2"><c>format/2</c></seealso>, - but takes an extra argument, a list of options.</p> - <p>Available options:</p> - <taglist> - <tag><c><anno>CharsLimit</anno></c></tag> - <item> - <p>A soft limit on the number of characters returned.</p> - </item> - </taglist> + <seealso marker="#fwrite/2"><c>fwrite/2</c></seealso> and + <seealso marker="#format/2"><c>format/2</c></seealso>, + but takes an extra argument, a list of options.</p> + <p>Valid option:</p> + <taglist> + <tag><c>{chars_limit, <anno>CharsLimit</anno>}</c></tag> + <item> + <p>A soft limit on the number of characters returned. + When the number of characters is reached, remaining + structures are replaced by "<c>...</c>". + <c><anno>CharsLimit</anno></c> defaults to -1, which + means no limit on the number of characters returned.</p> + </item> + </taglist> </desc> </func> @@ -390,11 +394,11 @@ everything below this level is replaced by "<c>...</c>". <c><anno>Depth</anno></c> defaults to -1, which means no limitation. Option <c><anno>CharsLimit</anno></c> puts a - soft limit on the number of characters returned. When the - number of characters is reached, remaining structures are - replaced by "<c>...</c>". <c><anno>CharsLimit</anno></c> - defaults to -1, which means no limit on the number of - characters returned.</p> + soft limit on the number of characters returned. When the + number of characters is reached, remaining structures are + replaced by "<c>...</c>". <c><anno>CharsLimit</anno></c> + defaults to -1, which means no limit on the number of + characters returned.</p> <p><em>Example:</em></p> <pre> 1> <input>lists:flatten(io_lib:write({1,[2],[3],[4,5],6,7,8,9})).</input> diff --git a/lib/stdlib/doc/src/proc_lib.xml b/lib/stdlib/doc/src/proc_lib.xml index cb152d1935..51380ae51c 100644 --- a/lib/stdlib/doc/src/proc_lib.xml +++ b/lib/stdlib/doc/src/proc_lib.xml @@ -59,18 +59,17 @@ <p>When a process that is started using <c>proc_lib</c> terminates abnormally (that is, with another exit reason than <c>normal</c>, <c>shutdown</c>, or <c>{shutdown,Term}</c>), a <em>crash report</em> - is generated, which is written to terminal by the default SASL - event handler. That is, the crash report is normally only visible - if the SASL application is started; see - <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso> and section + is generated, which is written to terminal by the default logger + handler setup by Kernel. 
For more information about how crash reports + were logged prior to Erlang/OTP 21.0, see <seealso marker="sasl:error_logging">SASL Error Logging</seealso> in the SASL User's Guide.</p> <p>Unlike in "plain Erlang", <c>proc_lib</c> processes will not generate <em>error reports</em>, which are written to the terminal by the - emulator and do not require SASL to be started. All exceptions are + emulator. All exceptions are converted to <em>exits</em> which are ignored by the default - <c>error_logger</c> handler.</p> + <c>logger</c> handler.</p> <p>The crash report contains the previously stored information, such as ancestors and initial function, the termination reason, and @@ -115,12 +114,22 @@ <name name="format" arity="2"/> <fsummary>Format a crash report.</fsummary> <desc> - <p>This function can be used by a user-defined event handler to + <note> + <p>This function is deprecated in the sense that + the <c>error_logger</c> is no longer the preferred + interface for logging in Erlang/OTP. A + new <seealso marker="kernel:logger_chapter">logging + API</seealso> was added in Erlang/OTP 21.0, but + legacy <c>error_logger</c> handlers can still be used. New + Logger handlers do not need to use this function, since + the formatting callback (<c>report_cb</c>) is included as + metadata in the log event.</p> + </note> + <p>This function can be used by a user-defined legacy + <c>error_logger</c> event handler to format a crash report. The crash report is sent using - <seealso marker="kernel:error_logger#error_report/2"> - <c>error_logger:error_report(crash_report, - <anno>CrashReport</anno>)</c></seealso>. - That is, the event to be handled is of the format + <seealso marker="kernel:logger"> + <c>logger(3)</c></seealso>, and the event to be handled is of the format <c>{error_report, GL, {Pid, crash_report, <anno>CrashReport</anno>}}</c>, where <c>GL</c> is the group leader pid of process @@ -132,7 +141,19 @@ <name name="format" arity="3"/> <fsummary>Format a crash report.</fsummary> <desc> - <p>This function can be used by a user-defined event handler to + <note> + <p>This function is deprecated in the sense that + the <c>error_logger</c> is no longer the preferred + interface for logging in Erlang/OTP. A + new <seealso marker="kernel:logger_chapter">logging + API</seealso> was added in Erlang/OTP 21.0, but + legacy <c>error_logger</c> handlers can still be used. New + Logger handlers do not need to used this function, since + the formatting callback (<c>report_cb</c>) is included as + metadata in the log event.</p> + </note> + <p>This function can be used by a user-defined legacy + <c>error_logger</c> event handler to format a crash report. When <anno>Depth</anno> is specified as a positive integer, it is used in the format string to limit the output as follows: <c>io_lib:format("~P", @@ -395,6 +416,8 @@ init(Parent) -> <title>See Also</title> <p><seealso marker="kernel:error_logger"> <c>error_logger(3)</c></seealso></p> + <p><seealso marker="kernel:logger"> + <c>logger(3)</c></seealso></p> </section> </erlref> diff --git a/lib/stdlib/src/ets.erl b/lib/stdlib/src/ets.erl index 6a559f0be5..a35f79c0d9 100644 --- a/lib/stdlib/src/ets.erl +++ b/lib/stdlib/src/ets.erl @@ -77,7 +77,9 @@ whereis/1]). %% internal exports --export([internal_request_all/0]). +-export([internal_request_all/0, + internal_delete_all/2, + internal_select_delete/2]). -spec all() -> [Tab] when Tab :: tab(). @@ -116,7 +118,15 @@ delete(_, _) -> -spec delete_all_objects(Tab) -> true when Tab :: tab(). 
-delete_all_objects(_) -> +delete_all_objects(Tab) -> + _ = ets:internal_delete_all(Tab, undefined), + true. + +-spec internal_delete_all(Tab, undefined) -> NumDeleted when + Tab :: tab(), + NumDeleted :: non_neg_integer(). + +internal_delete_all(_, _) -> erlang:nif_error(undef). -spec delete_object(Tab, Object) -> true when @@ -378,7 +388,17 @@ select_count(_, _) -> MatchSpec :: match_spec(), NumDeleted :: non_neg_integer(). -select_delete(_, _) -> +select_delete(Tab, [{'_',[],[true]}]) -> + ets:internal_delete_all(Tab, undefined); +select_delete(Tab, MatchSpec) -> + ets:internal_select_delete(Tab, MatchSpec). + +-spec internal_select_delete(Tab, MatchSpec) -> NumDeleted when + Tab :: tab(), + MatchSpec :: match_spec(), + NumDeleted :: non_neg_integer(). + +internal_select_delete(_, _) -> erlang:nif_error(undef). -spec select_replace(Tab, MatchSpec) -> NumReplaced when diff --git a/lib/stdlib/src/gen_fsm.erl b/lib/stdlib/src/gen_fsm.erl index 77826c3dc6..1646186761 100644 --- a/lib/stdlib/src/gen_fsm.erl +++ b/lib/stdlib/src/gen_fsm.erl @@ -129,25 +129,25 @@ %% logger callback -export([format_log/1]). --deprecated({start, 3, next_major_release}). --deprecated({start, 4, next_major_release}). --deprecated({start_link, 3, next_major_release}). --deprecated({start_link, 4, next_major_release}). --deprecated({stop, 1, next_major_release}). --deprecated({stop, 3, next_major_release}). --deprecated({send_event, 2, next_major_release}). --deprecated({sync_send_event, 2, next_major_release}). --deprecated({sync_send_event, 3, next_major_release}). --deprecated({send_all_state_event, 2, next_major_release}). --deprecated({sync_send_all_state_event, 2, next_major_release}). --deprecated({sync_send_all_state_event, 3, next_major_release}). --deprecated({reply, 2, next_major_release}). --deprecated({start_timer, 2, next_major_release}). --deprecated({send_event_after, 2, next_major_release}). --deprecated({cancel_timer, 1, next_major_release}). --deprecated({enter_loop, 4, next_major_release}). --deprecated({enter_loop, 5, next_major_release}). --deprecated({enter_loop, 6, next_major_release}). +-deprecated({start, 3, eventually}). +-deprecated({start, 4, eventually}). +-deprecated({start_link, 3, eventually}). +-deprecated({start_link, 4, eventually}). +-deprecated({stop, 1, eventually}). +-deprecated({stop, 3, eventually}). +-deprecated({send_event, 2, eventually}). +-deprecated({sync_send_event, 2, eventually}). +-deprecated({sync_send_event, 3, eventually}). +-deprecated({send_all_state_event, 2, eventually}). +-deprecated({sync_send_all_state_event, 2, eventually}). +-deprecated({sync_send_all_state_event, 3, eventually}). +-deprecated({reply, 2, eventually}). +-deprecated({start_timer, 2, eventually}). +-deprecated({send_event_after, 2, eventually}). +-deprecated({cancel_timer, 1, eventually}). +-deprecated({enter_loop, 4, eventually}). +-deprecated({enter_loop, 5, eventually}). +-deprecated({enter_loop, 6, eventually}). %%% --------------------------------------------------- %%% Interface functions. 
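The new select_delete/2 clause above recognises the match-everything spec and routes it to the same internal_delete_all BIF that delete_all_objects/1 now wraps, which is the code path exercised by the ets_SUITE changes below. Illustrative sketch (hypothetical table):

    T = ets:new(example, [set, public]),
    true = ets:insert(T, [{a, 1}, {b, 2}, {c, 3}]),
    %% [{'_', [], [true]}] matches every object, so this now follows the
    %% delete_all_objects path instead of a per-object match walk.
    3 = ets:select_delete(T, [{'_', [], [true]}]),
    0 = ets:info(T, size).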
diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl index 035dd871ff..09f77c0810 100644 --- a/lib/stdlib/src/gen_server.erl +++ b/lib/stdlib/src/gen_server.erl @@ -934,14 +934,14 @@ format_log(#{label:={gen_server,terminate}, end end; _ -> - logger:limit_term(Reason) + error_logger:limit_term(Reason) end, {ClientFmt,ClientArgs} = format_client_log(Client), {"** Generic server ~tp terminating \n" "** Last message in was ~tp~n" "** When Server state == ~tp~n" "** Reason for termination == ~n** ~tp~n" ++ ClientFmt, - [Name, Msg, logger:limit_term(State), Reason1] ++ ClientArgs}; + [Name, Msg, error_logger:limit_term(State), Reason1] ++ ClientArgs}; format_log(#{label:={gen_server,no_handle_info}, module:=Mod, message:=Msg}) -> diff --git a/lib/stdlib/src/gen_statem.erl b/lib/stdlib/src/gen_statem.erl index f558f0d33e..b36b8cd5a5 100644 --- a/lib/stdlib/src/gen_statem.erl +++ b/lib/stdlib/src/gen_statem.erl @@ -1938,7 +1938,7 @@ format_log(#{label:={gen_statem,terminate}, _ -> {Reason,Stacktrace} end, [LimitedP, LimitedFmtData, LimitedFixedReason] = - [logger:limit_term(D) || D <- [P, FmtData, FixedReason]], + [error_logger:limit_term(D) || D <- [P, FmtData, FixedReason]], CBMode = case StateEnter of true -> diff --git a/lib/stdlib/src/otp_internal.erl b/lib/stdlib/src/otp_internal.erl index a17addcc42..ceec3079a1 100644 --- a/lib/stdlib/src/otp_internal.erl +++ b/lib/stdlib/src/otp_internal.erl @@ -612,6 +612,15 @@ obsolete_1(erlang, get_stacktrace, 0) -> obsolete_1(erlang, hash, 2) -> {removed, {erlang, phash2, 2}, "20.0"}; +%% Add in OTP 21. + +obsolete_1(ssl, ssl_accept, 1) -> + {deprecated, "deprecated; use ssl:handshake/1 instead"}; +obsolete_1(ssl, ssl_accept, 2) -> + {deprecated, "deprecated; use ssl:handshake/2 instead"}; +obsolete_1(ssl, ssl_accept, 3) -> + {deprecated, "deprecated; use ssl:handshake/3 instead"}; + %% not obsolete obsolete_1(_, _, _) -> diff --git a/lib/stdlib/src/proc_lib.erl b/lib/stdlib/src/proc_lib.erl index 9094e0c0cd..5f14e78f91 100644 --- a/lib/stdlib/src/proc_lib.erl +++ b/lib/stdlib/src/proc_lib.erl @@ -553,10 +553,10 @@ get_ancestors(Pid) -> %% assumed that all report handlers call proc_lib:format(). get_messages(Pid) -> Messages = get_process_messages(Pid), - {messages, logger:limit_term(Messages)}. + {messages, error_logger:limit_term(Messages)}. get_process_messages(Pid) -> - Depth = logger:get_format_depth(), + Depth = error_logger:get_format_depth(), case Pid =/= self() orelse Depth =:= unlimited of true -> {messages, Messages} = get_process_info(Pid, messages), @@ -586,7 +586,7 @@ get_cleaned_dictionary(Pid) -> cleaned_dict(Dict) -> CleanDict = clean_dict(Dict), - logger:limit_term(CleanDict). + error_logger:limit_term(CleanDict). clean_dict([{'$ancestors',_}|Dict]) -> clean_dict(Dict); @@ -756,7 +756,7 @@ check(Res) -> Res. Args :: [term()]. report_cb(#{label:={proc_lib,crash}, report:=CrashReport}) -> - Depth = logger:get_format_depth(), + Depth = error_logger:get_format_depth(), get_format_and_args(CrashReport, utf8, Depth). -spec format(CrashReport) -> string() when diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index 02211fa8df..574aac96c8 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -87,6 +87,7 @@ -export([t_select_reverse/1]). +-include_lib("stdlib/include/ms_transform.hrl"). % ets:fun2ms -include_lib("common_test/include/ct.hrl"). -define(m(A,B), assert_eq(A,B)). 
@@ -173,10 +174,12 @@ groups() -> init_per_suite(Config) -> erts_debug:set_internal_state(available_internal_state, true), + erts_debug:set_internal_state(ets_force_trap, true), Config. end_per_suite(_Config) -> stop_spawn_logger(), + erts_debug:set_internal_state(ets_force_trap, false), catch erts_debug:set_internal_state(available_internal_state, false), ok. @@ -812,7 +815,60 @@ t_delete_all_objects_do(Opts) -> 4000 = ets:info(T,size), true = ets:delete_all_objects(T), 0 = ets:info(T,size), - ets:delete(T). + ets:delete(T), + + %% Test delete_all_objects is atomic + T2 = ets:new(t_delete_all_objects, [public | Opts]), + Self = self(), + Inserters = [spawn_link(fun() -> inserter(T2, 100*1000, 1, Self) end) || _ <- [1,2,3,4]], + [receive {Ipid, running} -> ok end || Ipid <- Inserters], + + ets:delete_all_objects(T2), + erlang:yield(), + [Ipid ! stop || Ipid <- Inserters], + Result = [receive {Ipid, stopped, Highest} -> {Ipid,Highest} end || Ipid <- Inserters], + + %% Verify unbroken sequences of objects inserted _after_ ets:delete_all_objects. + Sum = lists:foldl(fun({Ipid, Highest}, AccSum) -> + %% ets:fun2ms(fun({{K,Ipid}}) when K =< Highest -> true end), + AliveMS = [{{{'$1',Ipid}},[{'=<','$1',{const,Highest}}],[true]}], + Alive = ets:select_count(T2, AliveMS), + Lowest = Highest - (Alive-1), + + %% ets:fun2ms(fun({{K,Ipid}}) when K < Lowest -> true end) + DeletedMS = [{{{'$1',Ipid}},[{'<','$1',{const,Lowest}}],[true]}], + 0 = ets:select_count(T2, DeletedMS), + AccSum + Alive + end, + 0, + Result), + ok = case ets:info(T2, size) of + Sum -> ok; + Size -> + io:format("Sum = ~p\nSize = ~p\n", [Sum, Size]), + {Sum,Size} + end, + + ets:delete(T2). + +inserter(_, 0, _, _) -> + ok; +inserter(T, N, Next, Papa) -> + case Next of + 10*1000 -> + Papa ! {self(), running}; + _ -> + ok + end, + + ets:insert(T, {{Next, self()}}), + receive + stop -> + Papa ! {self(), stopped, Next}, + ok + after 0 -> + inserter(T, N-1, Next+1, Papa) + end. %% Test ets:delete_object/2. diff --git a/lib/tools/doc/src/fprof.xml b/lib/tools/doc/src/fprof.xml index 4c9e48045e..72624bd33b 100644 --- a/lib/tools/doc/src/fprof.xml +++ b/lib/tools/doc/src/fprof.xml @@ -328,10 +328,16 @@ purposes. This option is only allowed with the <c>start</c> option.</item> <tag><c>cpu_time</c>| <c>{cpu_time, bool()}</c></tag> - <item>The options <c>cpu_time</c> or <c>{cpu_time, true></c> + <item>The options <c>cpu_time</c> or <c>{cpu_time, true}</c> makes the timestamps in the trace be in CPU time instead of wallclock time which is the default. This option is - only allowed with the <c>start</c> option.</item> + only allowed with the <c>start</c> option. + <warning><p>Getting correct values out of cpu_time can be difficult. + The best way to get correct values is to run using a single + scheduler and bind that scheduler to a specific CPU, + i.e. <c>erl +S 1 +sbt db</c>.</p> + </warning> + </item> <tag><c>{procs, PidSpec}</c>| <c>{procs, [PidSpec]}</c></tag> <item>Specifies which processes that shall be traced. If this option is not given, the calling process is |