Diffstat (limited to 'lib')
-rw-r--r--  lib/common_test/src/Makefile  | 2
-rw-r--r--  lib/common_test/src/cth_log_redirect.erl  | 241
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE.erl  | 21
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl  | 2
-rw-r--r--  lib/common_test/test/ct_pre_post_test_io_SUITE.erl  | 4
-rw-r--r--  lib/kernel/doc/src/Makefile  | 22
-rw-r--r--  lib/kernel/doc/src/book.xml  | 3
-rw-r--r--  lib/kernel/doc/src/error_logger.xml  | 19
-rw-r--r--  lib/kernel/doc/src/introduction_chapter.xml  | 64
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml  | 154
-rw-r--r--  lib/kernel/doc/src/logger.xml  | 478
-rw-r--r--  lib/kernel/doc/src/logger_arch.png  | bin 0 -> 31459 bytes
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml  | 815
-rw-r--r--  lib/kernel/doc/src/logger_disk_log_h.xml  | 146
-rw-r--r--  lib/kernel/doc/src/logger_filters.xml  | 191
-rw-r--r--  lib/kernel/doc/src/logger_formatter.xml  | 157
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml  | 133
-rw-r--r--  lib/kernel/doc/src/part.xml  | 40
-rw-r--r--  lib/kernel/doc/src/ref_man.xml  | 5
-rw-r--r--  lib/kernel/doc/src/specs.xml  | 5
-rw-r--r--  lib/kernel/include/logger.hrl  | 49
-rw-r--r--  lib/kernel/src/Makefile  | 29
-rw-r--r--  lib/kernel/src/application_controller.erl  | 35
-rw-r--r--  lib/kernel/src/code_server.erl  | 14
-rw-r--r--  lib/kernel/src/error_logger.erl  | 545
-rw-r--r--  lib/kernel/src/kernel.app.src  | 15
-rw-r--r--  lib/kernel/src/kernel.erl  | 26
-rw-r--r--  lib/kernel/src/logger.erl  | 803
-rw-r--r--  lib/kernel/src/logger_backend.erl  | 133
-rw-r--r--  lib/kernel/src/logger_config.erl  | 151
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl  | 694
-rw-r--r--  lib/kernel/src/logger_filters.erl  | 123
-rw-r--r--  lib/kernel/src/logger_formatter.erl  | 295
-rw-r--r--  lib/kernel/src/logger_h_common.erl  | 301
-rw-r--r--  lib/kernel/src/logger_h_common.hrl  | 262
-rw-r--r--  lib/kernel/src/logger_internal.hrl  | 98
-rw-r--r--  lib/kernel/src/logger_server.erl  | 440
-rw-r--r--  lib/kernel/src/logger_simple.erl  | 236
-rw-r--r--  lib/kernel/src/logger_std_h.erl  | 799
-rw-r--r--  lib/kernel/src/logger_sup.erl  | 53
-rw-r--r--  lib/kernel/test/Makefile  | 14
-rw-r--r--  lib/kernel/test/application_SUITE.erl  | 3
-rw-r--r--  lib/kernel/test/error_logger_SUITE.erl  | 46
-rw-r--r--  lib/kernel/test/init_SUITE.erl  | 6
-rw-r--r--  lib/kernel/test/kernel.spec  | 1
-rw-r--r--  lib/kernel/test/logger.cover  | 14
-rw-r--r--  lib/kernel/test/logger.spec  | 11
-rw-r--r--  lib/kernel/test/logger_SUITE.erl  | 828
-rw-r--r--  lib/kernel/test/logger_bench_SUITE.erl  | 500
-rw-r--r--  lib/kernel/test/logger_bench_SUITE_data/Emakefile  | 1
-rw-r--r--  lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl  | 73
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl  | 1417
-rw-r--r--  lib/kernel/test/logger_env_var_SUITE.erl  | 451
-rw-r--r--  lib/kernel/test/logger_filters_SUITE.erl  | 214
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl  | 558
-rw-r--r--  lib/kernel/test/logger_legacy_SUITE.erl  | 282
-rw-r--r--  lib/kernel/test/logger_simple_SUITE.erl  | 247
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl  | 1396
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE.erl  | 17
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl  | 24
-rw-r--r--  lib/reltool/src/reltool_target.erl  | 2
-rw-r--r--  lib/runtime_tools/src/appmon_info.erl  | 2
-rw-r--r--  lib/sasl/doc/src/sasl_app.xml  | 183
-rw-r--r--  lib/sasl/src/sasl.app.src  | 3
-rw-r--r--  lib/sasl/src/sasl.erl  | 90
-rw-r--r--  lib/sasl/src/systools_make.erl  | 8
-rw-r--r--  lib/sasl/test/sasl_SUITE.erl  | 14
-rw-r--r--  lib/sasl/test/sasl_report_SUITE.erl  | 18
-rw-r--r--  lib/stdlib/src/Makefile  | 7
-rw-r--r--  lib/stdlib/src/gen_event.erl  | 87
-rw-r--r--  lib/stdlib/src/gen_fsm.erl  | 53
-rw-r--r--  lib/stdlib/src/gen_server.erl  | 110
-rw-r--r--  lib/stdlib/src/gen_statem.erl  | 125
-rw-r--r--  lib/stdlib/src/proc_lib.erl  | 125
-rw-r--r--  lib/stdlib/src/supervisor.erl  | 55
-rw-r--r--  lib/stdlib/src/supervisor_bridge.erl  | 27
-rw-r--r--  lib/stdlib/test/error_logger_h_SUITE.erl  | 5
-rw-r--r--  lib/stdlib/test/proc_lib_SUITE.erl  | 11
78 files changed, 13803 insertions, 828 deletions
diff --git a/lib/common_test/src/Makefile b/lib/common_test/src/Makefile
index 9d751996ad..2a2a9cb5bc 100644
--- a/lib/common_test/src/Makefile
+++ b/lib/common_test/src/Makefile
@@ -165,3 +165,5 @@ release_tests_spec: opt
release_docs_spec: docs
+# Include dependencies -- list below added by Kostis Sagonas
+$(EBIN)/cth_log_redirect.beam: ../../kernel/include/logger.hrl
diff --git a/lib/common_test/src/cth_log_redirect.erl b/lib/common_test/src/cth_log_redirect.erl
index b05f0bd28b..417ea615a3 100644
--- a/lib/common_test/src/cth_log_redirect.erl
+++ b/lib/common_test/src/cth_log_redirect.erl
@@ -33,17 +33,19 @@
pre_init_per_testcase/4, post_init_per_testcase/5,
pre_end_per_testcase/4, post_end_per_testcase/5]).
-%% Event handler Callbacks
--export([init/1,
- handle_event/2, handle_call/2, handle_info/2,
- terminate/1, terminate/2, code_change/3]).
+%% Logger handler and gen_server callbacks
+-export([log/2,
+ init/1,
+ handle_cast/2, handle_call/3,
+ terminate/1, terminate/2]).
%% Other
-export([handle_remote_events/1]).
-include("ct.hrl").
+-include("../../kernel/src/logger_internal.hrl").
--behaviour(gen_event).
+-behaviour(gen_server).
-record(eh_state, {log_func,
curr_suite,
@@ -57,7 +59,7 @@ id(_Opts) ->
init(?MODULE, _Opts) ->
ct_util:mark_process(),
- error_logger:add_report_handler(?MODULE),
+ ok = start_log_handler(),
tc_log_async.
pre_init_per_suite(Suite, Config, State) ->
@@ -100,7 +102,7 @@ pre_end_per_testcase(_Suite, _TC, Config, State) ->
post_end_per_testcase(_Suite, _TC, _Config, Result, State) ->
%% Make sure that the event queue is flushed
%% before ending this test case.
- gen_event:call(error_logger, ?MODULE, flush, 300000),
+ gen_server:call(?MODULE, flush, 300000),
{Result, State}.
pre_end_per_group(_Suite, Group, Config, {tc_log, Group}) ->
@@ -114,127 +116,155 @@ post_end_per_group(_Suite, _Group, Config, Return, State) ->
set_curr_func({group,undefined}, Config),
{Return, State}.
-%% Copied and modified from sasl_report_tty_h.erl
-init(_Type) ->
+start_log_handler() ->
+ case whereis(?MODULE) of
+ undefined ->
+ ChildSpec =
+ #{id=>?MODULE,
+ start=>{gen_server,start_link,[{local,?MODULE},?MODULE,[],[]]},
+ restart=>transient,
+ shutdown=>2000,
+ type=>worker,
+ modules=>[?MODULE]},
+ {ok,_} = supervisor:start_child(logger_sup,ChildSpec);
+ _Pid ->
+ ok
+ end,
+ ok = logger:add_handler(?MODULE,?MODULE,
+ #{level=>info,
+ formatter=>{?DEFAULT_FORMATTER,
+ ?DEFAULT_FORMAT_CONFIG}}).
+
+init([]) ->
{ok, #eh_state{log_func = tc_log_async}}.
-handle_event({_Type,GL,_Msg}, #eh_state{handle_remote_events = false} = State)
- when node(GL) /= node() ->
- {ok, State};
-handle_event(Event, #eh_state{log_func = LogFunc} = State) ->
+log(#{msg:={report,Msg},meta:=#{domain:=[beam,erlang,otp,sasl]}}=Log,Config) ->
case whereis(sasl_sup) of
undefined ->
- sasl_not_started;
+ ok; % sasl application is not started
_Else ->
- {ok, ErrLogType} = application:get_env(sasl, errlog_type),
- SReport = sasl_report:format_report(group_leader(), ErrLogType,
- tag_event(Event, local)),
- if is_list(SReport) ->
- SaslHeader = format_header(State),
- case LogFunc of
- tc_log ->
- ct_logs:tc_log(sasl, ?STD_IMPORTANCE,
- SaslHeader, SReport, [], []);
- tc_log_async ->
- ct_logs:tc_log_async(sasl, ?STD_IMPORTANCE,
- SaslHeader, SReport, [])
- end;
- true -> %% Report is an atom if no logging is to be done
- ignore
- end
- end,
- %% note that error_logger (unlike sasl) expects UTC time
- EReport = error_logger_tty_h:write_event(
- tag_event(Event, utc), io_lib),
- if is_list(EReport) ->
- ErrHeader = format_header(State),
- case LogFunc of
- tc_log ->
- ct_logs:tc_log(error_logger, ?STD_IMPORTANCE,
- ErrHeader, EReport, [], []);
- tc_log_async ->
- ct_logs:tc_log_async(error_logger, ?STD_IMPORTANCE,
- ErrHeader, EReport, [])
- end;
- true -> %% Report is an atom if no logging is to be done
- ignore
- end,
- {ok, State}.
-
-handle_info({'EXIT',User,killed}, State) ->
- case whereis(user) of
- %% init:stop/1/2 has been called, let's finish!
- undefined ->
- remove_handler;
- User ->
- remove_handler;
- _ ->
- {ok,State}
+ Level =
+ case application:get_env(sasl, errlog_type) of
+ {ok,error} ->
+ error;
+ {ok,_} ->
+ info;
+ undefined ->
+ info
+ end,
+ case Level of
+ error ->
+ case Msg of
+ #{label:={_,progress}} ->
+ ok;
+ _ ->
+ do_log(add_log_category(Log,sasl),Config)
+ end;
+ _ ->
+ do_log(add_log_category(Log,sasl),Config)
+ end
end;
+log(#{meta:=#{domain:=[beam,erlang,otp]}}=Log,Config) ->
+ do_log(add_log_category(Log,error_logger),Config);
+log(#{meta:=#{domain:=_}},_) ->
+ ok;
+log(Log,Config) ->
+ do_log(add_log_category(Log,error_logger),Config).
+
+add_log_category(#{meta:=Meta}=Log,Category) ->
+ Log#{meta=>Meta#{?MODULE=>#{category=>Category}}}.
-handle_info(_, State) ->
- {ok,State}.
+do_log(Log,Config) ->
+ gen_server:call(?MODULE,{log,Log,Config}).
-handle_call(flush,State) ->
- {ok, ok, State};
+handle_cast(_, State) ->
+ {noreply,State}.
-handle_call({set_curr_func,{group,Group,Conf},Config},
- State) when is_list(Config) ->
+handle_call({log,#{meta:=#{gl:=GL}},_}, _From,
+ #eh_state{handle_remote_events=false}=State)
+ when node(GL) /= node() ->
+ {reply, ok, State};
+
+handle_call({log,
+ #{msg:=Msg0,
+ meta:=#{?MODULE:=#{category:=Category}}=Meta}=Log,
+ #{formatter:={Formatter,FConfig}}},
+ _From,
+ #eh_state{log_func=LogFunc}=State) ->
+ Header = format_header(State),
+ Msg =
+ case Msg0 of
+ {report,R} ->
+ Fun=maps:get(report_cb,Meta,fun logger:format_report/1),
+ Fun(R);
+ _ ->
+ Msg0
+ end,
+ String = Formatter:format(Log#{msg=>Msg},FConfig),
+ case LogFunc of
+ tc_log ->
+ ct_logs:tc_log(Category, ?STD_IMPORTANCE,
+ Header, String, [], []);
+ tc_log_async ->
+ ct_logs:tc_log_async(Category, ?STD_IMPORTANCE,
+ Header, String, [])
+ end,
+ {reply,ok,State};
+
+handle_call(flush,_From,State) ->
+ {reply, ok, State};
+
+handle_call({set_curr_func,{group,Group,Conf},Config},_From,State)
+ when is_list(Config) ->
Parallel = case proplists:get_value(tc_group_properties, Config) of
undefined -> false;
Props -> lists:member(parallel, Props)
end,
- {ok, ok, State#eh_state{curr_group = Group,
- curr_func = Conf,
- parallel_tcs = Parallel}};
-handle_call({set_curr_func,{group,Group,Conf},_SkipOrFail}, State) ->
- {ok, ok, State#eh_state{curr_group = Group,
- curr_func = Conf,
- parallel_tcs = false}};
-handle_call({set_curr_func,{group,undefined},_Config}, State) ->
- {ok, ok, State#eh_state{curr_group = undefined,
- curr_func = undefined,
- parallel_tcs = false}};
-handle_call({set_curr_func,{Suite,Conf},_Config}, State) ->
- {ok, ok, State#eh_state{curr_suite = Suite,
- curr_func = Conf,
- parallel_tcs = false}};
-handle_call({set_curr_func,undefined,_Config}, State) ->
- {ok, ok, State#eh_state{curr_suite = undefined,
- curr_func = undefined,
- parallel_tcs = false}};
-handle_call({set_curr_func,TC,_Config}, State) ->
- {ok, ok, State#eh_state{curr_func = TC}};
-
-handle_call({set_logfunc,NewLogFunc}, State) ->
- {ok, NewLogFunc, State#eh_state{log_func = NewLogFunc}};
-
-handle_call({handle_remote_events,Bool}, State) ->
- {ok, ok, State#eh_state{handle_remote_events = Bool}};
-
-handle_call(_Query, _State) ->
- {error, bad_query}.
+ {reply, ok, State#eh_state{curr_group = Group,
+ curr_func = Conf,
+ parallel_tcs = Parallel}};
+handle_call({set_curr_func,{group,Group,Conf},_SkipOrFail}, _From, State) ->
+ {reply, ok, State#eh_state{curr_group = Group,
+ curr_func = Conf,
+ parallel_tcs = false}};
+handle_call({set_curr_func,{group,undefined},_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_group = undefined,
+ curr_func = undefined,
+ parallel_tcs = false}};
+handle_call({set_curr_func,{Suite,Conf},_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_suite = Suite,
+ curr_func = Conf,
+ parallel_tcs = false}};
+handle_call({set_curr_func,undefined,_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_suite = undefined,
+ curr_func = undefined,
+ parallel_tcs = false}};
+handle_call({set_curr_func,TC,_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_func = TC}};
+
+handle_call({set_logfunc,NewLogFunc}, _From, State) ->
+ {reply, NewLogFunc, State#eh_state{log_func = NewLogFunc}};
+
+handle_call({handle_remote_events,Bool}, _From, State) ->
+ {reply, ok, State#eh_state{handle_remote_events = Bool}}.
terminate(_) ->
- error_logger:delete_report_handler(?MODULE),
- [].
+ _ = logger:remove_handler(?MODULE),
+ _ = supervisor:terminate_child(logger_sup,?MODULE),
+ _ = supervisor:delete_child(logger_sup,?MODULE),
+ ok.
terminate(_Arg, _State) ->
ok.
-tag_event(Event, utc) ->
- {calendar:universal_time(), Event};
-tag_event(Event, _) ->
- {calendar:local_time(), Event}.
-
set_curr_func(CurrFunc, Config) ->
- gen_event:call(error_logger, ?MODULE, {set_curr_func, CurrFunc, Config}).
+ gen_server:call(?MODULE, {set_curr_func, CurrFunc, Config}).
set_log_func(Func) ->
- gen_event:call(error_logger, ?MODULE, {set_logfunc, Func}).
+ gen_server:call(?MODULE, {set_logfunc, Func}).
handle_remote_events(Bool) ->
- gen_event:call(error_logger, ?MODULE, {handle_remote_events, Bool}).
+ gen_server:call(?MODULE, {handle_remote_events, Bool}).
%%%-----------------------------------------------------------------
@@ -272,6 +302,3 @@ format_header(#eh_state{curr_suite = Suite,
curr_func = TC}) ->
io_lib:format("System report during ~w:~tw/1 in ~tw",
[Suite,TC,Group]).
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
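
The rewritten cth_log_redirect above shows the general shape of a Logger handler that offloads work to a process: the module exports log/2, which Logger calls on the client process, and the callback forwards the event to a registered gen_server. A minimal sketch of that pattern follows; the module name and the standard-output sink are invented for the example, and it assumes, as the patch does, that the handler config passed to log/2 carries the formatter tuple.

    %% forwarding_log_h: hypothetical example module, not part of this patch.
    -module(forwarding_log_h).
    -behaviour(gen_server).

    %% Logger handler callback
    -export([log/2]).
    %% gen_server API and callbacks
    -export([start_link/0, init/1, handle_call/3, handle_cast/2]).

    start_link() ->
        gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

    %% Called by Logger on the process that emitted the event; forward the
    %% event and the handler config to the server, as do_log/2 does above.
    log(LogEvent, Config) ->
        gen_server:call(?MODULE, {log, LogEvent, Config}).

    init([]) ->
        {ok, #{}}.

    handle_call({log, LogEvent, #{formatter := {FModule, FConfig}}}, _From, State) ->
        %% Let the configured formatter build the final chardata, then write
        %% it to this example's sink (standard output).
        String = FModule:format(LogEvent, FConfig),
        io:put_chars(String),
        {reply, ok, State};
    handle_call(_Request, _From, State) ->
        {reply, ok, State}.

    handle_cast(_Msg, State) ->
        {noreply, State}.

Attaching it with logger:add_handler(forwarding_log_h, forwarding_log_h, #{level => info, formatter => {logger_formatter, #{}}}) after start_link/0 has returned mirrors start_log_handler/0 above.
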
diff --git a/lib/common_test/test/ct_hooks_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE.erl
index 3c1e887f65..6228524a88 100644
--- a/lib/common_test/test/ct_hooks_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE.erl
@@ -258,15 +258,20 @@ cth_log(Config) when is_list(Config) ->
fun(UnexpIoLog) ->
{ok,Bin} = file:read_file(UnexpIoLog),
Ts = string:lexemes(binary_to_list(Bin),[$\n]),
- Matches = lists:foldl(fun([$=,$E,$R,$R,$O,$R|_], N) ->
- N+1;
- ([$L,$o,$g,$g,$e,$r|_], N) ->
- N+1;
+ Matches = lists:foldl(fun([$=,$E,$R,$R,$O,$R|_], {E,I,L}) ->
+ {E+1,I,L};
+ ([$=,$I,$N,$F,$O|_], {E,I,L}) ->
+ {E,I+1,L};
+ ([$L,$o,$g,$g,$e,$r|_], {E,I,L}) ->
+ {E,I,L+1};
(_, N) -> N
- end, 0, Ts),
- ct:pal("~p matches in ~tp", [Matches,UnexpIoLog]),
- if Matches > 10 -> ok;
- true -> exit({no_unexpected_io_found,UnexpIoLog})
+ end, {0,0,0}, Ts),
+ ct:pal("~p ({Error,Info,Log}) matches in ~tp",
+ [Matches,UnexpIoLog]),
+ MatchList = tuple_to_list(Matches),
+ case [N || N <- MatchList, N<3] of
+ [] -> ok;
+ _ -> exit({missing_unexpected_io,UnexpIoLog})
end
end, UnexpIoLogs),
ok.
diff --git a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl
index bd1ac54781..a0cd77b88b 100644
--- a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl
@@ -124,6 +124,6 @@ gen() ->
gen_loop(N) ->
ct:log("Logger iteration: ~p", [N]),
error_logger:error_report(N),
- error_logger:info_report(progress, N),
+ error_logger:info_report(N),
ct:sleep(150),
gen_loop(N+1).
diff --git a/lib/common_test/test/ct_pre_post_test_io_SUITE.erl b/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
index 0b85392009..538fd822c1 100644
--- a/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
+++ b/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
@@ -241,7 +241,7 @@ try_loop(_Fun, 0) ->
gave_up;
try_loop(Fun, N) ->
try Fun() of
- {error,_} ->
+ {Error,_} when Error==error; Error==badrpc ->
timer:sleep(10),
try_loop(Fun, N-1);
Result ->
@@ -257,7 +257,7 @@ try_loop(M, F, _A, 0) ->
gave_up;
try_loop(M, F, A, N) ->
try apply(M, F, A) of
- {error,_} ->
+ {Error,_Reason} when Error==error; Error==badrpc ->
timer:sleep(10),
try_loop(M, F, A, N-1);
Result ->
diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile
index 2413541082..82869d7b15 100644
--- a/lib/kernel/doc/src/Makefile
+++ b/lib/kernel/doc/src/Makefile
@@ -56,6 +56,11 @@ XML_REF3_FILES = application.xml \
inet.xml \
inet_res.xml \
init_stub.xml \
+ logger.xml \
+ logger_std_h.xml \
+ logger_disk_log_h.xml \
+ logger_filters.xml \
+ logger_formatter.xml \
net_adm.xml \
net_kernel.xml \
os.xml \
@@ -70,11 +75,17 @@ XML_REF4_FILES = app.xml config.xml
XML_REF6_FILES = kernel_app.xml
-XML_PART_FILES =
-XML_CHAPTER_FILES = notes.xml
+XML_PART_FILES = part.xml
+XML_CHAPTER_FILES = \
+ notes.xml \
+ introduction_chapter.xml \
+ logger_chapter.xml
BOOK_FILES = book.xml
+IMAGE_FILES = \
+ logger_arch.png
+
XML_FILES = \
$(BOOK_FILES) $(XML_CHAPTER_FILES) \
$(XML_PART_FILES) $(XML_REF3_FILES) $(XML_REF4_FILES)\
@@ -111,7 +122,7 @@ SPECS_FLAGS = -I../../include
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
-$(HTMLDIR)/%.gif: %.gif
+$(HTMLDIR)/%: %
$(INSTALL_DATA) $< $@
docs: man pdf html
@@ -120,11 +131,12 @@ $(TOP_PDF_FILE): $(XML_FILES)
pdf: $(TOP_PDF_FILE)
-html: gifs $(HTML_REF_MAN_FILE)
+html: images $(HTML_REF_MAN_FILE)
man: $(MAN3_FILES) $(MAN4_FILES) $(MAN6_FILES)
-gifs: $(GIF_FILES:%=$(HTMLDIR)/%)
+images: $(IMAGE_FILES:%=$(HTMLDIR)/%)
+
debug opt:
clean clean_docs:
diff --git a/lib/kernel/doc/src/book.xml b/lib/kernel/doc/src/book.xml
index 81a87d126d..0b69b547e7 100644
--- a/lib/kernel/doc/src/book.xml
+++ b/lib/kernel/doc/src/book.xml
@@ -34,6 +34,9 @@
<preamble>
<contents level="2"></contents>
</preamble>
+ <parts lift="yes">
+ <xi:include href="part.xml"/>
+ </parts>
<applications>
<xi:include href="ref_man.xml"/>
</applications>
diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml
index 91bf57cb91..cb6165c73e 100644
--- a/lib/kernel/doc/src/error_logger.xml
+++ b/lib/kernel/doc/src/error_logger.xml
@@ -31,6 +31,16 @@
<module>error_logger</module>
<modulesummary>Erlang error logger.</modulesummary>
<description>
+
+ <note>
+ <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ old <c>error_logger</c> module can still be used by legacy
+ code, but new code should use the new API instead.</p>
+ <p>See <seealso marker="logger"><c>logger(3)</c></seealso> and
+ the <seealso marker="logger_chapter">Logging</seealso> chapter
+ in the user's guide for more information.</p>
+ </note>
+
<p>The Erlang <em>error logger</em> is an event manager (see
<seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and
<seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>),
@@ -171,14 +181,17 @@ ok</pre>
<func>
<name name="get_format_depth" arity="0"/>
<fsummary>Get the value of the Kernel application variable
- <c>error_logger_format_depth</c>.</fsummary>
+ <c>logger_format_depth</c>.</fsummary>
<desc>
<p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the
value of
- <seealso marker="kernel:kernel_app#error_logger_format_depth">
- error_logger_format_depth</seealso>
+ <seealso marker="kernel_app#logger_format_depth">
+ logger_format_depth</seealso>
in the Kernel application, if Depth is an integer. Otherwise,
<c>unlimited</c> is returned.</p>
+ <p>For backwards compatibility, the value
+ of <c>error_logger_format_depth</c> is used
+ if <c>logger_format_depth</c> is not set.</p>
</desc>
</func>
<func>
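
The get_format_depth/0 fallback documented above can be pictured with a small sketch. This is illustrative code written against the behaviour described in this hunk (prefer logger_format_depth, fall back to error_logger_format_depth, never report less than 10), not the implementation shipped in the patch.

    %% Illustrative only: resolve the format depth as described above.
    format_depth() ->
        Depth =
            case application:get_env(kernel, logger_format_depth) of
                {ok, D} when is_integer(D) ->
                    D;
                _ ->
                    case application:get_env(kernel, error_logger_format_depth) of
                        {ok, D2} when is_integer(D2) -> D2;
                        _ -> unlimited
                    end
            end,
        case Depth of
            unlimited -> unlimited;
            _ -> max(10, Depth)
        end.
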
diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml
new file mode 100644
index 0000000000..6e6990ddda
--- /dev/null
+++ b/lib/kernel/doc/src/introduction_chapter.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Introduction</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>introduction.xml</file>
+ </header>
+
+ <section>
+ <title>Scope</title>
+ <p>The Kernel application has all the code necessary to run
+ the Erlang runtime system: file servers, code servers,
+ and so on.</p>
+ <p>The Kernel application is the first application started. It is
+ mandatory in the sense that the minimal system based on
+ Erlang/OTP consists of Kernel and STDLIB. Kernel
+ contains the following functional areas:</p>
+ <list type="bulleted">
+ <item>Start, stop, supervision, configuration, and distribution of applications</item>
+ <item>Code loading</item>
+ <item>Logging</item>
+ <item>Error logging</item>
+ <item>Global name service</item>
+ <item>Supervision of Erlang/OTP</item>
+ <item>Communication with sockets</item>
+ <item>Operating system interface</item>
+ </list>
+ </section>
+
+ <section>
+ <title>Prerequisites</title>
+ <p>It is assumed that the reader is familiar with the Erlang programming
+ language.</p>
+ </section>
+</chapter>
+
+
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 0762cebc94..554d675383 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -51,10 +51,13 @@
</description>
<section>
- <title>Error Logger Event Handlers</title>
- <p>Two standard error logger event handlers are defined in
- the Kernel application. These are described in
- <seealso marker="error_logger"><c>error_logger(3)</c></seealso>.</p>
+ <title>Logger Handlers</title>
+ <p>Two standard logger handlers are defined in
+ the Kernel application. These are described in the
+ <seealso marker="logger_chapter">Kernel User's Guide</seealso>,
+ and in <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
+ </seealso>.</p>
</section>
<section>
@@ -113,6 +116,7 @@
</section>
<section>
+ <marker id="configuration"/>
<title>Configuration</title>
<p>The following configuration parameters are defined for the Kernel
application. For more information about configuration parameters,
@@ -176,34 +180,105 @@
<p>Permissions are described in
<seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p>
</item>
- <tag><c>error_logger = Value</c></tag>
+ <tag><c>logger_dest = Value</c></tag>
<item>
<p><c>Value</c> is one of:</p>
<taglist>
<tag><c>tty</c></tag>
- <item><p>Installs the standard event handler, which prints error
- reports to <c>stdio</c>. This is the default option.</p></item>
+ <item><p>Installs the standard handler, <seealso marker="logger_std_h">
+ <c>logger_std_h(3)</c></seealso>, with <c>type</c> set
+ to <c>standard_io</c>. This is the default
+ option.</p></item>
<tag><c>{file, FileName}</c></tag>
- <item><p>Installs the standard event handler, which prints error
- reports to file <c>FileName</c>, where <c>FileName</c>
+ <item><p>Installs the standard handler, <seealso marker="logger_std_h">
+ <c>logger_std_h(3)</c></seealso>, with <c>type</c> set
+ to <c>{file, FileName}</c>, where <c>FileName</c>
is a string. The file is opened with encoding UTF-8.</p></item>
+ <tag><c>{disk_log, FileName}</c></tag>
+ <item><p>Installs the disk_log handler, <seealso marker="logger_disk_log_h">
+ <c>logger_disk_log_h(3)</c></seealso>, with <c>file</c> set
+ to <c>FileName</c> (a string), and possibly other disk_log
+ parameters set by the environment variables
+ <c>logger_disk_log_type</c>, <c>logger_disk_log_maxfiles</c> and
+ <c>logger_disk_log_maxbytes</c>,
+ see <seealso marker="#disk_log_vars">below</seealso>. The
+ file is opened with encoding UTF-8.</p></item>
<tag><c>false</c></tag>
<item>
- <p>No standard event handler is installed, but
- the initial, primitive event handler is kept, printing
+ <p>No standard handler is installed, but
+ the initial, primitive handler is kept, printing
raw event messages to <c>tty</c>.</p>
</item>
<tag><c>silent</c></tag>
<item>
- <p>Error logging is turned off.</p>
+ <p>No standard handler is started, and the initial,
+ primitive handler is removed.</p>
</item>
</taglist>
</item>
- <tag><c>error_logger_format_depth = Depth</c></tag>
+ <tag><c>logger_level = Level</c></tag>
+ <item>
+ <p><c>Level = emergency | alert | critical | error | warning |
+ notice | info | debug</c></p>
+ <p>This parameter specifies which log levels to log. The
+ specified level, and all levels that are more severe, will
+ be logged.</p>
+ <p>This configuration parameter is used both for the global
+ logger level, and for the standard handler started by
+ the Kernel application (see <c>logger_dest</c> variable above).</p>
+ <p>The default value is <c>info</c></p>
+ </item>
+ <tag><marker id="disk_log_vars"/>
+ <c>logger_disk_log_type = halt | wrap</c></tag>
+ <item/>
+ <tag><c>logger_disk_log_maxfiles = integer()</c></tag>
+ <item/>
+ <tag><c>logger_disk_log_maxbytes = integer()</c></tag>
+ <item>
+ <p>If <c>logger_dest</c> is set to <c>{disk_log,File}</c>, then these
+ parameters specify the configuration to use when opening the
+ disk log file. They specify the type of disk log, the
+ maximum number of files (if the type is wrap) and the
+ maximum size of each file, respectively.</p>
+ <p>The default values are:</p>
+ <code>
+logger_disk_log_type = wrap
+logger_disk_log_maxfiles = 10
+logger_disk_log_maxbytes = 1048576</code>
+ </item>
+ <tag><marker id="logger_sasl_compatible"/>
+ <c>logger_sasl_compatible = boolean()</c></tag>
+ <item>
+ <p>If this parameter is set to true, then the logger handler
+ started by kernel will not log any progress-, crash-, or
+ supervisor reports. If the SASL application is started,
+ these log events will be sent to a second handler instance
+ named <c>sasl_h</c>, according to the values of the SASL environment
+ variables <c>sasl_error_logger</c>
+ and <c>sasl_errlog_type</c>, see
+ <seealso marker="sasl:sasl_app#configuration">SASL(6)
+ </seealso></p>
+ <p>The default value is <c>false</c></p>
+ <p>See chapter <seealso marker="logger_chapter#compatibility">Backwards
+ compatibility with error_logger</seealso> for more
+ information about handling of the so-called SASL reports.</p>
+ </item>
+ <tag><marker id="logger_log_progress"/>
+ <c>logger_log_progress = boolean()</c></tag>
+ <item>
+ <p>If <c>logger_sasl_compatible = false</c>,
+ then <c>logger_log_progress</c> specifies if progress
+ reports from <c>supervisor</c>
+ and <c>application_controller</c> shall be logged or
+ not.</p>
+ <p>If <c>logger_sasl_compatible = true</c>,
+ then <c>logger_log_progress</c> is ignored.</p>
+ </item>
+ <tag><marker id="logger_format_depth"/>
+ <c>logger_format_depth = Depth</c></tag>
<item>
- <marker id="error_logger_format_depth"></marker>
<p>Can be used to limit the size of the
- formatted output from the error logger event handlers.</p>
+ formatted output from the logger handlers.</p>
<note><p>This configuration parameter was introduced in OTP 18.1
and is experimental. Based on user feedback, it
@@ -214,16 +289,16 @@
useless.</p></note>
<p><c>Depth</c> is a positive integer representing the maximum
- depth to which terms are printed by the error logger event
+ depth to which terms are printed by the logger
handlers included in OTP. This
- configuration parameter is used by the two event handlers
- defined by the Kernel application and the two event
- handlers in the SASL application.
- (If you have implemented your own error handlers, this configuration
- parameter has no effect on them.)</p>
+ configuration parameter is used by the default formatter,
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
+ unless the formatter's <c>depth</c> parameter is explicitly set.
+ (If you have implemented your own formatter, this configuration
+ parameter has no effect on that.)</p>
<p><c>Depth</c> is used as follows: Format strings
- passed to the event handlers are rewritten.
+ received by the formatter are rewritten.
The format controls <c>~p</c> and <c>~w</c> are replaced with
<c>~P</c> and <c>~W</c>, respectively, and <c>Depth</c> is
used as the depth parameter. For details, see
@@ -234,7 +309,20 @@
<c>30</c>. We recommend to test crashing various processes in your
application, examine the logs from the crashes, and then
increase or decrease the value.</p></note>
- </item>
+ </item>
+ <tag><c>logger_max_size = integer() | unlimited</c></tag>
+ <item>
+ <p>This parameter specifies the maximum size (bytes) each
+ log event can have when printed by the standard logger
+ handler. If the resulting string after formatting an event
+ is bigger than this, it will be truncated before being printed
+ to the handler's destination.</p>
+ </item>
+ <tag><c>logger_utc = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c>, the default formatter will display
+ all dates in Coordinated Universal Time (UTC).</p>
+ </item>
<tag><c>global_groups = [GroupTuple]</c></tag>
<item>
<marker id="global_groups"></marker>
@@ -497,6 +585,26 @@ MaxT = TickTime + TickTime / 4</code>
</section>
<section>
+ <title>Deprecated Configuration Parameters</title>
+ <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ old <c>error_logger</c> event manager, and event handlers
+ running on this manager, will still work, but they are not used
+ by default.</p>
+ <p>The following application environment variables can still be
+ set, but they will only be used if the corresponding new logger
+ variables are not set.</p>
+ <taglist>
+ <tag><c>error_logger</c></tag>
+ <item>Replaced by <c>logger_dest</c></item>
+ <tag><c>error_logger_format_depth</c></tag>
+ <item>Replaced by <c>logger_format_depth</c></item>
+ </taglist>
+ <p>See <seealso marker="logger_chapter#compatibility">Backwards
+ compatibility with error_logger</seealso> for more
+ information.</p>
+ </section>
+
+ <section>
<title>See Also</title>
<p><seealso marker="app"><c>app(4)</c></seealso>,
<seealso marker="application"><c>application(3)</c></seealso>,
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
new file mode 100644
index 0000000000..66e6e5c689
--- /dev/null
+++ b/lib/kernel/doc/src/logger.xml
@@ -0,0 +1,478 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger.xml</file>
+ </header>
+ <module>logger</module>
+ <modulesummary>API module for the logger application.</modulesummary>
+
+ <description>
+
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="level"/>
+ <desc>
+ <p>The severity level for the message to be logged.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="log"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="msg_fun"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="metadata"/>
+ <desc>
+ <p>Metadata associated with the message to be logged.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="config"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="handler_id"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_id"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_return"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <section>
+ <title>Macros</title>
+ <p>The following macros are defined:</p>
+
+ <list>
+ <item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_EMERGENCY(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_INFO(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_INFO(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item>
+ </list>
+
+ <p>All macros expand to a call to logger, where <c>Level</c> is
+ taken from the macro name, and the following metadata is added,
+ or merged with the given <c>Metadata</c>:</p>
+
+ <code>
+#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ file=>?FILE,
+ line=>?LINE}
+ </code>
+
+ <p>The call is wrapped in a case statement and will be evaluated
+ only if <c>Level</c> is equal to or below the configured log
+ level.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>emergency(StringOrReport[,Metadata])</name>
+ <name>emergency(Format,Args[,Metadata])</name>
+ <name>emergency(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>emergency</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(emergency,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>alert(StringOrReport[,Metadata])</name>
+ <name>alert(Format,Args[,Metadata])</name>
+ <name>alert(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>alert</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(alert,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>critical(StringOrReport[,Metadata])</name>
+ <name>critical(Format,Args[,Metadata])</name>
+ <name>critical(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>critical</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(critical,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>error(StringOrReport[,Metadata])</name>
+ <name>error(Format,Args[,Metadata])</name>
+ <name>error(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>error</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(error,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>warning(StringOrReport[,Metadata])</name>
+ <name>warning(Format,Args[,Metadata])</name>
+ <name>warning(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>warning</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(warning,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>notice(StringOrReport[,Metadata])</name>
+ <name>notice(Format,Args[,Metadata])</name>
+ <name>notice(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>notice</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(notice,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>info(StringOrReport[,Metadata])</name>
+ <name>info(Format,Args[,Metadata])</name>
+ <name>info(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>info</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(info,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>debug(StringOrReport[,Metadata])</name>
+ <name>debug(Format,Args[,Metadata])</name>
+ <name>debug(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>debug</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(debug,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="log" arity="2"/>
+ <name name="log" arity="3" clause_i="1"/>
+ <name name="log" arity="3" clause_i="2"/>
+ <name name="log" arity="3" clause_i="3"/>
+ <name name="log" arity="4" clause_i="1"/>
+ <name name="log" arity="4" clause_i="2"/>
+ <fsummary>Logs the given message.</fsummary>
+ <type variable="Level"/>
+ <type variable="StringOrReport" name_i="1"/>
+ <type variable="Format" name_i="3"/>
+ <type variable="Args" name_i="3"/>
+ <type variable="Fun" name_i="4"/>
+ <type variable="FunArgs" name_i="4"/>
+ <type variable="Metadata"/>
+ <desc>
+ <p>Log the given message.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_logger_config" arity="0"/>
+ <fsummary>Lookup the current configuration for logger.</fsummary>
+ <desc>
+ <p>Lookup the current configuration for logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_config" arity="1"/>
+ <fsummary>Lookup the current configuration for the given handler.</fsummary>
+ <desc>
+ <p>Lookup the current configuration for the given handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="i" arity="0"/>
+ <fsummary>Get information about all logger configurations</fsummary>
+ <desc>
+ <p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="i" arity="1" clause_i="1"/>
+ <name name="i" arity="1" clause_i="2"/>
+ <name name="i" arity="1" clause_i="3"/>
+ <fsummary>Get information about all logger configurations</fsummary>
+ <desc>
+ <p>The <c>logger:i/1</c> function can be used to get the
+ complete current logger configuration. The way that the information
+ is returned depends on the <c><anno>Action</anno></c>:</p>
+ <taglist>
+ <tag>string</tag>
+ <item>Return the pretty-printed current logger configuration
+ as iodata.</item>
+ <tag>term</tag>
+ <item>Return the current logger configuration as a term. The
+ format of this term may change between releases. For a
+ stable format use <seealso marker="#get_handler_config/1">
+ <c>logger:get_handler_config/1</c></seealso>
+ and <seealso marker="#get_logger_config/0">
+ <c>logger:get_logger_config/0</c></seealso>.
+ The same as calling <c>logger:i()</c>.</item>
+ <tag>print</tag>
+ <item>Pretty-print the complete current logger configuration to
+ standard output. Example:
+ <code><![CDATA[1> logger:i().
+Current logger configuration:
+ Level: info
+ FilterDefault: log
+ Filters:
+ Handlers:
+ Id: logger_std_h
+ Module: logger_std_h
+ Level: info
+ Formatter:
+ Module: logger_formatter
+ Config: #{template => [{logger_formatter,header},"\n",msg,"\n"],
+ legacy_header => true}
+ Filter Default: stop
+ Filters:
+ Id: stop_progress
+ Fun: fun logger_filters:progress/2
+ Config: stop
+ Id: remote_gl
+ Fun: fun logger_filters:remote_gl/2
+ Config: stop
+ Id: domain
+ Fun: fun logger_filters:domain/2
+ Config: {log,prefix_of,[beam,erlang,otp,sasl]}
+ Id: no_domain
+ Fun: fun logger_filters:domain/2
+ Config: {log,no_domain,[]}
+ Handler Config:
+ logger_std_h: #{type => standard_io}
+ Level set per module:
+ Module: my_module
+ Level: debug]]></code>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_logger_filter" arity="2"/>
+ <fsummary>Add a filter to the logger.</fsummary>
+ <desc>
+ <p>Add a filter to the logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handler_filter" arity="3"/>
+ <fsummary>Add a filter to the specified handler.</fsummary>
+ <desc>
+ <p>Add a filter to the specified handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_logger_filter" arity="1"/>
+ <fsummary>Remove a filter from the logger.</fsummary>
+ <desc>
+ <p>Remove the filter with the specified identity from the logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler_filter" arity="2"/>
+ <fsummary>Remove a filter from the specified handler.</fsummary>
+ <desc>
+ <p>Remove the filter with the specified identity from the given handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handler" arity="3"/>
+ <fsummary>Add a handler with the given configuration.</fsummary>
+ <desc>
+ <p>Add a handler with the given configuration.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler" arity="1"/>
+ <fsummary>Remove the handler with the specified identity.</fsummary>
+ <desc>
+ <p>Remove the handler with the specified identity.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_module_level" arity="2"/>
+ <fsummary>Set the log level for the specified module.</fsummary>
+ <desc>
+ <p>Set the log level for the specified module.</p>
+ <p>To change the logging level globally, use
+ <seealso marker="#set_logger_config/2"><c>logger:set_logger_config(level, Level)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="reset_module_level" arity="1"/>
+ <fsummary>Remove a module specific log setting.</fsummary>
+ <desc>
+ <p>Remove a module specific log setting. After this, the
+ global log level is used for the specified module.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_logger_config" arity="1"/>
+ <name name="set_logger_config" arity="2"/>
+ <fsummary>Add or update configuration data for the logger.</fsummary>
+ <desc>
+ <p>Add or update configuration data for the logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_handler_config" arity="2"/>
+ <name name="set_handler_config" arity="3"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="compare_levels" arity="2"/>
+ <fsummary>Compare the severity of two log levels.</fsummary>
+ <desc>
+ <p>Compare the severity of two log levels. Returns <c>gt</c>
+ if <c>Level1</c> is more severe than
+ <c>Level2</c>, <c>lt</c> if <c>Level1</c> is less severe,
+ and <c>eq</c> if the levels are equal.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_process_metadata" arity="1"/>
+ <fsummary>Set metadata to use when logging from current process.</fsummary>
+ <desc>
+ <p>Set metadata which <c>logger</c> automatically inserts
+ into all log events produced on the current
+ process. Subsequent calls will overwrite previous data set
+ by this function.</p>
+ <p>When logging, location data produced by the log macros,
+ and/or metadata given as argument to the log call (API
+ function or macro), will be merged with the process
+ metadata. If the same keys occur, values from the metadata
+ argument to the log call will overwrite values in the
+ process metadata, which in turn will overwrite values from
+ the location data.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_process_metadata" arity="0"/>
+ <fsummary>Retrieve data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Retrieve data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_process_metadata" arity="0"/>
+ <fsummary>Delete data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Delete data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
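
To tie the macros and functions documented above together, here is a short usage sketch. The module and metadata keys are invented for the example; the calls themselves are the ones described in this reference page.

    -module(my_worker).                        %% hypothetical module
    -include_lib("kernel/include/logger.hrl"). %% provides the ?LOG_* macros

    -export([start/1]).

    start(Id) ->
        %% Metadata merged into every event logged from this process.
        logger:set_process_metadata(#{worker_id => Id}),
        %% Allow debug events from this module only.
        logger:set_module_level(?MODULE, debug),
        ?LOG_DEBUG("worker ~p starting", [Id]),
        ?LOG_INFO(#{event => started, id => Id}),
        logger:notice("worker ~p is up", [Id]).
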
diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png
new file mode 100644
index 0000000000..727609a6ef
--- /dev/null
+++ b/lib/kernel/doc/src/logger_arch.png
Binary files differ
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
new file mode 100644
index 0000000000..0bc3b37476
--- /dev/null
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -0,0 +1,815 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Logging</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>logger_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Overview</title>
+ <p>Erlang/OTP provides a standard API for logging. The backend of
+ this API can be used as is, or it can be customized to suit
+ specific needs.</p>
+ <p>It consists of two parts: the <em>logger</em> part and the
+ <em>handler</em> part. The logger forwards log events to one
+ or more handlers.</p>
+
+ <image file="logger_arch.png">
+ <icaption>Conceptual overview</icaption>
+ </image>
+
+ <p><em>Filters</em> can be added to the logger and to each
+ handler. The filters decide if an event is to be forwarded or
+ not, and they can also modify all parts of the log event.</p>
+
+ <p>A <em>formatter</em> can be set for each handler. The formatter
+ does the final formatting of the log event, including the log
+ message itself, and possibly a timestamp, header and other
+ metadata.</p>
+
+ <p>In accordance with the Syslog protocol, RFC-5424, eight
+ severity levels can be specified:</p>
+
+ <table align="left">
+ <row>
+ <cell><strong>Level</strong></cell>
+ <cell align="center"><strong>Integer</strong></cell>
+ <cell><strong>Description</strong></cell>
+ </row>
+ <row>
+ <cell>emergency</cell>
+ <cell align="center">0</cell>
+ <cell>system is unusable</cell>
+ </row>
+ <row>
+ <cell>alert</cell>
+ <cell align="center">1</cell>
+ <cell>action must be taken immediately</cell>
+ </row>
+ <row>
+ <cell>critical</cell>
+ <cell align="center">2</cell>
+ <cell>critical conditions</cell>
+ </row>
+ <row>
+ <cell>error</cell>
+ <cell align="center">3</cell>
+ <cell>error conditions</cell>
+ </row>
+ <row>
+ <cell>warning</cell>
+ <cell align="center">4</cell>
+ <cell>warning conditions</cell>
+ </row>
+ <row>
+ <cell>notice</cell>
+ <cell align="center">5</cell>
+ <cell>normal but significant conditions</cell>
+ </row>
+ <row>
+ <cell>info</cell>
+ <cell align="center">6</cell>
+ <cell>informational messages</cell>
+ </row>
+ <row>
+ <cell>debug</cell>
+ <cell align="center">7</cell>
+ <cell>debug-level messages</cell>
+ </row>
+ <tcaption>Severity levels</tcaption>
+ </table>
+
+ <p>A log event is allowed by Logger if the integer value of
+ its <c>Level</c> is less than or equal to the currently
+ configured log level. The log level can be configured globally,
+ or to allow more verbose logging from a specific part of the
+ system, per module.</p>
+
+ <section>
+ <title>Customizable parts</title>
+
+ <taglist>
+ <tag><marker id="Handler"/>Handler</tag>
+ <item>
+ <p>A handler is defined as a module exporting the following
+ function:</p>
+
+ <code>log(Log, Config) -> ok</code>
+
+ <p>A handler is called by the logger backend after filtering on
+ logger level and on handler level for the handler which is
+ about to be called. The function call is done on the client
+ process, and it is up to the handler implementation whether
+ other processes are involved.</p>
+
+ <p>Multiple instances of the same handler can be
+ added. Configuration is per instance.</p>
+
+ </item>
+
+ <tag><marker id="Filter"/>Filter</tag>
+ <item>
+ <p>Filters can be set on the logger or on a handler. Logger
+ filters are applied first, and if passed, the handler filters
+ for each handler are applied. The handler plugin is only
+ called if all handler filters for the handler in question also
+ pass.</p>
+
+ <p>A filter is specified as:</p>
+
+ <code>{fun((Log,Extra) -> Log | stop | ignore), Extra}</code>
+
+ <p>The configuration parameter <c>filter_default</c>
+ specifies the behavior if all filters return <c>ignore</c>.
+ <c>filter_default</c> is by default set to <c>log</c>.</p>
+
+ <p>The <c>Extra</c> parameter may contain any data that the
+ filter needs.</p>
+ </item>
+
+ <tag><marker id="Formatter"/>Formatter</tag>
+ <item>
+ <p>A formatter is defined as a module exporting the following
+ function:</p>
+
+ <code>format(Log,Extra) -> string()</code>
+
+ <p>The formatter plugin is called by each handler, and the
+ returned string can be printed to the handler's destination
+ (stdout, file, ...).</p>
+ </item>
+
+ </taglist>
+ </section>
+
+ <section>
+ <title>Built-in handlers</title>
+
+ <taglist>
+ <tag><c>logger_std_h</c></tag>
+ <item>
+ <p>This is the default handler used by OTP. Multiple instances
+ can be started, and each instance will write log events to a
+ given destination, console or file. Filters can be used for
+ selecting which events to send to which handler instance.</p>
+ </item>
+
+ <tag><c>logger_disk_log_h</c></tag>
+ <item>
+ <p>This handler behaves much like logger_std_h, except it uses
+ <seealso marker="disk_log"><c>disk_log</c></seealso> as its
+ destination.</p>
+ </item>
+
+ <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag>
+ <item>
+ <p>This handler is to be used for backwards compatibility
+ only. It is not started by default, but will be automatically
+ started the first time an event handler is added
+ with <seealso marker="error_logger#add_report_handler-1">
+ <c>error_logger:add_report_handler/1,2</c></seealso>.</p>
+
+ <p>No built-in event handlers exist.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <title>Built-in filters</title>
+
+ <taglist>
+ <tag><c>logger_filters:domain/2</c></tag>
+ <item>
+ <p>This filter provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>. See
+ <seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso></p>
+ </item>
+
+ <tag><c>logger_filters:level/2</c></tag>
+ <item>
+ <p>This filter provides a way of filtering log events based
+ on the log level. See <seealso marker="logger_filters#level-2">
+ <c>logger_filters:level/2</c></seealso></p>
+ </item>
+
+ <tag><c>logger_filters:progress/2</c></tag>
+ <item>
+ <p>This filter matches all progress reports
+ from <c>supervisor</c> and <c>application_controller</c>.
+ See <seealso marker="logger_filters#progress/2">
+ <c>logger_filters:progress/2</c></seealso></p>
+ </item>
+
+ <tag><c>logger_filters:remote_gl/2</c></tag>
+ <item>
+ <p>This filter matches all events originating from a process
+ that has its group leader on a remote node.
+ See <seealso marker="logger_filters#remote_gl/2">
+ <c>logger_filters:remote_gl/2</c></seealso></p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <title>Default formatter</title>
+
+ <p>The default formatter is <c>logger_formatter</c>.
+ See <seealso marker="logger_formatter#format-2">
+ <c>logger_formatter:format/2</c></seealso>.</p>
+ </section>
+ </section>
+
+ <section>
+ <title>Configuration</title>
+
+ <section>
+ <title>Application environment variables</title>
+ <p>See <seealso marker="kernel_app#configuration">Kernel(6)</seealso> for
+ information about the application environment variables that can
+ be used for configuring logger.</p>
+ </section>
+
+ <section>
+ <title>Logger configuration</title>
+
+ <taglist>
+ <tag><c>level</c></tag>
+ <item>
+ <p>Specifies the severity level to log.</p>
+ </item>
+ <tag><c>filters</c></tag>
+ <item>
+ <p>Logger filters are added or removed with
+ <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso> and
+ <seealso marker="logger#remove_logger_filter-1">
+ <c>logger:remove_logger_filter/1</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#Filter">Filter</seealso> for more
+ information.</p>
+ <p>By default, no filters exist.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what to do with an event if all filters
+ return <c>ignore</c>.</p>
+ <p>Default is <c>log</c>.</p>
+ </item>
+ <tag><c>handlers</c></tag>
+ <item>
+ <p>Handlers are added or removed with
+ <seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso> and
+ <seealso marker="logger#remove_handler-1">
+ <c>logger:remove_handler/1</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#Handler">Handler</seealso> for more
+ information.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="handler_configuration"/>
+ <title>Handler configuration</title>
+ <taglist>
+ <tag><c>level</c></tag>
+ <item>
+ <p>Specifies the severity level to log.</p>
+ </item>
+ <tag><c>filters</c></tag>
+ <item>
+ <p>Handler filters can be specified when adding the handler,
+ or added or removed later with
+ <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso> and
+ <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#Filter">Filter</seealso> for more
+ information.</p>
+ <p>By default, no filters exist.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what to do with an event if all filters
+ return <c>ignore</c>.</p>
+ <p>Default is <c>log</c>.</p>
+ </item>
+ <tag><c>depth = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>Specifies if the depth of terms in the log events shall
+ be limited by using control sequences <c>~P</c>
+ and <c>~W</c> instead of <c>~p</c> and <c>~w</c>,
+ respectively. See
+ <seealso marker="stdlib:io#format-1"><c>io:format</c></seealso>.</p>
+ </item>
+ <tag><c>max_size = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>Specifies if the size of a log event shall be limited by
+ truncating the formatted string.</p>
+ </item>
+ <tag><c>formatter = {Module::module(),Extra::term()}</c></tag>
+ <item>
+ <p>See <seealso marker="#Formatter">Formatter</seealso> for more
+ information.</p>
+ <p>The default module is <seealso marker="logger_formatter">
+ <c>logger_formatter</c></seealso>, and <c>Extra</c> is
+ its configuration map.</p>
+ </item>
+ </taglist>
+
+ <p>Note that <c>level</c> and <c>filters</c> are obeyed by
+ Logger itself before forwarding the log events to each
+ handler, while <c>depth</c>, <c>max_size</c>
+ and <c>formatter</c> are left to the handler
+ implementation. All of Logger's built-in handlers apply these
+ configuration parameters before printing.</p>
+ </section>
+
+ </section>
+
+ <section>
+ <marker id="compatibility"/>
+ <title>Backwards compatibility with error_logger</title>
+ <p>Logger provides backwards compatibility with the old
+ <c>error_logger</c> in the following ways:</p>
+
+ <taglist>
+ <tag>Legacy event handlers</tag>
+ <item>
+ <p>To use event handlers written for <c>error_logger</c>, just
+ add your event handler with</p>
+ <code>
+error_logger:add_report_handler/1,2.
+ </code>
+ <p>This will automatically start the <c>error_logger</c>
+ event manager, and add <c>error_logger</c> as a
+ handler to <c>logger</c>, with configuration</p>
+<code>
+#{level=>info,
+ filter_default=>log,
+ filters=>[]}.
+</code>
+ <p>Note that this handler will ignore events that do not
+ originate from the old <c>error_logger</c> API, or from
+ within OTP. This means that if your code uses the logger API
+ for logging, then your log events will be discarded by this
+ handler.</p>
+ <p>Also note that <c>error_logger</c> is not overload
+ protected.</p>
+ </item>
+ <tag>Logger API</tag>
+ <item>
+ <p>The old <c>error_logger</c> API still exists, but should
+ only be used by legacy code. It will be removed in a later
+ release.</p>
+ </item>
+ <tag>Output format</tag>
+ <item>
+ <p>To get log events on the same format as produced
+ by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>,
+ use the default formatter, <c>logger_formatter</c>, with
+ configuration parameter <c>legacy_header=>true</c>. This is
+ also the default.</p>
+ </item>
+ <tag>Default format of log events from OTP</tag>
+ <item>
+ <p>By default, all log events originating from within OTP,
+ except the former so-called "SASL reports", look the same as
+ before.</p>
+ </item>
+ <tag>SASL reports</tag>
+ <item>
+ <p>By SASL reports we mean supervisor reports, crash reports
+ and progress reports.</p>
+ <p>In earlier releases, these reports were only logged when
+ the SASL application was running, and they were printed
+ through specific event handlers
+ named <c>sasl_report_tty_h</c>
+ and <c>sasl_report_file_h</c>.</p>
+ <p>The destination of these log events was configured by
+ environment variables for the SASL application.</p>
+ <p>Due to the specific event handlers, the output format
+ slightly differed from other log events.</p>
+ <p>As of OTP-21, the concept of SASL reports is removed,
+ meaning that the default behavior is as follows:</p>
+ <list>
+ <item>Supervisor reports, crash reports and progress reports
+ are no longer connected to the SASL application.</item>
+ <item>Supervisor reports and crash reports are logged by
+ default.</item>
+ <item>Progress reports are not logged by default, but can be
+ enabled with the kernel environment
+ variable <c>logger_log_progress</c>.</item>
+ <item>The output format is the same for all log
+ events.</item>
+ </list>
+ <p>If the old behavior is preferred, the kernel environment
+ variable <c>logger_sasl_compatible</c> can be set
+ to <c>true</c>. The old SASL environment variables can then
+ be used as before, and the SASL reports will only be printed
+ if the SASL application is running - through a second log
+ handler named <c>sasl_h</c>.</p>
+ <p>All SASL reports have a metadata
+ field <c>domain=>[beam,erlang,otp,sasl]</c>, which can be
+ used, for example, by filters to stop or allow the
+ events.</p>
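+ <p>For example, a handler filter that stops all such events could
+ be added like this (a sketch using
+ <c>logger_filters:domain/2</c>; the handler id <c>h1</c> is just
+ an example):</p>
+<code>
+logger:add_handler_filter(h1, stop_sasl,
+    {fun logger_filters:domain/2, {stop, starts_with, [beam,erlang,otp,sasl]}}).
+ </code>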
+ </item>
+ </taglist>
+ </section>
+
+
+ <section>
+ <title>Error handling</title>
+ <p>Log data is expected to be either a format string and
+ arguments, a string (unicode:chardata), or a report (map or
+ key-value list) which can be converted to a format string and
+ arguments by the handler. A default report callback should be
+ included in the log event's metadata, which can be used for
+ converting the report to a format string and arguments. The
+ handler might also do a custom conversion if the default format
+ is not desired.</p>
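+ <p>A report callback is simply a fun taking the report and
+ returning a format string and an argument list, for example (a
+ minimal sketch; the report keys <c>state</c> and <c>reason</c>
+ are just examples):</p>
+<code>
+fun(#{state := State, reason := Reason}) ->
+        {"state: ~p, reason: ~p", [State, Reason]}
+end
+ </code>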
+ <p><c>logger</c> does, to a certain extent, check its input data
+ before forwarding a log event to the handlers, but it does not
+ evaluate conversion funs or check the validity of format strings
+ and arguments. This means that any filter or handler must be
+ careful when formatting the data of a log event, making sure
+ that it does not crash due to bad input data or faulty
+ callbacks.</p>
+ <p>If a filter or handler still crashes, logger will remove the
+ filter or handler in question from the configuration, and then
+ print a short error message on the console. A debug event
+ containing the crash reason and other details is also issued,
+ and can be seen if a handler is installed which logs on debug
+ level.</p>
+ </section>
+
+ <section>
+ <title>Example: add a handler to log debug events to file</title>
+ <p>When starting an Erlang node, the default behavior is that all
+ log events with level info and above are logged to the
+ console. In order to also log debug events, you can either
+ change the global log level to <c>debug</c> or add a separate
+ handler to take care of this. In this example we will add a new
+ handler which prints the debug events to a separate file.</p>
+ <p>First, we add an instance of logger_std_h with
+ type <c>{file,File}</c>, and we set the handler's level
+ to <c>debug</c>:</p>
+ <pre>
+1> <input>Config = #{level=>debug,logger_std_h=>#{type=>{file,"./debug.log"}}}.</input>
+#{logger_std_h => #{type => {file,"./debug.log"}},
+ level => debug}
+2> <input>logger:add_handler(debug_handler,logger_std_h,Config).</input>
+ok</pre>
+ <p>By default, the handler receives all events, so we need to add a filter
+ to stop all non-debug events:</p>
+ <pre>
+3> <input>Fun = fun(#{level:=debug}=Log,_) -> Log; (_,_) -> stop end.</input>
+#Fun&lt;erl_eval.12.98642416>
+4> <input>logger:add_handler_filter(debug_handler,allow_debug,{Fun,[]}).</input>
+ok</pre>
+ <p>And finally, we need to make sure that the logger itself allows
+ debug events. This can either be done by setting the global
+ logger level:</p>
+ <pre>
+5> <input>logger:set_logger_config(level,debug).</input>
+ok</pre>
+ <p>Or by allowing debug events from one or a few modules only:</p>
+ <pre>
+6> <input>logger:set_module_level(mymodule,debug).</input>
+ok</pre>
+
+ </section>
+
+ <section>
+ <title>Example: implement a handler</title>
+ <p>The only requirement that a handler MUST fulfill is to export
+ the following function:</p>
+ <code>log(logger:log(),logger:config()) -> ok</code>
+ <p>It may also implement the following callbacks:</p>
+ <code>
+adding_handler(logger:handler_id(),logger:config()) -> {ok,logger:config()} | {error,term()}
+removing_handler(logger:handler_id()) -> ok
+changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()}
+ </code>
+ <p>When <c>logger:add_handler(Id,Module,Config)</c> is called, logger
+ will first call <c>Module:adding_handler(Id,Config)</c>, and if it
+ returns <c>{ok,NewConfig}</c>, <c>NewConfig</c> is written to the
+ configuration database. After this, the handler may receive log
+ events as calls to <c>Module:log/2</c>.</p>
+ <p>A handler can be removed by calling
+ <c>logger:remove_handler(Id)</c>. logger will call
+ <c>Module:removing_handler(Id)</c>, and then remove the handler's
+ configuration from the configuration database.</p>
+ <p>When <c>logger:set_handler_config/2,3</c> is called, logger calls
+ <c>Module:changing_config(Id,OldConfig,NewConfig)</c>. If this
+ function returns <c>{ok,NewConfig}</c>, <c>NewConfig</c> is written
+ to the configuration database.</p>
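+
+ <p>A minimal <c>changing_config/3</c> callback that accepts any new
+ configuration could look like this (a sketch):</p>
+ <code>
+changing_config(_Id, _OldConfig, NewConfig) ->
+    {ok, NewConfig}.
+ </code>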
+
+ <p>A simple handler which prints to the console could be
+ implemented as follows:</p>
+ <code>
+-module(myhandler).
+-export([log/2]).
+
+log(#{msg:={report,R}},_) ->
+ io:format("~p~n",[R]);
+log(#{msg:={string,S}},_) ->
+ io:put_chars(S);
+log(#{msg:={F,A}},_) ->
+ io:format(F,A).
+ </code>
+
+ <p>A simple handler which prints to file could be implemented like
+ this:</p>
+ <code>
+-module(myhandler).
+-export([adding_handler/2, removing_handler/2, log/2]).
+
+adding_handler(_Id,#{myhandler_file:=File}=Config) ->
+    %% The file name is taken from the handler configuration
+    {ok,Fd} = file:open(File,[append,{encoding,utf8}]),
+    {ok,Config#{myhandler_fd=>Fd}}.
+
+removing_handler(_Id,#{myhandler_fd:=Fd}) ->
+    _ = file:close(Fd),
+    ok.
+
+log(#{msg:={report,R}},#{myhandler_fd:=Fd}) ->
+    io:format(Fd,"~p~n",[R]);
+log(#{msg:={string,S}},#{myhandler_fd:=Fd}) ->
+    io:put_chars(Fd,S);
+log(#{msg:={F,A}},#{myhandler_fd:=Fd}) ->
+    io:format(Fd,F,A).
+ </code>
+
+ <p>Note that none of the above handlers have any overload
+ protection, and all log events are printed directly from the
+ client process. Nor do the handlers use the formatter or
+ in any way add time or other metadata to the printed events.</p>
+
+ <p>For examples of overload protection, please refer to the
+ implementation
+ of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ </seealso>.</p>
+
+ <p>Below is a simpler example of a handler which logs through a
+ single process, and uses the default formatter to achieve a
+ uniform look of the log events.</p>
+ <p>It also uses the metadata field <c>report_cb</c>, if it exists,
+ to print reports in the way the event issuer suggests. The
+ formatter will normally do this, but if the handler either has
+ its own default (as in this example), or if the
+ given <c>report_cb</c> is not to be used at all, then the
+ handler must take care of this itself.</p>
+ <code>
+-module(myhandler).
+-export([adding_handler/2, removing_handler/2, log/2]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
+
+adding_handler(_Id,Config) ->
+    {ok,Pid} = gen_server:start(?MODULE,Config,[]),
+    {ok,Config#{myhandler_pid=>Pid}}.
+
+removing_handler(_Id,#{myhandler_pid:=Pid}) ->
+    gen_server:stop(Pid).
+
+log(Log,#{myhandler_pid:=Pid} = Config) ->
+    gen_server:cast(Pid,{log,Log,Config}).
+
+init(#{myhandler_file:=File}) ->
+    {ok,Fd} = file:open(File,[append,{encoding,utf8}]),
+    {ok,#{file=>File,fd=>Fd}}.
+
+handle_call(_,_,State) ->
+    {reply,{error,bad_request},State}.
+
+handle_cast({log,Log,Config},#{fd:=Fd} = State) ->
+    do_log(Fd,Log,Config),
+    {noreply,State}.
+
+terminate(_Reason,#{fd:=Fd}) ->
+    _ = file:close(Fd),
+    ok.
+
+do_log(Fd,#{msg:={report,R},meta:=Meta} = Log,Config) ->
+    %% Use the report_cb from the event's metadata if it exists,
+    %% otherwise fall back to this handler's own default
+    Fun = maps:get(report_cb,Meta,fun my_report_cb/1),
+    {F,A} = Fun(R),
+    do_log(Fd,Log#{msg=>{F,A}},Config);
+do_log(Fd,Log,#{formatter:={FModule,FConfig}}) ->
+    String = FModule:format(Log,FConfig),
+    io:put_chars(Fd,String).
+
+my_report_cb(R) ->
+    {"~p",[R]}.
+ </code>
+ </section>
+
+ <section>
+ <marker id="overload_protection"/>
+ <title>Protecting the handler from overload</title>
+ <p>In order for the built-in handlers to survive, and stay responsive,
+ during periods of high load (i.e. when huge numbers of incoming
+ log requests must be handled), a mechanism for overload protection
+ has been implemented in the
+ <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ </seealso> handlers. The mechanism, which is identical for both handlers, works
+ as follows:</p>
+
+ <section>
+ <title>Message queue length</title>
+ <p>The handler process keeps track of the length of its message
+ queue and reacts in different ways depending on the current status.
+ The purpose is to keep the handler in, or quickly return it to, a
+ state where it can keep up with the pace of incoming log
+ requests. The memory usage of the handler must never be allowed to
+ grow without bound, since that would eventually cause the
+ handler to crash. Three thresholds with associated actions have been
+ defined:</p>
+
+ <taglist>
+ <tag><c>toggle_sync_qlen</c></tag>
+ <item>
+ <p>The default value of this level is <c>10</c> messages,
+ and as long as the length of the message queue is lower, all log
+ requests are handled asynchronously. This simply means that the
+ process sending the log request (by calling a log function in the
+ logger API) does not wait for a response from the handler but
+ continues executing immediately after the request (i.e. it will not
+ be affected by the time it takes the handler to print to the log
+ device). If the message queue grows larger than this value, however,
+ the handler starts handling the log requests synchronously instead,
+ meaning the process sending the request will have to wait for a
+ response. When the handler manages to reduce the message queue to a
+ level below the <c>toggle_sync_qlen</c> threshold, asynchronous
+ operation is resumed. The switch from asynchronous to synchronous
+ mode will force the logging tempo of a few busy senders to slow down,
+ but cannot protect the handler sufficiently in a situation with many
+ concurrent senders.</p>
+ </item>
+ <tag><c>drop_new_reqs_qlen</c></tag>
+ <item>
+ <p>When the message queue has grown larger than this threshold, which
+ defaults to <c>200</c> messages, the handler switches to a mode in
+ which it drops any new requests being made. Dropping a message in
+ this state means that the log function never actually sends a message
+ to the handler. The log call simply returns without an action. When
+ the length of the message queue has been reduced to a level below this
+ threshold, synchronous or asynchronous request handling mode is
+ resumed.</p>
+ </item>
+ <tag><c>flush_reqs_qlen</c></tag>
+ <item>
+ <p>Above this threshold, which defaults to <c>1000</c> messages, a
+ flush operation takes place, in which all messages buffered in the
+ process mailbox get deleted without any logging actually taking
+ place. (Processes waiting for a response from a synchronous log request
+ will receive a reply indicating that the request has been dropped).</p>
+ </item>
+ </taglist>
+
+ <p>For the overload protection algorithm to work properly, it is a
+ requirement that:</p>
+
+ <p><c>toggle_sync_qlen &lt; drop_new_reqs_qlen &lt; flush_reqs_qlen</c></p>
+
+ <p>During high load scenarios, the length of the handler message queue
+ rarely grows in a linear and predictable way. Instead, whenever the
+ handler process gets scheduled in, it can have an almost arbitrary number
+ of messages waiting in the mailbox. It's for this reason that the overload
+ protection mechanism is focused on acting quickly and quite drastically
+ (such as immediately dropping or flushing messages) as soon as a large
+ queue length is detected. </p>
+
+ <p>The thresholds listed above may be modified by the user if, e.g., a handler
+ should not drop or flush messages unless the message queue length grows
+ extremely large. (The handler must be allowed to use large amounts of memory
+ under such circumstances, however.) Another example of when the user might want
+ to change the settings is if, for performance reasons, the logging processes must
+ never get blocked by synchronous log requests, while dropping or flushing requests
+ is perfectly acceptable (since it doesn't affect the performance of the
+ loggers).</p>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{logger_std_h =>
+ #{type => {file,"./system_info.log"},
+ toggle_sync_qlen => 100,
+ drop_new_reqs_qlen => 1000,
+ flush_reqs_qlen => 2000}}).
+ </code>
+ </section>
+
+ <section>
+ <title>Controlling bursts of log requests</title>
+ <p>A potential problem with large bursts of log requests is that log files
+ may get full or wrapped too quickly (in the latter case overwriting
+ previously logged data that could be of great importance). For this reason,
+ both built-in handlers offer the possibility to set a maximum number of
+ requests to be processed within a certain time frame. With this burst control
+ feature enabled, the handler will take care of bursts of log requests
+ without choking log files, or the console, with massive amounts of
+ printouts. These are the configuration parameters:</p>
+
+ <taglist>
+ <tag><c>enable_burst_limit</c></tag>
+ <item>
+ <p>This is set to <c>true</c> by default. The value <c>false</c>
+ disables the burst control feature.</p>
+ </item>
+ <tag><c>burst_limit_size</c></tag>
+ <item>
+ <p>This is how many requests should be processed within the
+ <c>burst_window_time</c> time frame. After this maximum has been
+ reached, successive requests will be dropped until the end of the
+ time frame. The default value is <c>500</c> messages.</p>
+ </item>
+ <tag><c>burst_window_time</c></tag>
+ <item>
+ <p>The default window is <c>1000</c> milliseconds long.</p>
+ </item>
+ </taglist>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+ #{disk_log_opts =>
+ #{file => "./my_disk_log"},
+ logger_disk_log_h =>
+ #{burst_limit_size => 10,
+ burst_window_time => 500}}).
+ </code>
+ </section>
+
+ <section>
+ <title>Terminating a large handler</title>
+ <p>A handler process may grow large even if it can manage peaks of high load
+ without crashing. The overload protection mechanism includes user configurable
+ levels for a maximum allowed message queue length and maximum allowed memory
+ usage. This feature is disabled by default, but can be switched on by means
+ of the following configuration parameters:</p>
+
+ <taglist>
+ <tag><c>enable_kill_overloaded</c></tag>
+ <item>
+ <p>This is set to <c>false</c> by default. The value <c>true</c>
+ enables the feature.</p>
+ </item>
+ <tag><c>handler_overloaded_qlen</c></tag>
+ <item>
+ <p>This is the maximum allowed queue length. If the mailbox grows larger
+ than this, the handler process gets terminated.</p>
+ </item>
+ <tag><c>handler_overloaded_mem</c></tag>
+ <item>
+ <p>This is the maximum allowed memory usage of the handler process. If
+ the handler grows any larger, the process gets terminated.</p>
+ </item>
+ <tag><c>handler_restart_after</c></tag>
+ <item>
+ <p>If the handler gets terminated because of its queue length or
+ memory usage, it can get automatically restarted again after a
+ configurable delay time. The time is specified in milliseconds
+ and <c>5000</c> is the default value. The value <c>never</c> can
+ also be set, which prevents a restart.</p>
+ </item>
+ </taglist>
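+
+ <p>A configuration example (a sketch; the threshold values are only
+ examples):</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+                   #{logger_std_h =>
+                         #{enable_kill_overloaded => true,
+                           handler_overloaded_qlen => 100000,
+                           handler_overloaded_mem => 10000000,
+                           handler_restart_after => 10000}}).
+ </code>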
+ </section>
+ </section>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>SASL(6)</c></seealso></p>
+ </section>
+</chapter>
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
new file mode 100644
index 0000000000..90cc4fec30
--- /dev/null
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -0,0 +1,146 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_disk_log_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_disk_log_h.xml</file>
+ </header>
+ <module>logger_disk_log_h</module>
+ <modulesummary>A disk_log based handler for the Logger
+ application.</modulesummary>
+
+ <description>
+ <p>This is a handler for the Logger application that offers circular
+ (wrapped) logs by using the disk_log application. Multiple instances
+ of this handler can be added to logger, and each instance will print to
+ its own disk_log file, created with the name and settings specified in
+ the handler configuration.</p>
+ <p>The default standard handler,
+ <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be
+ replaced by a disk_log handler at startup of the kernel application.
+ See an example of this below.</p>
+ <p>The handler has an overload protection mechanism that will keep the handler
+ process and the kernel application alive during a high load of log
+ requests. How this feature works, and how to modify the configuration,
+ is described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the disk_log handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which may contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, as well as handler specific parameters.</p>
+ <p>The settings for the disk_log log file should be specified with the
+ key <c>disk_log_opts</c>. These settings are a subset of the disk_log
+ datatype
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>.</p>
+ <p>Parameters in the <c>disk_log_opts</c> map:</p>
+ <taglist>
+ <tag><c>file</c></tag>
+ <item>This is the full name of the disk_log log file.</item>
+ <tag><c>type</c></tag>
+ <item>This is the disk_log type, <c>wrap</c> or <c>halt</c>. The
+ default value is <c>wrap</c>.</item>
+ <tag><c>max_no_files</c></tag>
+ <item>This is the maximum number of files that disk_log will use
+ for its circular logging. The default value is <c>10</c>. (The setting
+ has no effect on a halt log).</item>
+ <tag><c>max_no_bytes</c></tag>
+ <item>This is the maximum number of bytes that will be written to
+ a log file before disk_log proceeds with the next file in order (or
+ generates an error in case of a full halt log). The default value for
+ a wrap log is <c>1048576</c> bytes, and <c>infinity</c> for a halt
+ log.</item>
+ </taglist>
+ <p>Specific configuration for the handler (represented as a sub map)
+ is specified with the key <c>logger_disk_log_h</c>. It may contain the
+ following parameter:</p>
+ <taglist>
+ <tag><c>filesync_repeat_interval</c></tag>
+ <item>
+ <p>This value (in milliseconds) specifies how often the handler will
+ do a disk_log sync operation in order to make sure that buffered data
+ gets written to disk. The handler will repeatedly attempt this
+ operation, but only perform it if something has actually been logged
+ since the last sync. The default value is <c>5000</c> milliseconds.
+ If <c>no_repeat</c> is set as value, the repeated sync operation is
+ disabled. The user can also call the
+ <seealso marker="logger_disk_log_h#disk_log_sync-1"><c>disk_log_sync/1</c>
+ </seealso> function to perform a disk_log sync.</p></item>
+ </taglist>
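+ <p>For example, assuming the argument is the handler instance name, a
+ manual sync of a handler named <c>my_disk_log_h</c> (as in the
+ example further down) could be triggered like this (a sketch):</p>
+ <code type="none">
+logger_disk_log_h:disk_log_sync(my_disk_log_h).
+ </code>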
+ <p>There are a number of other configuration parameters available, that are
+ to be used for customizing the overload protection behaviour. The same
+ parameters are used both in the standard handler and the disk_log handler,
+ and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>Note that when changing the configuration of the handler at runtime, by
+ calling
+ <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2
+ or logger:set_handler_config/3</c></seealso>, the <c>disk_log_opts</c>
+ settings may not be modified.</p>
+ <p>Example of adding a disk_log handler:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+ #{level => error,
+ filter_default => log,
+ disk_log_opts =>
+ #{file => "./my_disk_log",
+ type => wrap,
+ max_no_files => 4,
+ max_no_bytes => 10000},
+ logger_disk_log_h =>
+ #{filesync_repeat_interval => 1000}}).
+ </code>
+ <p>In order to use the disk_log handler instead of the default standard
+ handler when starting an Erlang node, use the kernel configuration parameter
+ <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with
+ value <c>{disk_log,FileName}</c>. Example:</p>
+ <code type="none">
+erl -kernel logger_dest '{disk_log,"./system_disk_log"}'
+ </code>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="disk_log_sync" arity="1" clause_i="1"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml
new file mode 100644
index 0000000000..d742391e35
--- /dev/null
+++ b/lib/kernel/doc/src/logger_filters.xml
@@ -0,0 +1,191 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_filters</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_filters.xml</file>
+ </header>
+ <module>logger_filters</module>
+ <modulesummary>Filters to use with logger.</modulesummary>
+
+ <description>
+ <p>Filters to use with logger. All functions exported from this
+ module can be used as logger or handler
+ filters. See <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ and <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>
+ for more information about how filters are added.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="domain" arity="2"/>
+ <fsummary>Filter log events based on the domain field in metadata.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based on the
+ <c>domain</c> field in <c>Metadata</c>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter is specified when
+ adding the filter
+ via <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the <c>domain</c> field
+ in the log event's metadata (<c>Domain</c>)
+ to <c><anno>MatchDomain</anno></c> as follows:</p>
+
+ <taglist>
+ <tag><c><anno>Compare</anno> = starts_with</c></tag>
+ <item><p>The filter matches if <c>MatchDomain</c> is a prefix
+ of <c>Domain</c>.</p></item>
+ <tag><c><anno>Compare</anno> = prefix_of</c></tag>
+ <item><p>The filter matches if <c>Domain</c> is a prefix
+ of <c>MatchDomain</c>.</p></item>
+ <tag><c><anno>Compare</anno> = equals</c></tag>
+ <item><p>The filter matches if <c>Domain</c> is equal
+ to <c>MatchDomain</c>.</p></item>
+ <tag><c><anno>Compare</anno> = no_domain</c></tag>
+ <item><p>The filter matches if there is no domain field in
+ metadata. In this case <c><anno>MatchDomain</anno></c> shall
+ be <c>[]</c>.</p></item>
+ </taglist>
+
+ <p>If the filter matches and <c><anno>Action</anno> =
+ log</c>, the log event is allowed. If the filter matches
+ and <c><anno>Action</anno> = stop</c>, the log event is
+ stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, will decide
+ if the event is allowed or not.</p>
+
+ <p>Log events that do not contain any domain field will
+ only match when <c><anno>Compare</anno> = no_domain</c>.</p>
+
+ <p>Example: stop all events with
+ domain <c>[beam,erlang,otp,sasl|_]</c></p>
+
+ <code>
+logger:set_handler_config(h1,filter_default,log). % this is the default
+Filter = {fun logger_filters:domain/2,{stop,starts_with,[beam,erlang,otp,sasl]}}.
+logger:add_handler_filter(h1,no_sasl,Filter).
+ok</code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="level" arity="2"/>
+ <fsummary>Filter log events based on the log level.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based
+ on the log level. It matches log events by comparing the
+ log level with a predefined <c>MatchLevel</c>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter is specified when
+ adding the filter
+ via <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the event's log level
+ (<c>Level</c>) to <c><anno>MatchLevel</anno></c> by
+ calling <seealso marker="logger#compare_levels-2">
+ <c>logger:compare_levels(Level,MatchLevel) -> CmpRet</c></seealso>. It
+ matches the event if:</p>
+
+ <list>
+ <item><c>CmpRet = eq</c> and <c><anno>Operator</anno> =
+ eq | lteq | gteq</c></item>
+ <item><c>CmpRet = lt</c> and <c><anno>Operator</anno> =
+ lt | lteq | neq</c></item>
+ <item><c>CmpRet = gt</c> and <c><anno>Operator</anno> =
+ gt | gteq | neq</c></item>
+ </list>
+
+ <p>If the filter matches and <c><anno>Action</anno> =
+ log</c>, the log event is allowed. If the filter matches
+ and <c><anno>Action</anno> = stop</c>, the log event is
+ stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, will decide
+ if the event is allowed or not.</p>
+
+ <p>Example: only allow debug level log events</p>
+
+ <code>
+logger:set_handler_config(h1,filter_default,stop).
+Filter = {fun logger_filters:level/2,{log,eq,debug}}.
+logger:add_handler_filter(h1,debug_only,Filter).
+ok</code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="progress" arity="2"/>
+ <fsummary>Filter progress reports from supervisor and application_controller.</fsummary>
+ <desc>
+ <p>This filter matches all progress reports
+ from <c>supervisor</c> and <c>application_controller</c>.</p>
+
+ <p>If <c><anno>Extra</anno> = log</c>, the progress reports
+ are allowed. If <c><anno>Extra</anno> = stop</c>, the
+ progress reports are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
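+
+ <p>For example, progress reports could be stopped for a handler
+ like this (a sketch; the handler id <c>h1</c> is just an
+ example):</p>
+
+ <code>
+logger:add_handler_filter(h1,stop_progress,{fun logger_filters:progress/2,stop}).</code>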
+ </desc>
+ </func>
+
+ <func>
+ <name name="remote_gl" arity="2"/>
+ <fsummary>Filter events with group leader on remote node.</fsummary>
+ <desc>
+ <p>This filter matches all events originating from a process
+ that has its group leader on a remote node.</p>
+
+ <p>If <c><anno>Extra</anno> = log</c>, the matching events
+ are allowed. If <c><anno>Extra</anno> = stop</c>, the
+ matching events are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
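+
+ <p>For example, events from processes with a remote group leader
+ could be stopped for all handlers with a logger filter like this
+ (a sketch):</p>
+
+ <code>
+logger:add_logger_filter(stop_remote,{fun logger_filters:remote_gl/2,stop}).</code>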
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
new file mode 100644
index 0000000000..6a17e3641f
--- /dev/null
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_formatter</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_formatter.xml</file>
+ </header>
+ <module>logger_formatter</module>
+ <modulesummary>Default formatter for the Logger application.</modulesummary>
+
+ <description>
+ <p>Default formatter for the Logger application.</p>
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="template"/>
+ <desc>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name name="format" arity="2"/>
+ <fsummary>Formats the given message.</fsummary>
+ <desc>
+ <p>Formats the given message.</p>
+ <p>The template is a list of atoms, tuples and strings. Atoms
+ can be <c>level</c> or <c>msg</c>, which are placeholders
+ for the severity level and the log message,
+ respectively. Tuples are interpreted as placeholders for
+ metadata. Each element in the tuple must be an atom which
+ matches a key in the nested metadata map, e.g. the
+ tuple <c>{key1,key2}</c> will be replaced by the value of
+ the <c>key2</c> field in this nested map (the value will be
+ converted to a string):</p>
+
+<code>
+#{key1=>#{key2=>my_value,
+ ...},
+ ...}</code>
+
+
+ <p>Strings are printed literally.</p>
+
+ <p><c>depth</c> is a positive integer representing the maximum
+ depth to which terms shall be printed by this
+ formatter. Format strings passed to this formatter are
+ rewritten. The format controls ~p and ~w are replaced with
+ ~P and ~W, respectively, and the value is used as the depth
+ parameter. For details, see
+ <seealso marker="stdlib:io#format-2">io:format/2,3</seealso>
+ in STDLIB.</p>
+
+ <p><c>chars_limit</c> is a positive integer representing the
+ value of the option with the same name to be used when calling
+ <seealso marker="stdlib:io#format-3">io:format/3</seealso>. This
+ value limits the total number of characters printed by the
+ formatter. Note that this is a soft limit. For a hard
+ truncation limit, see option <c>max_size</c>.</p>
+
+ <p><c>max_size</c> is a positive integer representing the
+ maximum size a string returned from this formatter can
+ have. If the formatted string is longer, after possibly
+ being limited by <c>depth</c> and/or <c>chars_limit</c>, it
+ will be truncated.</p>
+
+ <p><c>utc</c> is a boolean. If set to true, all dates are
+ displayed in Coordinated Universal Time (UTC). Default
+ is <c>false</c>.</p>
+
+ <p><c>report_cb</c> must be a function with arity 1,
+ returning <c>{Format,Args}</c>. This function will replace
+ any <c>report_cb</c> found in metadata.</p>
+
+ <p>If <c>single_line=true</c>, all newlines in the message are
+ replaced with <c>", "</c>, and whitespaces following directly
+ after newlines are removed. Note that newlines added by the
+ formatter template are not replaced.</p>
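+
+ <p>As an example, the formatter configuration of a handler could
+ be set like this (a sketch combining some of the parameters
+ above; the handler id <c>h1</c> is just an example):</p>
+
+ <code>
+logger:set_handler_config(h1, formatter,
+                          {logger_formatter, #{single_line => true,
+                                               utc => true,
+                                               max_size => 256}}).</code>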
+
+ <p>If <c>legacy_header=true</c> a header field is added to
+ logger_formatter's part of <c>Metadata</c>. The value of
+ this field is a string similar to the header created by the
+ old <c>error_logger</c> event handlers. It can be included
+ in the log event by adding the
+ tuple <c>{logger_formatter,header}</c> to the template.</p>
+
+ <p>The default template when <c>legacy_header=true</c> is</p>
+
+ <code>[{logger_formatter,header},"\n",msg,"\n"]</code>
+
+ <p>which will cause log entries like this:</p>
+
+ <code>=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 ===
+ process: &lt;0.74.0&gt;
+ exit_reason: "Something went wrong"</code>
+
+ <p>Note that all eight severity levels can occur here, not
+ only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c>. Also note
+ that microseconds are added at the end of the
+ timestamp.</p>
+
+ <p>The default template when <c>single_line=true</c> is</p>
+
+ <code>[time," ",level,": ",msg,"\n"]</code>
+
+ <p>which will cause log entries like this:</p>
+
+ <code>2017-12-29 13:31:49.640317 error: process: &lt;0.74.0&gt;, exit_reason: "Something went wrong"</code>
+
+ <p>The default template when both <c>legacy_header</c> and
+ <c>single_line</c> are set to false is:</p>
+
+ <code>[time," ",level,":\n",msg,"\n"]</code>
+
+ <p>which will cause log entries like this:</p>
+
+ <code>2017-12-29 13:32:25.191925 error:
+ process: &lt;0.74.0&gt;
+ exit_reason: "Something went wrong"</code>
+
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
new file mode 100644
index 0000000000..fe9b9ca5a9
--- /dev/null
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_std_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_std_h.xml</file>
+ </header>
+ <module>logger_std_h</module>
+ <modulesummary>Default handler for the Logger application.</modulesummary>
+
+ <description>
+ <p>This is the default handler for the Logger
+ application. Multiple instances of this handler can be added to
+ logger, and each instance will print logs to <c>standard_io</c>,
+ <c>standard_error</c> or to file. The default instance that starts
+ with kernel is named <c>logger_std_h</c> - which is the name to be used
+ for reconfiguration.</p>
+ <p>The handler has an overload protection mechanism that will keep the handler
+ process and the kernel application alive during a high load of log
+ requests. How this feature works, and how to modify the configuration,
+ is described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the standard handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which may contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, as well as handler specific parameters. The specific parameters
+ are stored in a sub map with the key <c>logger_std_h</c>. The following
+ keys and values may be specified:</p>
+ <taglist>
+ <tag><c>type</c></tag>
+ <item>
+ <p>This will have the value <c>standard_io</c>, <c>standard_error</c>,
+ <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>,
+ where <c>standard_io</c> is the default value for type. It is
+ recommended not to specify <c>LogFileOpts</c> unless absolutely necessary. The
+ default options used by the handler to open a file for logging are:
+ <c>raw</c>, <c>append</c> and <c>delayed_write</c>. The standard
+ handler does not have support for circular logging. Use the
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ </seealso> handler for this.</p></item>
+ <tag><c>filesync_repeat_interval</c></tag>
+ <item>
+ <p>This value (in milliseconds) specifies how often the handler will
+ do a file sync operation in order to make sure that buffered data gets
+ written to disk. The handler will repeatedly attempt this
+ operation, but only perform it if something has actually been logged
+ since the last sync. The default value is <c>5000</c> milliseconds.
+ If <c>no_repeat</c> is set as value, the repeated file sync operation
+ is disabled, and it will be the operating system settings that determine
+ how quickly or slowly data gets written to disk. The user can also call
+ the <seealso marker="logger_std_h#filesync-1"><c>filesync/1</c></seealso>
+ function to perform a file sync.</p></item>
+ </taglist>
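+ <p>For example, a manual file sync of the default handler instance
+ could be triggered like this (a sketch):</p>
+ <code type="none">
+logger_std_h:filesync(logger_std_h).
+ </code>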
+ <p>There are a number of other configuration parameters available, that are
+ to be used for customizing the overload protection behaviour. The same
+ parameters are used both in the standard handler and the disk_log handler,
+ and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>Note that when changing the configuration of the handler at runtime, by
+ calling
+ <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2</c>
+ </seealso>, or
+ <seealso marker="logger#set_handler_config-3"><c>logger:set_handler_config/3</c>
+ </seealso>,
+ the <c>type</c> parameter may not be modified.</p>
+ <p>Example of adding a standard handler:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{level => info,
+ filter_default => log,
+ logger_std_h =>
+ #{type => {file,"./system_info.log"},
+ filesync_repeat_interval => 1000}}).
+ </code>
+ <p>In order to configure the default handler (that starts initially with
+ the kernel application) to log to file instead of <c>standard_io</c>,
+ use the kernel configuration parameter
+ <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with
+ value <c>{file,FileName}</c>. Example:</p>
+ <code type="none">
+erl -kernel logger_dest '{file,"./erl.log"}'
+ </code>
+ <p>An example of how to replace the standard handler with a disk_log handler
+ at startup can be found in the manual of
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>.</p>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="filesync" arity="1" clause_i="1"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/part.xml b/lib/kernel/doc/src/part.xml
new file mode 100644
index 0000000000..68eb4530e2
--- /dev/null
+++ b/lib/kernel/doc/src/part.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>1996</year><year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Logger User's Guide</title>
+ <prepared>OTP Team</prepared>
+ <docno></docno>
+ <date>2017-12-01</date>
+ <rev>0.1</rev>
+ <file>part.xml</file>
+ </header>
+ <description>
+ <p>This User's Guide describes the Logger API and its built-in
+ handlers, filters, and formatter, which are provided by the
+ Kernel application.</p>
+ </description>
+ <xi:include href="introduction_chapter.xml"/>
+ <xi:include href="logger_chapter.xml"/>
+</part>
+
diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml
index 5cd77e0f6f..c06914d23d 100644
--- a/lib/kernel/doc/src/ref_man.xml
+++ b/lib/kernel/doc/src/ref_man.xml
@@ -52,6 +52,11 @@
<xi:include href="inet.xml"/>
<xi:include href="inet_res.xml"/>
<xi:include href="init_stub.xml"/>
+ <xi:include href="logger.xml"/>
+ <xi:include href="logger_filters.xml"/>
+ <xi:include href="logger_formatter.xml"/>
+ <xi:include href="logger_std_h.xml"/>
+ <xi:include href="logger_disk_log_h.xml"/>
<xi:include href="net_adm.xml"/>
<xi:include href="net_kernel.xml"/>
<xi:include href="os.xml"/>
diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml
index 29d52f23bb..bcc422930e 100644
--- a/lib/kernel/doc/src/specs.xml
+++ b/lib/kernel/doc/src/specs.xml
@@ -20,6 +20,11 @@
<xi:include href="../specs/specs_inet.xml"/>
<xi:include href="../specs/specs_inet_res.xml"/>
<xi:include href="../specs/specs_init_stub.xml"/>
+ <xi:include href="../specs/specs_logger.xml"/>
+ <xi:include href="../specs/specs_logger_filters.xml"/>
+ <xi:include href="../specs/specs_logger_formatter.xml"/>
+ <xi:include href="../specs/specs_logger_std_h.xml"/>
+ <xi:include href="../specs/specs_logger_disk_log_h.xml"/>
<xi:include href="../specs/specs_net_adm.xml"/>
<xi:include href="../specs/specs_net_kernel.xml"/>
<xi:include href="../specs/specs_os.xml"/>
diff --git a/lib/kernel/include/logger.hrl b/lib/kernel/include/logger.hrl
new file mode 100644
index 0000000000..2143ccd297
--- /dev/null
+++ b/lib/kernel/include/logger.hrl
@@ -0,0 +1,49 @@
+-ifndef(LOGGER_HRL).
+-define(LOGGER_HRL,true).
+-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])).
+-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])).
+-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])).
+
+-define(LOG_ALERT(A),?DO_LOG(alert,[A])).
+-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])).
+-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])).
+
+-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])).
+-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])).
+-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])).
+
+-define(LOG_ERROR(A),?DO_LOG(error,[A])).
+-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])).
+-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])).
+
+-define(LOG_WARNING(A),?DO_LOG(warning,[A])).
+-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])).
+-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])).
+
+-define(LOG_NOTICE(A),?DO_LOG(notice,[A])).
+-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])).
+-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])).
+
+-define(LOG_INFO(A),?DO_LOG(info,[A])).
+-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])).
+-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])).
+
+-define(LOG_DEBUG(A),?DO_LOG(debug,[A])).
+-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])).
+-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])).
+
+-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ line=>?LINE,
+ file=>?FILE}).
+
+%%%-----------------------------------------------------------------
+%%% Internal, i.e. not intended for direct use in code - use above
+%%% macros instead!
+-define(DO_LOG(Level,Args),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ apply(logger,macro_log,[?LOCATION,Level|Args]);
+ false ->
+ ok
+ end).
+-endif.
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
index 0bc9f121a0..702845512c 100644
--- a/lib/kernel/src/Makefile
+++ b/lib/kernel/src/Makefile
@@ -109,6 +109,17 @@ MODULES = \
kernel_refc \
local_udp \
local_tcp \
+ logger \
+ logger_backend \
+ logger_config \
+ logger_std_h \
+ logger_disk_log_h \
+ logger_h_common \
+ logger_filters \
+ logger_formatter \
+ logger_server \
+ logger_simple \
+ logger_sup \
net \
net_adm \
net_kernel \
@@ -132,13 +143,14 @@ MODULES = \
HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \
../include/dist.hrl ../include/dist_util.hrl \
- ../include/net_address.hrl
+ ../include/net_address.hrl ../include/logger.hrl
INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
erl_epmd.hrl hipe_ext_format.hrl \
inet_dns.hrl inet_res.hrl \
inet_boot.hrl inet_config.hrl inet_int.hrl \
- inet_dns_record_adts.hrl
+ inet_dns_record_adts.hrl \
+ logger_internal.hrl logger_h_common.hrl
ERL_FILES= $(MODULES:%=%.erl)
@@ -223,7 +235,7 @@ release_docs_spec:
# Include dependencies -- list below added by Kostis Sagonas
-$(EBIN)/application_controller.beam: application_master.hrl
+$(EBIN)/application_controller.beam: application_master.hrl ../include/logger.hrl
$(EBIN)/application_master.beam: application_master.hrl
$(EBIN)/auth.beam: ../include/file.hrl
$(EBIN)/code.beam: ../include/file.hrl
@@ -234,6 +246,7 @@ $(EBIN)/disk_log_server.beam: disk_log.hrl
$(EBIN)/dist_util.beam: ../include/dist_util.hrl ../include/dist.hrl
$(EBIN)/erl_boot_server.beam: inet_boot.hrl
$(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl
+$(EBIN)/error_logger.beam: logger_internal.hrl ../include/logger.hrl
$(EBIN)/file.beam: ../include/file.hrl file_int.hrl
$(EBIN)/file_io_server.beam: ../include/file.hrl file_int.hrl
$(EBIN)/gen_tcp.beam: inet_int.hrl
@@ -259,6 +272,16 @@ $(EBIN)/inet_udp.beam: inet_int.hrl
$(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl
$(EBIN)/local_udp.beam: inet_int.hrl
$(EBIN)/local_tcp.beam: inet_int.hrl
+$(EBIN)/logger.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_backend.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_config.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_simple.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl
$(EBIN)/net_kernel.beam: ../include/net_address.hrl
$(EBIN)/os.beam: ../include/file.hrl
$(EBIN)/ram_file.beam: ../include/file.hrl
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index 3b642f5873..b9cb722575 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -44,6 +44,7 @@
keyfind/3, keydelete/3, keyreplace/4]).
-include("application_master.hrl").
+-include("logger.hrl").
-define(AC, ?MODULE). % Name of process
@@ -1546,9 +1547,8 @@ do_change_apps(Applications, Config, OldAppls) ->
%% Report errors, but do not terminate
%% (backwards compatible behaviour)
lists:foreach(fun({error, {SysFName, Line, Str}}) ->
- Str2 = lists:flatten(io_lib:format("~tp: ~w: ~ts~n",
- [SysFName, Line, Str])),
- error_logger:format(Str2, [])
+ ?LOG_ERROR("~tp: ~w: ~ts~n",[SysFName, Line, Str],
+ #{error_logger=>#{tag=>error}})
end,
Errors),
@@ -1631,8 +1631,9 @@ make_term(Str) ->
end.
handle_make_term_error(Mod, Reason, Str) ->
- error_logger:format("application_controller: ~ts: ~ts~n",
- [Mod:format_error(Reason), Str]),
+ ?LOG_ERROR("application_controller: ~ts: ~ts~n",
+ [Mod:format_error(Reason), Str],
+ #{error_logger=>#{tag=>error}}),
throw({error, {bad_environment_value, Str}}).
get_env_i(Name, #state{conf_data = ConfData}) when is_list(ConfData) ->
@@ -1913,19 +1914,25 @@ config_error() ->
"configuration file must contain ONE list ended by <dot>"}}.
%%-----------------------------------------------------------------
-%% Info messages sent to error_logger
+%% Info messages sent to logger
%%-----------------------------------------------------------------
info_started(Name, Node) ->
- Rep = [{application, Name},
- {started_at, Node}],
- error_logger:info_report(progress, Rep).
+ ?LOG_INFO(#{label=>{application_controller,progress},
+ report=>[{application, Name},
+ {started_at, Node}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
info_exited(Name, Reason, Type) ->
- Rep = [{application, Name},
- {exited, Reason},
- {type, Type}],
- error_logger:info_report(Rep).
-
+ ?LOG_INFO(#{label=>{application_controller,exit},
+ report=>[{application, Name},
+ {exited, Reason},
+ {type, Type}]},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun logger:format_otp_report/1,
+ error_logger=>#{tag=>info_report,type=>std_info}}).
%%-----------------------------------------------------------------
%% Reply to all processes waiting this application to be started.
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
index f5a890cb95..bbfa2a995d 100644
--- a/lib/kernel/src/code_server.erl
+++ b/lib/kernel/src/code_server.erl
@@ -1434,14 +1434,20 @@ all_loaded(Db) ->
-spec error_msg(io:format(), [term()]) -> 'ok'.
error_msg(Format, Args) ->
- Msg = {notify,{error, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,error,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>error}}},
ok.
-spec info_msg(io:format(), [term()]) -> 'ok'.
info_msg(Format, Args) ->
- Msg = {notify,{info_msg, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,info,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>info_msg}}},
ok.
objfile_extension() ->
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index 585507c545..0706220a94 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -19,22 +19,23 @@
%%
-module(error_logger).
--export([start/0,start_link/0,format/2,error_msg/1,error_msg/2,error_report/1,
+-include("logger_internal.hrl").
+
+-export([start/0,start_link/0,stop/0,
+ format/2,error_msg/1,error_msg/2,error_report/1,
error_report/2,info_report/1,info_report/2,warning_report/1,
warning_report/2,error_info/1,
info_msg/1,info_msg/2,warning_msg/1,warning_msg/2,
- logfile/1,tty/1,swap_handler/1,
+ logfile/1,tty/1,
add_report_handler/1,add_report_handler/2,
- delete_report_handler/1]).
+ delete_report_handler/1,
+ which_report_handlers/0]).
--export([init/1,
- handle_event/2, handle_call/2, handle_info/2,
- terminate/2]).
+%% logger callbacks
+-export([adding_handler/2, removing_handler/1, log/2]).
-export([get_format_depth/0, limit_term/1]).
--define(buffer_size, 10).
-
%%-----------------------------------------------------------------
%% Types used in this file
%%-----------------------------------------------------------------
@@ -43,8 +44,6 @@
| 'info' | 'info_msg' | 'info_report'
| 'warning_msg' | 'warning_report'.
--type state() :: {non_neg_integer(), non_neg_integer(), [term()]}.
-
%%% BIF
-export([warning_map/0]).
@@ -59,26 +58,137 @@ warning_map() ->
%%-----------------------------------------------------------------
--spec start() -> {'ok', pid()} | {'error', any()}.
+%%%-----------------------------------------------------------------
+%%% Start the event manager process under logger_sup, which is part of
+%%% the kernel application's supervision tree.
+-spec start() -> 'ok' | {'error', any()}.
start() ->
- case gen_event:start({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ case whereis(?MODULE) of
+ undefined ->
+ ErrorLogger =
+ #{id => ?MODULE,
+ start => {?MODULE, start_link, []},
+ restart => transient,
+ shutdown => 2000,
+ type => worker,
+ modules => dynamic},
+ case supervisor:start_child(logger_sup, ErrorLogger) of
+ {ok,_} ->
+ ok;
+ Error ->
+ Error
+ end;
+ _ ->
+ ok
end.
+%%%-----------------------------------------------------------------
+%%% Start callback specified in child specification to supervisor, see start/0
-spec start_link() -> {'ok', pid()} | {'error', any()}.
start_link() ->
- case gen_event:start_link({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ gen_event:start_link({local, ?MODULE},
+ [{spawn_opt,[{message_queue_data, off_heap}]}]).
+
+%%%-----------------------------------------------------------------
+%%% Stop the event manager
+-spec stop() -> ok.
+stop() ->
+ _ = supervisor:terminate_child(logger_sup,?MODULE),
+ _ = supervisor:delete_child(logger_sup,?MODULE),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for logger
+-spec adding_handler(logger:handler_id(),logger:config()) ->
+ {ok,logger:config()} | {error,term()}.
+adding_handler(?MODULE,Config) ->
+ case start() of
+ ok ->
+ {ok,Config};
+ Error ->
+ Error
end.
+-spec removing_handler(logger:handler_id()) -> ok.
+removing_handler(?MODULE) ->
+ stop(),
+ ok.
+
+-spec log(logger:log(),logger:config()) -> ok.
+log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) ->
+ do_log(Level,Msg,Meta).
+
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag,type:=Type}}=Meta) ->
+ %% From error_logger:*_report/1,2, or logger call which added
+ %% error_logger data to obtain backwards compatibility with
+ %% error_logger:*_report/1,2
+ Report =
+ case Msg of
+ #{label:=_,report:=R} -> R;
+ _ -> Msg
+ end,
+ notify(Level,Tag,Type,Report,Meta);
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag}}=Meta) ->
+ {Format,Args} =
+ case Msg of
+ #{label:=_,format:=F,args:=A} ->
+ %% From error_logger:*_msg/1,2.
+ %% In order to be backwards compatible with handling
+ %% of faulty parameters to error_logger:*_msg/1,2,
+ %% don't use report_cb here.
+ {F,A};
+ _ ->
+ %% From logger call which added error_logger data to
+ %% obtain backwards compatibility with error_logger:*_msg/1,2
+ RCBFun=maps:get(report_cb,Meta,fun logger:format_report/1),
+ try RCBFun(Msg) of
+ {F,A} when is_list(F), is_list(A) ->
+ {F,A};
+ Other ->
+ {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]}
+ catch C:R ->
+ {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]}
+ end
+ end,
+ notify(Level,Tag,Format,Args,Meta);
+do_log(Level,{Format,Args},#{?MODULE:=#{tag:=Tag}}=Meta)
+ when is_list(Format), is_list(Args) ->
+ %% From logger call which added error_logger data to obtain
+ %% backwards compatibility with error_logger:*_msg/1,2
+ notify(Level,Tag,Format,Args,Meta);
+do_log(_Level,_Msg,_Meta) ->
+ %% Ignore the rest - i.e. to get backwards compatibility with
+ %% error_logger, you must use the error_logger API for logging.
+ %% Some modules within OTP go around this by adding an
+ %% error_logger field to its metadata. This is done only to allow
+ %% complete backwards compatibility for log events originating
+ %% from within OTP, while still using the new logger interface.
+ ok.
+
+-spec notify(logger:level(), msg_tag(), any(), any(), map()) -> 'ok'.
+notify(Level,Tag0,FormatOrType0,ArgsOrReport,#{pid:=Pid0,gl:=GL,?MODULE:=My}) ->
+ Tag = fix_warning_tag(Level,Tag0),
+ Pid = case maps:get(emulator,My,false) of
+ true -> emulator;
+ _ -> Pid0
+ end,
+ FormatOrType = fix_warning_type(Level,FormatOrType0),
+ gen_event:notify(?MODULE,{Tag,GL,{Pid,FormatOrType,ArgsOrReport}}).
+
+%% This fixes the case where the client has explicitly added the
+%% error_logger tag and type to the metadata without checking the warning map.
+fix_warning_tag(error,warning_msg) -> error;
+fix_warning_tag(error,warning_report) -> error_report;
+fix_warning_tag(info,warning_msg) -> info_msg;
+fix_warning_tag(info,warning_report) -> info_report;
+fix_warning_tag(_,Tag) -> Tag.
+
+fix_warning_type(error,std_warning) -> std_error;
+fix_warning_type(info,std_warning) -> std_info;
+fix_warning_type(_,Type) -> Type.
+
%%-----------------------------------------------------------------
%% These two simple old functions generate events tagged 'error'
%% Used for simple messages; error or information.
@@ -95,14 +205,18 @@ error_msg(Format) ->
Data :: list().
error_msg(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_msg},
+ format=>Format,
+ args=>Args},
+ meta(error)).
-spec format(Format, Data) -> 'ok' when
Format :: string(),
Data :: list().
format(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ error_msg(Format, Args).
%%-----------------------------------------------------------------
%% This functions should be used for error reports. Events
@@ -124,7 +238,10 @@ error_report(Report) ->
Report :: report().
error_report(Type, Report) ->
- notify({error_report, group_leader(), {self(), Type, Report}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_report},
+ report=>Report},
+ meta(error_report,Type)).
%%-----------------------------------------------------------------
%% This function should be used for warning reports.
@@ -146,7 +263,8 @@ warning_report(Report) ->
Report :: report().
warning_report(Type, Report) ->
- {Tag, NType} = case error_logger:warning_map() of
+ Level = error_logger:warning_map(),
+ {Tag, NType} = case Level of
info ->
if
Type =:= std_warning ->
@@ -164,7 +282,10 @@ warning_report(Type, Report) ->
{error_report, Type}
end
end,
- notify({Tag, group_leader(), {self(), NType, Report}}).
+ logger:log(Level,
+ #{label=>{?MODULE,warning_report},
+ report=>Report},
+ meta(Tag,NType)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -183,7 +304,8 @@ warning_msg(Format) ->
Data :: list().
warning_msg(Format, Args) ->
- Tag = case error_logger:warning_map() of
+ Level = error_logger:warning_map(),
+ Tag = case Level of
warning ->
warning_msg;
info ->
@@ -191,7 +313,11 @@ warning_msg(Format, Args) ->
error ->
error
end,
- notify({Tag, group_leader(), {self(), Format, Args}}).
+ logger:log(Level,
+ #{label=>{?MODULE,warning_msg},
+ format=>Format,
+ args=>Args},
+ meta(Tag)).
%%-----------------------------------------------------------------
%% This function should be used for information reports. Events
@@ -210,7 +336,10 @@ info_report(Report) ->
Report :: report().
info_report(Type, Report) ->
- notify({info_report, group_leader(), {self(), Type, Report}}).
+ logger:log(info,
+ #{label=>{?MODULE,info_report},
+ report=>Report},
+ meta(info_report,Type)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -228,7 +357,11 @@ info_msg(Format) ->
Data :: list().
info_msg(Format, Args) ->
- notify({info_msg, group_leader(), {self(), Format, Args}}).
+ logger:log(info,
+ #{label=>{?MODULE,info_msg},
+ format=>Format,
+ args=>Args},
+ meta(info_msg)).
%%-----------------------------------------------------------------
%% Used by the init process. Events are tagged 'info'.
@@ -236,38 +369,75 @@ info_msg(Format, Args) ->
-spec error_info(Error :: any()) -> 'ok'.
+%% unused?
error_info(Error) ->
- notify({info, group_leader(), {self(), Error, []}}).
-
--spec notify({msg_tag(), pid(), {pid(), any(), any()}}) -> 'ok'.
-
-notify(Msg) ->
- gen_event:notify(error_logger, Msg).
-
--type swap_handler_type() :: 'false' | 'silent' | 'tty' | {'logfile', string()}.
--spec swap_handler(Type :: swap_handler_type()) -> any().
-
-swap_handler(tty) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_tty_h, []}),
- ok = simple_logger(),
- R;
-swap_handler({logfile, File}) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_file_h, File}),
- ok = simple_logger(),
- R;
-swap_handler(silent) ->
- _ = gen_event:delete_handler(error_logger, error_logger, delete),
- ok = simple_logger();
-swap_handler(false) ->
- ok. % keep primitive event handler as-is
+ {Format,Args} =
+ case string_p(Error) of
+ true -> {Error,[]};
+ false -> {"~p",[Error]}
+ end,
+ MyMeta = #{tag=>info,type=>Error},
+ logger:log(info, Format, Args, #{?MODULE=>MyMeta,domain=>[Error]}).
+
+%%-----------------------------------------------------------------
+%% Create metadata
+meta(Tag) ->
+ meta(Tag,undefined).
+meta(Tag,Type) ->
+ meta(Tag,Type,#{report_cb=>fun report_to_format/1}).
+meta(Tag,undefined,Meta0) ->
+ Meta0#{?MODULE=>#{tag=>Tag}};
+meta(Tag,Type,Meta0) ->
+ maybe_add_domain(Tag,Type,Meta0#{?MODULE=>#{tag=>Tag,type=>Type}}).
+
+%% This prevents events of non-standard type from being printed by the
+%% standard logger, similar to how error_logger_tty_h discards events
+%% of non-standard type.
+maybe_add_domain(error_report,std_error,Meta) -> Meta;
+maybe_add_domain(info_report,std_info,Meta) -> Meta;
+maybe_add_domain(warning_report,std_warning,Meta) -> Meta;
+maybe_add_domain(_,Type,Meta) -> Meta#{domain=>[Type]}.
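%% The effect of the clauses above, illustrated (my_type is a made-up
%% report type):
%%
%%   maybe_add_domain(error_report, std_error, #{}),  %% -> #{}
%%   maybe_add_domain(error_report, my_type, #{}).    %% -> #{domain=>[my_type]}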
+
+%% -----------------------------------------------------------------
+%% Report formatting - i.e. Term => {Format,Args}
+%% This was earlier done in the event handler (error_logger_tty_h, etc.).
+%% -----------------------------------------------------------------
+report_to_format(#{label:={?MODULE,_},
+ report:=Report}) when is_map(Report) ->
+ %% logger:format_otp_report does maps:to_list, and for backwards
+ %% compatibility reasons we don't want that.
+ {"~tp\n",[Report]};
+report_to_format(#{label:={?MODULE,_},
+ format:=Format,
+ args:=Args}) ->
+    %% This is not efficient, but it is needed for backwards compatibility
+    %% when faulty arguments are given to the *_msg functions.
+ try io_lib:scan_format(Format,Args) of
+ _ -> {Format,Args}
+ catch _:_ ->
+ {"ERROR: ~tp - ~tp",[Format,Args]}
+ end;
+report_to_format(Term) ->
+ logger:format_otp_report(Term).
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_list(FlatList).
+
+%% -----------------------------------------------------------------
+%% Stuff directly related to the event manager
+%% -----------------------------------------------------------------
-spec add_report_handler(Handler) -> any() when
Handler :: module().
add_report_handler(Module) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, []).
+ add_report_handler(Module, []).
-spec add_report_handler(Handler, Args) -> Result when
Handler :: module(),
@@ -275,24 +445,37 @@ add_report_handler(Module) when is_atom(Module) ->
Result :: gen_event:add_handler_ret().
add_report_handler(Module, Args) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, Args).
+ _ = logger:add_handler(?MODULE,?MODULE,#{level=>info,filter_default=>log}),
+ gen_event:add_handler(?MODULE, Module, Args).
-spec delete_report_handler(Handler) -> Result when
Handler :: module(),
Result :: gen_event:del_handler_ret().
delete_report_handler(Module) when is_atom(Module) ->
- gen_event:delete_handler(error_logger, Module, []).
-
-%% Start the lowest level error_logger handler with Buffer.
-
-simple_logger(Buffer_size) when is_integer(Buffer_size) ->
- gen_event:add_handler(error_logger, error_logger, Buffer_size).
-
-%% Start the lowest level error_logger handler without Buffer.
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ Return = gen_event:delete_handler(?MODULE, Module, []),
+ case gen_event:which_handlers(?MODULE) of
+ [] ->
+ %% Don't want a lot of logs here if it's not needed
+ _ = logger:remove_handler(?MODULE),
+ ok;
+ _ ->
+ ok
+ end,
+ Return;
+ _ ->
+ ok
+ end.
-simple_logger() ->
- gen_event:add_handler(error_logger, error_logger, []).
+which_report_handlers() ->
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ gen_event:which_handlers(?MODULE);
+ undefined ->
+ []
+ end.
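%% Illustrative usage (my_report_h is a hypothetical gen_event callback
%% module): adding a report handler also registers this module as a
%% logger handler if needed, and deleting the last report handler
%% removes it again.
%%
%%   error_logger:add_report_handler(my_report_h),
%%   error_logger:delete_report_handler(my_report_h).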
%% Log all errors to File for all eternity
@@ -308,26 +491,35 @@ simple_logger() ->
FilenameReason :: no_log_file.
logfile({open, File}) ->
- case lists:member(error_logger_file_h,
- gen_event:which_handlers(error_logger)) of
+ case lists:member(error_logger_file_h,which_report_handlers()) of
true ->
{error, allready_have_logfile};
_ ->
- gen_event:add_handler(error_logger, error_logger_file_h, File)
+ add_report_handler(error_logger_file_h, File)
end;
logfile(close) ->
- case gen_event:delete_handler(error_logger, error_logger_file_h, normal) of
- {error,Reason} ->
- {error,Reason};
- _ ->
- ok
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:delete_handler(?MODULE, error_logger_file_h, normal) of
+ {error,Reason} ->
+ {error,Reason};
+ _ ->
+ ok
+ end;
+ _ ->
+ {error,module_not_found}
end;
logfile(filename) ->
- case gen_event:call(error_logger, error_logger_file_h, filename) of
- {error,_} ->
- {error, no_log_file};
- Val ->
- Val
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:call(?MODULE, error_logger_file_h, filename) of
+ {error,_} ->
+ {error, no_log_file};
+ Val ->
+ Val
+ end;
+ _ ->
+ {error, no_log_file}
end.
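%% Illustrative usage ("./erlang.log" is an example path):
%%
%%   error_logger:logfile({open, "./erlang.log"}),
%%   error_logger:logfile(filename),   %% -> the name of the open log file
%%   error_logger:logfile(close).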
%% Possibly turn off all tty printouts, maybe we only want the errors
@@ -337,193 +529,17 @@ logfile(filename) ->
Flag :: boolean().
tty(true) ->
- Hs = gen_event:which_handlers(error_logger),
- case lists:member(error_logger_tty_h, Hs) of
+ case lists:member(error_logger_tty_h, which_report_handlers()) of
false ->
- gen_event:add_handler(error_logger, error_logger_tty_h, []);
- true ->
+ add_report_handler(error_logger_tty_h, []);
+ true ->
ignore
end,
ok;
tty(false) ->
- gen_event:delete_handler(error_logger, error_logger_tty_h, []),
- ok.
-
+ delete_report_handler(error_logger_tty_h).
-%%% ---------------------------------------------------
-%%% This is the default error_logger handler.
-%%% ---------------------------------------------------
-
--spec init(term()) -> {'ok', state() | []}.
-
-init(Max) when is_integer(Max) ->
- {ok, {Max, 0, []}};
-%% This one is called if someone took over from us, and now wants to
-%% go back.
-init({go_back, _PostState}) ->
- {ok, {?buffer_size, 0, []}};
-init(_) ->
- %% The error logger process may receive a huge amount of
- %% messages. Make sure that they are stored off heap to
- %% avoid exessive GCs.
- process_flag(message_queue_data, off_heap),
- {ok, []}.
-
--spec handle_event(term(), state()) -> {'ok', state()}.
-
-handle_event({Type, GL, Msg}, State) when node(GL) =/= node() ->
- gen_event:notify({error_logger, node(GL)},{Type, GL, Msg}),
- %% handle_event2({Type, GL, Msg}, State); %% Shall we do something
- {ok, State}; %% at this node too ???
-handle_event({info_report, _, {_, Type, _}}, State) when Type =/= std_info ->
- {ok, State}; %% Ignore other info reports here
-handle_event(Event, State) ->
- handle_event2(Event, State).
-
--spec handle_info(term(), state()) -> {'ok', state()}.
-
-handle_info({emulator, GL, Chars}, State) when node(GL) =/= node() ->
- {error_logger, node(GL)} ! {emulator, GL, add_node(Chars,self())},
- {ok, State};
-handle_info({emulator, GL, Chars}, State) ->
- handle_event2({emulator, GL, Chars}, State);
-handle_info(_, State) ->
- {ok, State}.
-
--spec handle_call(term(), state()) -> {'ok', {'error', 'bad_query'}, state()}.
-
-handle_call(_Query, State) -> {ok, {error, bad_query}, State}.
-
--spec terminate(term(), state()) -> {'error_logger', [term()]}.
-
-terminate(swap, {_, 0, Buff}) ->
- {error_logger, Buff};
-terminate(swap, {_, Lost, Buff}) ->
- Myevent = {info, group_leader(), {self(), {lost_messages, Lost}, []}},
- {error_logger, [tag_event(Myevent)|Buff]};
-terminate(_, _) ->
- {error_logger, []}.
-
-handle_event2(Event, {1, Lost, Buff}) ->
- display(tag_event(Event)),
- {ok, {1, Lost+1, Buff}};
-handle_event2(Event, {N, Lost, Buff}) ->
- Tagged = tag_event(Event),
- display(Tagged),
- {ok, {N-1, Lost, [Tagged|Buff]}};
-handle_event2(_, State) ->
- {ok, State}.
-
-tag_event(Event) ->
- {erlang:localtime(), Event}.
-
-display({Tag,{error,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{error_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info,_,{_,Error,_}}}) ->
- display2(Tag,Error,[]);
-display({Tag,{info_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{warning_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{warning_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{emulator,_,Chars}}) ->
- display2(Tag,Chars,[]).
-
-add_node(X, Pid) when is_atom(X) ->
- add_node(atom_to_list(X), Pid);
-add_node(X, Pid) ->
- lists:concat([X,"** at node ",node(Pid)," **~n"]).
-
-%% Can't do io_lib:format
-
-display2({{_Y,_Mo,_D},{_H,_Mi,_S}} = Date, F, A) ->
- display_date(Date),
- display3(string_p(F), F, A).
-
-display_date({{Y,Mo,D},{H,Mi,S}}) ->
- erlang:display_string(
- integer_to_list(Y) ++ "-" ++
- two_digits(Mo) ++ "-" ++
- two_digits(D) ++ " " ++
- two_digits(H) ++ ":" ++
- two_digits(Mi) ++ ":" ++
- two_digits(S) ++ " ").
-
-two_digits(N) when 0 =< N, N =< 9 ->
- [$0, $0 + N];
-two_digits(N) ->
- integer_to_list(N).
-
-display3(true, F, A) ->
- %% Format string with arguments
- erlang:display_string(F ++ "\n"),
- [begin
- erlang:display_string("\t"),
- erlang:display(Arg)
- end || Arg <- A],
- ok;
-display3(false, Atom, A) when is_atom(Atom) ->
- %% The widest atom seems to be 'supervisor_report' at 17.
- ColumnWidth = 20,
- AtomString = atom_to_list(Atom),
- AtomLength = length(AtomString),
- Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
- erlang:display_string(AtomString ++ Padding),
- display4(A);
-display3(_, F, A) ->
- erlang:display({F, A}).
-
-display4([A, []]) ->
- %% Not sure why crash reports look like this.
- display4(A);
-display4(A = [_|_]) ->
- case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
- true ->
- erlang:display_string("\n"),
- lists:foreach(
- fun({Key, Value}) ->
- erlang:display_string(
- " " ++
- atom_to_list(Key) ++
- ": "),
- erlang:display(Value)
- end, A);
- false ->
- erlang:display(A)
- end;
-display4(A) ->
- erlang:display(A).
-
-string_p([]) ->
- false;
-string_p(Term) ->
- string_p1(Term).
-
-string_p1([H|T]) when is_integer(H), H >= $\040, H =< $\176 ->
- string_p1(T);
-string_p1([H|T]) when is_integer(H), H >= 16#A0, H < 16#D800;
- is_integer(H), H > 16#DFFF, H < 16#FFFE;
- is_integer(H), H > 16#FFFF, H =< 16#10FFFF ->
- string_p1(T);
-string_p1([$\n|T]) -> string_p1(T);
-string_p1([$\r|T]) -> string_p1(T);
-string_p1([$\t|T]) -> string_p1(T);
-string_p1([$\v|T]) -> string_p1(T);
-string_p1([$\b|T]) -> string_p1(T);
-string_p1([$\f|T]) -> string_p1(T);
-string_p1([$\e|T]) -> string_p1(T);
-string_p1([H|T]) when is_list(H) ->
- case string_p1(H) of
- true -> string_p1(T);
- _ -> false
- end;
-string_p1([]) -> true;
-string_p1(_) -> false.
+%%%-----------------------------------------------------------------
-spec limit_term(term()) -> term().
@@ -536,9 +552,4 @@ limit_term(Term) ->
-spec get_format_depth() -> 'unlimited' | pos_integer().
get_format_depth() ->
- case application:get_env(kernel, error_logger_format_depth) of
- {ok, Depth} when is_integer(Depth) ->
- max(10, Depth);
- undefined ->
- unlimited
- end.
+ logger:get_format_depth().
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index 82a3571da9..afffcd156e 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -60,6 +60,17 @@
kernel_refc,
local_tcp,
local_udp,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_h_common,
+ logger_server,
+ logger_simple,
+ logger_std_h,
+ logger_sup,
net,
net_adm,
net_kernel,
@@ -117,6 +128,8 @@
kernel_config,
kernel_refc,
kernel_sup,
+ logger,
+ logger_sup,
net_kernel,
net_sup,
rex,
@@ -127,7 +140,7 @@
inet_db,
pg2]},
{applications, []},
- {env, [{error_logger, tty}]},
+ {env, []},
{mod, {kernel, []}},
{runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]}
]
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index 0382764b39..20aa47f602 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -40,8 +40,7 @@ start(_, []) ->
ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
end,
%% add error handler
- Type = get_error_logger_type(),
- case error_logger:swap_handler(Type) of
+ case logger:setup_standard_handler() of
ok -> {ok, Pid, []};
Error ->
%% Not necessary since the node will crash anyway:
@@ -62,16 +61,6 @@ config_change(Changed, New, Removed) ->
do_global_groups_change(Changed, New, Removed),
ok.
-get_error_logger_type() ->
- case application:get_env(kernel, error_logger) of
- {ok, tty} -> tty;
- {ok, {file, File}} when is_list(File) -> {logfile, File};
- {ok, false} -> false;
- {ok, silent} -> silent;
- undefined -> tty; % default value
- {ok, Bad} -> exit({bad_config, {kernel, {error_logger, Bad}}})
- end.
-
%%%-----------------------------------------------------------------
%%% The process structure in kernel is as shown in the figure.
%%%
@@ -153,9 +142,18 @@ init([]) ->
type => supervisor,
modules => [?MODULE]},
+
+ LoggerSup = #{id => logger_sup,
+ start => {logger_sup, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [logger_sup]},
+
case init:get_argument(mode) of
{ok, [["minimal"]]} ->
- {ok, {SupFlags, [Code, File, StdError, User, Config, RefC, SafeSup]}};
+ {ok, {SupFlags,
+ [Code, File, StdError, User, Config, RefC, SafeSup, LoggerSup]}};
_ ->
Rpc = #{id => rex,
start => {rpc, start_link, []},
@@ -206,7 +204,7 @@ init([]) ->
{ok, {SupFlags,
[Code, Rpc, Global, InetDb | DistAC] ++
[NetSup, GlGroup, File, SigSrv,
- StdError, User, Config, RefC, SafeSup] ++ Timer}}
+ StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
end;
init(safe) ->
SupFlags = #{strategy => one_for_one,
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
new file mode 100644
index 0000000000..943ef8c2d1
--- /dev/null
+++ b/lib/kernel/src/logger.erl
@@ -0,0 +1,803 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger).
+
+%% Log interface
+-export([emergency/1,emergency/2,emergency/3,
+ alert/1,alert/2,alert/3,
+ critical/1,critical/2,critical/3,
+ error/1,error/2,error/3,
+ warning/1,warning/2,warning/3,
+ notice/1,notice/2,notice/3,
+ info/1,info/2,info/3,
+ debug/1,debug/2,debug/3]).
+-export([log/2,log/3,log/4]).
+
+%% Called by macro
+-export([allow/2,macro_log/3,macro_log/4,macro_log/5,add_default_metadata/1]).
+
+%% Configuration
+-export([add_handler/3, remove_handler/1,
+ add_logger_filter/2, add_handler_filter/3,
+ remove_logger_filter/1, remove_handler_filter/2,
+ set_module_level/2, reset_module_level/1,
+ set_logger_config/1, set_logger_config/2,
+ set_handler_config/2, set_handler_config/3,
+ get_logger_config/0, get_handler_config/1]).
+
+%% Misc
+-export([compare_levels/2]).
+-export([set_process_metadata/1, unset_process_metadata/0,
+ get_process_metadata/0]).
+-export([i/0, i/1]).
+-export([setup_standard_handler/0, replace_simple_handler/3]).
+-export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]).
+
+%% Basic report formatting
+-export([format_report/1, format_otp_report/1]).
+
+-export([internal_log/2,filter_stacktrace/2]).
+
+-include("logger_internal.hrl").
+-include("logger.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type log() :: #{level=>level(),
+ msg=>{io:format(),[term()]} |
+ {report,report()} |
+ {string,unicode:chardata()},
+ meta=>metadata()}.
+-type level() :: emergency | alert | critical | error |
+ warning | notice | info | debug.
+-type report() :: map() | [{atom(),term()}].
+-type msg_fun() :: fun((term()) -> {io:format(),[term()]} |
+ report() |
+ unicode:chardata()).
+-type metadata() :: map().
+
+-type handler_id() :: atom().
+-type filter_id() :: atom().
+-type filter() :: {fun((log(),term()) -> filter_return()),term()}.
+-type filter_return() :: stop | ignore | log().
+-type config() :: map().
+
+-export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0,
+ filter_id/0,filter/0,filter_return/0]).
+
+%%%-----------------------------------------------------------------
+%%% API
+emergency(X) ->
+ log(emergency,X).
+emergency(X,Y) ->
+ log(emergency,X,Y).
+emergency(X,Y,Z) ->
+ log(emergency,X,Y,Z).
+
+alert(X) ->
+ log(alert,X).
+alert(X,Y) ->
+ log(alert,X,Y).
+alert(X,Y,Z) ->
+ log(alert,X,Y,Z).
+
+critical(X) ->
+ log(critical,X).
+critical(X,Y) ->
+ log(critical,X,Y).
+critical(X,Y,Z) ->
+ log(critical,X,Y,Z).
+
+error(X) ->
+ log(error,X).
+error(X,Y) ->
+ log(error,X,Y).
+error(X,Y,Z) ->
+ log(error,X,Y,Z).
+
+warning(X) ->
+ log(warning,X).
+warning(X,Y) ->
+ log(warning,X,Y).
+warning(X,Y,Z) ->
+ log(warning,X,Y,Z).
+
+notice(X) ->
+ log(notice,X).
+notice(X,Y) ->
+ log(notice,X,Y).
+notice(X,Y,Z) ->
+ log(notice,X,Y,Z).
+
+info(X) ->
+ log(info,X).
+info(X,Y) ->
+ log(info,X,Y).
+info(X,Y,Z) ->
+ log(info,X,Y,Z).
+
+debug(X) ->
+ log(debug,X).
+debug(X,Y) ->
+ log(debug,X,Y).
+debug(X,Y,Z) ->
+ log(debug,X,Y,Z).
+
+-spec log(Level,StringOrReport) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+log(Level, StringOrReport) ->
+ do_log(Level,StringOrReport,#{}).
+
+-spec log(Level,StringOrReport,Metadata) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Metadata :: metadata();
+ (Level,Format,Args) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Level,Fun,FunArgs) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+log(Level, StringOrReport, Metadata)
+ when is_map(Metadata), not is_function(StringOrReport) ->
+ do_log(Level,StringOrReport,Metadata);
+log(Level, FunOrFormat, Args) ->
+ do_log(Level,{FunOrFormat,Args},#{}).
+
+-spec log(Level,Format, Args, Metadata) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args :: [term()],
+ Metadata :: metadata();
+ (Level,Fun,FunArgs,Metadata) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Metadata :: metadata().
+log(Level, FunOrFormat, Args, Metadata) ->
+ do_log(Level,{FunOrFormat,Args},Metadata).
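%% Illustrative calls covering the message shapes accepted above
%% (all names and values are examples):
%%
%%   logger:error("connection lost"),
%%   logger:info(#{event=>user_login, user=>"joe"}),
%%   logger:warning("~p dropped ~p requests", [my_server, 3]),
%%   logger:debug(fun(N) -> {"expensive info: ~p", [N]} end, 42,
%%                #{domain=>[my_app]}).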
+
+-spec allow(Level,Module) -> boolean() when
+ Level :: level(),
+ Module :: module().
+allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) ->
+ logger_config:allow(?LOGGER_TABLE,Level,Module).
+
+
+-spec macro_log(Location,Level,StringOrReport) -> ok when
+ Location :: map(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+macro_log(Location,Level,StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,#{}).
+
+-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Meta :: metadata();
+ (Location,Level,Format,Args) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Location,Level,Fun,FunArgs) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+macro_log(Location,Level,StringOrReport,Meta)
+ when is_map(Meta), not is_function(StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,Meta);
+macro_log(Location,Level,FunOrFormat,Args) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},#{}).
+
+-spec macro_log(Location,Level,Format,Args,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()],
+ Meta :: metadata();
+ (Location,Level,Fun,FunArgs,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Meta :: metadata().
+macro_log(Location,Level,FunOrFormat,Args,Meta) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},Meta).
+
+-spec format_otp_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_otp_report(#{label:=_,report:=Report}) ->
+ format_report(Report);
+format_otp_report(Report) ->
+ format_report(Report).
+
+-spec format_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_report(Report) when is_map(Report) ->
+ format_report(maps:to_list(Report));
+format_report(Report) when is_list(Report) ->
+ case lists:flatten(Report) of
+ [] ->
+ {"~tp",[[]]};
+ FlatList ->
+ case string_p1(FlatList) of
+ true ->
+ {"~ts",[FlatList]};
+ false ->
+ format_term_list(Report,[],[])
+ end
+ end;
+format_report(Report) ->
+ {"~tp",[Report]}.
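%% Illustrative use of the report helpers (the report is an example):
%% format_report/1 turns a report into {Format,Args} suitable for
%% io_lib:format/2.
%%
%%   {Format, Args} = logger:format_report(#{in=>"accept", got=>timeout}),
%%   io_lib:format(Format, Args).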
+
+format_term_list([{Tag,Data}|T],Format,Args) ->
+ PorS = case string_p(Data) of
+ true -> "s";
+ false -> "p"
+ end,
+ format_term_list(T,[" ~tp: ~t"++PorS|Format],[Data,Tag|Args]);
+format_term_list([Data|T],Format,Args) ->
+ format_term_list(T,[" ~tp"|Format],[Data|Args]);
+format_term_list([],Format,Args) ->
+ {lists:flatten(lists:join($\n,lists:reverse(Format))),lists:reverse(Args)}.
+
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_unicode_list(FlatList).
+
+internal_log(Level,Term) when is_atom(Level) ->
+ erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "),
+ erlang:display(Term).
+
+%%%-----------------------------------------------------------------
+%%% Configuration
+-spec add_logger_filter(FilterId,Filter) -> ok | {error,term()} when
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_logger_filter(FilterId,Filter) ->
+ logger_server:add_filter(logger,{FilterId,Filter}).
+
+-spec add_handler_filter(HandlerId,FilterId,Filter) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_handler_filter(HandlerId,FilterId,Filter) ->
+ logger_server:add_filter(HandlerId,{FilterId,Filter}).
+
+
+-spec remove_logger_filter(FilterId) -> ok | {error,term()} when
+ FilterId :: filter_id().
+remove_logger_filter(FilterId) ->
+ logger_server:remove_filter(logger,FilterId).
+
+-spec remove_handler_filter(HandlerId,FilterId) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id().
+remove_handler_filter(HandlerId,FilterId) ->
+ logger_server:remove_filter(HandlerId,FilterId).
+
+-spec add_handler(HandlerId,Module,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: config().
+add_handler(HandlerId,Module,Config) ->
+ logger_server:add_handler(HandlerId,Module,Config).
+
+-spec remove_handler(HandlerId) -> ok | {error,term()} when
+ HandlerId :: handler_id().
+remove_handler(HandlerId) ->
+ logger_server:remove_handler(HandlerId).
+
+-spec set_logger_config(Key,Value) -> ok | {error,term()} when
+ Key :: atom(),
+ Value :: term().
+set_logger_config(Key,Value) ->
+ logger_server:set_config(logger,Key,Value).
+
+-spec set_logger_config(Config) -> ok | {error,term()} when
+ Config :: config().
+set_logger_config(Config) ->
+ logger_server:set_config(logger,Config).
+
+-spec set_handler_config(HandlerId,Key,Value) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Key :: atom(),
+ Value :: term().
+set_handler_config(HandlerId,Key,Value) ->
+ logger_server:set_config(HandlerId,Key,Value).
+
+-spec set_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: config().
+set_handler_config(HandlerId,Config) ->
+ logger_server:set_config(HandlerId,Config).
+
+-spec get_logger_config() -> {ok,Config} when
+ Config :: config().
+get_logger_config() ->
+ logger_config:get(?LOGGER_TABLE,logger).
+
+-spec get_handler_config(HandlerId) -> {ok,{Module,Config}} | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: config().
+get_handler_config(HandlerId) ->
+ logger_config:get(?LOGGER_TABLE,HandlerId).
+
+-spec set_module_level(Module,Level) -> ok | {error,term()} when
+ Module :: module(),
+ Level :: level().
+set_module_level(Module,Level) ->
+ logger_server:set_module_level(Module,Level).
+
+-spec reset_module_level(Module) -> ok | {error,term()} when
+ Module :: module().
+reset_module_level(Module) ->
+ logger_server:reset_module_level(Module).
+
+%%%-----------------------------------------------------------------
+%%% Misc
+-spec compare_levels(Level1,Level2) -> eq | gt | lt when
+ Level1 :: level(),
+ Level2 :: level().
+compare_levels(Level,Level) when ?IS_LEVEL(Level) ->
+ eq;
+compare_levels(Level1,Level2) when ?IS_LEVEL(Level1), ?IS_LEVEL(Level2) ->
+ Int1 = logger_config:level_to_int(Level1),
+ Int2 = logger_config:level_to_int(Level2),
+ if Int1 < Int2 -> gt;
+ true -> lt
+ end;
+compare_levels(Level1,Level2) ->
+ erlang:error(badarg,[Level1,Level2]).
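%% Examples (severity decreases from emergency to debug):
%%
%%   logger:compare_levels(error, info),    %% -> gt
%%   logger:compare_levels(debug, notice),  %% -> lt
%%   logger:compare_levels(alert, alert).   %% -> eq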
+
+-spec set_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+set_process_metadata(Meta) when is_map(Meta) ->
+ _ = put(?LOGGER_META_KEY,Meta),
+ ok;
+set_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
+-spec get_process_metadata() -> Meta | undefined when
+ Meta :: metadata().
+get_process_metadata() ->
+ get(?LOGGER_META_KEY).
+
+-spec unset_process_metadata() -> ok.
+unset_process_metadata() ->
+ _ = erase(?LOGGER_META_KEY),
+ ok.
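%% Illustrative sketch (the key is an example): metadata stored in the
%% process dictionary is merged into every subsequent log event from
%% this process, see proc_meta/0 and log_allowed/4 below.
%%
%%   logger:set_process_metadata(#{request_id=>"abc-123"}),
%%   logger:info("handling request"),   %% meta now includes request_id
%%   logger:unset_process_metadata().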
+
+-spec i() -> #{logger=>config(),
+ handlers=>[{handler_id(),module(),config()}],
+ module_levels=>[{module(),level()}]}.
+i() ->
+ i(term).
+
+-spec i(term) -> #{logger=>config(),
+ handlers=>[{handler_id(),module(),config()}],
+ module_levels=>[{module(),level()}]};
+ (print) -> ok;
+ (string) -> iolist().
+i(_Action = print) ->
+ io:put_chars(i(string));
+i(_Action = string) ->
+ #{logger := #{level := Level, handlers := Handlers,
+ filters := Filters, filter_default := FilterDefault},
+ handlers := HandlerConfigs,
+ module_levels := Modules} = i(term),
+ [io_lib:format("Current logger configuration:~n", []),
+ io_lib:format(" Level: ~p~n",[Level]),
+ io_lib:format(" Filter Default: ~p~n", [FilterDefault]),
+ io_lib:format(" Filters: ~n", []),
+ print_filters(4, Filters),
+ io_lib:format(" Handlers: ~n", []),
+ print_handlers([C || {Id, _, _} = C <- HandlerConfigs,
+ lists:member(Id, Handlers)]),
+ io_lib:format(" Level set per module: ~n", []),
+ print_module_levels(Modules)
+ ];
+i(_Action = term) ->
+ {Logger, Handlers, Modules} = logger_config:get(tid()),
+ #{logger=>Logger,
+ handlers=>Handlers,
+ module_levels=>Modules}.
+
+print_filters(Indent, {Id, {Fun, Config}}) ->
+ io_lib:format("~sId: ~p~n"
+ "~s Fun: ~p~n"
+ "~s Config: ~p~n",[Indent, Id, Indent, Fun, Indent, Config]);
+print_filters(Indent, Filters) ->
+ IndentStr = io_lib:format("~.*s",[Indent, ""]),
+ lists:map(fun(Filter) ->print_filters(IndentStr, Filter) end, Filters).
+
+
+print_handlers({Id,Module,
+ #{level := Level,
+ filters := Filters, filter_default := FilterDefault,
+ formatter := {FormatterModule,FormatterConfig}} = Config}) ->
+ MyKeys = [filter_default, filters, formatter, level, id],
+ UnhandledConfig = maps:filter(fun(Key, _) ->
+ not lists:member(Key, MyKeys)
+ end, Config),
+ Unhandled = lists:map(fun({Key, Value}) ->
+ io_lib:format(" ~p: ~p~n",[Key, Value])
+ end, maps:to_list(UnhandledConfig)),
+ io_lib:format(" Id: ~p~n"
+ " Module: ~p~n"
+ " Level: ~p~n"
+ " Formatter:~n"
+ " Module: ~p~n"
+ " Config: ~p~n"
+ " Filter Default: ~p~n"
+ " Filters:~n~s"
+ " Handler Config:~n"
+ "~s"
+ "",[Id, Module, Level, FormatterModule, FormatterConfig,
+ FilterDefault, print_filters(8, Filters), Unhandled]);
+print_handlers(Handlers) ->
+ lists:map(fun print_handlers/1, Handlers).
+
+print_module_levels({Module,Level}) ->
+ io_lib:format(" Module: ~p~n"
+ " Level: ~p~n",
+ [Module,Level]);
+print_module_levels(ModuleLevels) ->
+ lists:map(fun print_module_levels/1, ModuleLevels).
+
+-spec setup_standard_handler() -> ok | {error,term()}.
+setup_standard_handler() ->
+ case get_logger_type() of
+ {ok,silent} ->
+ Level = get_logger_level(),
+ ok = set_logger_config(level,Level),
+ remove_handler(logger_simple);
+ {ok,Type} ->
+ Level = get_logger_level(),
+ ok = set_logger_config(level,Level),
+ Filters = get_logger_filters(),
+ setup_standard_handler(Type,#{level=>Level,
+ filter_default=>stop,
+ filters=>Filters});
+ Error ->
+ Error
+ end.
+
+-spec setup_standard_handler(Type,Config) -> ok | {error,term()} when
+ Type :: tty | standard_io | standard_error | {file,File} |
+ {file,File,Modes} | {disk_log,LogOpts} | false,
+ File :: file:filename(),
+ Modes :: [term()], % [file:mode()], or more specific?
+ Config :: config(),
+ LogOpts :: map().
+setup_standard_handler(false,#{level:=Level,filters:=Filters}) ->
+ case set_handler_config(logger_simple,level,Level) of
+ ok ->
+ set_handler_config(logger_simple,filters,Filters);
+ Error ->
+ Error
+ end;
+setup_standard_handler(Type,Config) ->
+ {Module,TypeConfig} = get_type_config(Type),
+ replace_simple_handler(?STANDARD_HANDLER,
+ Module,
+ maps:merge(Config,TypeConfig)).
+
+-spec replace_simple_handler(Id,Module,Config) -> ok | {error,term()} when
+ Id :: handler_id(),
+ Module :: module(),
+ Config :: config().
+replace_simple_handler(Id,Module,Config) ->
+ _ = code:ensure_loaded(Module),
+ DoBuffer = erlang:function_exported(Module,swap_buffer,2),
+ case add_handler(Id,Module,Config#{wait_for_buffer=>DoBuffer}) of
+ ok ->
+ if DoBuffer ->
+ {ok,Buffered} = logger_simple:get_buffer(),
+ _ = remove_handler(logger_simple),
+ Module:swap_buffer(?STANDARD_HANDLER,Buffered);
+ true ->
+ _ = remove_handler(logger_simple),
+ ok
+ end,
+ ok;
+ Error ->
+ Error
+ end.
+
+get_logger_type() ->
+ Type0 =
+ case application:get_env(kernel, logger_dest) of
+ undefined ->
+ application:get_env(kernel, error_logger);
+ T ->
+ T
+ end,
+ case Type0 of
+ {ok, tty} ->
+ {ok, tty};
+ {ok, {file, File}} when is_list(File) ->
+ {ok, {file, File}};
+ {ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
+ {ok, {file, File, Modes}};
+ {ok, {disk_log, File}} when is_list(File) ->
+ {ok, {disk_log, get_disk_log_config(File)}};
+ {ok, false} ->
+ {ok, false};
+ {ok, silent} ->
+ {ok, silent};
+ undefined ->
+ {ok, tty}; % default value
+ {ok, Bad} ->
+ {error,{bad_config, {kernel, {logger_dest, Bad}}}}
+ end.
+
+get_disk_log_config(File) ->
+ Config1 =
+ case application:get_env(kernel,logger_disk_log_maxfiles) of
+ undefined -> #{};
+ {ok,MF} -> #{max_no_files=>MF}
+ end,
+ Config2 =
+ case application:get_env(kernel,logger_disk_log_maxbytes) of
+ undefined -> Config1;
+ {ok,MB} -> Config1#{max_no_bytes=>MB}
+ end,
+ Config3 =
+ case application:get_env(kernel,logger_disk_log_type) of
+ undefined -> Config2;
+            {ok,T} -> Config2#{type=>T}
+ end,
+ Config3#{file=>File}.
+
+get_logger_level() ->
+ case application:get_env(kernel,logger_level) of
+ undefined -> info;
+ {ok,Level} when ?IS_LEVEL(Level) -> Level
+ end.
+
+get_logger_filters() ->
+ case application:get_env(kernel, logger_sasl_compatible, false) of
+ true ->
+ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp]);
+ false ->
+ Extra =
+ case application:get_env(kernel, logger_log_progress, false) of
+ true ->
+ [];
+ false ->
+ [{stop_progress,
+ {fun logger_filters:progress/2,stop}}]
+ end,
+ Extra ++ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp,sasl])
+ end.
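%% The environment variables read above map to kernel configuration.
%% An illustrative sys.config sketch (file name and limits are examples):
%%
%%   [{kernel,
%%     [{logger_dest, {disk_log, "./my_app.log"}},
%%      {logger_disk_log_type, wrap},
%%      {logger_disk_log_maxfiles, 4},
%%      {logger_disk_log_maxbytes, 1048576},
%%      {logger_level, info},
%%      {logger_sasl_compatible, false},
%%      {logger_log_progress, false}]}].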
+
+get_type_config({disk_log,LogOpts}) ->
+ {logger_disk_log_h,#{disk_log_opts=>LogOpts}};
+get_type_config(tty) ->
+ %% This is only for backwards compatibility with error_logger and
+ %% old kernel and sasl environment variables
+ get_type_config(standard_io);
+get_type_config(Type) when Type==standard_io;
+ Type==standard_error;
+ element(1,Type)==file ->
+ {logger_std_h,#{logger_std_h=>#{type=>Type}}};
+get_type_config(Type) ->
+ {error,{illegal_logger_type,Type}}.
+
+%%%-----------------------------------------------------------------
+-spec limit_term(term()) -> term().
+
+limit_term(Term) ->
+ try get_format_depth() of
+ unlimited -> Term;
+ D -> io_lib:limit_term(Term, D)
+ catch error:badarg ->
+ %% This could happen during system termination, after
+ %% application_controller process is dead.
+ unlimited
+ end.
+
+-spec get_format_depth() -> 'unlimited' | pos_integer().
+
+get_format_depth() ->
+ Depth =
+ case application:get_env(kernel, logger_format_depth) of
+ {ok, D} when is_integer(D) ->
+ D;
+ undefined ->
+ case application:get_env(kernel, error_logger_format_depth) of
+ {ok, D} when is_integer(D) ->
+ D;
+ undefined ->
+ unlimited
+ end
+ end,
+ max(10, Depth).
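%% Note: when no depth is configured, Depth is the atom 'unlimited', and
%% since atoms compare greater than integers in Erlang term order,
%% max(10, unlimited) still returns 'unlimited'.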
+
+-spec get_max_size() -> 'unlimited' | pos_integer().
+
+get_max_size() ->
+ case application:get_env(kernel, logger_max_size) of
+ {ok, Size} when is_integer(Size) ->
+ max(50, Size);
+ undefined ->
+ unlimited
+ end.
+
+-spec get_utc_config() -> boolean().
+
+get_utc_config() ->
+ %% Kernel's logger_utc configuration overrides SASL utc_log, which
+ %% in turn overrides stdlib config - in order to have uniform
+ %% timestamps in log messages
+ case application:get_env(kernel, logger_utc) of
+ {ok, Val} -> Val;
+ undefined ->
+ case application:get_env(sasl, utc_log) of
+ {ok, Val} -> Val;
+ undefined ->
+ case application:get_env(stdlib, utc_log) of
+ {ok, Val} -> Val;
+ undefined -> false
+ end
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+do_log(warning,Msg,Meta) ->
+ do_log_1(error_logger:warning_map(),Msg,Meta);
+do_log(Level,Msg,Meta) ->
+ do_log_1(Level,Msg,Meta).
+
+do_log_1(Level,Msg,#{mfa:={Module,_,_}}=Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level,Module) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end;
+do_log_1(Level,Msg,Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end.
+
+-spec log_allowed(Location,Level,Msg,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Msg :: {msg_fun(),term()} |
+ {io:format(),[term()]} |
+ report() |
+ unicode:chardata(),
+ Meta :: metadata().
+log_allowed(Location,Level,{Fun,FunArgs},Meta) when is_function(Fun,1) ->
+ try Fun(FunArgs) of
+ Msg={Format,Args} when is_list(Format), is_list(Args) ->
+ log_allowed(Location,Level,Msg,Meta);
+ Report when ?IS_REPORT(Report) ->
+ log_allowed(Location,Level,Report,Meta);
+ String when ?IS_STRING(String) ->
+ log_allowed(Location,Level,String,Meta);
+ Other ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{Fun,FunArgs},Other]},
+ Meta)
+ catch C:R ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{Fun,FunArgs},{C,R}]},
+ Meta)
+ end;
+log_allowed(Location,Level,Msg,Meta0) when is_map(Meta0) ->
+ %% Metadata priorities are:
+ %% Location (added in API macros) - will be overwritten by process
+ %% metadata (set by set_process_metadata/1), which in turn will be
+ %% overwritten by the metadata given as argument in the log call
+ %% (function or macro).
+ Meta = add_default_metadata(
+ maps:merge(Location,maps:merge(proc_meta(),Meta0))),
+ case node(maps:get(gl,Meta)) of
+ Node when Node=/=node() ->
+ log_remote(Node,Level,Msg,Meta),
+ do_log_allowed(Level,Msg,Meta);
+ _ ->
+ do_log_allowed(Level,Msg,Meta)
+ end.
+
+do_log_allowed(Level,{Format,Args}=Msg,Meta)
+ when ?IS_LEVEL(Level),
+ is_list(Format),
+ is_list(Args),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>Msg,meta=>Meta},tid());
+do_log_allowed(Level,Report,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_REPORT(Report),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{report,Report},meta=>Meta},
+ tid());
+do_log_allowed(Level,String,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_STRING(String),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{string,String},meta=>Meta},
+ tid()).
+tid() ->
+ ets:whereis(?LOGGER_TABLE).
+
+log_remote(Node,Level,{Format,Args},Meta) ->
+ log_remote(Node,{log,Level,Format,Args,Meta});
+log_remote(Node,Level,Msg,Meta) ->
+ log_remote(Node,{log,Level,Msg,Meta}).
+
+log_remote(Node,Request) ->
+ {logger,Node} ! Request,
+ ok.
+
+add_default_metadata(Meta) ->
+ add_default_metadata([pid,gl,time],Meta).
+
+add_default_metadata([Key|Keys],Meta) ->
+ case maps:is_key(Key,Meta) of
+ true ->
+ add_default_metadata(Keys,Meta);
+ false ->
+ add_default_metadata(Keys,Meta#{Key=>default(Key)})
+ end;
+add_default_metadata([],Meta) ->
+ Meta.
+
+proc_meta() ->
+ case get_process_metadata() of
+ ProcMeta when is_map(ProcMeta) -> ProcMeta;
+ _ -> #{}
+ end.
+
+default(pid) -> self();
+default(gl) -> group_leader();
+default(time) -> erlang:monotonic_time(microsecond).
+
+%% Remove everything up to and including this module from the stacktrace
+filter_stacktrace(Module,[{Module,_,_,_}|_]) ->
+ [];
+filter_stacktrace(Module,[H|T]) ->
+ [H|filter_stacktrace(Module,T)];
+filter_stacktrace(_,[]) ->
+ [].
diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl
new file mode 100644
index 0000000000..d9f5aa6faf
--- /dev/null
+++ b/lib/kernel/src/logger_backend.erl
@@ -0,0 +1,133 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_backend).
+
+-export([log_allowed/2]).
+
+-include("logger_internal.hrl").
+
+-define(OWN_KEYS,[level,filters,filter_default,handlers]).
+
+%%%-----------------------------------------------------------------
+%%% The default logger backend
+log_allowed(Log, Tid) ->
+ {ok,Config} = logger_config:get(Tid,logger),
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(logger,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Handlers = maps:get(handlers,Config,[]),
+ call_handlers(Log1,Handlers,Tid)
+ end,
+ ok.
+
+call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) ->
+ case logger_config:get(Tid,Id,Level) of
+ {ok,{Module,Config}} ->
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(Id,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Config1 = maps:without(?OWN_KEYS,Config),
+ try Module:log(Log1,Config1)
+ catch C:R:S ->
+ case logger:remove_handler(Id) of
+ ok ->
+ logger:internal_log(
+ error,{removed_failing_handler,Id}),
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,removed_failing_handler},
+ {handler,{Id,Module}},
+ {log,Log1},
+ {config,Config1},
+ {reason,{C,R,filter_stacktrace(S)}}]);
+ {error,{not_found,_}} ->
+ %% Probably already removed by other client
+ %% Don't report again
+ ok;
+ {error,Reason} ->
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,remove_handler_failed},
+ {reason,Reason}])
+ end
+ end
+ end;
+ _ ->
+ ok
+ end,
+ call_handlers(Log,Handlers,Tid);
+call_handlers(_Log,[],_Tid) ->
+ ok.
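%% A minimal sketch of a handler module as invoked from call_handlers/3
%% above: for each event that passes the filters, Module:log(Log,Config)
%% is called. The module name and output format are hypothetical; a real
%% handler would typically also implement the configuration callbacks
%% used elsewhere in this patch (adding_handler/2, removing_handler/1).
%% It could be registered with, for example,
%% logger:add_handler(my_console, my_console_h,
%%                    #{level=>info, filter_default=>log}).

-module(my_console_h).
-export([log/2]).

-spec log(logger:log(), logger:config()) -> ok.
log(#{level:=Level, msg:=Msg, meta:=_Meta}, _Config) ->
    %% Msg is {Format,Args}, {report,Report} or {string,String};
    %% print it as a term for simplicity.
    io:format("[~p] ~tp~n", [Level, Msg]).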
+
+apply_filters(Owner,Log,Filters,Config) ->
+ case do_apply_filters(Owner,Log,Filters,ignore) of
+ stop ->
+ stop;
+ ignore ->
+ case maps:get(filter_default,Config) of
+ log ->
+ Log;
+ stop ->
+ stop
+ end;
+ Log1 ->
+ Log1
+ end.
+
+do_apply_filters(Owner,Log,[{_Id,{FilterFun,FilterArgs}}=Filter|Filters],State) ->
+ try FilterFun(Log,FilterArgs) of
+ stop ->
+ stop;
+ ignore ->
+ do_apply_filters(Owner,Log,Filters,State);
+ Log1=#{level:=Level,msg:=Msg,meta:=Meta}
+ when is_atom(Level), ?IS_MSG(Msg), is_map(Meta) ->
+ do_apply_filters(Owner,Log1,Filters,log);
+ Bad ->
+ handle_filter_failed(Filter,Owner,Log,{bad_return_value,Bad})
+ catch C:R:S ->
+ handle_filter_failed(Filter,Owner,Log,{C,R,filter_stacktrace(S)})
+ end;
+do_apply_filters(_Owner,_Log,[],ignore) ->
+ ignore;
+do_apply_filters(_Owner,Log,[],log) ->
+ Log.
+
+handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) ->
+ case logger_server:remove_filter(Owner,Id) of
+ ok ->
+ logger:internal_log(error,{removed_failing_filter,Id}),
+ ?LOG_INTERNAL(debug,
+ [{logger,removed_failing_filter},
+ {filter,Filter},
+ {owner,Owner},
+ {log,Log},
+ {reason,Reason}]);
+ _ ->
+ ok
+ end,
+ ignore.
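%% A minimal sketch of a filter as applied by do_apply_filters/4 above:
%% the fun gets the log event and the filter argument and returns stop,
%% ignore, or a (possibly modified) event. The filter id and the domain
%% value are examples.
%%
%%   logger:add_logger_filter(stop_noisy_domain,
%%       {fun(#{meta:=#{domain:=[noisy|_]}}, _Arg) -> stop;
%%           (_Log, _Arg) -> ignore
%%        end, []}).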
+
+filter_stacktrace(Stacktrace) ->
+ logger:filter_stacktrace(?MODULE,Stacktrace).
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
new file mode 100644
index 0000000000..799aea9617
--- /dev/null
+++ b/lib/kernel/src/logger_config.erl
@@ -0,0 +1,151 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_config).
+
+-export([new/1,delete/2,
+ exist/2,
+ allow/2,allow/3,
+ get/2, get/3, get/1,
+ create/3, create/4, set/3,
+ set_module_level/3,reset_module_level/2,
+ cache_module_level/2,
+ level_to_int/1]).
+
+-include("logger_internal.hrl").
+
+new(Name) ->
+ _ = ets:new(Name,[set,protected,named_table]),
+ ets:whereis(Name).
+
+delete(Tid,Id) ->
+ ets:delete(Tid,table_key(Id)).
+
+allow(Tid,Level,Module) ->
+ LevelInt = level_to_int(Level),
+ case ets:lookup(Tid,Module) of
+ [{Module,{ModLevel,cached}}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [{Module,ModLevel}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [] ->
+ logger_server:cache_module_level(Module),
+ allow(Tid,Level);
+ _ ->
+ false
+ end.
+
+allow(Tid,Level) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2),
+ level_to_int(Level) =< GlobalLevelInt.
+
+exist(Tid,What) ->
+ ets:member(Tid,table_key(What)).
+
+get(Tid,What) ->
+ case ets:lookup(Tid,table_key(What)) of
+ [{_,_,Config}] ->
+ {ok,Config};
+ [{_,_,Config,Module}] ->
+ {ok,{Module,Config}};
+ [] ->
+ {error,{not_found,What}}
+ end.
+
+get(Tid,What,Level) ->
+ MS = [{{table_key(What),'$1','$2'}, % logger config
+ [{'>=','$1',level_to_int(Level)}],
+ ['$2']},
+ {{table_key(What),'$1','$2','$3'}, % handler config
+ [{'>=','$1',level_to_int(Level)}],
+ [{{'$3','$2'}}]}],
+ case ets:select(Tid,MS) of
+ [] -> error;
+ [Data] -> {ok,Data}
+ end.
+
+create(Tid,What,Module,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config,Module}).
+create(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config}).
+
+set(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ %% Should do this only if the level has actually changed. Possibly
+ %% overwrite instead of delete?
+ case What of
+ logger ->
+ _ = ets:select_delete(Tid,[{{'_',{'$1',cached}},
+ [{'=/=','$1',LevelInt}],
+ [true]}]),
+ ok;
+ _ ->
+ ok
+ end,
+ ets:update_element(Tid,table_key(What),[{2,LevelInt},{3,Config}]),
+ ok.
+
+set_module_level(Tid,Module,Level) ->
+ ets:insert(Tid,{Module,level_to_int(Level)}),
+ ok.
+
+reset_module_level(Tid,Module) ->
+    ets:delete(Tid,Module), % should possibly overwrite instead of delete?
+ ok.
+
+cache_module_level(Tid,Module) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2),
+ ets:insert_new(Tid,{Module,{GlobalLevelInt,cached}}),
+ ok.
+
+get(Tid) ->
+ {ok,Logger} = get(Tid,logger),
+ HMS = [{{table_key('$1'),'_','$2','$3'},[],[{{'$1','$3','$2'}}]}],
+ Handlers = ets:select(Tid,HMS),
+ MMS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[{{'$1','$2'}}]}],
+ Modules = ets:select(Tid,MMS),
+ {Logger,Handlers,[{M,int_to_level(L)} || {M,L} <- Modules]}.
+
+level_to_int(emergency) -> ?EMERGENCY;
+level_to_int(alert) -> ?ALERT;
+level_to_int(critical) -> ?CRITICAL;
+level_to_int(error) -> ?ERROR;
+level_to_int(warning) -> ?WARNING;
+level_to_int(notice) -> ?NOTICE;
+level_to_int(info) -> ?INFO;
+level_to_int(debug) -> ?DEBUG.
+
+int_to_level(?EMERGENCY) -> emergency;
+int_to_level(?ALERT) -> alert;
+int_to_level(?CRITICAL) -> critical;
+int_to_level(?ERROR) -> error;
+int_to_level(?WARNING) -> warning;
+int_to_level(?NOTICE) -> notice;
+int_to_level(?INFO) -> info;
+int_to_level(?DEBUG) -> debug.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+table_key(logger) -> ?LOGGER_KEY;
+table_key(HandlerId) -> {?HANDLER_KEY,HandlerId}.
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
new file mode 100644
index 0000000000..3b71f936d8
--- /dev/null
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -0,0 +1,694 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+%%% API
+-export([start_link/3, info/1, disk_log_sync/1, reset/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2,
+ adding_handler/2, removing_handler/1,
+ changing_config/3, swap_buffer/2]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a disk_log handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added (as a result of calling add/3).
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+-spec disk_log_sync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+disk_log_sync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+disk_log_sync(Name) ->
+ {error,{badarg,{disk_log_sync,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(Name, Config) ->
+ case check_config(adding, Name, Config) of
+ {ok, Config1} ->
+ %% create initial handler state by merging defaults with config
+ HConfig = maps:get(?MODULE, Config1, #{}),
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ case start(Name, Config1, HState) of
+ ok ->
+ %% Make sure wait_for_buffer is not stored, so we
+ %% won't hang and wait for buffer on a restart
+ {ok, maps:remove(wait_for_buffer,Config1)};
+ Error ->
+ Error
+ end;
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = HState,
+ {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}
+ end;
+ Error ->
+ Error
+ end.
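%% Illustrative sketch of installing this handler (handler id, file name
%% and limits are examples; see check_config/3 and merge_default_logopts/2
%% below for the recognized options):
%%
%%   logger:add_handler(my_disk_log, logger_disk_log_h,
%%                      #{level => info,
%%                        filter_default => log,
%%                        disk_log_opts => #{file => "./my_app.log",
%%                                           type => wrap,
%%                                           max_no_files => 4,
%%                                           max_no_bytes => 1048576}}).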
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(Name,
+ OldConfig=#{id:=Id, disk_log_opts:=DLOpts},
+ NewConfig=#{id:=Id, disk_log_opts:=DLOpts}) ->
+ case check_config(changing, Name, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ HError -> HError
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end;
+changing_config(_Name, OldConfig, NewConfig) ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}.
+
+check_config(adding, Name, Config0) ->
+ %% Merge in defaults on top level
+ Config = maps:merge(#{id => Name}, Config0),
+ %% Merge in defaults on handler level
+ LogOpts0 = maps:get(disk_log_opts, Config, #{}),
+ LogOpts = merge_default_logopts(Name, LogOpts0),
+ case check_log_opts(maps:to_list(LogOpts)) of
+ ok ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok ->
+ {ok,Config#{disk_log_opts=>LogOpts,
+ ?MODULE=>MyConfig}};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+check_config(changing, _Name, Config) ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+merge_default_logopts(Name, LogOpts) ->
+ Type = maps:get(type, LogOpts, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Default = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Default,LogOpts).
+
+check_log_opts([{file,File}|Opts]) when is_list(File) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_files,undefined}|Opts]) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_files,N}|Opts]) when is_integer(N), N>0 ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_bytes,infinity}|Opts]) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_bytes,N}|Opts]) when is_integer(N), N>0 ->
+ check_log_opts(Opts);
+check_log_opts([{type,Type}|Opts]) when Type==wrap; Type==halt ->
+ check_log_opts(Opts);
+check_log_opts([Invalid|_]) ->
+ {error,{invalid_config,disk_log_opt,Invalid}};
+check_log_opts([]) ->
+ ok.
+
+check_my_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_my_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_my_config([]) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Name) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Get buffer when swapping from simple handler
+swap_buffer(Name,Buffer) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ Name ! {buffer,Buffer}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(Log, Config) -> ok | dropped when
+ Log :: logger:log(),
+ Config :: logger:config().
+
+log(Log,Config=#{id:=Name}) ->
+ %% if the handler has crashed, we must drop this request
+ %% and hope the handler restarts so we can try again
+ true = is_pid(whereis(Name)),
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ logger_h_common:call_cast_or_drop(Name, Bin).
+
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config = #{disk_log_opts := LogOpts},
+ State = #{dl_sync_int := DLSyncInt}]) ->
+ register(Name, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case open_disk_log(Name, LogOpts) of
+ ok ->
+ catch ets:new(Name, [public, named_table]),
+ ?set_mode(Name, async),
+ proc_lib:init_ack({ok,self()}),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{id => Name,
+ mode => async,
+ dl_sync => DLSyncInt,
+ log_opts => LogOpts,
+ last_qlen => 0,
+ last_log_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}),
+ gen_server:cast(self(), {repeated_disk_log_sync,T0}),
+ enter_loop(Config, State1);
+ Error ->
+ logger_h_common:error_notify({open_disk_log,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+enter_loop(#{wait_for_buffer:=true}=Config,State) ->
+ State1 =
+ receive
+ {buffer,Buffer} ->
+ lists:foldl(
+ fun(Log,S) ->
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ {_,S1} = do_log(Bin,cast,S),
+ S1
+ end,
+ State,
+ Buffer)
+ end,
+ gen_server:enter_loop(?MODULE,[],State1);
+enter_loop(_Config,State) ->
+ gen_server:enter_loop(?MODULE,[],State).
+
+%% This is the synchronous log request.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply, Result, State1};
+
+handle_call(disk_log_sync, _From, State = #{id := Name}) ->
+ State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State),
+ {reply, Result, State1};
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0,
+ last_log_ts := LastLogTS}) ->
+ HConfig = maps:get(?MODULE, NewConfig, #{}),
+ State1 = #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ _ = gen_server:cast(self(), {repeated_disk_log_sync,
+ LastLogTS})
+ end,
+ {reply, ok, State1};
+ false ->
+ {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp(),
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+
+%% This is the asynchronous log request.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% request, the repeat operation must be active!
+handle_cast({repeated_disk_log_sync,LastLogTS0},
+ State = #{id := Name,
+ filesync_repeat_interval := FSyncInt,
+ last_log_ts := LastLogTS1}) ->
+ State1 =
+ if is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ NewState = if LastLogTS1 == LastLogTS0 ->
+ State;
+ true ->
+ disk_log_sync(Name, State)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),
+ {repeated_disk_log_sync,LastLogTS1}]),
+ NewState#{rep_sync_tref => TRef};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+%% The disk log owner must handle status messages from disk_log.
+handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) ->
+ {noreply, State};
+handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, full},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}),
+ {noreply, State#{prev_disk_log_info => full}};
+handle_info({disk_log, _Node, Log, Info = {error_status,_Status}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+
+handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) ->
+ {noreply, State};
+
+handle_info(_, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id := Name}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ _ = close_disk_log(Name, normal),
+ logger_h_common:stop_or_restart(Name, Reason, State).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+%%%-----------------------------------------------------------------
+%%%
+get_init_state() ->
+ #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen => ?FLUSH_REQS_QLEN,
+ enable_burst_limit => ?ENABLE_BURST_LIMIT,
+ burst_limit_size => ?BURST_LIMIT_SIZE,
+ burst_window_time => ?BURST_WINDOW_TIME,
+ enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after => ?HANDLER_RESTART_AFTER,
+ dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+%%%-----------------------------------------------------------------
+%%% Add a disk_log handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Config is the logger:config() map containing a sub map with any of
+%%% the following associations:
+%%%
+%%% Config = #{disk_log_opts => #{file => file:filename(),
+%%% max_no_bytes => integer(),
+%%% max_no_files => integer(),
+%%% type => wrap | halt}}.
+%%%
+%%% This map will be merged with the logger configuration data for
+%%% the disk_log LogName. If type == halt, then max_no_files is
+%%% ignored.
+%%%
+%%% Handler-specific config should be provided in a sub map associated
+%%% with a key named after this module, e.g.:
+%%%
+%%% Config = #{logger_disk_log_h => #{toggle_sync_qlen => 50}}
+%%%
+%%% The disk_log handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
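+%%%
+%%% As a minimal sketch (assuming the logger:add_handler/3 entry point;
+%%% the handler id and all option values below are illustrative only),
+%%% such a handler could be added with:
+%%%
+%%%   logger:add_handler(my_disk_log_h, logger_disk_log_h,
+%%%                      #{disk_log_opts =>
+%%%                            #{file => "./my_disk_log",
+%%%                              type => wrap,
+%%%                              max_no_files => 4,
+%%%                              max_no_bytes => 10000},
+%%%                        logger_disk_log_h =>
+%%%                            #{toggle_sync_qlen => 50}}).
+%%%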
+start(Name, Config, HandlerState) ->
+ LoggerDLH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerDLH) of
+ {ok,_} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ _ = gen_server:call(Name,stop),
+ _ = supervisor:delete_child(logger_sup, Name),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_dl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload between every request (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize requests have been handled
+do_log(Bin, CallOrCast, State = #{id:=Name, mode := _Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% request to react quickly to large bursts of requests and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this function is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log request
+ ?observe(Name,{flushed,_NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,_NewFlushed,
+ State1#{mode => ?set_mode(Name,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this function is called to write to disk_log
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{dl_sync := DLSync,
+ dl_sync_int := DLSyncInt,
+ last_qlen := LastQLen,
+ last_log_ts := T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log requests
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+ %% only send a synchronous request to the disk_log process
+ %% every DLSyncInt time, to give the handler time between
+ %% writes so it can keep up with incoming messages
+ {Status,LastQLen1,State1} =
+ if DoWrite, DLSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, element(2,process_info(self(),message_queue_len)),
+ NewState};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, LastQLen, NewState};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped, LastQLen, State}
+ end,
+
+ %% Check if the time since the previous log request is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do a filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+ %% time between two consecutive log requests is short and no new
+ %% request comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1,State2} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ {?change_mode(Name,Mode,async), 0, disk_log_sync(Name,State1)};
+ true ->
+ {Mode, BurstMsgCount,State1}
+ end,
+
+ State3 =
+ ?update_calls_or_casts(_CallOrCast,1,State2),
+ State4 =
+ ?update_max_time(Time,
+ State3#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}),
+ {Status,State4}.
+
+
+open_disk_log(Name, LogOpts) ->
+ #{file := File,
+ max_no_bytes := MaxNoBytes,
+ max_no_files := MaxNoFiles,
+ type := Type} = LogOpts,
+ case filelib:ensure_dir(File) of
+ ok ->
+ Size =
+ if Type == halt -> MaxNoBytes;
+ Type == wrap -> {MaxNoBytes,MaxNoFiles}
+ end,
+ Opts = [{name, Name},
+ {file, File},
+ {size, Size},
+ {type, Type},
+ {linkto, self()},
+ {repair, false},
+ {format, external},
+ {notify, true},
+ {quiet, true},
+ {mode, read_write}],
+ case disk_log:open(Opts) of
+ {ok,Name} ->
+ ok;
+ Error = {error,_Reason} ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+close_disk_log(Name, _) ->
+ _ = ?disk_log_sync(Name),
+ _ = disk_log:lclose(Name),
+ ok.
+
+disk_log_write(Name, Bin, State) ->
+ Result =
+ case ?disk_log_blog(Name, Bin) of
+ ok ->
+ ok;
+ LogError ->
+ _ = case maps:get(prev_log_result, State) of
+ LogError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,log,
+ LogOpts,
+ LogError})
+ end,
+ LogError
+ end,
+ State#{prev_log_result => Result}.
+
+disk_log_sync(Name, State) ->
+ Result =
+ case ?disk_log_sync(Name) of
+ ok ->
+ ok;
+ SyncError ->
+ _ = case maps:get(prev_sync_result, State) of
+ SyncError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,sync,
+ LogOpts,
+ SyncError})
+ end,
+ SyncError
+ end,
+ State#{prev_sync_result => Result}.
+
+error_notify_new(Info,Info, _Term) ->
+ ok;
+error_notify_new(_Info0,_Info1, Term) ->
+ logger_h_common:error_notify(Term).
diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl
new file mode 100644
index 0000000000..85928f0fd6
--- /dev/null
+++ b/lib/kernel/src/logger_filters.erl
@@ -0,0 +1,123 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters).
+
+-export([domain/2,
+ level/2,
+ progress/2,
+ remote_gl/2]).
+
+-include("logger_internal.hrl").
+-define(IS_ACTION(A), (A==log orelse A==stop)).
+
+-spec domain(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: {Action,Compare,MatchDomain},
+ Action :: log | stop,
+ Compare :: prefix_of | starts_with | equals | no_domain,
+ MatchDomain :: list(atom()).
+domain(#{meta:=Meta}=Log,{Action,Compare,MatchDomain})
+ when ?IS_ACTION(Action) andalso
+ (Compare==prefix_of orelse
+ Compare==starts_with orelse
+ Compare==equals orelse
+ Compare==no_domain) andalso
+ is_list(MatchDomain) ->
+ filter_domain(Compare,Meta,MatchDomain,on_match(Action,Log));
+domain(Log,Extra) ->
+ erlang:error(badarg,[Log,Extra]).
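+
+%% An illustrative filter specification using domain/2, in the same
+%% form as ?DEFAULT_HANDLER_FILTERS in logger_internal.hrl (the filter
+%% id otp_domain and the domain list are example values only):
+%%
+%%   {otp_domain,{fun logger_filters:domain/2,{log,prefix_of,[beam,erlang,otp]}}}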
+
+-spec level(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: {Action,Operator,MatchLevel},
+ Action :: log | stop,
+ Operator :: neq | eq | lt | gt | lteq | gteq,
+ MatchLevel :: logger:level().
+level(#{level:=L1}=Log,{Action,Op,L2})
+ when ?IS_ACTION(Action) andalso
+ (Op==neq orelse
+ Op==eq orelse
+ Op==lt orelse
+ Op==gt orelse
+ Op==lteq orelse
+ Op==gteq) andalso
+ ?IS_LEVEL(L2) ->
+ filter_level(Op,L1,L2,on_match(Action,Log));
+level(Log,Extra) ->
+ erlang:error(badarg,[Log,Extra]).
+
+-spec progress(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: log | stop.
+progress(Log,Action) when ?IS_ACTION(Action) ->
+ filter_progress(Log,on_match(Action,Log));
+progress(Log,Action) ->
+ erlang:error(badarg,[Log,Action]).
+
+-spec remote_gl(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: log | stop.
+remote_gl(Log,Action) when ?IS_ACTION(Action) ->
+ filter_remote_gl(Log,on_match(Action,Log));
+remote_gl(Log,Action) ->
+ erlang:error(badarg,[Log,Action]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+filter_domain(prefix_of,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(Domain,MatchDomain,OnMatch);
+filter_domain(starts_with,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(MatchDomain,Domain,OnMatch);
+filter_domain(equals,#{domain:=Domain},Domain,OnMatch) ->
+ OnMatch;
+filter_domain(Action,Meta,_,OnMatch) ->
+ case maps:is_key(domain,Meta) of
+ false when Action==no_domain -> OnMatch;
+ _ -> ignore
+ end.
+
+is_prefix(D1,D2,OnMatch) when is_list(D1), is_list(D2) ->
+ case lists:prefix(D1,D2) of
+ true -> OnMatch;
+ false -> ignore
+ end;
+is_prefix(_,_,_) ->
+ ignore.
+
+filter_level(Op,L1,L2,OnMatch) ->
+ case logger:compare_levels(L1,L2) of
+ eq when Op==eq; Op==lteq; Op==gteq -> OnMatch;
+ lt when Op==lt; Op==lteq; Op==neq -> OnMatch;
+ gt when Op==gt; Op==gteq; Op==neq -> OnMatch;
+ _ -> ignore
+ end.
+
+filter_progress(#{msg:={report,#{label:={_,progress}}}},OnMatch) ->
+ OnMatch;
+filter_progress(_,_) ->
+ ignore.
+
+filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() ->
+ OnMatch;
+filter_remote_gl(_,_) ->
+ ignore.
+
+on_match(log,Log) -> Log;
+on_match(stop,_) -> stop.
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
new file mode 100644
index 0000000000..386e7832e2
--- /dev/null
+++ b/lib/kernel/src/logger_formatter.erl
@@ -0,0 +1,295 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter).
+
+-export([format/2]).
+
+-include("logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type template() :: [atom()|tuple()|string()].
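+
+%% A template is a list of metadata keys (atoms), nested key paths
+%% (tuples), literal strings, and the special atoms level and msg.
+%% An illustrative template, which happens to equal
+%% ?DEFAULT_FORMAT_TEMPLATE_SINGLE in logger_internal.hrl:
+%%
+%%   [time," ",level,": ",msg,"\n"]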
+
+%%%-----------------------------------------------------------------
+%%% API
+-spec format(Log,Config) -> String when
+ Log :: logger:log(),
+ Config :: #{single_line=>boolean(),
+ legacy_header=>boolean(),
+ report_cb=>fun((logger:report()) -> {io:format(),[term()]}),
+ chars_limit=>pos_integer()| unlimited,
+ max_size=>pos_integer() | unlimited,
+ depth=>pos_integer() | unlimited,
+ template=>template(),
+ utc=>boolean()},
+ String :: string().
+format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
+ when is_map(Config0) ->
+ Config = add_default_config(Config0),
+ Meta1 = maybe_add_legacy_header(Level,Meta,Config),
+ Template = maps:get(template,Config),
+ {BT,AT0} = lists:splitwith(fun(msg) -> false; (_) -> true end, Template),
+ {DoMsg,AT} =
+ case AT0 of
+ [msg|Rest] -> {true,Rest};
+ _ ->{false,AT0}
+ end,
+ B = do_format(Level,"",Meta1,BT,Config),
+ A = do_format(Level,"",Meta1,AT,Config),
+ MsgStr =
+ if DoMsg ->
+ Config1 =
+ case maps:get(chars_limit,Config) of
+ unlimited ->
+ Config;
+ Size0 ->
+ Size =
+ case Size0 - string:length([B,A]) of
+ S when S>=0 -> S;
+ _ -> 0
+ end,
+ Config#{chars_limit=>Size}
+ end,
+ MsgStr0 = format_msg(Msg0,Meta1,Config1),
+ case maps:get(single_line,Config) of
+ true ->
+ %% Trim leading and trailing whitespace, and replace
+ %% newlines with ", "
+ re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ",
+ [{return,list},global]);
+ _false ->
+ MsgStr0
+ end;
+ true ->
+ ""
+ end,
+ truncate(B ++ MsgStr ++ A,maps:get(max_size,Config)).
+
+do_format(Level,Msg,Data,[level|Format],Config) ->
+ [to_string(level,Level,Config)|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[msg|Format],Config) ->
+ [Msg|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[Key|Format],Config) when is_atom(Key); is_tuple(Key) ->
+ Value = value(Key,Data),
+ [to_string(Key,Value,Config)|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[Str|Format],Config) ->
+ [Str|do_format(Level,Msg,Data,Format,Config)];
+do_format(_Level,_Msg,_Data,[],_Config) ->
+ [].
+
+value(Key,Meta) when is_atom(Key), is_map(Meta) ->
+ maps:get(Key,Meta,"");
+value(Key,_) when is_atom(Key) ->
+ "";
+value(Keys,Meta) when is_tuple(Keys) ->
+ value(tuple_to_list(Keys),Meta);
+value([Key|Keys],Meta) ->
+ value(Keys,value(Key,Meta));
+value([],Value) ->
+ Value.
+
+to_string(time,Time,Config) ->
+ format_time(Time,Config);
+to_string(mfa,MFA,_Config) ->
+ format_mfa(MFA);
+to_string(_,Value,_Config) ->
+ to_string(Value).
+
+to_string(X) when is_atom(X) ->
+ atom_to_list(X);
+to_string(X) when is_integer(X) ->
+ integer_to_list(X);
+to_string(X) when is_pid(X) ->
+ pid_to_list(X);
+to_string(X) when is_reference(X) ->
+ ref_to_list(X);
+to_string(X) when is_list(X) ->
+ case io_lib:printable_unicode_list(lists:flatten(X)) of
+ true -> X;
+ _ -> io_lib:format("~tp",[X])
+ end;
+to_string(X) ->
+ io_lib:format("~tp",[X]).
+
+format_msg({string,Chardata},Meta,Config) ->
+ try unicode:characters_to_list(Chardata)
+ catch _:_ -> format_msg({"INVALID STRING: ~tp",[Chardata]},Meta,Config)
+ end;
+format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) when is_function(Fun,1) ->
+ format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config));
+format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) ->
+ try Fun(Report) of
+ {Format,Args} when is_list(Format), is_list(Args) ->
+ format_msg({Format,Args},maps:remove(report_cb,Meta),Config);
+ Other ->
+ format_msg({"REPORT_CB ERROR: ~tp; Returned: ~tp",
+ [Report,Other]},Meta,Config)
+ catch C:R ->
+ format_msg({"REPORT_CB CRASH: ~tp; Reason: ~tp",
+ [Report,{C,R}]},Meta,Config)
+ end;
+format_msg({report,Report},Meta,Config) ->
+ format_msg({report,Report},
+ Meta#{report_cb=>fun logger:format_report/1},
+ Config);
+format_msg(Msg,_Meta,#{depth:=Depth,chars_limit:=CharsLimit}) ->
+ limit_size(Msg, Depth, CharsLimit).
+
+limit_size(Msg,Depth,unlimited) ->
+ limit_size(Msg,Depth,[]);
+limit_size(Msg,Depth,CharsLimit) when is_integer(CharsLimit) ->
+ limit_size(Msg,Depth,[{chars_limit,CharsLimit}]);
+limit_size({Format,Args},unlimited,Opts) when is_list(Opts) ->
+ try io_lib:format(Format,Args,Opts)
+ catch _:_ ->
+ io_lib:format("FORMAT ERROR: ~tp - ~tp",[Format,Args],Opts)
+ end;
+limit_size({Format0,Args},Depth,Opts) when is_integer(Depth) ->
+ try
+ Format1 = io_lib:scan_format(Format0, Args),
+ Format = limit_format(Format1, Depth),
+ io_lib:build_text(Format,Opts)
+ catch _:_ ->
+ limit_size({"FORMAT ERROR: ~tp - ~tp",[Format0,Args]},Depth,Opts)
+ end.
+
+limit_format([#{control_char:=C0}=M0|T], Depth) when C0 =:= $p;
+ C0 =:= $w ->
+ C = C0 - ($a - $A), %To uppercase.
+ #{args:=Args} = M0,
+ M = M0#{control_char:=C,args:=Args++[Depth]},
+ [M|limit_format(T, Depth)];
+limit_format([H|T], Depth) ->
+ [H|limit_format(T, Depth)];
+limit_format([], _) ->
+ [].
+
+truncate(String,unlimited) ->
+ String;
+truncate(String,Size) ->
+ Length = string:length(String),
+ if Length>Size ->
+ case lists:reverse(lists:flatten(String)) of
+ [$\n|_] ->
+ string:slice(String,0,Size-4)++"...\n";
+ _ ->
+ string:slice(String,0,Size-3)++"..."
+ end;
+ true ->
+ String
+ end.
+
+format_time(Timestamp,Config) when is_integer(Timestamp) ->
+ {Date,Time,Micro} = timestamp_to_datetimemicro(Timestamp,Config),
+ format_time(Date,Time,Micro);
+format_time(Other,_Config) ->
+ %% E.g. a string
+ to_string(Other).
+
+format_time({Y,M,D},{H,Min,S},Micro) ->
+ io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w",
+ [Y,M,D,H,Min,S,Micro]).
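+
+%% e.g. format_time({2017,10,19},{11,22,33},123456) produces the
+%% characters "2017-10-19 11:22:33.123456" (single-digit hours are
+%% space padded by ~2w).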
+
+%% Assuming this is monotonic time in microseconds
+timestamp_to_datetimemicro(Timestamp,Config) when is_integer(Timestamp) ->
+ SysTime = Timestamp + erlang:time_offset(microsecond),
+ Micro = SysTime rem 1000000,
+ Sec = SysTime div 1000000,
+ UniversalTime = erlang:posixtime_to_universaltime(Sec),
+ {Date,Time} =
+ case Config of
+ #{utc:=true} -> UniversalTime;
+ _ -> erlang:universaltime_to_localtime(UniversalTime)
+ end,
+ {Date,Time,Micro}.
+
+format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_integer(A) ->
+ atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A);
+format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_list(A) ->
+ format_mfa({M,F,length(A)});
+format_mfa(MFA) ->
+ to_string(MFA).
+
+maybe_add_legacy_header(Level,
+ #{time:=Timestamp}=Meta,
+ #{legacy_header:=true}=Config) ->
+ #{title:=Title}=MyMeta = add_legacy_title(Level,maps:get(?MODULE,Meta,#{})),
+ {{Y,Mo,D},{H,Mi,S},Micro} = timestamp_to_datetimemicro(Timestamp,Config),
+ Header = io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===",
+ [Title,D,month(Mo),Y,H,Mi,S,Micro,utcstr(Config)]),
+ Meta#{?MODULE=>MyMeta#{header=>Header}};
+maybe_add_legacy_header(_,Meta,_) ->
+ Meta.
+
+add_legacy_title(_Level,#{title:=_}=MyMeta) ->
+ MyMeta;
+add_legacy_title(Level,MyMeta) ->
+ Title = string:uppercase(atom_to_list(Level)) ++ " REPORT",
+ MyMeta#{title=>Title}.
+
+month(1) -> "Jan";
+month(2) -> "Feb";
+month(3) -> "Mar";
+month(4) -> "Apr";
+month(5) -> "May";
+month(6) -> "Jun";
+month(7) -> "Jul";
+month(8) -> "Aug";
+month(9) -> "Sep";
+month(10) -> "Oct";
+month(11) -> "Nov";
+month(12) -> "Dec".
+
+utcstr(#{utc:=true}) -> "UTC ";
+utcstr(_) -> "".
+
+add_default_config(#{utc:=_}=Config0) ->
+ Default =
+ #{legacy_header=>false,
+ single_line=>false,
+ chars_limit=>unlimited},
+ MaxSize = get_max_size(maps:get(max_size,Config0,false)),
+ Depth = get_depth(maps:get(depth,Config0,false)),
+ add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize,
+ depth=>Depth}));
+add_default_config(Config) ->
+ add_default_config(Config#{utc=>logger:get_utc_config()}).
+
+add_default_template(#{template:=_}=Config) ->
+ Config;
+add_default_template(Config) ->
+ Config#{template=>default_template(Config)}.
+
+default_template(#{legacy_header:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_HEADER;
+default_template(#{single_line:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_SINGLE;
+default_template(_) ->
+ ?DEFAULT_FORMAT_TEMPLATE.
+
+get_max_size(false) ->
+ logger:get_max_size();
+get_max_size(S) ->
+ max(10,S).
+
+get_depth(false) ->
+ logger:get_format_depth();
+get_depth(S) ->
+ max(5,S).
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
new file mode 100644
index 0000000000..7caad366ae
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.erl
@@ -0,0 +1,301 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_h_common).
+
+-include("logger_h_common.hrl").
+-include("logger_internal.hrl").
+
+-export([log_to_binary/2,
+ check_common_config/1,
+ call_cast_or_drop/2,
+ check_load/1,
+ limit_burst/1,
+ kill_if_choked/4,
+ flush_log_requests/0,
+ flush_log_requests/1,
+ handler_exit/2,
+ cancel_timer/1,
+ stop_or_restart/3,
+ overload_levels_ok/1,
+ error_notify/1,
+ info_notify/1]).
+
+%%%-----------------------------------------------------------------
+%%% Convert log data of any form to binary
+-spec log_to_binary(Log,Config) -> LogString when
+ Log :: logger:log(),
+ Config :: logger:config(),
+ LogString :: binary().
+log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) ->
+ do_log_to_binary(Log,Config);
+log_to_binary(#{msg:={report,_},meta:=Meta}=Log,Config) ->
+ DefaultReportCb = fun logger:format_otp_report/1,
+ do_log_to_binary(Log#{meta=>Meta#{report_cb=>DefaultReportCb}},Config);
+log_to_binary(Log,Config) ->
+ do_log_to_binary(Log,Config).
+
+do_log_to_binary(Log,Config) ->
+ {Formatter,FormatterConfig} = maps:get(formatter,Config),
+ String = try_format(Log,Formatter,FormatterConfig),
+ try unicode:characters_to_binary(String)
+ catch _:_ ->
+ ?LOG_INTERNAL(debug,[{formatter_error,Formatter},
+ {config,FormatterConfig},
+ {log,Log},
+ {bad_return_value,String}]),
+ <<"FORMATTER ERROR: bad_return_value">>
+ end.
+
+try_format(Log,Formatter,FormatterConfig) ->
+ try Formatter:format(Log,FormatterConfig)
+ catch
+ C:R:S ->
+ ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter},
+ {config,FormatterConfig},
+ {log,Log},
+ {reason,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}]),
+ case {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG} of
+ {Formatter,FormatterConfig} ->
+ "DEFAULT FORMATTER CRASHED";
+ {DefaultFormatter,DefaultConfig} ->
+ try_format(Log#{msg=>{"FORMATTER CRASH: ~tp",
+ [maps:get(msg,Log)]}},
+ DefaultFormatter,DefaultConfig)
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Check that the configuration term is valid
+check_common_config({toggle_sync_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({drop_new_reqs_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({flush_reqs_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({enable_burst_limit,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({burst_limit_size,N}) when is_integer(N) ->
+ valid;
+check_common_config({burst_window_time,N}) when is_integer(N) ->
+ valid;
+check_common_config({enable_kill_overloaded,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({handler_overloaded_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({handler_overloaded_mem,N}) when is_integer(N) ->
+ valid;
+check_common_config({handler_restart_after,NorA}) when is_integer(NorA);
+ NorA == never ->
+ valid;
+check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
+ NorA == no_repeat ->
+ valid;
+check_common_config(_) ->
+ invalid.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload Protection
+call_cast_or_drop(Name, Bin) ->
+ %% If the handler process is getting overloaded, the log request
+ %% will be synchronous instead of asynchronous (this slows down the
+ %% logging tempo of a process doing lots of logging). If the
+ %% handler is choked, drop mode is set and no request will be sent.
+ try ?get_mode(Name) of
+ async ->
+ gen_server:cast(Name, {log,Bin});
+ sync ->
+ try gen_server:call(Name, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of
+ %% if return value from call == dropped, the
+ %% message has been flushed by handler and should
+ %% therefore not be counted as dropped in stats
+ ok -> ok;
+ dropped -> ok
+ catch
+ _:{timeout,_} ->
+ ?observe(Name,{dropped,1})
+ end;
+ drop -> ?observe(Name,{dropped,1})
+ catch
+ %% if the ETS table doesn't exist (maybe because of a
+ %% handler restart), we can only drop the request
+ _:_ -> ?observe(Name,{dropped,1})
+ end,
+ ok.
+
+handler_exit(_Name, Reason) ->
+ exit(Reason).
+
+check_load(State = #{id:=Name, mode := Mode,
+ toggle_sync_qlen := ToggleSyncQLen,
+ drop_new_reqs_qlen := DropNewQLen,
+ flush_reqs_qlen := FlushQLen}) ->
+ {_,Mem} = process_info(self(), memory),
+ ?observe(Name,{max_mem,Mem}),
+ %% make sure the handler process doesn't get scheduled
+ %% out between the message_queue_len check below and the
+ %% action that follows (flush or write).
+ {_,QLen} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,QLen}),
+
+ {Mode1,_NewDrops,_NewFlushes} =
+ if
+ QLen >= FlushQLen ->
+ {flush, 0,1};
+ QLen >= DropNewQLen ->
+ %% Note that drop mode will force log requests to
+ %% be dropped on the client side (never even sent to
+ %% the handler).
+ IncDrops = if Mode == drop -> 0; true -> 1 end,
+ {?change_mode(Name, Mode, drop), IncDrops,0};
+ QLen >= ToggleSyncQLen ->
+ {?change_mode(Name, Mode, sync), 0,0};
+ true ->
+ {?change_mode(Name, Mode, async), 0,0}
+ end,
+ State1 = ?update_other(drops,DROPS,_NewDrops,State),
+ {Mode1, QLen, Mem,
+ ?update_other(flushes,FLUSHES,_NewFlushes,
+ State1#{last_qlen => QLen})}.
+
+limit_burst(#{enable_burst_limit := false}) ->
+ {true,0,0};
+limit_burst(#{burst_win_ts := BurstWinT0,
+ burst_msg_count := BurstMsgCount,
+ burst_window_time := BurstWinTime,
+ burst_limit_size := BurstLimitSz}) ->
+ if (BurstMsgCount >= BurstLimitSz) ->
+ %% the limit for allowed messages has been reached
+ BurstWinT1 = ?timestamp(),
+ case ?diff_time(BurstWinT1,BurstWinT0) of
+ BurstCheckTime when BurstCheckTime < (BurstWinTime*1000) ->
+ %% we're still within the burst time frame
+ {false,BurstWinT0,BurstMsgCount};
+ _BurstCheckTime ->
+ %% burst time frame passed, reset counters
+ {true,BurstWinT1,0}
+ end;
+ true ->
+ %% the limit for allowed messages not yet reached
+ {true,BurstWinT0,BurstMsgCount+1}
+ end.
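+
+%% Example of the burst limit arithmetic with the default settings
+%% (?BURST_LIMIT_SIZE = 500, ?BURST_WINDOW_TIME = 1000 ms): at most
+%% 500 requests are written within any one second window; once the
+%% limit is hit, the caller drops further requests until the window
+%% has passed and the counters are reset.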
+
+kill_if_choked(Name, QLen, Mem,
+ #{enable_kill_overloaded := KillIfOL,
+ handler_overloaded_qlen := HOLQLen,
+ handler_overloaded_mem := HOLMem}) ->
+ if KillIfOL andalso
+ ((QLen > HOLQLen) orelse (Mem > HOLMem)) ->
+ handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
+ true ->
+ ok
+ end.
+
+flush_log_requests() ->
+ flush_log_requests(-1).
+
+flush_log_requests(Limit) ->
+ process_flag(priority, high),
+ Flushed = flush_log_requests(0, Limit),
+ process_flag(priority, normal),
+ Flushed.
+
+flush_log_requests(Limit, Limit) ->
+ Limit;
+flush_log_requests(N, Limit) ->
+ %% flush log requests but leave other requests, such as
+ %% file/disk_log_sync, info and change_config, so that these
+ %% have a chance to be processed even under heavy load
+ receive
+ {'$gen_cast',{log,_}} ->
+ flush_log_requests(N+1, Limit);
+ {'$gen_call',{Pid,MRef},{log,_}} ->
+ Pid ! {MRef, dropped},
+ flush_log_requests(N+1, Limit)
+ after
+ 0 -> N
+ end.
+
+cancel_timer(TRef) when is_atom(TRef) -> ok;
+cancel_timer(TRef) -> timer:cancel(TRef).
+
+
+stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
+ #{handler_restart_after := RestartAfter}) ->
+ %% If we're terminating because of an overload situation (see
+ %% logger_h_common:kill_if_choked/4), we need to remove the handler
+ %% and set a restart timer. A separate process must perform this
+ %% in order to avoid deadlock.
+ HandlerPid = self(),
+ RemoveAndRestart =
+ fun() ->
+ MRef = erlang:monitor(process, HandlerPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after 30000 ->
+ error_notify(Reason),
+ exit(HandlerPid, kill)
+ end,
+ case logger:get_handler_config(Name) of
+ {ok,{HMod,HConfig}} when is_integer(RestartAfter) ->
+ _ = logger:remove_handler(Name),
+ _ = timer:apply_after(RestartAfter, logger, add_handler,
+ [Name,HMod,HConfig]);
+ {ok,_} ->
+ _ = logger:remove_handler(Name);
+ {error,CfgReason} when is_integer(RestartAfter) ->
+ error_notify({Name,restart_impossible,CfgReason});
+ {error,_} ->
+ ok
+ end
+ end,
+ spawn(RemoveAndRestart),
+ ok;
+
+stop_or_restart(Name, shutdown, _State) ->
+ %% Probably terminated by supervisor. Remove the handler to avoid
+ %% error printouts due to failing handler.
+ _ = case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% Spawning to avoid deadlock
+ spawn(logger,remove_handler,[Name]);
+ _ ->
+ ok
+ end,
+ ok;
+
+stop_or_restart(_Name, _Reason, _State) ->
+ ok.
+
+overload_levels_ok(HandlerConfig) ->
+ TSQL = maps:get(toggle_sync_qlen, HandlerConfig, ?TOGGLE_SYNC_QLEN),
+ DNRQL = maps:get(drop_new_reqs_qlen, HandlerConfig, ?DROP_NEW_REQS_QLEN),
+ FRQL = maps:get(flush_reqs_qlen, HandlerConfig, ?FLUSH_REQS_QLEN),
+ (TSQL < DNRQL) andalso (DNRQL < FRQL).
+
+error_notify(Term) ->
+ ?internal_log(error, Term).
+
+info_notify(Term) ->
+ ?internal_log(info, Term).
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
new file mode 100644
index 0000000000..89378dbb10
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -0,0 +1,262 @@
+
+%%%-----------------------------------------------------------------
+%%% Overload protection configuration
+
+%%! *** NOTE ***
+%%! It's important that:
+%%! TOGGLE_SYNC_QLEN < DROP_NEW_REQS_QLEN < FLUSH_REQS_QLEN
+%%! and that DROP_NEW_REQS_QLEN >= 2.
+%%! Otherwise the handler could end up in drop mode with no new
+%%! log requests to process. This would cause all future requests
+%%! to be dropped (no switch to async mode would ever take place).
+
+%% This specifies the message_queue_len value where the log
+%% requests switch from asynchronous casts to synchronous calls.
+-define(TOGGLE_SYNC_QLEN, 10).
+%% Above this message_queue_len, log requests will be dropped,
+%% i.e. no log requests get sent to the handler process.
+-define(DROP_NEW_REQS_QLEN, 200).
+%% Above this message_queue_len, the handler process will flush
+%% its mailbox and only leave this number of messages in it.
+-define(FLUSH_REQS_QLEN, 1000).
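+
+%% The three levels above are only defaults; each handler may override
+%% them in its own sub map of the handler config, for example
+%% (illustrative values that keep the required ordering):
+%%
+%%   #{logger_std_h => #{toggle_sync_qlen => 100,
+%%                       drop_new_reqs_qlen => 1000,
+%%                       flush_reqs_qlen => 2000}}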
+
+%% Never flush more than this number of messages in one go,
+%% or the handler will be unresponsive for seconds (keep this
+%% number as large as possible or the mailbox could grow large).
+-define(FLUSH_MAX_N, 5000).
+
+%% BURST_LIMIT is the max number of log requests allowed to be
+%% written within a BURST_WINDOW_TIME time frame.
+-define(ENABLE_BURST_LIMIT, true).
+-define(BURST_LIMIT_SIZE, 500).
+-define(BURST_WINDOW_TIME, 1000).
+
+%% This enables/disables the feature of automatically terminating the
+%% handler if it gets too loaded (and can't keep up).
+-define(ENABLE_KILL_OVERLOADED, false).
+%% If the message_queue_len goes above this size even after
+%% flushing has been performed, the handler is terminated.
+-define(HANDLER_OVERLOADED_QLEN, 20000).
+%% If the memory usage exceeds this level, the handler is terminated.
+-define(HANDLER_OVERLOADED_MEM, 3000000).
+
+%% This is the default time that the handler will wait before
+%% restarting and accepting new requests. The value 'never'
+%% disables restarts.
+-define(HANDLER_RESTART_AFTER, 5000).
+%%-define(HANDLER_RESTART_AFTER, never).
+
+%% The handler sends asynchronous write requests to the process
+%% controlling the i/o device, but once within this interval the
+%% write request is made synchronous, so that the i/o device
+%% process doesn't get overloaded. This gives the handler time
+%% to keep up with its mailbox in overload situations, even if
+%% the i/o is slow.
+-define(CONTROLLER_SYNC_INTERVAL, 20).
+%% The handler will not perform a file sync operation if the
+%% mailbox size is greater than this number. This is to ensure
+%% the handler process doesn't get overloaded while waiting for
+%% an expensive file sync operation to finish.
+-define(FILESYNC_OK_QLEN, 2).
+%% Do a file/disk_log sync operation every integer() millisec
+%% (if necessary) or set to 'no_repeat' to only do file sync when
+%% the handler is idle. Note that file sync is not guaranteed to
+%% happen automatically if this operation is disabled.
+-define(FILESYNC_REPEAT_INTERVAL, 5000).
+%%-define(FILESYNC_REPEAT_INTERVAL, no_repeat).
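+
+%% The interval can also be set per handler, e.g. (illustrative):
+%%
+%%   #{logger_disk_log_h => #{filesync_repeat_interval => no_repeat}}
+%%
+%% which disables the repeated sync for that handler (see the
+%% change_config handling in the handler modules).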
+
+%% This is the time after the last received message after which we
+%% assume that the handler has an empty mailbox (no new log request has
+%% come in).
+-define(IDLE_DETECT_TIME_MSEC, 100).
+-define(IDLE_DETECT_TIME_USEC, 100000).
+
+%% Default disk log option values
+-define(DISK_LOG_TYPE, wrap).
+-define(DISK_LOG_MAX_NO_FILES, 10).
+-define(DISK_LOG_MAX_NO_BYTES, 1048576).
+
+%%%-----------------------------------------------------------------
+%%% Overload protection macros
+
+-define(timestamp(), erlang:monotonic_time(microsecond)).
+
+-define(get_mode(HandlerName),
+ case ets:lookup(HandlerName, mode) of
+ [{mode,sync}] ->
+ case whereis(HandlerName)==self() of
+ true -> async;
+ _ -> sync
+ end;
+ [{mode,M}] -> M;
+ _ -> async
+ end).
+
+-define(set_mode(HandlerName, M),
+ begin ets:insert(HandlerName, {mode,M}), M end).
+
+-define(change_mode(HandlerName, M0, M1),
+ if M0 == M1 ->
+ M0;
+ true ->
+ ets:insert(HandlerName, {mode,M1}),
+ M1
+ end).
+
+-define(min(X1, X2),
+ if X2 == undefined -> X1;
+ X2 < X1 -> X2;
+ true -> X1
+ end).
+
+-define(max(X1, X2),
+ if
+ X2 == undefined -> X1;
+ X2 > X1 -> X2;
+ true -> X1
+ end).
+
+-define(diff_time(OS_T1, OS_T0), OS_T1-OS_T0).
+
+%%%-----------------------------------------------------------------
+%%% The test hook macros make it possible to observe and manipulate
+%%% internal handler functionality. When enabled, these macros will
+%%% slow down execution and therefore should not be included in code
+%%% to be officially released.
+
+%% -define(TEST_HOOKS, true).
+-ifdef(TEST_HOOKS).
+ -define(TEST_HOOKS_TAB, logger_h_test_hooks).
+
+ -define(init_test_hooks(),
+ _ = case ets:whereis(?TEST_HOOKS_TAB) of
+ undefined -> ets:new(?TEST_HOOKS_TAB, [named_table,public]);
+ _ -> ok
+ end,
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(set_internal_log(MOD_FUNC),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,MOD_FUNC})).
+
+ -define(set_result(OPERATION, RESULT),
+ ets:insert(?TEST_HOOKS_TAB, {OPERATION,RESULT})).
+
+ -define(set_defaults(),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(internal_log(TYPE, TERM),
+ try ets:lookup(?TEST_HOOKS_TAB, internal_log) of
+ [{_,{LMOD,LFUNC}}] -> apply(LMOD, LFUNC, [TYPE,TERM]);
+ _ -> logger:internal_log(TYPE, TERM)
+ catch _:_ -> logger:internal_log(TYPE, TERM) end).
+
+ -define(file_write(DEVICE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, file_write) of
+ [{_,ok}] -> file:write(DEVICE, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:write(DEVICE, DATA) end).
+
+ -define(file_datasync(DEVICE),
+ try ets:lookup(?TEST_HOOKS_TAB, file_datasync) of
+ [{_,ok}] -> file:datasync(DEVICE);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:datasync(DEVICE) end).
+
+ -define(disk_log_blog(LOG, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of
+ [{_,ok}] -> disk_log:blog(LOG, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:blog(LOG, DATA) end).
+
+ -define(disk_log_sync(LOG),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of
+ [{_,ok}] -> disk_log:sync(LOG);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:sync(LOG) end).
+
+ -define(DEFAULT_CALL_TIMEOUT, 5000).
+
+-else. % DEFAULTS!
+ -define(TEST_HOOKS_TAB, undefined).
+ -define(init_test_hooks(), ok).
+ -define(set_internal_log(_MOD_FUNC), ok).
+ -define(set_result(_OPERATION, _RESULT), ok).
+ -define(set_defaults(), ok).
+ -define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)).
+ -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
+ -define(file_datasync(DEVICE), file:datasync(DEVICE)).
+ -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)).
+ -define(disk_log_sync(LOG), disk_log:sync(LOG)).
+ -define(DEFAULT_CALL_TIMEOUT, 10000).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable statistics counters in the state of the
+%%% handler, which is useful for analysing the overload protection
+%%% behaviour. These counters should not be included in code to be
+%%% officially released (as some counters will grow very large
+%%% over time).
+
+%%-define(SAVE_STATS, true).
+-ifdef(SAVE_STATS).
+ -define(merge_with_stats(STATE),
+ STATE#{flushes => 0, flushed => 0, drops => 0,
+ casts => 0, calls => 0,
+ max_qlen => 0, max_time => 0}).
+
+ -define(update_max_qlen(QLEN, STATE),
+ begin #{max_qlen := QLEN0} = STATE,
+ STATE#{max_qlen => ?max(QLEN0,QLEN)} end).
+
+ -define(update_calls_or_casts(CALL_OR_CAST, INC, STATE),
+ case CALL_OR_CAST of
+ cast ->
+ #{casts := CASTS0} = STATE,
+ STATE#{casts => CASTS0+INC};
+ call ->
+ #{calls := CALLS0} = STATE,
+ STATE#{calls => CALLS0+INC}
+ end).
+
+ -define(update_max_time(TIME, STATE),
+ begin #{max_time := TIME0} = STATE,
+ STATE#{max_time => ?max(TIME0,TIME)} end).
+
+ -define(update_other(OTHER, VAR, INCVAL, STATE),
+ begin #{OTHER := VAR} = STATE,
+ STATE#{OTHER => VAR+INCVAL} end).
+
+-else. % DEFAULT!
+ -define(merge_with_stats(STATE), STATE).
+ -define(update_max_qlen(_QLEN, STATE), STATE).
+ -define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE).
+ -define(update_max_time(_TIME, STATE), STATE).
+ -define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable callbacks that make it possible to analyse
+%%% the overload protection behaviour from outside the handler
+%%% process (including dropped requests on the client side).
+%%% An external callback module (?OBSERVER_MOD) is required which
+%%% is not part of the kernel application. For this reason, these
+%%% callbacks should not be included in code to be officially released.
+
+%%-define(OBSERVER_MOD, logger_test).
+-ifdef(OBSERVER_MOD).
+ -define(start_observation(NAME), ?OBSERVER:start_observation(NAME)).
+ -define(observe(NAME,EVENT), ?OBSERVER:observe(NAME,EVENT)).
+
+-else. % DEFAULT!
+ -define(start_observation(_NAME), ok).
+ -define(observe(_NAME,_EVENT), ok).
+-endif.
+%%! <---
diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl
new file mode 100644
index 0000000000..82df499c2b
--- /dev/null
+++ b/lib/kernel/src/logger_internal.hrl
@@ -0,0 +1,98 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-include_lib("kernel/include/logger.hrl").
+-define(LOGGER_TABLE,logger).
+-define(LOGGER_KEY,'$logger_config$').
+-define(HANDLER_KEY,'$handler_config$').
+-define(LOGGER_META_KEY,'$logger_metadata$').
+-define(STANDARD_HANDLER, logger_std_h).
+-define(DEFAULT_HANDLER_FILTERS,
+ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp])).
+-define(DEFAULT_HANDLER_FILTERS(Domain),
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,prefix_of,Domain}}},
+ {no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]).
+-define(DEFAULT_FORMATTER,logger_formatter).
+-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true,
+ template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}).
+-define(DEFAULT_FORMAT_TEMPLATE_HEADER,
+ [{logger_formatter,header},"\n",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE_SINGLE,
+ [time," ",level,": ",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE,
+ [time," ",level,":\n",msg,"\n"]).
+
+-define(DEFAULT_LOGGER_CALL_TIMEOUT, infinity).
+
+-define(LOG_INTERNAL(Level,Report),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ %% Spawn this to avoid deadlocks
+ _ = spawn(logger,macro_log,[?LOCATION,Level,Report,
+ logger:add_default_metadata(#{})]),
+ ok;
+ false ->
+ ok
+ end).
+
+%%%-----------------------------------------------------------------
+%%% Levels
+%%% Using same as syslog
+-define(LEVELS,[emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug]).
+-define(EMERGENCY,0).
+-define(ALERT,1).
+-define(CRITICAL,2).
+-define(ERROR,3).
+-define(WARNING,4).
+-define(NOTICE,5).
+-define(INFO,6).
+-define(DEBUG,7).
+
+-define(IS_LEVEL(L),
+ (L=:=emergency orelse
+ L=:=alert orelse
+ L=:=critical orelse
+ L=:=error orelse
+ L=:=warning orelse
+ L=:=notice orelse
+ L=:=info orelse
+ L=:=debug)).
+
+-define(IS_MSG(Msg),
+ ((is_tuple(Msg) andalso tuple_size(Msg)==2)
+ andalso
+ (is_list(element(1,Msg)) andalso is_list(element(2,Msg)))
+ orelse
+ (element(1,Msg)==report andalso ?IS_REPORT(element(2,Msg)))
+ orelse
+ (element(1,Msg)==string andalso ?IS_STRING(element(2,Msg))))).
+
+-define(IS_REPORT(Report),
+ (is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report))))).
+
+-define(IS_STRING(String),
+ (is_list(String) orelse is_binary(String))).
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
new file mode 100644
index 0000000000..6ef3b8582a
--- /dev/null
+++ b/lib/kernel/src/logger_server.erl
@@ -0,0 +1,440 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_server).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0,
+ add_handler/3, remove_handler/1,
+ add_filter/2, remove_filter/2,
+ set_module_level/2, reset_module_level/1,
+ cache_module_level/1,
+ set_config/2, set_config/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2]).
+
+-include("logger_internal.hrl").
+
+-define(SERVER, logger).
+
+-record(state, {tid}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+add_handler(Id,Module,Config0) ->
+ case sanity_check(logger,handlers,[Id]) of
+ ok ->
+ try check_mod(Module) of
+ ok ->
+ case sanity_check(Id,Config0) of
+ ok ->
+ Default = default_config(Id),
+ Config = maps:merge(Default,Config0),
+ call({add_handler,Id,Module,Config});
+ Error ->
+ Error
+ end
+ catch throw:Error ->
+ {error,Error}
+ end;
+ Error ->
+ Error
+ end.
+
+remove_handler(HandlerId) ->
+ call({remove_handler,HandlerId}).
+
+add_filter(Owner,Filter) ->
+ case sanity_check(Owner,filters,[Filter]) of
+ ok -> call({add_filter,Owner,Filter});
+ Error -> Error
+ end.
+
+remove_filter(Owner,FilterId) ->
+ call({remove_filter,Owner,FilterId}).
+
+set_module_level(Module,Level) when is_atom(Module) ->
+ case sanity_check(logger,level,Level) of
+ ok -> call({set_module_level,Module,Level});
+ Error -> Error
+ end;
+set_module_level(Module,_) ->
+ {error,{not_a_module,Module}}.
+
+reset_module_level(Module) when is_atom(Module) ->
+ call({reset_module_level,Module});
+reset_module_level(Module) ->
+ {error,{not_a_module,Module}}.
+
+cache_module_level(Module) ->
+ gen_server:cast(?SERVER,{cache_module_level,Module}).
+
+
+set_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok -> call({update_config,Owner,#{Key=>Value}});
+ Error -> Error
+ end.
+
+set_config(Owner,Config0) ->
+ case sanity_check(Owner,Config0) of
+ ok ->
+ Config = maps:merge(default_config(Owner),Config0),
+ call({set_config,Owner,Config});
+ Error ->
+ Error
+ end.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([]) ->
+ process_flag(trap_exit, true),
+ Tid = logger_config:new(?LOGGER_TABLE),
+ LoggerConfig = maps:merge(default_config(logger),
+ #{handlers=>[logger_simple]}),
+ logger_config:create(Tid,logger,LoggerConfig),
+ SimpleConfig0 = maps:merge(default_config(logger_simple),
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS,
+ logger_simple=>#{buffer=>true}}),
+ %% If this fails, then the node should crash
+ {ok,SimpleConfig} =
+ logger_simple:adding_handler(logger_simple,SimpleConfig0),
+ logger_config:create(Tid,logger_simple,logger_simple,SimpleConfig),
+ {ok, #state{tid=Tid}}.
+
+handle_call({add_handler,Id,Module,HConfig}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:exist(Tid,Id) of
+ true ->
+ {error,{already_exist,Id}};
+ false ->
+ %% inform the handler
+ case call_h(Module,adding_handler,[Id,HConfig],{ok,HConfig}) of
+ {ok,HConfig1} ->
+ logger_config:create(Tid,Id,Module,HConfig1),
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers = maps:get(handlers,Config,[]),
+ do_set_config(Tid,logger,
+ Config#{handlers=>[Id|Handlers]}),
+ ok;
+ {error,HReason} ->
+ {error,{handler_not_added,HReason}}
+ end
+ end,
+ {reply,Reply,State};
+handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,_}} ->
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers0 = maps:get(handlers,Config,[]),
+ Handlers = lists:delete(HandlerId,Handlers0),
+ %% inform the handler
+ _ = call_h(Module,removing_handler,[HandlerId],ok),
+ do_set_config(Tid,logger,Config#{handlers=>Handlers}),
+ logger_config:delete(Tid,HandlerId),
+ ok;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
+ Reply = do_add_filter(Tid,Id,Filter),
+ {reply,Reply,State};
+handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
+ Reply = do_remove_filter(Tid,Id,FilterId),
+ {reply,Reply,State};
+handle_call({update_config,Id,NewConfig}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,Id) of
+ {ok,{Module,OldConfig}} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ case call_h(Module,changing_config,[Id,OldConfig,Config],
+ {ok,Config}) of
+ {ok,Config1} ->
+ do_set_config(Tid,Id,Config1);
+ Error ->
+ Error
+ end;
+ {ok,OldConfig} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ do_set_config(Tid,Id,Config);
+ Error ->
+ Error
+ end,
+ {reply,Reply,State};
+handle_call({set_config,logger,Config}, _From, #state{tid=Tid}=State) ->
+ Reply = do_set_config(Tid,logger,Config),
+ {reply,Reply,State};
+handle_call({set_config,HandlerId,Config}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,OldConfig}} ->
+ case call_h(Module,changing_config,[HandlerId,OldConfig,Config],
+ {ok,Config}) of
+ {ok,Config1} ->
+ do_set_config(Tid,HandlerId,Config1);
+ Error ->
+ Error
+ end;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({set_module_level,Module,Level}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set_module_level(Tid,Module,Level),
+ {reply,Reply,State};
+handle_call({reset_module_level,Module}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:reset_module_level(Tid,Module),
+ {reply,Reply,State}.
+
+handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) ->
+ logger_config:cache_module_level(Tid,Module),
+ {noreply, State}.
+
+%% Interface for those who can't call the API - e.g. the emulator, or
+%% places related to code loading.
+%%
+%% This can also be log events from remote nodes which are sent from
+%% logger.erl when the group leader of the client process is not on
+%% the same node as the client process itself.
+handle_info({log,Level,Format,Args,Meta}, State) ->
+ logger:log(Level,Format,Args,Meta),
+ {noreply, State};
+handle_info({log,Level,Report,Meta}, State) ->
+ logger:log(Level,Report,Meta),
+ {noreply, State};
+handle_info({Ref,_Reply},State) when is_reference(Ref) ->
+ %% Assuming this is a timed-out gen_server reply - ignoring
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+call(Request) ->
+ case whereis(?SERVER) of
+ Pid when Pid==self() ->
+ {error,{attempting_syncronous_call_to_self,Request}};
+ _ ->
+ gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
+ end.
+
+do_add_filter(Tid,Id,{FId,_} = Filter) ->
+ case do_get_config(Tid,Id) of
+ {ok,Config} ->
+ Filters = maps:get(filters,Config,[]),
+ case lists:keymember(FId,1,Filters) of
+ true ->
+ {error,{already_exist,FId}};
+ false ->
+ do_set_config(Tid,Id,Config#{filters=>[Filter|Filters]})
+ end;
+ Error ->
+ Error
+ end.
+
+do_remove_filter(Tid,Id,FilterId) ->
+ case do_get_config(Tid,Id) of
+ {ok,Config} ->
+ Filters0 = maps:get(filters,Config,[]),
+ case lists:keytake(FilterId,1,Filters0) of
+ {value,_,Filters} ->
+ do_set_config(Tid,Id,Config#{filters=>Filters});
+ false ->
+ {error,{not_found,FilterId}}
+ end;
+ Error ->
+ Error
+ end.
+
+do_get_config(Tid,Id) ->
+ case logger_config:get(Tid,Id) of
+ {ok,{_,Config}} ->
+ {ok,Config};
+ {ok,Config} ->
+ {ok,Config};
+ Error ->
+ Error
+ end.
+
+do_set_config(Tid,Id,Config) ->
+ logger_config:set(Tid,Id,Config),
+ ok.
+
+default_config(logger) ->
+ #{level=>info,
+ filters=>[],
+ filter_default=>log,
+ handlers=>[]};
+default_config(_) ->
+ #{level=>info,
+ filters=>[],
+ filter_default=>log,
+ formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}.
+
+sanity_check(Owner,Key,Value) ->
+ sanity_check_1(Owner,[{Key,Value}]).
+
+sanity_check(HandlerId,Config) when is_map(Config) ->
+ sanity_check_1(HandlerId,maps:to_list(Config));
+sanity_check(_,Config) ->
+ {error,{invalid_handler_config,Config}}.
+
+sanity_check_1(Owner,Config) when is_list(Config) ->
+ try
+ Type = get_type(Owner),
+ check_config(Type,Config)
+ catch throw:Error -> {error,Error}
+ end.
+
+get_type(logger) ->
+ logger;
+get_type(Id) ->
+ check_id(Id),
+ handler.
+
+check_config(Owner,[{level,Level}|Config]) ->
+ check_level(Level),
+ check_config(Owner,Config);
+check_config(logger,[{handlers,Handlers}|Config]) ->
+ check_handlers(Handlers),
+ check_config(logger,Config);
+check_config(Owner,[{filters,Filters}|Config]) ->
+ check_filters(Filters),
+ check_config(Owner,Config);
+check_config(Owner,[{filter_default,FD}|Config]) ->
+ check_filter_default(FD),
+ check_config(Owner,Config);
+check_config(handler,[{formatter,Formatter}|Config]) ->
+ check_formatter(Formatter),
+ check_config(handler,Config);
+check_config(logger,[C|_]) ->
+ throw({invalid_logger_config,C});
+check_config(handler,[{_,_}|Config]) ->
+ %% Arbitrary config elements are allowed for handlers
+ check_config(handler,Config);
+check_config(_,[]) ->
+ ok.
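+
+%% For reference only -- a top-level logger config that passes the
+%% checks above could look like this (the handler id is illustrative):
+%%   sanity_check(logger, #{level => info,
+%%                          filter_default => log,
+%%                          handlers => [logger_std_h]})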
+
+check_id(Id) when is_atom(Id) ->
+ ok;
+check_id(Id) ->
+ throw({invalid_id,Id}).
+
+check_mod(Mod) when is_atom(Mod) ->
+ ok;
+check_mod(Mod) ->
+ throw({invalid_module,Mod}).
+
+check_level({LevelInt,cached}) when LevelInt>=?EMERGENCY, LevelInt=<?DEBUG ->
+ ok;
+check_level(Level) ->
+ case lists:member(Level,?LEVELS) of
+ true ->
+ ok;
+ false ->
+ throw({invalid_level,Level})
+ end.
+
+check_handlers([Id|Handlers]) ->
+ check_id(Id),
+ check_handlers(Handlers);
+check_handlers([]) ->
+ ok;
+check_handlers(Handlers) ->
+ throw({invalid_handlers,Handlers}).
+
+check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) ->
+ check_filters(Filters);
+check_filters([Filter|_]) ->
+ throw({invalid_filter,Filter});
+check_filters([]) ->
+ ok;
+check_filters(Filters) ->
+ throw({invalid_filters,Filters}).
+
+check_filter_default(FD) when FD==stop; FD==log ->
+ ok;
+check_filter_default(FD) ->
+ throw({invalid_filter_default,FD}).
+
+check_formatter({logger_formatter,Config}) when is_map(Config) ->
+ check_logger_formatter_config(maps:to_list(Config));
+check_formatter({logger_formatter,Config}) ->
+ throw({invalid_formatter_config,Config});
+check_formatter({Mod,_}) ->
+ %% no knowledge of other formatters
+ check_mod(Mod);
+check_formatter(Formatter) ->
+ throw({invalid_formatter,Formatter}).
+
+
+check_logger_formatter_config([{template,T}|Config]) when is_list(T) ->
+ case lists:all(fun(X) when is_atom(X) -> true;
+ (X) when is_tuple(X), is_atom(element(1,X)) -> true;
+ (X) when is_list(X) -> io_lib:printable_unicode_list(X);
+ (_) -> false
+ end,
+ T) of
+ true ->
+ check_logger_formatter_config(Config);
+ false ->
+ throw({invalid_formatter_template,T})
+ end;
+check_logger_formatter_config([{legacy_header,LH}|Config]) when is_boolean(LH) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([{single_line,SL}|Config]) when is_boolean(SL) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([{utc,Utc}|Config]) when is_boolean(Utc) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([C|_]) ->
+ throw({invalid_formatter_config,C});
+check_logger_formatter_config([]) ->
+ ok.
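+
+%% For reference only -- a formatter setting that passes the checks
+%% above could look like this (values are illustrative):
+%%   {formatter, {logger_formatter, #{legacy_header => true,
+%%                                    single_line => false,
+%%                                    template => [time," ",msg,"\n"]}}}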
+
+call_h(Module, Function, Args, DefRet) ->
+ %% Not calling code:ensure_loaded + erlang:function_exported here,
+ %% since in some rare terminal cases, the code_server might not
+ %% exist and we'll get a deadlock in removing the handler.
+ try apply(Module, Function, Args)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Module,Function,Args,_}|_]} ->
+ DefRet;
+ _ ->
+ {error,{callback_crashed,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}}
+ end
+ end.
diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple.erl
new file mode 100644
index 0000000000..23ff6ccd2e
--- /dev/null
+++ b/lib/kernel/src/logger_simple.erl
@@ -0,0 +1,236 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple).
+
+-export([adding_handler/2, removing_handler/1, log/2]).
+-export([get_buffer/0]).
+
+%% This module implements a simple handler for logger. It is the
+%% default used during system start.
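+%%
+%% When added with the config #{logger_simple => #{buffer => true}},
+%% it also buffers a limited number of events (see init/2 below);
+%% these can later be fetched with get_buffer/0 and replayed into a
+%% permanent handler, e.g. via logger_std_h:swap_buffer/2.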
+
+%%%-----------------------------------------------------------------
+%%% API
+get_buffer() ->
+ case whereis(?MODULE) of
+ undefined ->
+ {error,noproc};
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ Pid ! {get_buffer,self()},
+ receive
+ {buffer,Buffer} ->
+ erlang:demonitor(Ref,[flush]),
+ {ok,Buffer};
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason}
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logger callback
+
+adding_handler(?MODULE,Config) ->
+ Me = self(),
+ case whereis(?MODULE) of
+ undefined ->
+ {Pid,Ref} = spawn_opt(fun() -> init(Me,Config) end,
+ [link,monitor,{message_queue_data,off_heap}]),
+ receive
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason};
+ {Pid,started} ->
+ erlang:demonitor(Ref),
+ {ok,Config}
+ end;
+ _ ->
+ {error,{handler_process_name_already_exists,?MODULE}}
+ end.
+
+removing_handler(?MODULE) ->
+ case whereis(?MODULE) of
+ undefined ->
+ ok;
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ unlink(Pid),
+ Pid ! stop,
+ receive {'DOWN',Ref,process,Pid,_} ->
+ ok
+ end
+ end.
+
+log(#{meta:=#{error_logger:=#{tag:=info_report,type:=Type}}},_Config)
+ when Type=/=std_info ->
+ %% Skip info reports that are not 'std_info' (ref simple logger in
+ %% error_logger)
+ ok;
+log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) ->
+ _ = case whereis(?MODULE) of
+ undefined ->
+ %% Is the node on the way down? Real emergency?
+ %% Log directly from client just to get it out
+ do_log(
+ #{level=>error,
+ msg=>{report,{error,simple_handler_process_dead}},
+ meta=>#{time=>erlang:monotonic_time(microsecond)}}),
+ do_log(Log);
+ _ ->
+ ?MODULE ! {log,Log}
+ end,
+ ok;
+log(_,_) ->
+ %% Unexpected log.
+ %% We don't want to crash the simple logger, so ignore this.
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Process
+init(Starter,Config) ->
+ register(?MODULE,self()),
+ Starter ! {self(),started},
+ BufferSize =
+ case Config of
+ #{?MODULE:=#{buffer:=true}} ->
+ 10;
+ _ ->
+ infinity
+ end,
+ loop(#{buffer_size=>BufferSize,dropped=>0,buffer=>[]},infinity).
+
+loop(Buffer,Timeout) ->
+ receive
+ stop ->
+ ok;
+ {get_buffer,From} ->
+ loop(Buffer#{send_to=>From},0);
+ {log,#{msg:=_,meta:=#{time:=_}}=Log} ->
+ do_log(Log),
+ loop(update_buffer(Buffer,Log),Timeout);
+ _ ->
+ %% Unexpected message - flush it!
+ loop(Buffer,Timeout)
+ after Timeout ->
+ #{dropped:=D,buffer:=B,send_to:=Pid} = Buffer,
+ LogList = lists:reverse(B) ++ drop_msg(D),
+ Pid ! {buffer,LogList},
+ loop(Buffer#{buffer_size=>infinity,
+ dropped=>0,
+ buffer=>[],
+ send_to=>false},
+ infinity)
+ end.
+
+update_buffer(#{buffer_size:=infinity}=Buffer,_Log) ->
+ Buffer;
+update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) ->
+ Buffer#{dropped=>D+1};
+update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) ->
+ Buffer#{buffer_size=>S-1,buffer=>[Log|B]}.
+
+drop_msg(0) ->
+ [];
+drop_msg(N) ->
+ [#{level=>info,
+ msg=>{"Simple handler buffer full, dropped ~w messages",[N]},
+ meta=>#{time=>erlang:monotonic_time(microsecond)}}].
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+%% Can't do io_lib:format
+
+do_log(#{msg:={report,Report},
+ meta:=#{time:=T,error_logger:=#{type:=Type}}}) ->
+ display_date(T),
+ display_report(Type,Report);
+do_log(#{msg:=Msg,meta:=#{time:=T}}) ->
+ display_date(T),
+ display(Msg).
+
+display_date(Timestamp0) when is_integer(Timestamp0) ->
+ Timestamp = Timestamp0 + erlang:time_offset(microsecond),
+ Micro = Timestamp rem 1000000,
+ Sec = Timestamp div 1000000,
+ {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime(
+ erlang:posixtime_to_universaltime(Sec)),
+ erlang:display_string(
+ integer_to_list(Y) ++ "-" ++
+ pad(Mo,2) ++ "-" ++
+ pad(D,2) ++ " " ++
+ pad(H,2) ++ ":" ++
+ pad(Mi,2) ++ ":" ++
+ pad(S,2) ++ "." ++
+ pad(Micro,6) ++ " ").
+
+pad(Int,Size) when is_integer(Int) ->
+ pad(integer_to_list(Int),Size);
+pad(Str,Size) when length(Str)==Size ->
+ Str;
+pad(Str,Size) ->
+ pad([$0|Str],Size).
+
+display({string,Chardata}) ->
+ try unicode:characters_to_list(Chardata) of
+ String -> erlang:display_string(String), erlang:display_string("\n")
+ catch _:_ -> erlang:display(Chardata)
+ end;
+display({report,Report}) when is_map(Report) ->
+ display_report(maps:to_list(Report));
+display({report,Report}) ->
+ display_report(Report);
+display({F, A}) when is_list(F), is_list(A) ->
+ erlang:display_string(F ++ "\n"),
+ [begin
+ erlang:display_string("\t"),
+ erlang:display(Arg)
+ end || Arg <- A],
+ ok.
+
+display_report(Atom, A) when is_atom(Atom) ->
+ %% The widest atom seems to be 'supervisor_report' at 17.
+ ColumnWidth = 20,
+ AtomString = atom_to_list(Atom),
+ AtomLength = length(AtomString),
+ Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
+ erlang:display_string(AtomString ++ Padding),
+ display_report(A);
+display_report(F, A) ->
+ erlang:display({F, A}).
+
+display_report([A, []]) ->
+ %% Special case for crash reports when process has no links
+ display_report(A);
+display_report(A = [_|_]) ->
+ case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
+ true ->
+ erlang:display_string("\n"),
+ lists:foreach(
+ fun({Key, Value}) ->
+ erlang:display_string(
+ " " ++
+ atom_to_list(Key) ++
+ ": "),
+ erlang:display(Value)
+ end, A);
+ false ->
+ erlang:display(A)
+ end;
+display_report(A) ->
+ erlang:display(A).
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
new file mode 100644
index 0000000000..cbc9db372c
--- /dev/null
+++ b/lib/kernel/src/logger_std_h.erl
@@ -0,0 +1,799 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+-include_lib("kernel/include/file.hrl").
+
+%% API
+-export([start_link/3, info/1, filesync/1, reset/1]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/2, removing_handler/1,
+ changing_config/3, swap_buffer/2]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a standard handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+-spec filesync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+filesync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, filesync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+filesync(Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(Name, Config) ->
+ case check_config(adding, Name, Config) of
+ {ok, Config1} ->
+ %% create initial handler state by merging defaults with config
+ HConfig = maps:get(?MODULE, Config1, #{}),
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ case start(Name, Config1, HState) of
+ ok ->
+ %% Make sure wait_for_buffer is not stored, so we
+ %% won't hang and wait for buffer on a restart
+ {ok, maps:remove(wait_for_buffer,Config1)};
+ Error ->
+ Error
+ end;
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = HState,
+ {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(Name,
+ OldConfig=#{id:=Id, ?MODULE:=#{type:=Type}},
+ NewConfig=#{id:=Id}) ->
+ MyConfig = maps:get(?MODULE, NewConfig, #{}),
+ case maps:get(type, MyConfig, Type) of
+ Type ->
+ MyConfig1 = MyConfig#{type=>Type},
+ changing_config1(Name, OldConfig,
+ NewConfig#{?MODULE=>MyConfig1});
+ _ ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}
+ end;
+changing_config(_Name, OldConfig, NewConfig) ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}.
+
+changing_config1(Name, OldConfig, NewConfig) ->
+ case check_config(changing, Name, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ HError -> HError
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end.
+
+check_config(adding, Name, Config0) ->
+ %% Merge in defaults on top level
+ Config = maps:merge(#{id => Name}, Config0),
+ %% Merge in defaults on handler level
+ MyConfig0 = maps:get(?MODULE, Config, #{}),
+ MyConfig = maps:merge(#{type => standard_io},
+ MyConfig0),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok ->
+ {ok,Config#{?MODULE=>MyConfig}};
+ Error ->
+ Error
+ end;
+check_config(changing, _Name, Config) ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+check_my_config([{type,Type} | Config]) when Type == standard_io;
+ Type == standard_error ->
+ check_my_config(Config);
+check_my_config([{type,{file,File}} | Config]) when is_list(File) ->
+ check_my_config(Config);
+check_my_config([{type,{file,File,Modes}} | Config]) when is_list(File),
+ is_list(Modes) ->
+ check_my_config(Config);
+check_my_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_my_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_my_config([]) ->
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Name) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Get buffer when swapping from simple handler
+swap_buffer(Name,Buffer) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ Name ! {buffer,Buffer}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(Log, Config) -> ok | dropped when
+ Log :: logger:log(),
+ Config :: logger:config().
+
+log(Log,Config=#{id:=Name}) ->
+ %% if the handler has crashed, we must drop this request
+ %% and hope the handler restarts so we can try again
+ true = is_pid(whereis(Name)),
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ logger_h_common:call_cast_or_drop(Name, Bin).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config,
+ State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) ->
+ register(Name, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case do_init(Name, Type) of
+ {ok,InitState} ->
+ catch ets:new(Name, [public, named_table]),
+ ?set_mode(Name, async),
+ State = maps:merge(State0, InitState),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{mode => async,
+ file_ctrl_sync => FileCtrlSyncInt,
+ last_qlen => 0,
+ last_log_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0}),
+ proc_lib:init_ack({ok,self()}),
+ gen_server:cast(self(), {repeated_filesync,T0}),
+ enter_loop(Config, State1);
+ Error ->
+ logger_h_common:error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+do_init(Name, Std) when Std=:=standard_io; Std=:=standard_error ->
+ case open_log_file(Name, Std) of
+ {ok,FileCtrlPid} ->
+ {ok,#{id=>Name,type=>Std,file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end;
+do_init(Name, FileInfo) when is_tuple(FileInfo) ->
+ case open_log_file(Name, FileInfo) of
+ {ok,FileCtrlPid} ->
+ {ok,#{id=>Name,type=>FileInfo,file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end.
+
+enter_loop(#{wait_for_buffer:=true}=Config,State) ->
+ State1 =
+ receive
+ {buffer,Buffer} ->
+ lists:foldl(
+ fun(Log,S) ->
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ {_,S1} = do_log(Bin,cast,S),
+ S1
+ end,
+ State,
+ Buffer)
+ end,
+ gen_server:enter_loop(?MODULE,[],State1);
+enter_loop(_Config,State) ->
+ gen_server:enter_loop(?MODULE,[],State).
+
+%% This is the synchronous log request.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply,Result, State1};
+
+handle_call(filesync, _From, State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid}) ->
+ if is_atom(Type) ->
+ {reply, ok, State};
+ true ->
+ {reply, file_ctrl_filesync_sync(FileCtrlPid), State}
+ end;
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0,
+ last_log_ts := LastLogTS}) ->
+ HConfig = maps:get(?MODULE, NewConfig, #{}),
+ State1 = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ gen_server:cast(self(), {repeated_filesync,
+ LastLogTS})
+ end,
+ {reply, ok, State1};
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = State1,
+ {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp()}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+%% This is the asynchronous log request.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% request, the repeat operation must be active!
+handle_cast({repeated_filesync,LastLogTS0},
+ State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid,
+ filesync_repeat_interval := FSyncInt,
+ last_log_ts := LastLogTS1}) ->
+ State1 =
+ if not is_atom(Type), is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ if LastLogTS1 == LastLogTS0 ->
+ ok;
+ true ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),{repeated_filesync,LastLogTS1}]),
+ State#{rep_sync_tref => TRef};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) ->
+ case maps:get(file_ctrl_pid, State, undefined) of
+ Pid ->
+ %% file error, terminate handler
+ logger_h_common:handler_exit(Name,
+ {error,{write_failed,FileInfo,Why}});
+ _Other ->
+ %% ignore EXIT
+ ok
+ end,
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
+ type:=_FileInfo}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ case is_process_alive(FWPid) of
+ true ->
+ unlink(FWPid),
+ _ = file_ctrl_stop(FWPid),
+ MRef = erlang:monitor(process, FWPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ exit(FWPid, kill)
+ end;
+ false ->
+ ok
+ end,
+ logger_h_common:stop_or_restart(Name, Reason, State).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%%
+get_init_state() ->
+ #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen => ?FLUSH_REQS_QLEN,
+ enable_burst_limit => ?ENABLE_BURST_LIMIT,
+ burst_limit_size => ?BURST_LIMIT_SIZE,
+ burst_window_time => ?BURST_WINDOW_TIME,
+ enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after => ?HANDLER_RESTART_AFTER,
+ file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+%%%-----------------------------------------------------------------
+%%% Add a standard handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Handler-specific config should be provided in a sub map associated
+%%% with a key named the same as this module, e.g.:
+%%%
+%%% Config = #{logger_std_h => #{toggle_sync_qlen => 50}}
+%%%
+%%% The standard handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
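+%%%
+%%% For reference only, a handler with these settings could be added
+%%% at the API level with something like (id and values illustrative):
+%%%   logger:add_handler(my_std_h, logger_std_h,
+%%%                      #{logger_std_h =>
+%%%                            #{type => {file,"./log/my.log"},
+%%%                              toggle_sync_qlen => 50}}).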
+start(Name, Config, HandlerState) ->
+ LoggerStdH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerStdH) of
+ {ok,_Pid} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ _ = gen_server:call(Name,stop),
+ _ = supervisor:delete_child(logger_sup, Name),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_file_ctrl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload between every request (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize requests have been handled
+do_log(Bin, CallOrCast, State = #{id:=Name}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% request to react quickly to large bursts of requests and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this clause is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log request
+ ?observe(Name,{flushed,_NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,_NewFlushed,
+ State1#{mode => ?set_mode(Name,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this clause is called to write to file
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{file_ctrl_pid := FileCtrlPid,
+ file_ctrl_sync := FileCtrlSync,
+ last_qlen := LastQLen,
+ last_log_ts := T0,
+ file_ctrl_sync_int := FileCtrlSyncInt}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log requests
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+ %% only send a synchronous request to the file controller process
+ %% every FileCtrlSyncInt time, to give the handler time between
+ %% file writes so it can keep up with incoming messages
+ {Result,LastQLen1} =
+ if DoWrite, FileCtrlSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ file_write_sync(FileCtrlPid, Bin, false),
+ {ok,element(2, process_info(self(), message_queue_len))};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ file_write_async(FileCtrlPid, Bin),
+ {ok,LastQLen};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped,LastQLen}
+ end,
+
+ %% Check if the time since the previous log request is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+ %% time between two consecutive log requests is short and no new
+ %% request comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ %% do filesync if necessary
+ case maps:get(type, State) of
+ Std when is_atom(Std) ->
+ ok;
+ _File ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {?change_mode(Name, Mode, async),0};
+ true ->
+ {Mode,BurstMsgCount}
+ end,
+ State1 =
+ ?update_calls_or_casts(_CallOrCast,1,State),
+ State2 =
+ ?update_max_time(Time,
+ State1#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ file_ctrl_sync =>
+ ?update_file_ctrl_sync(FileCtrlSync,
+ FileCtrlSyncInt)}),
+ {Result,State2}.
+
+open_log_file(HandlerName, FileInfo) ->
+ case file_ctrl_start(HandlerName, FileInfo) of
+ OK = {ok,_FileCtrlPid} -> OK;
+ Error -> Error
+ end.
+
+do_open_log_file({file,File}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,[]}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,Modes}) ->
+ try
+ case filelib:ensure_dir(File) of
+ ok ->
+ file:open(File, Modes);
+ Error ->
+ Error
+ end
+ catch
+ _:Reason -> {error,Reason}
+ end.
+
+close_log_file(Std) when Std == standard_io; Std == standard_error ->
+ ok;
+close_log_file(Fd) ->
+ _ = file:datasync(Fd),
+ _ = file:close(Fd).
+
+%%%-----------------------------------------------------------------
+%%% File control process
+
+file_ctrl_start(HandlerName, FileInfo) ->
+ Starter = self(),
+ FileCtrlPid =
+ spawn_link(fun() ->
+ file_ctrl_init(HandlerName, FileInfo, Starter)
+ end),
+ receive
+ {FileCtrlPid,ok} ->
+ {ok,FileCtrlPid};
+ {FileCtrlPid,Error} ->
+ Error
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,file_ctrl_process_not_started}
+ end.
+
+file_ctrl_stop(Pid) ->
+ Pid ! stop.
+
+file_write_async(Pid, Bin) ->
+ Pid ! {log,Bin},
+ ok.
+
+file_write_sync(Pid, Bin, FileSync) ->
+ case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of
+ {error,Reason} ->
+ {error,{write_failed,Bin,Reason}};
+ Result ->
+ Result
+ end.
+
+file_ctrl_filesync_async(Pid) ->
+ Pid ! filesync,
+ ok.
+
+file_ctrl_filesync_sync(Pid) ->
+ file_ctrl_call(Pid, {filesync,self()}).
+
+file_ctrl_call(Pid, Msg) ->
+ MRef = monitor(process, Pid),
+ Pid ! {Msg,MRef},
+ receive
+ {MRef,Result} ->
+ demonitor(MRef, [flush]),
+ Result;
+ {'DOWN',MRef,_Type,_Object,Reason} ->
+ {error,Reason}
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,{no_response,Pid}}
+ end.
+
+file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
+ process_flag(message_queue_data, off_heap),
+ FileName = element(2, FileInfo),
+ case do_open_log_file(FileInfo) of
+ {ok,Fd} ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName);
+ {error,Reason} ->
+ Starter ! {self(),{error,{open_failed,FileName,Reason}}}
+ end;
+file_ctrl_init(HandlerName, StdDev, Starter) ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName).
+
+file_ctrl_loop(Fd, Type, DevName, Synced,
+ PrevWriteResult, PrevSyncResult, HandlerName) ->
+ receive
+ %% asynchronous request
+ {log,Bin} ->
+ Result = if Type == file ->
+ write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName);
+ true ->
+ io:put_chars(Fd, Bin)
+ end,
+ file_ctrl_loop(Fd, Type, DevName, false,
+ Result, PrevSyncResult, HandlerName);
+
+ %% synchronous request
+ {{log,From,Bin,FileSync},MRef} ->
+ if Type == file ->
+ %% check that file hasn't been deleted
+ CheckFile =
+ fun() -> {ok,_} = file:read_file_info(DevName) end,
+ spawn_link(CheckFile),
+ WResult = write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName),
+ {Synced1,SResult} =
+ if not FileSync ->
+ {false,PrevSyncResult};
+ true ->
+ case sync_dev(Fd, DevName,
+ PrevSyncResult, HandlerName) of
+ ok -> {true,ok};
+ Error -> {false,Error}
+ end
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, Synced1,
+ WResult, SResult, HandlerName);
+ true ->
+ _ = io:put_chars(Fd, Bin),
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, false,
+ ok, PrevSyncResult, HandlerName)
+ end;
+
+ filesync when not Synced ->
+ Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName),
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ filesync ->
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, PrevSyncResult, HandlerName);
+
+ {{filesync,From},MRef} ->
+ Result = if not Synced ->
+ sync_dev(Fd, DevName, PrevSyncResult, HandlerName);
+ true ->
+ ok
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ stop ->
+ _ = close_log_file(Fd),
+ stopped
+ end.
+
+write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) ->
+ case ?file_write(Fd, Bin) of
+ ok ->
+ ok;
+ PrevWriteResult ->
+ %% don't report same error twice
+ PrevWriteResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,write,FileName,Error}),
+ Error
+ end.
+
+sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
+ case ?file_datasync(Fd) of
+ ok ->
+ ok;
+ PrevSyncResult ->
+ %% don't report same error twice
+ PrevSyncResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
+ Error
+ end.
+
diff --git a/lib/kernel/src/logger_sup.erl b/lib/kernel/src/logger_sup.erl
new file mode 100644
index 0000000000..4e4de94d5c
--- /dev/null
+++ b/lib/kernel/src/logger_sup.erl
@@ -0,0 +1,53 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callbacks
+-export([init/1]).
+
+-define(SERVER, ?MODULE).
+
+%%%===================================================================
+%%% API functions
+%%%===================================================================
+
+start_link() ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%%===================================================================
+%%% Supervisor callbacks
+%%%===================================================================
+
+init([]) ->
+
+ SupFlags = #{strategy => one_for_one,
+ intensity => 1,
+ period => 5},
+
+ {ok, {SupFlags, []}}.
+
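+%% Note that this supervisor starts with no static children; handler
+%% processes are attached dynamically, e.g. logger_std_h adds itself
+%% with supervisor:start_child(logger_sup, ChildSpec) when the handler
+%% is registered (see logger_std_h:start/3).
+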
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile
index 03b6355056..8599a3d814 100644
--- a/lib/kernel/test/Makefile
+++ b/lib/kernel/test/Makefile
@@ -70,6 +70,15 @@ MODULES= \
interactive_shell_SUITE \
init_SUITE \
kernel_config_SUITE \
+ logger_SUITE \
+ logger_bench_SUITE \
+ logger_disk_log_h_SUITE \
+ logger_env_var_SUITE \
+ logger_filters_SUITE \
+ logger_formatter_SUITE \
+ logger_legacy_SUITE \
+ logger_simple_SUITE \
+ logger_std_h_SUITE \
os_SUITE \
pg2_SUITE \
seq_trace_SUITE \
@@ -102,7 +111,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
INSTALL_PROGS= $(TARGET_FILES)
EMAKEFILE=Emakefile
-COVERFILE=kernel.cover
+COVERFILE=kernel.cover logger.cover
# ----------------------------------------------------
# Release directory specification
@@ -149,7 +158,8 @@ release_tests_spec: make_emakefile
$(INSTALL_DIR) "$(RELSYSDIR)"
$(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)"
$(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)"
- $(INSTALL_DATA) kernel.spec kernel_smoke.spec kernel_bench.spec \
+ $(INSTALL_DATA) \
+ kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \
$(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 866043cfb4..c00fb44c46 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1568,7 +1568,8 @@ loop5606(Pid) ->
%% Tests get_env/* functions.
get_env(Conf) when is_list(Conf) ->
- {ok, _} = application:get_env(kernel, error_logger),
+ ok = application:set_env(kernel, new_var, new_val),
+ {ok, new_val} = application:get_env(kernel, new_var),
undefined = application:get_env(undefined_app, a),
undefined = application:get_env(kernel, error_logger_xyz),
default = application:get_env(kernel, error_logger_xyz, default),
diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl
index 2d26a7246c..6c4526d0cf 100644
--- a/lib/kernel/test/error_logger_SUITE.erl
+++ b/lib/kernel/test/error_logger_SUITE.erl
@@ -32,7 +32,8 @@
init_per_group/2,end_per_group/2,
off_heap/1,
error_report/1, info_report/1, error/1, info/1,
- emulator/1, tty/1, logfile/1, add/1, delete/1]).
+ emulator/1, via_logger_process/1, other_node/1,
+ tty/1, logfile/1, add/1, delete/1]).
-export([generate_error/2]).
@@ -46,16 +47,19 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [off_heap, error_report, info_report, error, info, emulator, tty,
- logfile, add, delete].
+ [off_heap, error_report, info_report, error, info, emulator,
+ via_logger_process, other_node, tty, logfile, add, delete].
groups() ->
[].
init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>log}),
Config.
end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
ok.
init_per_group(_GroupName, Config) ->
@@ -226,6 +230,40 @@ generate_error(Error, Stack) ->
erlang:raise(error, Error, Stack).
%%-----------------------------------------------------------------
+
+via_logger_process(Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip,"Skip on windows - cant change file mode"};
+ _ ->
+ error_logger:add_report_handler(?MODULE, self()),
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ Msg = "File operation error: eacces. Target: " ++
+ Dir ++ ". Function: list_dir. ",
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ ok = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ reported(error_report, std_error, Msg),
+ my_yes = error_logger:delete_report_handler(?MODULE),
+ ok
+ end.
+
+%%-----------------------------------------------------------------
+
+other_node(_Config) ->
+ error_logger:add_report_handler(?MODULE, self()),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger,
+ #{level=>info,filter_default=>log}]),
+ rpc:call(Node,error_logger,error_report,[hi_from_remote]),
+ reported(error_report,std_error,hi_from_remote),
+ test_server:stop_node(Node),
+ ok.
+
+
+%%-----------------------------------------------------------------
%% We don't enable or disable tty error logging here. We do not
%% want to interact with the test run.
%%-----------------------------------------------------------------
@@ -279,7 +317,7 @@ reported(Tag, Type, Report) ->
test_server:messages_get(),
ok
after 1000 ->
- ct:fail(no_report_received)
+ ct:fail({no_report_received,test_server:messages_get()})
end.
%%-----------------------------------------------------------------
diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl
index c8415b34e5..6a006cdc01 100644
--- a/lib/kernel/test/init_SUITE.erl
+++ b/lib/kernel/test/init_SUITE.erl
@@ -299,7 +299,7 @@ many_restarts() ->
many_restarts(Config) when is_list(Config) ->
{ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC),
- loop_restart(50,Node,rpc:call(Node,erlang,whereis,[error_logger])),
+ loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])),
loose_node:stop(Node),
ok.
@@ -316,13 +316,13 @@ loop_restart(N,Node,EHPid) ->
ct:fail(not_stopping)
end,
ok = wait_for(30, Node, EHPid),
- loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])).
+ loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])).
wait_for(0,Node,_) ->
loose_node:stop(Node),
error;
wait_for(N,Node,EHPid) ->
- case rpc:call(Node, erlang, whereis, [error_logger]) of
+ case rpc:call(Node, erlang, whereis, [logger]) of
Pid when is_pid(Pid), Pid =/= EHPid ->
%% erlang:display(ok),
ok;
diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec
index 62afc9f97b..86d2155828 100644
--- a/lib/kernel/test/kernel.spec
+++ b/lib/kernel/test/kernel.spec
@@ -2,3 +2,4 @@
{config, "../test_server/ts.unix.config"}.
{suites,"../kernel_test", all}.
+{skip_suites,"../kernel_test",[logger_bench_SUITE],"Not ready"}.
diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover
new file mode 100644
index 0000000000..b30bcfe920
--- /dev/null
+++ b/lib/kernel/test/logger.cover
@@ -0,0 +1,14 @@
+%% -*- erlang -*-
+{incl_mods,[error_logger,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_h_common,
+ logger_filters,
+ logger_formatter,
+ logger_server,
+ logger_simple,
+ logger_std_h,
+ logger_sup]}.
+
diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec
new file mode 100644
index 0000000000..cd76a754a4
--- /dev/null
+++ b/lib/kernel/test/logger.spec
@@ -0,0 +1,11 @@
+%% -*-erlang-*-
+{suites,"../kernel_test", [error_logger_SUITE,
+ error_logger_warn_SUITE,
+ logger_SUITE,
+ logger_disk_log_h_SUITE,
+ logger_env_var_SUITE,
+ logger_filters_SUITE,
+ logger_formatter_SUITE,
+ logger_legacy_SUITE,
+ logger_simple_SUITE,
+ logger_std_h_SUITE]}.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
new file mode 100644
index 0000000000..0edce3e34c
--- /dev/null
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -0,0 +1,828 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ file=>?FILE, line=>?LINE-N}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ case logger:get_handler_config(logger_std_h) of
+ {ok,StdH} ->
+ ok = logger:remove_handler(logger_std_h),
+ [{logger_std_h,StdH}|Config];
+ _ ->
+ Config
+ end.
+
+end_per_suite(Config) ->
+ case ?config(logger_std_h,Config) of
+ {HMod,HConfig} ->
+ ok = logger:add_handler(logger_std_h,HMod,HConfig);
+ _ ->
+ ok
+ end.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ {ok,LC} = logger:get_logger_config(),
+ [{logger_config,LC}|Config].
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ add_remove_handler,
+ multiple_handlers,
+ add_remove_filter,
+ change_config,
+ set_formatter,
+ log_all_levels_api,
+ macros,
+ set_level,
+ set_level_module,
+ cache_level_module,
+ format_report,
+ filter_failed,
+ handler_failed,
+ config_sanity_check,
+ log_failed,
+ emulator,
+ via_logger_process,
+ other_node,
+ compare_levels,
+ process_metadata].
+
+start_stop(_Config) ->
+ S = whereis(logger),
+ true = is_pid(S),
+ ok.
+
+add_remove_handler(_Config) ->
+ register(callback_receiver,self()),
+ {ok,#{handlers:=Hs0}} = logger:get_logger_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ [add] = test_server:messages_get(),
+ {ok,#{handlers:=Hs}} = logger:get_logger_config(),
+ [h1|Hs0] = Hs,
+ {ok,{?MODULE,#{level:=info,filters:=[],filter_default:=log}}} = % defaults
+ logger:get_handler_config(h1),
+ ok = logger:set_handler_config(h1,filter_default,stop),
+ [changing_config] = test_server:messages_get(),
+ ?LOG_INFO("hello",[]),
+ ok = check_no_log(),
+ ok = logger:set_handler_config(h1,filter_default,log),
+ [changing_config] = test_server:messages_get(),
+ {ok,{?MODULE,#{filter_default:=log}}} = logger:get_handler_config(h1),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = logger:remove_handler(h1),
+ [remove] = test_server:messages_get(),
+ {ok,#{handlers:=Hs0}} = logger:get_logger_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+ logger:info("hello",[]),
+ ok = check_no_log(),
+ ok.
+
+add_remove_handler(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+multiple_handlers(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}),
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+ ok.
+
+multiple_handlers(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+add_remove_filter(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ LF = {fun(Log,_) -> Log#{level=>error} end, []},
+ ok = logger:add_logger_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_logger_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_logger_filter(lf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+
+ ok = logger:add_handler(h2,?MODULE,#{level=>info,filter_default=>log}),
+ HF = {fun(#{level:=error}=Log,_) ->
+ Log#{level=>mylevel};
+ (_,_) ->
+ ignore
+ end,
+ []},
+ ok = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_logger_filter(lf),
+ {error,{not_found,lf}} = logger:remove_logger_filter(lf),
+
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = check_logged(info,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_handler_filter(h1,hf),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = check_logged(info,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ok.
+
+add_remove_filter(cleanup,_Config) ->
+ logger:remove_logger_filter(lf),
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+change_config(_Config) ->
+ %% Overwrite handler config - check that defaults are added
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,custom=>custom}),
+ {ok,{?MODULE,#{level:=debug,filter_default:=log,custom:=custom}}} =
+ logger:get_handler_config(h1),
+ register(callback_receiver,self()),
+ ok = logger:set_handler_config(h1,#{filter_default=>stop}),
+ [changing_config] = test_server:messages_get(),
+ {ok,{?MODULE,#{level:=info,filter_default:=stop}=C2}} =
+ logger:get_handler_config(h1),
+ false = maps:is_key(custom,C2),
+ {error,fail} = logger:set_handler_config(h1,#{fail=>true}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(
+ h1,#{call=>fun() -> logger:set_module_level(?MODULE,debug) end}),
+ {ok,{?MODULE,C2}} = logger:get_handler_config(h1),
+
+ %% Change one key only
+ {error,fail} = logger:set_handler_config(h1,fail,true),
+ ok = logger:set_handler_config(h1,custom,custom),
+ [changing_config] = test_server:messages_get(),
+ {ok,{?MODULE,#{custom:=custom}=C3}} = logger:get_handler_config(h1),
+ C2 = maps:remove(custom,C3),
+
+ %% Overwrite logger config - check that defaults are added
+ {ok,LConfig} = logger:get_logger_config(),
+ ok = logger:set_logger_config(#{filter_default=>stop}),
+ {ok,#{level:=info,filters:=[],handlers:=[],filter_default:=stop}=LC1} =
+ logger:get_logger_config(),
+ 4 = maps:size(LC1),
+
+ %% Change one key only
+ ok = logger:set_logger_config(handlers,[h1]),
+ {ok,#{level:=info,filters:=[],handlers:=[h1],filter_default:=stop}} =
+ logger:get_logger_config(),
+
+ %% Cleanup
+ ok = logger:set_logger_config(LConfig),
+ [] = test_server:messages_get(),
+
+ ok.
+
+change_config(cleanup,Config) ->
+ logger:remove_handler(h1),
+ LC = ?config(logger_config,Config),
+ logger:set_logger_config(LC),
+ ok.
+
+set_formatter(_Config) ->
+ {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ logger:info("hello",[]),
+ receive
+ {_Log,#{formatter:={?MODULE,[]}}} ->
+ ok
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end,
+ ok.
+
+set_formatter(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_all_levels_api(_Config) ->
+ ok = logger:set_logger_config(level,debug),
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}),
+ test_api(emergency),
+ test_api(alert),
+ test_api(critical),
+ test_api(error),
+ test_api(warning),
+ test_api(notice),
+ test_api(info),
+ test_api(debug),
+ test_log_function(emergency),
+ ok.
+
+log_all_levels_api(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_logger_config(level,info),
+ ok.
+
+macros(_Config) ->
+ ok = logger:set_module_level(?MODULE,debug),
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}),
+ test_macros(emergency),
+ ok.
+
+macros(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:reset_module_level(?MODULE),
+ ok.
+
+set_level(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}),
+ logger:debug(?map_rep),
+ ok = check_no_log(),
+ logger:info(M1=?map_rep),
+ ok = check_logged(info,M1,#{}),
+ ok = logger:set_logger_config(level,debug),
+ logger:debug(M2=?map_rep),
+ ok = check_logged(debug,M2,#{}),
+ ok.
+
+set_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_logger_config(level,info),
+ ok.
+
+set_level_module(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad),
+ {error,{not_a_module,{bad}}} = logger:set_module_level({bad},warning),
+ ok = logger:set_module_level(?MODULE,warning),
+ logger:info(?map_rep,?MY_LOC(0)),
+ ok = check_no_log(),
+ logger:warning(M1=?map_rep,?MY_LOC(0)),
+ ok = check_logged(warning,M1,?MY_LOC(1)),
+ ok = logger:set_module_level(?MODULE,info),
+ logger:info(M2=?map_rep,?MY_LOC(0)),
+ ok = check_logged(info,M2,?MY_LOC(1)),
+
+ {error,{not_a_module,{bad}}} = logger:reset_module_level({bad}),
+ ok = logger:reset_module_level(?MODULE),
+
+ ok.
+
+set_level_module(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:reset_module_level(?MODULE),
+ ok.
+
+cache_level_module(_Config) ->
+ ok = logger:reset_module_level(?MODULE),
+ [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
+ ?LOG_INFO(?map_rep),
+ %% Caching is done asynchronously, so wait a bit for the update
+ timer:sleep(100),
+ [_] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
+ ok = logger:reset_module_level(?MODULE),
+ [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
+ ok.
+
+cache_level_module(cleanup,_Config) ->
+ logger:reset_module_level(?MODULE),
+ ok.
+
+format_report(_Config) ->
+ {"~ts",["string"]} = logger:format_report("string"),
+ {"~tp",[term]} = logger:format_report(term),
+ {"~tp",[[]]} = logger:format_report([]),
+ {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]),
+ KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}],
+ KeyValRes =
+ {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp",
+ [key1,value1,key2,"value2",key3,[]]} =
+ logger:format_report(KeyVals),
+ KeyValRes = logger:format_report(maps:from_list(KeyVals)),
+ KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}),
+ {" ~tp: ~tp\n ~tp: ~tp",
+ [label,{?MODULE,test},report,KeyVals]} =
+ logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,term]} =
+ logger:format_report([{key1,value1},term]),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,[]]} =
+ logger:format_report([{key1,value1},[]]),
+
+ {"~tp",[[]]} = logger:format_report([[],[],[]]),
+
+ ok.
+
+filter_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+
+ %% Logger filters
+ {error,{invalid_filter,_}} =
+ logger:add_logger_filter(lf,{fun(_) -> ok end,args}),
+ ok = logger:add_logger_filter(lf,{fun(_,_) -> a=b end,args}),
+ {ok,#{filters:=[_]}} = logger:get_logger_config(),
+ ok = logger:info(M1=?map_rep),
+ ok = check_logged(info,M1,#{}),
+ {error,{not_found,lf}} = logger:remove_logger_filter(lf),
+
+ ok = logger:add_logger_filter(lf,{fun(_,_) -> faulty_return end,args}),
+ {ok,#{filters:=[_]}} = logger:get_logger_config(),
+ ok = logger:info(M2=?map_rep),
+ ok = check_logged(info,M2,#{}),
+ {error,{not_found,lf}} = logger:remove_logger_filter(lf),
+
+ %% Handler filters
+ {error,{not_found,h0}} =
+ logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}),
+ {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf),
+ {error,{invalid_filter,_}} =
+ logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}),
+ ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> a=b end,args}),
+ {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1),
+ ok = logger:info(M3=?map_rep),
+ ok = check_logged(info,M3,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}),
+ {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1),
+ ok = logger:info(M4=?map_rep),
+ ok = check_logged(info,M4,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok.
+
+filter_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+handler_failed(_Config) ->
+ {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}),
+ {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}),
+ {error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad),
+ {error,{invalid_filters,false}} =
+ logger:add_handler(h1,?MODULE,#{filters=>false}),
+ {error,{invalid_filter_default,true}} =
+ logger:add_handler(h1,?MODULE,#{filter_default=>true}),
+ {error,{invalid_formatter,[]}} =
+ logger:add_handler(h1,?MODULE,#{formatter=>[]}),
+ ok = logger:add_handler(h1,nomodule,#{filter_default=>log}),
+ logger:info(?map_rep),
+ check_no_log(),
+ #{logger:=#{handlers:=Ids1},
+ handlers:=H1} = logger:i(),
+ false = lists:member(h1,Ids1),
+ false = lists:keymember(h1,1,H1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+
+ ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,crash=>true}),
+ {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}),
+
+ logger:info(?map_rep),
+ check_no_log(),
+ #{logger:=#{handlers:=Ids2},
+ handlers:=H2} = logger:i(),
+ false = lists:member(h2,Ids2),
+ false = lists:keymember(h2,1,H2),
+ {error,{not_found,h2}} = logger:remove_handler(h2),
+
+ ok.
+
+handler_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+config_sanity_check(_Config) ->
+ %% Logger config
+ {error,{invalid_filter_default,bad}} =
+ logger:set_logger_config(filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_logger_config(level,bad),
+ {error,{invalid_handlers,bad}} = logger:set_logger_config(handlers,bad),
+ {error,{invalid_id,{bad,bad}}} =
+ logger:set_logger_config(handlers,[{bad,bad}]),
+ {error,{invalid_id,"bad"}} = logger:set_logger_config(handlers,["bad"]),
+ {error,{invalid_filters,bad}} = logger:set_logger_config(filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_logger_config(filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_logger_config(filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_logger_config(filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_logger_config(filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_logger_config,{bad,bad}}} =
+ logger:set_logger_config(bad,bad),
+
+ %% Handler config
+ {error,{not_found,h1}} = logger:set_handler_config(h1,a,b),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_handler_config(h1,filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad),
+ {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_handler_config(h1,filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,bad),
+ {error,{invalid_module,{bad}}} =
+ logger:set_handler_config(h1,formatter,{{bad},cfg}),
+ {error,{invalid_formatter_config,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,bad}),
+ {error,{invalid_formatter_config,{bad,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}),
+ {error,{invalid_formatter_config,{template,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>bad}}),
+ {error,{invalid_formatter_template,[1]}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[1]}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[]}}),
+ {error,{invalid_formatter_config,{single_line,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>true}}),
+ {error,{invalid_formatter_config,{legacy_header,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>true}}),
+ ok = logger:set_handler_config(h1,custom,custom),
+ ok.
+
+config_sanity_check(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ {error,function_clause} = ?TRY(logger:log(bad,?map_rep)),
+ {error,function_clause} = ?TRY(logger:log(info,?map_rep,bad)),
+ {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad)),
+ {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad,#{})),
+ {error,function_clause} = ?TRY(logger:log(info,bad,bad,bad)),
+ {error,function_clause} = ?TRY(logger:log(info,bad,bad,#{})),
+ check_no_log(),
+ ok = logger:log(info,M1=?str,#{}),
+ check_logged(info,M1,#{}),
+ ok = logger:log(info,M2=?map_rep,#{}),
+ check_logged(info,M2,#{}),
+ ok = logger:log(info,M3=?keyval_rep,#{}),
+ check_logged(info,M3,#{}),
+
+ %% Should we check report input more thoroughly?
+ ok = logger:log(info,M4=?keyval_rep++[other,stuff,in,list],#{}),
+ check_logged(info,M4,#{}),
+
+ %% This might break a handler since it is assumed to be a format
+ %% string and args, so it depends on how the handler protects itself
+ %% against something like io_lib:format("ok","ok")
+ ok = logger:log(info,"ok","ok",#{}),
+ check_logged(info,"ok","ok",#{}),
+
+ ok.
+
+log_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+emulator(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log,
+ tc_proc=>self()}),
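+ %% Crash a process and check that the emulator generated error report
+ %% (metadata error_logger=>#{tag=>error,emulator=>true}) is forwarded
+ %% to the handler.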
+ Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
+ Error = {badmatch,4},
+ Stack = [{module, function, 2, []}],
+ Pid = spawn(?MODULE, generate_error, [Error, Stack]),
+ check_logged(error, Msg, [Pid, node(), {Error, Stack}],
+ #{gl=>group_leader(),
+ error_logger=>#{tag=>error,emulator=>true}}),
+ ok.
+
+emulator(cleanup, _Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+generate_error(Error, Stack) ->
+ erlang:raise(error, Error, Stack).
+
+via_logger_process(Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log,
+ tc_proc=>self()}),
+
+ %% Explicitly send a message to the logger process
+ %% This is used by code_server, erl_prim_loader, init, prim_file, ...
+ Msg = ?str,
+ logger ! {log,error,Msg,[],#{}},
+ check_logged(error, Msg, [], #{}),
+
+ case os:type() of
+ {win32,_} ->
+ %% Skip this part on Windows - can't change the file mode
+ ok;
+ _ ->
+ %% This should trigger the same thing from erl_prim_loader
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ check_logged(error,
+ #{report=>"File operation error: eacces. Target: " ++
+ Dir ++". Function: list_dir. "},
+ #{pid=>self(),
+ gl=>group_leader(),
+ error_logger=>#{tag=>error_report,
+ type=>std_error}}),
+ ok
+ end.
+
+via_logger_process(cleanup, Config) ->
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ _ = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ logger:remove_handler(h1),
+ ok.
+
+other_node(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log,
+ tc_proc=>self()}),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ rpc:call(Node,logger,error,[Msg=?str,#{}]),
+ check_logged(error,Msg,#{}),
+ ok.
+
+other_node(cleanup,_Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes],
+ logger:remove_handler(h1),
+ ok.
+
+compare_levels(_Config) ->
+ Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+ ok = compare(Levels),
+ {error,badarg} = ?TRY(logger:compare_levels(bad,bad)),
+ {error,badarg} = ?TRY(logger:compare_levels({bad},info)),
+ {error,badarg} = ?TRY(logger:compare_levels(info,"bad")),
+ ok.
+
+compare([L|Rest]) ->
+ eq = logger:compare_levels(L,L),
+ [gt = logger:compare_levels(L,L1) || L1 <- Rest],
+ [lt = logger:compare_levels(L1,L) || L1 <- Rest],
+ compare(Rest);
+compare([]) ->
+ ok.
+
+process_metadata(_Config) ->
+ undefined = logger:get_process_metadata(),
+ {error,badarg} = ?TRY(logger:set_process_metadata(bad)),
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ Time = erlang:monotonic_time(microsecond),
+ ProcMeta = #{time=>Time,line=>0,custom=>proc},
+ ok = logger:set_process_metadata(ProcMeta),
+ S1 = ?str,
+ ?LOG_INFO(S1,#{custom=>macro}),
+ check_logged(info,S1,#{time=>Time,line=>0,custom=>macro}),
+
+ Time2 = erlang:monotonic_time(microsecond),
+ S2 = ?str,
+ ?LOG_INFO(S2,#{time=>Time2,line=>1,custom=>macro}),
+ check_logged(info,S2,#{time=>Time2,line=>1,custom=>macro}),
+
+ logger:info(S3=?str,#{custom=>func}),
+ check_logged(info,S3,#{time=>Time,line=>0,custom=>func}),
+
+ ProcMeta = logger:get_process_metadata(),
+ ok = logger:unset_process_metadata(),
+ undefined = logger:get_process_metadata(),
+
+ ok.
+
+process_metadata(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+check_logged(Level,Format,Args,Meta) ->
+ do_check_logged(Level,{Format,Args},Meta).
+
+check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) ->
+ do_check_logged(Level,{report,Msg},Meta);
+check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) ->
+ do_check_logged(Level,{string,Msg},Meta).
+
+do_check_logged(Level,Msg0,Meta0) ->
+ receive
+ {#{level:=Level,msg:=Msg,meta:=Meta},_} ->
+ check_msg(Msg0,Msg),
+ check_maps(Meta0,Meta,meta)
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end.
+
+check_no_log() ->
+ receive
+ X -> ct:fail({got_unexpected_log,X})
+ after 500 ->
+ ok
+ end.
+
+check_msg(Msg,Msg) ->
+ ok;
+check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) ->
+ check_maps(Expected,Got,msg);
+check_msg(Expected,Got) ->
+ ct:fail({unexpected,msg,Expected,Got}).
+
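+%% Check that all expected key/value pairs are present in the received map:
+%% merging Expected into Got must leave Got unchanged.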
+check_maps(Expected,Got,What) ->
+ case maps:merge(Got,Expected) of
+ Got ->
+ ok;
+ _ ->
+ ct:fail({unexpected,What,Expected,Got})
+ end.
+
+%% Handler
+adding_handler(_Id,Config) ->
+ maybe_send(add),
+ {ok,Config}.
+removing_handler(_Id) ->
+ maybe_send(remove),
+ ok.
+changing_config(_Id,_Old,#{call:=Fun}) ->
+ Fun();
+changing_config(_Id,_Old,#{fail:=true}) ->
+ {error,fail};
+changing_config(_Id,_Old,Config) ->
+ maybe_send(changing_config),
+ {ok,Config}.
+
+maybe_send(Msg) ->
+ case whereis(callback_receiver) of
+ undefined -> ok;
+ Pid -> Pid ! Msg
+ end.
+
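+%% Handler log callback. The first clause crashes on purpose (badmatch) when
+%% the handler config contains crash=>true; otherwise the log event is
+%% forwarded to the test case process.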
+log(_Log,#{crash:=true}) ->
+ a=b;
+log(Log,Config) ->
+ TcProc = maps:get(tc_proc,Config,self()),
+ TcProc ! {Log,Config},
+ ok.
+
+test_api(Level) ->
+ logger:Level(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:Level(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:Level("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_function(Level) ->
+ logger:log(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:log(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:log(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_macros(emergency=Level) ->
+ ?LOG_EMERGENCY(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG_EMERGENCY(F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG_EMERGENCY(F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
diff --git a/lib/kernel/test/logger_bench_SUITE.erl b/lib/kernel/test/logger_bench_SUITE.erl
new file mode 100644
index 0000000000..d47122ea9d
--- /dev/null
+++ b/lib/kernel/test/logger_bench_SUITE.erl
@@ -0,0 +1,500 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_bench_SUITE).
+
+-compile(export_all).
+
+%%%-----------------------------------------------------------------
+%%% To include lager tests, add paths to lager and goldrush
+%%% (goldrush is a dependency inside the lager repo)
+%%%
+%%% To print data to .csv files, add the following to a config file:
+%%% {print_csv,[{console_handler,[{path,"/some/dir/"}]}]}.
+%%%
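+%%% To add the lager paths, one option (the paths below are placeholders)
+%%% is to start the node with extra code paths, for example:
+%%% erl -pa /path/to/lager/ebin -pa /path/to/goldrush/ebin
+%%%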
+%%%-----------------------------------------------------------------
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(msg,lists:flatten(string:pad("Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE),
+ 80,trailing,$*))).
+-define(meta,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ pid=>self()}).
+
+-define(NO_COMPARE,[profile]).
+
+-define(TIMES,100000).
+
+suite() ->
+ [{timetrap,{seconds,120}}].
+
+init_per_suite(Config) ->
+ DataDir = ?config(data_dir,Config),
+ have_lager() andalso make(DataDir),
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(Group, Config) ->
+ H = remove_all_handlers(),
+ do_init_per_group(Group),
+ [{handlers,H}|Config].
+
+do_init_per_group(minimal_handler) ->
+ ok = logger:add_handler(?MODULE,?MODULE,#{level=>error,filter_default=>log});
+do_init_per_group(console_handler) ->
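+ %% Set the overload protection thresholds above ?TIMES so the handler
+ %% never enters sync, drop or flush mode during the benchmark, and
+ %% disable burst limiting for the same reason.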
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS,
+ logger_std_h=>#{type=>standard_io,
+ toggle_sync_qlen => ?TIMES+1,
+ drop_new_reqs_qlen => ?TIMES+2,
+ flush_reqs_qlen => ?TIMES+3,
+ enable_burst_limit => false}}),
+ have_lager() andalso lager_helper:start(),
+ ok.
+
+end_per_group(Group, Config) ->
+ case ?config(saved_config,Config) of
+ {_,[{bench,Bench}]} ->
+ print_compare_chart(Group,Bench);
+ _ ->
+ ok
+ end,
+ add_all_handlers(?config(handlers,Config)),
+ do_end_per_group(Group).
+
+do_end_per_group(minimal_handler) ->
+ ok = logger:remove_handler(?MODULE);
+do_end_per_group(console_handler) ->
+ ok = logger:remove_handler(?MODULE),
+ have_lager() andalso lager_helper:stop(),
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ wait_for_handlers(),
+ ok.
+
+wait_for_handlers() ->
+ wait_for_handler(?MODULE),
+ wait_for_handler(lager_event).
+
+wait_for_handler(Handler) ->
+ case whereis(Handler) of
+ undefined ->
+ io:format("~p: noproc1",[Handler]),
+ ok;
+ Pid ->
+ case process_info(Pid,message_queue_len) of
+ {_,0} ->
+ io:format("~p: queue=~p",[Handler,0]),
+ ok;
+ {_,Q} ->
+ io:format("~p: queue=~p",[Handler,Q]),
+ timer:sleep(2000),
+ wait_for_handler(Handler);
+ undefined ->
+ io:format("~p: noproc2",[Handler]),
+ ok
+ end
+ end.
+
+groups() ->
+ [{minimal_handler,[],[log,
+ log_drop,
+ log_drop_by_handler,
+ macro,
+ macro_drop,
+ macro_drop_by_handler,
+ error_logger,
+ error_logger_drop,
+ error_logger_drop_by_handler
+ ]},
+ {console_handler,[],[%profile,
+ log,
+ log_drop,
+ log_drop_by_handler,
+ %% log_handler_complete,
+ macro,
+ macro_drop,
+ macro_drop_by_handler,
+ %% macro_handler_complete,
+ error_logger,
+ error_logger_drop,
+ error_logger_drop_by_handler%% ,
+ %% error_logger_handler_complete
+ ] ++ lager_cases()}
+ ].
+
+lager_cases() ->
+ case have_lager() of
+ true ->
+ [lager_log,
+ lager_log_drop,
+ lager_log_drop_by_handler,
+ %% lager_log_handler_complete,
+ lager_parsetrans,
+ lager_parsetrans_drop,
+ lager_parsetrans_drop_by_handler%% ,
+ %% lager_parsetrans_handler_complete
+ ];
+ false ->
+ []
+ end.
+
+
+all() ->
+ [{group,minimal_handler},
+ {group,console_handler}
+ ].
+
+log(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [error,?msg], Times).
+
+log_drop(Config) ->
+ Times = ?TIMES*100,
+ ok = logger:set_logger_config(level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times).
+
+log_drop(cleanup,_Config) ->
+ ok = logger:set_logger_config(level,info).
+
+log_drop_by_handler(Config) ->
+ Times = ?TIMES,
+ %% just ensure correct levels
+ ok = logger:set_logger_config(level,info),
+ ok = logger:set_handler_config(?MODULE,level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times).
+
+log_handler_complete(Config) ->
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?MODULE,?DEFAULT_FORMAT_CONFIG}),
+ handler_complete(Config, ?FUNCTION_NAME, fun do_log_func/2, [error,?msg]).
+
+log_handler_complete(cleanup,_Config) ->
+ ok=logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}).
+
+macro(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[error,?msg], Times).
+
+macro_drop(Config) ->
+ Times = ?TIMES*100,
+ ok = logger:set_logger_config(level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[info,?msg], Times).
+
+macro_drop(cleanup,_Config) ->
+ ok = logger:set_logger_config(level,info).
+
+macro_drop_by_handler(Config) ->
+ Times = ?TIMES,
+ %% just ensure correct levels
+ ok = logger:set_logger_config(level,info),
+ ok = logger:set_handler_config(?MODULE,level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2, [info,?msg], Times).
+
+macro_handler_complete(Config) ->
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?MODULE,?DEFAULT_FORMAT_CONFIG}),
+ handler_complete(Config, ?FUNCTION_NAME, fun do_log_macro/2, [error,?msg]).
+
+macro_handler_complete(cleanup,_Config) ->
+ ok=logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}).
+
+error_logger(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [error,?msg], Times).
+
+error_logger_drop(Config) ->
+ Times = ?TIMES*100,
+ ok = logger:set_logger_config(level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [info,?msg], Times).
+
+error_logger_drop(cleanup,_Config) ->
+ ok = logger:set_logger_config(level,info).
+
+error_logger_drop_by_handler(Config) ->
+ Times = ?TIMES,
+ %% just ensure correct levels
+ ok = logger:set_logger_config(level,info),
+ ok = logger:set_handler_config(?MODULE,level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [info,?msg], Times).
+
+error_logger_handler_complete(Config) ->
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?MODULE,?DEFAULT_FORMAT_CONFIG}),
+ handler_complete(Config, ?FUNCTION_NAME, fun do_error_logger/2, [error,?msg]).
+
+error_logger_handler_complete(cleanup,_Config) ->
+ ok=logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}).
+
+lager_log(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [error,?msg], Times).
+
+lager_log_drop(Config) ->
+ Times = ?TIMES*100,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times).
+
+lager_log_drop_by_handler(Config) ->
+ %% This concept does not exist in lager, so this does the same as lager_log_drop/1
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times).
+
+lager_log_handler_complete(Config) ->
+ handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_func/2, [error,?msg]).
+
+lager_parsetrans(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [error,?msg], Times).
+
+lager_parsetrans_drop(Config) ->
+ Times = ?TIMES*100,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times).
+
+lager_parsetrans_drop_by_handler(Config) ->
+ %% This concept does not exist in lager, so this does the same as lager_parsetrans_drop/1
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times).
+
+lager_parsetrans_handler_complete(Config) ->
+ handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_parsetrans/2, [error,?msg]).
+
+
+profile(Config) ->
+ Times = ?TIMES,
+ %% fprof:apply(fun repeated_apply/3,[fun lager_helper:do_func/2,[error,?msg],Times]),
+ fprof:apply(fun repeated_apply/3,[fun do_log_func/2,[error,?msg],Times]),
+ ok = fprof:profile(),
+ ok = fprof:analyse(dest,"../fprof.analyse"),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+%% Handler
+log(_Log,_Config) ->
+ ok.
+
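+%% Formatter callback used by the *_handler_complete cases: it sends 'done'
+%% back to the test case process so that wait_for_done/2 can detect when the
+%% handler has processed each log event.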
+format(Log=#{meta:=#{pid:=Pid}},Config) when is_pid(Pid) ->
+ String = ?DEFAULT_FORMATTER:format(Log,Config),
+ Pid ! done,
+ String;
+format(Log=#{meta:=#{pid:=PidStr}},Config) when is_list(PidStr) ->
+ String = ?DEFAULT_FORMATTER:format(Log,Config),
+ list_to_pid(PidStr) ! done,
+ String.
+
+handler_complete(Config, TC, Fun, Args) ->
+ Times = ?TIMES,
+ Start = os:perf_counter(microsecond),
+ repeated_apply(Fun, Args, Times),
+ MSecs = wait_for_done(Start,Times),
+ calc_and_report(Config,TC,MSecs,Times).
+
+wait_for_done(Start,0) ->
+ os:perf_counter(microsecond) - Start;
+wait_for_done(Start,N) ->
+ receive
+ done ->
+ wait_for_done(Start,N-1)
+ after 20000 ->
+ ct:fail("missing " ++ integer_to_list(N) ++ " replys")
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Benchmark stuff
+run_benchmark(Config,Tag,Fun,Args,Times) ->
+ _ = erlang:apply(Fun, Args), % apply once to ensure level is cached
+ MSecs = measure_repeated_op(Fun, Args, Times),
+ %% fprof:profile(),
+ %% fprof:analyse(dest,"../"++atom_to_list(Tag)++".prof"),
+ calc_and_report(Config,Tag,MSecs,Times).
+
+measure_repeated_op(Fun, Args, Times) ->
+ Start = os:perf_counter(microsecond),
+ %% fprof:apply(fun repeated_apply/3, [Fun, Args, Times]),
+ repeated_apply(Fun, Args, Times),
+ os:perf_counter(microsecond) - Start.
+
+repeated_apply(_F, _Args, Times) when Times =< 0 ->
+ ok;
+repeated_apply(F, Args, Times) ->
+ erlang:apply(F, Args),
+ repeated_apply(F, Args, Times - 1).
+
+calc_and_report(Config,Tag,MSecs,Times) ->
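+ %% MSecs is the elapsed time in microseconds (from os:perf_counter/1),
+ %% so IOPS is the number of log operations per second.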
+ IOPS = trunc(Times * (1000000 / MSecs)),
+ ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }),
+ ct:print("~p:~n~p IOPS, ~p us", [Tag, IOPS, MSecs]),
+ ct:comment("~p IOPS, ~p us", [IOPS, MSecs]),
+ Bench = case ?config(saved_config,Config) of
+ {_,[{bench,B}]} -> B;
+ undefined -> []
+ end,
+ {save_config,[{bench,[{Tag,IOPS,MSecs}|Bench]}]}.
+
+remove_all_handlers() ->
+ #{handlers:=Hs} = logger:i(),
+ [logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Hs.
+
+add_all_handlers(Hs) ->
+ [logger:add_handler(Id,Mod,Config) || {Id,Mod,Config} <- Hs],
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Call logger in different ways
+do_log_func(Level,Msg) ->
+ logger:Level(Msg,[],?meta).
+
+do_log_macro(error,Msg) ->
+ ?LOG_ERROR(Msg,[]);
+do_log_macro(info,Msg) ->
+ ?LOG_INFO(Msg,[]);
+do_log_macro(debug,Msg) ->
+ ?LOG_DEBUG(Msg,[]).
+
+do_error_logger(error,Msg) ->
+ error_logger:error_msg(Msg,[]);
+do_error_logger(info,Msg) ->
+ error_logger:info_msg(Msg,[]).
+
+%%%-----------------------------------------------------------------
+%%%
+print_compare_chart(Group,Bench) ->
+ io:format("~-20s~12s~12s~12s~12s",
+ ["Microseconds:","Log","Drop","HDrop","Complete"]),
+ io:format(user,"~-20s~12s~12s~12s~12s~n",
+ ["Microseconds:","Log","Drop","HDrop","Complete"]),
+ {Log,Drop,HDrop,Comp} = sort_bench(Bench,[],[],[],[]),
+ print_compare_chart(Log,Drop,HDrop,Comp),
+ io:format(user,"~n",[]),
+ maybe_print_csv_files(Group,
+ [{log,Log},{drop,Drop},{hdrop,HDrop},{comp,Comp}]).
+
+print_compare_chart([{What,_LIOPS,LMSecs}|Log],
+ [{What,_DIOPS,DMSecs}|Drop],
+ [{What,_HIOPS,HMSecs}|HDrop],
+ [{What,_CIOPS,CMSecs}|Comp]) ->
+ io:format("~-20w~12w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs,CMSecs]),
+ io:format(user,"~-20w~12w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs,CMSecs]),
+ print_compare_chart(Log,Drop,HDrop,Comp);
+print_compare_chart([{What,_LIOPS,LMSecs}|Log],
+ [{What,_DIOPS,DMSecs}|Drop],
+ [{What,_HIOPS,HMSecs}|HDrop],
+ []=Comp) ->
+ io:format("~-20w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs]),
+ io:format(user,"~-20w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs]),
+ print_compare_chart(Log,Drop,HDrop,Comp);
+print_compare_chart([],[],[],[]) ->
+ ok;
+print_compare_chart(Log,Drop,HDrop,Comp) ->
+ ct:fail({Log,Drop,HDrop,Comp}).
+
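+%% Classify each benchmark result by test case name: "<What>_drop*" goes to
+%% Drop or HDrop, "<What>_handler_complete*" to Comp, and anything else is a
+%% plain log measurement.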
+sort_bench([{TC,IOPS,MSecs}|Bench],Log,Drop,HDrop,Comp) ->
+ case lists:member(TC,?NO_COMPARE) of
+ true ->
+ sort_bench(Bench,Log,Drop,HDrop,Comp);
+ false ->
+ TCStr = atom_to_list(TC),
+ {What,Type} =
+ case re:run(TCStr,"(.*)_(drop.*)",
+ [{capture,all_but_first,list}]) of
+ {match,[WhatStr,TypeStr]} ->
+ {list_to_atom(WhatStr),list_to_atom(TypeStr)};
+ nomatch ->
+ case re:run(TCStr,"(.*)_(handler_complete.*)",
+ [{capture,all_but_first,list}]) of
+ {match,[WhatStr,TypeStr]} ->
+ {list_to_atom(WhatStr),list_to_atom(TypeStr)};
+ nomatch ->
+ {TC,log}
+ end
+ end,
+ case Type of
+ log ->
+ sort_bench(Bench,[{What,IOPS,MSecs}|Log],Drop,HDrop,Comp);
+ drop ->
+ sort_bench(Bench,Log,[{What,IOPS,MSecs}|Drop],HDrop,Comp);
+ drop_by_handler ->
+ sort_bench(Bench,Log,Drop,[{What,IOPS,MSecs}|HDrop],Comp);
+ handler_complete ->
+ sort_bench(Bench,Log,Drop,HDrop,[{What,IOPS,MSecs}|Comp])
+ end
+ end;
+sort_bench([],Log,Drop,HDrop,Comp) ->
+ {lists:keysort(1,Log),
+ lists:keysort(1,Drop),
+ lists:keysort(1,HDrop),
+ lists:keysort(1,Comp)}.
+
+maybe_print_csv_files(Group,Data) ->
+ case ct:get_config({print_csv,Group}) of
+ undefined ->
+ ok;
+ Cfg ->
+ Path = proplists:get_value(path,Cfg,".."),
+ Files = [begin
+ File = filename:join(Path,F)++".csv",
+ case filelib:is_regular(File) of
+ true ->
+ {ok,Fd} = file:open(File,[append]),
+ Fd;
+ false ->
+ {ok,Fd} = file:open(File,[write]),
+ ok = file:write(Fd,
+ "error_logger,lager_log,"
+ "lager_parsetrans,logger_log,"
+ "logger_macro\n"),
+ Fd
+ end
+ end || {F,_} <- Data],
+ [print_csv_file(F,D) || {F,D} <- lists:zip(Files,Data)],
+ [file:close(Fd) || Fd <- Files],
+ ok
+ end.
+
+print_csv_file(Fd,{_,Data}) ->
+ AllIOPS = [integer_to_list(IOPS) || {_,IOPS,_} <- Data],
+ ok = file:write(Fd,lists:join(",",AllIOPS)++"\n").
+
+have_lager() ->
+ code:ensure_loaded(lager) == {module,lager}.
+
+make(Dir) ->
+ {ok,Cwd} = file:get_cwd(),
+ ok = file:set_cwd(Dir),
+ up_to_date = make:all([load]),
+ ok = file:set_cwd(Cwd),
+ code:add_path(Dir).
diff --git a/lib/kernel/test/logger_bench_SUITE_data/Emakefile b/lib/kernel/test/logger_bench_SUITE_data/Emakefile
new file mode 100644
index 0000000000..85c82bdaab
--- /dev/null
+++ b/lib/kernel/test/logger_bench_SUITE_data/Emakefile
@@ -0,0 +1 @@
+{['lager_helper'],[{outdir,"."},debug_info,{i,"/home/uabshan/Work/git/otp/lib/kernel/src"},{i,"/home/uabshan/Work/git/otp/lib/kernel/include"}]}.
diff --git a/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl
new file mode 100644
index 0000000000..296ced4276
--- /dev/null
+++ b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl
@@ -0,0 +1,73 @@
+-module(lager_helper).
+
+-compile(export_all).
+-compile({parse_transform,lager_transform}).
+
+-include_lib("kernel/src/logger_internal.hrl").
+
+start() ->
+ application:load(lager),
+ application:set_env(lager, error_logger_redirect, false),
+ application:set_env(lager, async_threshold, 100010),
+ application:set_env(lager, async_threshold_window, 100),
+ application:set_env(lager,handlers,[{?MODULE,[{level,error}]}]),
+ lager:start().
+
+stop() ->
+ application:stop(lager).
+
+do_func(Level,Msg) ->
+ lager:log(Level,[{pid,self()}],Msg,[]).
+
+do_parsetrans(error,Msg) ->
+ lager:error(Msg,[]);
+do_parsetrans(info,Msg) ->
+ lager:info(Msg,[]).
+
+%%%-----------------------------------------------------------------
+%%% Dummy handler for lager
+-record(state, {level :: {'mask', integer()},
+ formatter :: atom(),
+ format_config :: any()}).
+init(Opts) ->
+ Level = proplists:get_value(level,Opts,info),
+ Formatter = proplists:get_value(formatter,Opts,logger_bench_SUITE),
+ FormatConfig = proplists:get_value(format_config,Opts,?DEFAULT_FORMAT_CONFIG),
+ {ok,#state{level=lager_util:config_to_mask(Level),
+ formatter=Formatter,
+ format_config=FormatConfig}}.
+
+handle_call(get_loglevel, #state{level=Level} = State) ->
+ {ok, Level, State};
+handle_call({set_loglevel, Level}, State) ->
+ try lager_util:config_to_mask(Level) of
+ Levels ->
+ {ok, ok, State#state{level=Levels}}
+ catch
+ _:_ ->
+ {ok, {error, bad_log_level}, State}
+ end;
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+handle_event({log, Message},
+ #state{level=L,formatter=Formatter,format_config=FormatConfig} = State) ->
+ case lager_util:is_loggable(Message, L, ?MODULE) of
+ true ->
+ Metadata =
+ case maps:from_list(lager_msg:metadata(Message)) of
+ Meta = #{pid:=Pid} when is_pid(Pid) ->
+ Meta;
+ Meta = #{pid:=PidStr} when is_list(PidStr) ->
+ Meta
+ end,
+ Log = #{level=>lager_msg:severity(Message),
+ msg=>{report,lager_msg:message(Message)},
+ meta=>Metadata},
+ io:put_chars(user, Formatter:format(Log,FormatConfig)),
+ {ok, State};
+ false ->
+ {ok, State}
+ end;
+handle_event(_Event, State) ->
+ {ok, State}.
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
new file mode 100644
index 0000000000..c7c6137380
--- /dev/null
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -0,0 +1,1417 @@
+-module(logger_disk_log_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log, [] = test_server:messages_get()).
+
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(log_no(File,N), lists:concat([File,".",N])).
+-define(domain,#{domain=>[?MODULE]}).
+
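+%% Time to wait for a repeated disk_log sync: if the repeat interval is
+%% disabled (an atom, e.g. no_repeat), fall back to 5500 ms, otherwise use
+%% the configured interval plus a 500 ms margin.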
+-define(SYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500;
+ true -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ if ?TEST_HOOKS_TAB == undefined ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ true ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop_handler,
+ create_log,
+ open_existing_log,
+ disk_log_opts,
+ default_formatter,
+ logging,
+ errors,
+ formatter_fail,
+ config_fail,
+ bad_input,
+ info_and_reset,
+ reconfig,
+ disk_log_sync,
+ disk_log_full,
+ disk_log_wrap,
+ disk_log_events,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync,
+ op_switch_to_drop,
+ op_switch_to_flush,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ %% qlen_kill_std,
+ mem_kill_new,
+ %% mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
+start_stop_handler(_Config) ->
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ true = is_pid(whereis(?MODULE)),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(?MODULE).
+start_stop_handler(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+create_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])),
+ LogFile1 = filename:join(PrivDir, Name1),
+ ok = start_and_add(Name1, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:info("hello", ?domain),
+ logger_disk_log_h:disk_log_sync(Name1),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000),
+
+ %% test second handler
+ Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])),
+ DLName = lists:concat([?FUNCTION_NAME,"_B_log"]),
+ LogFile2 = filename:join(PrivDir, DLName),
+ ok = start_and_add(Name2, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile2}),
+ logger:info("dummy", ?domain),
+ logger_disk_log_h:disk_log_sync(Name2),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+
+ remove_and_stop(Name1),
+ remove_and_stop(Name2),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+ ok.
+
+open_existing_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ HName = ?FUNCTION_NAME,
+ DLName = lists:concat([?FUNCTION_NAME,"_log"]),
+ LogFile1 = filename:join(PrivDir, DLName),
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:info("one", ?domain),
+ logger_disk_log_h:disk_log_sync(HName),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000),
+ logger:info("two", ?domain),
+ ok = remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000),
+
+ logger:info("two and a half", ?domain),
+
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:info("three", ?domain),
+ logger_disk_log_h:disk_log_sync(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000),
+ remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000).
+
+disk_log_opts(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ PrivDir = ?config(priv_dir,Config),
+ WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])),
+ WFile = lists:concat([?FUNCTION_NAME,"_W_log"]),
+ Size = length("12345"),
+ ConfigW = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ WFileFull = filename:join(PrivDir, WFile),
+ DLOptsW = #{file => WFileFull,
+ type => wrap,
+ max_no_bytes => Size,
+ max_no_files => 2},
+ ok = start_and_add(WName, ConfigW, DLOptsW),
+ WInfo1 = disk_log:info(WName),
+ ct:log("Fullname = ~s", [WFileFull]),
+ {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1),
+ Get(size,WInfo1),Get(current_file,WInfo1)},
+ logger:info("123", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:info("45", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:info("6", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ logger:info("7890", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])),
+ HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]),
+ ConfigH = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ HFile1Full = filename:join(PrivDir, HFile1),
+ DLOptsH1 = #{file => HFile1Full,
+ type => halt},
+ ok = start_and_add(HName1, ConfigH, DLOptsH1),
+ HInfo1 = disk_log:info(HName1),
+ ct:log("Fullname = ~s", [HFile1Full]),
+ {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1),
+ Get(size,HInfo1)},
+ logger:info("12345", ?domain),
+ logger_disk_log_h:disk_log_sync(HName1),
+ timer:sleep(500),
+ 1 = Get(no_written_items, disk_log:info(HName1)),
+
+ HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])),
+ HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]),
+ HFile2Full = filename:join(PrivDir, HFile2),
+ DLOptsH2 = DLOptsH1#{file => HFile2Full,
+ max_no_bytes => 1000},
+ ok = start_and_add(HName2, ConfigH, DLOptsH2),
+ HInfo3 = disk_log:info(HName2),
+ ct:log("Fullname = ~s", [HFile2Full]),
+ {HFile2Full,halt,1000} = {Get(file,HInfo3),Get(type,HInfo3),
+ Get(size,HInfo3)},
+
+ remove_and_stop(WName),
+ remove_and_stop(HName1),
+ remove_and_stop(HName2),
+ ok.
+
+default_formatter(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+ HConfig = #{disk_log_opts => #{file=>LogFile},
+ filter_default=>log},
+ ct:pal("Log: ~p", [LogFile]),
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, HConfig),
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ LogName = lists:concat([LogFile, ".1"]),
+ logger:info("dummy"),
+ wait_until_written(LogName),
+ {ok,Bin} = file:read_file(LogName),
+ match = re:run(Bin, "=INFO REPORT====.*\ndummy", [{capture,none}]),
+ ok.
+default_formatter(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+logging(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile = filename:join(PrivDir, Name),
+ ok = start_and_add(Name, #{filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ #{file => LogFile}),
+ MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end,
+ logger:info([{x,y}], #{report_cb => MsgFormatter}),
+ logger:info([{x,y}], #{}),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]),
+ try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000).
+
+logging(cleanup, _Config) ->
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ remove_and_stop(Name).
+
+errors(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile1 = filename:join(PrivDir,Name1),
+ HConfig = #{disk_log_opts=>#{file=>LogFile1},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ ok = logger:add_handler(Name1, logger_disk_log_h, HConfig),
+ {error,{already_exist,Name1}} =
+ logger:add_handler(Name1, logger_disk_log_h, #{}),
+
+ %%! TODO:
+ %%! Check how bad log_opts are handled!
+
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(Name1,
+ disk_log_opts,
+ #{file=>LogFile1,
+ type=>halt}),
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(Name1,id,new),
+
+ ok = logger:remove_handler(Name1),
+ {error,{not_found,Name1}} = logger:remove_handler(Name1),
+ ok.
+
+errors(cleanup, _Config) ->
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ _ = logger:remove_handler(Name1).
+
+formatter_fail(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name = ?FUNCTION_NAME,
+ LogFile = filename:join(PrivDir,Name),
+ ct:pal("Log = ~p", [LogFile]),
+ HConfig = #{disk_log_opts => #{file=>LogFile},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])},
+ %% no formatter!
+ logger:add_handler(Name, logger_disk_log_h, HConfig),
+ Pid = whereis(Name),
+ true = is_pid(Pid),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+ true = lists:member(Name,H),
+
+ %% Formatter is added automatically
+ {ok,{_,#{formatter:={logger_formatter,_}}}} =
+ logger:get_handler_config(Name),
+ logger:info(M1=?msg,?domain),
+ Got1 = try_match_file(?log_no(LogFile,1),"=INFO REPORT====.*\n"++M1,5000),
+
+ ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}),
+ logger:info(M2=?msg,?domain),
+ Got2 = try_match_file(?log_no(LogFile,1),
+ Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}),
+ logger:info(M3=?msg,?domain),
+ Got3 = try_match_file(?log_no(LogFile,1),
+ Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}),
+ logger:info(?msg,?domain),
+ try_match_file(?log_no(LogFile,1),
+ Got3++"FORMATTER ERROR: bad_return_value",
+ 5000),
+
+ %% Check that the handler is still alive and has not been restarted
+ Pid = whereis(Name),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ _ = logger:remove_handler(?FUNCTION_NAME),
+ ok.
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{logger_disk_log_h => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_levels,{42,42,_}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{logger_disk_log_h => #{toggle_sync_qlen=>42,
+ drop_new_reqs_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ %% can't change the disk log options for a log already in use
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,disk_log_opts,
+ #{max_no_files=>2}),
+ %% can't change name of an existing handler
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,id,bad),
+ %% incorrect values of OP params
+ {error,{invalid_levels,_}} =
+ logger:set_handler_config(?MODULE,logger_disk_log_h,
+ #{toggle_sync_qlen=>100,
+ flush_reqs_qlen=>99}),
+ %% invalid name of config parameter
+ {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_rep_int => 2000}),
+ ok.
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+bad_input(_Config) ->
+ {error,{badarg,{disk_log_sync,["BadType"]}}} =
+ logger_disk_log_h:disk_log_sync("BadType"),
+ {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"),
+ {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType").
+
+info_and_reset(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE} = logger_disk_log_h:info(?MODULE),
+ ok = logger_disk_log_h:reset(?MODULE).
+info_and_reset(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE,
+ toggle_sync_qlen := ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen := ?FLUSH_REQS_QLEN,
+ enable_burst_limit := ?ENABLE_BURST_LIMIT,
+ burst_limit_size := ?BURST_LIMIT_SIZE,
+ burst_window_time := ?BURST_WINDOW_TIME,
+ enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after := ?HANDLER_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := _DiskLogFile}} =
+ logger_disk_log_h:info(?MODULE),
+
+ ok = logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{toggle_sync_qlen => 1,
+ drop_new_reqs_qlen => 2,
+ flush_reqs_qlen => 3,
+ enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 10,
+ enable_kill_overloaded => true,
+ handler_overloaded_qlen => 100000,
+ handler_overloaded_mem => 10000000,
+ handler_restart_after => never,
+ filesync_repeat_interval => no_repeat}),
+ #{id := ?MODULE,
+ toggle_sync_qlen := 1,
+ drop_new_reqs_qlen := 2,
+ flush_reqs_qlen := 3,
+ enable_burst_limit := false,
+ burst_limit_size := 10,
+ burst_window_time := 10,
+ enable_kill_overloaded := true,
+ handler_overloaded_qlen := 100000,
+ handler_overloaded_mem := 10000000,
+ handler_restart_after := never,
+ filesync_repeat_interval := no_repeat} =
+ logger_disk_log_h:info(?MODULE),
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir, "logfile"),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ disk_log_opts=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => 1024,
+ file => File}}),
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}} =
+ logger_disk_log_h:info(?MODULE),
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{disk_log_opts => #{file => File},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ start_tracer([{?MODULE,format,2},
+ {disk_log,blog,2},
+ {disk_log,sync,1}],
+ [{formatter,"first"},
+ {disk_log,blog},
+ {disk_log,sync}]),
+
+ logger:info("first", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
+
+ start_tracer([{?MODULE,format,2},
+ {disk_log,blog,2},
+ {disk_log,sync,1}],
+ [{formatter,"second"},
+ {formatter,"third"},
+ {disk_log,blog},
+ {disk_log,blog},
+ {disk_log,sync}]),
+ %% two log requests in fast succession will make the handler skip
+ %% an automatic disk log sync
+ logger:info("second", ?domain),
+ logger:info("third", ?domain),
+ %% do explicit disk_log_sync
+ logger_disk_log_h:disk_log_sync(?MODULE),
+ check_tracer(100),
+
+ %% check that if there's no repeated disk_log_sync active,
+ %% a disk_log_sync is still performed when the handler goes idle
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval,
+ logger_disk_log_h:info(?MODULE)),
+
+ start_tracer([{?MODULE,format,2},
+ {disk_log,blog,2},
+ {disk_log,sync,1}],
+ [{formatter,"fourth"},
+ {disk_log,blog},
+ {formatter,"fifth"},
+ {disk_log,blog},
+ {disk_log,sync}]),
+
+ logger:info("fourth", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ logger:info("fifth", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\nfifth\n">>}, 1000),
+
+ %% switch repeated disk_log_sync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync},
+ %% receive 1 initial repeated_disk_log_sync, then 1 per sec
+ start_tracer([{logger_disk_log_h,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval,
+ logger_disk_log_h:info(?MODULE)),
+ timer:sleep(WaitT),
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
+ ok.
+disk_log_sync(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_wrap(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxFiles = 3,
+ MaxBytes = 5,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ disk_log_opts=>
+ #{type => wrap,
+ max_no_files => MaxFiles,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+ Info = disk_log:info(?MODULE),
+ {File,wrap,{MaxBytes,MaxFiles},1} =
+ {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)},
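+ %% Trace logger_disk_log_h:handle_info/2 to catch the wrap notifications
+ %% that disk_log sends to the handler process.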
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
+ ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
+ %% fill the wrap log files one by one - one log event per file
+ lists:foreach(fun(N) ->
+ Log = lists:concat([File,".",N]),
+ logger:info(Text, ?domain),
+ wait_until_written(Log),
+ ct:pal("N = ~w",
+ [N = Get(current_file,
+ disk_log:info(?MODULE))])
+ end, lists:seq(1,MaxFiles)),
+
+ %% wait for trace messages
+ timer:sleep(1000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)],
+ ok.
+
+disk_log_wrap(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_full(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxBytes = 50,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ disk_log_opts=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ NoOfChars = 5,
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
+ [logger:info(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)],
+
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ [{trace,full},
+ {trace,{error_status,{error,{full,_}}}}] = Received,
+ ok.
+disk_log_full(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_events(Config) ->
+ Node = node(),
+ Log = ?MODULE,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ %% Events copied from disk_log API
+ Events =
+ [{disk_log, Node, Log, {wrap, 0}},
+ {disk_log, Node, Log, {truncated, 0}},
+ {disk_log, Node, Log, {read_only, 42}},
+ {disk_log, Node, Log, {blocked_log, 42}},
+ {disk_log, Node, Log, {format_external, 42}},
+ {disk_log, Node, Log, full},
+ {disk_log, Node, Log, {error_status, ok}}],
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ [whereis(?MODULE) ! E || E <- Events],
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:map(fun({trace,_M,handle_info,
+ [Got,_]}) -> Got
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ NoOfEvents = length(Events),
+ NoOfEvents = length(Received),
+ lists:foreach(fun(Event) ->
+ true = lists:member(Event, Received)
+ end, Events),
+ ok.
+disk_log_events(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ct:pal("Log = ~p", [Log]),
+
+ Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\n">>}, ?SYNC_REP_INT),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,
+ {error,{full,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,
+ {error,{full,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?SYNC_REP_INT),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+
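+%% As write_failure/1, but simulate failures from the disk_log sync
+%% operation instead of from writes.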
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]),
+ File = filename:join(Dir, FileName),
+ Log = lists:concat([File,".1"]),
+
+ Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ LogOpts = maps:get(log_opts, HState),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, set_handler_config,
+ [?STANDARD_HANDLER, logger_disk_log_h,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ SyncInt = maps:get(filesync_repeat_interval, Info),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,sync,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result,
+ [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,sync,LogOpts,
+ {error,{blocked_log,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
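+%% Start a peer node with the kernel 'logger_dest' variable pointing to
+%% a disk_log file, so the standard handler on that node runs
+%% logger_disk_log_h, and install a plain newline formatter on it.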
+start_h_on_new_node(_Config, Func, File) ->
+ Pa = filename:dirname(code:which(?MODULE)),
+ Dest =
+ case os:type() of
+ {win32,_} ->
+ lists:concat([" {disk_log,\\\"",File,"\\\"}"]);
+ _ ->
+ lists:concat([" \'{disk_log,\"",File,"\"}\'"])
+ end,
+ Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]),
+ NodeName = lists:concat([?MODULE,"_",Func]),
+ ct:pal("Starting ~s with ~tp", [NodeName,Args]),
+ {ok,Node} = test_server:start_node(NodeName, peer, [{args, Args}]),
+ Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+ true = is_pid(Pid),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:info(Msg)
+ end),
+ ok.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(Mod, Func) ->
+ ?set_internal_log({Mod,Func}).
+set_result(Op, Result) ->
+ ?set_result(Op, Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
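+%% Verify that the handler switches to synchronous mode when the
+%% message queue length reaches toggle_sync_qlen, and that no messages
+%% are lost while doing so.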
+op_switch_to_sync(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 3,
+ drop_new_reqs_qlen => 501,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ NumOfReqs = count_lines(Log),
+ ok = file:delete(Log).
+op_switch_to_sync(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 3,
+ flush_reqs_qlen => 600,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [NumOfReqs-Logged,NumOfReqs]),
+ true = (Logged < NumOfReqs),
+ ok = file:delete(Log).
+op_switch_to_drop(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush() ->
+ [{timetrap,{seconds,60}}].
+op_switch_to_flush(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 99,
+ flush_reqs_qlen => 100,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1000,
+ Procs = 500,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]),
+ true = (Logged < (NumOfReqs*Procs)),
+ ok = file:delete(Log).
+op_switch_to_flush(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ NumOfReqs = Logged.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ ReqLimit = Logged.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => BurstTWin,
+ drop_new_reqs_qlen => 20000,
+ flush_reqs_qlen => 20001}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ ok = file:delete(Log),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))).
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>false,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>100}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ true = is_pid(whereis(?MODULE)),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_disk_log_h =>
+ DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>Mem0+50000,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter + 1000),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+mem_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_disk_log_h =>
+ DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>50000,
+ handler_overloaded_mem=>Mem0+500,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter * 2),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+restart_after(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>never}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(?MODULE)),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef1, _, _, _Info1} ->
+ timer:sleep(?HANDLER_RESTART_AFTER + 1000),
+ undefined = whereis(?MODULE),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = 2000,
+ NewHConfig2 =
+ HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(?MODULE),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef2, _, _, _Info2} ->
+ timer:sleep(RestartAfter + 1000),
+ Pid1 = whereis(?MODULE),
+ true = is_pid(Pid1),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% Send handler requests (filesync, info, reset, change_config)
+%% during high load to verify that syncing, dropping, and flushing
+%% are handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{seconds,60}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 1000,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{disk_log_sync,[]},
+ {info,[]},
+ {reset,[]},
+ {change_config,[]}])
+ end),
+ Procs = 100,
+ Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, info),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file:delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result =
+ case Req of
+ change_config ->
+ logger:set_handler_config(HName, logger_disk_log_h,
+ #{enable_kill_overloaded =>
+ false});
+ Func ->
+ logger_disk_log_h:Func(HName)
+ end,
+ send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ end.
+
+%%%-----------------------------------------------------------------
+%%%
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, FuncName),
+ ct:pal("Logging to ~tp", [File]),
+ ok = logger:add_handler(Name,
+ logger_disk_log_h,
+ #{disk_log_opts=>#{file => File,
+ max_no_files => 1,
+ max_no_bytes => 100000000},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,{_,HConfig = #{logger_disk_log_h := DLHConfig}}} =
+ logger:get_handler_config(Name),
+ {lists:concat([File,".1"]),HConfig,DLHConfig}.
+
+stop_handler(Name) ->
+ ok = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped!", [Name]).
+
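+%% Generate a burst of log events: either a fixed number ({n,N}) or for
+%% a period of time in milliseconds ({t,T}), sent sequentially or from
+%% a number of spawned processes ({spawn,Ps,Delay}).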
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ PerProc = fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end,
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(PerProc)) end ||
+ _ <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Using this to send the formatted string back to the test case
+%%% process - so it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) ->
+ format(Log#{msg=>Fun(R)},Config);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ Pid ! {log,String},
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n";
+format(Msg,Tag) ->
+ Error = {unexpected_format,Msg,Tag},
+ erlang:display(Error),
+ exit(Error).
+
+remove(Handler, LogName) ->
+ logger_disk_log_h:remove(Handler, LogName),
+ HState = #{log_names := Logs} = logger_disk_log_h:info(),
+ false = maps:is_key(LogName, HState),
+ false = lists:member(LogName, Logs),
+ false = logger_config:exist(logger, LogName),
+ {error,no_such_log} = disk_log:info(LogName),
+ ok.
+
+start_and_add(Name, Config, LogOpts) ->
+ ct:pal("Adding handler ~w with: ~p",
+ [Name,Config#{disk_log_opts=>LogOpts}]),
+ ok = logger:add_handler(Name, logger_disk_log_h,
+ Config#{disk_log_opts=>LogOpts}),
+ Pid = whereis(Name),
+ true = is_pid(Pid),
+ Name = proplists:get_value(name, disk_log:info(Name)),
+ ok.
+
+remove_and_stop(Handler) ->
+ ok = logger:remove_handler(Handler),
+ timer:sleep(500),
+ undefined = whereis(Handler),
+ ok.
+
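+%% Repeatedly read FileName until its content matches Expected,
+%% retrying every 500 ms for at most Time ms.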
+try_read_file(FileName, Expected, Time) ->
+ try_read_file(FileName, Expected, Time, undefined).
+
+try_read_file(FileName, Expected, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ erlang:error(Error);
+ SomethingElse ->
+ ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500, SomethingElse)
+ end;
+
+try_read_file(_, _, _, Incorrect) ->
+ ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]),
+ erlang:error({error,not_matching_pattern,Incorrect}).
+
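+%% Repeatedly read FileName until its content matches the regular
+%% expression Pattern, retrying every 100 ms for at most Time ms.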
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
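+%% Wait until the file size has stopped growing, then count the number
+%% of lines in the file.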
+count_lines(File) ->
+ wait_until_written(File),
+ count_lines1(File).
+
+wait_until_written(File) ->
+ wait_until_written(File, -1).
+
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ ok;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ Counter = fun Cnt(Dev,LC) ->
+ case file:read_line(Dev) of
+ eof -> LC;
+ _ -> Cnt(Dev,LC+1)
+ end
+ end,
+ {_,Dev} = file:open(File, [read]),
+ Lines = Counter(Dev, 0),
+ file:close(Dev),
+ Lines.
+
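+%% Set up call tracing on the handler and the test case process. The
+%% tracer/2 callback compares the calls against the expected list and
+%% reports back to the test case, which waits in check_tracer/1.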
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(whereis(?MODULE),[c]),
+ dbg:p(Pid,[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,[]),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{?MODULE,format,[#{msg:={string,Msg}}|_]}}, {Pid,[{formatter,Msg}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{formatter,Msg});
+tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func});
+tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[],Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! tracer_done;
+maybe_tracer_done(Pid,Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ receive
+ tracer_done ->
+ dbg:stop_clear(),
+ ok;
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ ct:fail({timeout,tracer})
+ end.
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
new file mode 100644
index 0000000000..c2d3364701
--- /dev/null
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -0,0 +1,451 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_env_var_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
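+%% Kernel environment variables manipulated by this suite; they are
+%% saved in init_per_suite/1 and restored in end_per_suite/1.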
+-define(all_vars,[{kernel,logger_dest},
+ {kernel,logger_level},
+ {kernel,logger_log_progress},
+ {kernel,logger_sasl_compatible},
+ {kernel,error_logger}]).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Env = [{App,Key,application:get_env(App,Key)} || {App,Key} <- ?all_vars],
+ Removed = cleanup(),
+ [{env,Env},{logger,Removed}|Config].
+
+end_per_suite(Config) ->
+ [application:set_env(App,Key,Val) ||
+ {App,Key,Val} <- ?config(env,Config),
+ Val =/= undefined],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ cleanup(),
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [default,
+ default_sasl_compatible,
+ dest_tty,
+ dest_tty_sasl_compatible,
+ dest_false,
+ dest_false_progress,
+ dest_false_sasl_compatible,
+ dest_silent,
+ dest_silent_sasl_compatible,
+ dest_file_old,
+ dest_file,
+ dest_disk_log,
+ %% disk_log_vars, % or test this in logger_disk_log_SUITE?
+ sasl_compatible_false,
+ sasl_compatible_false_no_progress,
+ sasl_compatible,
+ bad_dest%% ,
+ %% bad_level,
+ %% bad_sasl_compatibility,
+ %% bad_progress
+ ].
+
+default(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ undefined,
+ undefined, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ true = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ false = lists:keymember(sasl_h,1,Hs),
+ false = is_pid(whereis(sasl_h)),
+ ok.
+
+default_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ undefined,
+ undefined, % dest
+ undefined, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_tty(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ true = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ false = lists:keymember(sasl_h,1,Hs),
+ false = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_tty_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty, % dest
+ undefined, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_false(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ false, % dest
+ notice, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
+ notice = maps:get(level,SimpleC),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,SimpleFilters),
+ true = lists:keymember(stop_progress,1,SimpleFilters),
+ false = lists:keymember(sasl_h,1,Hs),
+ ok.
+
+dest_false_progress(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ false, % dest
+ notice, % level
+ undefined, % sasl comp (default=false)
+ true), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
+ notice = maps:get(level,SimpleC),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,SimpleFilters),
+ false = lists:keymember(stop_progress,1,SimpleFilters),
+ false = lists:keymember(sasl_h,1,Hs),
+ ok.
+
+dest_false_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ false, % dest
+ notice, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
+ notice = maps:get(level,SimpleC),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ lists:keyfind(domain,1,SimpleFilters),
+ false = lists:keymember(stop_progress,1,SimpleFilters),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_silent(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ silent, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ false = lists:keymember(logger_simple,1,Hs),
+ false = lists:keymember(sasl_h,1,Hs),
+ ok.
+
+dest_silent_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ silent, % dest
+ undefined, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ false = lists:keymember(logger_simple,1,Hs),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+
+dest_file_old(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ error_logger,
+ file, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+
+dest_file(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+
+dest_disk_log(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ disk_log, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ check_log(Log,
+ disk_log, % dest
+ 0), % progress in std logger
+ ok.
+
+
+sasl_compatible_false(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ false, % sasl comp
+ true), % progress
+ check_log(Log,
+ file, % dest
+ 4), % progress in std logger
+ ok.
+
+sasl_compatible_false_no_progress(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ false, % sasl comp
+ false), % progress
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+sasl_compatible(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ true, % sasl comp
+ undefined), % progress
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+bad_dest(Config) ->
+ {error,{bad_config,{kernel,{logger_dest,baddest}}}} =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ baddest,
+ undefined,
+ undefined,
+ undefined).
+
+bad_level(Config) ->
+ error =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty,
+ badlevel,
+ undefined,
+ undefined).
+
+bad_sasl_compatibility(Config) ->
+ error =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty,
+ info,
+ badcomp,
+ undefined).
+
+bad_progress(Config) ->
+ error =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty,
+ info,
+ undefined,
+ badprogress).
+
+%%%-----------------------------------------------------------------
+%%% Internal
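+%% Set the given kernel variables, add a buffering simple handler, call
+%% logger:setup_standard_handler(), start sasl, and return the log file
+%% name together with the resulting handler configurations.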
+setup(Config,Func,DestVar,Dest,Level,SaslComp,Progress) ->
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ Dir = ?config(priv_dir,Config),
+ File = lists:concat([?MODULE,"_",Func,".log"]),
+ Log = filename:join(Dir,File),
+ case Dest of
+ undefined ->
+ ok;
+ F when F==file; F==disk_log ->
+ application:set_env(kernel,DestVar,{Dest,Log});
+ _ ->
+ application:set_env(kernel,DestVar,Dest)
+ end,
+ case Level of
+ undefined ->
+ ok;
+ _ ->
+ application:set_env(kernel,logger_level,Level)
+ end,
+ case SaslComp of
+ undefined ->
+ ok;
+ _ ->
+ application:set_env(kernel,logger_sasl_compatible,SaslComp)
+ end,
+ case Progress of
+ undefined ->
+ ok;
+ _ ->
+ application:set_env(kernel,logger_log_progress,Progress)
+ end,
+ case logger:setup_standard_handler() of
+ ok ->
+ application:start(sasl),
+ StdH = case Dest of
+ NoH when NoH==false; NoH==silent -> false;
+ _ -> true
+ end,
+ StdH = is_pid(whereis(?STANDARD_HANDLER)),
+ SaslH = if SaslComp -> true;
+ true -> false
+ end,
+ SaslH = is_pid(whereis(sasl_h)),
+ {ok,{Log,maps:get(handlers,logger:i())}};
+ Error ->
+ Error
+ end.
+
+check_log(Log,Dest,NumProgress) ->
+ ok = logger:alert("dummy1"),
+ ok = logger:debug("dummy1"),
+
+ %% Check that there are progress reports (supervisor and
+ %% application_controller) and an alert report (from the alert call
+ %% above) in the log. There should not be any info or debug reports
+ %% yet.
+ {ok,Bin1} = sync_and_read(Dest,Log),
+ ct:log("Log content:~n~s",[Bin1]),
+ match(Bin1,<<"PROGRESS REPORT">>,NumProgress),
+ match(Bin1,<<"ALERT REPORT">>,1),
+ match(Bin1,<<"INFO REPORT">>,0),
+ match(Bin1,<<"DEBUG REPORT">>,0),
+
+ %% Then stop sasl and see that the info report from
+ %% application_controller is there
+ ok = application:stop(sasl),
+ {ok,Bin2} = sync_and_read(Dest,Log),
+ ct:log("Log content:~n~s",[Bin2]),
+ match(Bin2,<<"INFO REPORT">>,1),
+ match(Bin1,<<"DEBUG REPORT">>,0),
+ ok.
+
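+%% Assert that Pattern occurs exactly N times in Bin.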
+match(Bin,Pattern,0) ->
+ nomatch = re:run(Bin,Pattern,[{capture,none}]);
+match(Bin,Pattern,N) ->
+ {match,M} = re:run(Bin,Pattern,[{capture,all},global]),
+ N = length(M).
+
+sync_and_read(disk_log,Log) ->
+ logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER),
+ file:read_file(Log ++ ".1");
+sync_and_read(file,Log) ->
+ logger_std_h:filesync(?STANDARD_HANDLER),
+ file:read_file(Log).
+
+cleanup() ->
+ application:stop(sasl),
+ [application:unset_env(App,Key) || {App,Key} <- ?all_vars],
+ #{handlers:=Hs0} = logger:i(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Hs.
diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl
new file mode 100644
index 0000000000..21f14bbc02
--- /dev/null
+++ b/lib/kernel/test/logger_filters_SUITE.erl
@@ -0,0 +1,214 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
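+%% Macros creating log events for the filter tests: no domain (?ndlog),
+%% a given domain (?dlog), a given level (?llog), a progress report
+%% (?plog), and a remote group leader (?rlog).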
+-define(ndlog,
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(dlog(Domain),
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}).
+-define(llog(Level),
+ #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(plog,
+ #{level=>info,
+ msg=>{report,#{label=>{?MODULE,progress}}},
+ meta=>#{line=>?LINE}}).
+-define(rlog(Node),
+ #{level=>info,
+ msg=>{"Line: ~p",[?LINE]},
+ meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [domain,
+ level,
+ progress,
+ remote_gl].
+
+domain(_Config) ->
+ L1 = logger_filters:domain(L1=?dlog([]),{log,prefix_of,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,prefix_of,[]}),
+ L2 = logger_filters:domain(L2=?dlog([]),{log,starts_with,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,starts_with,[]}),
+ L3 = logger_filters:domain(L3=?dlog([]),{log,equals,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,equals,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,no_domain,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,no_domain,[]}),
+
+ L4 = logger_filters:domain(L4=?dlog([a]),{log,prefix_of,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,prefix_of,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,starts_with,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,starts_with,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,equals,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,equals,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,no_domain,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,no_domain,[a,b]}),
+
+ ignore = logger_filters:domain(?dlog([a,b]),{log,prefix_of,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,prefix_of,[a]}),
+ L5 = logger_filters:domain(L5=?dlog([a,b]),{log,starts_with,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,starts_with,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,equals,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,equals,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,no_domain,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,no_domain,[a]}),
+
+ ignore = logger_filters:domain(?ndlog,{log,prefix_of,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,prefix_of,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,starts_with,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,starts_with,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,equals,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,equals,[a]}),
+ L6 = logger_filters:domain(L6=?ndlog,{log,no_domain,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,no_domain,[a]}),
+
+ L7 = logger_filters:domain(L7=?dlog([a,b,c,d]),{log,prefix_of,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,prefix_of,[a,b,c,d]}),
+ L8 = logger_filters:domain(L8=?dlog([a,b,c,d]),{log,starts_with,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,starts_with,[a,b,c,d]}),
+ L9 = logger_filters:domain(L9=?dlog([a,b,c,d]),{log,equals,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equals,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,no_domain,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,no_domain,[a,b,c,d]}),
+
+ %% A domain field in meta which is not a list is allowed by the
+ %% filter, but it will never match.
+ ignore = logger_filters:domain(?dlog(dummy),{log,prefix_of,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,prefix_of,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,starts_with,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,starts_with,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,equals,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,equals,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,no_domain,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,no_domain,[a,b,c,d]}),
+
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,prefix_of,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,prefix_of,bad})),
+
+ ok.
+
+level(_Config) ->
+ ignore = logger_filters:level(?llog(info),{log,lt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,lt,info}),
+ ignore = logger_filters:level(?llog(info),{log,gt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,info}),
+ L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,info}),
+ L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,gteq,info}),
+ L3 = logger_filters:level(L3=?llog(info),{log,eq,info}),
+ stop = logger_filters:level(?llog(info),{stop,eq,info}),
+ ignore = logger_filters:level(?llog(info),{log,neq,info}),
+ ignore = logger_filters:level(?llog(info),{stop,neq,info}),
+
+ ignore = logger_filters:level(?llog(error),{log,lt,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lt,info}),
+ L4 = logger_filters:level(L4=?llog(error),{log,gt,info}),
+ stop = logger_filters:level(?llog(error),{stop,gt,info}),
+ ignore = logger_filters:level(?llog(error),{log,lteq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lteq,info}),
+ L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}),
+ stop = logger_filters:level(?llog(error),{stop,gteq,info}),
+ ignore = logger_filters:level(?llog(error),{log,eq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,eq,info}),
+ L6 = logger_filters:level(L6=?llog(error),{log,neq,info}),
+ stop = logger_filters:level(?llog(error),{stop,neq,info}),
+
+ L7 = logger_filters:level(L7=?llog(info),{log,lt,error}),
+ stop = logger_filters:level(?llog(info),{stop,lt,error}),
+ ignore = logger_filters:level(?llog(info),{log,gt,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,error}),
+ L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,eq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,eq,error}),
+ L9 = logger_filters:level(L9=?llog(info),{log,neq,error}),
+ stop = logger_filters:level(?llog(info),{stop,neq,error}),
+
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})),
+
+ ok.
+
+progress(_Config) ->
+ L1 = logger_filters:progress(L1=?plog,log),
+ stop = logger_filters:progress(?plog,stop),
+ ignore = logger_filters:progress(?ndlog,log),
+ ignore = logger_filters:progress(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)),
+
+ ok.
+
+remote_gl(_Config) ->
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ L1 = logger_filters:remote_gl(L1=?rlog(Node),log),
+ stop = logger_filters:remote_gl(?rlog(Node),stop),
+ ignore = logger_filters:remote_gl(?ndlog,log),
+ ignore = logger_filters:remote_gl(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)),
+ ok.
+
+remote_gl(cleanup,_Config) ->
+ [test_server:stop_node(N) || N<-nodes()].
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
new file mode 100644
index 0000000000..ac1abba629
--- /dev/null
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -0,0 +1,558 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [default,
+ legacy_header,
+ single_line,
+ template,
+ format_msg,
+ report_cb,
+ max_size,
+ depth,
+ chars_limit,
+ format_mfa,
+ format_time,
+ level_or_msg_in_meta,
+ faulty_log,
+ faulty_config,
+ faulty_msg].
+
+default(_Config) ->
+ String1 = format(info,{"~p",[term]},#{},#{}),
+ ct:log(String1),
+ [_Date,_Time,"info:\nterm\n"] = string:lexemes(String1," "),
+
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+ ok.
+
+legacy_header(_Config) ->
+ Time = timestamp(),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true}),
+ ct:log(String1),
+ "=INFO REPORT==== "++Rest = String1,
+ [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="),
+ [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."),
+ true = integer(D,31),
+ true = integer(Y,2018,infinity),
+ true = integer(H,23),
+ true = integer(Min,59),
+ true = integer(S,59),
+ true = integer(Micro,999999),
+ true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun",
+ "Jul","Aug","Sep","Oct","Nov","Dec"]),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false}),
+ ct:log(String2),
+ ExpectedTimestamp = default_time_format(Time),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad}),
+ ct:log(String3),
+ String3 = String2,
+
+ String4 = format(info,{"~p",[term]},#{time=>Time},
+ #{legacy_header=>true,
+ single_line=>true}), % <---ignored
+ ct:log(String4),
+ String4 = String1,
+
+ String5 = format(info,{"~p",[term]},#{}, % <--- no time
+ #{legacy_header=>true}),
+ ct:log(String5),
+ "=INFO REPORT==== "++_ = String5,
+ ok.
+
+single_line(_Config) ->
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}),
+ ct:log(String1),
+ " info: term\n" = string:prefix(String1,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}),
+ ok.
+
+template(_Config) ->
+ Time = timestamp(),
+
+ Template1 = [msg],
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}),
+ ct:log(String1),
+ "term" = String1,
+
+ Template2 = [msg,unknown],
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}),
+ ct:log(String2),
+ "term" = String2,
+
+ Template3 = ["string"],
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}),
+ ct:log(String3),
+ "string" = String3,
+
+ Template4 = ["string\nnewline"],
+ String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4,
+ single_line=>true}),
+ ct:log(String4),
+ "string\nnewline" = String4,
+
+ Template5 = [],
+ String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}),
+ ct:log(String5),
+ "" = String5,
+
+ Ref6 = erlang:make_ref(),
+ Meta6 = #{atom=>some_atom,
+ integer=>632,
+ list=>[list,"string",4321,#{},{tuple}],
+ mfa=>{mod,func,0},
+ pid=>self(),
+ ref=>Ref6,
+ string=>"some string",
+ time=>Time,
+ tuple=>{1,atom,"list"},
+ nested=>#{subkey=>subvalue}},
+ Template6 = lists:join(";",maps:keys(maps:remove(nested,Meta6)) ++
+ [{nested,subkey}]),
+ String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6,
+ single_line=>true}),
+ ct:log(String6),
+ SelfStr = pid_to_list(self()),
+ RefStr6 = ref_to_list(Ref6),
+ ListStr = "[list,\"string\",4321,#{},{tuple}]",
+ ExpectedTime6 = default_time_format(Time),
+ ["some_atom",
+ "632",
+ ListStr,
+ "mod:func/0",
+ SelfStr,
+ RefStr6,
+ "some string",
+ ExpectedTime6,
+ "{1,atom,\"list\"}",
+ "subvalue"] = string:lexemes(String6,";"),
+
+ Meta7 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template7 = lists:join(";",[nested,
+ {nested,key1},
+ {nested,key1,subkey1},
+ {nested,key2},
+ {nested,key2,subkey2},
+ {nested,key3},
+ {nested,key3,subkey3}]),
+ String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7,
+ single_line=>true}),
+ ct:log(String7),
+ [MultipleKeysStr,
+ "#{subkey1 => value1}",
+ "value1",
+ "value2",
+ "",
+ "",
+ ""] = string:split(String7,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr of
+ "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr})
+ end,
+ ok.
+
+format_msg(_Config) ->
+ Template = [msg],
+
+ String1 = format(info,{"~p",[term]},#{},#{template=>Template}),
+ ct:log(String1),
+ "term" = String1,
+
+ String2 = format(info,{"list",[term]},#{},#{template=>Template}),
+ ct:log(String2),
+ "FORMAT ERROR: \"list\" - [term]" = String2,
+
+ String3 = format(info,{report,term},#{},#{template=>Template}),
+ ct:log(String3),
+ "term" = String3,
+
+ String4 = format(info,{report,term},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String4),
+ "formatted" = String4,
+
+ String5 = format(info,{report,term},
+ #{report_cb=>fun(_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String5),
+ "REPORT_CB ERROR: term; Returned: faulty_return" = String5,
+
+ String6 = format(info,{report,term},
+ #{report_cb=>fun(_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String6),
+ "REPORT_CB CRASH: term; Reason: {error,fun_crashed}" = String6,
+
+ %% strings are not formatted
+ String7 = format(info,{string,"string"},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String7),
+ "string" = String7,
+
+ String8 = format(info,{string,['not',printable,list]},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String8),
+ "INVALID STRING: ['not',printable,list]" = String8,
+
+ String9 = format(info,{string,"string"},#{},#{template=>Template}),
+ ct:log(String9),
+ "string" = String9,
+
+ ok.
+
+report_cb(_Config) ->
+ Template = [msg],
+ MetaFun = fun(_) -> {"meta_rcb",[]} end,
+ ConfigFun = fun(_) -> {"config_rcb",[]} end,
+ "term" = format(info,{report,term},#{},#{template=>Template}),
+ "meta_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}),
+ "config_rcb" =
+ format(info,{report,term},#{},#{template=>Template,
+ report_cb=>ConfigFun}),
+ "config_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template,
+ report_cb=>ConfigFun}),
+ ok.
+
+max_size(_Config) ->
+ Template = [msg],
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
+ application:set_env(kernel,logger_max_size,11),
+ "12345678901234567890" = % min value is 50, so this is not limited
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
+ "12345678901234567890123456789012345678901234567..." = % 50
+ format(info,
+ {"123456789012345678901234567890123456789012345678901234567890",
+ []},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,logger_max_size,53),
+ "12345678901234567890123456789012345678901234567890..." = %53
+ format(info,
+ {"123456789012345678901234567890123456789012345678901234567890",
+ []},
+ #{},
+ #{template=>Template}),
+ "123456789012..." =
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template,
+ max_size=>15}),
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template,
+ max_size=>unlimited}),
+ %% Check that one newline at the end of the line is kept (if it exists)
+ "12345678901...\n" =
+ format(info,{"12345678901234567890\n",[]},#{},#{template=>Template,
+ max_size=>15}),
+ "12345678901...\n" =
+ format(info,{"12345678901234567890",[]},#{},#{template=>[msg,"\n"],
+ max_size=>15}),
+ ok.
+max_size(cleanup,_Config) ->
+ application:unset_env(kernel,logger_max_size),
+ ok.
+
+depth(_Config) ->
+ Template = [msg],
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,error_logger_format_depth,11),
+ "[1,2,3,4,5,6,7,8,9,0|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,logger_format_depth,12),
+ "[1,2,3,4,5,6,7,8,9,0,1|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>13}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>unlimited}),
+ ok.
+depth(cleanup,_Config) ->
+ application:unset_env(kernel,logger_format_depth),
+ ok.
+
+chars_limit(_Config) ->
+ FA = {"LoL: ~p~nL: ~p~nMap: ~p~n",
+ [lists:duplicate(10,lists:seq(1,100)),
+ lists:seq(1,100),
+ maps:from_list(lists:zip(lists:seq(1,100),
+ lists:duplicate(100,value)))]},
+ Meta = #{time=>"2018-04-26 9:15:40.449879"},
+ Template = [time," - ", msg, "\n"],
+ FC = #{template=>Template,
+ depth=>unlimited,
+ max_size=>unlimited,
+ chars_limit=>unlimited,
+ single_line=>true},
+ CL1 = 80,
+ String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}),
+ L1 = string:length(String1),
+ ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]),
+ true = L1 > CL1,
+ true = L1 < CL1 + 10,
+
+ String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}),
+ L2 = string:length(String2),
+ ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]),
+ String2 = String1,
+
+ CL3 = 200,
+ String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}),
+ L3 = string:length(String3),
+ ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]),
+ true = L3 > CL3,
+ true = L3 < CL3 + 10,
+
+ String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}),
+ L4 = string:length(String4),
+ ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]),
+ true = L4 > CL3,
+ true = L4 < CL3 + 10,
+
+ %% Test that max_size truncates the string which is limited by
+ %% depth and chars_limit
+ MS5 = 150,
+ String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}),
+ L5 = string:length(String5),
+ ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]),
+ L5 = MS5,
+ true = lists:prefix(lists:sublist(String5,L5-4),String4),
+
+ ok.
+
+format_mfa(_Config) ->
+ Template = [mfa],
+
+ Meta1 = #{mfa=>{mod,func,0}},
+ String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}),
+ ct:log(String1),
+ "mod:func/0" = String1,
+
+ Meta2 = #{mfa=>{mod,func,[]}},
+ String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}),
+ ct:log(String2),
+ "mod:func/0" = String2,
+
+ Meta3 = #{mfa=>"mod:func/0"},
+ String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}),
+ ct:log(String3),
+ "mod:func/0" = String3,
+
+ Meta4 = #{mfa=>othermfa},
+ String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}),
+ ct:log(String4),
+ "othermfa" = String4,
+
+ ok.
+
+format_time(_Config) ->
+ Time1 = timestamp(),
+ ExpectedTimestamp1 = default_time_format(Time1),
+ String1 = format(info,{"~p",[term]},#{time=>Time1},#{}),
+ ct:log(String1),
+ " info:\nterm\n" = string:prefix(String1,ExpectedTimestamp1),
+
+ Time2 = timestamp(),
+ ExpectedTimestamp2 = default_time_format(Time2,true),
+ String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp2),
+
+ application:set_env(kernel,logger_utc,true),
+ Time3 = timestamp(),
+ ExpectedTimestamp3 = default_time_format(Time3,true),
+ String3 = format(info,{"~p",[term]},#{time=>Time3},#{}),
+ ct:log(String3),
+ " info:\nterm\n" = string:prefix(String3,ExpectedTimestamp3),
+
+ ok.
+
+format_time(cleanup,_Config) ->
+ application:unset_env(kernel,logger_utc),
+ ok.
+
+level_or_msg_in_meta(_Config) ->
+    %% The template contains atoms that pick out values from meta,
+    %% or level/msg to insert these from the log event itself. What if
+    %% meta contains a key named 'level' or 'msg' and you want to
+    %% display its value?
+    %% For now we simply ignore such keys in meta and display the
+    %% actual level and msg from the log event.
+
+ Meta = #{level=>mylevel,
+ msg=>"metamsg"},
+ Template = [level,";",msg],
+ String = format(info,{"~p",[term]},Meta,#{template=>Template}),
+ ct:log(String),
+ "info;term" = String, % so mylevel and "metamsg" are ignored
+
+ ok.
+
+faulty_log(_Config) ->
+ %% Unexpected log (should be type logger:log()) - print error
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(unexp_log,#{})),
+ ok.
+
+faulty_config(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>{"~p",[term]},
+ meta=>#{time=>timestamp()}},
+ unexp_config)),
+ ok.
+
+faulty_msg(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,_,_,_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>term,
+ meta=>#{time=>timestamp()}},
+ #{})),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+format(Level,Msg,Meta,Config) ->
+ format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config).
+
+format(Log,Config) ->
+ lists:flatten(logger_formatter:format(Log,Config)).
+
+default_time_format(Timestamp) ->
+ default_time_format(Timestamp,false).
+
+default_time_format(Timestamp0,Utc) when is_integer(Timestamp0) ->
+ Timestamp=Timestamp0+erlang:time_offset(microsecond),
+ %% calendar:system_time_to_rfc3339(Time,[{unit,microsecond}]).
+ Micro = Timestamp rem 1000000,
+ Sec = Timestamp div 1000000,
+ UniversalTime = erlang:posixtime_to_universaltime(Sec),
+ {Date,Time} =
+ if Utc -> UniversalTime;
+ true -> erlang:universaltime_to_localtime(UniversalTime)
+ end,
+ default_time_format(Date,Time,Micro).
+
+default_time_format({Y,M,D},{H,Min,S},Micro) ->
+ lists:flatten(
+ io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w",
+ [Y,M,D,H,Min,S,Micro])).
+
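+%% Check that a string parses as an integer, optionally within the
+%% range [Min,Max].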
+integer(Str) ->
+ is_integer(list_to_integer(Str)).
+integer(Str,Max) ->
+ integer(Str,0,Max).
+integer(Str,Min,Max) ->
+ Int = list_to_integer(Str),
+    Int >= Min andalso Int =< Max.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R:S -> {C,R,hd(S)} end.
+
+timestamp() ->
+ erlang:monotonic_time(microsecond).
+
+%% Add a time field to the metadata if missing (possibly redundant).
+add_time(#{time:=_}=Meta) ->
+ Meta;
+add_time(Meta) ->
+ Meta#{time=>timestamp()}.
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl
new file mode 100644
index 0000000000..b59f5f7758
--- /dev/null
+++ b/lib/kernel/test/logger_legacy_SUITE.erl
@@ -0,0 +1,282 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_legacy_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% This test suite tests that log events from within OTP can be
+%%% delivered to legacy error_logger event handlers in the same format
+%%% as before 'logger' was introduced.
+%%%
+%%% Before changing the expected format of any of the log events in
+%%% this suite, please make sure that the backwards incompatibility it
+%%% introduces is acceptable.
+%%%-----------------------------------------------------------------
+
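+%% ?check/1 receives the expected report and then asserts that no
+%% further reports are pending. ?check_no_flush/1 only receives the
+%% expected report, leaving any subsequent reports in the queue.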
+-define(check(Expected),
+ receive Expected ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+-define(check_no_flush(Expected),
+ receive Expected ->
+ ok
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>stop}),
+ Config.
+
+end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
+ ok.
+
+init_per_group(std, Config) ->
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,
+ {log,prefix_of,[beam,erlang,otp]}}}]),
+ Config;
+init_per_group(sasl, Config) ->
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,
+ {log,prefix_of,[beam,erlang,otp,sasl]}}}]),
+
+    %% cth_log_redirect checks if sasl is started before displaying
+    %% any sasl reports - so we start sasl here in order to see the
+    %% real sasl reports in the test case log:
+ application:start(sasl),
+ Config;
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(sasl, _Config) ->
+ application:stop(sasl),
+ ok;
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ error_logger:add_report_handler(?MODULE,{event_handler,self()}),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ %% Using gen_event directly here, instead of
+ %% error_logger:delete_report_handler. This is to avoid
+ %% automatically stopping the error_logger process due to removing
+ %% the last handler.
+ gen_event:delete_handler(error_logger,?MODULE,[]),
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [{std,[],[gen_server,
+ gen_event,
+ gen_fsm,
+ gen_statem]},
+ {sasl,[],[sasl_reports,
+ supervisor_handle_info]}].
+
+all() ->
+ [{group,std},
+ {group,sasl}].
+
+gen_server(_Config) ->
+ {ok,Pid} = gen_server:start(?MODULE,gen_server,[]),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ ok = gen_server:cast(Pid,Msg),
+ ?check({error,"** Generic server ~tp terminating"++_,
+ [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}).
+
+gen_event(_Config) ->
+ {ok,Pid} = gen_event:start(),
+ ok = gen_event:add_handler(Pid,?MODULE,gen_event),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}),
+ gen_event:notify(Pid,Msg),
+ ?check({error,"** gen_event handler ~p crashed."++_,
+ [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}).
+
+gen_fsm(_Config) ->
+ {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ gen_fsm:send_all_state_event(Pid,Msg),
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}).
+
+gen_statem(_Config) ->
+ {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}).
+
+sasl_reports(Config) ->
+ App = {application,?MODULE,[{description, ""},
+ {vsn, "1.0"},
+ {modules, [?MODULE]},
+ {registered, []},
+ {applications, []},
+ {mod, {?MODULE, []}}]},
+ AppStr = io_lib:format("~p.",[App]),
+ Dir = ?config(priv_dir,Config),
+ AppFile = filename:join(Dir,?MODULE_STRING++".app"),
+ ok = file:write_file(AppFile,AppStr),
+ true = code:add_patha(Dir),
+ ok = application:start(?MODULE),
+ SupName = sup_name(),
+ Pid = whereis(SupName),
+ [{ch,ChPid,_,_}] = supervisor:which_children(Pid),
+ Node = node(),
+ ?check_no_flush({info_report,progress,[{application,?MODULE},
+ {started_at,Node}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,[{pid,ChPid}|_]}]}),
+ ok = gen_server:cast(ChPid, fun() ->
+ spawn_link(fun() -> receive x->ok end end)
+ end),
+ Msg = fun() -> a=b end,
+ ok = gen_server:cast(ChPid,Msg),
+ ?check_no_flush({error,"** Generic server ~tp terminating"++_,
+ [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}),
+ ?check_no_flush({error_report,crash_report,
+ [[{initial_call,_},
+ {pid,ChPid},
+ {registered_name,[]},
+ {error_info,{error,{badmatch,b},_}},
+ {ancestors,_},
+ {message_queue_len,_},
+ {messages,_},
+ {links,[Pid,Neighbour]},
+ {dictionary,_},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_}],
+ [{neighbour,[{pid,Neighbour},
+ {registered_name,_},
+ {initial_call,_},
+ {current_function,_},
+ {ancestors,_},
+ {message_queue_len,_},
+ {links,[ChPid]},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_},
+ {current_stacktrace,_}]}]]}),
+ ?check_no_flush({error_report,supervisor_report,
+ [{supervisor,{local,SupName}},
+ {errorContext,child_terminated},
+ {reason,{{badmatch,b},_}},
+ {offender,[{pid,ChPid}|_]}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,_}]}),
+
+ ok = application:stop(?MODULE),
+ ?check({info_report,std_info,[{application,?MODULE},
+ {exited,stopped},
+ {type,temporary}]}).
+
+sasl_reports(cleanup,_Config) ->
+ application:stop(?MODULE).
+
+supervisor_handle_info(_Config) ->
+ {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor),
+ ?check({info_report,progress,[{supervisor,_},{started,_}]}),
+ Pid ! msg,
+ ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}).
+
+supervisor_handle_info(cleanup,_Config) ->
+ Pid = whereis(sup_name()),
+ unlink(Pid),
+ exit(Pid,shutdown).
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for error_logger event handler, gen_server, gen_statem,
+%%% gen_fsm, gen_event, supervisor and application.
+start(_,_) ->
+ supervisor:start_link({local,sup_name()},?MODULE,supervisor).
+
+init(supervisor) ->
+ {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}};
+init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm ->
+ {ok,mystate,StateMachine};
+init(State) ->
+ {ok,State}.
+
+%% error_logger event handler
+handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) ->
+ Pid ! {Tag,Type,Report},
+ {ok,State};
+%% other gen_event
+handle_event(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {next_state,State}.
+
+%% gen_fsm
+handle_event(Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_statem
+handle_event(info,Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_server
+handle_cast(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {noreply,State}.
+
+%% gen_statem
+callback_mode() ->
+ handle_event_function.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+sup_name() ->
+ list_to_atom(?MODULE_STRING++"_sup").
diff --git a/lib/kernel/test/logger_simple_SUITE.erl b/lib/kernel/test/logger_simple_SUITE.erl
new file mode 100644
index 0000000000..5d8d32492d
--- /dev/null
+++ b/lib/kernel/test/logger_simple_SUITE.erl
@@ -0,0 +1,247 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(check_no_log,[] = test_server:messages_get()).
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ #{handlers:=Hs0} = logger:i(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Env = [{App,Key,application:get_env(App,Key)} ||
+ {App,Key} <- [{kernel,logger_dest},
+ {kernel,logger_level}]],
+ [{env,Env},{logger,Hs}|Config].
+
+end_per_suite(Config) ->
+ [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ get_buffer,
+ replace_file,
+ replace_disk_log
+ ].
+
+start_stop(_Config) ->
+ undefined = whereis(logger_simple),
+ register(logger_simple,self()),
+ {error,_} = logger:add_handler(logger_simple,
+ logger_simple,
+ #{filter_default=>log}),
+ unregister(logger_simple),
+ ok = logger:add_handler(logger_simple,logger_simple,#{filter_default=>log}),
+ Pid = whereis(logger_simple),
+ true = is_pid(Pid),
+ ok = logger:remove_handler(logger_simple),
+ false = is_pid(whereis(logger_simple)),
+ ok.
+start_stop(cleanup,_Config) ->
+ logger:remove_handler(logger_simple).
+
+get_buffer(_Config) ->
+ %% Start simple without buffer
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log}),
+ logger:emergency(?str),
+ logger:alert(?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ {ok,[]} = logger_simple:get_buffer(), % no buffer
+ ok = logger:remove_handler(logger_simple),
+
+ %% Start with buffer
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M1=?str),
+ logger:alert(M2=?str,[]),
+ logger:error(M3=?map_rep),
+ logger:info(M4=?keyval_rep),
+ logger:info(M41=?keyval_rep++[not_key_val]),
+ error_logger:error_report(some_type,M5=?map_rep),
+ error_logger:warning_report("some_type",M6=?map_rep),
+ logger:critical(M7=?str,[A7=?keyval_rep]),
+ logger:notice(M8=["fake",string,"line:",?LINE]),
+ {ok,Buffered1} = logger_simple:get_buffer(),
+ [#{level:=emergency,msg:={string,M1}},
+ #{level:=alert,msg:={M2,[]}},
+ #{level:=error,msg:={report,M3}},
+ #{level:=info,msg:={report,M4}},
+ #{level:=info,msg:={report,M41}},
+ #{level:=error,msg:={report,#{label:={error_logger,error_report},
+ report:=M5}}},
+ #{level:=warning,msg:={report,#{label:={error_logger,warning_report},
+ report:=M6}}},
+ #{level:=critical,msg:={M7,[A7]}},
+ #{level:=notice,msg:={string,M8}}] = Buffered1,
+
+ %% Keep logging - should not buffer any more
+ logger:emergency(?str),
+ logger:alert(?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ {ok,[]} = logger_simple:get_buffer(),
+ ok = logger:remove_handler(logger_simple),
+
+ %% Fill buffer and drop
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M9=?str),
+ M10=?str,
+ [logger:info(M10) || _ <- lists:seq(1,8)],
+ logger:error(M11=?str),
+ logger:error(?str),
+ logger:error(?str),
+ {ok,Buffered3} = logger_simple:get_buffer(),
+ 11 = length(Buffered3),
+ [#{level:=emergency,msg:={string,M9}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=error,msg:={string,M11}},
+ #{level:=info,msg:={"Simple handler buffer full, dropped ~w messages",[2]}}]
+ = Buffered3,
+ ok.
+get_buffer(cleanup,_Config) ->
+ logger:remove_handler(logger_simple).
+
+replace_file(Config) ->
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M1=?str),
+ logger:alert(M2=?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ undefined = whereis(?STANDARD_HANDLER),
+ PrivDir = ?config(priv_dir,Config),
+ File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)++".log"),
+
+ application:set_env(kernel,logger_dest,{file,File}),
+ application:set_env(kernel,logger_level,info),
+
+ ok = logger:setup_standard_handler(),
+ true = is_pid(whereis(?STANDARD_HANDLER)),
+ ok = logger_std_h:filesync(?STANDARD_HANDLER),
+ {ok,Bin} = file:read_file(File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _] = Lines,
+ ok.
+replace_file(cleanup,_Config) ->
+ logger:remove_handler(?STANDARD_HANDLER),
+ logger:remove_handler(logger_simple).
+
+replace_disk_log(Config) ->
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M1=?str),
+ logger:alert(M2=?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ undefined = whereis(?STANDARD_HANDLER),
+ PrivDir = ?config(priv_dir,Config),
+ File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+
+ application:set_env(kernel,logger_dest,{disk_log,File}),
+ application:set_env(kernel,logger_level,info),
+
+ ok = logger:setup_standard_handler(),
+ true = is_pid(whereis(?STANDARD_HANDLER)),
+ ok = logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER),
+ {ok,Bin} = file:read_file(File++".1"),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _|_] = Lines, % the tail might be an info report about opening the disk log
+ ok.
+replace_disk_log(cleanup,_Config) ->
+ logger:remove_handler(?STANDARD_HANDLER),
+ logger:remove_handler(logger_simple).
+
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
new file mode 100644
index 0000000000..e940e0a026
--- /dev/null
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -0,0 +1,1396 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log, [] = test_server:messages_get()).
+-define(check(Expected),
+ receive
+ {log,Expected} ->
+ [] = test_server:messages_get()
+ after 5000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(domain,#{domain=>[?MODULE]}).
+
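+%% Time to wait for an automatic filesync: a bit longer than the
+%% repeat interval, or a fixed 5.5 seconds if the interval is an atom.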
+-define(FILESYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500;
+ true -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ {ok,{?STANDARD_HANDLER,#{formatter:=OrigFormatter}}} =
+ logger:get_handler_config(?STANDARD_HANDLER),
+ [{formatter,OrigFormatter}|Config].
+
+end_per_suite(Config) ->
+ {OrigMod,OrigConf} = proplists:get_value(formatter,Config),
+ logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}),
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ if ?TEST_HOOKS_TAB == undefined ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ true ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [add_remove_instance_tty,
+ add_remove_instance_standard_io,
+ add_remove_instance_standard_error,
+ add_remove_instance_file1,
+ add_remove_instance_file2,
+ default_formatter,
+ errors,
+ formatter_fail,
+ config_fail,
+ crash_std_h_to_file,
+ crash_std_h_to_disk_log,
+ bad_input,
+ info_and_reset,
+ reconfig,
+ file_opts,
+ filesync,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync_file,
+ op_switch_to_sync_tty,
+ op_switch_to_drop_file,
+ op_switch_to_drop_tty,
+ op_switch_to_flush_file,
+ op_switch_to_flush_tty,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ qlen_kill_std,
+ mem_kill_new,
+ mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
+add_remove_instance_tty(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{type => tty},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ ok.
+
+add_remove_instance_standard_io(_Config) ->
+ add_remove_instance_nofile(standard_io).
+add_remove_instance_standard_io(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_standard_error(_Config) ->
+ add_remove_instance_nofile(standard_error).
+add_remove_instance_standard_error(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file1(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog1.txt"),
+ Type = {file,Log},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file1(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file2(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog2.txt"),
+ Type = {file,Log,[raw,append]},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file2(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file(Log, Type) ->
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(?MODULE),
+ true = is_pid(Pid),
+ logger:info(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(?MODULE),
+ logger:info(?msg,?domain),
+ ?check_no_log,
+ try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ ok.
+
+default_formatter(_Config) ->
+ ok = logger:set_handler_config(?STANDARD_HANDLER,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ ct:capture_start(),
+ logger:info(M1=?msg),
+ timer:sleep(100),
+ ct:capture_stop(),
+ [Msg] = ct:capture_get(),
+ match = re:run(Msg,"=INFO REPORT====.*\n"++M1,[{capture,none}]),
+ ok.
+
+errors(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE,logger_std_h,#{}),
+
+ {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name),
+
+ ok = logger:remove_handler(?MODULE),
+ {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE),
+
+ {error,
+ {handler_not_added,
+ {invalid_config,logger_std_h,{type,faulty_type}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{type => faulty_type}}),
+
+ NoDir = lists:concat(["/",?MODULE,"_dir"]),
+ {error,
+ {handler_not_added,{{open_failed,NoDir,eacces},_}}} =
+ logger:add_handler(myh2,logger_std_h,
+ #{logger_std_h=>#{type=>{file,NoDir}}}),
+
+ {error,
+ {handler_not_added,{{open_failed,Log,_},_}}} =
+ logger:add_handler(myh3,logger_std_h,
+ #{logger_std_h=>#{type=>{file,Log,[bad_file_opt]}}}),
+
+ ok = logger:info(?msg).
+
+errors(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+formatter_fail(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ %% no formatter
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => {file,Log}},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}),
+ Pid = whereis(?MODULE),
+ true = is_pid(Pid),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+ true = lists:member(?MODULE,H),
+
+ %% Formatter is added automatically
+ {ok,{_,#{formatter:={logger_formatter,_}}}} =
+ logger:get_handler_config(?MODULE),
+ logger:info(M1=?msg,?domain),
+ Got1 = try_match_file(Log,"=INFO REPORT====.*\n"++M1,5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}),
+ logger:info(M2=?msg,?domain),
+ Got2 = try_match_file(Log,
+ Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}),
+ logger:info(M3=?msg,?domain),
+ Got3 = try_match_file(Log,
+ Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}),
+ logger:info(?msg,?domain),
+ try_match_file(Log,
+ Got3++"FORMATTER ERROR: bad_return_value",
+ 5000),
+
+    %% Check that the handler is still alive and has not been restarted
+ Pid = whereis(?MODULE),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {restart_type,bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{restart_type => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_levels,{42,42,_}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{toggle_sync_qlen=>42,
+ drop_new_reqs_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,logger_std_h,
+ #{type=>{file,"file"}}),
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,id,bad),
+ {error,{invalid_levels,_}} =
+ logger:set_handler_config(?MODULE,logger_std_h,
+ #{toggle_sync_qlen=>100,
+ flush_reqs_qlen=>99}),
+ {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_rep_int => 2000}),
+ ok.
+
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+crash_std_h_to_file(Config) ->
+ crash_std_h(Config,?FUNCTION_NAME,logger_dest,file).
+crash_std_h_to_file(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h_to_disk_log(Config) ->
+ crash_std_h(Config,?FUNCTION_NAME,logger_dest,disk_log).
+crash_std_h_to_disk_log(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h(Config,Func,Var,Type) ->
+ Dir = ?config(priv_dir,Config),
+ File = lists:concat([?MODULE,"_",Func,".log"]),
+ Log = filename:join(Dir,File),
+ Pa = filename:dirname(code:which(?MODULE)),
+ TypeAndLog =
+ case os:type() of
+ {win32,_} ->
+ lists:concat([" {",Type,",\\\"",Log,"\\\"}"]);
+ _ ->
+ lists:concat([" \'{",Type,",\"",Log,"\"}\'"])
+ end,
+ Args = lists:concat([" -kernel ",Var,TypeAndLog," -pa ",Pa]),
+ Name = lists:concat([?MODULE,"_",Func]),
+ ct:pal("Starting ~p with ~tp", [Name,Args]),
+ %% Start a node which prints kernel logs to the destination specified by Type
+ {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
+ Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,self()}]),
+ ok = log_on_remote_node(Node,"dummy1"),
+ ?check("dummy1"),
+ {ok,Bin1} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}),
+
+ %% Kill the logger_std_h process
+ exit(Pid, kill),
+
+ %% Wait a bit, then check that it is gone
+ timer:sleep(2000),
+ undefined = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+
+ %% Check that file is not empty
+ {ok,Bin2} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}),
+ ok.
+
+%% Cannot use rpc:call here, since the code would then execute on a
+%% process whose group_leader is on this (the calling) node, and thus
+%% logger would send the log event to the logger process here instead
+%% of logging it on the remote node.
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:info(Msg)
+ end),
+ ok.
+
+
+crash_std_h(cleanup) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,disk_log_sync,[?STANDARD_HANDLER]),
+ case file:read_file(Log ++ ".1") of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log ++ ".1");
+ Ok ->
+ Ok
+ end;
+sync_and_read(Node,file,Log) ->
+ rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log) of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log);
+ Ok ->
+ Ok
+ end.
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"),
+ {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"),
+ {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType").
+
+
+info_and_reset(_Config) ->
+ #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER),
+ ok = logger_std_h:reset(?STANDARD_HANDLER).
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => standard_io},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE,
+ type := standard_io,
+ file_ctrl_pid := FileCtrlPid,
+ toggle_sync_qlen := ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen := ?FLUSH_REQS_QLEN,
+ enable_burst_limit := ?ENABLE_BURST_LIMIT,
+ burst_limit_size := ?BURST_LIMIT_SIZE,
+ burst_window_time := ?BURST_WINDOW_TIME,
+ enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after := ?HANDLER_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ logger_std_h:info(?MODULE),
+
+ ok = logger:set_handler_config(?MODULE, logger_std_h,
+ #{toggle_sync_qlen => 1,
+ drop_new_reqs_qlen => 2,
+ flush_reqs_qlen => 3,
+ enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 10,
+ enable_kill_overloaded => true,
+ handler_overloaded_qlen => 100000,
+ handler_overloaded_mem => 10000000,
+ handler_restart_after => never,
+ filesync_repeat_interval => no_repeat}),
+ #{id := ?MODULE,
+ type := standard_io,
+ file_ctrl_pid := FileCtrlPid,
+ toggle_sync_qlen := 1,
+ drop_new_reqs_qlen := 2,
+ flush_reqs_qlen := 3,
+ enable_burst_limit := false,
+ burst_limit_size := 10,
+ burst_window_time := 10,
+ enable_kill_overloaded := true,
+ handler_overloaded_qlen := 100000,
+ handler_overloaded_mem := 10000000,
+ handler_restart_after := never,
+ filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE),
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+file_opts(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ BadFileOpts = [raw],
+ BadType = {file,Log,BadFileOpts},
+ {error,{handler_not_added,{{open_failed,Log,enoent},_}}} =
+ logger:add_handler(?MODULE, logger_std_h,
+ #{logger_std_h => #{type => BadType}}),
+
+ OkFileOpts = [raw,append],
+ OkType = {file,Log,OkFileOpts},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => OkType},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{type := OkType} = logger_std_h:info(?MODULE),
+ logger:info(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ ok.
+file_opts(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+filesync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ Type = {file,Log},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Tester = self(),
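+    %% Trace calls to the handler's write/sync functions and to
+    %% file:datasync/1, forwarding the trace messages to this process
+    %% so that the filesync sequence can be verified below.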
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid;
+ ({trace,TPid,'receive',Received}, Pid) ->
+ Pid ! {trace,TPid,Received},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+    FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ {ok,_} = dbg:p(FileCtrlPid, [c]),
+ {ok,_} = dbg:tpl(logger_std_h, write_to_dev, 5, []),
+ {ok,_} = dbg:tpl(logger_std_h, sync_dev, 4, []),
+ {ok,_} = dbg:tp(file, datasync, 1, []),
+
+ logger:info("first", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?FILESYNC_REP_INT),
+ Expected1 = [{log,"first"}, {trace,logger_std_h,write_to_dev},
+ {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+
+ logger:info("second", ?domain),
+ %% do explicit filesync
+ logger_std_h:filesync(?MODULE),
+ %% a second filesync should be ignored
+ logger_std_h:filesync(?MODULE),
+ Expected2 = [{log,"second"}, {trace,logger_std_h,write_to_dev},
+ {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+
+ %% check that if there's no repeated filesync active,
+ %% a filesync is still performed when handler goes idle
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ logger:info("third", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ logger:info("fourth", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ Expected3 = [{log,"third"}, {trace,logger_std_h,write_to_dev},
+ {log,"fourth"}, {trace,logger_std_h,write_to_dev},
+ {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+
+ dbg:stop_clear(),
+
+ %% verify that filesync has been performed as expected
+ Received1 = lists:map(fun({trace,M,F,_}) -> {trace,M,F};
+ (Other) -> Other
+ end, test_server:messages_get()),
+ ct:pal("Trace #1 =~n~p", [Received1]),
+ Received1 = Expected1 ++ Expected2 ++ Expected3,
+
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\n">>}, 1000),
+
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tpl(logger_std_h, handle_cast, 2, []),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ timer:sleep(WaitT),
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_repeat_interval => no_repeat}),
+ dbg:stop_clear(),
+
+ Received2 = lists:map(fun({trace,_M,handle_cast,[{Op,_},_]}) -> {trace,Op};
+ (Other) -> Other
+ end, test_server:messages_get()),
+ ct:pal("Trace #2 =~n~p", [Received2]),
+ OneSync = [{trace,repeated_filesync}],
+ %% receive 1 initial repeated_filesync, then 1 per sec
+ Received2 =
+ lists:flatten([OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ ok.
+filesync(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\n">>}, ?FILESYNC_REP_INT),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?FILESYNC_REP_INT),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, set_handler_config,
+ [?STANDARD_HANDLER, logger_std_h,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]),
+ SyncInt = maps:get(filesync_repeat_interval, Info),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+start_std_h_on_new_node(_Config, Func, Log) ->
+ Pa = filename:dirname(code:which(?MODULE)),
+ Dest =
+ case os:type() of
+ {win32,_} ->
+ lists:concat([" {file,\\\"",Log,"\\\"}"]);
+ _ ->
+ lists:concat([" \'{file,\"",Log,"\"}\'"])
+ end,
+ Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]),
+ Name = lists:concat([?MODULE,"_",Func]),
+ ct:pal("Starting ~s with ~tp", [Name,Args]),
+ {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
+ Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+ true = is_pid(Pid),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(Mod, Func) ->
+ ?set_internal_log({Mod,Func}).
+set_result(Op, Result) ->
+ ?set_result(Op, Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3,
+ drop_new_reqs_qlen => 501,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ %% TRecvPid = start_op_trace(),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ NumOfReqs = count_lines(Log),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(async,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(sync,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_switch(async,sync,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(drop,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(flush,Events) end),
+ ok = file:delete(Log),
+ %% stop_op_trace(TRecvPid),
+ ok.
+op_switch_to_sync_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_sync_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3,
+ drop_new_reqs_qlen => 501,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ ok.
+op_switch_to_sync_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 3,
+ flush_reqs_qlen => 600,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ %% TRecvPid = start_op_trace(),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [NumOfReqs-Logged,NumOfReqs]),
+ true = (Logged < NumOfReqs),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(async,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(drop,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(flush,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_switch(async,drop,Events)
+ %% orelse find_switch(sync,drop,Events)
+ %% end),
+ ok = file:delete(Log),
+ %% stop_op_trace(TRecvPid),
+ ok.
+op_switch_to_drop_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 3,
+ flush_reqs_qlen => 600,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ ok.
+op_switch_to_drop_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush_file() ->
+ [{timetrap,{seconds,60}}].
+op_switch_to_flush_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 99,
+ flush_reqs_qlen => 100,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 10000,
+ Procs = 100,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]),
+ true = (Logged < (NumOfReqs*Procs)),
+
+    %%! TODO: Verify that handler has switched to flush mode
+
+ ok = file:delete(Log),
+ ok.
+op_switch_to_flush_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 99,
+ flush_reqs_qlen => 100,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 10000,
+ Procs = 10,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ ok.
+op_switch_to_flush_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ NumOfReqs = Logged.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ ReqLimit = Logged.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => BurstTWin,
+ drop_new_reqs_qlen => 20000,
+ flush_reqs_qlen => 20001}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ ok = file:delete(Log),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))).
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>false,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>100}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ true = is_pid(whereis(?MODULE)),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>Mem0+50000,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter + 1000),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% choke the standard handler on remote node to verify the termination
+%% works as expected
+qlen_kill_std(Config) ->
+    %%! TODO: test not implemented yet - see commented-out setup below
+ %% Dir = ?config(priv_dir, Config),
+ %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ %% Log = filename:join(Dir, File),
+ %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ %% ok = rpc:call(Node, logger, set_handler_config,
+ %% [?STANDARD_HANDLER, logger_std_h,
+ %% #{enable_kill_overloaded=>true,
+ %% handler_overloaded_qlen=>10,
+ %% handler_overloaded_mem=>100000}]),
+ {skip,"Not done yet"}.
+
+mem_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>50000,
+ handler_overloaded_mem=>Mem0+500,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter * 2),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% choke the standard handler on remote node to verify the termination
+%% works as expected
+mem_kill_std(Config) ->
+ {skip,"Not done yet"}.
+
+restart_after(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>never}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(?MODULE)),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef1, _, _, _Info1} ->
+ timer:sleep(?HANDLER_RESTART_AFTER + 1000),
+ undefined = whereis(?MODULE),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = 2000,
+ NewHConfig2 =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(?MODULE),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef2, _, _, _Info2} ->
+ timer:sleep(RestartAfter + 1000),
+ Pid1 = whereis(?MODULE),
+ true = is_pid(Pid1),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% send handler requests (filesync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing is
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{seconds,60}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 1000,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
+ {info,[]},
+ {reset,[]},
+ {change_config,[]}])
+ end),
+ Sent = send_burst({t,10000}, seq, {chars,79}, info),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file:delete(Log).
+handler_requests_under_load(cleanup, Config) ->
+ ok = stop_handler(?MODULE).
+
+send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result =
+ case Req of
+ change_config ->
+ logger:set_handler_config(HName, logger_std_h,
+ #{enable_kill_overloaded =>
+ false});
+ Func ->
+ logger_std_h:Func(HName)
+ end,
+ send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%%
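+%% Add a logger_std_h test handler: the first clause logs to the tty
+%% (standard_io/standard_error), the second to a file named after the
+%% test case. Returns the handler configuration, and the log file name
+%% for the file case.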
+start_handler(Name, TTY, Config) when TTY == standard_io;
+                                      TTY == standard_error ->
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{logger_std_h => #{type => TTY},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} =
+ logger:get_handler_config(Name),
+ {HConfig,StdHConfig};
+
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([FuncName,".log"])),
+ ct:pal("Logging to ~tp", [Log]),
+ Type = {file,Log},
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} =
+ logger:get_handler_config(Name),
+ {Log,HConfig,StdHConfig}.
+
+stop_handler(Name) ->
+ ok = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped!", [Name]).
+
+count_lines(File) ->
+ wait_until_written(File, -1),
+ count_lines1(File).
+
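+%% Poll the file until its size stops changing, i.e. the handler has
+%% most likely finished writing.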
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz1}} ->
+ ok;
+ {ok,#file_info{size = Sz2}} ->
+ wait_until_written(File, Sz2)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ Counter = fun Cnt(Dev,LC) ->
+ case file:read_line(Dev) of
+ eof -> LC;
+ _ -> Cnt(Dev,LC+1)
+ end
+ end,
+ {_,Dev} = file:open(File, [read]),
+ Lines = Counter(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ PerProc = fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end,
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(PerProc)) end ||
+ _ <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Used to send the formatted string back to the test case
+%%% process, so that it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ Pid ! {log,String},
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n".
+
+add_remove_instance_nofile(Type) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(?MODULE),
+ true = is_pid(Pid),
+ group_leader(group_leader(),Pid), % to get printouts in test log
+ logger:info(M1=?msg,?domain),
+ ?check(M1),
+ %% check that filesync doesn't do damage even if not relevant
+ ok = logger_std_h:filesync(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(?MODULE),
+ logger:info(?msg,?domain),
+ ?check_no_log,
+ ok.
+
+logger_std_h_remove() ->
+ logger:remove_handler(?MODULE).
+logger_std_h_remove(Id) ->
+ logger:remove_handler(Id).
+
+try_read_file(FileName, Expected, Time) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ ct:pal("Can't read ~tp: ~tp", [FileName,Error]),
+ erlang:error(Error);
+ Got ->
+ ct:pal("try_read_file got ~tp", [Got]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500)
+ end;
+try_read_file(FileName, Expected, _) ->
+ ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]),
+ erlang:error({error,missing_expected_pattern}).
+
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
+%%%-----------------------------------------------------------------
+%%%
+start_op_trace() ->
+ TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) ->
+ Pid ! {trace_call,Func,Details},
+ Pid;
+ ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) ->
+ Pid ! {trace_return,Func,RetVal},
+ Pid
+ end,
+ TRecvPid = spawn_link(fun() -> trace_receiver(5000) end),
+ {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}),
+
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:p(self(), [c]),
+
+ MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
+ {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1),
+
+ {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
+
+ MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end),
+ {ok,_} = dbg:tpl(ets, lookup, 2, MS2),
+
+ ct:pal("Tracing started!", []),
+ TRecvPid.
+
+stop_op_trace(TRecvPid) ->
+ dbg:stop_clear(),
+ unlink(TRecvPid),
+ exit(TRecvPid, kill),
+ ok.
+
+find_mode(flush, Events) ->
+ lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true;
+ (_) -> false
+ end, Events);
+find_mode(Mode, Events) ->
+ lists:keymember([{mode,Mode}], 3, Events).
+
+find_switch(From, To, Events) ->
+ try lists:foldl(fun({trace_return,check_load,{To,_,_,_}},
+ {trace_call,check_load,[#{mode := From}]}) ->
+ throw(match);
+ (Event, _) ->
+ Event
+ end, undefined, Events) of
+ _ -> false
+ catch
+ throw:match -> true
+ end.
+
+analyse_trace(TRecvPid, TestFun) ->
+ TRecvPid ! {test,self(),TestFun},
+ receive
+ {result,TRecvPid,Result} ->
+ Result
+ after
+ 60000 ->
+ fails
+ end.
+
+trace_receiver(IdleT) ->
+ Msgs = receive_until_idle(IdleT, 5, []),
+ ct:pal("~w trace events generated", [length(Msgs)]),
+ analyse(Msgs).
+
+receive_until_idle(IdleT, WaitN, Msgs) ->
+ receive
+ Msg = {trace_call,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs]);
+ Msg = {trace_return,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs])
+ after
+ IdleT ->
+ if WaitN == 0 ->
+ Msgs;
+ true ->
+ receive_until_idle(IdleT, WaitN-1, Msgs)
+ end
+ end.
+
+analyse(Msgs) ->
+ receive
+ {test,From,TestFun} ->
+ From ! {result,self(),TestFun(Msgs)},
+ analyse(Msgs)
+ end.
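
The suite above repeatedly adjusts the standard handler's overload-protection
parameters (toggle_sync_qlen, drop_new_reqs_qlen, flush_reqs_qlen,
enable_burst_limit) before driving it with message bursts. As a rough sketch,
the same kind of tuning of the default logger_std_h handler on a running node
could look like this; the values below are arbitrary examples, not
recommended settings.

    %% Sketch: raise the overload-protection thresholds of the default
    %% standard handler. The values are examples only.
    tune_std_h() ->
        {ok,{_Mod,#{logger_std_h := StdHConfig} = HConfig}} =
            logger:get_handler_config(logger_std_h),
        NewHConfig =
            HConfig#{logger_std_h =>
                         StdHConfig#{toggle_sync_qlen => 100,
                                     drop_new_reqs_qlen => 1000,
                                     flush_reqs_qlen => 2000,
                                     enable_burst_limit => true}},
        ok = logger:set_handler_config(logger_std_h, NewHConfig).
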
diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl
index 40a016aed0..b1ee29a11f 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE.erl
@@ -428,13 +428,14 @@ stop() ->
ok = wrap_log_test:stop(),
dl_wait().
-%% Give disk logs opened by 'logger' and 'wlt' time to close after
+%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after
%% receiving EXIT signals.
dl_wait() ->
case disk_log:accessible_logs() of
{[], []} ->
ok;
- _ ->
+ _X ->
+ erlang:display(_X),
timer:sleep(100),
dl_wait()
end.
@@ -507,27 +508,27 @@ add_ext(Name, Ext) ->
%% disk_log.
open(Log, File, Where) ->
- logger ! {open, self(), Log, File},
+ wlr_logger ! {open, self(), Log, File},
rec1(ok, Where).
open_ext(Log, File, Where) ->
- logger ! {open_ext, self(), Log, File},
+ wlr_logger ! {open_ext, self(), Log, File},
rec1(ok, Where).
close(Log) ->
- logger ! {close, self(), Log},
+ wlr_logger ! {close, self(), Log},
rec(ok, ?LINE).
sync(Log) ->
- logger ! {sync, self(), Log},
+ wlr_logger ! {sync, self(), Log},
rec(ok, ?LINE).
log_terms(File, Terms) ->
- logger ! {log_terms, self(), File, Terms},
+ wlr_logger ! {log_terms, self(), File, Terms},
rec(ok, ?LINE).
blog_terms(File, Terms) ->
- logger ! {blog_terms, self(), File, Terms},
+ wlr_logger ! {blog_terms, self(), File, Terms},
rec(ok, ?LINE).
rec1(M, Where) ->
diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
index 38449b6bb3..2b24ccc66f 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
@@ -36,9 +36,9 @@
-endif.
init() ->
- spawn(fun() -> start(logger) end),
+ spawn(fun() -> start(wlr_logger) end),
spawn(fun() -> start2(wlt) end),
- wait_registered(logger),
+ wait_registered(wlr_logger),
wait_registered(wlt),
ok.
@@ -52,9 +52,9 @@ wait_registered(Name) ->
end.
stop() ->
- catch logger ! exit,
+ catch wlr_logger ! exit,
catch wlt ! exit,
- wait_unregistered(logger),
+ wait_unregistered(wlr_logger),
wait_unregistered(wlt),
ok.
@@ -82,47 +82,47 @@ loop() ->
{open, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{open_ext, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{format, external}, {size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{close, Pid, Name} ->
R = disk_log:close(Name),
- ?format("logger: close ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: close ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{sync, Pid, Name} ->
R = disk_log:sync(Name),
- ?format("logger: sync ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{log_terms, Pid, Name, Terms} ->
R = disk_log:log_terms(Name, Terms),
- ?format("logger: log_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{blog_terms, Pid, Name, Terms} ->
R = disk_log:blog_terms(Name, Terms),
- ?format("logger: blog_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
exit ->
- ?format("Stopping logger~n", []),
+ ?format("Stopping wlr_logger~n", []),
exit(normal);
_Else ->
- ?format("logger: ignored: ~p~n", [_Else]),
+ ?format("wlr_logger: ignored: ~p~n", [_Else]),
loop()
end.
diff --git a/lib/reltool/src/reltool_target.erl b/lib/reltool/src/reltool_target.erl
index 503e1971b9..0eeeca4a61 100644
--- a/lib/reltool/src/reltool_target.erl
+++ b/lib/reltool/src/reltool_target.erl
@@ -55,7 +55,7 @@ mandatory_modules() ->
kernel_processes(KernelApp) ->
[
{kernelProcess, heart, {heart, start, []}},
- {kernelProcess, error_logger , {error_logger, start_link, []}},
+ {kernelProcess, logger , {logger_server, start_link, []}},
{kernelProcess,
application_controller,
{application_controller, start, [KernelApp]}}
diff --git a/lib/runtime_tools/src/appmon_info.erl b/lib/runtime_tools/src/appmon_info.erl
index b5500085a3..9c587475ca 100644
--- a/lib/runtime_tools/src/appmon_info.erl
+++ b/lib/runtime_tools/src/appmon_info.erl
@@ -690,7 +690,7 @@ find_avoid() ->
[P|Accu];
_ -> Accu end end,
[undefined],
- [application_controller, init, error_logger, gs,
+ [application_controller, init, gs,
node_serv, appmon, appmon_a, appmon_info]).
diff --git a/lib/sasl/doc/src/sasl_app.xml b/lib/sasl/doc/src/sasl_app.xml
index e0693fcb60..48b0b8eafb 100644
--- a/lib/sasl/doc/src/sasl_app.xml
+++ b/lib/sasl/doc/src/sasl_app.xml
@@ -34,12 +34,9 @@
<p>The SASL application provides the following services:</p>
<list type="bulleted">
<item><c>alarm_handler</c></item>
- <item><c>rb</c></item>
<item><c>release_handler</c></item>
<item><c>systools</c></item>
</list>
- <p>The SASL application also includes <c>error_logger</c> event
- handlers for formatting SASL error and crash reports.</p>
<note>
<p>The SASL application in OTP has nothing to do with
"Simple Authentication and Security Layer" (RFC 4422).</p>
@@ -47,51 +44,109 @@
</description>
<section>
- <title>Error Logger Event Handlers</title>
- <p>The following error logger event handlers are used by
- the SASL application.</p>
+ <title>Configuration</title>
+ <p>The following configuration parameters are defined for the SASL
+ application. For more information about configuration parameters, see
+ <seealso marker="kernel:app"><c>app(4)</c></seealso> in Kernel.</p>
+ <p>All configuration parameters are optional.</p>
+ <taglist>
+ <tag><c><![CDATA[start_prg = string() ]]></c></tag>
+ <item>
+ <p>Specifies the program to be used when restarting the system
+ during release installation. Default is
+ <c>$OTP_ROOT/bin/start</c>.</p>
+ </item>
+ <tag><c><![CDATA[masters = [atom()] ]]></c></tag>
+ <item>
+ <p>Specifies the nodes used by this node to read/write release
+ information. This parameter is ignored if parameter
+ <c>client_directory</c> is not set.</p>
+ </item>
+ <tag><c><![CDATA[client_directory = string() ]]></c></tag>
+ <item>
+ <p>This parameter specifies the client directory at the master
+ nodes. For details, see
+ <seealso marker="doc/design_principles:release_handling">Release Handling</seealso>
+ in <em>OTP Design Principles</em>. This parameter is
+ ignored if parameter <c>masters</c> is not set.</p>
+ </item>
+ <tag><c><![CDATA[static_emulator = true | false ]]></c></tag>
+ <item>
+ <p>Indicates if the Erlang emulator is statically installed. A
+ node with a static emulator cannot switch dynamically to a
+ new emulator, as the executable files are written into memory
+ statically. This parameter is ignored if parameters <c>masters</c>
+ and <c>client_directory</c> are not set.</p>
+ </item>
+ <tag><c><![CDATA[releases_dir = string() ]]></c></tag>
+ <item>
+ <p>Indicates where the <c>releases</c> directory is located.
+ The release handler writes all its files to this directory.
+ If this parameter is not set, the OS environment parameter
+ <c>RELDIR</c> is used. By default, this is
+ <c>$OTP_ROOT/releases</c>.</p>
+ </item>
+ <tag><c><![CDATA[utc_log = true | false ]]></c></tag>
+ <item>
+ <p>If set to <c>true</c>, all dates in textual log outputs are
+ displayed in Universal Coordinated Time with the string
+ <c>UTC</c> appended.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <title>Deprecated Error Logger Event Handlers and Configuration</title>
+    <p>In OTP-21, a new API for logging was added. The
+      old <c>error_logger</c> event manager, and the event handlers
+      running on it, still work, but they are no longer used
+      by default.</p>
+ <p>The error logger event handlers <c>sasl_report_tty_h</c>
+      and <c>sasl_report_file_h</c> were earlier used for printing
+      the so-called SASL reports, that is, <em>supervisor
+ reports</em>, <em>crash reports</em>, and <em>progress
+ reports</em>. These reports are now also printed by the standard
+ logger handler started by the Kernel application. Progress
+ reports are by default stopped by a filter, but can easily be
+ added by setting the Kernel configuration
+ parameter <seealso marker="kernel:kernel_app#logger_log_progress"><c>logger_log_progress=true</c></seealso>.</p>
+ <p>If the old error logger event handlers are still desired, they
+ must be added by
+ calling <c>error_logger:add_report_handler/1,2</c>.</p>
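
A sketch of re-installing the old handlers from code follows; the log file
name is a made-up example, and the argument forms follow the old handler
installation code removed from sasl.erl in this same change.

    %% Sketch: manually re-install the old SASL error_logger handlers.
    %% The file name is a made-up example.
    install_old_sasl_handlers() ->
        error_logger:add_report_handler(sasl_report_tty_h, all),
        error_logger:add_report_handler(sasl_report_file_h,
                                        {"log/sasl.log", [write], all}),
        ok.
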
<taglist>
<tag><c>sasl_report_tty_h</c></tag>
<item>
<p>Formats and writes <em>supervisor reports</em>, <em>crash
reports</em>, and <em>progress reports</em> to <c>stdio</c>.
This error logger event handler uses
- <seealso marker="kernel:kernel_app#error_logger_format_depth">error_logger_format_depth</seealso>
- in the Kernel application to limit how much detail is
- printed in crash and supervisor reports.</p>
+ <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso>
+ in the Kernel application to limit how much detail is printed
+ in crash and supervisor reports. If <c>logger_format_depth</c>
+ is not set, it uses the old <c>error_logger_format_depth</c>
+ instead.</p>
</item>
<tag><c>sasl_report_file_h</c></tag>
<item>
<p>Formats and writes <em>supervisor reports</em>, <em>crash
report</em>, and <em>progress report</em> to a single file.
This error logger event handler uses
- <seealso marker="kernel:kernel_app#error_logger_format_depth">error_logger_format_depth</seealso>
- in the Kernel application to limit the details
- printed in crash and supervisor reports.</p>
- </item>
- <tag><c>log_mf_h</c></tag>
- <item>
- <p>This error logger writes <em>all</em> events sent to the
- error logger to disk. Multiple files and log rotation are
- used. For efficiency reasons, each event is written as a
- binary. For more information about this handler,
- see <seealso marker="stdlib:log_mf_h">the STDLIB Reference
- Manual</seealso>.</p>
- <p>To activate this event handler, three SASL
- configuration parameters must be set,
- <c>error_logger_mf_dir</c>, <c>error_logger_mf_maxbytes</c>,
- and <c>error_logger_mf_maxfiles</c>. The next section provides
- more information about the configuration parameters.</p>
+ <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso>
+ in the Kernel application to limit the details printed in
+ crash and supervisor reports. If <c>logger_format_depth</c> is
+ not set, it uses the old <c>error_logger_format_depth</c>
+ instead.</p>
</item>
</taglist>
- </section>
-
- <section>
- <title>Configuration</title>
- <p>The following configuration parameters are defined for the SASL
- application. For more information about configuration parameters, see
- <seealso marker="kernel:app"><c>app(4)</c></seealso> in Kernel.</p>
- <p>All configuration parameters are optional.</p>
+ <p>A similar behaviour, but still using the new logger API, can be
+ obtained by setting the Kernel application environment
+ variable <seealso marker="kernel:kernel_app#logger_sasl_compatible"><c>logger_sasl_compatible=true</c></seealso>. This will add a
+ second instance of the standard logger handler
+      named <c>sasl_h</c>, which prints only the SASL reports. No
+      SASL reports are then printed by the Kernel logger
+      handler.</p>
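
A sketch of enabling this compatibility mode, either statically in a
sys.config file or at run time before the sasl application is started;
the file name is a made-up example.

    %% sys.config (sketch):
    [{kernel, [{logger_sasl_compatible, true}]},
     {sasl,   [{sasl_error_logger, {file,"log/sasl.log"}},
               {errlog_type, error}]}].

    %% or at run time, before sasl is started:
    ok = application:set_env(kernel, logger_sasl_compatible, true).
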
+ <p>The <c>sasl_h</c> handler will be configured according to the
+ values of the following SASL application environment
+ variables.</p>
<taglist>
<tag><c><![CDATA[sasl_error_logger = Value ]]></c></tag>
<item>
@@ -124,6 +179,19 @@
<c>sasl_error_logger</c> to error reports or progress reports,
or both. Default is <c>all</c>.</p>
</item>
+ </taglist>
+
+ <p>The error logger event handler <c>log_mf_h</c> can also still
+ be used. This event handler writes <em>all</em> events sent to
+ the error logger to disk. Multiple files and log rotation are
+ used. For efficiency reasons, each event is written as a
+ binary. For more information about this handler,
+ see <seealso marker="stdlib:log_mf_h">the STDLIB Reference
+ Manual</seealso>.</p>
+ <p>To activate this event handler, three SASL configuration
+ parameters must be
+ set:</p>
+ <taglist>
<tag><c><![CDATA[error_logger_mf_dir = string() | false ]]></c></tag>
<item>
<p>Specifies in which directory <c>log_mf_h</c> is to store
@@ -142,49 +210,12 @@
this parameter is undefined, the <c>log_mf_h</c> handler is
not installed.</p>
</item>
- <tag><c><![CDATA[start_prg = string() ]]></c></tag>
- <item>
- <p>Specifies the program to be used when restarting the system
- during release installation. Default is
- <c>$OTP_ROOT/bin/start</c>.</p>
- </item>
- <tag><c><![CDATA[masters = [atom()] ]]></c></tag>
- <item>
- <p>Specifies the nodes used by this node to read/write release
- information. This parameter is ignored if parameter
- <c>client_directory</c> is not set.</p>
- </item>
- <tag><c><![CDATA[client_directory = string() ]]></c></tag>
- <item>
- <p>This parameter specifies the client directory at the master
- nodes. For details, see
- <seealso marker="doc/design_principles:release_handling">Release Handling</seealso>
- in <em>OTP Design Principles</em>. This parameter is
- ignored if parameter <c>masters</c> is not set.</p>
- </item>
- <tag><c><![CDATA[static_emulator = true | false ]]></c></tag>
- <item>
- <p>Indicates if the Erlang emulator is statically installed. A
- node with a static emulator cannot switch dynamically to a
- new emulator, as the executable files are written into memory
- statically. This parameter is ignored if parameters <c>masters</c>
- and <c>client_directory</c> are not set.</p>
- </item>
- <tag><c><![CDATA[releases_dir = string() ]]></c></tag>
- <item>
- <p>Indicates where the <c>releases</c> directory is located.
- The release handler writes all its files to this directory.
- If this parameter is not set, the OS environment parameter
- <c>RELDIR</c> is used. By default, this is
- <c>$OTP_ROOT/releases</c>.</p>
- </item>
- <tag><c><![CDATA[utc_log = true | false ]]></c></tag>
- <item>
- <p>If set to <c>true</c>, all dates in textual log outputs are
- displayed in Universal Coordinated Time with the string
- <c>UTC</c> appended.</p>
- </item>
</taglist>
+ <p>The new <seealso marker="kernel:logger_disk_log_h">
+      <c>logger_disk_log_h</c></seealso> can be an alternative
+      to <c>log_mf_h</c> if log rotation is desired. Note, however,
+      that it writes the log events as plain text and not as binaries.</p>
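
A minimal sketch of adding such a handler is shown below; the handler id and
file name are made up, and the option names under disk_log_opts are
assumptions, so consult the logger_disk_log_h reference manual for the
authoritative list.

    %% Sketch: a wrap-log based handler as an alternative to log_mf_h.
    %% Option names under disk_log_opts are assumptions; see logger_disk_log_h(3).
    ok = logger:add_handler(my_disk_log_h, logger_disk_log_h,
                            #{disk_log_opts => #{file => "log/erlang_log",
                                                 type => wrap,
                                                 max_no_files => 10,
                                                 max_no_bytes => 100000}}).
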
+
</section>
<section>
diff --git a/lib/sasl/src/sasl.app.src b/lib/sasl/src/sasl.app.src
index 1e8e58a978..60d08ffa54 100644
--- a/lib/sasl/src/sasl.app.src
+++ b/lib/sasl/src/sasl.app.src
@@ -40,8 +40,7 @@
]},
{registered, [sasl_sup, alarm_handler, release_handler]},
{applications, [kernel, stdlib]},
- {env, [{sasl_error_logger, tty},
- {errlog_type, all}]},
+ {env, []},
{mod, {sasl, []}},
{runtime_dependencies, ["tools-2.6.14","stdlib-3.4","kernel-5.3",
"erts-9.0"]}]}.
diff --git a/lib/sasl/src/sasl.erl b/lib/sasl/src/sasl.erl
index 24afaee183..657eb6688a 100644
--- a/lib/sasl/src/sasl.erl
+++ b/lib/sasl/src/sasl.erl
@@ -31,45 +31,52 @@
%%%-----------------------------------------------------------------
-behaviour(application).
--record(state, {sasl_error_logger, error_logger_mf}).
+-record(state, {sasl_logger, error_logger_mf}).
start(_, []) ->
- Handler = get_sasl_error_logger(),
- Type = get_sasl_error_logger_type(),
+ {Dest,Level} = get_logger_info(),
Mf = get_error_logger_mf(),
- add_sasl_error_logger(Handler, Type),
+ add_sasl_logger(Dest, Level),
add_error_logger_mf(Mf),
- State = #state{sasl_error_logger = Handler, error_logger_mf = Mf},
+ State = #state{sasl_logger = Dest, error_logger_mf = Mf},
case supervisor:start_link({local, sasl_sup}, sasl, []) of
{ok, Pid} -> {ok, Pid, State};
Error -> Error
end.
stop(State) ->
- delete_sasl_error_logger(State#state.sasl_error_logger),
+ delete_sasl_logger(State#state.sasl_logger),
delete_error_logger_mf(State#state.error_logger_mf).
%%-----------------------------------------------------------------
%% Internal functions
%%-----------------------------------------------------------------
-get_sasl_error_logger() ->
+get_logger_info() ->
+ case application:get_env(kernel, logger_sasl_compatible) of
+ {ok,true} ->
+ {get_logger_dest(),get_logger_level()};
+ _ ->
+ {std,undefined}
+ end.
+
+get_logger_dest() ->
case application:get_env(sasl, sasl_error_logger) of
- {ok, false} -> undefined;
- {ok, tty} -> tty;
- {ok, {file, File}} when is_list(File) -> {file, File, [write]};
- {ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
- {file, File, Modes};
- {ok, Bad} -> exit({bad_config, {sasl, {sasl_error_logger, Bad}}});
- _ -> undefined
+ {ok, false} -> undefined;
+ {ok, tty} -> standard_io;
+ {ok, {file, File}} when is_list(File) -> {file, File};
+ {ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
+ {file, File, Modes};
+ {ok, Bad} -> exit({bad_config, {sasl, {sasl_logger_dest, Bad}}});
+ undefined -> standard_io
end.
-get_sasl_error_logger_type() ->
+get_logger_level() ->
case application:get_env(sasl, errlog_type) of
- {ok, error} -> error;
- {ok, progress} -> progress;
- {ok, all} -> all;
- {ok, Bad} -> exit({bad_config, {sasl, {errlog_type, Bad}}});
- _ -> all
+ {ok, error} -> error;
+ {ok, progress} -> info;
+ {ok, all} -> info;
+ {ok, Bad} -> exit({bad_config, {sasl, {errlog_type, Bad}}});
+ _ -> info
end.
get_error_logger_mf() ->
@@ -119,23 +126,32 @@ get_mf_maxf() ->
{ok, Bad} -> exit({bad_config, {sasl, {error_logger_mf_maxfiles, Bad}}})
end.
-add_sasl_error_logger(undefined, _Type) -> ok;
-add_sasl_error_logger(Handler, Type) ->
- error_logger:add_report_handler(mod(Handler), args(Handler, Type)).
-
-delete_sasl_error_logger(undefined) -> ok;
-delete_sasl_error_logger(Type) ->
- error_logger:delete_report_handler(mod(Type)).
-
-mod(tty) -> sasl_report_tty_h;
-mod({file, _File, _Modes}) -> sasl_report_file_h.
-
-args({file, File, Modes}, Type) -> {File, Modes, type(Type)};
-args(_, Type) -> type(Type).
-
-type(error) -> error;
-type(progress) -> progress;
-type(_) -> all.
+add_sasl_logger(undefined, _Level) -> ok;
+add_sasl_logger(std, undefined) -> ok;
+add_sasl_logger(Dest, Level) ->
+ FC0 = #{legacy_header=>true,
+ template=>[{logger_formatter,header},"\n",msg,"\n"]},
+ FC = case application:get_env(sasl,utc_log) of
+ {ok,Bool} when is_boolean(Bool) ->
+ FC0#{utc=>Bool};
+ _ ->
+ FC0
+ end,
+ ok = logger:add_handler(sasl_h,logger_std_h,
+ #{level=>Level,
+ filter_default=>stop,
+ filters=>
+ [{sasl_domain,
+ {fun logger_filters:domain/2,
+ {log,equals,[beam,erlang,otp,sasl]}}}],
+ logger_std_h=>#{type=>Dest},
+ formatter=>{logger_formatter,FC}}).
+
+delete_sasl_logger(undefined) -> ok;
+delete_sasl_logger(std) -> ok;
+delete_sasl_logger(_Type) ->
+ _ = logger:remove_handler(sasl_h),
+ ok.
add_error_logger_mf(undefined) -> ok;
add_error_logger_mf({Dir, MaxB, MaxF}) ->
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index a9e8bcecfa..f4b1b54fd1 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1546,6 +1546,12 @@ mandatory_modules() ->
gen_server,
heart,
kernel,
+ logger,
+ logger_filters,
+ logger_server,
+ logger_backend,
+ logger_config,
+ logger_simple,
lists,
proc_lib,
supervisor
@@ -1570,7 +1576,7 @@ preloaded() ->
kernel_processes() ->
[{heart, heart, start, []},
- {error_logger, error_logger, start_link, []},
+ {logger, logger_server, start_link, []},
{application_controller, application_controller, start,
fun(Appls) ->
[{_,App}] = filter(fun({{kernel,_},_App}) -> true;
diff --git a/lib/sasl/test/sasl_SUITE.erl b/lib/sasl/test/sasl_SUITE.erl
index f12bde9b3d..7b63684c53 100644
--- a/lib/sasl/test/sasl_SUITE.erl
+++ b/lib/sasl/test/sasl_SUITE.erl
@@ -21,6 +21,7 @@
-include_lib("common_test/include/ct.hrl").
%% Test server specific exports
+-export([init_per_suite/1,end_per_suite/1]).
-export([all/0,groups/0,init_per_group/2,end_per_group/2]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -37,6 +38,19 @@ all() ->
groups() ->
[].
+init_per_suite(Config) ->
+ S = application:get_env(kernel,logger_sasl_compatible),
+ application:set_env(kernel,logger_sasl_compatible,true),
+ [{sasl_compatible,S}|Config].
+
+end_per_suite(Config) ->
+ case ?config(sasl_compatible,Config) of
+ {ok,X} ->
+ application:set_env(kernel,logger_sasl_compatible,X);
+ undefined ->
+ application:unset_env(kernel,logger_sasl_compatible)
+ end.
+
init_per_group(_GroupName, Config) ->
Config.
diff --git a/lib/sasl/test/sasl_report_SUITE.erl b/lib/sasl/test/sasl_report_SUITE.erl
index 92df5e6e40..96975aaf69 100644
--- a/lib/sasl/test/sasl_report_SUITE.erl
+++ b/lib/sasl/test/sasl_report_SUITE.erl
@@ -53,13 +53,17 @@ gen_server_crash_unicode(Config) ->
gen_server_crash(Config, unicode).
gen_server_crash(Config, Encoding) ->
+ StopFilter = {fun(_,_) -> stop end, ok},
+ logger:add_handler_filter(logger_std_h,stop_all,StopFilter),
+ logger:add_handler_filter(cth_log_redirect,stop_all,StopFilter),
try
do_gen_server_crash(Config, Encoding)
after
- error_logger:tty(true),
+ ok = application:unset_env(kernel, logger_sasl_compatible),
ok = application:unset_env(sasl, sasl_error_logger),
ok = application:unset_env(kernel, error_logger_format_depth),
- error_logger:add_report_handler(cth_log_redirect)
+ logger:remove_handler_filter(logger_std_h,stop_all),
+ logger:remove_handler_filter(cth_log_redirect,stop_all)
end,
ok.
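
The stop filter installed in the test above is a generally useful pattern for
muting a handler temporarily. A standalone sketch (the handler id and filter
id are examples):

    %% Sketch: run Fun/0 with all events to HandlerId suppressed by a
    %% filter that stops everything, then restore the handler.
    with_handler_muted(HandlerId, Fun) ->
        StopFilter = {fun(_LogEvent, _Extra) -> stop end, ok},
        logger:add_handler_filter(HandlerId, mute_all, StopFilter),
        try Fun()
        after
            logger:remove_handler_filter(HandlerId, mute_all)
        end.

    %% Usage: with_handler_muted(logger_std_h, fun() -> noisy_call() end).
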
@@ -70,26 +74,26 @@ do_gen_server_crash(Config, Encoding) ->
SaslLog = filename:join(LogDir, "sasl.log"),
ok = filelib:ensure_dir(SaslLog),
- error_logger:delete_report_handler(cth_log_redirect),
- error_logger:tty(false),
application:stop(sasl),
Modes = [write, {encoding, Encoding}],
+ ok = application:set_env(kernel, logger_sasl_compatible, true),
ok = application:set_env(sasl, sasl_error_logger, {file,SaslLog,Modes},
[{persistent,true}]),
application:set_env(kernel, error_logger_format_depth, 30),
error_logger:logfile({open,KernelLog}),
application:start(sasl),
- io:format("~p\n", [gen_event:which_handlers(error_logger)]),
+ logger:i(print),
crash_me(),
error_logger:logfile(close),
+ application:stop(sasl),
check_file(KernelLog, utf8, 70000, 150000),
check_file(SaslLog, Encoding, 70000, 150000),
- %% ok = file:delete(KernelLog),
- %% ok = file:delete(SaslLog),
+ ok = file:delete(KernelLog),
+ ok = file:delete(SaslLog),
ok.
check_file(File, Encoding, Min, Max) ->
diff --git a/lib/stdlib/src/Makefile b/lib/stdlib/src/Makefile
index 8b156929d7..dc3735055a 100644
--- a/lib/stdlib/src/Makefile
+++ b/lib/stdlib/src/Makefile
@@ -238,6 +238,13 @@ $(EBIN)/erl_tar.beam: ../../kernel/include/file.hrl erl_tar.hrl
$(EBIN)/file_sorter.beam: ../../kernel/include/file.hrl
$(EBIN)/filelib.beam: ../../kernel/include/file.hrl
$(EBIN)/filename.beam: ../../kernel/include/file.hrl
+$(EBIN)/gen_event.beam: ../../kernel/include/logger.hrl
+$(EBIN)/gen_fsm.beam: ../../kernel/include/logger.hrl
+$(EBIN)/gen_server.beam: ../../kernel/include/logger.hrl
+$(EBIN)/gen_statem.beam: ../../kernel/include/logger.hrl
+$(EBIN)/proc_lib.beam: ../../kernel/include/logger.hrl
$(EBIN)/qlc_pt.beam: ../include/ms_transform.hrl
$(EBIN)/shell.beam: ../../kernel/include/file.hrl
+$(EBIN)/supervisor.beam: ../../kernel/include/logger.hrl
+$(EBIN)/supervisor_bridge.beam: ../../kernel/include/logger.hrl
$(EBIN)/zip.beam: ../include/zip.hrl ../../kernel/include/file.hrl
diff --git a/lib/stdlib/src/gen_event.erl b/lib/stdlib/src/gen_event.erl
index 73e4457bd0..53042251cc 100644
--- a/lib/stdlib/src/gen_event.erl
+++ b/lib/stdlib/src/gen_event.erl
@@ -47,16 +47,19 @@
system_replace_state/2,
format_status/2]).
+%% logger callback
+-export([format_log/1]).
+
-export_type([handler/0, handler_args/0, add_handler_ret/0,
del_handler_ret/0]).
--import(error_logger, [error_msg/2]).
-
-record(handler, {module :: atom(),
id = false,
state,
supervised = false :: 'false' | pid()}).
+-include("logger.hrl").
+
%%%=========================================================================
%%% API
%%%=========================================================================
@@ -583,9 +586,13 @@ server_update(Handler1, Func, Event, SName) ->
remove, SName, normal),
no;
{'EXIT', {undef, [{Mod1, handle_info, [_,_], _}|_]}} ->
- error_logger:warning_msg("** Undefined handle_info in ~tp~n"
- "** Unhandled message: ~tp~n", [Mod1, Event]),
- {ok, Handler1};
+ ?LOG_WARNING(#{label=>{gen_event,no_handle_info},
+ module=>Mod1,
+ message=>Event},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_event:format_log/1,
+ error_logger=>#{tag=>warning_msg}}), % warningmap??
+ {ok, Handler1};
Other ->
do_terminate(Mod1, Handler1, {error, Other}, State,
Event, SName, crash),
@@ -737,6 +744,23 @@ report_error(_Handler, normal, _, _, _) -> ok;
report_error(_Handler, shutdown, _, _, _) -> ok;
report_error(_Handler, {swapped,_,_}, _, _, _) -> ok;
report_error(Handler, Reason, State, LastIn, SName) ->
+ ?LOG_ERROR(#{label=>{gen_event,terminate},
+ handler=>handler(Handler),
+ name=>SName,
+ last_message=>LastIn,
+ state=>format_status(terminate,Handler#handler.module,
+ get(),State),
+ reason=>Reason},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_event:format_log/1,
+ error_logger=>#{tag=>error}}).
+
+format_log(#{label:={gen_event,terminate},
+ handler:=Handler,
+ name:=SName,
+ last_message:=LastIn,
+ state:=State,
+ reason:=Reason}) ->
Reason1 =
case Reason of
{'EXIT',{undef,[{M,F,A,L}|MFAs]}} ->
@@ -756,23 +780,18 @@ report_error(Handler, Reason, State, LastIn, SName) ->
_ ->
Reason
end,
- Mod = Handler#handler.module,
- FmtState = case erlang:function_exported(Mod, format_status, 2) of
- true ->
- Args = [get(), State],
- case catch Mod:format_status(terminate, Args) of
- {'EXIT', _} -> State;
- Else -> Else
- end;
- _ ->
- State
- end,
- error_msg("** gen_event handler ~p crashed.~n"
- "** Was installed in ~tp~n"
- "** Last event was: ~tp~n"
- "** When handler state == ~tp~n"
- "** Reason == ~tp~n",
- [handler(Handler),SName,LastIn,FmtState,Reason1]).
+ {"** gen_event handler ~p crashed.~n"
+ "** Was installed in ~tp~n"
+ "** Last event was: ~tp~n"
+ "** When handler state == ~tp~n"
+ "** Reason == ~tp~n",
+ [Handler,SName,LastIn,State,Reason1]};
+format_log(#{label:={gen_event,no_handle_info},
+ module:=Mod,
+ message:=Msg}) ->
+ {"** Undefined handle_info in ~tp~n"
+ "** Unhandled message: ~tp~n",
+ [Mod, Msg]}.
handler(Handler) when not Handler#handler.id ->
Handler#handler.module;
@@ -805,17 +824,21 @@ format_status(Opt, StatusData) ->
[PDict, SysState, Parent, _Debug, [ServerName, MSL, _HibernateAfterTimeout, _Hib]] = StatusData,
Header = gen:format_status_header("Status for event handler",
ServerName),
- FmtMSL = [case erlang:function_exported(Mod, format_status, 2) of
- true ->
- Args = [PDict, State],
- case catch Mod:format_status(Opt, Args) of
- {'EXIT', _} -> MSL;
- Else -> MS#handler{state = Else}
- end;
- _ ->
- MS
- end || #handler{module = Mod, state = State} = MS <- MSL],
+ FmtMSL = [MS#handler{state=format_status(Opt, Mod, PDict, State)}
+ || #handler{module = Mod, state = State} = MS <- MSL],
[{header, Header},
{data, [{"Status", SysState},
{"Parent", Parent}]},
{items, {"Installed handlers", FmtMSL}}].
+
+format_status(Opt, Mod, PDict, State) ->
+ case erlang:function_exported(Mod, format_status, 2) of
+ true ->
+ Args = [PDict, State],
+ case catch Mod:format_status(Opt, Args) of
+ {'EXIT', _} -> State;
+ Else -> Else
+ end;
+ false ->
+ State
+ end.
diff --git a/lib/stdlib/src/gen_fsm.erl b/lib/stdlib/src/gen_fsm.erl
index 8c7db65563..77826c3dc6 100644
--- a/lib/stdlib/src/gen_fsm.erl
+++ b/lib/stdlib/src/gen_fsm.erl
@@ -105,6 +105,8 @@
%%%
%%% ---------------------------------------------------
+-include("logger.hrl").
+
-export([start/3, start/4,
start_link/3, start_link/4,
stop/1, stop/3,
@@ -124,6 +126,9 @@
system_replace_state/2,
format_status/2]).
+%% logger callback
+-export([format_log/1]).
+
-deprecated({start, 3, next_major_release}).
-deprecated({start, 4, next_major_release}).
-deprecated({start_link, 3, next_major_release}).
@@ -144,8 +149,6 @@
-deprecated({enter_loop, 5, next_major_release}).
-deprecated({enter_loop, 6, next_major_release}).
--import(error_logger, [format/2]).
-
%%% ---------------------------------------------------
%%% Interface functions.
%%% ---------------------------------------------------
@@ -499,8 +502,12 @@ handle_msg(Msg, Parent, Name, StateName, StateData, Mod, _Time, HibernateAfterTi
reply(From, Reply),
exit(R);
{'EXIT', {undef, [{Mod, handle_info, [_,_,_], _}|_]}} ->
- error_logger:warning_msg("** Undefined handle_info in ~p~n"
- "** Unhandled message: ~tp~n", [Mod, Msg]),
+ ?LOG_WARNING(#{label=>{gen_fsm,no_handle_info},
+ module=>Mod,
+ message=>Msg},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_fsm:format_log/1,
+ error_logger=>#{tag=>warning_msg}}),
loop(Parent, Name, StateName, StateData, Mod, infinity, HibernateAfterTimeout, []);
{'EXIT', What} ->
terminate(What, Name, Msg, Mod, StateName, StateData, []);
@@ -603,6 +610,24 @@ terminate(Reason, Name, Msg, Mod, StateName, StateData, Debug) ->
end.
error_info(Reason, Name, Msg, StateName, StateData, Debug) ->
+ ?LOG_ERROR(#{label=>{gen_fsm,terminate},
+ name=>Name,
+ last_message=>Msg,
+ state_name=>StateName,
+ state_data=>StateData,
+ reason=>Reason},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_fsm:format_log/1,
+ error_logger=>#{tag=>error}}),
+ sys:print_log(Debug),
+ ok.
+
+format_log(#{label:={gen_fsm,terminate},
+ name:=Name,
+ last_message:=Msg,
+ state_name:=StateName,
+ state_data:=StateData,
+ reason:=Reason}) ->
Reason1 =
case Reason of
{undef,[{M,F,A,L}|MFAs]} ->
@@ -620,14 +645,18 @@ error_info(Reason, Name, Msg, StateName, StateData, Debug) ->
_ ->
Reason
end,
- Str = "** State machine ~tp terminating \n" ++
- get_msg_str(Msg) ++
- "** When State == ~tp~n"
- "** Data == ~tp~n"
- "** Reason for termination = ~n** ~tp~n",
- format(Str, [Name, get_msg(Msg), StateName, StateData, Reason1]),
- sys:print_log(Debug),
- ok.
+ {"** State machine ~tp terminating \n" ++
+ get_msg_str(Msg) ++
+ "** When State == ~tp~n"
+ "** Data == ~tp~n"
+ "** Reason for termination = ~n** ~tp~n",
+ [Name, get_msg(Msg), StateName, StateData, Reason1]};
+format_log(#{label:={gen_fsm,no_handle_info},
+ module:=Mod,
+ message:=Msg}) ->
+ {"** Undefined handle_info in ~p~n"
+ "** Unhandled message: ~tp~n",
+ [Mod, Msg]}.
get_msg_str({'$gen_event', _Event}) ->
"** Last event in was ~tp~n";
diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl
index f29314d0a2..f65ef78636 100644
--- a/lib/stdlib/src/gen_server.erl
+++ b/lib/stdlib/src/gen_server.erl
@@ -104,9 +104,14 @@
system_replace_state/2,
format_status/2]).
+%% logger callback
+-export([format_log/1]).
+
%% Internal exports
-export([init_it/6]).
+-include("logger.hrl").
+
-define(
STACKTRACE(),
element(2, erlang:process_info(self(), current_stacktrace))).
@@ -636,9 +641,13 @@ try_dispatch(Mod, Func, Msg, State) ->
error:undef = R:Stacktrace when Func == handle_info ->
case erlang:function_exported(Mod, handle_info, 2) of
false ->
- error_logger:warning_msg("** Undefined handle_info in ~p~n"
- "** Unhandled message: ~tp~n",
- [Mod, Msg]),
+ ?LOG_WARNING(
+ #{label=>{gen_server,no_handle_info},
+ module=>Mod,
+ message=>Msg},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_server:format_log/1,
+ error_logger=>#{tag=>warning_msg}}),
{ok, {noreply, State}};
true ->
{'EXIT', error, R, Stacktrace}
@@ -849,8 +858,7 @@ terminate(Class, Reason, Stacktrace, ReportReason, Name, From, Msg, Mod, State,
Reply = try_terminate(Mod, terminate_reason(Class, Reason, Stacktrace), State),
case Reply of
{'EXIT', C, R, S} ->
- FmtState = format_status(terminate, Mod, get(), State),
- error_info({R, S}, Name, From, Msg, FmtState, Debug),
+ error_info({R, S}, Name, From, Msg, Mod, State, Debug),
erlang:raise(C, R, S);
_ ->
case {Class, Reason} of
@@ -858,8 +866,7 @@ terminate(Class, Reason, Stacktrace, ReportReason, Name, From, Msg, Mod, State,
{exit, shutdown} -> ok;
{exit, {shutdown,_}} -> ok;
_ ->
- FmtState = format_status(terminate, Mod, get(), State),
- error_info(ReportReason, Name, From, Msg, FmtState, Debug)
+ error_info(ReportReason, Name, From, Msg, Mod, State, Debug)
end
end,
case Stacktrace of
@@ -872,12 +879,46 @@ terminate(Class, Reason, Stacktrace, ReportReason, Name, From, Msg, Mod, State,
terminate_reason(error, Reason, Stacktrace) -> {Reason, Stacktrace};
terminate_reason(exit, Reason, _Stacktrace) -> Reason.
-error_info(_Reason, application_controller, _From, _Msg, _State, _Debug) ->
+error_info(_Reason, application_controller, _From, _Msg, _Mod, _State, _Debug) ->
%% OTP-5811 Don't send an error report if it's the system process
%% application_controller which is terminating - let init take care
%% of it instead
ok;
-error_info(Reason, Name, From, Msg, State, Debug) ->
+error_info(Reason, Name, From, Msg, Mod, State, Debug) ->
+ ?LOG_ERROR(#{label=>{gen_server,terminate},
+ name=>Name,
+ last_message=>Msg,
+ state=>format_status(terminate, Mod, get(), State),
+ reason=>Reason,
+ client_info=>client_stacktrace(From)},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_server:format_log/1,
+ error_logger=>#{tag=>error}}),
+ sys:print_log(Debug),
+ ok.
+
+client_stacktrace(undefined) ->
+ undefined;
+client_stacktrace({From,_Tag}) ->
+ client_stacktrace(From);
+client_stacktrace(From) when is_pid(From), node(From) =:= node() ->
+ case process_info(From, [current_stacktrace, registered_name]) of
+ undefined ->
+ {From,dead};
+ [{current_stacktrace, Stacktrace}, {registered_name, []}] ->
+ {From,{From,Stacktrace}};
+ [{current_stacktrace, Stacktrace}, {registered_name, Name}] ->
+ {From,{Name,Stacktrace}}
+ end;
+client_stacktrace(From) when is_pid(From) ->
+ {From,remote}.
+
+format_log(#{label:={gen_server,terminate},
+ name:=Name,
+ last_message:=Msg,
+ state:=State,
+ reason:=Reason,
+ client_info:=Client}) ->
Reason1 =
case Reason of
{undef,[{M,F,A,L}|MFAs]} ->
@@ -893,36 +934,31 @@ error_info(Reason, Name, From, Msg, State, Debug) ->
end
end;
_ ->
- error_logger:limit_term(Reason)
+ logger:limit_term(Reason)
end,
- {ClientFmt, ClientArgs} = client_stacktrace(From),
- LimitedState = error_logger:limit_term(State),
- error_logger:format("** Generic server ~tp terminating \n"
- "** Last message in was ~tp~n"
- "** When Server state == ~tp~n"
- "** Reason for termination == ~n** ~tp~n" ++ ClientFmt,
- [Name, Msg, LimitedState, Reason1] ++ ClientArgs),
- sys:print_log(Debug),
- ok.
-client_stacktrace(undefined) ->
+ {ClientFmt,ClientArgs} = format_client_log(Client),
+ {"** Generic server ~tp terminating \n"
+ "** Last message in was ~tp~n"
+ "** When Server state == ~tp~n"
+ "** Reason for termination == ~n** ~tp~n" ++ ClientFmt,
+ [Name, Msg, logger:limit_term(State), Reason1] ++ ClientArgs};
+format_log(#{label:={gen_server,no_handle_info},
+ module:=Mod,
+ message:=Msg}) ->
+ {"** Undefined handle_info in ~p~n"
+ "** Unhandled message: ~tp~n",
+ [Mod, Msg]}.
+
+format_client_log(undefined) ->
{"", []};
-client_stacktrace({From, _Tag}) ->
- client_stacktrace(From);
-client_stacktrace(From) when is_pid(From), node(From) =:= node() ->
- case process_info(From, [current_stacktrace, registered_name]) of
- undefined ->
- {"** Client ~p is dead~n", [From]};
- [{current_stacktrace, Stacktrace}, {registered_name, []}] ->
- {"** Client ~p stacktrace~n"
- "** ~tp~n",
- [From, Stacktrace]};
- [{current_stacktrace, Stacktrace}, {registered_name, Name}] ->
- {"** Client ~tp stacktrace~n"
- "** ~tp~n",
- [Name, Stacktrace]}
- end;
-client_stacktrace(From) when is_pid(From) ->
- {"** Client ~p is remote on node ~p~n", [From, node(From)]}.
+format_client_log({From,dead}) ->
+ {"** Client ~p is dead~n", [From]};
+format_client_log({From,remote}) ->
+ {"** Client ~p is remote on node ~p~n", [From, node(From)]};
+format_client_log({_From,{Name,Stacktrace}}) ->
+ {"** Client ~tp stacktrace~n"
+ "** ~tp~n",
+ [Name, Stacktrace]}.
%%-----------------------------------------------------------------
%% Status information
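
The gen_event, gen_fsm and gen_server changes above all follow the same
pattern: emit a structured report together with a report_cb callback that
turns the report into a {Format,Args} pair for legacy-style formatting. A
minimal sketch of the same pattern in a user module; the module name, label
and report fields are hypothetical.

    -module(my_overload_log).
    -export([report_overload/2, format_log/1]).
    -include_lib("kernel/include/logger.hrl").

    %% Log a structured report; formatting is deferred to format_log/1.
    report_overload(Name, QueueLen) ->
        ?LOG_WARNING(#{label=>{my_overload_log,overload},
                       name=>Name,
                       queue_len=>QueueLen},
                     #{report_cb=>fun my_overload_log:format_log/1}).

    %% Report callback: return {Format,Args} for the formatter.
    format_log(#{label:={my_overload_log,overload},
                 name:=Name,
                 queue_len:=QueueLen}) ->
        {"** ~tp overloaded, queue length ~p~n", [Name, QueueLen]}.
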
diff --git a/lib/stdlib/src/gen_statem.erl b/lib/stdlib/src/gen_statem.erl
index f7dc0050b3..f558f0d33e 100644
--- a/lib/stdlib/src/gen_statem.erl
+++ b/lib/stdlib/src/gen_statem.erl
@@ -19,6 +19,8 @@
%%
-module(gen_statem).
+-include("logger.hrl").
+
%% API
-export(
[start/3,start/4,start_link/3,start_link/4,
@@ -44,6 +46,9 @@
-export(
[wakeup_from_hibernate/3]).
+%% logger callback
+-export([format_log/1]).
+
%% Type exports for templates and callback modules
-export_type(
[event_type/0,
@@ -702,7 +707,7 @@ init_it(Starter, Parent, ServerRef, Module, Args, Opts) ->
error_info(
Class, Reason, Stacktrace,
#state{name = Name},
- [], undefined),
+ []),
erlang:raise(Class, Reason, Stacktrace)
end.
@@ -733,7 +738,7 @@ init_result(Starter, Parent, ServerRef, Module, Result, Opts) ->
error_info(
error, Error, ?STACKTRACE(),
#state{name = Name},
- [], undefined),
+ []),
exit(Error)
end.
@@ -1849,9 +1854,7 @@ terminate(
catch
_ -> ok;
C:R:ST ->
- error_info(
- C, R, ST, S, Q,
- format_status(terminate, get(), S)),
+ error_info(C, R, ST, S, Q),
sys:print_log(Debug),
erlang:raise(C, R, ST)
end;
@@ -1867,9 +1870,7 @@ terminate(
{shutdown,_} ->
terminate_sys_debug(Debug, S, State, Reason);
_ ->
- error_info(
- Class, Reason, Stacktrace, S, Q,
- format_status(terminate, get(), S)),
+ error_info(Class, Reason, Stacktrace, S, Q),
sys:print_log(Debug)
end,
case Stacktrace of
@@ -1889,8 +1890,28 @@ error_info(
name = Name,
callback_mode = CallbackMode,
state_enter = StateEnter,
- postponed = P},
- Q, FmtData) ->
+ postponed = P} = S,
+ Q) ->
+ ?LOG_ERROR(#{label=>{gen_statem,terminate},
+ name=>Name,
+ queue=>Q,
+ postponed=>P,
+ callback_mode=>CallbackMode,
+ state_enter=>StateEnter,
+ state=>format_status(terminate, get(), S),
+ reason=>{Class,Reason,Stacktrace}},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_statem:format_log/1,
+ error_logger=>#{tag=>error}}).
+
+format_log(#{label:={gen_statem,terminate},
+ name:=Name,
+ queue:=Q,
+ postponed:=P,
+ callback_mode:=CallbackMode,
+ state_enter:=StateEnter,
+ state:=FmtData,
+ reason:={Class,Reason,Stacktrace}}) ->
{FixedReason,FixedStacktrace} =
case Stacktrace of
[{M,F,Args,_}|ST]
@@ -1917,7 +1938,7 @@ error_info(
_ -> {Reason,Stacktrace}
end,
[LimitedP, LimitedFmtData, LimitedFixedReason] =
- [error_logger:limit_term(D) || D <- [P, FmtData, FixedReason]],
+ [logger:limit_term(D) || D <- [P, FmtData, FixedReason]],
CBMode =
case StateEnter of
true ->
@@ -1925,48 +1946,46 @@ error_info(
false ->
CallbackMode
end,
- error_logger:format(
- "** State machine ~tp terminating~n" ++
- case Q of
- [] -> "";
- _ -> "** Last event = ~tp~n"
- end ++
- "** When server state = ~tp~n" ++
- "** Reason for termination = ~w:~tp~n" ++
- "** Callback mode = ~p~n" ++
- case Q of
- [_,_|_] -> "** Queued = ~tp~n";
- _ -> ""
- end ++
- case P of
- [] -> "";
- _ -> "** Postponed = ~tp~n"
- end ++
- case FixedStacktrace of
- [] -> "";
- _ -> "** Stacktrace =~n** ~tp~n"
- end,
- [Name |
- case Q of
- [] -> [];
- [Event|_] -> [Event]
- end] ++
- [LimitedFmtData,
- Class,LimitedFixedReason,
- CBMode] ++
- case Q of
- [_|[_|_] = Events] -> [Events];
- _ -> []
- end ++
- case P of
- [] -> [];
- _ -> [LimitedP]
- end ++
- case FixedStacktrace of
- [] -> [];
- _ -> [FixedStacktrace]
- end).
-
+ {"** State machine ~tp terminating~n" ++
+ case Q of
+ [] -> "";
+ _ -> "** Last event = ~tp~n"
+ end ++
+ "** When server state = ~tp~n" ++
+ "** Reason for termination = ~w:~tp~n" ++
+ "** Callback mode = ~p~n" ++
+ case Q of
+ [_,_|_] -> "** Queued = ~tp~n";
+ _ -> ""
+ end ++
+ case P of
+ [] -> "";
+ _ -> "** Postponed = ~tp~n"
+ end ++
+ case FixedStacktrace of
+ [] -> "";
+ _ -> "** Stacktrace =~n** ~tp~n"
+ end,
+ [Name |
+ case Q of
+ [] -> [];
+ [Event|_] -> [Event]
+ end] ++
+ [LimitedFmtData,
+ Class,LimitedFixedReason,
+ CBMode] ++
+ case Q of
+ [_|[_|_] = Events] -> [Events];
+ _ -> []
+ end ++
+ case P of
+ [] -> [];
+ _ -> [LimitedP]
+ end ++
+ case FixedStacktrace of
+ [] -> [];
+ _ -> [FixedStacktrace]
+ end}.
%% Call Module:format_status/2 or return a default value
format_status(
diff --git a/lib/stdlib/src/proc_lib.erl b/lib/stdlib/src/proc_lib.erl
index 1991585c13..8d01840313 100644
--- a/lib/stdlib/src/proc_lib.erl
+++ b/lib/stdlib/src/proc_lib.erl
@@ -30,7 +30,7 @@
start/3, start/4, start/5, start_link/3, start_link/4, start_link/5,
hibernate/3,
init_ack/1, init_ack/2,
- init_p/3,init_p/5,format/1,format/2,format/3,
+ init_p/3,init_p/5,format/1,format/2,format/3,report_cb/1,
initial_call/1,
translate_initial_call/1,
stop/1, stop/3]).
@@ -40,6 +40,8 @@
-export_type([spawn_option/0]).
+-include("logger.hrl").
+
%%-----------------------------------------------------------------------------
-type priority_level() :: 'high' | 'low' | 'max' | 'normal'.
@@ -503,10 +505,13 @@ crash_report(exit, normal, _, _) -> ok;
crash_report(exit, shutdown, _, _) -> ok;
crash_report(exit, {shutdown,_}, _, _) -> ok;
crash_report(Class, Reason, StartF, Stacktrace) ->
- OwnReport = my_info(Class, Reason, StartF, Stacktrace),
- LinkReport = linked_info(self()),
- Rep = [OwnReport,LinkReport],
- error_logger:error_report(crash_report, Rep).
+ ?LOG_ERROR(#{label=>{proc_lib,crash},
+ report=>[my_info(Class, Reason, StartF, Stacktrace),
+ linked_info(self())]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun proc_lib:report_cb/1,
+ logger_formatter=>#{title=>"CRASH REPORT"},
+ error_logger=>#{tag=>error_report,type=>crash_report}}).
my_info(Class, Reason, [], Stacktrace) ->
my_info_1(Class, Reason, Stacktrace);
@@ -548,10 +553,10 @@ get_ancestors(Pid) ->
%% assumed that all report handlers call proc_lib:format().
get_messages(Pid) ->
Messages = get_process_messages(Pid),
- {messages, error_logger:limit_term(Messages)}.
+ {messages, logger:limit_term(Messages)}.
get_process_messages(Pid) ->
- Depth = error_logger:get_format_depth(),
+ Depth = logger:get_format_depth(),
case Pid =/= self() orelse Depth =:= unlimited of
true ->
{messages, Messages} = get_process_info(Pid, messages),
@@ -581,7 +586,7 @@ get_cleaned_dictionary(Pid) ->
cleaned_dict(Dict) ->
CleanDict = clean_dict(Dict),
- error_logger:limit_term(CleanDict).
+ logger:limit_term(CleanDict).
clean_dict([{'$ancestors',_}|Dict]) ->
clean_dict(Dict);
@@ -742,9 +747,18 @@ check({badrpc,Error}) -> Error;
check(Res) -> Res.
%%% -----------------------------------------------------------
-%%% Format (and write) a generated crash info structure.
+%%% Format a generated crash info structure.
%%% -----------------------------------------------------------
+-spec report_cb(CrashReport) -> {Format,Args} when
+ CrashReport :: #{label=>{proc_lib,crash},report=>[term()]},
+ Format :: io:format(),
+ Args :: [term()].
+report_cb(#{label:={proc_lib,crash},
+ report:=CrashReport}) ->
+ Depth = logger:get_format_depth(),
+ get_format_and_args(CrashReport, utf8, Depth).
+
-spec format(CrashReport) -> string() when
CrashReport :: [term()].
format(CrashReport) ->
@@ -762,61 +776,74 @@ format(CrashReport, Encoding) ->
Encoding :: latin1 | unicode | utf8,
Depth :: unlimited | pos_integer().
-format([OwnReport,LinkReport], Encoding, Depth) ->
+format(CrashReport, Encoding, Depth) ->
+ {F,A} = get_format_and_args(CrashReport, Encoding, Depth),
+ lists:flatten(io_lib:format(F,A)).
+
+get_format_and_args([OwnReport,LinkReport], Encoding, Depth) ->
Extra = {Encoding,Depth},
MyIndent = " ",
- OwnFormat = format_report(OwnReport, MyIndent, Extra),
- LinkFormat = format_link_report(LinkReport, MyIndent, Extra),
- Str = io_lib:format(" crasher:~n~ts neighbours:~n~ts",
- [OwnFormat, LinkFormat]),
- lists:flatten(Str).
+ {OwnFormat,OwnArgs} = format_report(OwnReport, MyIndent, Extra, [], []),
+ {LinkFormat,LinkArgs} = format_link_report(LinkReport, MyIndent, Extra, [], []),
+ {" crasher:~n"++OwnFormat++" neighbours:~n"++LinkFormat,OwnArgs++LinkArgs}.
-format_link_report([Link|Reps], Indent, Extra) ->
+format_link_report([], _Indent, _Extra, Format, Args) ->
+ {lists:flatten(lists:reverse(Format)),lists:append(lists:reverse(Args))};
+format_link_report([Link|Reps], Indent, Extra, Format, Args) ->
Rep = case Link of
{neighbour,Rep0} -> Rep0;
_ -> Link
end,
LinkIndent = [" ",Indent],
- [Indent,"neighbour:\n",format_report(Rep, LinkIndent, Extra)|
- format_link_report(Reps, Indent, Extra)];
-format_link_report(Rep, Indent, Extra) ->
- format_report(Rep, Indent, Extra).
-
-format_report(Rep, Indent, Extra) when is_list(Rep) ->
- format_rep(Rep, Indent, Extra);
-format_report(Rep, Indent, {Enc,unlimited}) ->
- io_lib:format("~s~"++modifier(Enc)++"p~n", [Indent, Rep]);
-format_report(Rep, Indent, {Enc,Depth}) ->
- io_lib:format("~s~"++modifier(Enc)++"P~n", [Indent, Rep, Depth]).
-
-format_rep([{initial_call,InitialCall}|Rep], Indent, Extra) ->
- [format_mfa(Indent, InitialCall, Extra)|format_rep(Rep, Indent, Extra)];
-format_rep([{error_info,{Class,Reason,StackTrace}}|Rep], Indent, Extra) ->
- [format_exception(Class, Reason, StackTrace, Extra)|
- format_rep(Rep, Indent, Extra)];
-format_rep([{Tag,Data}|Rep], Indent, Extra) ->
- [format_tag(Indent, Tag, Data, Extra)|format_rep(Rep, Indent, Extra)];
-format_rep(_, _, _Extra) ->
- [].
-
-format_exception(Class, Reason, StackTrace, {Enc,_}=Extra) ->
- PF = pp_fun(Extra),
- StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
- %% EI = " exception: ",
- EI = " ",
- [EI, lib:format_exception(1+length(EI), Class, Reason,
- StackTrace, StackFun, PF, Enc), "\n"].
+ {LinkFormat,LinkArgs} = format_report(Rep, LinkIndent, Extra, [], []),
+ F = "~sneighbour:\n"++LinkFormat,
+ A = [Indent|LinkArgs],
+ format_link_report(Reps, Indent, Extra, [F|Format], [A|Args]);
+format_link_report(Rep, Indent, Extra, Format, Args) ->
+ {F,A} = format_report(Rep, Indent, Extra, [], []),
+ format_link_report([], Indent, Extra, [F|Format],[A|Args]).
+
+format_report([], _Indent, _Extra, Format, Args) ->
+ {lists:flatten(lists:reverse(Format)),lists:append(lists:reverse(Args))};
+format_report([Rep|Reps], Indent, Extra, Format, Args) ->
+ {F,A} = format_rep(Rep, Indent, Extra),
+ format_report(Reps, Indent, Extra, [F|Format], [A|Args]);
+format_report(Rep, Indent, {Enc,unlimited}=Extra, Format, Args) ->
+ {F,A} = {"~s~"++modifier(Enc)++"p~n", [Indent, Rep]},
+ format_report([], Indent, Extra, [F|Format], [A|Args]);
+format_report(Rep, Indent, {Enc,Depth}=Extra, Format, Args) ->
+ {F,A} = {"~s~"++modifier(Enc)++"P~n", [Indent, Rep, Depth]},
+ format_report([], Indent, Extra, [F|Format], [A|Args]).
+
+format_rep({initial_call,InitialCall}, Indent, Extra) ->
+ format_mfa(Indent, InitialCall, Extra);
+format_rep({error_info,{Class,Reason,StackTrace}}, _Indent, Extra) ->
+ {lists:flatten(format_exception(Class, Reason, StackTrace, Extra)),[]};
+format_rep({Tag,Data}, Indent, Extra) ->
+ format_tag(Indent, Tag, Data, Extra).
format_mfa(Indent, {M,F,Args}=StartF, {Enc,_}=Extra) ->
try
A = length(Args),
- [Indent,"initial call: ",atom_to_list(M),$:,to_string(F, Enc),$/,
- integer_to_list(A),"\n"]
+ {lists:flatten([Indent,"initial call: ",atom_to_list(M),
+ $:,to_string(F, Enc),$/,integer_to_list(A),"\n"]),[]}
catch
error:_ ->
format_tag(Indent, initial_call, StartF, Extra)
end.
+format_tag(Indent, Tag, Data, {Enc,Depth}) ->
+ {P,Tl} = p(Enc, Depth),
+ {"~s~p: ~80.18" ++ P ++ "\n", [Indent, Tag, Data|Tl]}.
+
+format_exception(Class, Reason, StackTrace, {Enc,_}=Extra) ->
+ PF = pp_fun(Extra),
+ StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
+ %% EI = " exception: ",
+ EI = " ",
+ [EI, lib:format_exception(1+length(EI), Class, Reason,
+ StackTrace, StackFun, PF, Enc), "\n"].
+
to_string(A, latin1) ->
io_lib:write_atom_as_latin1(A);
to_string(A, _) ->
@@ -828,10 +855,6 @@ pp_fun({Enc,Depth}) ->
io_lib:format("~." ++ integer_to_list(I) ++ P, [Term|Tl])
end.
-format_tag(Indent, Tag, Data, {Enc,Depth}) ->
- {P,Tl} = p(Enc, Depth),
- io_lib:format("~s~p: ~80.18" ++ P ++ "\n", [Indent, Tag, Data|Tl]).
-
p(Encoding, Depth) ->
{Letter, Tl} = case Depth of
unlimited -> {"p", []};
diff --git a/lib/stdlib/src/supervisor.erl b/lib/stdlib/src/supervisor.erl
index e56415650f..eb46ac611a 100644
--- a/lib/stdlib/src/supervisor.erl
+++ b/lib/stdlib/src/supervisor.erl
@@ -35,6 +35,20 @@
%% For release_handler only
-export([get_callback_module/1]).
+-include("logger.hrl").
+
+-define(report_error(Error, Reason, Child, SupName),
+ ?LOG_ERROR(#{label=>{supervisor,Error},
+ report=>[{supervisor,SupName},
+ {errorContext,Error},
+ {reason,Reason},
+ {offender,extract_child(Child)}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"SUPERVISOR REPORT"},
+ error_logger=>#{tag=>error_report,
+ type=>supervisor_report}})).
+
%%--------------------------------------------------------------------------
-export_type([sup_flags/0, child_spec/0, startchild_ret/0, strategy/0]).
@@ -340,7 +354,7 @@ start_children(Children, SupName) ->
{ok, Pid, _Extra} ->
{update,Child#child{pid = Pid}};
{error, Reason} ->
- report_error(start_error, Reason, Child, SupName),
+ ?report_error(start_error, Reason, Child, SupName),
{abort,{failed_to_start_child,Id,Reason}}
end
end,
@@ -565,8 +579,9 @@ handle_info({'EXIT', Pid, Reason}, State) ->
end;
handle_info(Msg, State) ->
- error_logger:error_msg("Supervisor received unexpected message: ~tp~n",
- [Msg]),
+ ?LOG_ERROR("Supervisor received unexpected message: ~tp~n",[Msg],
+ #{domain=>[beam,erlang,otp],
+ error_logger=>#{tag=>error}}),
{noreply, State}.
%%
@@ -683,7 +698,7 @@ restart_child(Pid, Reason, State) ->
end.
do_restart(Reason, Child, State) when ?is_permanent(Child) ->
- report_error(child_terminated, Reason, Child, State#state.name),
+ ?report_error(child_terminated, Reason, Child, State#state.name),
restart(Child, State);
do_restart(normal, Child, State) ->
NState = del_child(Child, State),
@@ -695,10 +710,10 @@ do_restart({shutdown, _Term}, Child, State) ->
NState = del_child(Child, State),
{ok, NState};
do_restart(Reason, Child, State) when ?is_transient(Child) ->
- report_error(child_terminated, Reason, Child, State#state.name),
+ ?report_error(child_terminated, Reason, Child, State#state.name),
restart(Child, State);
do_restart(Reason, Child, State) when ?is_temporary(Child) ->
- report_error(child_terminated, Reason, Child, State#state.name),
+ ?report_error(child_terminated, Reason, Child, State#state.name),
NState = del_child(Child, State),
{ok, NState}.
@@ -718,7 +733,7 @@ restart(Child, State) ->
Other
end;
{terminate, NState} ->
- report_error(shutdown, reached_max_restart_intensity,
+ ?report_error(shutdown, reached_max_restart_intensity,
Child, State#state.name),
{shutdown, del_child(Child, NState)}
end.
@@ -745,7 +760,7 @@ restart(simple_one_for_one, Child, State0) ->
NRestarts = State2#state.dynamic_restarts + 1,
State3 = State2#state{dynamic_restarts = NRestarts},
NState = dyn_store(ROldPid, A, State3),
- report_error(start_error, Error, Child, NState#state.name),
+ ?report_error(start_error, Error, Child, NState#state.name),
{{try_again, ROldPid}, NState}
end;
restart(one_for_one, #child{id=Id} = Child, State) ->
@@ -759,7 +774,7 @@ restart(one_for_one, #child{id=Id} = Child, State) ->
{ok, NState};
{error, Reason} ->
NState = set_pid(restarting(OldPid), Id, State),
- report_error(start_error, Reason, Child, State#state.name),
+ ?report_error(start_error, Reason, Child, State#state.name),
{{try_again,Id}, NState}
end;
restart(rest_for_one, #child{id=Id} = Child, #state{name=SupName} = State) ->
@@ -820,7 +835,7 @@ do_terminate(Child, SupName) when is_pid(Child#child.pid) ->
{error, normal} when not (?is_permanent(Child)) ->
ok;
{error, OtherReason} ->
- report_error(shutdown_error, OtherReason, Child, SupName)
+ ?report_error(shutdown_error, OtherReason, Child, SupName)
end,
ok;
do_terminate(_Child, _SupName) ->
@@ -924,7 +939,7 @@ terminate_dynamic_children(State) ->
end,
%% Unroll stacked errors and report them
dict:fold(fun(Reason, Ls, _) ->
- report_error(shutdown_error, Reason,
+ ?report_error(shutdown_error, Reason,
Child#child{pid=Ls}, State#state.name)
end, ok, EStack).
@@ -1385,14 +1400,6 @@ inPeriod(Then, Now, Period) ->
%%% ------------------------------------------------------
%%% Error and progress reporting.
%%% ------------------------------------------------------
-
-report_error(Error, Reason, Child, SupName) ->
- ErrorMsg = [{supervisor, SupName},
- {errorContext, Error},
- {reason, Reason},
- {offender, extract_child(Child)}],
- error_logger:error_report(supervisor_report, ErrorMsg).
-
extract_child(Child) when is_list(Child#child.pid) ->
[{nb_children, length(Child#child.pid)},
{id, Child#child.id},
@@ -1409,9 +1416,13 @@ extract_child(Child) ->
{child_type, Child#child.child_type}].
report_progress(Child, SupName) ->
- Progress = [{supervisor, SupName},
- {started, extract_child(Child)}],
- error_logger:info_report(progress, Progress).
+ ?LOG_INFO(#{label=>{supervisor,progress},
+ report=>[{supervisor,SupName},
+ {started,extract_child(Child)}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
format_status(terminate, [_PDict, State]) ->
State;
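A short note on the metadata attached by the ?report_error macro above: report_cb is a fun that turns the report term into a {Format, Args} pair, so a handler or formatter that knows nothing about supervisor reports can still render them, and domain=>[beam,erlang,otp,sasl] lets handlers filter these events as SASL reports. A minimal sketch (not part of the patch) of how such a callback is expected to be used; format_report_msg/2 is an invented name, not an OTP function.

    format_report_msg({report, Report}, #{report_cb := ReportCb}) ->
        %% The callback, e.g. logger:format_otp_report/1, returns {Format, Args}.
        {Format, Args} = ReportCb(Report),
        io_lib:format(Format, Args);
    format_report_msg({report, Report}, _MetaWithoutCb) ->
        %% Fall back to a plain print when no report_cb is present.
        io_lib:format("~tp~n", [Report]).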
diff --git a/lib/stdlib/src/supervisor_bridge.erl b/lib/stdlib/src/supervisor_bridge.erl
index af1e046d30..39372935fa 100644
--- a/lib/stdlib/src/supervisor_bridge.erl
+++ b/lib/stdlib/src/supervisor_bridge.erl
@@ -21,6 +21,8 @@
-behaviour(gen_server).
+-include("logger.hrl").
+
%% External exports
-export([start_link/2, start_link/3]).
%% Internal exports
@@ -129,13 +131,22 @@ terminate_pid(Reason, #state{mod = Mod, child_state = ChildState}) ->
Mod:terminate(Reason, ChildState).
report_progress(Pid, Mod, StartArgs, SupName) ->
- Progress = [{supervisor, SupName},
- {started, [{pid, Pid}, {mfa, {Mod, init, [StartArgs]}}]}],
- error_logger:info_report(progress, Progress).
+ ?LOG_INFO(#{label=>{supervisor,progress},
+ report=>[{supervisor, SupName},
+ {started, [{pid, Pid},
+ {mfa, {Mod, init, [StartArgs]}}]}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
report_error(Error, Reason, #state{name = Name, pid = Pid, mod = Mod}) ->
- ErrorMsg = [{supervisor, Name},
- {errorContext, Error},
- {reason, Reason},
- {offender, [{pid, Pid}, {mod, Mod}]}],
- error_logger:error_report(supervisor_report, ErrorMsg).
+ ?LOG_ERROR(#{label=>{supervisor,error},
+ report=>[{supervisor, Name},
+ {errorContext, Error},
+ {reason, Reason},
+ {offender, [{pid, Pid}, {mod, Mod}]}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"SUPERVISOR REPORT"},
+ error_logger=>#{tag=>error_report,type=>supervisor_report}}).
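The error_logger=>#{tag=>...,type=>...} metadata set above is what lets legacy gen_event report handlers keep receiving these events in the old error_logger format. A minimal sketch (not part of the patch) of the gen_event callbacks such a legacy handler would implement; the formatting in the bodies is illustrative only.

    %% Old-style error_logger events that the tag/type metadata maps back to.
    handle_event({info_report, _GL, {_Pid, progress, Report}}, State) ->
        io:format("PROGRESS: ~tp~n", [Report]),
        {ok, State};
    handle_event({error_report, _GL, {_Pid, supervisor_report, Report}}, State) ->
        io:format("SUPERVISOR REPORT: ~tp~n", [Report]),
        {ok, State};
    handle_event(_Other, State) ->
        {ok, State}.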
diff --git a/lib/stdlib/test/error_logger_h_SUITE.erl b/lib/stdlib/test/error_logger_h_SUITE.erl
index 9dc04f27a1..d533305939 100644
--- a/lib/stdlib/test/error_logger_h_SUITE.erl
+++ b/lib/stdlib/test/error_logger_h_SUITE.erl
@@ -62,6 +62,7 @@ logfile(Config) ->
error_logger:logfile({open,Log}),
ok = rpc:call(Node, erlang, apply, [fun gen_events/1,[Ev]]),
AtNode = iolist_to_binary(["** at node ",atom_to_list(Node)," **"]),
+ timer:sleep(1000), % allow some time for all log events to reach the log
error_logger:logfile(close),
analyse_events(Log, Ev, [AtNode], unlimited),
@@ -124,6 +125,7 @@ tty(Config) ->
ok = rpc:call(Node, erlang, apply, [fun gen_events/1,[Ev]]),
tty_log_close(),
AtNode = iolist_to_binary(["** at node ",atom_to_list(Node)," **"]),
+ timer:sleep(1000), % allow some time for all log events to reach the log
analyse_events(Log, Ev, [AtNode], unlimited),
test_server:stop_node(Node),
@@ -207,7 +209,7 @@ event_templates() ->
gen_events(Ev) ->
io:format("node = ~p\n", [node()]),
io:format("group leader = ~p\n", [group_leader()]),
- io:format("~p\n", [gen_event:which_handlers(error_logger)]),
+ io:format("~p\n", [error_logger:which_report_handlers()]),
call_error_logger(Ev),
{Pid,Ref} = spawn_monitor(fun() -> error(ouch) end),
@@ -240,6 +242,7 @@ analyse_events(Log, Ev, AtNode, Depth) ->
call_error_logger([{F,Args}|T]) ->
apply(error_logger, F, Args),
+ timer:sleep(10),
call_error_logger(T);
call_error_logger([]) -> ok.
diff --git a/lib/stdlib/test/proc_lib_SUITE.erl b/lib/stdlib/test/proc_lib_SUITE.erl
index fbdcb518b2..81bf9020b8 100644
--- a/lib/stdlib/test/proc_lib_SUITE.erl
+++ b/lib/stdlib/test/proc_lib_SUITE.erl
@@ -542,16 +542,17 @@ system_terminate(Reason,_Parent,_Deb,_State) ->
t_format(_Config) ->
- error_logger:tty(false),
+ logger:add_handler_filter(logger_std_h,stop_all,{fun(_,_) -> stop end,ok}),
+ error_logger:add_report_handler(?MODULE, self()),
try
t_format()
after
- error_logger:tty(true)
+ error_logger:delete_report_handler(?MODULE),
+ logger:remove_handler_filter(logger_std_h,stop_all)
end,
ok.
t_format() ->
- error_logger:add_report_handler(?MODULE, self()),
Pid = proc_lib:spawn(fun '\x{aaa}t_format_looper'/0),
HugeData = gb_sets:from_list(lists:seq(1, 100)),
SomeData1 = list_to_atom([246]),
@@ -584,11 +585,11 @@ t_format() ->
ok.
t_format_arbitrary(_Config) ->
- error_logger:tty(false),
+ logger:add_handler_filter(logger_std_h,stop_all,{fun(_,_) -> stop end,ok}),
try
t_format_arbitrary()
after
- error_logger:tty(true)
+ logger:remove_handler_filter(logger_std_h,stop_all)
end,
ok.
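The tests above silence the default handler by adding a filter that stops every event, rather than calling error_logger:tty(false). A minimal sketch (not part of the patch) of that pattern wrapped in a helper; with_silent_std_h/1 is an invented name, while logger_std_h is the default handler id used throughout this patch.

    with_silent_std_h(Fun) ->
        %% Stop every event before it reaches the default standard_io handler.
        logger:add_handler_filter(logger_std_h, stop_all,
                                  {fun(_Event, _Extra) -> stop end, ok}),
        try
            Fun()
        after
            logger:remove_handler_filter(logger_std_h, stop_all)
        end.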