Diffstat (limited to 'lib/kernel/src')
-rw-r--r--   lib/kernel/src/Makefile                    |  29
-rw-r--r--   lib/kernel/src/application_controller.erl  |  35
-rw-r--r--   lib/kernel/src/code_server.erl             |  14
-rw-r--r--   lib/kernel/src/error_logger.erl            | 545
-rw-r--r--   lib/kernel/src/kernel.app.src              |  15
-rw-r--r--   lib/kernel/src/kernel.erl                  |  26
-rw-r--r--   lib/kernel/src/logger.erl                  | 803
-rw-r--r--   lib/kernel/src/logger_backend.erl          | 133
-rw-r--r--   lib/kernel/src/logger_config.erl           | 151
-rw-r--r--   lib/kernel/src/logger_disk_log_h.erl       | 694
-rw-r--r--   lib/kernel/src/logger_filters.erl          | 123
-rw-r--r--   lib/kernel/src/logger_formatter.erl        | 295
-rw-r--r--   lib/kernel/src/logger_h_common.erl         | 301
-rw-r--r--   lib/kernel/src/logger_h_common.hrl         | 262
-rw-r--r--   lib/kernel/src/logger_internal.hrl         |  98
-rw-r--r--   lib/kernel/src/logger_server.erl           | 440
-rw-r--r--   lib/kernel/src/logger_simple.erl           | 236
-rw-r--r--   lib/kernel/src/logger_std_h.erl            | 799
-rw-r--r--   lib/kernel/src/logger_sup.erl              |  53
19 files changed, 4749 insertions(+), 303 deletions(-)
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
index 0bc9f121a0..702845512c 100644
--- a/lib/kernel/src/Makefile
+++ b/lib/kernel/src/Makefile
@@ -109,6 +109,17 @@ MODULES = \
kernel_refc \
local_udp \
local_tcp \
+ logger \
+ logger_backend \
+ logger_config \
+ logger_std_h \
+ logger_disk_log_h \
+ logger_h_common \
+ logger_filters \
+ logger_formatter \
+ logger_server \
+ logger_simple \
+ logger_sup \
net \
net_adm \
net_kernel \
@@ -132,13 +143,14 @@ MODULES = \
HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \
../include/dist.hrl ../include/dist_util.hrl \
- ../include/net_address.hrl
+ ../include/net_address.hrl ../include/logger.hrl
INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
erl_epmd.hrl hipe_ext_format.hrl \
inet_dns.hrl inet_res.hrl \
inet_boot.hrl inet_config.hrl inet_int.hrl \
- inet_dns_record_adts.hrl
+ inet_dns_record_adts.hrl \
+ logger_internal.hrl logger_h_common.hrl
ERL_FILES= $(MODULES:%=%.erl)
@@ -223,7 +235,7 @@ release_docs_spec:
# Include dependencies -- list below added by Kostis Sagonas
-$(EBIN)/application_controller.beam: application_master.hrl
+$(EBIN)/application_controller.beam: application_master.hrl ../include/logger.hrl
$(EBIN)/application_master.beam: application_master.hrl
$(EBIN)/auth.beam: ../include/file.hrl
$(EBIN)/code.beam: ../include/file.hrl
@@ -234,6 +246,7 @@ $(EBIN)/disk_log_server.beam: disk_log.hrl
$(EBIN)/dist_util.beam: ../include/dist_util.hrl ../include/dist.hrl
$(EBIN)/erl_boot_server.beam: inet_boot.hrl
$(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl
+$(EBIN)/error_logger.beam: logger_internal.hrl ../include/logger.hrl
$(EBIN)/file.beam: ../include/file.hrl file_int.hrl
$(EBIN)/file_io_server.beam: ../include/file.hrl file_int.hrl
$(EBIN)/gen_tcp.beam: inet_int.hrl
@@ -259,6 +272,16 @@ $(EBIN)/inet_udp.beam: inet_int.hrl
$(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl
$(EBIN)/local_udp.beam: inet_int.hrl
$(EBIN)/local_tcp.beam: inet_int.hrl
+$(EBIN)/logger.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_backend.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_config.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_simple.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl
$(EBIN)/net_kernel.beam: ../include/net_address.hrl
$(EBIN)/os.beam: ../include/file.hrl
$(EBIN)/ram_file.beam: ../include/file.hrl
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index 3b642f5873..b9cb722575 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -44,6 +44,7 @@
keyfind/3, keydelete/3, keyreplace/4]).
-include("application_master.hrl").
+-include("logger.hrl").
-define(AC, ?MODULE). % Name of process
@@ -1546,9 +1547,8 @@ do_change_apps(Applications, Config, OldAppls) ->
%% Report errors, but do not terminate
%% (backwards compatible behaviour)
lists:foreach(fun({error, {SysFName, Line, Str}}) ->
- Str2 = lists:flatten(io_lib:format("~tp: ~w: ~ts~n",
- [SysFName, Line, Str])),
- error_logger:format(Str2, [])
+ ?LOG_ERROR("~tp: ~w: ~ts~n",[SysFName, Line, Str],
+ #{error_logger=>#{tag=>error}})
end,
Errors),
@@ -1631,8 +1631,9 @@ make_term(Str) ->
end.
handle_make_term_error(Mod, Reason, Str) ->
- error_logger:format("application_controller: ~ts: ~ts~n",
- [Mod:format_error(Reason), Str]),
+ ?LOG_ERROR("application_controller: ~ts: ~ts~n",
+ [Mod:format_error(Reason), Str],
+ #{error_logger=>#{tag=>error}}),
throw({error, {bad_environment_value, Str}}).
get_env_i(Name, #state{conf_data = ConfData}) when is_list(ConfData) ->
@@ -1913,19 +1914,25 @@ config_error() ->
"configuration file must contain ONE list ended by <dot>"}}.
%%-----------------------------------------------------------------
-%% Info messages sent to error_logger
+%% Info messages sent to logger
%%-----------------------------------------------------------------
info_started(Name, Node) ->
- Rep = [{application, Name},
- {started_at, Node}],
- error_logger:info_report(progress, Rep).
+ ?LOG_INFO(#{label=>{application_controller,progress},
+ report=>[{application, Name},
+ {started_at, Node}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
info_exited(Name, Reason, Type) ->
- Rep = [{application, Name},
- {exited, Reason},
- {type, Type}],
- error_logger:info_report(Rep).
-
+ ?LOG_INFO(#{label=>{application_controller,exit},
+ report=>[{application, Name},
+ {exited, Reason},
+ {type, Type}]},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun logger:format_otp_report/1,
+ error_logger=>#{tag=>info_report,type=>std_info}}).
%%-----------------------------------------------------------------
%% Reply to all processes waiting this application to be started.
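Note: the progress and exit reports above are no longer sent with error_logger:info_report/1,2; they go through the ?LOG_INFO macro with domain, report_cb and error_logger metadata, so both the new logger handlers and the old error_logger event stream still see them. Below is a minimal sketch (not part of this diff) of how such domain-tagged events could be dropped with the primary filter API introduced in logger.erl further down; the filter id no_progress is an arbitrary example name, and the kernel itself uses logger_filters:progress/2 for the same purpose (see get_logger_filters/0 in logger.erl).

    %% Drop every event whose domain metadata contains 'sasl', i.e. the
    %% progress reports emitted by info_started/2 above; let everything
    %% else fall through to the remaining filters.
    ok = logger:add_logger_filter(
           no_progress,
           {fun(#{meta := #{domain := Domain}}, _Extra) ->
                    case lists:member(sasl, Domain) of
                        true  -> stop;
                        false -> ignore
                    end;
               (_Log, _Extra) ->
                    ignore
            end, []}).
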
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
index f5a890cb95..bbfa2a995d 100644
--- a/lib/kernel/src/code_server.erl
+++ b/lib/kernel/src/code_server.erl
@@ -1434,14 +1434,20 @@ all_loaded(Db) ->
-spec error_msg(io:format(), [term()]) -> 'ok'.
error_msg(Format, Args) ->
- Msg = {notify,{error, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,error,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>error}}},
ok.
-spec info_msg(io:format(), [term()]) -> 'ok'.
info_msg(Format, Args) ->
- Msg = {notify,{info_msg, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,info,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>info_msg}}},
ok.
objfile_extension() ->
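Note: code_server posts the raw {log,Level,Format,Args,Meta} tuple straight to the registered logger process, just as it previously posted raw notify messages to error_logger. For ordinary code the same event would be produced through the public interface; a hedged sketch (module and function names are illustrative, not part of this diff):

    -module(code_log_example).
    -export([error_msg/2]).

    %% Same kind of event as code_server:error_msg/2 above, but produced
    %% through logger:log/4, which fills in pid, gl and time by itself
    %% (see add_default_metadata/1 in logger.erl below).
    error_msg(Format, Args) ->
        logger:log(error, Format, Args, #{error_logger => #{tag => error}}).
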
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index 585507c545..0706220a94 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -19,22 +19,23 @@
%%
-module(error_logger).
--export([start/0,start_link/0,format/2,error_msg/1,error_msg/2,error_report/1,
+-include("logger_internal.hrl").
+
+-export([start/0,start_link/0,stop/0,
+ format/2,error_msg/1,error_msg/2,error_report/1,
error_report/2,info_report/1,info_report/2,warning_report/1,
warning_report/2,error_info/1,
info_msg/1,info_msg/2,warning_msg/1,warning_msg/2,
- logfile/1,tty/1,swap_handler/1,
+ logfile/1,tty/1,
add_report_handler/1,add_report_handler/2,
- delete_report_handler/1]).
+ delete_report_handler/1,
+ which_report_handlers/0]).
--export([init/1,
- handle_event/2, handle_call/2, handle_info/2,
- terminate/2]).
+%% logger callbacks
+-export([adding_handler/2, removing_handler/1, log/2]).
-export([get_format_depth/0, limit_term/1]).
--define(buffer_size, 10).
-
%%-----------------------------------------------------------------
%% Types used in this file
%%-----------------------------------------------------------------
@@ -43,8 +44,6 @@
| 'info' | 'info_msg' | 'info_report'
| 'warning_msg' | 'warning_report'.
--type state() :: {non_neg_integer(), non_neg_integer(), [term()]}.
-
%%% BIF
-export([warning_map/0]).
@@ -59,26 +58,137 @@ warning_map() ->
%%-----------------------------------------------------------------
--spec start() -> {'ok', pid()} | {'error', any()}.
+%%%-----------------------------------------------------------------
+%%% Start the event manager process under logger_sup, which is part of
+%%% the kernel application's supervision tree.
+-spec start() -> 'ok' | {'error', any()}.
start() ->
- case gen_event:start({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ case whereis(?MODULE) of
+ undefined ->
+ ErrorLogger =
+ #{id => ?MODULE,
+ start => {?MODULE, start_link, []},
+ restart => transient,
+ shutdown => 2000,
+ type => worker,
+ modules => dynamic},
+ case supervisor:start_child(logger_sup, ErrorLogger) of
+ {ok,_} ->
+ ok;
+ Error ->
+ Error
+ end;
+ _ ->
+ ok
end.
+%%%-----------------------------------------------------------------
+%%% Start callback specified in child specification to supervisor, see start/0
-spec start_link() -> {'ok', pid()} | {'error', any()}.
start_link() ->
- case gen_event:start_link({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ gen_event:start_link({local, ?MODULE},
+ [{spawn_opt,[{message_queue_data, off_heap}]}]).
+
+%%%-----------------------------------------------------------------
+%%% Stop the event manager
+-spec stop() -> ok.
+stop() ->
+ _ = supervisor:terminate_child(logger_sup,?MODULE),
+ _ = supervisor:delete_child(logger_sup,?MODULE),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for logger
+-spec adding_handler(logger:handler_id(),logger:config()) ->
+ {ok,logger:config()} | {error,term()}.
+adding_handler(?MODULE,Config) ->
+ case start() of
+ ok ->
+ {ok,Config};
+ Error ->
+ Error
end.
+-spec removing_handler(logger:handler_id()) -> ok.
+removing_handler(?MODULE) ->
+ stop(),
+ ok.
+
+-spec log(logger:log(),logger:config()) -> ok.
+log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) ->
+ do_log(Level,Msg,Meta).
+
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag,type:=Type}}=Meta) ->
+ %% From error_logger:*_report/1,2, or logger call which added
+ %% error_logger data to obtain backwards compatibility with
+ %% error_logger:*_report/1,2
+ Report =
+ case Msg of
+ #{label:=_,report:=R} -> R;
+ _ -> Msg
+ end,
+ notify(Level,Tag,Type,Report,Meta);
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag}}=Meta) ->
+ {Format,Args} =
+ case Msg of
+ #{label:=_,format:=F,args:=A} ->
+ %% From error_logger:*_msg/1,2.
+ %% In order to be backwards compatible with handling
+ %% of faulty parameters to error_logger:*_msg/1,2,
+ %% don't use report_cb here.
+ {F,A};
+ _ ->
+ %% From logger call which added error_logger data to
+ %% obtain backwards compatibility with error_logger:*_msg/1,2
+ RCBFun=maps:get(report_cb,Meta,fun logger:format_report/1),
+ try RCBFun(Msg) of
+ {F,A} when is_list(F), is_list(A) ->
+ {F,A};
+ Other ->
+ {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]}
+ catch C:R ->
+ {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]}
+ end
+ end,
+ notify(Level,Tag,Format,Args,Meta);
+do_log(Level,{Format,Args},#{?MODULE:=#{tag:=Tag}}=Meta)
+ when is_list(Format), is_list(Args) ->
+ %% From logger call which added error_logger data to obtain
+ %% backwards compatibility with error_logger:*_msg/1,2
+ notify(Level,Tag,Format,Args,Meta);
+do_log(_Level,_Msg,_Meta) ->
+ %% Ignore the rest - i.e. to get backwards compatibility with
+ %% error_logger, you must use the error_logger API for logging.
+ %% Some modules within OTP go around this by adding an
+ %% error_logger field to its metadata. This is done only to allow
+ %% complete backwards compatibility for log events originating
+ %% from within OTP, while still using the new logger interface.
+ ok.
+
+-spec notify(logger:level(), msg_tag(), any(), any(), map()) -> 'ok'.
+notify(Level,Tag0,FormatOrType0,ArgsOrReport,#{pid:=Pid0,gl:=GL,?MODULE:=My}) ->
+ Tag = fix_warning_tag(Level,Tag0),
+ Pid = case maps:get(emulator,My,false) of
+ true -> emulator;
+ _ -> Pid0
+ end,
+ FormatOrType = fix_warning_type(Level,FormatOrType0),
+ gen_event:notify(?MODULE,{Tag,GL,{Pid,FormatOrType,ArgsOrReport}}).
+
+%% This is to fix the case when the client has explicitly added the
+%% error logger tag and type in metadata, and not checked the warning map.
+fix_warning_tag(error,warning_msg) -> error;
+fix_warning_tag(error,warning_report) -> error_report;
+fix_warning_tag(info,warning_msg) -> info_msg;
+fix_warning_tag(info,warning_report) -> info_report;
+fix_warning_tag(_,Tag) -> Tag.
+
+fix_warning_type(error,std_warning) -> std_error;
+fix_warning_type(info,std_warning) -> std_info;
+fix_warning_type(_,Type) -> Type.
+
%%-----------------------------------------------------------------
%% These two simple old functions generate events tagged 'error'
%% Used for simple messages; error or information.
@@ -95,14 +205,18 @@ error_msg(Format) ->
Data :: list().
error_msg(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_msg},
+ format=>Format,
+ args=>Args},
+ meta(error)).
-spec format(Format, Data) -> 'ok' when
Format :: string(),
Data :: list().
format(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ error_msg(Format, Args).
%%-----------------------------------------------------------------
%% This functions should be used for error reports. Events
@@ -124,7 +238,10 @@ error_report(Report) ->
Report :: report().
error_report(Type, Report) ->
- notify({error_report, group_leader(), {self(), Type, Report}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_report},
+ report=>Report},
+ meta(error_report,Type)).
%%-----------------------------------------------------------------
%% This function should be used for warning reports.
@@ -146,7 +263,8 @@ warning_report(Report) ->
Report :: report().
warning_report(Type, Report) ->
- {Tag, NType} = case error_logger:warning_map() of
+ Level = error_logger:warning_map(),
+ {Tag, NType} = case Level of
info ->
if
Type =:= std_warning ->
@@ -164,7 +282,10 @@ warning_report(Type, Report) ->
{error_report, Type}
end
end,
- notify({Tag, group_leader(), {self(), NType, Report}}).
+ logger:log(Level,
+ #{label=>{?MODULE,warning_report},
+ report=>Report},
+ meta(Tag,NType)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -183,7 +304,8 @@ warning_msg(Format) ->
Data :: list().
warning_msg(Format, Args) ->
- Tag = case error_logger:warning_map() of
+ Level = error_logger:warning_map(),
+ Tag = case Level of
warning ->
warning_msg;
info ->
@@ -191,7 +313,11 @@ warning_msg(Format, Args) ->
error ->
error
end,
- notify({Tag, group_leader(), {self(), Format, Args}}).
+ logger:log(Level,
+ #{label=>{?MODULE,warning_msg},
+ format=>Format,
+ args=>Args},
+ meta(Tag)).
%%-----------------------------------------------------------------
%% This function should be used for information reports. Events
@@ -210,7 +336,10 @@ info_report(Report) ->
Report :: report().
info_report(Type, Report) ->
- notify({info_report, group_leader(), {self(), Type, Report}}).
+ logger:log(info,
+ #{label=>{?MODULE,info_report},
+ report=>Report},
+ meta(info_report,Type)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -228,7 +357,11 @@ info_msg(Format) ->
Data :: list().
info_msg(Format, Args) ->
- notify({info_msg, group_leader(), {self(), Format, Args}}).
+ logger:log(info,
+ #{label=>{?MODULE,info_msg},
+ format=>Format,
+ args=>Args},
+ meta(info_msg)).
%%-----------------------------------------------------------------
%% Used by the init process. Events are tagged 'info'.
@@ -236,38 +369,75 @@ info_msg(Format, Args) ->
-spec error_info(Error :: any()) -> 'ok'.
+%% unused?
error_info(Error) ->
- notify({info, group_leader(), {self(), Error, []}}).
-
--spec notify({msg_tag(), pid(), {pid(), any(), any()}}) -> 'ok'.
-
-notify(Msg) ->
- gen_event:notify(error_logger, Msg).
-
--type swap_handler_type() :: 'false' | 'silent' | 'tty' | {'logfile', string()}.
--spec swap_handler(Type :: swap_handler_type()) -> any().
-
-swap_handler(tty) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_tty_h, []}),
- ok = simple_logger(),
- R;
-swap_handler({logfile, File}) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_file_h, File}),
- ok = simple_logger(),
- R;
-swap_handler(silent) ->
- _ = gen_event:delete_handler(error_logger, error_logger, delete),
- ok = simple_logger();
-swap_handler(false) ->
- ok. % keep primitive event handler as-is
+ {Format,Args} =
+ case string_p(Error) of
+ true -> {Error,[]};
+ false -> {"~p",[Error]}
+ end,
+ MyMeta = #{tag=>info,type=>Error},
+ logger:log(info, Format, Args, #{?MODULE=>MyMeta,domain=>[Error]}).
+
+%%-----------------------------------------------------------------
+%% Create metadata
+meta(Tag) ->
+ meta(Tag,undefined).
+meta(Tag,Type) ->
+ meta(Tag,Type,#{report_cb=>fun report_to_format/1}).
+meta(Tag,undefined,Meta0) ->
+ Meta0#{?MODULE=>#{tag=>Tag}};
+meta(Tag,Type,Meta0) ->
+ maybe_add_domain(Tag,Type,Meta0#{?MODULE=>#{tag=>Tag,type=>Type}}).
+
+%% This is to prevent events of non standard type from being printed
+%% with the standard logger. Similar to how error_logger_tty_h
+%% discards events of non standard type.
+maybe_add_domain(error_report,std_error,Meta) -> Meta;
+maybe_add_domain(info_report,std_info,Meta) -> Meta;
+maybe_add_domain(warning_report,std_warning,Meta) -> Meta;
+maybe_add_domain(_,Type,Meta) -> Meta#{domain=>[Type]}.
+
+%% -----------------------------------------------------------------
+%% Report formatting - i.e. Term => {Format,Args}
+%% This was earlier done in the event handler (error_logger_tty_h, etc)
+%% -----------------------------------------------------------------
+report_to_format(#{label:={?MODULE,_},
+ report:=Report}) when is_map(Report) ->
+ %% logger:format_otp_report does maps:to_list, and for backwards
+ %% compatibility reasons we don't want that.
+ {"~tp\n",[Report]};
+report_to_format(#{label:={?MODULE,_},
+ format:=Format,
+ args:=Args}) ->
+ %% This is not efficient, but needed for backwards compatibility
+ %% in giving faulty arguments to the *_msg functions.
+ try io_lib:scan_format(Format,Args) of
+ _ -> {Format,Args}
+ catch _:_ ->
+ {"ERROR: ~tp - ~tp",[Format,Args]}
+ end;
+report_to_format(Term) ->
+ logger:format_otp_report(Term).
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_list(FlatList).
+
+%% -----------------------------------------------------------------
+%% Stuff directly related to the event manager
+%% -----------------------------------------------------------------
-spec add_report_handler(Handler) -> any() when
Handler :: module().
add_report_handler(Module) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, []).
+ add_report_handler(Module, []).
-spec add_report_handler(Handler, Args) -> Result when
Handler :: module(),
@@ -275,24 +445,37 @@ add_report_handler(Module) when is_atom(Module) ->
Result :: gen_event:add_handler_ret().
add_report_handler(Module, Args) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, Args).
+ _ = logger:add_handler(?MODULE,?MODULE,#{level=>info,filter_default=>log}),
+ gen_event:add_handler(?MODULE, Module, Args).
-spec delete_report_handler(Handler) -> Result when
Handler :: module(),
Result :: gen_event:del_handler_ret().
delete_report_handler(Module) when is_atom(Module) ->
- gen_event:delete_handler(error_logger, Module, []).
-
-%% Start the lowest level error_logger handler with Buffer.
-
-simple_logger(Buffer_size) when is_integer(Buffer_size) ->
- gen_event:add_handler(error_logger, error_logger, Buffer_size).
-
-%% Start the lowest level error_logger handler without Buffer.
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ Return = gen_event:delete_handler(?MODULE, Module, []),
+ case gen_event:which_handlers(?MODULE) of
+ [] ->
+ %% Don't want a lot of logs here if it's not needed
+ _ = logger:remove_handler(?MODULE),
+ ok;
+ _ ->
+ ok
+ end,
+ Return;
+ _ ->
+ ok
+ end.
-simple_logger() ->
- gen_event:add_handler(error_logger, error_logger, []).
+which_report_handlers() ->
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ gen_event:which_handlers(?MODULE);
+ undefined ->
+ []
+ end.
%% Log all errors to File for all eternity
@@ -308,26 +491,35 @@ simple_logger() ->
FilenameReason :: no_log_file.
logfile({open, File}) ->
- case lists:member(error_logger_file_h,
- gen_event:which_handlers(error_logger)) of
+ case lists:member(error_logger_file_h,which_report_handlers()) of
true ->
{error, allready_have_logfile};
_ ->
- gen_event:add_handler(error_logger, error_logger_file_h, File)
+ add_report_handler(error_logger_file_h, File)
end;
logfile(close) ->
- case gen_event:delete_handler(error_logger, error_logger_file_h, normal) of
- {error,Reason} ->
- {error,Reason};
- _ ->
- ok
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:delete_handler(?MODULE, error_logger_file_h, normal) of
+ {error,Reason} ->
+ {error,Reason};
+ _ ->
+ ok
+ end;
+ _ ->
+ {error,module_not_found}
end;
logfile(filename) ->
- case gen_event:call(error_logger, error_logger_file_h, filename) of
- {error,_} ->
- {error, no_log_file};
- Val ->
- Val
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:call(?MODULE, error_logger_file_h, filename) of
+ {error,_} ->
+ {error, no_log_file};
+ Val ->
+ Val
+ end;
+ _ ->
+ {error, no_log_file}
end.
%% Possibly turn off all tty printouts, maybe we only want the errors
@@ -337,193 +529,17 @@ logfile(filename) ->
Flag :: boolean().
tty(true) ->
- Hs = gen_event:which_handlers(error_logger),
- case lists:member(error_logger_tty_h, Hs) of
+ case lists:member(error_logger_tty_h, which_report_handlers()) of
false ->
- gen_event:add_handler(error_logger, error_logger_tty_h, []);
- true ->
+ add_report_handler(error_logger_tty_h, []);
+ true ->
ignore
end,
ok;
tty(false) ->
- gen_event:delete_handler(error_logger, error_logger_tty_h, []),
- ok.
-
+ delete_report_handler(error_logger_tty_h).
-%%% ---------------------------------------------------
-%%% This is the default error_logger handler.
-%%% ---------------------------------------------------
-
--spec init(term()) -> {'ok', state() | []}.
-
-init(Max) when is_integer(Max) ->
- {ok, {Max, 0, []}};
-%% This one is called if someone took over from us, and now wants to
-%% go back.
-init({go_back, _PostState}) ->
- {ok, {?buffer_size, 0, []}};
-init(_) ->
- %% The error logger process may receive a huge amount of
- %% messages. Make sure that they are stored off heap to
- %% avoid exessive GCs.
- process_flag(message_queue_data, off_heap),
- {ok, []}.
-
--spec handle_event(term(), state()) -> {'ok', state()}.
-
-handle_event({Type, GL, Msg}, State) when node(GL) =/= node() ->
- gen_event:notify({error_logger, node(GL)},{Type, GL, Msg}),
- %% handle_event2({Type, GL, Msg}, State); %% Shall we do something
- {ok, State}; %% at this node too ???
-handle_event({info_report, _, {_, Type, _}}, State) when Type =/= std_info ->
- {ok, State}; %% Ignore other info reports here
-handle_event(Event, State) ->
- handle_event2(Event, State).
-
--spec handle_info(term(), state()) -> {'ok', state()}.
-
-handle_info({emulator, GL, Chars}, State) when node(GL) =/= node() ->
- {error_logger, node(GL)} ! {emulator, GL, add_node(Chars,self())},
- {ok, State};
-handle_info({emulator, GL, Chars}, State) ->
- handle_event2({emulator, GL, Chars}, State);
-handle_info(_, State) ->
- {ok, State}.
-
--spec handle_call(term(), state()) -> {'ok', {'error', 'bad_query'}, state()}.
-
-handle_call(_Query, State) -> {ok, {error, bad_query}, State}.
-
--spec terminate(term(), state()) -> {'error_logger', [term()]}.
-
-terminate(swap, {_, 0, Buff}) ->
- {error_logger, Buff};
-terminate(swap, {_, Lost, Buff}) ->
- Myevent = {info, group_leader(), {self(), {lost_messages, Lost}, []}},
- {error_logger, [tag_event(Myevent)|Buff]};
-terminate(_, _) ->
- {error_logger, []}.
-
-handle_event2(Event, {1, Lost, Buff}) ->
- display(tag_event(Event)),
- {ok, {1, Lost+1, Buff}};
-handle_event2(Event, {N, Lost, Buff}) ->
- Tagged = tag_event(Event),
- display(Tagged),
- {ok, {N-1, Lost, [Tagged|Buff]}};
-handle_event2(_, State) ->
- {ok, State}.
-
-tag_event(Event) ->
- {erlang:localtime(), Event}.
-
-display({Tag,{error,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{error_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info,_,{_,Error,_}}}) ->
- display2(Tag,Error,[]);
-display({Tag,{info_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{warning_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{warning_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{emulator,_,Chars}}) ->
- display2(Tag,Chars,[]).
-
-add_node(X, Pid) when is_atom(X) ->
- add_node(atom_to_list(X), Pid);
-add_node(X, Pid) ->
- lists:concat([X,"** at node ",node(Pid)," **~n"]).
-
-%% Can't do io_lib:format
-
-display2({{_Y,_Mo,_D},{_H,_Mi,_S}} = Date, F, A) ->
- display_date(Date),
- display3(string_p(F), F, A).
-
-display_date({{Y,Mo,D},{H,Mi,S}}) ->
- erlang:display_string(
- integer_to_list(Y) ++ "-" ++
- two_digits(Mo) ++ "-" ++
- two_digits(D) ++ " " ++
- two_digits(H) ++ ":" ++
- two_digits(Mi) ++ ":" ++
- two_digits(S) ++ " ").
-
-two_digits(N) when 0 =< N, N =< 9 ->
- [$0, $0 + N];
-two_digits(N) ->
- integer_to_list(N).
-
-display3(true, F, A) ->
- %% Format string with arguments
- erlang:display_string(F ++ "\n"),
- [begin
- erlang:display_string("\t"),
- erlang:display(Arg)
- end || Arg <- A],
- ok;
-display3(false, Atom, A) when is_atom(Atom) ->
- %% The widest atom seems to be 'supervisor_report' at 17.
- ColumnWidth = 20,
- AtomString = atom_to_list(Atom),
- AtomLength = length(AtomString),
- Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
- erlang:display_string(AtomString ++ Padding),
- display4(A);
-display3(_, F, A) ->
- erlang:display({F, A}).
-
-display4([A, []]) ->
- %% Not sure why crash reports look like this.
- display4(A);
-display4(A = [_|_]) ->
- case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
- true ->
- erlang:display_string("\n"),
- lists:foreach(
- fun({Key, Value}) ->
- erlang:display_string(
- " " ++
- atom_to_list(Key) ++
- ": "),
- erlang:display(Value)
- end, A);
- false ->
- erlang:display(A)
- end;
-display4(A) ->
- erlang:display(A).
-
-string_p([]) ->
- false;
-string_p(Term) ->
- string_p1(Term).
-
-string_p1([H|T]) when is_integer(H), H >= $\040, H =< $\176 ->
- string_p1(T);
-string_p1([H|T]) when is_integer(H), H >= 16#A0, H < 16#D800;
- is_integer(H), H > 16#DFFF, H < 16#FFFE;
- is_integer(H), H > 16#FFFF, H =< 16#10FFFF ->
- string_p1(T);
-string_p1([$\n|T]) -> string_p1(T);
-string_p1([$\r|T]) -> string_p1(T);
-string_p1([$\t|T]) -> string_p1(T);
-string_p1([$\v|T]) -> string_p1(T);
-string_p1([$\b|T]) -> string_p1(T);
-string_p1([$\f|T]) -> string_p1(T);
-string_p1([$\e|T]) -> string_p1(T);
-string_p1([H|T]) when is_list(H) ->
- case string_p1(H) of
- true -> string_p1(T);
- _ -> false
- end;
-string_p1([]) -> true;
-string_p1(_) -> false.
+%%%-----------------------------------------------------------------
-spec limit_term(term()) -> term().
@@ -536,9 +552,4 @@ limit_term(Term) ->
-spec get_format_depth() -> 'unlimited' | pos_integer().
get_format_depth() ->
- case application:get_env(kernel, error_logger_format_depth) of
- {ok, Depth} when is_integer(Depth) ->
- max(10, Depth);
- undefined ->
- unlimited
- end.
+ logger:get_format_depth().
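Note: with this rewrite error_logger itself becomes an optional logger handler. add_report_handler/1,2 first installs error_logger under logger (level info, filter_default log) and then attaches the legacy gen_event handler, while notify/5 above re-creates the classic {Tag, GL, {Pid, FormatOrType, ArgsOrReport}} event tuples for it. A minimal sketch of such a legacy report handler, assuming only the gen_event behaviour; the module name my_report_h is illustrative:

    -module(my_report_h).
    -behaviour(gen_event).

    -export([init/1, handle_event/2, handle_call/2, handle_info/2,
             terminate/2]).

    init(_Args) ->
        {ok, []}.

    %% Events arrive in the pre-logger shape re-created by notify/5, e.g.
    %% {error, GL, {Pid, Format, Args}} or {error_report, GL, {Pid, Type, Report}}.
    handle_event({error, _GL, {Pid, Format, Args}}, State) ->
        io:format("legacy error event from ~p: ~ts~n",
                  [Pid, io_lib:format(Format, Args)]),
        {ok, State};
    handle_event(_Other, State) ->
        {ok, State}.

    handle_call(_Request, State) ->
        {ok, ok, State}.

    handle_info(_Info, State) ->
        {ok, State}.

    terminate(_Reason, _State) ->
        ok.

It would be attached with error_logger:add_report_handler(my_report_h) and detached with error_logger:delete_report_handler(my_report_h), which also removes the logger handler again once no gen_event handlers remain.
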
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index 82a3571da9..afffcd156e 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -60,6 +60,17 @@
kernel_refc,
local_tcp,
local_udp,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_h_common,
+ logger_server,
+ logger_simple,
+ logger_std_h,
+ logger_sup,
net,
net_adm,
net_kernel,
@@ -117,6 +128,8 @@
kernel_config,
kernel_refc,
kernel_sup,
+ logger,
+ logger_sup,
net_kernel,
net_sup,
rex,
@@ -127,7 +140,7 @@
inet_db,
pg2]},
{applications, []},
- {env, [{error_logger, tty}]},
+ {env, []},
{mod, {kernel, []}},
{runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]}
]
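Note: the kernel application no longer ships {error_logger, tty} as a default environment value. The standard handler setup in logger.erl below instead reads logger_dest (falling back to the old error_logger key), logger_level, logger_sasl_compatible, logger_log_progress and the logger_disk_log_* keys. A hedged sys.config sketch using those keys; the file path and numeric limits are example values:

    %% sys.config (sketch)
    [{kernel,
      [{logger_dest, {disk_log, "log/erlang.log"}},
       {logger_disk_log_maxfiles, 5},
       {logger_disk_log_maxbytes, 1048576},
       {logger_level, info},
       {logger_sasl_compatible, false},
       {logger_log_progress, false}]}].
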
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index 0382764b39..20aa47f602 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -40,8 +40,7 @@ start(_, []) ->
ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
end,
%% add error handler
- Type = get_error_logger_type(),
- case error_logger:swap_handler(Type) of
+ case logger:setup_standard_handler() of
ok -> {ok, Pid, []};
Error ->
%% Not necessary since the node will crash anyway:
@@ -62,16 +61,6 @@ config_change(Changed, New, Removed) ->
do_global_groups_change(Changed, New, Removed),
ok.
-get_error_logger_type() ->
- case application:get_env(kernel, error_logger) of
- {ok, tty} -> tty;
- {ok, {file, File}} when is_list(File) -> {logfile, File};
- {ok, false} -> false;
- {ok, silent} -> silent;
- undefined -> tty; % default value
- {ok, Bad} -> exit({bad_config, {kernel, {error_logger, Bad}}})
- end.
-
%%%-----------------------------------------------------------------
%%% The process structure in kernel is as shown in the figure.
%%%
@@ -153,9 +142,18 @@ init([]) ->
type => supervisor,
modules => [?MODULE]},
+
+ LoggerSup = #{id => logger_sup,
+ start => {logger_sup, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [logger_sup]},
+
case init:get_argument(mode) of
{ok, [["minimal"]]} ->
- {ok, {SupFlags, [Code, File, StdError, User, Config, RefC, SafeSup]}};
+ {ok, {SupFlags,
+ [Code, File, StdError, User, Config, RefC, SafeSup, LoggerSup]}};
_ ->
Rpc = #{id => rex,
start => {rpc, start_link, []},
@@ -206,7 +204,7 @@ init([]) ->
{ok, {SupFlags,
[Code, Rpc, Global, InetDb | DistAC] ++
[NetSup, GlGroup, File, SigSrv,
- StdError, User, Config, RefC, SafeSup] ++ Timer}}
+ StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
end;
init(safe) ->
SupFlags = #{strategy => one_for_one,
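Note: kernel's supervision tree now includes logger_sup (even in minimal mode), and start/2 replaces the old error_logger:swap_handler/1 call with logger:setup_standard_handler/0. A quick shell sketch for inspecting what that setup produced, using the introspection functions defined in logger.erl below:

    1> logger:i(print).            %% pretty-print the current configuration
    2> logger:get_logger_config(). %% {ok, Config} for the logger part only
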
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
new file mode 100644
index 0000000000..943ef8c2d1
--- /dev/null
+++ b/lib/kernel/src/logger.erl
@@ -0,0 +1,803 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger).
+
+%% Log interface
+-export([emergency/1,emergency/2,emergency/3,
+ alert/1,alert/2,alert/3,
+ critical/1,critical/2,critical/3,
+ error/1,error/2,error/3,
+ warning/1,warning/2,warning/3,
+ notice/1,notice/2,notice/3,
+ info/1,info/2,info/3,
+ debug/1,debug/2,debug/3]).
+-export([log/2,log/3,log/4]).
+
+%% Called by macro
+-export([allow/2,macro_log/3,macro_log/4,macro_log/5,add_default_metadata/1]).
+
+%% Configuration
+-export([add_handler/3, remove_handler/1,
+ add_logger_filter/2, add_handler_filter/3,
+ remove_logger_filter/1, remove_handler_filter/2,
+ set_module_level/2, reset_module_level/1,
+ set_logger_config/1, set_logger_config/2,
+ set_handler_config/2, set_handler_config/3,
+ get_logger_config/0, get_handler_config/1]).
+
+%% Misc
+-export([compare_levels/2]).
+-export([set_process_metadata/1, unset_process_metadata/0,
+ get_process_metadata/0]).
+-export([i/0, i/1]).
+-export([setup_standard_handler/0, replace_simple_handler/3]).
+-export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]).
+
+%% Basic report formatting
+-export([format_report/1, format_otp_report/1]).
+
+-export([internal_log/2,filter_stacktrace/2]).
+
+-include("logger_internal.hrl").
+-include("logger.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type log() :: #{level=>level(),
+ msg=>{io:format(),[term()]} |
+ {report,report()} |
+ {string,unicode:chardata()},
+ meta=>metadata()}.
+-type level() :: emergency | alert | critical | error |
+ warning | notice | info | debug.
+-type report() :: map() | [{atom(),term()}].
+-type msg_fun() :: fun((term()) -> {io:format(),[term()]} |
+ report() |
+ unicode:chardata()).
+-type metadata() :: map().
+
+-type handler_id() :: atom().
+-type filter_id() :: atom().
+-type filter() :: {fun((log(),term()) -> filter_return()),term()}.
+-type filter_return() :: stop | ignore | log().
+-type config() :: map().
+
+-export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0,
+ filter_id/0,filter/0,filter_return/0]).
+
+%%%-----------------------------------------------------------------
+%%% API
+emergency(X) ->
+ log(emergency,X).
+emergency(X,Y) ->
+ log(emergency,X,Y).
+emergency(X,Y,Z) ->
+ log(emergency,X,Y,Z).
+
+alert(X) ->
+ log(alert,X).
+alert(X,Y) ->
+ log(alert,X,Y).
+alert(X,Y,Z) ->
+ log(alert,X,Y,Z).
+
+critical(X) ->
+ log(critical,X).
+critical(X,Y) ->
+ log(critical,X,Y).
+critical(X,Y,Z) ->
+ log(critical,X,Y,Z).
+
+error(X) ->
+ log(error,X).
+error(X,Y) ->
+ log(error,X,Y).
+error(X,Y,Z) ->
+ log(error,X,Y,Z).
+
+warning(X) ->
+ log(warning,X).
+warning(X,Y) ->
+ log(warning,X,Y).
+warning(X,Y,Z) ->
+ log(warning,X,Y,Z).
+
+notice(X) ->
+ log(notice,X).
+notice(X,Y) ->
+ log(notice,X,Y).
+notice(X,Y,Z) ->
+ log(notice,X,Y,Z).
+
+info(X) ->
+ log(info,X).
+info(X,Y) ->
+ log(info,X,Y).
+info(X,Y,Z) ->
+ log(info,X,Y,Z).
+
+debug(X) ->
+ log(debug,X).
+debug(X,Y) ->
+ log(debug,X,Y).
+debug(X,Y,Z) ->
+ log(debug,X,Y,Z).
+
+-spec log(Level,StringOrReport) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+log(Level, StringOrReport) ->
+ do_log(Level,StringOrReport,#{}).
+
+-spec log(Level,StringOrReport,Metadata) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Metadata :: metadata();
+ (Level,Format,Args) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Level,Fun,FunArgs) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+log(Level, StringOrReport, Metadata)
+ when is_map(Metadata), not is_function(StringOrReport) ->
+ do_log(Level,StringOrReport,Metadata);
+log(Level, FunOrFormat, Args) ->
+ do_log(Level,{FunOrFormat,Args},#{}).
+
+-spec log(Level,Format, Args, Metadata) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args :: [term()],
+ Metadata :: metadata();
+ (Level,Fun,FunArgs,Metadata) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Metadata :: metadata().
+log(Level, FunOrFormat, Args, Metadata) ->
+ do_log(Level,{FunOrFormat,Args},Metadata).
+
+-spec allow(Level,Module) -> boolean() when
+ Level :: level(),
+ Module :: module().
+allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) ->
+ logger_config:allow(?LOGGER_TABLE,Level,Module).
+
+
+-spec macro_log(Location,Level,StringOrReport) -> ok when
+ Location :: map(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+macro_log(Location,Level,StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,#{}).
+
+-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Meta :: metadata();
+ (Location,Level,Format,Args) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Location,Level,Fun,FunArgs) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+macro_log(Location,Level,StringOrReport,Meta)
+ when is_map(Meta), not is_function(StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,Meta);
+macro_log(Location,Level,FunOrFormat,Args) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},#{}).
+
+-spec macro_log(Location,Level,Format,Args,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()],
+ Meta :: metadata();
+ (Location,Level,Fun,FunArgs,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Meta :: metadata().
+macro_log(Location,Level,FunOrFormat,Args,Meta) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},Meta).
+
+-spec format_otp_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_otp_report(#{label:=_,report:=Report}) ->
+ format_report(Report);
+format_otp_report(Report) ->
+ format_report(Report).
+
+-spec format_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_report(Report) when is_map(Report) ->
+ format_report(maps:to_list(Report));
+format_report(Report) when is_list(Report) ->
+ case lists:flatten(Report) of
+ [] ->
+ {"~tp",[[]]};
+ FlatList ->
+ case string_p1(FlatList) of
+ true ->
+ {"~ts",[FlatList]};
+ false ->
+ format_term_list(Report,[],[])
+ end
+ end;
+format_report(Report) ->
+ {"~tp",[Report]}.
+
+format_term_list([{Tag,Data}|T],Format,Args) ->
+ PorS = case string_p(Data) of
+ true -> "s";
+ false -> "p"
+ end,
+ format_term_list(T,[" ~tp: ~t"++PorS|Format],[Data,Tag|Args]);
+format_term_list([Data|T],Format,Args) ->
+ format_term_list(T,[" ~tp"|Format],[Data|Args]);
+format_term_list([],Format,Args) ->
+ {lists:flatten(lists:join($\n,lists:reverse(Format))),lists:reverse(Args)}.
+
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_unicode_list(FlatList).
+
+internal_log(Level,Term) when is_atom(Level) ->
+ erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "),
+ erlang:display(Term).
+
+%%%-----------------------------------------------------------------
+%%% Configuration
+-spec add_logger_filter(FilterId,Filter) -> ok | {error,term()} when
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_logger_filter(FilterId,Filter) ->
+ logger_server:add_filter(logger,{FilterId,Filter}).
+
+-spec add_handler_filter(HandlerId,FilterId,Filter) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_handler_filter(HandlerId,FilterId,Filter) ->
+ logger_server:add_filter(HandlerId,{FilterId,Filter}).
+
+
+-spec remove_logger_filter(FilterId) -> ok | {error,term()} when
+ FilterId :: filter_id().
+remove_logger_filter(FilterId) ->
+ logger_server:remove_filter(logger,FilterId).
+
+-spec remove_handler_filter(HandlerId,FilterId) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id().
+remove_handler_filter(HandlerId,FilterId) ->
+ logger_server:remove_filter(HandlerId,FilterId).
+
+-spec add_handler(HandlerId,Module,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: config().
+add_handler(HandlerId,Module,Config) ->
+ logger_server:add_handler(HandlerId,Module,Config).
+
+-spec remove_handler(HandlerId) -> ok | {error,term()} when
+ HandlerId :: handler_id().
+remove_handler(HandlerId) ->
+ logger_server:remove_handler(HandlerId).
+
+-spec set_logger_config(Key,Value) -> ok | {error,term()} when
+ Key :: atom(),
+ Value :: term().
+set_logger_config(Key,Value) ->
+ logger_server:set_config(logger,Key,Value).
+
+-spec set_logger_config(Config) -> ok | {error,term()} when
+ Config :: config().
+set_logger_config(Config) ->
+ logger_server:set_config(logger,Config).
+
+-spec set_handler_config(HandlerId,Key,Value) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Key :: atom(),
+ Value :: term().
+set_handler_config(HandlerId,Key,Value) ->
+ logger_server:set_config(HandlerId,Key,Value).
+
+-spec set_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: config().
+set_handler_config(HandlerId,Config) ->
+ logger_server:set_config(HandlerId,Config).
+
+-spec get_logger_config() -> {ok,Config} when
+ Config :: config().
+get_logger_config() ->
+ logger_config:get(?LOGGER_TABLE,logger).
+
+-spec get_handler_config(HandlerId) -> {ok,{Module,Config}} | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: config().
+get_handler_config(HandlerId) ->
+ logger_config:get(?LOGGER_TABLE,HandlerId).
+
+-spec set_module_level(Module,Level) -> ok | {error,term()} when
+ Module :: module(),
+ Level :: level().
+set_module_level(Module,Level) ->
+ logger_server:set_module_level(Module,Level).
+
+-spec reset_module_level(Module) -> ok | {error,term()} when
+ Module :: module().
+reset_module_level(Module) ->
+ logger_server:reset_module_level(Module).
+
+%%%-----------------------------------------------------------------
+%%% Misc
+-spec compare_levels(Level1,Level2) -> eq | gt | lt when
+ Level1 :: level(),
+ Level2 :: level().
+compare_levels(Level,Level) when ?IS_LEVEL(Level) ->
+ eq;
+compare_levels(Level1,Level2) when ?IS_LEVEL(Level1), ?IS_LEVEL(Level2) ->
+ Int1 = logger_config:level_to_int(Level1),
+ Int2 = logger_config:level_to_int(Level2),
+ if Int1 < Int2 -> gt;
+ true -> lt
+ end;
+compare_levels(Level1,Level2) ->
+ erlang:error(badarg,[Level1,Level2]).
+
+-spec set_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+set_process_metadata(Meta) when is_map(Meta) ->
+ _ = put(?LOGGER_META_KEY,Meta),
+ ok;
+set_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
+-spec get_process_metadata() -> Meta | undefined when
+ Meta :: metadata().
+get_process_metadata() ->
+ get(?LOGGER_META_KEY).
+
+-spec unset_process_metadata() -> ok.
+unset_process_metadata() ->
+ _ = erase(?LOGGER_META_KEY),
+ ok.
+
+-spec i() -> #{logger=>config(),
+ handlers=>[{handler_id(),module(),config()}],
+ module_levels=>[{module(),level()}]}.
+i() ->
+ i(term).
+
+-spec i(term) -> #{logger=>config(),
+ handlers=>[{handler_id(),module(),config()}],
+ module_levels=>[{module(),level()}]};
+ (print) -> ok;
+ (string) -> iolist().
+i(_Action = print) ->
+ io:put_chars(i(string));
+i(_Action = string) ->
+ #{logger := #{level := Level, handlers := Handlers,
+ filters := Filters, filter_default := FilterDefault},
+ handlers := HandlerConfigs,
+ module_levels := Modules} = i(term),
+ [io_lib:format("Current logger configuration:~n", []),
+ io_lib:format(" Level: ~p~n",[Level]),
+ io_lib:format(" Filter Default: ~p~n", [FilterDefault]),
+ io_lib:format(" Filters: ~n", []),
+ print_filters(4, Filters),
+ io_lib:format(" Handlers: ~n", []),
+ print_handlers([C || {Id, _, _} = C <- HandlerConfigs,
+ lists:member(Id, Handlers)]),
+ io_lib:format(" Level set per module: ~n", []),
+ print_module_levels(Modules)
+ ];
+i(_Action = term) ->
+ {Logger, Handlers, Modules} = logger_config:get(tid()),
+ #{logger=>Logger,
+ handlers=>Handlers,
+ module_levels=>Modules}.
+
+print_filters(Indent, {Id, {Fun, Config}}) ->
+ io_lib:format("~sId: ~p~n"
+ "~s Fun: ~p~n"
+ "~s Config: ~p~n",[Indent, Id, Indent, Fun, Indent, Config]);
+print_filters(Indent, Filters) ->
+ IndentStr = io_lib:format("~.*s",[Indent, ""]),
+ lists:map(fun(Filter) ->print_filters(IndentStr, Filter) end, Filters).
+
+
+print_handlers({Id,Module,
+ #{level := Level,
+ filters := Filters, filter_default := FilterDefault,
+ formatter := {FormatterModule,FormatterConfig}} = Config}) ->
+ MyKeys = [filter_default, filters, formatter, level, id],
+ UnhandledConfig = maps:filter(fun(Key, _) ->
+ not lists:member(Key, MyKeys)
+ end, Config),
+ Unhandled = lists:map(fun({Key, Value}) ->
+ io_lib:format(" ~p: ~p~n",[Key, Value])
+ end, maps:to_list(UnhandledConfig)),
+ io_lib:format(" Id: ~p~n"
+ " Module: ~p~n"
+ " Level: ~p~n"
+ " Formatter:~n"
+ " Module: ~p~n"
+ " Config: ~p~n"
+ " Filter Default: ~p~n"
+ " Filters:~n~s"
+ " Handler Config:~n"
+ "~s"
+ "",[Id, Module, Level, FormatterModule, FormatterConfig,
+ FilterDefault, print_filters(8, Filters), Unhandled]);
+print_handlers(Handlers) ->
+ lists:map(fun print_handlers/1, Handlers).
+
+print_module_levels({Module,Level}) ->
+ io_lib:format(" Module: ~p~n"
+ " Level: ~p~n",
+ [Module,Level]);
+print_module_levels(ModuleLevels) ->
+ lists:map(fun print_module_levels/1, ModuleLevels).
+
+-spec setup_standard_handler() -> ok | {error,term()}.
+setup_standard_handler() ->
+ case get_logger_type() of
+ {ok,silent} ->
+ Level = get_logger_level(),
+ ok = set_logger_config(level,Level),
+ remove_handler(logger_simple);
+ {ok,Type} ->
+ Level = get_logger_level(),
+ ok = set_logger_config(level,Level),
+ Filters = get_logger_filters(),
+ setup_standard_handler(Type,#{level=>Level,
+ filter_default=>stop,
+ filters=>Filters});
+ Error ->
+ Error
+ end.
+
+-spec setup_standard_handler(Type,Config) -> ok | {error,term()} when
+ Type :: tty | standard_io | standard_error | {file,File} |
+ {file,File,Modes} | {disk_log,LogOpts} | false,
+ File :: file:filename(),
+ Modes :: [term()], % [file:mode()], or more specific?
+ Config :: config(),
+ LogOpts :: map().
+setup_standard_handler(false,#{level:=Level,filters:=Filters}) ->
+ case set_handler_config(logger_simple,level,Level) of
+ ok ->
+ set_handler_config(logger_simple,filters,Filters);
+ Error ->
+ Error
+ end;
+setup_standard_handler(Type,Config) ->
+ {Module,TypeConfig} = get_type_config(Type),
+ replace_simple_handler(?STANDARD_HANDLER,
+ Module,
+ maps:merge(Config,TypeConfig)).
+
+-spec replace_simple_handler(Id,Module,Config) -> ok | {error,term()} when
+ Id :: handler_id(),
+ Module :: module(),
+ Config :: config().
+replace_simple_handler(Id,Module,Config) ->
+ _ = code:ensure_loaded(Module),
+ DoBuffer = erlang:function_exported(Module,swap_buffer,2),
+ case add_handler(Id,Module,Config#{wait_for_buffer=>DoBuffer}) of
+ ok ->
+ if DoBuffer ->
+ {ok,Buffered} = logger_simple:get_buffer(),
+ _ = remove_handler(logger_simple),
+ Module:swap_buffer(?STANDARD_HANDLER,Buffered);
+ true ->
+ _ = remove_handler(logger_simple),
+ ok
+ end,
+ ok;
+ Error ->
+ Error
+ end.
+
+get_logger_type() ->
+ Type0 =
+ case application:get_env(kernel, logger_dest) of
+ undefined ->
+ application:get_env(kernel, error_logger);
+ T ->
+ T
+ end,
+ case Type0 of
+ {ok, tty} ->
+ {ok, tty};
+ {ok, {file, File}} when is_list(File) ->
+ {ok, {file, File}};
+ {ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
+ {ok, {file, File, Modes}};
+ {ok, {disk_log, File}} when is_list(File) ->
+ {ok, {disk_log, get_disk_log_config(File)}};
+ {ok, false} ->
+ {ok, false};
+ {ok, silent} ->
+ {ok, silent};
+ undefined ->
+ {ok, tty}; % default value
+ {ok, Bad} ->
+ {error,{bad_config, {kernel, {logger_dest, Bad}}}}
+ end.
+
+get_disk_log_config(File) ->
+ Config1 =
+ case application:get_env(kernel,logger_disk_log_maxfiles) of
+ undefined -> #{};
+ {ok,MF} -> #{max_no_files=>MF}
+ end,
+ Config2 =
+ case application:get_env(kernel,logger_disk_log_maxbytes) of
+ undefined -> Config1;
+ {ok,MB} -> Config1#{max_no_bytes=>MB}
+ end,
+ Config3 =
+ case application:get_env(kernel,logger_disk_log_type) of
+ undefined -> Config2;
+            {ok,T} -> Config2#{type=>T}
+ end,
+ Config3#{file=>File}.
+
+get_logger_level() ->
+ case application:get_env(kernel,logger_level) of
+ undefined -> info;
+ {ok,Level} when ?IS_LEVEL(Level) -> Level
+ end.
+
+get_logger_filters() ->
+ case application:get_env(kernel, logger_sasl_compatible, false) of
+ true ->
+ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp]);
+ false ->
+ Extra =
+ case application:get_env(kernel, logger_log_progress, false) of
+ true ->
+ [];
+ false ->
+ [{stop_progress,
+ {fun logger_filters:progress/2,stop}}]
+ end,
+ Extra ++ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp,sasl])
+ end.
+
+get_type_config({disk_log,LogOpts}) ->
+ {logger_disk_log_h,#{disk_log_opts=>LogOpts}};
+get_type_config(tty) ->
+ %% This is only for backwards compatibility with error_logger and
+ %% old kernel and sasl environment variables
+ get_type_config(standard_io);
+get_type_config(Type) when Type==standard_io;
+ Type==standard_error;
+ element(1,Type)==file ->
+ {logger_std_h,#{logger_std_h=>#{type=>Type}}};
+get_type_config(Type) ->
+ {error,{illegal_logger_type,Type}}.
+
+%%%-----------------------------------------------------------------
+-spec limit_term(term()) -> term().
+
+limit_term(Term) ->
+ try get_format_depth() of
+ unlimited -> Term;
+ D -> io_lib:limit_term(Term, D)
+ catch error:badarg ->
+ %% This could happen during system termination, after
+ %% application_controller process is dead.
+ unlimited
+ end.
+
+-spec get_format_depth() -> 'unlimited' | pos_integer().
+
+get_format_depth() ->
+ Depth =
+ case application:get_env(kernel, logger_format_depth) of
+ {ok, D} when is_integer(D) ->
+ D;
+ undefined ->
+ case application:get_env(kernel, error_logger_format_depth) of
+ {ok, D} when is_integer(D) ->
+ D;
+ undefined ->
+ unlimited
+ end
+ end,
+ max(10, Depth).
+
+-spec get_max_size() -> 'unlimited' | pos_integer().
+
+get_max_size() ->
+ case application:get_env(kernel, logger_max_size) of
+ {ok, Size} when is_integer(Size) ->
+ max(50, Size);
+ undefined ->
+ unlimited
+ end.
+
+-spec get_utc_config() -> boolean().
+
+get_utc_config() ->
+ %% Kernel's logger_utc configuration overrides SASL utc_log, which
+ %% in turn overrides stdlib config - in order to have uniform
+ %% timestamps in log messages
+ case application:get_env(kernel, logger_utc) of
+ {ok, Val} -> Val;
+ undefined ->
+ case application:get_env(sasl, utc_log) of
+ {ok, Val} -> Val;
+ undefined ->
+ case application:get_env(stdlib, utc_log) of
+ {ok, Val} -> Val;
+ undefined -> false
+ end
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+do_log(warning,Msg,Meta) ->
+ do_log_1(error_logger:warning_map(),Msg,Meta);
+do_log(Level,Msg,Meta) ->
+ do_log_1(Level,Msg,Meta).
+
+do_log_1(Level,Msg,#{mfa:={Module,_,_}}=Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level,Module) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end;
+do_log_1(Level,Msg,Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end.
+
+-spec log_allowed(Location,Level,Msg,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Msg :: {msg_fun(),term()} |
+ {io:format(),[term()]} |
+ report() |
+ unicode:chardata(),
+ Meta :: metadata().
+log_allowed(Location,Level,{Fun,FunArgs},Meta) when is_function(Fun,1) ->
+ try Fun(FunArgs) of
+ Msg={Format,Args} when is_list(Format), is_list(Args) ->
+ log_allowed(Location,Level,Msg,Meta);
+ Report when ?IS_REPORT(Report) ->
+ log_allowed(Location,Level,Report,Meta);
+ String when ?IS_STRING(String) ->
+ log_allowed(Location,Level,String,Meta);
+ Other ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{Fun,FunArgs},Other]},
+ Meta)
+ catch C:R ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{Fun,FunArgs},{C,R}]},
+ Meta)
+ end;
+log_allowed(Location,Level,Msg,Meta0) when is_map(Meta0) ->
+ %% Metadata priorities are:
+ %% Location (added in API macros) - will be overwritten by process
+ %% metadata (set by set_process_metadata/1), which in turn will be
+ %% overwritten by the metadata given as argument in the log call
+ %% (function or macro).
+ Meta = add_default_metadata(
+ maps:merge(Location,maps:merge(proc_meta(),Meta0))),
+ case node(maps:get(gl,Meta)) of
+ Node when Node=/=node() ->
+ log_remote(Node,Level,Msg,Meta),
+ do_log_allowed(Level,Msg,Meta);
+ _ ->
+ do_log_allowed(Level,Msg,Meta)
+ end.
+
+do_log_allowed(Level,{Format,Args}=Msg,Meta)
+ when ?IS_LEVEL(Level),
+ is_list(Format),
+ is_list(Args),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>Msg,meta=>Meta},tid());
+do_log_allowed(Level,Report,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_REPORT(Report),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{report,Report},meta=>Meta},
+ tid());
+do_log_allowed(Level,String,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_STRING(String),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{string,String},meta=>Meta},
+ tid()).
+tid() ->
+ ets:whereis(?LOGGER_TABLE).
+
+log_remote(Node,Level,{Format,Args},Meta) ->
+ log_remote(Node,{log,Level,Format,Args,Meta});
+log_remote(Node,Level,Msg,Meta) ->
+ log_remote(Node,{log,Level,Msg,Meta}).
+
+log_remote(Node,Request) ->
+ {logger,Node} ! Request,
+ ok.
+
+add_default_metadata(Meta) ->
+ add_default_metadata([pid,gl,time],Meta).
+
+add_default_metadata([Key|Keys],Meta) ->
+ case maps:is_key(Key,Meta) of
+ true ->
+ add_default_metadata(Keys,Meta);
+ false ->
+ add_default_metadata(Keys,Meta#{Key=>default(Key)})
+ end;
+add_default_metadata([],Meta) ->
+ Meta.
+
+proc_meta() ->
+ case get_process_metadata() of
+ ProcMeta when is_map(ProcMeta) -> ProcMeta;
+ _ -> #{}
+ end.
+
+default(pid) -> self();
+default(gl) -> group_leader();
+default(time) -> erlang:monotonic_time(microsecond).
+
+%% Remove everything upto and including this module from the stacktrace
+filter_stacktrace(Module,[{Module,_,_,_}|_]) ->
+ [];
+filter_stacktrace(Module,[H|T]) ->
+ [H|filter_stacktrace(Module,T)];
+filter_stacktrace(_,[]) ->
+ [].
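Note: the API above accepts strings, format/args pairs, report maps or keyword lists, and lazily evaluated funs, and merges metadata in a fixed priority order (call argument over process metadata over macro location, see log_allowed/4). A usage sketch; the module name and every literal value are examples, not taken from OTP:

    -module(logger_usage_example).
    -export([demo/0]).

    demo() ->
        %% String, format/args, report and lazy-fun forms of the API above.
        ok = logger:info("node is up"),
        ok = logger:error("~p exited: ~p", [worker_1, timeout]),
        ok = logger:warning(#{what => disk_almost_full, used_percent => 93}),
        ok = logger:debug(fun(N) -> {"expensive dump: ~p", [lists:seq(1, N)]} end, 5),

        %% Process metadata is merged into every later event from this
        %% process; metadata given in the call itself takes precedence.
        ok = logger:set_process_metadata(#{request_id => <<"abc123">>}),
        ok = logger:info(#{what => request_done}, #{request_id => <<"abc124">>}).
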
diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl
new file mode 100644
index 0000000000..d9f5aa6faf
--- /dev/null
+++ b/lib/kernel/src/logger_backend.erl
@@ -0,0 +1,133 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_backend).
+
+-export([log_allowed/2]).
+
+-include("logger_internal.hrl").
+
+-define(OWN_KEYS,[level,filters,filter_default,handlers]).
+
+%%%-----------------------------------------------------------------
+%%% The default logger backend
+log_allowed(Log, Tid) ->
+ {ok,Config} = logger_config:get(Tid,logger),
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(logger,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Handlers = maps:get(handlers,Config,[]),
+ call_handlers(Log1,Handlers,Tid)
+ end,
+ ok.
+
+call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) ->
+ case logger_config:get(Tid,Id,Level) of
+ {ok,{Module,Config}} ->
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(Id,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Config1 = maps:without(?OWN_KEYS,Config),
+ try Module:log(Log1,Config1)
+ catch C:R:S ->
+ case logger:remove_handler(Id) of
+ ok ->
+ logger:internal_log(
+ error,{removed_failing_handler,Id}),
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,removed_failing_handler},
+ {handler,{Id,Module}},
+ {log,Log1},
+ {config,Config1},
+ {reason,{C,R,filter_stacktrace(S)}}]);
+ {error,{not_found,_}} ->
+ %% Probably already removed by other client
+ %% Don't report again
+ ok;
+ {error,Reason} ->
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,remove_handler_failed},
+ {reason,Reason}])
+ end
+ end
+ end;
+ _ ->
+ ok
+ end,
+ call_handlers(Log,Handlers,Tid);
+call_handlers(_Log,[],_Tid) ->
+ ok.
+
+apply_filters(Owner,Log,Filters,Config) ->
+ case do_apply_filters(Owner,Log,Filters,ignore) of
+ stop ->
+ stop;
+ ignore ->
+ case maps:get(filter_default,Config) of
+ log ->
+ Log;
+ stop ->
+ stop
+ end;
+ Log1 ->
+ Log1
+ end.
+
+do_apply_filters(Owner,Log,[{_Id,{FilterFun,FilterArgs}}=Filter|Filters],State) ->
+ try FilterFun(Log,FilterArgs) of
+ stop ->
+ stop;
+ ignore ->
+ do_apply_filters(Owner,Log,Filters,State);
+ Log1=#{level:=Level,msg:=Msg,meta:=Meta}
+ when is_atom(Level), ?IS_MSG(Msg), is_map(Meta) ->
+ do_apply_filters(Owner,Log1,Filters,log);
+ Bad ->
+ handle_filter_failed(Filter,Owner,Log,{bad_return_value,Bad})
+ catch C:R:S ->
+ handle_filter_failed(Filter,Owner,Log,{C,R,filter_stacktrace(S)})
+ end;
+do_apply_filters(_Owner,_Log,[],ignore) ->
+ ignore;
+do_apply_filters(_Owner,Log,[],log) ->
+ Log.
+
+handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) ->
+ case logger_server:remove_filter(Owner,Id) of
+ ok ->
+ logger:internal_log(error,{removed_failing_filter,Id}),
+ ?LOG_INTERNAL(debug,
+ [{logger,removed_failing_filter},
+ {filter,Filter},
+ {owner,Owner},
+ {log,Log},
+ {reason,Reason}]);
+ _ ->
+ ok
+ end,
+ ignore.
+
+filter_stacktrace(Stacktrace) ->
+ logger:filter_stacktrace(?MODULE,Stacktrace).
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
new file mode 100644
index 0000000000..799aea9617
--- /dev/null
+++ b/lib/kernel/src/logger_config.erl
@@ -0,0 +1,151 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_config).
+
+-export([new/1,delete/2,
+ exist/2,
+ allow/2,allow/3,
+ get/2, get/3, get/1,
+ create/3, create/4, set/3,
+ set_module_level/3,reset_module_level/2,
+ cache_module_level/2,
+ level_to_int/1]).
+
+-include("logger_internal.hrl").
+
+new(Name) ->
+ _ = ets:new(Name,[set,protected,named_table]),
+ ets:whereis(Name).
+
+delete(Tid,Id) ->
+ ets:delete(Tid,table_key(Id)).
+
+allow(Tid,Level,Module) ->
+ LevelInt = level_to_int(Level),
+ case ets:lookup(Tid,Module) of
+ [{Module,{ModLevel,cached}}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [{Module,ModLevel}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [] ->
+ logger_server:cache_module_level(Module),
+ allow(Tid,Level);
+ _ ->
+ false
+ end.
+
+allow(Tid,Level) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2),
+ level_to_int(Level) =< GlobalLevelInt.
+
+exist(Tid,What) ->
+ ets:member(Tid,table_key(What)).
+
+get(Tid,What) ->
+ case ets:lookup(Tid,table_key(What)) of
+ [{_,_,Config}] ->
+ {ok,Config};
+ [{_,_,Config,Module}] ->
+ {ok,{Module,Config}};
+ [] ->
+ {error,{not_found,What}}
+ end.
+
+get(Tid,What,Level) ->
+ MS = [{{table_key(What),'$1','$2'}, % logger config
+ [{'>=','$1',level_to_int(Level)}],
+ ['$2']},
+ {{table_key(What),'$1','$2','$3'}, % handler config
+ [{'>=','$1',level_to_int(Level)}],
+ [{{'$3','$2'}}]}],
+ case ets:select(Tid,MS) of
+ [] -> error;
+ [Data] -> {ok,Data}
+ end.
+
+create(Tid,What,Module,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config,Module}).
+create(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config}).
+
+set(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ %% Should do this only if the level has actually changed. Possibly
+ %% overwrite instead of delete?
+ case What of
+ logger ->
+ _ = ets:select_delete(Tid,[{{'_',{'$1',cached}},
+ [{'=/=','$1',LevelInt}],
+ [true]}]),
+ ok;
+ _ ->
+ ok
+ end,
+ ets:update_element(Tid,table_key(What),[{2,LevelInt},{3,Config}]),
+ ok.
+
+set_module_level(Tid,Module,Level) ->
+ ets:insert(Tid,{Module,level_to_int(Level)}),
+ ok.
+
+reset_module_level(Tid,Module) ->
+    ets:delete(Tid,Module), % should possibly overwrite instead of delete?
+ ok.
+
+cache_module_level(Tid,Module) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2),
+ ets:insert_new(Tid,{Module,{GlobalLevelInt,cached}}),
+ ok.
+
+get(Tid) ->
+ {ok,Logger} = get(Tid,logger),
+ HMS = [{{table_key('$1'),'_','$2','$3'},[],[{{'$1','$3','$2'}}]}],
+ Handlers = ets:select(Tid,HMS),
+ MMS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[{{'$1','$2'}}]}],
+ Modules = ets:select(Tid,MMS),
+ {Logger,Handlers,[{M,int_to_level(L)} || {M,L} <- Modules]}.
+
+level_to_int(emergency) -> ?EMERGENCY;
+level_to_int(alert) -> ?ALERT;
+level_to_int(critical) -> ?CRITICAL;
+level_to_int(error) -> ?ERROR;
+level_to_int(warning) -> ?WARNING;
+level_to_int(notice) -> ?NOTICE;
+level_to_int(info) -> ?INFO;
+level_to_int(debug) -> ?DEBUG.
+
+int_to_level(?EMERGENCY) -> emergency;
+int_to_level(?ALERT) -> alert;
+int_to_level(?CRITICAL) -> critical;
+int_to_level(?ERROR) -> error;
+int_to_level(?WARNING) -> warning;
+int_to_level(?NOTICE) -> notice;
+int_to_level(?INFO) -> info;
+int_to_level(?DEBUG) -> debug.
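+
+%% Note: the ?EMERGENCY..?DEBUG macros (from logger_internal.hrl) are
+%% assumed to map to increasing integers, i.e. a more severe level gets
+%% a smaller integer, so that level_to_int(Level) =< ConfiguredLevelInt
+%% in allow/2,3 above means "Level is at least as severe as the
+%% configured level".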
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+table_key(logger) -> ?LOGGER_KEY;
+table_key(HandlerId) -> {?HANDLER_KEY,HandlerId}.
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
new file mode 100644
index 0000000000..3b71f936d8
--- /dev/null
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -0,0 +1,694 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+%%% API
+-export([start_link/3, info/1, disk_log_sync/1, reset/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2,
+ adding_handler/2, removing_handler/1,
+ changing_config/3, swap_buffer/2]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a disk_log handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added (as a result of calling add/3).
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+-spec disk_log_sync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+disk_log_sync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+disk_log_sync(Name) ->
+ {error,{badarg,{disk_log_sync,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(Name, Config) ->
+ case check_config(adding, Name, Config) of
+ {ok, Config1} ->
+ %% create initial handler state by merging defaults with config
+ HConfig = maps:get(?MODULE, Config1, #{}),
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ case start(Name, Config1, HState) of
+ ok ->
+ %% Make sure wait_for_buffer is not stored, so we
+ %% won't hang and wait for buffer on a restart
+ {ok, maps:remove(wait_for_buffer,Config1)};
+ Error ->
+ Error
+ end;
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = HState,
+ {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(Name,
+ OldConfig=#{id:=Id, disk_log_opts:=DLOpts},
+ NewConfig=#{id:=Id, disk_log_opts:=DLOpts}) ->
+ case check_config(changing, Name, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ HError -> HError
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end;
+changing_config(_Name, OldConfig, NewConfig) ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}.
+
+check_config(adding, Name, Config0) ->
+ %% Merge in defaults on top level
+ Config = maps:merge(#{id => Name}, Config0),
+ %% Merge in defaults on handler level
+ LogOpts0 = maps:get(disk_log_opts, Config, #{}),
+ LogOpts = merge_default_logopts(Name, LogOpts0),
+ case check_log_opts(maps:to_list(LogOpts)) of
+ ok ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok ->
+ {ok,Config#{disk_log_opts=>LogOpts,
+ ?MODULE=>MyConfig}};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+check_config(changing, _Name, Config) ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+merge_default_logopts(Name, LogOpts) ->
+ Type = maps:get(type, LogOpts, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Default = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Default,LogOpts).
+
+check_log_opts([{file,File}|Opts]) when is_list(File) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_files,undefined}|Opts]) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_files,N}|Opts]) when is_integer(N), N>0 ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_bytes,infinity}|Opts]) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_bytes,N}|Opts]) when is_integer(N), N>0 ->
+ check_log_opts(Opts);
+check_log_opts([{type,Type}|Opts]) when Type==wrap; Type==halt ->
+ check_log_opts(Opts);
+check_log_opts([Invalid|_]) ->
+ {error,{invalid_config,disk_log_opt,Invalid}};
+check_log_opts([]) ->
+ ok.
+
+check_my_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_my_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_my_config([]) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Name) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Get buffer when swapping from simple handler
+swap_buffer(Name,Buffer) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ Name ! {buffer,Buffer}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(Log, Config) -> ok | dropped when
+ Log :: logger:log(),
+ Config :: logger:config().
+
+log(Log,Config=#{id:=Name}) ->
+ %% if the handler has crashed, we must drop this request
+ %% and hope the handler restarts so we can try again
+ true = is_pid(whereis(Name)),
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ logger_h_common:call_cast_or_drop(Name, Bin).
+
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config = #{disk_log_opts := LogOpts},
+ State = #{dl_sync_int := DLSyncInt}]) ->
+ register(Name, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case open_disk_log(Name, LogOpts) of
+ ok ->
+ catch ets:new(Name, [public, named_table]),
+ ?set_mode(Name, async),
+ proc_lib:init_ack({ok,self()}),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{id => Name,
+ mode => async,
+ dl_sync => DLSyncInt,
+ log_opts => LogOpts,
+ last_qlen => 0,
+ last_log_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}),
+ gen_server:cast(self(), {repeated_disk_log_sync,T0}),
+ enter_loop(Config, State1);
+ Error ->
+ logger_h_common:error_notify({open_disk_log,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+enter_loop(#{wait_for_buffer:=true}=Config,State) ->
+ State1 =
+ receive
+ {buffer,Buffer} ->
+ lists:foldl(
+ fun(Log,S) ->
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ {_,S1} = do_log(Bin,cast,S),
+ S1
+ end,
+ State,
+ Buffer)
+ end,
+ gen_server:enter_loop(?MODULE,[],State1);
+enter_loop(_Config,State) ->
+ gen_server:enter_loop(?MODULE,[],State).
+
+%% This is the synchronous log request.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply, Result, State1};
+
+handle_call(disk_log_sync, _From, State = #{id := Name}) ->
+ State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State),
+ {reply, Result, State1};
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0,
+ last_log_ts := LastLogTS}) ->
+ HConfig = maps:get(?MODULE, NewConfig, #{}),
+ State1 = #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ _ = gen_server:cast(self(), {repeated_disk_log_sync,
+ LastLogTS})
+ end,
+ {reply, ok, State1};
+ false ->
+ {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp(),
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+
+%% This is the asynchronous log request.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% request, the repeat operation must be active!
+handle_cast({repeated_disk_log_sync,LastLogTS0},
+ State = #{id := Name,
+ filesync_repeat_interval := FSyncInt,
+ last_log_ts := LastLogTS1}) ->
+ State1 =
+ if is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ NewState = if LastLogTS1 == LastLogTS0 ->
+ State;
+ true ->
+ disk_log_sync(Name, State)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),
+ {repeated_disk_log_sync,LastLogTS1}]),
+ NewState#{rep_sync_tref => TRef};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+%% The disk log owner must handle status messages from disk_log.
+handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) ->
+ {noreply, State};
+handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, full},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}),
+ {noreply, State#{prev_disk_log_info => full}};
+handle_info({disk_log, _Node, Log, Info = {error_status,_Status}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+
+handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) ->
+ {noreply, State};
+
+handle_info(_, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id := Name}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ _ = close_disk_log(Name, normal),
+ logger_h_common:stop_or_restart(Name, Reason, State).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+%%%-----------------------------------------------------------------
+%%%
+get_init_state() ->
+ #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen => ?FLUSH_REQS_QLEN,
+ enable_burst_limit => ?ENABLE_BURST_LIMIT,
+ burst_limit_size => ?BURST_LIMIT_SIZE,
+ burst_window_time => ?BURST_WINDOW_TIME,
+ enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after => ?HANDLER_RESTART_AFTER,
+ dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+%%%-----------------------------------------------------------------
+%%% Add a disk_log handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Config is the logger:config() map containing a sub map with any of
+%%% the following associations:
+%%%
+%%% Config = #{disk_log_opts => #{file => file:filename(),
+%%% max_no_bytes => integer(),
+%%% max_no_files => integer(),
+%%% type => wrap | halt}}.
+%%%
+%%% This map will be merged with the logger configuration data for
+%%% the disk_log LogName. If type == halt, then max_no_files is
+%%% ignored.
+%%%
+%%% Handler specific config should be provided with a sub map associated
+%%% with a key named the same as this module, e.g.:
+%%%
+%%% Config = #{logger_disk_log_h => #{toggle_sync_qlen => 50}}
+%%%
+%%% The disk_log handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
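+%%%
+%%% As an illustration only (handler id, file name and values below are
+%%% made up, not defaults), such a handler could be added with:
+%%%
+%%%   logger:add_handler(my_disk_log_h, logger_disk_log_h,
+%%%                      #{disk_log_opts =>
+%%%                            #{file => "./my_disk_log",
+%%%                              type => wrap,
+%%%                              max_no_files => 4,
+%%%                              max_no_bytes => 1000000},
+%%%                        logger_disk_log_h =>
+%%%                            #{toggle_sync_qlen => 50}}).
+%%%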
+start(Name, Config, HandlerState) ->
+ LoggerDLH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerDLH) of
+ {ok,_} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ _ = gen_server:call(Name,stop),
+ _ = supervisor:delete_child(logger_sup, Name),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_dl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload between every request (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox in
+%% one go (at most ?FLUSH_MAX_N requests are flushed at a time)
+do_log(Bin, CallOrCast, State = #{id:=Name, mode := _Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% request to react quickly to large bursts of requests and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this function is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log request
+ ?observe(Name,{flushed,_NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,_NewFlushed,
+ State1#{mode => ?set_mode(Name,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this function is called to write to disk_log
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{dl_sync := DLSync,
+ dl_sync_int := DLSyncInt,
+ last_qlen := LastQLen,
+ last_log_ts := T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log requests
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+    %% only send a synchronous request to the disk_log process
+ %% every DLSyncInt time, to give the handler time between
+ %% writes so it can keep up with incoming messages
+ {Status,LastQLen1,State1} =
+ if DoWrite, DLSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, element(2,process_info(self(),message_queue_len)),
+ NewState};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, LastQLen, NewState};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped, LastQLen, State}
+ end,
+
+ %% Check if the time since the previous log request is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+    %% time between two consecutive log requests is short and no new
+    %% request comes in after the last one, the idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1,State2} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ {?change_mode(Name,Mode,async), 0, disk_log_sync(Name,State1)};
+ true ->
+ {Mode, BurstMsgCount,State1}
+ end,
+
+ State3 =
+ ?update_calls_or_casts(_CallOrCast,1,State2),
+ State4 =
+ ?update_max_time(Time,
+ State3#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}),
+ {Status,State4}.
+
+
+open_disk_log(Name, LogOpts) ->
+ #{file := File,
+ max_no_bytes := MaxNoBytes,
+ max_no_files := MaxNoFiles,
+ type := Type} = LogOpts,
+ case filelib:ensure_dir(File) of
+ ok ->
+ Size =
+ if Type == halt -> MaxNoBytes;
+ Type == wrap -> {MaxNoBytes,MaxNoFiles}
+ end,
+ Opts = [{name, Name},
+ {file, File},
+ {size, Size},
+ {type, Type},
+ {linkto, self()},
+ {repair, false},
+ {format, external},
+ {notify, true},
+ {quiet, true},
+ {mode, read_write}],
+ case disk_log:open(Opts) of
+ {ok,Name} ->
+ ok;
+ Error = {error,_Reason} ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+close_disk_log(Name, _) ->
+ _ = ?disk_log_sync(Name),
+ _ = disk_log:lclose(Name),
+ ok.
+
+disk_log_write(Name, Bin, State) ->
+ Result =
+ case ?disk_log_blog(Name, Bin) of
+ ok ->
+ ok;
+ LogError ->
+ _ = case maps:get(prev_log_result, State) of
+ LogError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,log,
+ LogOpts,
+ LogError})
+ end,
+ LogError
+ end,
+ State#{prev_log_result => Result}.
+
+disk_log_sync(Name, State) ->
+ Result =
+ case ?disk_log_sync(Name) of
+ ok ->
+ ok;
+ SyncError ->
+ _ = case maps:get(prev_sync_result, State) of
+ SyncError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,sync,
+ LogOpts,
+ SyncError})
+ end,
+ SyncError
+ end,
+ State#{prev_sync_result => Result}.
+
+error_notify_new(Info,Info, _Term) ->
+ ok;
+error_notify_new(_Info0,_Info1, Term) ->
+ logger_h_common:error_notify(Term).
diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl
new file mode 100644
index 0000000000..85928f0fd6
--- /dev/null
+++ b/lib/kernel/src/logger_filters.erl
@@ -0,0 +1,123 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters).
+
+-export([domain/2,
+ level/2,
+ progress/2,
+ remote_gl/2]).
+
+-include("logger_internal.hrl").
+-define(IS_ACTION(A), (A==log orelse A==stop)).
+
+-spec domain(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: {Action,Compare,MatchDomain},
+ Action :: log | stop,
+ Compare :: prefix_of | starts_with | equals | no_domain,
+ MatchDomain :: list(atom()).
+domain(#{meta:=Meta}=Log,{Action,Compare,MatchDomain})
+ when ?IS_ACTION(Action) andalso
+ (Compare==prefix_of orelse
+ Compare==starts_with orelse
+ Compare==equals orelse
+ Compare==no_domain) andalso
+ is_list(MatchDomain) ->
+ filter_domain(Compare,Meta,MatchDomain,on_match(Action,Log));
+domain(Log,Extra) ->
+ erlang:error(badarg,[Log,Extra]).
+
+-spec level(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: {Action,Operator,MatchLevel},
+ Action :: log | stop,
+ Operator :: neq | eq | lt | gt | lteq | gteq,
+ MatchLevel :: logger:level().
+level(#{level:=L1}=Log,{Action,Op,L2})
+ when ?IS_ACTION(Action) andalso
+ (Op==neq orelse
+ Op==eq orelse
+ Op==lt orelse
+ Op==gt orelse
+ Op==lteq orelse
+ Op==gteq) andalso
+ ?IS_LEVEL(L2) ->
+ filter_level(Op,L1,L2,on_match(Action,Log));
+level(Log,Extra) ->
+ erlang:error(badarg,[Log,Extra]).
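+
+%% Illustrative usage (the filter arguments are made up, and this
+%% relies on logger:compare_levels/2 returning 'lt' when the event
+%% level is less severe than the match level): installed as a
+%% {Fun,Args} pair such as
+%%   {fun logger_filters:level/2, {stop,lt,error}}
+%% the filter returns 'stop' for events less severe than error and
+%% 'ignore' for all other events.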
+
+-spec progress(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: log | stop.
+progress(Log,Action) when ?IS_ACTION(Action) ->
+ filter_progress(Log,on_match(Action,Log));
+progress(Log,Action) ->
+ erlang:error(badarg,[Log,Action]).
+
+-spec remote_gl(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: log | stop.
+remote_gl(Log,Action) when ?IS_ACTION(Action) ->
+ filter_remote_gl(Log,on_match(Action,Log));
+remote_gl(Log,Action) ->
+ erlang:error(badarg,[Log,Action]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+filter_domain(prefix_of,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(Domain,MatchDomain,OnMatch);
+filter_domain(starts_with,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(MatchDomain,Domain,OnMatch);
+filter_domain(equals,#{domain:=Domain},Domain,OnMatch) ->
+ OnMatch;
+filter_domain(Compare,Meta,_,OnMatch) ->
+    case maps:is_key(domain,Meta) of
+        false when Compare==no_domain -> OnMatch;
+        _ -> ignore
+    end.
+
+is_prefix(D1,D2,OnMatch) when is_list(D1), is_list(D2) ->
+ case lists:prefix(D1,D2) of
+ true -> OnMatch;
+ false -> ignore
+ end;
+is_prefix(_,_,_) ->
+ ignore.
+
+filter_level(Op,L1,L2,OnMatch) ->
+ case logger:compare_levels(L1,L2) of
+ eq when Op==eq; Op==lteq; Op==gteq -> OnMatch;
+ lt when Op==lt; Op==lteq; Op==neq -> OnMatch;
+ gt when Op==gt; Op==gteq; Op==neq -> OnMatch;
+ _ -> ignore
+ end.
+
+filter_progress(#{msg:={report,#{label:={_,progress}}}},OnMatch) ->
+ OnMatch;
+filter_progress(_,_) ->
+ ignore.
+
+filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() ->
+ OnMatch;
+filter_remote_gl(_,_) ->
+ ignore.
+
+on_match(log,Log) -> Log;
+on_match(stop,_) -> stop.
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
new file mode 100644
index 0000000000..386e7832e2
--- /dev/null
+++ b/lib/kernel/src/logger_formatter.erl
@@ -0,0 +1,295 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter).
+
+-export([format/2]).
+
+-include("logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type template() :: [atom()|tuple()|string()].
+
+%%%-----------------------------------------------------------------
+%%% API
+-spec format(Log,Config) -> String when
+ Log :: logger:log(),
+ Config :: #{single_line=>boolean(),
+ legacy_header=>boolean(),
+ report_cb=>fun((logger:report()) -> {io:format(),[term()]}),
+ chars_limit=>pos_integer()| unlimited,
+ max_size=>pos_integer() | unlimited,
+ depth=>pos_integer() | unlimited,
+ template=>template(),
+ utc=>boolean()},
+ String :: string().
+format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
+ when is_map(Config0) ->
+ Config = add_default_config(Config0),
+ Meta1 = maybe_add_legacy_header(Level,Meta,Config),
+ Template = maps:get(template,Config),
+ {BT,AT0} = lists:splitwith(fun(msg) -> false; (_) -> true end, Template),
+ {DoMsg,AT} =
+ case AT0 of
+ [msg|Rest] -> {true,Rest};
+            _ -> {false,AT0}
+ end,
+ B = do_format(Level,"",Meta1,BT,Config),
+ A = do_format(Level,"",Meta1,AT,Config),
+ MsgStr =
+ if DoMsg ->
+ Config1 =
+ case maps:get(chars_limit,Config) of
+ unlimited ->
+ Config;
+ Size0 ->
+ Size =
+ case Size0 - string:length([B,A]) of
+ S when S>=0 -> S;
+ _ -> 0
+ end,
+ Config#{chars_limit=>Size}
+ end,
+ MsgStr0 = format_msg(Msg0,Meta1,Config1),
+ case maps:get(single_line,Config) of
+ true ->
+ %% Trim leading and trailing whitespaces, and replace
+ %% newlines with ", "
+ re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ",
+ [{return,list},global]);
+ _false ->
+ MsgStr0
+ end;
+ true ->
+ ""
+ end,
+ truncate(B ++ MsgStr ++ A,maps:get(max_size,Config)).
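+
+%% A rough illustration (the event, template and output are made up, and
+%% the logger application is assumed to be running so the remaining
+%% defaults can be filled in by add_default_config/1):
+%%
+%%   format(#{level=>info, msg=>{string,"hello"},
+%%            meta=>#{time=>erlang:monotonic_time(microsecond)}},
+%%          #{template=>[time," ",level,": ",msg,"\n"], utc=>false})
+%%
+%% returns iodata along the lines of
+%%   "2018-01-01 10:13:02.342080 info: hello\n"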
+
+do_format(Level,Msg,Data,[level|Format],Config) ->
+ [to_string(level,Level,Config)|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[msg|Format],Config) ->
+ [Msg|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[Key|Format],Config) when is_atom(Key); is_tuple(Key) ->
+ Value = value(Key,Data),
+ [to_string(Key,Value,Config)|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[Str|Format],Config) ->
+ [Str|do_format(Level,Msg,Data,Format,Config)];
+do_format(_Level,_Msg,_Data,[],_Config) ->
+ [].
+
+value(Key,Meta) when is_atom(Key), is_map(Meta) ->
+ maps:get(Key,Meta,"");
+value(Key,_) when is_atom(Key) ->
+ "";
+value(Keys,Meta) when is_tuple(Keys) ->
+ value(tuple_to_list(Keys),Meta);
+value([Key|Keys],Meta) ->
+ value(Keys,value(Key,Meta));
+value([],Value) ->
+ Value.
+
+to_string(time,Time,Config) ->
+ format_time(Time,Config);
+to_string(mfa,MFA,_Config) ->
+ format_mfa(MFA);
+to_string(_,Value,_Config) ->
+ to_string(Value).
+
+to_string(X) when is_atom(X) ->
+ atom_to_list(X);
+to_string(X) when is_integer(X) ->
+ integer_to_list(X);
+to_string(X) when is_pid(X) ->
+ pid_to_list(X);
+to_string(X) when is_reference(X) ->
+ ref_to_list(X);
+to_string(X) when is_list(X) ->
+ case io_lib:printable_unicode_list(lists:flatten(X)) of
+ true -> X;
+ _ -> io_lib:format("~tp",[X])
+ end;
+to_string(X) ->
+ io_lib:format("~tp",[X]).
+
+format_msg({string,Chardata},Meta,Config) ->
+ try unicode:characters_to_list(Chardata)
+ catch _:_ -> format_msg({"INVALID STRING: ~tp",[Chardata]},Meta,Config)
+ end;
+format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) when is_function(Fun,1) ->
+ format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config));
+format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) ->
+ try Fun(Report) of
+ {Format,Args} when is_list(Format), is_list(Args) ->
+ format_msg({Format,Args},maps:remove(report_cb,Meta),Config);
+ Other ->
+ format_msg({"REPORT_CB ERROR: ~tp; Returned: ~tp",
+ [Report,Other]},Meta,Config)
+ catch C:R ->
+ format_msg({"REPORT_CB CRASH: ~tp; Reason: ~tp",
+ [Report,{C,R}]},Meta,Config)
+ end;
+format_msg({report,Report},Meta,Config) ->
+ format_msg({report,Report},
+ Meta#{report_cb=>fun logger:format_report/1},
+ Config);
+format_msg(Msg,_Meta,#{depth:=Depth,chars_limit:=CharsLimit}) ->
+ limit_size(Msg, Depth, CharsLimit).
+
+limit_size(Msg,Depth,unlimited) ->
+ limit_size(Msg,Depth,[]);
+limit_size(Msg,Depth,CharsLimit) when is_integer(CharsLimit) ->
+ limit_size(Msg,Depth,[{chars_limit,CharsLimit}]);
+limit_size({Format,Args},unlimited,Opts) when is_list(Opts) ->
+ try io_lib:format(Format,Args,Opts)
+ catch _:_ ->
+ io_lib:format("FORMAT ERROR: ~tp - ~tp",[Format,Args],Opts)
+ end;
+limit_size({Format0,Args},Depth,Opts) when is_integer(Depth) ->
+ try
+ Format1 = io_lib:scan_format(Format0, Args),
+ Format = limit_format(Format1, Depth),
+ io_lib:build_text(Format,Opts)
+ catch _:_ ->
+ limit_size({"FORMAT ERROR: ~tp - ~tp",[Format0,Args]},Depth,Opts)
+ end.
+
+limit_format([#{control_char:=C0}=M0|T], Depth) when C0 =:= $p;
+ C0 =:= $w ->
+ C = C0 - ($a - $A), %To uppercase.
+ #{args:=Args} = M0,
+ M = M0#{control_char:=C,args:=Args++[Depth]},
+ [M|limit_format(T, Depth)];
+limit_format([H|T], Depth) ->
+ [H|limit_format(T, Depth)];
+limit_format([], _) ->
+ [].
+
+truncate(String,unlimited) ->
+ String;
+truncate(String,Size) ->
+ Length = string:length(String),
+ if Length>Size ->
+ case lists:reverse(lists:flatten(String)) of
+ [$\n|_] ->
+ string:slice(String,0,Size-4)++"...\n";
+ _ ->
+ string:slice(String,0,Size-3)++"..."
+ end;
+ true ->
+ String
+ end.
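+
+%% For example (made-up input): truncate("abcdef",5) returns "ab...",
+%% and truncate("abcdef",unlimited) returns the string unchanged.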
+
+format_time(Timestamp,Config) when is_integer(Timestamp) ->
+ {Date,Time,Micro} = timestamp_to_datetimemicro(Timestamp,Config),
+ format_time(Date,Time,Micro);
+format_time(Other,_Config) ->
+ %% E.g. a string
+ to_string(Other).
+
+format_time({Y,M,D},{H,Min,S},Micro) ->
+ io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w",
+ [Y,M,D,H,Min,S,Micro]).
+
+%% Assuming this is monotonic time in microseconds
+timestamp_to_datetimemicro(Timestamp,Config) when is_integer(Timestamp) ->
+ SysTime = Timestamp + erlang:time_offset(microsecond),
+ Micro = SysTime rem 1000000,
+ Sec = SysTime div 1000000,
+ UniversalTime = erlang:posixtime_to_universaltime(Sec),
+ {Date,Time} =
+ case Config of
+ #{utc:=true} -> UniversalTime;
+ _ -> erlang:universaltime_to_localtime(UniversalTime)
+ end,
+ {Date,Time,Micro}.
+
+format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_integer(A) ->
+ atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A);
+format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_list(A) ->
+ format_mfa({M,F,length(A)});
+format_mfa(MFA) ->
+ to_string(MFA).
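+
+%% For example: format_mfa({lists,map,2}) returns "lists:map/2"; a list
+%% argument, e.g. {lists,map,[F,L]}, is first reduced to its arity form.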
+
+maybe_add_legacy_header(Level,
+ #{time:=Timestamp}=Meta,
+ #{legacy_header:=true}=Config) ->
+ #{title:=Title}=MyMeta = add_legacy_title(Level,maps:get(?MODULE,Meta,#{})),
+ {{Y,Mo,D},{H,Mi,S},Micro} = timestamp_to_datetimemicro(Timestamp,Config),
+ Header = io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===",
+ [Title,D,month(Mo),Y,H,Mi,S,Micro,utcstr(Config)]),
+ Meta#{?MODULE=>MyMeta#{header=>Header}};
+maybe_add_legacy_header(_,Meta,_) ->
+ Meta.
+
+add_legacy_title(_Level,#{title:=_}=MyMeta) ->
+ MyMeta;
+add_legacy_title(Level,MyMeta) ->
+ Title = string:uppercase(atom_to_list(Level)) ++ " REPORT",
+ MyMeta#{title=>Title}.
+
+month(1) -> "Jan";
+month(2) -> "Feb";
+month(3) -> "Mar";
+month(4) -> "Apr";
+month(5) -> "May";
+month(6) -> "Jun";
+month(7) -> "Jul";
+month(8) -> "Aug";
+month(9) -> "Sep";
+month(10) -> "Oct";
+month(11) -> "Nov";
+month(12) -> "Dec".
+
+utcstr(#{utc:=true}) -> "UTC ";
+utcstr(_) -> "".
+
+add_default_config(#{utc:=_}=Config0) ->
+ Default =
+ #{legacy_header=>false,
+ single_line=>false,
+ chars_limit=>unlimited},
+ MaxSize = get_max_size(maps:get(max_size,Config0,false)),
+ Depth = get_depth(maps:get(depth,Config0,false)),
+ add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize,
+ depth=>Depth}));
+add_default_config(Config) ->
+ add_default_config(Config#{utc=>logger:get_utc_config()}).
+
+add_default_template(#{template:=_}=Config) ->
+ Config;
+add_default_template(Config) ->
+ Config#{template=>default_template(Config)}.
+
+default_template(#{legacy_header:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_HEADER;
+default_template(#{single_line:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_SINGLE;
+default_template(_) ->
+ ?DEFAULT_FORMAT_TEMPLATE.
+
+get_max_size(false) ->
+ logger:get_max_size();
+get_max_size(S) ->
+ max(10,S).
+
+get_depth(false) ->
+ logger:get_format_depth();
+get_depth(S) ->
+ max(5,S).
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
new file mode 100644
index 0000000000..7caad366ae
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.erl
@@ -0,0 +1,301 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_h_common).
+
+-include("logger_h_common.hrl").
+-include("logger_internal.hrl").
+
+-export([log_to_binary/2,
+ check_common_config/1,
+ call_cast_or_drop/2,
+ check_load/1,
+ limit_burst/1,
+ kill_if_choked/4,
+ flush_log_requests/0,
+ flush_log_requests/1,
+ handler_exit/2,
+ cancel_timer/1,
+ stop_or_restart/3,
+ overload_levels_ok/1,
+ error_notify/1,
+ info_notify/1]).
+
+%%%-----------------------------------------------------------------
+%%% Convert log data of any form to binary
+-spec log_to_binary(Log,Config) -> LogString when
+ Log :: logger:log(),
+ Config :: logger:config(),
+ LogString :: binary().
+log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) ->
+ do_log_to_binary(Log,Config);
+log_to_binary(#{msg:={report,_},meta:=Meta}=Log,Config) ->
+ DefaultReportCb = fun logger:format_otp_report/1,
+ do_log_to_binary(Log#{meta=>Meta#{report_cb=>DefaultReportCb}},Config);
+log_to_binary(Log,Config) ->
+ do_log_to_binary(Log,Config).
+
+do_log_to_binary(Log,Config) ->
+ {Formatter,FormatterConfig} = maps:get(formatter,Config),
+ String = try_format(Log,Formatter,FormatterConfig),
+ try unicode:characters_to_binary(String)
+ catch _:_ ->
+ ?LOG_INTERNAL(debug,[{formatter_error,Formatter},
+ {config,FormatterConfig},
+ {log,Log},
+ {bad_return_value,String}]),
+ <<"FORMATTER ERROR: bad_return_value">>
+ end.
+
+try_format(Log,Formatter,FormatterConfig) ->
+ try Formatter:format(Log,FormatterConfig)
+ catch
+ C:R:S ->
+ ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter},
+ {config,FormatterConfig},
+ {log,Log},
+ {reason,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}]),
+ case {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG} of
+ {Formatter,FormatterConfig} ->
+ "DEFAULT FORMATTER CRASHED";
+ {DefaultFormatter,DefaultConfig} ->
+ try_format(Log#{msg=>{"FORMATTER CRASH: ~tp",
+ [maps:get(msg,Log)]}},
+ DefaultFormatter,DefaultConfig)
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Check that the configuration term is valid
+check_common_config({toggle_sync_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({drop_new_reqs_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({flush_reqs_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({enable_burst_limit,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({burst_limit_size,N}) when is_integer(N) ->
+ valid;
+check_common_config({burst_window_time,N}) when is_integer(N) ->
+ valid;
+check_common_config({enable_kill_overloaded,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({handler_overloaded_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({handler_overloaded_mem,N}) when is_integer(N) ->
+ valid;
+check_common_config({handler_restart_after,NorA}) when is_integer(NorA);
+ NorA == never ->
+ valid;
+check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
+ NorA == no_repeat ->
+ valid;
+check_common_config(_) ->
+ invalid.
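+
+%% For example (made-up values): check_common_config({toggle_sync_qlen,20})
+%% returns 'valid', while an unknown option such as {some_unknown_opt,true}
+%% returns 'invalid'.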
+
+
+%%%-----------------------------------------------------------------
+%%% Overload Protection
+call_cast_or_drop(Name, Bin) ->
+ %% If the handler process is getting overloaded, the log request
+    %% will be synchronous instead of asynchronous (this slows down the
+    %% logging tempo of a process doing lots of logging). If the
+    %% handler is choked, drop mode is set and no request will be sent.
+ try ?get_mode(Name) of
+ async ->
+ gen_server:cast(Name, {log,Bin});
+ sync ->
+ try gen_server:call(Name, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of
+ %% if return value from call == dropped, the
+ %% message has been flushed by handler and should
+ %% therefore not be counted as dropped in stats
+ ok -> ok;
+ dropped -> ok
+ catch
+ _:{timeout,_} ->
+ ?observe(Name,{dropped,1})
+ end;
+ drop -> ?observe(Name,{dropped,1})
+ catch
+ %% if the ETS table doesn't exist (maybe because of a
+ %% handler restart), we can only drop the request
+ _:_ -> ?observe(Name,{dropped,1})
+ end,
+ ok.
+
+handler_exit(_Name, Reason) ->
+ exit(Reason).
+
+check_load(State = #{id:=Name, mode := Mode,
+ toggle_sync_qlen := ToggleSyncQLen,
+ drop_new_reqs_qlen := DropNewQLen,
+ flush_reqs_qlen := FlushQLen}) ->
+ {_,Mem} = process_info(self(), memory),
+ ?observe(Name,{max_mem,Mem}),
+ %% make sure the handler process doesn't get scheduled
+ %% out between the message_queue_len check below and the
+ %% action that follows (flush or write).
+ {_,QLen} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,QLen}),
+
+ {Mode1,_NewDrops,_NewFlushes} =
+ if
+ QLen >= FlushQLen ->
+ {flush, 0,1};
+ QLen >= DropNewQLen ->
+ %% Note that drop mode will force log requests to
+               %% be dropped on the client side (never sent to
+ %% the handler).
+ IncDrops = if Mode == drop -> 0; true -> 1 end,
+ {?change_mode(Name, Mode, drop), IncDrops,0};
+ QLen >= ToggleSyncQLen ->
+ {?change_mode(Name, Mode, sync), 0,0};
+ true ->
+ {?change_mode(Name, Mode, async), 0,0}
+ end,
+ State1 = ?update_other(drops,DROPS,_NewDrops,State),
+ {Mode1, QLen, Mem,
+ ?update_other(flushes,FLUSHES,_NewFlushes,
+ State1#{last_qlen => QLen})}.
+
+limit_burst(#{enable_burst_limit := false}) ->
+ {true,0,0};
+limit_burst(#{burst_win_ts := BurstWinT0,
+ burst_msg_count := BurstMsgCount,
+ burst_window_time := BurstWinTime,
+ burst_limit_size := BurstLimitSz}) ->
+ if (BurstMsgCount >= BurstLimitSz) ->
+ %% the limit for allowed messages has been reached
+ BurstWinT1 = ?timestamp(),
+ case ?diff_time(BurstWinT1,BurstWinT0) of
+ BurstCheckTime when BurstCheckTime < (BurstWinTime*1000) ->
+ %% we're still within the burst time frame
+ {false,BurstWinT0,BurstMsgCount};
+ _BurstCheckTime ->
+ %% burst time frame passed, reset counters
+ {true,BurstWinT1,0}
+ end;
+ true ->
+ %% the limit for allowed messages not yet reached
+ {true,BurstWinT0,BurstMsgCount+1}
+ end.
+
+kill_if_choked(Name, QLen, Mem,
+ #{enable_kill_overloaded := KillIfOL,
+ handler_overloaded_qlen := HOLQLen,
+ handler_overloaded_mem := HOLMem}) ->
+ if KillIfOL andalso
+ ((QLen > HOLQLen) orelse (Mem > HOLMem)) ->
+ handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
+ true ->
+ ok
+ end.
+
+flush_log_requests() ->
+ flush_log_requests(-1).
+
+flush_log_requests(Limit) ->
+ process_flag(priority, high),
+ Flushed = flush_log_requests(0, Limit),
+ process_flag(priority, normal),
+ Flushed.
+
+flush_log_requests(Limit, Limit) ->
+ Limit;
+flush_log_requests(N, Limit) ->
+ %% flush log requests but leave other requests, such as
+ %% file/disk_log_sync, info and change_config, so that these
+ %% have a chance to be processed even under heavy load
+ receive
+ {'$gen_cast',{log,_}} ->
+ flush_log_requests(N+1, Limit);
+ {'$gen_call',{Pid,MRef},{log,_}} ->
+ Pid ! {MRef, dropped},
+ flush_log_requests(N+1, Limit)
+ after
+ 0 -> N
+ end.
+
+cancel_timer(TRef) when is_atom(TRef) -> ok;
+cancel_timer(TRef) -> timer:cancel(TRef).
+
+
+stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
+ #{handler_restart_after := RestartAfter}) ->
+ %% If we're terminating because of an overload situation (see
+ %% logger_h_common:kill_if_choked/4), we need to remove the handler
+ %% and set a restart timer. A separate process must perform this
+ %% in order to avoid deadlock.
+ HandlerPid = self(),
+ RemoveAndRestart =
+ fun() ->
+ MRef = erlang:monitor(process, HandlerPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after 30000 ->
+ error_notify(Reason),
+ exit(HandlerPid, kill)
+ end,
+ case logger:get_handler_config(Name) of
+ {ok,{HMod,HConfig}} when is_integer(RestartAfter) ->
+ _ = logger:remove_handler(Name),
+ _ = timer:apply_after(RestartAfter, logger, add_handler,
+ [Name,HMod,HConfig]);
+ {ok,_} ->
+ _ = logger:remove_handler(Name);
+ {error,CfgReason} when is_integer(RestartAfter) ->
+ error_notify({Name,restart_impossible,CfgReason});
+ {error,_} ->
+ ok
+ end
+ end,
+ spawn(RemoveAndRestart),
+ ok;
+
+stop_or_restart(Name, shutdown, _State) ->
+ %% Probably terminated by supervisor. Remove the handler to avoid
+ %% error printouts due to failing handler.
+ _ = case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% Spawning to avoid deadlock
+ spawn(logger,remove_handler,[Name]);
+ _ ->
+ ok
+ end,
+ ok;
+
+stop_or_restart(_Name, _Reason, _State) ->
+ ok.
+
+overload_levels_ok(HandlerConfig) ->
+ TSQL = maps:get(toggle_sync_qlen, HandlerConfig, ?TOGGLE_SYNC_QLEN),
+ DNRQL = maps:get(drop_new_reqs_qlen, HandlerConfig, ?DROP_NEW_REQS_QLEN),
+ FRQL = maps:get(flush_reqs_qlen, HandlerConfig, ?FLUSH_REQS_QLEN),
+ (TSQL < DNRQL) andalso (DNRQL < FRQL).
+
+error_notify(Term) ->
+ ?internal_log(error, Term).
+
+info_notify(Term) ->
+ ?internal_log(info, Term).
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
new file mode 100644
index 0000000000..89378dbb10
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -0,0 +1,262 @@
+
+%%%-----------------------------------------------------------------
+%%% Overload protection configuration
+
+%%! *** NOTE ***
+%%! It's important that:
+%%! TOGGLE_SYNC_QLEN < DROP_NEW_REQS_QLEN < FLUSH_REQS_QLEN
+%%! and that DROP_NEW_REQS_QLEN >= 2.
+%%! Otherwise the handler could end up in drop mode with no new
+%%! log requests to process. This would cause all future requests
+%%! to be dropped (no switch to async mode would ever take place).
+
+%% This specifies the message_queue_len value where the log
+%% requests switch from asynchronous casts to synchronous calls.
+-define(TOGGLE_SYNC_QLEN, 10).
+%% Above this message_queue_len, log requests will be dropped,
+%% i.e. no log requests get sent to the handler process.
+-define(DROP_NEW_REQS_QLEN, 200).
+%% Above this message_queue_len, the handler process will flush
+%% its mailbox and only leave this number of messages in it.
+-define(FLUSH_REQS_QLEN, 1000).
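+
+%% For illustration only (the values below are made up, not defaults):
+%% these thresholds can be overridden per handler via the handler
+%% specific sub map of the handler config, and must keep the ordering
+%% noted above, e.g.
+%%   #{toggle_sync_qlen => 100,
+%%     drop_new_reqs_qlen => 1000,
+%%     flush_reqs_qlen => 2000}
+%% (the ordering is checked by logger_h_common:overload_levels_ok/1).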
+
+%% Never flush more than this number of messages in one go,
+%% or the handler will be unresponsive for seconds (keep this
+%% number as large as possible or the mailbox could grow large).
+-define(FLUSH_MAX_N, 5000).
+
+%% BURST_LIMIT is the max number of log requests allowed to be
+%% written within a BURST_WINDOW_TIME time frame.
+-define(ENABLE_BURST_LIMIT, true).
+-define(BURST_LIMIT_SIZE, 500).
+-define(BURST_WINDOW_TIME, 1000).
+
+%% This enables/disables the feature to automatically get the
+%% handler terminated if it gets too loaded (and can't keep up).
+-define(ENABLE_KILL_OVERLOADED, false).
+%% If the message_queue_len goes above this size even after
+%% flushing has been performed, the handler is terminated.
+-define(HANDLER_OVERLOADED_QLEN, 20000).
+%% If the memory usage exceeds this level, the handler is terminated.
+-define(HANDLER_OVERLOADED_MEM, 3000000).
+
+%% This is the default time that the handler will wait before
+%% restarting and accepting new requests. The value 'never'
+%% disables restarts.
+-define(HANDLER_RESTART_AFTER, 5000).
+%%-define(HANDLER_RESTART_AFTER, never).
+
+%% The handler sends asynchronous write requests to the process
+%% controlling the i/o device, but at this interval the write
+%% request is made synchronous, so that the i/o device
+%% process doesn't get overloaded. This gives the handler time
+%% to keep up with its mailbox in overload situations, even if
+%% the i/o is slow.
+-define(CONTROLLER_SYNC_INTERVAL, 20).
+%% The handler will not perform a file sync operation if the
+%% mailbox size is greater than this number. This is to ensure
+%% the handler process doesn't get overloaded while waiting for
+%% an expensive file sync operation to finish.
+-define(FILESYNC_OK_QLEN, 2).
+%% Do a file/disk_log sync operation every integer() millisec
+%% (if necessary) or set to 'no_repeat' to only do file sync when
+%% the handler is idle. Note that file sync is not guaranteed to
+%% happen automatically if this operation is disabled.
+-define(FILESYNC_REPEAT_INTERVAL, 5000).
+%%-define(FILESYNC_REPEAT_INTERVAL, no_repeat).
+
+%% This is the time since the last received message after which we
+%% assume that the handler has an empty mailbox (no new log request has
+%% come in).
+-define(IDLE_DETECT_TIME_MSEC, 100).
+-define(IDLE_DETECT_TIME_USEC, 100000).
+
+%% Default disk log option values
+-define(DISK_LOG_TYPE, wrap).
+-define(DISK_LOG_MAX_NO_FILES, 10).
+-define(DISK_LOG_MAX_NO_BYTES, 1048576).
+
+%%%-----------------------------------------------------------------
+%%% Overload protection macros
+
+-define(timestamp(), erlang:monotonic_time(microsecond)).
+
+-define(get_mode(HandlerName),
+ case ets:lookup(HandlerName, mode) of
+ [{mode,sync}] ->
+ case whereis(HandlerName)==self() of
+ true -> async;
+ _ -> sync
+ end;
+ [{mode,M}] -> M;
+ _ -> async
+ end).
+
+-define(set_mode(HandlerName, M),
+ begin ets:insert(HandlerName, {mode,M}), M end).
+
+-define(change_mode(HandlerName, M0, M1),
+ if M0 == M1 ->
+ M0;
+ true ->
+ ets:insert(HandlerName, {mode,M1}),
+ M1
+ end).
+
+-define(min(X1, X2),
+ if X2 == undefined -> X1;
+ X2 < X1 -> X2;
+ true -> X1
+ end).
+
+-define(max(X1, X2),
+ if
+ X2 == undefined -> X1;
+ X2 > X1 -> X2;
+ true -> X1
+ end).
+
+-define(diff_time(OS_T1, OS_T0), OS_T1-OS_T0).
+
+%%%-----------------------------------------------------------------
+%%% The test hook macros make it possible to observe and manipulate
+%%% internal handler functionality. When enabled, these macros will
+%%% slow down execution and therefore should not be included in code
+%%% to be officially released.
+
+%% -define(TEST_HOOKS, true).
+-ifdef(TEST_HOOKS).
+ -define(TEST_HOOKS_TAB, logger_h_test_hooks).
+
+ -define(init_test_hooks(),
+ _ = case ets:whereis(?TEST_HOOKS_TAB) of
+ undefined -> ets:new(?TEST_HOOKS_TAB, [named_table,public]);
+ _ -> ok
+ end,
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(set_internal_log(MOD_FUNC),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,MOD_FUNC})).
+
+ -define(set_result(OPERATION, RESULT),
+ ets:insert(?TEST_HOOKS_TAB, {OPERATION,RESULT})).
+
+ -define(set_defaults(),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(internal_log(TYPE, TERM),
+ try ets:lookup(?TEST_HOOKS_TAB, internal_log) of
+ [{_,{LMOD,LFUNC}}] -> apply(LMOD, LFUNC, [TYPE,TERM]);
+ _ -> logger:internal_log(TYPE, TERM)
+ catch _:_ -> logger:internal_log(TYPE, TERM) end).
+
+ -define(file_write(DEVICE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, file_write) of
+ [{_,ok}] -> file:write(DEVICE, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:write(DEVICE, DATA) end).
+
+ -define(file_datasync(DEVICE),
+ try ets:lookup(?TEST_HOOKS_TAB, file_datasync) of
+ [{_,ok}] -> file:datasync(DEVICE);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:datasync(DEVICE) end).
+
+ -define(disk_log_blog(LOG, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of
+ [{_,ok}] -> disk_log:blog(LOG, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:blog(LOG, DATA) end).
+
+ -define(disk_log_sync(LOG),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of
+ [{_,ok}] -> disk_log:sync(LOG);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:sync(LOG) end).
+
+ -define(DEFAULT_CALL_TIMEOUT, 5000).
+
+-else. % DEFAULTS!
+ -define(TEST_HOOKS_TAB, undefined).
+ -define(init_test_hooks(), ok).
+ -define(set_internal_log(_MOD_FUNC), ok).
+ -define(set_result(_OPERATION, _RESULT), ok).
+ -define(set_defaults(), ok).
+ -define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)).
+ -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
+ -define(file_datasync(DEVICE), file:datasync(DEVICE)).
+ -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)).
+ -define(disk_log_sync(LOG), disk_log:sync(LOG)).
+ -define(DEFAULT_CALL_TIMEOUT, 10000).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable statistics counters in the handler state,
+%%% which are useful for analysing the overload protection
+%%% behaviour. The counters should not be included in code that is
+%%% officially released (some of them grow very large over time).
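+%%%
+%%% With ?SAVE_STATS enabled, the counters are merged into the handler
+%%% state and can be inspected at runtime via the handler's info call,
+%%% for example (the handler id is illustrative):
+%%%
+%%%   #{flushes := F, drops := D} = logger_std_h:info(my_std_h).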
+
+%%-define(SAVE_STATS, true).
+-ifdef(SAVE_STATS).
+ -define(merge_with_stats(STATE),
+ STATE#{flushes => 0, flushed => 0, drops => 0,
+ casts => 0, calls => 0,
+ max_qlen => 0, max_time => 0}).
+
+ -define(update_max_qlen(QLEN, STATE),
+ begin #{max_qlen := QLEN0} = STATE,
+ STATE#{max_qlen => ?max(QLEN0,QLEN)} end).
+
+ -define(update_calls_or_casts(CALL_OR_CAST, INC, STATE),
+ case CALL_OR_CAST of
+ cast ->
+ #{casts := CASTS0} = STATE,
+ STATE#{casts => CASTS0+INC};
+ call ->
+ #{calls := CALLS0} = STATE,
+ STATE#{calls => CALLS0+INC}
+ end).
+
+ -define(update_max_time(TIME, STATE),
+ begin #{max_time := TIME0} = STATE,
+ STATE#{max_time => ?max(TIME0,TIME)} end).
+
+ -define(update_other(OTHER, VAR, INCVAL, STATE),
+ begin #{OTHER := VAR} = STATE,
+ STATE#{OTHER => VAR+INCVAL} end).
+
+-else. % DEFAULT!
+ -define(merge_with_stats(STATE), STATE).
+ -define(update_max_qlen(_QLEN, STATE), STATE).
+ -define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE).
+ -define(update_max_time(_TIME, STATE), STATE).
+ -define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable callbacks that make it possible to analyse
+%%% the overload protection behaviour from outside the handler
+%%% process (including dropped requests on the client side).
+%%% An external callback module (?OBSERVER_MOD) is required which
+%%% is not part of the kernel application. For this reason, these
+%%% callbacks should not be included in code to be officially released.
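+%%%
+%%% Minimal observer sketch (hedged; the module name is whatever
+%%% ?OBSERVER_MOD is set to, e.g. the 'logger_test' hinted at below,
+%%% and it must export the two functions used by the macros):
+%%%
+%%%   -module(logger_test).
+%%%   -export([start_observation/1, observe/2]).
+%%%   start_observation(_HandlerName) -> ok.
+%%%   observe(_HandlerName, _Event) -> ok.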
+
+%%-define(OBSERVER_MOD, logger_test).
+-ifdef(OBSERVER_MOD).
+  -define(start_observation(NAME), ?OBSERVER_MOD:start_observation(NAME)).
+  -define(observe(NAME,EVENT), ?OBSERVER_MOD:observe(NAME,EVENT)).
+
+-else. % DEFAULT!
+ -define(start_observation(_NAME), ok).
+ -define(observe(_NAME,_EVENT), ok).
+-endif.
+%%! <---
diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl
new file mode 100644
index 0000000000..82df499c2b
--- /dev/null
+++ b/lib/kernel/src/logger_internal.hrl
@@ -0,0 +1,98 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-include_lib("kernel/include/logger.hrl").
+-define(LOGGER_TABLE,logger).
+-define(LOGGER_KEY,'$logger_config$').
+-define(HANDLER_KEY,'$handler_config$').
+-define(LOGGER_META_KEY,'$logger_metadata$').
+-define(STANDARD_HANDLER, logger_std_h).
+-define(DEFAULT_HANDLER_FILTERS,
+ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp])).
+-define(DEFAULT_HANDLER_FILTERS(Domain),
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,prefix_of,Domain}}},
+ {no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]).
+-define(DEFAULT_FORMATTER,logger_formatter).
+-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true,
+ template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}).
+-define(DEFAULT_FORMAT_TEMPLATE_HEADER,
+ [{logger_formatter,header},"\n",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE_SINGLE,
+ [time," ",level,": ",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE,
+ [time," ",level,":\n",msg,"\n"]).
+
+-define(DEFAULT_LOGGER_CALL_TIMEOUT, infinity).
+
+-define(LOG_INTERNAL(Level,Report),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ %% Spawn this to avoid deadlocks
+ _ = spawn(logger,macro_log,[?LOCATION,Level,Report,
+ logger:add_default_metadata(#{})]),
+ ok;
+ false ->
+ ok
+ end).
+
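+%% Hedged usage sketch (the report map below is illustrative only; any
+%% term accepted as a logger report can be passed):
+%%
+%%   ?LOG_INTERNAL(info, #{label=>example, reason=>illustration})
+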
+%%%-----------------------------------------------------------------
+%%% Levels
+%%% Using the same levels as syslog
+-define(LEVELS,[emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug]).
+-define(EMERGENCY,0).
+-define(ALERT,1).
+-define(CRITICAL,2).
+-define(ERROR,3).
+-define(WARNING,4).
+-define(NOTICE,5).
+-define(INFO,6).
+-define(DEBUG,7).
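+
+%% A lower integer value means higher severity: ?EMERGENCY (0) is the
+%% most severe and ?DEBUG (7) the least severe, so a handler configured
+%% with level error also receives emergency, alert and critical events.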
+
+-define(IS_LEVEL(L),
+ (L=:=emergency orelse
+ L=:=alert orelse
+ L=:=critical orelse
+ L=:=error orelse
+ L=:=warning orelse
+ L=:=notice orelse
+ L=:=info orelse
+ L=:=debug)).
+
+-define(IS_MSG(Msg),
+ ((is_tuple(Msg) andalso tuple_size(Msg)==2)
+ andalso
+ (is_list(element(1,Msg)) andalso is_list(element(2,Msg)))
+ orelse
+ (element(1,Msg)==report andalso ?IS_REPORT(element(2,Msg)))
+ orelse
+ (element(1,Msg)==string andalso ?IS_STRING(element(2,Msg))))).
+
+-define(IS_REPORT(Report),
+ (is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report))))).
+
+-define(IS_STRING(String),
+ (is_list(String) orelse is_binary(String))).
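+
+%% Examples of msg terms accepted by ?IS_MSG/1 (values are illustrative):
+%%   {"format string ~p", [args]}       format string with argument list
+%%   {report, #{label => example}}      report as a map or key-value list
+%%   {string, <<"already formatted">>}  preformatted string or binary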
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
new file mode 100644
index 0000000000..6ef3b8582a
--- /dev/null
+++ b/lib/kernel/src/logger_server.erl
@@ -0,0 +1,440 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_server).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0,
+ add_handler/3, remove_handler/1,
+ add_filter/2, remove_filter/2,
+ set_module_level/2, reset_module_level/1,
+ cache_module_level/1,
+ set_config/2, set_config/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2]).
+
+-include("logger_internal.hrl").
+
+-define(SERVER, logger).
+
+-record(state, {tid}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+add_handler(Id,Module,Config0) ->
+ case sanity_check(logger,handlers,[Id]) of
+ ok ->
+ try check_mod(Module) of
+ ok ->
+ case sanity_check(Id,Config0) of
+ ok ->
+ Default = default_config(Id),
+ Config = maps:merge(Default,Config0),
+ call({add_handler,Id,Module,Config});
+ Error ->
+ Error
+ end
+ catch throw:Error ->
+ {error,Error}
+ end;
+ Error ->
+ Error
+ end.
+
+remove_handler(HandlerId) ->
+ call({remove_handler,HandlerId}).
+
+add_filter(Owner,Filter) ->
+ case sanity_check(Owner,filters,[Filter]) of
+ ok -> call({add_filter,Owner,Filter});
+ Error -> Error
+ end.
+
+remove_filter(Owner,FilterId) ->
+ call({remove_filter,Owner,FilterId}).
+
+set_module_level(Module,Level) when is_atom(Module) ->
+ case sanity_check(logger,level,Level) of
+ ok -> call({set_module_level,Module,Level});
+ Error -> Error
+ end;
+set_module_level(Module,_) ->
+ {error,{not_a_module,Module}}.
+
+reset_module_level(Module) when is_atom(Module) ->
+ call({reset_module_level,Module});
+reset_module_level(Module) ->
+ {error,{not_a_module,Module}}.
+
+cache_module_level(Module) ->
+ gen_server:cast(?SERVER,{cache_module_level,Module}).
+
+
+set_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok -> call({update_config,Owner,#{Key=>Value}});
+ Error -> Error
+ end.
+
+set_config(Owner,Config0) ->
+ case sanity_check(Owner,Config0) of
+ ok ->
+ Config = maps:merge(default_config(Owner),Config0),
+ call({set_config,Owner,Config});
+ Error ->
+ Error
+ end.
+
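+%% Hedged usage sketch. These functions are normally reached through the
+%% logger API rather than called directly; the handler id and config
+%% values below are illustrative only:
+%%
+%%   ok = logger_server:add_handler(my_h, logger_std_h, #{level => info}),
+%%   ok = logger_server:set_config(my_h, level, error),
+%%   ok = logger_server:remove_handler(my_h).
+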
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([]) ->
+ process_flag(trap_exit, true),
+ Tid = logger_config:new(?LOGGER_TABLE),
+ LoggerConfig = maps:merge(default_config(logger),
+ #{handlers=>[logger_simple]}),
+ logger_config:create(Tid,logger,LoggerConfig),
+ SimpleConfig0 = maps:merge(default_config(logger_simple),
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS,
+ logger_simple=>#{buffer=>true}}),
+ %% If this fails, then the node should crash
+ {ok,SimpleConfig} =
+ logger_simple:adding_handler(logger_simple,SimpleConfig0),
+ logger_config:create(Tid,logger_simple,logger_simple,SimpleConfig),
+ {ok, #state{tid=Tid}}.
+
+handle_call({add_handler,Id,Module,HConfig}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:exist(Tid,Id) of
+ true ->
+ {error,{already_exist,Id}};
+ false ->
+ %% inform the handler
+ case call_h(Module,adding_handler,[Id,HConfig],{ok,HConfig}) of
+ {ok,HConfig1} ->
+ logger_config:create(Tid,Id,Module,HConfig1),
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers = maps:get(handlers,Config,[]),
+ do_set_config(Tid,logger,
+ Config#{handlers=>[Id|Handlers]}),
+ ok;
+ {error,HReason} ->
+ {error,{handler_not_added,HReason}}
+ end
+ end,
+ {reply,Reply,State};
+handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,_}} ->
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers0 = maps:get(handlers,Config,[]),
+ Handlers = lists:delete(HandlerId,Handlers0),
+ %% inform the handler
+ _ = call_h(Module,removing_handler,[HandlerId],ok),
+ do_set_config(Tid,logger,Config#{handlers=>Handlers}),
+ logger_config:delete(Tid,HandlerId),
+ ok;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
+ Reply = do_add_filter(Tid,Id,Filter),
+ {reply,Reply,State};
+handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
+ Reply = do_remove_filter(Tid,Id,FilterId),
+ {reply,Reply,State};
+handle_call({update_config,Id,NewConfig}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,Id) of
+ {ok,{Module,OldConfig}} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ case call_h(Module,changing_config,[Id,OldConfig,Config],
+ {ok,Config}) of
+ {ok,Config1} ->
+ do_set_config(Tid,Id,Config1);
+ Error ->
+ Error
+ end;
+ {ok,OldConfig} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ do_set_config(Tid,Id,Config);
+ Error ->
+ Error
+ end,
+ {reply,Reply,State};
+handle_call({set_config,logger,Config}, _From, #state{tid=Tid}=State) ->
+ Reply = do_set_config(Tid,logger,Config),
+ {reply,Reply,State};
+handle_call({set_config,HandlerId,Config}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,OldConfig}} ->
+ case call_h(Module,changing_config,[HandlerId,OldConfig,Config],
+ {ok,Config}) of
+ {ok,Config1} ->
+ do_set_config(Tid,HandlerId,Config1);
+ Error ->
+ Error
+ end;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({set_module_level,Module,Level}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set_module_level(Tid,Module,Level),
+ {reply,Reply,State};
+handle_call({reset_module_level,Module}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:reset_module_level(Tid,Module),
+ {reply,Reply,State}.
+
+handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) ->
+ logger_config:cache_module_level(Tid,Module),
+ {noreply, State}.
+
+%% Interface for those who can't call the API - e.g. the emulator, or
+%% places related to code loading.
+%%
+%% These can also be log events from remote nodes, sent from
+%% logger.erl when the group leader of the client process resides on a
+%% different node than the client process itself.
+handle_info({log,Level,Format,Args,Meta}, State) ->
+ logger:log(Level,Format,Args,Meta),
+ {noreply, State};
+handle_info({log,Level,Report,Meta}, State) ->
+ logger:log(Level,Report,Meta),
+ {noreply, State};
+handle_info({Ref,_Reply},State) when is_reference(Ref) ->
+ %% Assuming this is a timed-out gen_server reply - ignoring
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+call(Request) ->
+ case whereis(?SERVER) of
+ Pid when Pid==self() ->
+ {error,{attempting_syncronous_call_to_self,Request}};
+ _ ->
+ gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
+ end.
+
+do_add_filter(Tid,Id,{FId,_} = Filter) ->
+ case do_get_config(Tid,Id) of
+ {ok,Config} ->
+ Filters = maps:get(filters,Config,[]),
+ case lists:keymember(FId,1,Filters) of
+ true ->
+ {error,{already_exist,FId}};
+ false ->
+ do_set_config(Tid,Id,Config#{filters=>[Filter|Filters]})
+ end;
+ Error ->
+ Error
+ end.
+
+do_remove_filter(Tid,Id,FilterId) ->
+ case do_get_config(Tid,Id) of
+ {ok,Config} ->
+ Filters0 = maps:get(filters,Config,[]),
+ case lists:keytake(FilterId,1,Filters0) of
+ {value,_,Filters} ->
+ do_set_config(Tid,Id,Config#{filters=>Filters});
+ false ->
+ {error,{not_found,FilterId}}
+ end;
+ Error ->
+ Error
+ end.
+
+do_get_config(Tid,Id) ->
+ case logger_config:get(Tid,Id) of
+ {ok,{_,Config}} ->
+ {ok,Config};
+ {ok,Config} ->
+ {ok,Config};
+ Error ->
+ Error
+ end.
+
+do_set_config(Tid,Id,Config) ->
+ logger_config:set(Tid,Id,Config),
+ ok.
+
+default_config(logger) ->
+ #{level=>info,
+ filters=>[],
+ filter_default=>log,
+ handlers=>[]};
+default_config(_) ->
+ #{level=>info,
+ filters=>[],
+ filter_default=>log,
+ formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}.
+
+sanity_check(Owner,Key,Value) ->
+ sanity_check_1(Owner,[{Key,Value}]).
+
+sanity_check(HandlerId,Config) when is_map(Config) ->
+ sanity_check_1(HandlerId,maps:to_list(Config));
+sanity_check(_,Config) ->
+ {error,{invalid_handler_config,Config}}.
+
+sanity_check_1(Owner,Config) when is_list(Config) ->
+ try
+ Type = get_type(Owner),
+ check_config(Type,Config)
+ catch throw:Error -> {error,Error}
+ end.
+
+get_type(logger) ->
+ logger;
+get_type(Id) ->
+ check_id(Id),
+ handler.
+
+check_config(Owner,[{level,Level}|Config]) ->
+ check_level(Level),
+ check_config(Owner,Config);
+check_config(logger,[{handlers,Handlers}|Config]) ->
+ check_handlers(Handlers),
+ check_config(logger,Config);
+check_config(Owner,[{filters,Filters}|Config]) ->
+ check_filters(Filters),
+ check_config(Owner,Config);
+check_config(Owner,[{filter_default,FD}|Config]) ->
+ check_filter_default(FD),
+ check_config(Owner,Config);
+check_config(handler,[{formatter,Formatter}|Config]) ->
+ check_formatter(Formatter),
+ check_config(handler,Config);
+check_config(logger,[C|_]) ->
+ throw({invalid_logger_config,C});
+check_config(handler,[{_,_}|Config]) ->
+ %% Arbitrary config elements are allowed for handlers
+ check_config(handler,Config);
+check_config(_,[]) ->
+ ok.
+
+check_id(Id) when is_atom(Id) ->
+ ok;
+check_id(Id) ->
+ throw({invalid_id,Id}).
+
+check_mod(Mod) when is_atom(Mod) ->
+ ok;
+check_mod(Mod) ->
+ throw({invalid_module,Mod}).
+
+check_level({LevelInt,cached}) when LevelInt>=?EMERGENCY, LevelInt=<?DEBUG ->
+ ok;
+check_level(Level) ->
+ case lists:member(Level,?LEVELS) of
+ true ->
+ ok;
+ false ->
+ throw({invalid_level,Level})
+ end.
+
+check_handlers([Id|Handlers]) ->
+ check_id(Id),
+ check_handlers(Handlers);
+check_handlers([]) ->
+ ok;
+check_handlers(Handlers) ->
+ throw({invalid_handlers,Handlers}).
+
+check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) ->
+ check_filters(Filters);
+check_filters([Filter|_]) ->
+ throw({invalid_filter,Filter});
+check_filters([]) ->
+ ok;
+check_filters(Filters) ->
+ throw({invalid_filters,Filters}).
+
+check_filter_default(FD) when FD==stop; FD==log ->
+ ok;
+check_filter_default(FD) ->
+ throw({invalid_filter_default,FD}).
+
+check_formatter({logger_formatter,Config}) when is_map(Config) ->
+ check_logger_formatter_config(maps:to_list(Config));
+check_formatter({logger_formatter,Config}) ->
+ throw({invalid_formatter_config,Config});
+check_formatter({Mod,_}) ->
+ %% no knowledge of other formatters
+ check_mod(Mod);
+check_formatter(Formatter) ->
+ throw({invalid_formatter,Formatter}).
+
+
+check_logger_formatter_config([{template,T}|Config]) when is_list(T) ->
+ case lists:all(fun(X) when is_atom(X) -> true;
+ (X) when is_tuple(X), is_atom(element(1,X)) -> true;
+ (X) when is_list(X) -> io_lib:printable_unicode_list(X);
+ (_) -> false
+ end,
+ T) of
+ true ->
+ check_logger_formatter_config(Config);
+ false ->
+ throw({invalid_formatter_template,T})
+ end;
+check_logger_formatter_config([{legacy_header,LH}|Config]) when is_boolean(LH) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([{single_line,SL}|Config]) when is_boolean(SL) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([{utc,Utc}|Config]) when is_boolean(Utc) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([C|_]) ->
+ throw({invalid_formatter_config,C});
+check_logger_formatter_config([]) ->
+ ok.
+
+call_h(Module, Function, Args, DefRet) ->
+ %% Not calling code:ensure_loaded + erlang:function_exported here,
+ %% since in some rare terminal cases, the code_server might not
+ %% exist and we'll get a deadlock in removing the handler.
+ try apply(Module, Function, Args)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Module,Function,Args,_}|_]} ->
+ DefRet;
+ _ ->
+ {error,{callback_crashed,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}}
+ end
+ end.
diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple.erl
new file mode 100644
index 0000000000..23ff6ccd2e
--- /dev/null
+++ b/lib/kernel/src/logger_simple.erl
@@ -0,0 +1,236 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple).
+
+-export([adding_handler/2, removing_handler/1, log/2]).
+-export([get_buffer/0]).
+
+%% This module implements a simple handler for logger. It is the
+%% default handler used during system start.
+
+%%%-----------------------------------------------------------------
+%%% API
+get_buffer() ->
+ case whereis(?MODULE) of
+ undefined ->
+ {error,noproc};
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ Pid ! {get_buffer,self()},
+ receive
+ {buffer,Buffer} ->
+ erlang:demonitor(Ref,[flush]),
+ {ok,Buffer};
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason}
+ end
+ end.
+
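+%% Hedged sketch of how the buffered events can be handed over to a
+%% permanent handler once it has been started (the standard handler's
+%% swap_buffer/2 is used as an example):
+%%
+%%   {ok,Buffer} = logger_simple:get_buffer(),
+%%   logger_std_h:swap_buffer(logger_std_h, Buffer).
+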
+%%%-----------------------------------------------------------------
+%%% Logger callback
+
+adding_handler(?MODULE,Config) ->
+ Me = self(),
+ case whereis(?MODULE) of
+ undefined ->
+ {Pid,Ref} = spawn_opt(fun() -> init(Me,Config) end,
+ [link,monitor,{message_queue_data,off_heap}]),
+ receive
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason};
+ {Pid,started} ->
+ erlang:demonitor(Ref),
+ {ok,Config}
+ end;
+ _ ->
+ {error,{handler_process_name_already_exists,?MODULE}}
+ end.
+
+removing_handler(?MODULE) ->
+ case whereis(?MODULE) of
+ undefined ->
+ ok;
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ unlink(Pid),
+ Pid ! stop,
+ receive {'DOWN',Ref,process,Pid,_} ->
+ ok
+ end
+ end.
+
+log(#{meta:=#{error_logger:=#{tag:=info_report,type:=Type}}},_Config)
+ when Type=/=std_info ->
+    %% Skip info reports that are not 'std_info' (same behaviour as
+    %% the simple logger in error_logger)
+ ok;
+log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) ->
+ _ = case whereis(?MODULE) of
+ undefined ->
+ %% Is the node on the way down? Real emergency?
+ %% Log directly from client just to get it out
+ do_log(
+ #{level=>error,
+ msg=>{report,{error,simple_handler_process_dead}},
+ meta=>#{time=>erlang:monotonic_time(microsecond)}}),
+ do_log(Log);
+ _ ->
+ ?MODULE ! {log,Log}
+ end,
+ ok;
+log(_,_) ->
+ %% Unexpected log.
+ %% We don't want to crash the simple logger, so ignore this.
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Process
+init(Starter,Config) ->
+ register(?MODULE,self()),
+ Starter ! {self(),started},
+ BufferSize =
+ case Config of
+ #{?MODULE:=#{buffer:=true}} ->
+ 10;
+ _ ->
+ infinity
+ end,
+ loop(#{buffer_size=>BufferSize,dropped=>0,buffer=>[]},infinity).
+
+loop(Buffer,Timeout) ->
+ receive
+ stop ->
+ ok;
+ {get_buffer,From} ->
+ loop(Buffer#{send_to=>From},0);
+ {log,#{msg:=_,meta:=#{time:=_}}=Log} ->
+ do_log(Log),
+ loop(update_buffer(Buffer,Log),Timeout);
+ _ ->
+ %% Unexpected message - flush it!
+ loop(Buffer,Timeout)
+ after Timeout ->
+ #{dropped:=D,buffer:=B,send_to:=Pid} = Buffer,
+ LogList = lists:reverse(B) ++ drop_msg(D),
+ Pid ! {buffer,LogList},
+ loop(Buffer#{buffer_size=>infinity,
+ dropped=>0,
+ buffer=>[],
+ send_to=>false},
+ infinity)
+ end.
+
+update_buffer(#{buffer_size:=infinity}=Buffer,_Log) ->
+ Buffer;
+update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) ->
+ Buffer#{dropped=>D+1};
+update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) ->
+ Buffer#{buffer_size=>S-1,buffer=>[Log|B]}.
+
+drop_msg(0) ->
+ [];
+drop_msg(N) ->
+ [#{level=>info,
+ msg=>{"Simple handler buffer full, dropped ~w messages",[N]},
+ meta=>#{time=>erlang:monotonic_time(microsecond)}}].
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+%% Cannot use io_lib:format here; output is written with the
+%% erlang:display* BIFs so that logging works even before the io
+%% system is available.
+
+do_log(#{msg:={report,Report},
+ meta:=#{time:=T,error_logger:=#{type:=Type}}}) ->
+ display_date(T),
+ display_report(Type,Report);
+do_log(#{msg:=Msg,meta:=#{time:=T}}) ->
+ display_date(T),
+ display(Msg).
+
+display_date(Timestamp0) when is_integer(Timestamp0) ->
+ Timestamp = Timestamp0 + erlang:time_offset(microsecond),
+ Micro = Timestamp rem 1000000,
+ Sec = Timestamp div 1000000,
+ {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime(
+ erlang:posixtime_to_universaltime(Sec)),
+ erlang:display_string(
+ integer_to_list(Y) ++ "-" ++
+ pad(Mo,2) ++ "-" ++
+ pad(D,2) ++ " " ++
+ pad(H,2) ++ ":" ++
+ pad(Mi,2) ++ ":" ++
+ pad(S,2) ++ "." ++
+ pad(Micro,6) ++ " ").
+
+pad(Int,Size) when is_integer(Int) ->
+ pad(integer_to_list(Int),Size);
+pad(Str,Size) when length(Str)==Size ->
+ Str;
+pad(Str,Size) ->
+ pad([$0|Str],Size).
+
+display({string,Chardata}) ->
+ try unicode:characters_to_list(Chardata) of
+ String -> erlang:display_string(String), erlang:display_string("\n")
+ catch _:_ -> erlang:display(Chardata)
+ end;
+display({report,Report}) when is_map(Report) ->
+ display_report(maps:to_list(Report));
+display({report,Report}) ->
+ display_report(Report);
+display({F, A}) when is_list(F), is_list(A) ->
+ erlang:display_string(F ++ "\n"),
+ [begin
+ erlang:display_string("\t"),
+ erlang:display(Arg)
+ end || Arg <- A],
+ ok.
+
+display_report(Atom, A) when is_atom(Atom) ->
+ %% The widest atom seems to be 'supervisor_report' at 17.
+ ColumnWidth = 20,
+ AtomString = atom_to_list(Atom),
+ AtomLength = length(AtomString),
+ Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
+ erlang:display_string(AtomString ++ Padding),
+ display_report(A);
+display_report(F, A) ->
+ erlang:display({F, A}).
+
+display_report([A, []]) ->
+ %% Special case for crash reports when process has no links
+ display_report(A);
+display_report(A = [_|_]) ->
+ case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
+ true ->
+ erlang:display_string("\n"),
+ lists:foreach(
+ fun({Key, Value}) ->
+ erlang:display_string(
+ " " ++
+ atom_to_list(Key) ++
+ ": "),
+ erlang:display(Value)
+ end, A);
+ false ->
+ erlang:display(A)
+ end;
+display_report(A) ->
+ erlang:display(A).
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
new file mode 100644
index 0000000000..cbc9db372c
--- /dev/null
+++ b/lib/kernel/src/logger_std_h.erl
@@ -0,0 +1,799 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+-include_lib("kernel/include/file.hrl").
+
+%% API
+-export([start_link/3, info/1, filesync/1, reset/1]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/2, removing_handler/1,
+ changing_config/3, swap_buffer/2]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a standard handler process and link to caller.
+%%% This function is called by the logger supervisor (logger_sup) when
+%%% this handler is added.
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+-spec filesync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+filesync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, filesync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+filesync(Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(Name, Config) ->
+ case check_config(adding, Name, Config) of
+ {ok, Config1} ->
+ %% create initial handler state by merging defaults with config
+ HConfig = maps:get(?MODULE, Config1, #{}),
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ case start(Name, Config1, HState) of
+ ok ->
+ %% Make sure wait_for_buffer is not stored, so we
+ %% won't hang and wait for buffer on a restart
+ {ok, maps:remove(wait_for_buffer,Config1)};
+ Error ->
+ Error
+ end;
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = HState,
+ {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(Name,
+ OldConfig=#{id:=Id, ?MODULE:=#{type:=Type}},
+ NewConfig=#{id:=Id}) ->
+ MyConfig = maps:get(?MODULE, NewConfig, #{}),
+ case maps:get(type, MyConfig, Type) of
+ Type ->
+ MyConfig1 = MyConfig#{type=>Type},
+ changing_config1(Name, OldConfig,
+ NewConfig#{?MODULE=>MyConfig1});
+ _ ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}
+ end;
+changing_config(_Name, OldConfig, NewConfig) ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}.
+
+changing_config1(Name, OldConfig, NewConfig) ->
+ case check_config(changing, Name, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ HError -> HError
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end.
+
+check_config(adding, Name, Config0) ->
+ %% Merge in defaults on top level
+ Config = maps:merge(#{id => Name}, Config0),
+ %% Merge in defaults on handler level
+ MyConfig0 = maps:get(?MODULE, Config, #{}),
+ MyConfig = maps:merge(#{type => standard_io},
+ MyConfig0),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok ->
+ {ok,Config#{?MODULE=>MyConfig}};
+ Error ->
+ Error
+ end;
+check_config(changing, _Name, Config) ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+check_my_config([{type,Type} | Config]) when Type == standard_io;
+ Type == standard_error ->
+ check_my_config(Config);
+check_my_config([{type,{file,File}} | Config]) when is_list(File) ->
+ check_my_config(Config);
+check_my_config([{type,{file,File,Modes}} | Config]) when is_list(File),
+ is_list(Modes) ->
+ check_my_config(Config);
+check_my_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_my_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_my_config([]) ->
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Name) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Get buffer when swapping from simple handler
+swap_buffer(Name,Buffer) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ Name ! {buffer,Buffer}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(Log, Config) -> ok | dropped when
+ Log :: logger:log(),
+ Config :: logger:config().
+
+log(Log,Config=#{id:=Name}) ->
+ %% if the handler has crashed, we must drop this request
+ %% and hope the handler restarts so we can try again
+ true = is_pid(whereis(Name)),
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ logger_h_common:call_cast_or_drop(Name, Bin).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config,
+ State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) ->
+ register(Name, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case do_init(Name, Type) of
+ {ok,InitState} ->
+ catch ets:new(Name, [public, named_table]),
+ ?set_mode(Name, async),
+ State = maps:merge(State0, InitState),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{mode => async,
+ file_ctrl_sync => FileCtrlSyncInt,
+ last_qlen => 0,
+ last_log_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0}),
+ proc_lib:init_ack({ok,self()}),
+ gen_server:cast(self(), {repeated_filesync,T0}),
+ enter_loop(Config, State1);
+ Error ->
+ logger_h_common:error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+do_init(Name, Std) when Std=:=standard_io; Std=:=standard_error ->
+ case open_log_file(Name, Std) of
+ {ok,FileCtrlPid} ->
+ {ok,#{id=>Name,type=>Std,file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end;
+do_init(Name, FileInfo) when is_tuple(FileInfo) ->
+ case open_log_file(Name, FileInfo) of
+ {ok,FileCtrlPid} ->
+ {ok,#{id=>Name,type=>FileInfo,file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end.
+
+enter_loop(#{wait_for_buffer:=true}=Config,State) ->
+ State1 =
+ receive
+ {buffer,Buffer} ->
+ lists:foldl(
+ fun(Log,S) ->
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ {_,S1} = do_log(Bin,cast,S),
+ S1
+ end,
+ State,
+ Buffer)
+ end,
+ gen_server:enter_loop(?MODULE,[],State1);
+enter_loop(_Config,State) ->
+ gen_server:enter_loop(?MODULE,[],State).
+
+%% This is the synchronous log request.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply,Result, State1};
+
+handle_call(filesync, _From, State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid}) ->
+ if is_atom(Type) ->
+ {reply, ok, State};
+ true ->
+ {reply, file_ctrl_filesync_sync(FileCtrlPid), State}
+ end;
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0,
+ last_log_ts := LastLogTS}) ->
+ HConfig = maps:get(?MODULE, NewConfig, #{}),
+ State1 = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ gen_server:cast(self(), {repeated_filesync,
+ LastLogTS})
+ end,
+ {reply, ok, State1};
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = State1,
+ {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp()}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+%% This is the asynchronous log request.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisecond value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% request, the repeat operation must be active!
+handle_cast({repeated_filesync,LastLogTS0},
+ State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid,
+ filesync_repeat_interval := FSyncInt,
+ last_log_ts := LastLogTS1}) ->
+ State1 =
+ if not is_atom(Type), is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ if LastLogTS1 == LastLogTS0 ->
+ ok;
+ true ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),{repeated_filesync,LastLogTS1}]),
+ State#{rep_sync_tref => TRef};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) ->
+ case maps:get(file_ctrl_pid, State, undefined) of
+ Pid ->
+ %% file error, terminate handler
+ logger_h_common:handler_exit(Name,
+ {error,{write_failed,FileInfo,Why}});
+ _Other ->
+ %% ignore EXIT
+ ok
+ end,
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
+ type:=_FileInfo}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ case is_process_alive(FWPid) of
+ true ->
+ unlink(FWPid),
+ _ = file_ctrl_stop(FWPid),
+ MRef = erlang:monitor(process, FWPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ exit(FWPid, kill)
+ end;
+ false ->
+ ok
+ end,
+ logger_h_common:stop_or_restart(Name, Reason, State).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%%
+get_init_state() ->
+ #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen => ?FLUSH_REQS_QLEN,
+ enable_burst_limit => ?ENABLE_BURST_LIMIT,
+ burst_limit_size => ?BURST_LIMIT_SIZE,
+ burst_window_time => ?BURST_WINDOW_TIME,
+ enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after => ?HANDLER_RESTART_AFTER,
+ file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+%%%-----------------------------------------------------------------
+%%% Add a standard handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Handler-specific config should be provided in a sub-map associated
+%%% with a key named after this module, e.g.:
+%%%
+%%% Config = #{logger_std_h => #{toggle_sync_qlen => 50}}
+%%%
+%%% The standard handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
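+%%%
+%%% Hedged example of adding this handler through the logger API (the
+%%% handler id and file name are illustrative only):
+%%%
+%%%   logger:add_handler(my_std_h, logger_std_h,
+%%%                      #{logger_std_h => #{type => {file,"./log/my.log"}}}).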
+start(Name, Config, HandlerState) ->
+ LoggerStdH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerStdH) of
+ {ok,_Pid} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ _ = gen_server:call(Name,stop),
+ _ = supervisor:delete_child(logger_sup, Name),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_file_ctrl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload on every request (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% at once - at most ?FLUSH_MAX_N requests are flushed at a time
+do_log(Bin, CallOrCast, State = #{id:=Name}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% request to react quickly to large bursts of requests and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this clause is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log request
+ ?observe(Name,{flushed,_NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,_NewFlushed,
+ State1#{mode => ?set_mode(Name,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this clause is called to write to file
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{file_ctrl_pid := FileCtrlPid,
+ file_ctrl_sync := FileCtrlSync,
+ last_qlen := LastQLen,
+ last_log_ts := T0,
+ file_ctrl_sync_int := FileCtrlSyncInt}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log requests
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+    %% only send a synchronous request to the file controller process
+    %% every FileCtrlSyncInt:th write, to give the handler time between
+    %% file writes so it can keep up with incoming messages
+ {Result,LastQLen1} =
+ if DoWrite, FileCtrlSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ file_write_sync(FileCtrlPid, Bin, false),
+ {ok,element(2, process_info(self(), message_queue_len))};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ file_write_async(FileCtrlPid, Bin),
+ {ok,LastQLen};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped,LastQLen}
+ end,
+
+ %% Check if the time since the previous log request is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+    %% time between two consecutive log requests is short and no new
+    %% request comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ %% do filesync if necessary
+ case maps:get(type, State) of
+ Std when is_atom(Std) ->
+ ok;
+ _File ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {?change_mode(Name, Mode, async),0};
+ true ->
+ {Mode,BurstMsgCount}
+ end,
+ State1 =
+ ?update_calls_or_casts(_CallOrCast,1,State),
+ State2 =
+ ?update_max_time(Time,
+ State1#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ file_ctrl_sync =>
+ ?update_file_ctrl_sync(FileCtrlSync,
+ FileCtrlSyncInt)}),
+ {Result,State2}.
+
+open_log_file(HandlerName, FileInfo) ->
+ case file_ctrl_start(HandlerName, FileInfo) of
+ OK = {ok,_FileCtrlPid} -> OK;
+ Error -> Error
+ end.
+
+do_open_log_file({file,File}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,[]}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,Modes}) ->
+ try
+ case filelib:ensure_dir(File) of
+ ok ->
+ file:open(File, Modes);
+ Error ->
+ Error
+ end
+ catch
+ _:Reason -> {error,Reason}
+ end.
+
+close_log_file(Std) when Std == standard_io; Std == standard_error ->
+ ok;
+close_log_file(Fd) ->
+ _ = file:datasync(Fd),
+ _ = file:close(Fd).
+
+%%%-----------------------------------------------------------------
+%%% File control process
+
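+%% The file controller process owns the io device and serialises all
+%% writes to it. Protocol sketch, as implemented by file_ctrl_loop/7 and
+%% the helper functions below:
+%%
+%%   {log,Bin}                      asynchronous write
+%%   {{log,From,Bin,FileSync},MRef} synchronous write, reply {MRef,ok}
+%%   filesync                       asynchronous datasync
+%%   {{filesync,From},MRef}         synchronous datasync, reply {MRef,ok}
+%%   stop                           close the log file and terminate
+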
+file_ctrl_start(HandlerName, FileInfo) ->
+ Starter = self(),
+ FileCtrlPid =
+ spawn_link(fun() ->
+ file_ctrl_init(HandlerName, FileInfo, Starter)
+ end),
+ receive
+ {FileCtrlPid,ok} ->
+ {ok,FileCtrlPid};
+ {FileCtrlPid,Error} ->
+ Error
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,file_ctrl_process_not_started}
+ end.
+
+file_ctrl_stop(Pid) ->
+ Pid ! stop.
+
+file_write_async(Pid, Bin) ->
+ Pid ! {log,Bin},
+ ok.
+
+file_write_sync(Pid, Bin, FileSync) ->
+ case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of
+ {error,Reason} ->
+ {error,{write_failed,Bin,Reason}};
+ Result ->
+ Result
+ end.
+
+file_ctrl_filesync_async(Pid) ->
+ Pid ! filesync,
+ ok.
+
+file_ctrl_filesync_sync(Pid) ->
+ file_ctrl_call(Pid, {filesync,self()}).
+
+file_ctrl_call(Pid, Msg) ->
+ MRef = monitor(process, Pid),
+ Pid ! {Msg,MRef},
+ receive
+ {MRef,Result} ->
+ demonitor(MRef, [flush]),
+ Result;
+ {'DOWN',MRef,_Type,_Object,Reason} ->
+ {error,Reason}
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,{no_response,Pid}}
+ end.
+
+file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
+ process_flag(message_queue_data, off_heap),
+ FileName = element(2, FileInfo),
+ case do_open_log_file(FileInfo) of
+ {ok,Fd} ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName);
+ {error,Reason} ->
+ Starter ! {self(),{error,{open_failed,FileName,Reason}}}
+ end;
+file_ctrl_init(HandlerName, StdDev, Starter) ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName).
+
+file_ctrl_loop(Fd, Type, DevName, Synced,
+ PrevWriteResult, PrevSyncResult, HandlerName) ->
+ receive
+ %% asynchronous request
+ {log,Bin} ->
+ Result = if Type == file ->
+ write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName);
+ true ->
+ io:put_chars(Fd, Bin)
+ end,
+ file_ctrl_loop(Fd, Type, DevName, false,
+ Result, PrevSyncResult, HandlerName);
+
+ %% synchronous request
+ {{log,From,Bin,FileSync},MRef} ->
+ if Type == file ->
+ %% check that file hasn't been deleted
+ CheckFile =
+ fun() -> {ok,_} = file:read_file_info(DevName) end,
+ spawn_link(CheckFile),
+ WResult = write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName),
+ {Synced1,SResult} =
+ if not FileSync ->
+ {false,PrevSyncResult};
+ true ->
+ case sync_dev(Fd, DevName,
+ PrevSyncResult, HandlerName) of
+ ok -> {true,ok};
+ Error -> {false,Error}
+ end
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, Synced1,
+ WResult, SResult, HandlerName);
+ true ->
+ _ = io:put_chars(Fd, Bin),
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, false,
+ ok, PrevSyncResult, HandlerName)
+ end;
+
+ filesync when not Synced ->
+ Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName),
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ filesync ->
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, PrevSyncResult, HandlerName);
+
+ {{filesync,From},MRef} ->
+ Result = if not Synced ->
+ sync_dev(Fd, DevName, PrevSyncResult, HandlerName);
+ true ->
+ ok
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ stop ->
+ _ = close_log_file(Fd),
+ stopped
+ end.
+
+write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) ->
+ case ?file_write(Fd, Bin) of
+ ok ->
+ ok;
+ PrevWriteResult ->
+ %% don't report same error twice
+ PrevWriteResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,write,FileName,Error}),
+ Error
+ end.
+
+sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
+ case ?file_datasync(Fd) of
+ ok ->
+ ok;
+ PrevSyncResult ->
+ %% don't report same error twice
+ PrevSyncResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
+ Error
+ end.
+
diff --git a/lib/kernel/src/logger_sup.erl b/lib/kernel/src/logger_sup.erl
new file mode 100644
index 0000000000..4e4de94d5c
--- /dev/null
+++ b/lib/kernel/src/logger_sup.erl
@@ -0,0 +1,53 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callbacks
+-export([init/1]).
+
+-define(SERVER, ?MODULE).
+
+%%%===================================================================
+%%% API functions
+%%%===================================================================
+
+start_link() ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%%===================================================================
+%%% Supervisor callbacks
+%%%===================================================================
+
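+%% The supervisor starts with no static children; handler processes are
+%% attached dynamically with supervisor:start_child/2 (see, for example,
+%% logger_std_h:start/3 earlier in this change).
+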
+init([]) ->
+
+ SupFlags = #{strategy => one_for_one,
+ intensity => 1,
+ period => 5},
+
+ {ok, {SupFlags, []}}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================