author    Siri Hansen <[email protected]>    2018-10-08 14:25:18 +0200
committer Siri Hansen <[email protected]>    2018-10-19 11:57:46 +0200
commit    bc693e588be932663b75a7261ac19655ab597ae2 (patch)
tree      478c91a3d83fb55441dfb7b936e90572d1f57d51 /lib/kernel
parent    05bcb5c45860e6cbb9480f8b5de9ff0ce614b548 (diff)
[logger] Move common handler code to logger_h_common
There was a lot of duplicated code in logger_std_h and logger_disk_log_h. Most of this is now moved to logger_h_common, which now also serves as the gen_server callback module for both handlers.
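For context, the sketch below (not part of this commit) shows roughly what a handler built on the new split could look like: the API and logger callbacks delegate to logger_h_common, and the handler itself implements only the logger_h_common callbacks exported in this diff. The module name my_sketch_h and the io-based write/sync bodies are illustrative assumptions; the callback names, arities and return shapes follow the exports and call sites added below.

%% A minimal sketch, assuming the callback contract shown in this diff.
-module(my_sketch_h).

%% API - delegates to logger_h_common, as logger_std_h/logger_disk_log_h now do
-export([filesync/1, info/1, reset/1]).

%% logger callbacks - also delegated
-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
         filter_config/1]).

%% logger_h_common callbacks
-export([init/2, check_config/4, reset_state/1,
         async_filesync/2, sync_filesync/2,
         async_write/3, sync_write/3,
         handle_info/2, terminate/3]).

filesync(Name) -> logger_h_common:filesync(?MODULE, Name).
info(Name)     -> logger_h_common:info(?MODULE, Name).
reset(Name)    -> logger_h_common:reset(?MODULE, Name).

log(LogEvent, Config)    -> logger_h_common:log(LogEvent, Config).
adding_handler(Config)   -> logger_h_common:adding_handler(Config).
removing_handler(Config) -> logger_h_common:removing_handler(Config).
changing_config(SetOrUpdate, Old, New) ->
    logger_h_common:changing_config(SetOrUpdate, Old, New).
filter_config(Config)    -> logger_h_common:filter_config(Config).

%% Handler-specific state is created here; logger_h_common keeps it under
%% the handler_state key and passes it back into every callback below.
init(_Name, _HConfig) ->
    {ok, #{prev_result => ok}}.

%% This sketch accepts any handler-specific config; a real handler would
%% validate the keys here (compare check_h_config/1 in the modules below).
check_config(_Name, set, undefined, HConfig) ->
    {ok, HConfig};
check_config(_Name, _SetOrUpdate, OldHConfig, NewHConfig) ->
    {ok, maps:merge(OldHConfig, NewHConfig)}.

reset_state(State) ->
    State#{prev_result => ok}.

%% async_* callbacks return only the new handler state; sync_* callbacks
%% return {Result,State} so the result can be replied to the caller.
async_write(_Name, Bin, State) -> io:put_chars(Bin), State.
sync_write(_Name, Bin, State)  -> {io:put_chars(Bin), State}.
async_filesync(_Name, State)   -> State.
sync_filesync(_Name, State)    -> {ok, State}.

%% Non-gen_server messages (e.g. disk_log status messages) end up here.
handle_info(_Info, State) -> State.

terminate(_Name, _Reason, _State) -> ok.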
Diffstat (limited to 'lib/kernel')
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl          642
-rw-r--r--  lib/kernel/src/logger_h_common.erl            646
-rw-r--r--  lib/kernel/src/logger_std_h.erl               672
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl    62
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl         42
5 files changed, 850 insertions, 1214 deletions
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index 2a81458ec8..8c09dd071f 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -19,59 +19,35 @@
%%
-module(logger_disk_log_h).
--behaviour(gen_server).
-
-include("logger.hrl").
-include("logger_internal.hrl").
-include("logger_h_common.hrl").
%%% API
--export([start_link/3, info/1, filesync/1, reset/1]).
+-export([info/1, filesync/1, reset/1]).
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+%% logger_h_common callbacks
+-export([init/2, check_config/4, reset_state/1,
+ async_filesync/2, sync_filesync/2,
+ async_write/3, sync_write/3,
+ handle_info/2, terminate/3]).
%% logger callbacks
-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
filter_config/1]).
-%% handler internal
--export([log_handler_info/4]).
-
%%%===================================================================
%%% API
%%%===================================================================
%%%-----------------------------------------------------------------
-%%% Start a disk_log handler process and link to caller.
-%%% This function is called by the kernel supervisor when this
-%%% handler process gets added (as a result of calling add/3).
--spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
- Name :: atom(),
- Config :: logger:handler_config(),
- HandlerState :: map(),
- Pid :: pid(),
- Reason :: term().
-
-start_link(Name, Config, HandlerState) ->
- proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
-
-%%%-----------------------------------------------------------------
%%%
-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-filesync(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
filesync(Name) ->
- {error,{badarg,{filesync,[Name]}}}.
+ logger_h_common:filesync(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -80,15 +56,8 @@ filesync(Name) ->
Info :: term(),
Reason :: handler_busy | {badarg,term()}.
-info(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- info, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
info(Name) ->
- {error,{badarg,{info,[Name]}}}.
+ logger_h_common:info(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -96,16 +65,8 @@ info(Name) ->
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-reset(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- reset, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
reset(Name) ->
- {error,{badarg,{reset,[Name]}}}.
-
+ logger_h_common:reset(?MODULE,Name).
%%%===================================================================
%%% logger callbacks
@@ -113,30 +74,55 @@ reset(Name) ->
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(#{id:=Name}=Config) ->
- case check_config(adding, Config) of
- {ok, #{config:=HConfig}=Config1} ->
- %% create initial handler state by merging defaults with config
- HState = maps:merge(get_init_state(), HConfig),
- case logger_h_common:overload_levels_ok(HState) of
- true ->
- start(Name, Config1, HState);
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = HState,
- {error,{invalid_levels,{SMQL,DMQL,FQL}}}
- end;
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
+
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, #{file:=File,type:=Type,max_no_bytes:=MNB,max_no_files:=MNF}) ->
+ case open_disk_log(Name, File, Type, MNB, MNF) of
+ ok ->
+ {ok,#{log_opts => #{file => File,
+ type => Type,
+ max_no_bytes => MNB,
+ max_no_files => MNF},
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
Error ->
Error
end.
-%%%-----------------------------------------------------------------
-%%% Updating handler config
-changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+check_config(Name,set,undefined,HConfig0) ->
+ HConfig=merge_default_logopts(Name,maps:merge(get_default_config(),HConfig0)),
+ check_config(HConfig);
+check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig),
- ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
- NewHConfig0 = maps:get(config, NewConfig, #{}),
Default =
case SetOrUpdate of
set ->
@@ -146,66 +132,25 @@ changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
OldHConfig
end,
- %% Allow (accidentially) included read-only fields - just overwrite them
- NewHConfig = maps:merge(maps:merge(Default,NewHConfig0),ReadOnly),
+ NewHConfig = maps:merge(Default,NewHConfig0),
- %% But fail if write-once fields are changed
+ %% Fail if write-once fields are changed
case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of
WriteOnce ->
- changing_config1(maps:get(handler_pid,OldHConfig),
- OldConfig,
- NewConfig#{config=>NewHConfig});
+ check_config(NewHConfig);
Other ->
{Old,New} = logger_server:diff_maps(WriteOnce,Other),
- {error,{illegal_config_change,#{config=>Old},#{config=>New}}}
+ {error,{illegal_config_change,?MODULE,Old,New}}
end.
-changing_config1(HPid, OldConfig, NewConfig) ->
- case check_config(changing, NewConfig) of
- Result = {ok,NewConfig1} ->
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
- ?DEFAULT_CALL_TIMEOUT) of
- ok -> Result;
- Error -> Error
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
- Error ->
- Error
- end.
-
-check_config(adding, #{id:=Name}=Config) ->
- %% merge handler specific config data
- HConfig1 = maps:get(config, Config, #{}),
- HConfig2 = maps:merge(get_default_config(), HConfig1),
- HConfig3 = merge_default_logopts(Name, HConfig2),
- case check_h_config(maps:to_list(HConfig3)) of
- ok ->
- {ok,Config#{config=>HConfig3}};
- Error ->
- Error
- end;
-check_config(changing, Config) ->
- HConfig = maps:get(config, Config, #{}),
+check_config(HConfig) ->
case check_h_config(maps:to_list(HConfig)) of
- ok -> {ok,Config};
- Error -> Error
+ ok ->
+ {ok,HConfig};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
end.
-merge_default_logopts(Name, HConfig) ->
- Type = maps:get(type, HConfig, wrap),
- {DefaultNoFiles,DefaultNoBytes} =
- case Type of
- halt -> {undefined,infinity};
- _wrap -> {10,1048576}
- end,
- {ok,Dir} = file:get_cwd(),
- Defaults = #{file => filename:join(Dir,Name),
- max_no_files => DefaultNoFiles,
- max_no_bytes => DefaultNoBytes,
- type => Type},
- maps:merge(Defaults, HConfig).
-
check_h_config([{file,File}|Config]) when is_list(File) ->
check_h_config(Config);
check_h_config([{max_no_files,undefined}|Config]) ->
@@ -218,454 +163,75 @@ check_h_config([{max_no_bytes,N}|Config]) when is_integer(N), N>0 ->
check_h_config(Config);
check_h_config([{type,Type}|Config]) when Type==wrap; Type==halt ->
check_h_config(Config);
-check_h_config([Other | Config]) ->
- case logger_h_common:check_common_config(Other) of
- valid ->
- check_h_config(Config);
- invalid ->
- {error,{invalid_config,?MODULE,Other}}
- end;
+check_h_config([Other | _]) ->
+ {error,Other};
check_h_config([]) ->
ok.
-%%%-----------------------------------------------------------------
-%%% Handler being removed
-removing_handler(#{id:=Name}) ->
- stop(Name).
+get_default_config() ->
+ #{}.
-%%%-----------------------------------------------------------------
-%%% Log a string or report
--spec log(LogEvent, Config) -> ok when
- LogEvent :: logger:log_event(),
- Config :: logger:handler_config().
+merge_default_logopts(Name, HConfig) ->
+ Type = maps:get(type, HConfig, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Defaults = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Defaults, HConfig).
-log(LogEvent, Config = #{id := Name,
- config := #{handler_pid := HPid,
- mode_tab := ModeTab}}) ->
- %% if the handler has crashed, we must drop this event
- %% and hope the handler restarts so we can try again
- true = is_process_alive(HPid),
- Bin = logger_h_common:log_to_binary(LogEvent, Config),
- logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+async_filesync(Name,State) ->
+ {_,State1} = disk_log_sync(Name,State),
+ State1.
-%%%-----------------------------------------------------------------
-%%% Remove internal fields from configuration
-filter_config(#{config:=HConfig}=Config) ->
- Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+sync_filesync(Name,State) ->
+ disk_log_sync(Name,State).
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
+async_write(Name, Bin, State) ->
+ {_,State1} = disk_log_write(Name, Bin, State),
+ State1.
-init([Name,
- Config = #{config := HConfig = #{file:=File,
- type:=Type,
- max_no_bytes:=MNB,
- max_no_files:=MNF}},
- State = #{dl_sync_int := DLSyncInt}]) ->
-
- RegName = ?name_to_reg_name(?MODULE,Name),
- register(RegName, self()),
- process_flag(trap_exit, true),
- process_flag(message_queue_data, off_heap),
-
- ?init_test_hooks(),
- ?start_observation(Name),
-
- LogOpts = #{file=>File, type=>Type, max_no_bytes=>MNB, max_no_files=>MNF},
- case open_disk_log(Name, File, Type, MNB, MNF) of
- ok ->
- try ets:new(Name, [public]) of
- ModeTab ->
- ?set_mode(ModeTab, async),
- T0 = ?timestamp(),
- State1 =
- ?merge_with_stats(State#{
- id => Name,
- mode_tab => ModeTab,
- mode => async,
- dl_sync => DLSyncInt,
- log_opts => LogOpts,
- last_qlen => 0,
- last_log_ts => T0,
- burst_win_ts => T0,
- burst_msg_count => 0,
- last_op => sync,
- prev_log_result => ok,
- prev_sync_result => ok,
- prev_disk_log_info => undefined}),
- Config1 =
- Config#{config => HConfig#{handler_pid => self(),
- mode_tab => ModeTab}},
- proc_lib:init_ack({ok,self(),Config1}),
- gen_server:cast(self(), repeated_disk_log_sync),
- case logger_h_common:unset_restart_flag(Name, ?MODULE) of
- true ->
- %% inform about restart
- gen_server:cast(self(), {log_handler_info,
- "Handler ~p restarted",
- [Name]});
- false ->
- %% initial start
- ok
- end,
- gen_server:enter_loop(?MODULE, [], State1)
- catch
- _:Error ->
- unregister(RegName),
- logger_h_common:error_notify({open_disk_log,Name,Error}),
- proc_lib:init_ack(Error)
- end;
- Error ->
- unregister(RegName),
- logger_h_common:error_notify({open_disk_log,Name,Error}),
- proc_lib:init_ack(Error)
- end.
+sync_write(Name, Bin, State) ->
+ disk_log_write(Name, Bin, State).
-%% This is the synchronous log event.
-handle_call({log, Bin}, _From, State) ->
- {Result,State1} = do_log(Bin, call, State),
- %% Result == ok | dropped
- {reply, Result, State1};
-
-handle_call(disk_log_sync, _From, State = #{id := Name}) ->
- State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State),
- {reply, Result, State1};
-
-handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0}) ->
- HConfig = maps:get(config, NewConfig, #{}),
- State1 = #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = maps:merge(State, HConfig),
- case logger_h_common:overload_levels_ok(State1) of
- true ->
- _ =
- case maps:get(filesync_repeat_interval, HConfig, undefined) of
- undefined ->
- ok;
- no_repeat ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined));
- FSyncInt0 ->
- ok;
- _FSyncInt1 ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined)),
- _ = gen_server:cast(self(), repeated_disk_log_sync)
- end,
- {reply, ok, State1};
- false ->
- {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
- end;
-
-handle_call(info, _From, State) ->
- {reply, State, State};
-
-handle_call(reset, _From, State) ->
- State1 = ?merge_with_stats(State),
- {reply, ok, State1#{last_qlen => 0,
- last_log_ts => ?timestamp(),
- prev_log_result => ok,
- prev_sync_result => ok,
- prev_disk_log_info => undefined}};
-
-handle_call(stop, _From, State) ->
- {stop, {shutdown,stopped}, ok, State}.
-
-
-%% This is the asynchronous log event.
-handle_cast({log, Bin}, State) ->
- {_,State1} = do_log(Bin, cast, State),
- {noreply, State1};
-
-handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
- log_handler_info(Name, Format, Args, State),
- {noreply, State};
-
-%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
-%% clause gets called repeatedly by the handler. In order to
-%% guarantee that a filesync *always* happens after the last log
-%% event, the repeat operation must be active!
-handle_cast(repeated_disk_log_sync,
- State = #{id := Name,
- filesync_repeat_interval := FSyncInt,
- last_op := LastOp}) ->
- State1 =
- if is_integer(FSyncInt) ->
- %% only do filesync if something has been
- %% written since last time we checked
- NewState = if LastOp == sync ->
- State;
- true ->
- disk_log_sync(Name, State)
- end,
- {ok,TRef} =
- timer:apply_after(FSyncInt, gen_server,cast,
- [self(),repeated_disk_log_sync]),
- NewState#{rep_sync_tref => TRef, last_op => sync};
- true ->
- State
- end,
- {noreply,State1}.
+reset_state(State) ->
+ State#{prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}.
%% The disk log owner must handle status messages from disk_log.
handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) ->
- {noreply, State};
+ State;
handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}},
State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
+ State#{prev_disk_log_info => Info};
handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}},
State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
+ State#{prev_disk_log_info => Info};
handle_info({disk_log, _Node, Log, full},
State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}),
- {noreply, State#{prev_disk_log_info => full}};
+ State#{prev_disk_log_info => full};
handle_info({disk_log, _Node, Log, Info = {error_status,_Status}},
State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
-
-handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) ->
- {noreply, State};
-
+ State#{prev_disk_log_info => Info};
handle_info(_, State) ->
- {noreply, State}.
+ State.
-terminate(Reason, State = #{id := Name}) ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
- undefined)),
+terminate(Name, _Reason, _State) ->
_ = close_disk_log(Name, normal),
- ok = logger_h_common:stop_or_restart(Name, Reason, State),
- unregister(?name_to_reg_name(?MODULE, Name)),
ok.
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
%%%-----------------------------------------------------------------
%%% Internal functions
-
-%%%-----------------------------------------------------------------
-%%%
-get_default_config() ->
- #{sync_mode_qlen => ?SYNC_MODE_QLEN,
- drop_mode_qlen => ?DROP_MODE_QLEN,
- flush_qlen => ?FLUSH_QLEN,
- burst_limit_enable => ?BURST_LIMIT_ENABLE,
- burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
- burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
- overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
- overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
- overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
- overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
-
-get_init_state() ->
- #{dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
-
-%%%-----------------------------------------------------------------
-%%% Add a disk_log handler to the logger.
-%%% This starts a dedicated handler process which should always
-%%% exist if the handler is registered with logger (and should not
-%%% exist if the handler is not registered).
-%%%
-%%% Config is the logger:handler_config() map. Handler specific parameters
-%%% should be provided with a sub map associated with a key named
-%%% 'config', e.g:
-%%%
-%%% Config = #{config => #{sync_mode_qlen => 50}
-%%%
-%%% The 'config' sub map will also contain parameters for configuring
-%%% the disk_log:
-%%%
-%%% Config = #{config => #{file => file:filename(),
-%%% max_no_bytes => integer(),
-%%% max_no_files => integer(),
-%%% type => wrap | halt}}.
-%%%
-%%% If type == halt, then max_no_files is ignored.
-%%%
-%%% The disk_log handler process is linked to logger_sup, which is
-%%% part of the kernel application's supervision tree.
-start(Name, Config, HandlerState) ->
- LoggerDLH =
- #{id => Name,
- start => {?MODULE, start_link, [Name,Config,HandlerState]},
- restart => temporary,
- shutdown => 2000,
- type => worker,
- modules => [?MODULE]},
- case supervisor:start_child(logger_sup, LoggerDLH) of
- {ok,Pid,Config1} ->
- ok = logger_handler_watcher:register_handler(Name,Pid),
- {ok,Config1};
- Error ->
- Error
- end.
-
-%%%-----------------------------------------------------------------
-%%% Stop and remove the handler.
-stop(Name) ->
- case whereis(?name_to_reg_name(?MODULE,Name)) of
- undefined ->
- ok;
- Pid ->
- %% We don't want to do supervisor:terminate_child here
- %% since we need to distinguish this explicit stop from a
- %% system termination in order to avoid circular attempts
- %% at removing the handler (implying deadlocks and
- %% timeouts).
- %% And we don't need to do supervisor:delete_child, since
- %% the restart type is temporary, which means that the
- %% child specification is automatically removed from the
- %% supervisor when the process dies.
- _ = gen_server:call(Pid, stop),
- ok
- end.
-
-%%%-----------------------------------------------------------------
-%%% Logging and overload control.
--define(update_dl_sync(C, Interval),
- if C == 0 -> Interval;
- true -> C-1 end).
-
-%% check for overload between every event (and set Mode to async,
-%% sync or drop accordingly), but never flush the whole mailbox
-%% before LogWindowSize events have been handled
-do_log(Bin, CallOrCast, State = #{id:=Name, mode := Mode0}) ->
- T1 = ?timestamp(),
-
- %% check if the handler is getting overloaded, or if it's
- %% recovering from overload (the check must be done for each
- %% event to react quickly to large bursts of events and
- %% to ensure that the handler can never end up in drop mode
- %% with an empty mailbox, which would stop operation)
- {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
-
- if (Mode1 == drop) andalso (Mode0 =/= drop) ->
- log_handler_info(Name, "Handler ~p switched to drop mode",
- [Name], State);
- (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
- log_handler_info(Name, "Handler ~p switched to ~w mode",
- [Name,Mode1], State);
- true ->
- ok
- end,
-
- %% kill the handler if it can't keep up with the load
- logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
-
- if Mode1 == flush ->
- flush(Name, QLen, T1, State1);
- true ->
- write(Name, Mode1, T1, Bin, CallOrCast, State1)
- end.
-
-%% this function is called by do_log/3 after an overload check
-%% has been performed, where QLen > FlushQLen
-flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
- %% flush messages in the mailbox (a limited number in
- %% order to not cause long delays)
- NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
-
- %% write info in log about flushed messages
- log_handler_info(Name, "Handler ~p flushed ~w log events",
- [Name,NewFlushed], State),
-
- %% because of the receive loop when flushing messages, the
- %% handler will be scheduled out often and the mailbox could
- %% grow very large, so we'd better check the queue again here
- {_,_QLen1} = process_info(self(), message_queue_len),
- ?observe(Name,{max_qlen,_QLen1}),
-
- %% Add 1 for the current log event
- ?observe(Name,{flushed,NewFlushed+1}),
-
- State1 = ?update_max_time(?diff_time(T1,_T0),State),
- {dropped,?update_other(flushed,FLUSHED,NewFlushed,
- State1#{mode => ?set_mode(ModeTab,async),
- last_qlen => 0,
- last_log_ts => T1})}.
-
-%% this function is called to write to disk_log
-write(Name, Mode, T1, Bin, _CallOrCast,
- State = #{mode_tab := ModeTab,
- dl_sync := DLSync,
- dl_sync_int := DLSyncInt,
- last_qlen := LastQLen,
- last_log_ts := T0}) ->
- %% check if we need to limit the number of writes
- %% during a burst of log events
- {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
-
- %% only send a synhrounous event to the disk_log process
- %% every DLSyncInt time, to give the handler time between
- %% writes so it can keep up with incoming messages
- {Status,LastQLen1,State1} =
- if DoWrite, DLSync == 0 ->
- ?observe(Name,{_CallOrCast,1}),
- NewState = disk_log_write(Name, Bin, State),
- {ok, element(2,process_info(self(),message_queue_len)),
- NewState};
- DoWrite ->
- ?observe(Name,{_CallOrCast,1}),
- NewState = disk_log_write(Name, Bin, State),
- {ok, LastQLen, NewState};
- not DoWrite ->
- ?observe(Name,{flushed,1}),
- {dropped, LastQLen, State}
- end,
-
- %% Check if the time since the previous log event is long enough -
- %% and the queue length small enough - to assume the mailbox has
- %% been emptied, and if so, do filesync operation and reset mode to
- %% async. Note that this is the best we can do to detect an idle
- %% handler without setting a timer after each log call/cast. If the
- %% time between two consecutive log events is fast and no new
- %% event comes in after the last one, idle state won't be detected!
- Time = ?diff_time(T1,T0),
- {Mode1,BurstMsgCount1,State2} =
- if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
- (Time > ?IDLE_DETECT_TIME_USEC) ->
- {?change_mode(ModeTab,Mode,async), 0, disk_log_sync(Name,State1)};
- true ->
- {Mode, BurstMsgCount,State1}
- end,
-
- State3 =
- ?update_calls_or_casts(_CallOrCast,1,State2),
- State4 =
- ?update_max_time(Time,
- State3#{mode => Mode1,
- last_qlen := LastQLen1,
- last_log_ts => T1,
- burst_win_ts => BurstWinT,
- burst_msg_count => BurstMsgCount1,
- dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}),
- {Status,State4}.
-
-
-log_handler_info(Name, Format, Args, State) ->
- Config =
- case logger:get_handler_config(Name) of
- {ok,Conf} -> Conf;
- _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
- end,
- Meta = #{time=>erlang:system_time(microsecond)},
- Bin = logger_h_common:log_to_binary(#{level => notice,
- msg => {Format,Args},
- meta => Meta}, Config),
- _ = disk_log_write(Name, Bin, State),
- ok.
-
-
open_disk_log(Name, File, Type, MaxNoBytes, MaxNoFiles) ->
case filelib:ensure_dir(File) of
ok ->
@@ -701,7 +267,7 @@ close_disk_log(Name, _) ->
disk_log_write(Name, Bin, State) ->
case ?disk_log_blog(Name, Bin) of
ok ->
- State#{prev_log_result => ok, last_op => write};
+ {ok,State#{prev_log_result => ok, last_op => write}};
LogError ->
_ = case maps:get(prev_log_result, State) of
LogError ->
@@ -713,13 +279,13 @@ disk_log_write(Name, Bin, State) ->
LogOpts,
LogError})
end,
- State#{prev_log_result => LogError}
+ {LogError,State#{prev_log_result => LogError}}
end.
disk_log_sync(Name, State) ->
case ?disk_log_sync(Name) of
ok ->
- State#{prev_sync_result => ok, last_op => sync};
+ {ok,State#{prev_sync_result => ok, last_op => sync}};
SyncError ->
_ = case maps:get(prev_sync_result, State) of
SyncError ->
@@ -731,7 +297,7 @@ disk_log_sync(Name, State) ->
LogOpts,
SyncError})
end,
- State#{prev_sync_result => SyncError}
+ {SyncError,State#{prev_sync_result => SyncError}}
end.
error_notify_new(Info,Info, _Term) ->
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 94c640cb92..d01851b2b1 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -18,26 +18,541 @@
%% %CopyrightEnd%
%%
-module(logger_h_common).
+-behaviour(gen_server).
-include("logger_h_common.hrl").
-include("logger_internal.hrl").
--export([log_to_binary/2,
- check_common_config/1,
- call_cast_or_drop/4,
- check_load/1,
- limit_burst/1,
- kill_if_choked/5,
- flush_log_events/0,
- flush_log_events/1,
- handler_exit/2,
- set_restart_flag/2,
- unset_restart_flag/2,
- cancel_timer/1,
- stop_or_restart/3,
- overload_levels_ok/1,
- error_notify/1,
- info_notify/1]).
+%% API
+-export([start_link/3, info/2, filesync/2, reset/2]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+%% Library functions for handlers
+-export([error_notify/1]).
+
+%%%-----------------------------------------------------------------
+-define(CONFIG_KEYS,[sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen,
+ burst_limit_enable,
+ burst_limit_max_count,
+ burst_limit_window_time,
+ overload_kill_enable,
+ overload_kill_qlen,
+ overload_kill_mem_size,
+ overload_kill_restart_after,
+ filesync_repeat_interval]).
+-define(READ_ONLY_KEYS,[handler_pid,mode_tab]).
+
+%%%-----------------------------------------------------------------
+%%%
+filesync(Module, Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(Module,Name),
+ filesync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+filesync(_, Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
+
+info(Module, Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(Module,Name),
+ info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(_, Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+reset(Module, Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(Module,Name),
+ reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(_, Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(#{id:=Name,module:=Module}=Config) ->
+ HConfig = maps:get(config, Config, #{}),
+ HandlerConfig0 = maps:without(?CONFIG_KEYS,HConfig),
+ case Module:check_config(Name,set,undefined,HandlerConfig0) of
+ {ok,HandlerConfig} ->
+ ModifiedCommon = maps:with(?CONFIG_KEYS,HandlerConfig),
+ CommonConfig0 = maps:with(?CONFIG_KEYS,HConfig),
+ CommonConfig = maps:merge(
+ maps:merge(get_default_config(), CommonConfig0),
+ ModifiedCommon),
+ case check_config(CommonConfig) of
+ ok ->
+ State = maps:merge(get_init_state(), CommonConfig),
+ HConfig1 = maps:merge(CommonConfig,HandlerConfig),
+ Config1 = Config#{config=>HConfig1},
+ case overload_levels_ok(State) of
+ true ->
+ start(Name, Config1, State);
+ false ->
+ #{sync_mode_qlen := SMQL,
+ drop_mode_qlen := DMQL,
+ flush_qlen := FQL} = State,
+ {error,{invalid_levels,{SMQL,DMQL,FQL}}}
+ end;
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(#{id:=Name, module:=Module}) ->
+ case whereis(?name_to_reg_name(Module,Name)) of
+ undefined ->
+ ok;
+ Pid ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ %% And we don't need to do supervisor:delete_child, since
+ %% the restart type is temporary, which means that the
+ %% child specification is automatically removed from the
+ %% supervisor when the process dies.
+ _ = gen_server:call(Pid, stop),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate,
+ OldConfig=#{id:=Name,config:=OldHConfig,module:=Module},
+ NewConfig0) ->
+ NewHConfig0 = maps:get(config, NewConfig0, #{}),
+ OldHandlerConfig = maps:without(?CONFIG_KEYS++?READ_ONLY_KEYS,OldHConfig),
+ NewHandlerConfig0 = maps:without(?CONFIG_KEYS++?READ_ONLY_KEYS,NewHConfig0),
+ case Module:check_config(Name, SetOrUpdate,
+ OldHandlerConfig,NewHandlerConfig0) of
+ {ok, NewHandlerConfig} ->
+ ModifiedCommon = maps:with(?CONFIG_KEYS,NewHandlerConfig),
+ NewCommonConfig0 = maps:with(?CONFIG_KEYS,NewHConfig0),
+ CommonDefault =
+ case SetOrUpdate of
+ set ->
+ get_default_config();
+ update ->
+ maps:with(?CONFIG_KEYS,OldHConfig)
+ end,
+ NewCommonConfig = maps:merge(
+ maps:merge(CommonDefault,NewCommonConfig0),
+ ModifiedCommon),
+ case check_config(NewCommonConfig) of
+ ok ->
+ ReadOnly = maps:with(?READ_ONLY_KEYS,OldHConfig),
+ NewHConfig = maps:merge(
+ maps:merge(NewCommonConfig,NewHandlerConfig),
+ ReadOnly),
+ NewConfig = NewConfig0#{config=>NewHConfig},
+ HPid = maps:get(handler_pid,OldHConfig),
+ try gen_server:call(HPid, {change_config,OldConfig,NewConfig},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> {ok,NewConfig};
+ Error -> Error
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config = #{id := Name,
+ config := #{handler_pid := HPid,
+ mode_tab := ModeTab}}) ->
+ %% if the handler has crashed, we must drop this event
+ %% and hope the handler restarts so we can try again
+ true = is_process_alive(HPid),
+ Bin = log_to_binary(LogEvent, Config),
+ call_cast_or_drop(Name, HPid, ModeTab, Bin).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without(?READ_ONLY_KEYS,HConfig)}.
+
+%%%-----------------------------------------------------------------
+%%% Add a standard handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Handler specific config should be provided with a sub map associated
+%%% with a key named 'config', e.g:
+%%%
+%%% Config = #{config => #{sync_mode_qlen => 50}
+%%%
+%%% The standard handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
+start(Name, Config, HandlerState) ->
+ ChildSpec =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, ChildSpec) of
+ {ok,Pid,Config1} ->
+ ok = logger_handler_watcher:register_handler(Name,Pid),
+ {ok,Config1};
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Start a standard handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:handler_config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+get_init_state() ->
+ #{ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config = #{config := HConfig, module := Module},
+ State = #{filesync_repeat_interval := FSyncInt,
+ ctrl_sync_int := CtrlSyncInt}]) ->
+ RegName = ?name_to_reg_name(Module,Name),
+ register(RegName, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case Module:init(Name, HConfig) of
+ {ok,HState} ->
+ try ets:new(Name, [public]) of
+ ModeTab ->
+ ?set_mode(ModeTab, async),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{id => Name,
+ module => Module,
+ mode_tab => ModeTab,
+ mode => async,
+ ctrl_sync_count => CtrlSyncInt,
+ last_qlen => 0,
+ last_log_ts => T0,
+ last_op => sync,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ handler_state => HState}),
+ Config1 =
+ Config#{config => HConfig#{handler_pid => self(),
+ mode_tab => ModeTab}},
+ proc_lib:init_ack({ok,self(),Config1}),
+ if is_integer(FSyncInt) ->
+ gen_server:cast(self(), repeated_filesync);
+ true ->
+ ok
+ end,
+ case unset_restart_flag(Name, Module) of
+ true ->
+ %% inform about restart
+ gen_server:cast(self(), {log_handler_info,
+ "Handler ~p restarted",
+ [Name]});
+ false ->
+ %% initial start
+ ok
+ end,
+ gen_server:enter_loop(?MODULE, [], State1)
+ catch
+ _:Error ->
+ unregister(RegName),
+ error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end;
+ Error ->
+ unregister(RegName),
+ error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+%% This is the synchronous log event.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply,Result, State1};
+
+handle_call(filesync, _From, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ {Result,HandlerState1} = Module:sync_filesync(Name,HandlerState),
+ {reply, Result, State#{handler_state=>HandlerState1, last_op=>sync}};
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0}) ->
+ HConfig = maps:get(config, NewConfig, #{}),
+ State1 = maps:merge(State, HConfig),
+ case overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ gen_server:cast(self(), repeated_filesync)
+ end,
+ {reply, ok, State1};
+ false ->
+ #{sync_mode_qlen := SMQL,
+ drop_mode_qlen := DMQL,
+ flush_qlen := FQL} = State1,
+ {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, #{module:=Module,handler_state:=HandlerState}=State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp(),
+ handler_state => Module:reset_state(HandlerState)}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+%% This is the asynchronous log event.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
+ log_handler_info(Name, Format, Args, State),
+ {noreply, State};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% event, the repeat operation must be active!
+handle_cast(repeated_filesync,State = #{filesync_repeat_interval := no_repeat}) ->
+ {noreply,State};
+handle_cast(repeated_filesync,
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ filesync_repeat_interval := FSyncInt,
+ last_op := LastOp}) ->
+ HandlerState1 =
+ if LastOp == sync ->
+ HandlerState;
+ true ->
+ Module:async_filesync(Name,HandlerState)
+ end,
+ {ok,TRef} = timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),repeated_filesync]),
+ {noreply,State#{handler_state=>HandlerState1,
+ rep_sync_tref => TRef,
+ last_op => sync}}.
+
+handle_info(Info, #{module := Module, handler_state := HandlerState} = State) ->
+ {noreply,State#{handler_state => Module:handle_info(Info,HandlerState)}}.
+
+terminate(Reason, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ _ = cancel_timer(maps:get(rep_sync_tref, State, undefined)),
+ _ = Module:terminate(Name, Reason, HandlerState),
+ ok = stop_or_restart(Name, Reason, State),
+ unregister(?name_to_reg_name(Module, Name)),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%% check for overload between every event (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize events have been handled
+do_log(Bin, CallOrCast, State = #{id:=Name, module:=Module, mode:=Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% event to react quickly to large bursts of events and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = check_load(State),
+
+ if (Mode1 == drop) andalso (Mode0 =/= drop) ->
+ log_handler_info(Name, "Handler ~p switched to drop mode",
+ [Name], State);
+ (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
+ log_handler_info(Name, "Handler ~p switched to ~w mode",
+ [Name,Mode1], State);
+ true ->
+ ok
+ end,
+
+ %% kill the handler if it can't keep up with the load
+ kill_if_choked(Name, Module, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this clause is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ NewFlushed = flush_log_events(?FLUSH_MAX_N),
+
+ %% write info in log about flushed messages
+ log_handler_info(Name, "Handler ~p flushed ~w log events",
+ [Name,NewFlushed], State),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log event
+ ?observe(Name,{flushed,NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,NewFlushed,
+ State1#{mode => ?set_mode(ModeTab,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this clause is called to write to file
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{module := Module,
+ handler_state := HandlerState,
+ mode_tab := ModeTab,
+ ctrl_sync_count := CtrlSync,
+ ctrl_sync_int := CtrlSyncInt,
+ last_qlen := LastQLen,
+ last_log_ts := T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log events
+ {DoWrite,BurstWinT,BurstMsgCount} = limit_burst(State),
+
+ %% only log synhrounously every CtrlSyncInt time, to give the
+ %% handler time between writes so it can keep up with incoming
+ %% messages
+ {Result,LastQLen1,HandlerState1} =
+ if DoWrite, CtrlSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ {_,HS1} = Module:sync_write(Name, Bin, HandlerState),
+ {ok,element(2, process_info(self(), message_queue_len)),HS1};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ HS1 = Module:async_write(Name, Bin, HandlerState),
+ {ok,LastQLen,HS1};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped,LastQLen,HandlerState}
+ end,
+
+ %% Check if the time since the previous log event is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+ %% time between two consecutive log events is fast and no new
+ %% event comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1,HandlerState2} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ HS2 = Module:async_filesync(Name,HandlerState),
+ {?change_mode(ModeTab, Mode, async),0,HS2};
+ true ->
+ {Mode,BurstMsgCount,HandlerState1}
+ end,
+ State1 =
+ ?update_calls_or_casts(_CallOrCast,1,State),
+ State2 =
+ ?update_max_time(Time,
+ State1#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ last_op => write,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ ctrl_sync_count => if CtrlSync==0 -> CtrlSyncInt;
+ true -> CtrlSync-1
+ end,
+ handler_state => HandlerState2}),
+ {Result,State2}.
+
+log_handler_info(Name, Format, Args, #{module:=Module,
+ handler_state:=HandlerState}) ->
+ Config =
+ case logger:get_handler_config(Name) of
+ {ok,Conf} -> Conf;
+ _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
+ end,
+ Meta = #{time=>erlang:system_time(microsecond)},
+ Bin = log_to_binary(#{level => notice,
+ msg => {Format,Args},
+ meta => Meta}, Config),
+ _ = Module:async_write(Name, Bin, HandlerState),
+ ok.
%%%-----------------------------------------------------------------
%%% Convert log data on any form to binary
@@ -94,46 +609,52 @@ string_to_binary(String) ->
throw(Error)
end.
-
%%%-----------------------------------------------------------------
%%% Check that the configuration term is valid
-check_common_config({mode_tab,_Tid}) ->
- valid;
-check_common_config({handler_pid,Pid}) when is_pid(Pid) ->
- valid;
-
-check_common_config({sync_mode_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({drop_mode_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({flush_qlen,N}) when is_integer(N) ->
- valid;
-
-check_common_config({burst_limit_enable,Bool}) when Bool == true;
- Bool == false ->
- valid;
-check_common_config({burst_limit_max_count,N}) when is_integer(N) ->
- valid;
-check_common_config({burst_limit_window_time,N}) when is_integer(N) ->
- valid;
-
-check_common_config({overload_kill_enable,Bool}) when Bool == true;
- Bool == false ->
- valid;
-check_common_config({overload_kill_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({overload_kill_mem_size,N}) when is_integer(N) ->
- valid;
-check_common_config({overload_kill_restart_after,NorA}) when is_integer(NorA);
- NorA == infinity ->
- valid;
-
-check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
- NorA == no_repeat ->
- valid;
-check_common_config(_) ->
- invalid.
+check_config(Config) when is_map(Config) ->
+ check_common_config(maps:to_list(Config)).
+check_common_config([{sync_mode_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{drop_mode_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{flush_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_enable,Bool}|Config]) when is_boolean(Bool) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_max_count,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_window_time,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_enable,Bool}|Config]) when is_boolean(Bool) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_mem_size,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_restart_after,NorA}|Config])
+ when is_integer(NorA); NorA == infinity ->
+ check_common_config(Config);
+check_common_config([{filesync_repeat_interval,NorA}|Config])
+ when is_integer(NorA); NorA == no_repeat ->
+ check_common_config(Config);
+check_common_config([{Key,Value}|_]) ->
+ {error,#{Key=>Value}};
+check_common_config([]) ->
+ ok.
+
+get_default_config() ->
+ #{sync_mode_qlen => ?SYNC_MODE_QLEN,
+ drop_mode_qlen => ?DROP_MODE_QLEN,
+ flush_qlen => ?FLUSH_QLEN,
+ burst_limit_enable => ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
%%%-----------------------------------------------------------------
%%% Overload Protection
@@ -243,16 +764,16 @@ limit_burst(#{burst_win_ts := BurstWinT0,
{true,BurstWinT0,BurstMsgCount+1}
end.
-kill_if_choked(Name, QLen, Mem, HandlerMod,
+kill_if_choked(Name, Module, QLen, Mem,
State = #{overload_kill_enable := KillIfOL,
overload_kill_qlen := OLKillQLen,
overload_kill_mem_size := OLKillMem}) ->
if KillIfOL andalso
((QLen > OLKillQLen) orelse (Mem > OLKillMem)) ->
- HandlerMod:log_handler_info(Name,
- "Handler ~p overloaded and stopping",
- [Name], State),
- set_restart_flag(Name, HandlerMod),
+ log_handler_info(Name,
+ "Handler ~p overloaded and stopping",
+ [Name], State),
+ set_restart_flag(Name, Module),
handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
true ->
ok
@@ -290,9 +811,9 @@ cancel_timer(TRef) -> timer:cancel(TRef).
stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
#{overload_kill_restart_after := RestartAfter}) ->
%% If we're terminating because of an overload situation (see
- %% logger_h_common:kill_if_choked/4), we need to remove the handler
- %% and set a restart timer. A separate process must perform this
- %% in order to avoid deadlock.
+ %% kill_if_choked/5), we need to remove the handler and set a
+ %% restart timer. A separate process must perform this in order to
+ %% avoid deadlock.
HandlerPid = self(),
ConfigResult = logger:get_handler_config(Name),
RemoveAndRestart =
@@ -334,6 +855,3 @@ overload_levels_ok(HandlerConfig) ->
error_notify(Term) ->
?internal_log(error, Term).
-
-info_notify(Term) ->
- ?internal_log(info, Term).
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 42e0f5caf4..95e7d17c0d 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -19,8 +19,6 @@
%%
-module(logger_std_h).
--behaviour(gen_server).
-
-include("logger.hrl").
-include("logger_internal.hrl").
-include("logger_h_common.hrl").
@@ -28,52 +26,30 @@
-include_lib("kernel/include/file.hrl").
%% API
--export([start_link/3, info/1, filesync/1, reset/1]).
+-export([info/1, filesync/1, reset/1]).
-%% gen_server and proc_lib callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+%% logger_h_common callbacks
+-export([init/2, check_config/4, reset_state/1,
+ async_filesync/2, sync_filesync/2,
+ async_write/3, sync_write/3,
+ handle_info/2, terminate/3]).
%% logger callbacks
-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
filter_config/1]).
-%% handler internal
--export([log_handler_info/4]).
-
%%%===================================================================
%%% API
%%%===================================================================
%%%-----------------------------------------------------------------
-%%% Start a standard handler process and link to caller.
-%%% This function is called by the kernel supervisor when this
-%%% handler process gets added
--spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
- Name :: atom(),
- Config :: logger:handler_config(),
- HandlerState :: map(),
- Pid :: pid(),
- Reason :: term().
-
-start_link(Name, Config, HandlerState) ->
- proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
-
-%%%-----------------------------------------------------------------
%%%
-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-filesync(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- filesync, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
filesync(Name) ->
- {error,{badarg,{filesync,[Name]}}}.
+ logger_h_common:filesync(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -82,15 +58,8 @@ filesync(Name) ->
Info :: term(),
Reason :: handler_busy | {badarg,term()}.
-info(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- info, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
info(Name) ->
- {error,{badarg,{info,[Name]}}}.
+ logger_h_common:info(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -98,16 +67,8 @@ info(Name) ->
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-reset(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- reset, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
reset(Name) ->
- {error,{badarg,{reset,[Name]}}}.
-
+ logger_h_common:reset(?MODULE,Name).
%%%===================================================================
%%% logger callbacks
@@ -115,30 +76,48 @@ reset(Name) ->
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(#{id:=Name}=Config) ->
- case check_config(adding, Config) of
- {ok, #{config:=HConfig}=Config1} ->
- %% create initial handler state by merging defaults with config
- HState = maps:merge(get_init_state(), HConfig),
- case logger_h_common:overload_levels_ok(HState) of
- true ->
- start(Name, Config1, HState);
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = HState,
- {error,{invalid_levels,{SMQL,DMQL,FQL}}}
- end;
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
+
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, #{type := Type}) ->
+ case open_log_file(Name, Type) of
+ {ok,FileCtrlPid} ->
+ {ok,#{type=>Type,file_ctrl_pid=>FileCtrlPid}};
Error ->
Error
end.
-%%%-----------------------------------------------------------------
-%%% Updating handler config
-changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+check_config(_Name,set,undefined,NewHConfig) ->
+ check_config(maps:merge(get_default_config(),NewHConfig));
+check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
WriteOnce = maps:with([type],OldHConfig),
- ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
- NewHConfig0 = maps:get(config, NewConfig, #{}),
Default =
case SetOrUpdate of
set ->
@@ -148,48 +127,24 @@ changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
OldHConfig
end,
- %% Allow (accidentially) included read-only fields - just overwrite them
- NewHConfig = maps:merge(maps:merge(Default, NewHConfig0),ReadOnly),
+ NewHConfig = maps:merge(Default, NewHConfig0),
- %% But fail if write-once fields are changed
+ %% Fail if write-once fields are changed
case maps:with([type],NewHConfig) of
WriteOnce ->
- changing_config1(maps:get(handler_pid,OldHConfig),
- OldConfig,
- NewConfig#{config=>NewHConfig});
+ check_config(NewHConfig);
Other ->
- {error,{illegal_config_change,#{config=>WriteOnce},#{config=>Other}}}
+ {error,{illegal_config_change,?MODULE,WriteOnce,Other}}
end.
-changing_config1(HPid, OldConfig, NewConfig) ->
- case check_config(changing, NewConfig) of
- Result = {ok,NewConfig1} ->
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
- ?DEFAULT_CALL_TIMEOUT) of
- ok -> Result;
- HError -> HError
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
- Error ->
- Error
- end.
-
-check_config(adding, Config) ->
- %% Merge in defaults on handler level
- HConfig0 = maps:get(config, Config, #{}),
- HConfig = maps:merge(get_default_config(),HConfig0),
+check_config(#{type:=Type}=HConfig) ->
case check_h_config(maps:to_list(HConfig)) of
+ ok when is_atom(Type) ->
+ {ok,HConfig#{filesync_repeat_interval=>no_repeat}};
ok ->
- {ok,Config#{config=>HConfig}};
- Error ->
- Error
- end;
-check_config(changing, Config) ->
- HConfig = maps:get(config, Config, #{}),
- case check_h_config(maps:to_list(HConfig)) of
- ok -> {ok,Config};
- Error -> Error
+ {ok,HConfig};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
end.
check_h_config([{type,Type} | Config]) when Type == standard_io;
@@ -200,219 +155,44 @@ check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
is_list(Modes) ->
check_h_config(Config);
-check_h_config([Other | Config]) ->
- case logger_h_common:check_common_config(Other) of
- valid ->
- check_h_config(Config);
- invalid ->
- {error,{invalid_config,?MODULE,Other}}
- end;
+check_h_config([Other | _]) ->
+ {error,Other};
check_h_config([]) ->
ok.
-
-%%%-----------------------------------------------------------------
-%%% Handler being removed
-removing_handler(#{id:=Name}) ->
- stop(Name).
-
-%%%-----------------------------------------------------------------
-%%% Log a string or report
--spec log(LogEvent, Config) -> ok when
- LogEvent :: logger:log_event(),
- Config :: logger:handler_config().
-
-log(LogEvent, Config = #{id := Name,
- config := #{handler_pid := HPid,
- mode_tab := ModeTab}}) ->
- %% if the handler has crashed, we must drop this event
- %% and hope the handler restarts so we can try again
- true = is_process_alive(HPid),
- Bin = logger_h_common:log_to_binary(LogEvent, Config),
- logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
-
-%%%-----------------------------------------------------------------
-%%% Remove internal fields from configuration
-filter_config(#{config:=HConfig}=Config) ->
- Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
-
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
-
-init([Name, Config = #{config := HConfig},
- State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) ->
- RegName = ?name_to_reg_name(?MODULE,Name),
- register(RegName, self()),
- process_flag(trap_exit, true),
- process_flag(message_queue_data, off_heap),
-
- ?init_test_hooks(),
- ?start_observation(Name),
-
- case do_init(Name, Type) of
- {ok,InitState} ->
- try ets:new(Name, [public]) of
- ModeTab ->
- ?set_mode(ModeTab, async),
- State = maps:merge(State0, InitState),
- T0 = ?timestamp(),
- State1 =
- ?merge_with_stats(State#{
- mode_tab => ModeTab,
- mode => async,
- file_ctrl_sync => FileCtrlSyncInt,
- last_qlen => 0,
- last_log_ts => T0,
- last_op => sync,
- burst_win_ts => T0,
- burst_msg_count => 0}),
- Config1 =
- Config#{config => HConfig#{handler_pid => self(),
- mode_tab => ModeTab}},
- proc_lib:init_ack({ok,self(),Config1}),
- gen_server:cast(self(), repeated_filesync),
- gen_server:enter_loop(?MODULE, [], State1)
- catch
- _:Error ->
- unregister(RegName),
- logger_h_common:error_notify({init_handler,Name,Error}),
- proc_lib:init_ack(Error)
- end;
- Error ->
- unregister(RegName),
- logger_h_common:error_notify({init_handler,Name,Error}),
- proc_lib:init_ack(Error)
- end.
-
-do_init(Name, Type) ->
- case open_log_file(Name, Type) of
- {ok,FileCtrlPid} ->
- case logger_h_common:unset_restart_flag(Name, ?MODULE) of
- true ->
- %% inform about restart
- gen_server:cast(self(), {log_handler_info,
- "Handler ~p restarted",
- [Name]});
- false ->
- %% initial start
- ok
- end,
- {ok,#{id=>Name,type=>Type,file_ctrl_pid=>FileCtrlPid}};
- Error ->
- Error
- end.
-
-%% This is the synchronous log event.
-handle_call({log, Bin}, _From, State) ->
- {Result,State1} = do_log(Bin, call, State),
- %% Result == ok | dropped
- {reply,Result, State1};
-
-handle_call(filesync, _From, State = #{type := Type,
- file_ctrl_pid := FileCtrlPid}) ->
- if is_atom(Type) ->
- {reply, ok, State};
- true ->
- {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}}
- end;
-
-handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0}) ->
- HConfig = maps:get(config, NewConfig, #{}),
- State1 = maps:merge(State, HConfig),
- case logger_h_common:overload_levels_ok(State1) of
- true ->
- _ =
- case maps:get(filesync_repeat_interval, HConfig, undefined) of
- undefined ->
- ok;
- no_repeat ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined));
- FSyncInt0 ->
- ok;
- _FSyncInt1 ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined)),
- gen_server:cast(self(), repeated_filesync)
- end,
- {reply, ok, State1};
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = State1,
- {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
- end;
-
-handle_call(info, _From, State) ->
- {reply, State, State};
-
-handle_call(reset, _From, State) ->
- State1 = ?merge_with_stats(State),
- {reply, ok, State1#{last_qlen => 0,
- last_log_ts => ?timestamp()}};
-
-handle_call(stop, _From, State) ->
- {stop, {shutdown,stopped}, ok, State}.
-
-%% This is the asynchronous log event.
-handle_cast({log, Bin}, State) ->
- {_,State1} = do_log(Bin, cast, State),
- {noreply, State1};
-
-handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
- log_handler_info(Name, Format, Args, State),
- {noreply, State};
-
-%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
-%% clause gets called repeatedly by the handler. In order to
-%% guarantee that a filesync *always* happens after the last log
-%% event, the repeat operation must be active!
-handle_cast(repeated_filesync,
- State = #{type := Type,
- file_ctrl_pid := FileCtrlPid,
- filesync_repeat_interval := FSyncInt,
- last_op := LastOp}) ->
- State1 =
- if not is_atom(Type), is_integer(FSyncInt) ->
- %% only do filesync if something has been
- %% written since last time we checked
- if LastOp == sync ->
- ok;
- true ->
- file_ctrl_filesync_async(FileCtrlPid)
- end,
- {ok,TRef} =
- timer:apply_after(FSyncInt, gen_server,cast,
- [self(),repeated_filesync]),
- State#{rep_sync_tref => TRef, last_op => sync};
- true ->
- State
- end,
- {noreply,State1}.
-
-handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) ->
- case maps:get(file_ctrl_pid, State, undefined) of
- Pid ->
- %% file error, terminate handler
- logger_h_common:handler_exit(Name,
- {error,{write_failed,FileInfo,Why}});
- _Other ->
- %% ignore EXIT
- ok
- end,
- {noreply, State};
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
- type:=_FileInfo}) ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
- undefined)),
+get_default_config() ->
+ #{type => standard_io}.
+
+async_filesync(_Name,#{type := Type}=State) when is_atom(Type) ->
+ State;
+async_filesync(_Name,#{file_ctrl_pid := FileCtrlPid}=State) ->
+ ok = file_ctrl_filesync_async(FileCtrlPid),
+ State.
+
+sync_filesync(_Name,#{type := Type}=State) when is_atom(Type) ->
+ {ok,State};
+sync_filesync(_Name,#{file_ctrl_pid := FileCtrlPid}=State) ->
+ Result = file_ctrl_filesync_sync(FileCtrlPid),
+ {Result,State}.
+
+async_write(_Name, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ ok = file_write_async(FileCtrlPid, Bin),
+ State.
+
+sync_write(_Name, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ Result = file_write_sync(FileCtrlPid, Bin),
+ {Result,State}.
+
+reset_state(State) ->
+ State.
+
+handle_info({'EXIT',Pid,Why}, #{type := FileInfo, file_ctrl_pid := Pid}) ->
+ %% file_ctrl_pid died, file error, terminate handler
+ exit({error,{write_failed,FileInfo,Why}});
+handle_info(_, State) ->
+ State.
+
+terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
case is_process_alive(FWPid) of
true ->
unlink(FWPid),
@@ -427,13 +207,7 @@ terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
end;
false ->
ok
- end,
- ok = logger_h_common:stop_or_restart(Name, Reason, State),
- unregister(?name_to_reg_name(?MODULE, Name)),
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
+ end.
%%%===================================================================
%%% Internal functions
@@ -441,203 +215,6 @@ code_change(_OldVsn, State, _Extra) ->
%%%-----------------------------------------------------------------
%%%
-get_default_config() ->
- #{type => standard_io,
- sync_mode_qlen => ?SYNC_MODE_QLEN,
- drop_mode_qlen => ?DROP_MODE_QLEN,
- flush_qlen => ?FLUSH_QLEN,
- burst_limit_enable => ?BURST_LIMIT_ENABLE,
- burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
- burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
- overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
- overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
- overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
- overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
-
-get_init_state() ->
- #{file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
-
-%%%-----------------------------------------------------------------
-%%% Add a standard handler to the logger.
-%%% This starts a dedicated handler process which should always
-%%% exist if the handler is registered with logger (and should not
-%%% exist if the handler is not registered).
-%%%
-%%% Handler specific config should be provided with a sub map associated
-%%% with a key named 'config', e.g:
-%%%
-%%% Config = #{config => #{sync_mode_qlen => 50}}
-%%%
-%%% The standard handler process is linked to logger_sup, which is
-%%% part of the kernel application's supervision tree.
-start(Name, Config, HandlerState) ->
- LoggerStdH =
- #{id => Name,
- start => {?MODULE, start_link, [Name,Config,HandlerState]},
- restart => temporary,
- shutdown => 2000,
- type => worker,
- modules => [?MODULE]},
- case supervisor:start_child(logger_sup, LoggerStdH) of
- {ok,Pid,Config1} ->
- ok = logger_handler_watcher:register_handler(Name,Pid),
- {ok,Config1};
- Error ->
- Error
- end.
-
-%%%-----------------------------------------------------------------
-%%% Stop and remove the handler.
-stop(Name) ->
- case whereis(?name_to_reg_name(?MODULE,Name)) of
- undefined ->
- ok;
- Pid ->
- %% We don't want to do supervisor:terminate_child here
- %% since we need to distinguish this explicit stop from a
- %% system termination in order to avoid circular attempts
- %% at removing the handler (implying deadlocks and
- %% timeouts).
- %% And we don't need to do supervisor:delete_child, since
- %% the restart type is temporary, which means that the
- %% child specification is automatically removed from the
- %% supervisor when the process dies.
- _ = gen_server:call(Pid, stop),
- ok
- end.
-
-%%%-----------------------------------------------------------------
-%%% Logging and overload control.
--define(update_file_ctrl_sync(C, Interval),
- if C == 0 -> Interval;
- true -> C-1 end).
-
-%% check for overload between every event (and set Mode to async,
-%% sync or drop accordingly), but never flush the whole mailbox
-%% before LogWindowSize events have been handled
-do_log(Bin, CallOrCast, State = #{id:=Name, mode:=Mode0}) ->
- T1 = ?timestamp(),
-
- %% check if the handler is getting overloaded, or if it's
- %% recovering from overload (the check must be done for each
- %% event to react quickly to large bursts of events and
- %% to ensure that the handler can never end up in drop mode
- %% with an empty mailbox, which would stop operation)
- {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
-
- if (Mode1 == drop) andalso (Mode0 =/= drop) ->
- log_handler_info(Name, "Handler ~p switched to drop mode",
- [Name], State);
- (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
- log_handler_info(Name, "Handler ~p switched to ~w mode",
- [Name,Mode1], State);
- true ->
- ok
- end,
-
- %% kill the handler if it can't keep up with the load
- logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
-
- if Mode1 == flush ->
- flush(Name, QLen, T1, State1);
- true ->
- write(Name, Mode1, T1, Bin, CallOrCast, State1)
- end.
-
-%% this clause is called by do_log/3 after an overload check
-%% has been performed, where QLen > FlushQLen
-flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
- %% flush messages in the mailbox (a limited number in
- %% order to not cause long delays)
- NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
-
- %% write info in log about flushed messages
- log_handler_info(Name, "Handler ~p flushed ~w log events",
- [Name,NewFlushed], State),
-
- %% because of the receive loop when flushing messages, the
- %% handler will be scheduled out often and the mailbox could
- %% grow very large, so we'd better check the queue again here
- {_,_QLen1} = process_info(self(), message_queue_len),
- ?observe(Name,{max_qlen,_QLen1}),
-
- %% Add 1 for the current log event
- ?observe(Name,{flushed,NewFlushed+1}),
-
- State1 = ?update_max_time(?diff_time(T1,_T0),State),
- {dropped,?update_other(flushed,FLUSHED,NewFlushed,
- State1#{mode => ?set_mode(ModeTab,async),
- last_qlen => 0,
- last_log_ts => T1})}.
-
-%% this clause is called to write to file
-write(_Name, Mode, T1, Bin, _CallOrCast,
- State = #{mode_tab := ModeTab,
- file_ctrl_pid := FileCtrlPid,
- file_ctrl_sync := FileCtrlSync,
- last_qlen := LastQLen,
- last_log_ts := T0,
- file_ctrl_sync_int := FileCtrlSyncInt}) ->
- %% check if we need to limit the number of writes
- %% during a burst of log events
- {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
-
-    %% only send a synchronous event to the file controller process
- %% every FileCtrlSyncInt time, to give the handler time between
- %% file writes so it can keep up with incoming messages
- {Result,LastQLen1} =
- if DoWrite, FileCtrlSync == 0 ->
- ?observe(_Name,{_CallOrCast,1}),
- file_write_sync(FileCtrlPid, Bin, false),
- {ok,element(2, process_info(self(), message_queue_len))};
- DoWrite ->
- ?observe(_Name,{_CallOrCast,1}),
- file_write_async(FileCtrlPid, Bin),
- {ok,LastQLen};
- not DoWrite ->
- ?observe(_Name,{flushed,1}),
- {dropped,LastQLen}
- end,
-
- %% Check if the time since the previous log event is long enough -
- %% and the queue length small enough - to assume the mailbox has
- %% been emptied, and if so, do filesync operation and reset mode to
- %% async. Note that this is the best we can do to detect an idle
- %% handler without setting a timer after each log call/cast. If the
- %% time between two consecutive log events is fast and no new
- %% event comes in after the last one, idle state won't be detected!
- Time = ?diff_time(T1,T0),
- {Mode1,BurstMsgCount1} =
- if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
- (Time > ?IDLE_DETECT_TIME_USEC) ->
- %% do filesync if necessary
- case maps:get(type, State) of
- Std when is_atom(Std) ->
- ok;
- _File ->
- file_ctrl_filesync_async(FileCtrlPid)
- end,
- {?change_mode(ModeTab, Mode, async),0};
- true ->
- {Mode,BurstMsgCount}
- end,
- State1 =
- ?update_calls_or_casts(_CallOrCast,1,State),
- State2 =
- ?update_max_time(Time,
- State1#{mode => Mode1,
- last_qlen := LastQLen1,
- last_log_ts => T1,
- last_op => write,
- burst_win_ts => BurstWinT,
- burst_msg_count => BurstMsgCount1,
- file_ctrl_sync =>
- ?update_file_ctrl_sync(FileCtrlSync,
- FileCtrlSyncInt)}),
- {Result,State2}.
-
open_log_file(HandlerName, FileInfo) ->
case file_ctrl_start(HandlerName, FileInfo) of
OK = {ok,_FileCtrlPid} -> OK;
@@ -669,19 +246,6 @@ close_log_file(Fd) ->
_ = file:close(Fd).
-log_handler_info(Name, Format, Args, #{file_ctrl_pid := FileCtrlPid}) ->
- Config =
- case logger:get_handler_config(Name) of
- {ok,Conf} -> Conf;
- _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
- end,
- Meta = #{time=>erlang:system_time(microsecond)},
- Bin = logger_h_common:log_to_binary(#{level => notice,
- msg => {Format,Args},
- meta => Meta}, Config),
- _ = file_write_async(FileCtrlPid, Bin),
- ok.
-
%%%-----------------------------------------------------------------
%%% File control process
@@ -708,13 +272,8 @@ file_write_async(Pid, Bin) ->
Pid ! {log,Bin},
ok.
-file_write_sync(Pid, Bin, FileSync) ->
- case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of
- {error,Reason} ->
- {error,{write_failed,Bin,Reason}};
- Result ->
- Result
- end.
+file_write_sync(Pid, Bin) ->
+ file_ctrl_call(Pid, {log,self(),Bin}).
file_ctrl_filesync_async(Pid) ->
Pid ! filesync,
@@ -766,33 +325,22 @@ file_ctrl_loop(Fd, Type, DevName, Synced,
Result, PrevSyncResult, HandlerName);
%% synchronous event
- {{log,From,Bin,FileSync},MRef} ->
- if Type == file ->
- %% check that file hasn't been deleted
- CheckFile =
- fun() -> {ok,_} = file:read_file_info(DevName) end,
- spawn_link(CheckFile),
- WResult = write_to_dev(Fd, Bin, DevName,
- PrevWriteResult, HandlerName),
- {Synced1,SResult} =
- if not FileSync ->
- {false,PrevSyncResult};
- true ->
- case sync_dev(Fd, DevName,
- PrevSyncResult, HandlerName) of
- ok -> {true,ok};
- Error -> {false,Error}
- end
- end,
- From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, Synced1,
- WResult, SResult, HandlerName);
- true ->
- _ = io:put_chars(Fd, Bin),
- From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, false,
- ok, PrevSyncResult, HandlerName)
- end;
+ {{log,From,Bin},MRef} ->
+ WResult =
+ if Type == file ->
+ %% check that file hasn't been deleted
+ CheckFile =
+ fun() -> {ok,_} = file:read_file_info(DevName) end,
+ spawn_link(CheckFile),
+ write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName);
+ true ->
+ _ = io:put_chars(Fd, Bin),
+ ok
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, false,
+ WResult, PrevSyncResult, HandlerName);
filesync when not Synced ->
Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName),
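With the gen_server machinery gone from the handler modules, what remains in logger_std_h (and logger_disk_log_h) is the small callback set shown above. As a rough sketch of that contract, and not part of this commit, the module below implements the same callbacks for a hypothetical handler that stores formatted log binaries in an ETS table; the module name logger_ets_h and the ETS storage are invented for illustration, and the registration wrappers (adding_handler/1 and friends) are omitted.

%% logger_ets_h.erl - illustration only, not part of OTP
-module(logger_ets_h).

%% logger callbacks (delegated, as in logger_std_h above)
-export([log/2, filter_config/1]).

%% logger_h_common callbacks
-export([init/2, check_config/4, reset_state/1,
         async_filesync/2, sync_filesync/2,
         async_write/3, sync_write/3,
         handle_info/2, terminate/3]).

log(LogEvent, Config) ->
    logger_h_common:log(LogEvent, Config).

filter_config(Config) ->
    logger_h_common:filter_config(Config).

%% Handler state is just the ETS table holding the formatted binaries.
init(Name, _HConfig) ->
    {ok, #{tab => ets:new(Name, [public])}}.

%% Accept any handler specific config here; a real handler would validate
%% and return {error,{invalid_config,?MODULE,Bad}} as in the diff above.
check_config(_Name, _SetOrUpdate, _OldHConfig, NewHConfig) ->
    {ok, NewHConfig}.

async_write(_Name, Bin, #{tab := Tab} = State) ->
    true = ets:insert(Tab, {erlang:unique_integer([monotonic]), Bin}),
    State.

sync_write(Name, Bin, State) ->
    {ok, async_write(Name, Bin, State)}.

%% Nothing is buffered outside ETS, so filesync is a no-op.
async_filesync(_Name, State) -> State.
sync_filesync(_Name, State) -> {ok, State}.

reset_state(State) -> State.
handle_info(_Info, State) -> State.

terminate(_Name, _Reason, #{tab := Tab}) ->
    true = ets:delete(Tab),
    ok.
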
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index 905c2c52c5..7bac4c2cb2 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -332,16 +332,18 @@ errors(Config) ->
%%! Check how bad log_opts are handled!
{error,{illegal_config_change,
- #{config:=#{type:=wrap}},
- #{config:=#{type:=halt}}}} =
+ logger_disk_log_h,
+ #{type:=wrap},
+ #{type:=halt}}} =
logger:update_handler_config(Name1,
config,
#{type=>halt,
file=>LogFile1}),
{error,{illegal_config_change,
- #{config:=#{file:=LogFile1}},
- #{config:=#{file:="newfilename"}}}} =
+ logger_disk_log_h,
+ #{file:=LogFile1},
+ #{file:="newfilename"}}} =
logger:update_handler_config(Name1,
config,
#{file=>"newfilename"}),
@@ -411,7 +413,7 @@ formatter_fail(cleanup,_Config) ->
ok.
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{bad => bad},
filter_default=>log,
@@ -433,12 +435,9 @@ config_fail(_Config) ->
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
%% can't change the disk log options for a log already in use
- {error,{illegal_config_change,_,_}} =
+ {error,{illegal_config_change,logger_disk_log_h,_,_}} =
logger:update_handler_config(?MODULE,config,
#{max_no_files=>2}),
- %% can't change name of an existing handler
- {error,{illegal_config_change,_,_}} =
- logger:update_handler_config(?MODULE,id,bad),
%% incorrect values of OP params
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
{error,{invalid_levels,_}} =
@@ -446,7 +445,7 @@ config_fail(_Config) ->
HConfig#{sync_mode_qlen=>100,
flush_qlen=>99}),
%% invalid name of config parameter
- {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
+ {error,{invalid_config,logger_disk_log_h,#{filesync_rep_int:=2000}}} =
logger:update_handler_config(?MODULE, config,
HConfig#{filesync_rep_int => 2000}),
ok.
@@ -487,10 +486,11 @@ reconfig(Config) ->
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
- log_opts := #{type := ?DISK_LOG_TYPE,
- max_no_files := ?DISK_LOG_MAX_NO_FILES,
- max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
- file := DiskLogFile}} =
+ handler_state :=
+ #{log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := DiskLogFile}}} =
logger_disk_log_h:info(?MODULE),
{ok,#{config :=
#{sync_mode_qlen := ?SYNC_MODE_QLEN,
@@ -572,10 +572,11 @@ reconfig(Config) ->
max_no_files => 1,
max_no_bytes => 1024,
file => File}}),
- #{log_opts := #{type := halt,
- max_no_files := 1,
- max_no_bytes := 1024,
- file := File}} =
+ #{handler_state :=
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}}} =
logger_disk_log_h:info(?MODULE),
{ok,#{config :=
#{type := halt,
@@ -596,13 +597,13 @@ reconfig(Config) ->
%% You are not allowed to actively set the write once fields
%% (type, max_no_files, max_no_bytes, file) in runtime.
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{type=>wrap}),
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
{ok,C7} = logger:get_handler_config(?MODULE),
ct:log("C7: ~p",[C7]),
@@ -639,7 +640,7 @@ sync(Config) ->
%% wait for automatic disk_log_sync
check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
- %% check that if there's no repeated disk_log_sync active,
+ %% check that if there's no repeated filesync active,
%% a disk_log_sync is still performed when handler goes idle
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
@@ -667,12 +668,12 @@ sync(Config) ->
try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
- %% switch repeated disk_log_sync on and verify that the looping works
+ %% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync},
- %% receive 1 initial repeated_disk_log_sync, then 1 per sec
- start_tracer([{logger_disk_log_h,handle_cast,2}],
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 initial repeated_filesync, then 1 per sec
+ start_tracer([{logger_h_common,handle_cast,2}],
[OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
@@ -851,7 +852,8 @@ write_failure(Config) ->
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
+ ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts,
+ maps:get(handler_state,HState))]),
ok = log_on_remote_node(Node, "Logged1"),
rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
@@ -901,7 +903,7 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- LogOpts = maps:get(log_opts, HState),
+ LogOpts = maps:get(log_opts, maps:get(handler_state,HState)),
SyncInt = 500,
ok = rpc:call(Node, logger, update_handler_config,
@@ -1606,7 +1608,7 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]},Caller},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]},Caller},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller);
tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) ->
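The updated test cases above also document the shapes that user code now sees: invalid_config and illegal_config_change errors carry the handler module plus plain maps, and handler specific data from info/1 sits under a handler_state key. Below is a small, hypothetical sketch of client code matching those shapes; the module name handler_cfg_demo and the handler id my_disk_log are made up, while the matched tuples follow the assertions in the test diff.

-module(handler_cfg_demo).   %% illustration only
-export([add_with_bad_opts/0, log_opts/1]).

add_with_bad_opts() ->
    case logger:add_handler(my_disk_log, logger_disk_log_h,
                            #{config => #{bad => bad}}) of
        ok ->
            ok;
        {error,{handler_not_added,
                {invalid_config,logger_disk_log_h,BadOpts}}} when is_map(BadOpts) ->
            %% e.g. {invalid_options,[bad]}
            {invalid_options, maps:keys(BadOpts)};
        {error,Reason} ->
            {error,Reason}
    end.

log_opts(Name) ->
    %% handler specific data is now found under handler_state
    #{handler_state := #{log_opts := LogOpts}} = logger_disk_log_h:info(Name),
    LogOpts.
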
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index b6a09f4980..affa04d410 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -140,7 +140,7 @@ all() ->
].
add_remove_instance_tty(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{type:=tty}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => tty},
filter_default=>log,
@@ -234,7 +234,7 @@ errors(Config) ->
{error,
{handler_not_added,
- {invalid_config,logger_std_h,{type,faulty_type}}}} =
+ {invalid_config,logger_std_h,#{type:=faulty_type}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => faulty_type}}),
@@ -308,13 +308,13 @@ formatter_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
{error,{handler_not_added,{invalid_config,logger_std_h,
- {restart_type,bad}}}} =
+ #{restart_type:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{restart_type => bad},
filter_default=>log,
@@ -334,7 +334,7 @@ config_fail(_Config) ->
ok = logger:add_handler(?MODULE,logger_std_h,
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{illegal_config_change,#{config:=#{type:=_}},#{config:=#{type:=_}}}} =
+ {error,{illegal_config_change,logger_std_h,#{type:=_},#{type:=_}}} =
logger:set_handler_config(?MODULE,config,
#{type=>{file,"file"}}),
@@ -342,7 +342,7 @@ config_fail(_Config) ->
logger:set_handler_config(?MODULE,config,
#{sync_mode_qlen=>100,
flush_qlen=>99}),
- {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
+ {error,{invalid_config,logger_std_h,#{filesync_rep_int:=2000}}} =
logger:set_handler_config(?MODULE, config,
#{filesync_rep_int => 2000}),
@@ -468,8 +468,8 @@ reconfig(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
#{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
sync_mode_qlen := ?SYNC_MODE_QLEN,
drop_mode_qlen := ?DROP_MODE_QLEN,
flush_qlen := ?FLUSH_QLEN,
@@ -480,7 +480,7 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = DefaultInfo =
+ filesync_repeat_interval := no_repeat} = DefaultInfo =
logger_std_h:info(?MODULE),
{ok,
@@ -496,7 +496,7 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ filesync_repeat_interval := no_repeat} =
DefaultHConf}}
= logger:get_handler_config(?MODULE),
@@ -511,10 +511,10 @@ reconfig(Config) ->
overload_kill_qlen => 100000,
overload_kill_mem_size => 10000000,
overload_kill_restart_after => infinity,
- filesync_repeat_interval => no_repeat}),
+ filesync_repeat_interval => 5000}),
#{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
sync_mode_qlen := 1,
drop_mode_qlen := 2,
flush_qlen := 3,
@@ -584,7 +584,7 @@ reconfig(Config) ->
%% You are not allowed to actively set 'type' in runtime, since
%% this is a write once field.
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,logger_std_h,_,_}} =
logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
{ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
ct:log("C6: ~p",[C6]),
@@ -620,7 +620,7 @@ file_opts(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{type := OkType} = logger_std_h:info(?MODULE),
+ #{handler_state := #{type := OkType}} = logger_std_h:info(?MODULE),
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
@@ -697,9 +697,9 @@ sync(Config) ->
%% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_std_h,handle_cast,repeated_filesync},
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
%% receive 1 initial repeated_filesync, then 1 per sec
- start_tracer([{logger_std_h,handle_cast,2}],
+ start_tracer([{logger_h_common,handle_cast,2}],
[OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
ok = logger:update_handler_config(?MODULE, config,
@@ -1533,7 +1533,7 @@ start_op_trace() ->
{ok,_} = dbg:p(self(), [c]),
MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
- {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1),
+ {ok,_} = dbg:tpl(logger_h_common, check_load, 1, MS1),
{ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
@@ -1607,7 +1607,9 @@ analyse(Msgs) ->
start_tracer(Trace,Expected) ->
Pid = self(),
- FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ FileCtrlPid = maps:get(file_ctrl_pid,
+ maps:get(handler_state,
+ logger_std_h:info(?MODULE))),
dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
dbg:p(whereis(h_proc_name()),[c]),
dbg:p(FileCtrlPid,[c]),
@@ -1628,7 +1630,7 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},