| author | James Wheare <[email protected]> | 2012-09-25 15:23:55 +0100 |
|---|---|---|
| committer | Henrik Nord <[email protected]> | 2012-10-01 11:47:56 +0200 |
| commit | 79ce4791a326b15bec80e1a3870136548419a212 (patch) | |
| tree | 2296a5ec233e57ae09aac3291ec49555fb3a000b /lib/mnesia | |
| parent | af9a8a011fd06ae54187bae73192c52495090933 (diff) | |
| download | otp-79ce4791a326b15bec80e1a3870136548419a212.tar.gz otp-79ce4791a326b15bec80e1a3870136548419a212.tar.bz2 otp-79ce4791a326b15bec80e1a3870136548419a212.zip | |
mnesia: Use chained send_after instead of send_interval
timer:send_interval behaves badly when resuming from sleep on some
platforms. For example, if I sleep for 10 minutes, and have a
send_interval running once per minute, when I resume, 10 messages
will be sent immediately, eliminating the benefit of only running
the work periodically. This is admittedly a separate bug with
send_interval, but the workaround is straightforward, and also
protects from messages piling up in the queue when the work takes
longer than the interval.
This patch fixes piled-up error reports on resume from sleep:
** WARNING ** Mnesia is overloaded: {dump_log, write_threshold}
You'll still be warned if mnesia is overloaded, just not repeatedly.
Additionally, erlang:send_after is more efficient than using the
timer module equivalent [1].
[1] http://www.erlang.org/doc/efficiency_guide/commoncaveats.html#id57251
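
The pattern the patch adopts is easy to reproduce outside of mnesia. Below is a minimal, self-contained sketch of a chained one-shot timer in a gen_server; the module and function names (chained_timer_sketch, schedule_tick/0, do_periodic_work/0) are illustrative and not taken from the patch. Because the timer is re-armed only after the previous tick has been handled, at most one tick message can ever be queued, even if the node sleeps through many intervals or the work takes longer than the interval.

```erlang
%% Illustrative sketch of a chained erlang:send_after/3 timer.
%% Not part of the OTP patch; all names here are made up for the example.
-module(chained_timer_sketch).
-behaviour(gen_server).

-export([start_link/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-define(INTERVAL, timer:minutes(1)).   %% one tick per minute

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

init([]) ->
    %% The state is simply the reference of the currently armed timer.
    {ok, schedule_tick()}.

%% Arm a single one-shot timer and return its reference.
schedule_tick() ->
    erlang:send_after(?INTERVAL, self(), tick).

handle_info(tick, _OldRef) ->
    do_periodic_work(),
    %% Re-arm only after the work is done, so ticks can never pile up
    %% in the mailbox, regardless of how long the work took or how
    %% long the VM was suspended.
    {noreply, schedule_tick()};
handle_info(_Other, Ref) ->
    {noreply, Ref}.

handle_call(_Request, _From, Ref) ->
    {reply, ok, Ref}.

handle_cast(_Msg, Ref) ->
    {noreply, Ref}.

terminate(_Reason, _Ref) ->
    ok.

code_change(_OldVsn, Ref, _Extra) ->
    {ok, Ref}.

do_periodic_work() ->
    %% Placeholder for the real periodic job.
    ok.
```

Contrast this with timer:send_interval/2, which keeps sending on schedule whether or not the previous message has been consumed, so a suspended or slow consumer wakes up to a backlog.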
Diffstat (limited to 'lib/mnesia')
-rw-r--r-- | lib/mnesia/src/mnesia_controller.erl | 15
-rw-r--r-- | lib/mnesia/src/mnesia_recover.erl | 66
-rw-r--r-- | lib/mnesia/src/mnesia_tm.erl | 3

3 files changed, 51 insertions, 33 deletions
diff --git a/lib/mnesia/src/mnesia_controller.erl b/lib/mnesia/src/mnesia_controller.erl
index d488a33d67..ec67d9ec12 100644
--- a/lib/mnesia/src/mnesia_controller.erl
+++ b/lib/mnesia/src/mnesia_controller.erl
@@ -593,6 +593,12 @@ multicall(Nodes, Msg) ->
     {PatchedGood, Bad}.  %% Make the replies look like rpc:multicalls..
     %% rpc:multicall(Nodes, ?MODULE, call, [Msg]).
 
+next_async_dump_log() ->
+    Interval = mnesia_monitor:get_env(dump_log_time_threshold),
+    Msg = {next_async_dump_log, time_threshold},
+    Ref = erlang:send_after(Interval, self(), Msg),
+    Ref.
+
 %%%----------------------------------------------------------------------
 %%% Callback functions from gen_server
 %%%----------------------------------------------------------------------
@@ -614,9 +620,7 @@ init([Parent]) ->
     mnesia_lib:unset(original_nodes),
     mnesia_recover:connect_nodes(Diff),
 
-    Interval = mnesia_monitor:get_env(dump_log_time_threshold),
-    Msg = {async_dump_log, time_threshold},
-    {ok, Ref} = timer:send_interval(Interval, Msg),
+    Ref = next_async_dump_log(),
     mnesia_dumper:start_regulator(),
 
     Empty = gb_trees:empty(),
@@ -1121,6 +1125,11 @@ handle_sync_tabs([], _From) ->
 %%          {stop, Reason, State}              (terminate/2 is called)
 %%----------------------------------------------------------------------
 
+handle_info({next_async_dump_log, InitBy}, State) ->
+    async_dump_log(InitBy),
+    Ref = next_async_dump_log(),
+    noreply(State#state{dump_log_timer_ref=Ref});
+
 handle_info({async_dump_log, InitBy}, State) ->
     Worker = #dump_log{initiated_by = InitBy},
     State2 = add_worker(Worker, State),
diff --git a/lib/mnesia/src/mnesia_recover.erl b/lib/mnesia/src/mnesia_recover.erl
index 4750291a10..b64f428f15 100644
--- a/lib/mnesia/src/mnesia_recover.erl
+++ b/lib/mnesia/src/mnesia_recover.erl
@@ -45,7 +45,8 @@
          note_log_decision/2,
          outcome/2,
          start/0,
-         start_garb/0,
+         next_garb/0,
+         next_check_overload/0,
          still_pending/1,
          sync_trans_tid_serial/1,
          sync/0,
@@ -91,10 +92,38 @@ start() ->
 init() ->
     call(init).
 
-start_garb() ->
+next_garb() ->
     Pid = whereis(mnesia_recover),
-    {ok, _} = timer:send_interval(timer:minutes(2), Pid, garb_decisions),
-    {ok, _} = timer:send_interval(timer:seconds(10), Pid, check_overload).
+    erlang:send_after(timer:minutes(2), Pid, garb_decisions).
+
+next_check_overload() ->
+    Pid = whereis(mnesia_recover),
+    erlang:send_after(timer:seconds(10), Pid, check_overload).
+
+
+do_check_overload(S) ->
+    %% Time to check if mnesia_tm is overloaded
+    case whereis(mnesia_tm) of
+        Pid when is_pid(Pid) ->
+            Threshold = 100,
+            Prev = S#state.tm_queue_len,
+            {message_queue_len, Len} =
+                process_info(Pid, message_queue_len),
+            if
+                Len > Threshold, Prev > Threshold ->
+                    What = {mnesia_tm, message_queue_len, [Prev, Len]},
+                    mnesia_lib:report_system_event({mnesia_overload, What}),
+                    mnesia_lib:overload_set(mnesia_tm, true),
+                    S#state{tm_queue_len = 0};
+                Len > Threshold ->
+                    S#state{tm_queue_len = Len};
+                true ->
+                    mnesia_lib:overload_set(mnesia_tm, false),
+                    S#state{tm_queue_len = 0}
+            end;
+        undefined ->
+            S
+    end.
 
 allow_garb() ->
     cast(allow_garb).
@@ -853,34 +882,13 @@ handle_info({connect_nodes, Ns, From}, State) ->
     handle_call({connect_nodes,Ns},From,State);
 
 handle_info(check_overload, S) ->
-    %% Time to check if mnesia_tm is overloaded
-    case whereis(mnesia_tm) of
-        Pid when is_pid(Pid) ->
-
-            Threshold = 100,
-            Prev = S#state.tm_queue_len,
-            {message_queue_len, Len} =
-                process_info(Pid, message_queue_len),
-            if
-                Len > Threshold, Prev > Threshold ->
-                    What = {mnesia_tm, message_queue_len, [Prev, Len]},
-                    mnesia_lib:report_system_event({mnesia_overload, What}),
-                    mnesia_lib:overload_set(mnesia_tm, true),
-                    {noreply, S#state{tm_queue_len = 0}};
-
-                Len > Threshold ->
-                    {noreply, S#state{tm_queue_len = Len}};
-
-                true ->
-                    mnesia_lib:overload_set(mnesia_tm, false),
-                    {noreply, S#state{tm_queue_len = 0}}
-            end;
-        undefined ->
-            {noreply, S}
-    end;
+    State2 = do_check_overload(S),
+    next_check_overload(),
+    {noreply, State2};
 
 handle_info(garb_decisions, State) ->
     do_garb_decisions(),
+    next_garb(),
     {noreply, State};
 
 handle_info({force_decision, Tid}, State) ->
diff --git a/lib/mnesia/src/mnesia_tm.erl b/lib/mnesia/src/mnesia_tm.erl
index 0af7f55c06..b5b14ac05b 100644
--- a/lib/mnesia/src/mnesia_tm.erl
+++ b/lib/mnesia/src/mnesia_tm.erl
@@ -103,7 +103,8 @@ init(Parent) ->
     end,
     mnesia_schema:purge_tmp_files(),
-    mnesia_recover:start_garb(),
+    mnesia_recover:next_garb(),
+    mnesia_recover:next_check_overload(),
 
     ?eval_debug_fun({?MODULE, init}, [{nodes, AllOthers}]),
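
A side note on the references returned by erlang:send_after/3: mnesia_controller keeps the latest one in dump_log_timer_ref, which is what makes the chained timer stoppable. The hunks above never cancel it, so the following is only a hypothetical illustration of how such a reference could be cleaned up (maybe_cancel/1 and the mailbox flush are not part of the patch):

```erlang
%% Hypothetical helper, not part of the patch: cancel a chained timer
%% and flush a tick that may already have been delivered.
maybe_cancel(undefined) ->
    ok;
maybe_cancel(Ref) when is_reference(Ref) ->
    %% erlang:cancel_timer/1 returns the remaining time in milliseconds,
    %% or 'false' if the timer has already fired.
    _ = erlang:cancel_timer(Ref),
    receive
        {next_async_dump_log, _InitBy} -> ok
    after 0 ->
        ok
    end.
```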