Diffstat (limited to 'lib/sasl/test')
-rw-r--r-- | lib/sasl/test/release_handler_SUITE.erl | 46
1 files changed, 29 insertions, 17 deletions
diff --git a/lib/sasl/test/release_handler_SUITE.erl b/lib/sasl/test/release_handler_SUITE.erl
index 4935782cf2..edd2efdf05 100644
--- a/lib/sasl/test/release_handler_SUITE.erl
+++ b/lib/sasl/test/release_handler_SUITE.erl
@@ -22,7 +22,8 @@
 -include_lib("common_test/include/ct.hrl").
 -include("test_lib.hrl").
 
--compile(export_all).
+-compile([export_all, nowarn_export_all]).
+-export([scheduler_wall_time/0, garbage_collect/0]). %% rpc'ed
 
 % Default timetrap timeout (set in init_per_testcase).
 %-define(default_timeout, ?t:minutes(40)).
@@ -1085,8 +1086,9 @@ otp_9395_update_many_mods(Conf) when is_list(Conf) ->
     Rel2Dir = filename:dirname(Rel2),
 
     %% Start a slave node
+    PA = filename:dirname(code:which(?MODULE)),
     {ok, Node} = t_start_node(otp_9395_update_many_mods, Rel1,
-                              filename:join(Rel1Dir,"sys.config")),
+                              filename:join(Rel1Dir,"sys.config"), "-pa " ++ PA),
 
     %% Start a lot of processes on the new node, all with refs to each
     %% module that will be updated
@@ -1109,8 +1111,8 @@ otp_9395_update_many_mods(Conf) when is_list(Conf) ->
              [RelVsn2, filename:join(Rel2Dir, "sys.config")]),
 
     %% First, install release directly and check how much time it takes
-    rpc:call(Node,erlang,garbage_collect,[]),
-    rpc:call(Node,erlang,system_flag,[scheduler_wall_time,true]),
+    rpc:call(Node,?MODULE,garbage_collect,[]),
+    SWTFlag0 = spawn_link(Node, ?MODULE, scheduler_wall_time, []),
     {TInst0,{ok, _, []}} =
         timer:tc(rpc,call,[Node, release_handler, install_release, [RelVsn2]]),
     SWT0 = rpc:call(Node,erlang,statistics,[scheduler_wall_time]),
@@ -1135,9 +1137,9 @@ otp_9395_update_many_mods(Conf) when is_list(Conf) ->
 
     %% Finally install release after check and purge, and check that
     %% this install was faster than the first.
-    rpc:call(Node,erlang,system_flag,[scheduler_wall_time,false]),
-    rpc:call(Node,erlang,garbage_collect,[]),
-    rpc:call(Node,erlang,system_flag,[scheduler_wall_time,true]),
+    SWTFlag0 ! die,
+    rpc:call(Node,?MODULE,garbage_collect,[]),
+    _SWTFlag1 = spawn_link(Node, ?MODULE, scheduler_wall_time, []),
     {TInst2,{ok, _RelVsn1, []}} =
         timer:tc(rpc,call,[Node, release_handler, install_release, [RelVsn2]]),
     SWT2 = rpc:call(Node,erlang,statistics,[scheduler_wall_time]),
@@ -1161,6 +1163,15 @@ otp_9395_update_many_mods(Conf) when is_list(Conf) ->
 
     ok.
 
+scheduler_wall_time() ->
+    erlang:system_flag(scheduler_wall_time,true),
+    receive _Msg -> normal end.
+
+garbage_collect() ->
+    Pids = processes(),
+    [erlang:garbage_collect(Pid) || Pid <- Pids].
+
+
 otp_9395_update_many_mods(cleanup,_Conf) ->
     stop_node(node_name(otp_9395_update_many_mods)).
@@ -1190,8 +1201,9 @@ otp_9395_rm_many_mods(Conf) when is_list(Conf) ->
     Rel2Dir = filename:dirname(Rel2),
 
     %% Start a slave node
+    PA = filename:dirname(code:which(?MODULE)),
     {ok, Node} = t_start_node(otp_9395_rm_many_mods, Rel1,
-                              filename:join(Rel1Dir,"sys.config")),
+                              filename:join(Rel1Dir,"sys.config"), "-pa " ++ PA),
 
     %% Start a lot of processes on the new node, all with refs to each
     %% module that will be updated
@@ -1214,8 +1226,8 @@ otp_9395_rm_many_mods(Conf) when is_list(Conf) ->
              [RelVsn2, filename:join(Rel2Dir, "sys.config")]),
 
     %% First, install release directly and check how much time it takes
-    rpc:call(Node,erlang,garbage_collect,[]),
-    rpc:call(Node,erlang,system_flag,[scheduler_wall_time,true]),
+    rpc:call(Node,?MODULE,garbage_collect,[]),
+    SWTFlag0 = spawn_link(Node, ?MODULE, scheduler_wall_time, []),
     {TInst0,{ok, _, []}} =
         timer:tc(rpc,call,[Node, release_handler, install_release, [RelVsn2]]),
     SWT0 = rpc:call(Node,erlang,statistics,[scheduler_wall_time]),
@@ -1240,9 +1252,9 @@ otp_9395_rm_many_mods(Conf) when is_list(Conf) ->
 
     %% Finally install release after check and purge, and check that
     %% this install was faster than the first.
-    rpc:call(Node,erlang,system_flag,[scheduler_wall_time,false]),
-    rpc:call(Node,erlang,garbage_collect,[]),
-    rpc:call(Node,erlang,system_flag,[scheduler_wall_time,true]),
+    SWTFlag0 ! die,
+    rpc:call(Node,?MODULE,garbage_collect,[]),
+    _SWTFlag1 = spawn_link(Node, ?MODULE, scheduler_wall_time, []),
     {TInst2,{ok, _RelVsn1, []}} =
         timer:tc(rpc,call,[Node, release_handler, install_release, [RelVsn2]]),
     SWT2 = rpc:call(Node,erlang,statistics,[scheduler_wall_time]),
@@ -2568,15 +2580,15 @@ check_gg_info(Node,OtherAlive,OtherDead,Synced,N) ->
     GGI = rpc:call(Node, global_group, info, []),
     GI = rpc:call(Node, global, info,[]),
     try do_check_gg_info(OtherAlive,OtherDead,Synced,GGI,GI)
-    catch _:E when N==0 ->
+    catch _:E:Stacktrace when N==0 ->
            ?t:format("~nERROR: check_gg_info failed for ~p:~n~p~n"
                      "when GGI was: ~p~nand GI was: ~p~n",
-                     [Node,{E,erlang:get_stacktrace()},GGI,GI]),
+                     [Node,{E,Stacktrace},GGI,GI]),
            ?t:fail("check_gg_info failed");
-         _:E ->
+         _:E:Stacktrace ->
            ?t:format("~nWARNING: check_gg_info failed for ~p:~n~p~n"
                      "when GGI was: ~p~nand GI was: ~p~n",
-                     [Node,{E,erlang:get_stacktrace()},GGI,GI]),
+                     [Node,{E,Stacktrace},GGI,GI]),
            timer:sleep(1000),
            check_gg_info(Node,OtherAlive,OtherDead,Synced,N-1)
    end.
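Note on the scheduler_wall_time change: instead of toggling the flag through short-lived rpc calls, the patch spawns a long-lived process on the slave node (the new scheduler_wall_time/0 helper), which enables the flag and then blocks in a receive until it is sent a message such as die. Below is a minimal, self-contained sketch of that measure-then-stop pattern; the module name swt_sketch and the functions measure/2 and swt_holder/0 are illustrative only and not part of the test suite.

    -module(swt_sketch).
    -export([measure/2, swt_holder/0]).

    %% Spawn a holder process on Node that enables scheduler_wall_time and
    %% stays alive while Fun runs, read the statistics, then stop the holder.
    measure(Node, Fun) ->
        Holder = spawn_link(Node, ?MODULE, swt_holder, []),
        Result = Fun(),
        SWT = rpc:call(Node, erlang, statistics, [scheduler_wall_time]),
        Holder ! die,
        {Result, SWT}.

    %% Same shape as the suite's scheduler_wall_time/0 helper: enable the
    %% flag, then wait for any message before terminating.
    swt_holder() ->
        erlang:system_flag(scheduler_wall_time, true),
        receive _Msg -> normal end.

For spawn_link/4 and rpc:call/4 to reach these functions on the remote node, the module must be loadable there, which is presumably why the patch also passes "-pa " ++ PA when starting the slave node and exports the helpers explicitly.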
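Note on the check_gg_info change: the deprecated erlang:get_stacktrace/0 is replaced with the Class:Reason:Stacktrace try/catch pattern introduced in OTP 21, which binds the stacktrace directly in the exception clause head. A minimal sketch of the new form (module and function names are illustrative only):

    -module(stacktrace_sketch).
    -export([run/1]).

    %% OTP 21+ style: bind the stacktrace in the catch clause pattern
    %% instead of fetching it afterwards with erlang:get_stacktrace/0.
    run(Fun) ->
        try Fun()
        catch Class:Reason:Stacktrace ->
                {caught, Class, Reason, Stacktrace}
        end.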