Diffstat (limited to 'lib/kernel/test/heart_SUITE.erl')
-rw-r--r--  lib/kernel/test/heart_SUITE.erl | 134
1 file changed, 59 insertions, 75 deletions
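The patch replaces the fixed node name heart_test with a per-testcase unique name. For reference, a minimal sketch of how the new ?UNIQ_NODE_NAME macro behaves outside the suite (hypothetical module name; ?FUNCTION_NAME assumes OTP 19 or later):

-module(uniq_node_name_demo).
-export([demo/0]).

%% Same definition as the one added to heart_SUITE.erl below.
-define(UNIQ_NODE_NAME,
        list_to_atom(?MODULE_STRING ++ "__" ++
                     atom_to_list(?FUNCTION_NAME) ++ "_" ++
                     integer_to_list(erlang:unique_integer([positive])))).

demo() ->
    %% Evaluates to e.g. 'uniq_node_name_demo__demo_42': unique per call,
    %% so test nodes started by different cases cannot collide on name.
    ?UNIQ_NODE_NAME.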
diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl
index 39cd29cea0..548b27db97 100644
--- a/lib/kernel/test/heart_SUITE.erl
+++ b/lib/kernel/test/heart_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(heart_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2, start/1, restart/1,
@@ -37,31 +37,35 @@
-define(DEFAULT_TIMEOUT_SECS, 120).
+-define(UNIQ_NODE_NAME,
+ list_to_atom(?MODULE_STRING ++ "__" ++
+ atom_to_list(?FUNCTION_NAME) ++ "_" ++
+ integer_to_list(erlang:unique_integer([positive])))).
+
init_per_testcase(_Func, Config) ->
- Dog=test_server:timetrap(test_server:seconds(?DEFAULT_TIMEOUT_SECS)),
- [{watchdog, Dog}|Config].
+ Config.
-end_per_testcase(_Func, Config) ->
+end_per_testcase(_Func, _Config) ->
Nodes = nodes(),
lists:foreach(fun(X) ->
NNam = list_to_atom(hd(string:tokens(atom_to_list(X),"@"))),
case NNam of
heart_test ->
- ?t:format(1, "WARNING: Killed ~p~n", [X]),
+ ct:pal(?HI_VERBOSITY, "WARNING: Killed ~p~n", [X]),
rpc:cast(X, erlang, halt, []);
_ ->
ok
end
- end, Nodes),
- Dog=?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+ end, Nodes).
%%-----------------------------------------------------------------
%% Test suite for heart.
%% Should be started in a CC view with:
%% erl -sname master -rsh ctrsh
%%-----------------------------------------------------------------
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() -> [
start, restart, reboot,
@@ -97,7 +101,7 @@ end_per_suite(Config) when is_list(Config) ->
start_check(Type, Name) ->
start_check(Type, Name, []).
start_check(Type, Name, Envs) ->
- Args = case ?t:os_type() of
+ Args = case test_server:os_type() of
{win32,_} ->
"+t50000 -heart " ++ env_encode([{"HEART_COMMAND", no_reboot}|Envs]);
_ ->
@@ -107,32 +111,30 @@ start_check(Type, Name, Envs) ->
loose ->
loose_node:start(Name, Args, ?DEFAULT_TIMEOUT_SECS);
_ ->
- ?t:start_node(Name, Type, [{args, Args}])
+ test_server:start_node(Name, Type, [{args, Args}])
end,
erlang:monitor_node(Node, true),
case rpc:call(Node, erlang, whereis, [heart]) of
Pid when is_pid(Pid) ->
ok;
_ ->
- test_server:fail(heart_not_started)
+ ct:fail(heart_not_started)
end,
{ok, Node}.
-start(doc) -> [];
-start(suite) -> {req, [{time, 10}]};
start(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
rpc:call(Node, init, reboot, []),
receive
{nodedown, Node} -> ok
- after 2000 -> test_server:fail(node_not_closed)
+ after 2000 -> ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
case net_adm:ping(Node) of
pang ->
ok;
_ ->
- test_server:fail(node_rebooted)
+ ct:fail(node_rebooted)
end,
test_server:stop_node(Node).
@@ -145,34 +147,24 @@ start(Config) when is_list(Config) ->
%% restart
%% Purpose:
%% Check that a node is up and running after a init:restart/0
-restart(doc) -> [];
-restart(suite) ->
- case ?t:os_type() of
- {Fam, _} when Fam == unix; Fam == win32 ->
- {req, [{time,10}]};
- _ ->
- {skip, "Only run on unix and win32"}
- end;
restart(Config) when is_list(Config) ->
- {ok, Node} = start_check(loose, heart_test),
+ {ok, Node} = start_check(loose, ?UNIQ_NODE_NAME),
rpc:call(Node, init, restart, []),
receive
{nodedown, Node} ->
ok
- after 2000 ->
- test_server:fail(node_not_closed)
+ after 5000 ->
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
node_check_up_down(Node, 2000),
loose_node:stop(Node).
%% reboot
%% Purpose:
%% Check that a node is up and running after a init:reboot/0
-reboot(doc) -> [];
-reboot(suite) -> {req, [{time, 10}]};
reboot(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
[atom_to_list(lib:progname()) ++
@@ -182,9 +174,9 @@ reboot(Config) when is_list(Config) ->
{nodedown, Node} ->
ok
after 2000 ->
- test_server:fail(node_not_closed)
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
node_check_up_down(Node, 2000),
ok.
@@ -196,7 +188,6 @@ reboot(Config) when is_list(Config) ->
%% May currently dump core in beam debug build due to lock-order violation
%% This should be removed when a non-lockad information retriever is implemented
%% for crash dumps
-node_start_immediately_after_crash(suite) -> {req, [{time, 10}]};
node_start_immediately_after_crash(Config) when is_list(Config) ->
Config2 = ignore_cores:setup(?MODULE, node_start_immediately_after_crash, Config, true),
try
@@ -207,7 +198,8 @@ node_start_immediately_after_crash(Config) when is_list(Config) ->
node_start_immediately_after_crash_test(Config) when is_list(Config) ->
- {ok, Node} = start_check(loose, heart_test_imm, [{"ERL_CRASH_DUMP_SECONDS", "0"}]),
+ {ok, Node} = start_check(loose, ?UNIQ_NODE_NAME,
+ [{"ERL_CRASH_DUMP_SECONDS", "0"}]),
ok = rpc:call(Node, heart, set_cmd,
[atom_to_list(lib:progname()) ++
@@ -229,13 +221,13 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
T0 = now(),
receive {nodedown, Node} ->
- test_server:format("Took ~.2f s. for node to go down~n", [timer:now_diff(now(), T0)/1000000]),
+ io:format("Took ~.2f s. for node to go down~n", [timer:now_diff(now(), T0)/1000000]),
ok
%% timeout is very liberal here. nodedown is received in about 1 s. on linux (palantir)
%% and in about 10 s. on solaris (carcharoth)
- after (15000*test_server:timetrap_scale_factor()) -> test_server:fail(node_not_closed)
+ after (15000*test_server:timetrap_scale_factor()) -> ct:fail(node_not_closed)
end,
- test_server:sleep(3000),
+ timer:sleep(3000),
node_check_up_down(Node, 2000),
loose_node:stop(Node).
@@ -248,7 +240,6 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
%% May currently dump core in beam debug build due to lock-order violation
%% This should be removed when a non-lockad information retriever is implemented
%% for crash dumps
-node_start_soon_after_crash(suite) -> {req, [{time, 10}]};
node_start_soon_after_crash(Config) when is_list(Config) ->
Config2 = ignore_cores:setup(?MODULE, node_start_soon_after_crash, Config, true),
try
@@ -258,7 +249,8 @@ node_start_soon_after_crash(Config) when is_list(Config) ->
end.
node_start_soon_after_crash_test(Config) when is_list(Config) ->
- {ok, Node} = start_check(loose, heart_test_soon, [{"ERL_CRASH_DUMP_SECONDS", "10"}]),
+ {ok, Node} = start_check(loose, ?UNIQ_NODE_NAME,
+ [{"ERL_CRASH_DUMP_SECONDS", "10"}]),
ok = rpc:call(Node, heart, set_cmd,
[atom_to_list(lib:progname()) ++
@@ -278,9 +270,9 @@ node_start_soon_after_crash_test(Config) when is_list(Config) ->
rpc:cast(Node, Mod, do, []),
receive {nodedown, Node} -> ok
- after (15000*test_server:timetrap_scale_factor()) -> test_server:fail(node_not_closed)
+ after (15000*test_server:timetrap_scale_factor()) -> ct:fail(node_not_closed)
end,
- test_server:sleep(20000),
+ timer:sleep(20000),
node_check_up_down(Node, 15000),
loose_node:stop(Node).
@@ -293,16 +285,15 @@ node_check_up_down(Node, Tmo) ->
receive
{nodedown, Node} -> ok
after Tmo ->
- test_server:fail(node_not_closed2)
+ ct:fail(node_not_closed2)
end;
_ ->
- test_server:fail(node_not_rebooted)
+ ct:fail(node_not_rebooted)
end.
%% Only tests bad command, correct behaviour is tested in reboot/1.
-set_cmd(suite) -> [];
set_cmd(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
Cmd = wrong_atom,
{error, {bad_cmd, Cmd}} = rpc:call(Node, heart, set_cmd, [Cmd]),
Cmd1 = lists:duplicate(2047, $a),
@@ -314,9 +305,8 @@ set_cmd(Config) when is_list(Config) ->
stop_node(Node),
ok.
-clear_cmd(suite) -> {req,[{time,15}]};
clear_cmd(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
[atom_to_list(lib:progname()) ++
" -noshell -heart " ++ name(Node) ++ "&"]),
@@ -325,14 +315,14 @@ clear_cmd(Config) when is_list(Config) ->
{nodedown, Node} ->
ok
after 2000 ->
- test_server:fail(node_not_closed)
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
case net_adm:ping(Node) of
pong ->
erlang:monitor_node(Node, true);
_ ->
- test_server:fail(node_not_rebooted)
+ ct:fail(node_not_rebooted)
end,
ok = rpc:call(Node, heart, set_cmd,
["erl -noshell -heart " ++ name(Node) ++ "&"]),
@@ -342,20 +332,19 @@ clear_cmd(Config) when is_list(Config) ->
{nodedown, Node} ->
ok
after 2000 ->
- test_server:fail(node_not_closed)
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
case net_adm:ping(Node) of
pang ->
ok;
_ ->
- test_server:fail(node_rebooted)
+ ct:fail(node_rebooted)
end,
ok.
-get_cmd(suite) -> [];
get_cmd(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
Cmd = "test",
ok = rpc:call(Node, heart, set_cmd, [Cmd]),
{ok, Cmd} = rpc:call(Node, heart, get_cmd, []),
@@ -363,7 +352,7 @@ get_cmd(Config) when is_list(Config) ->
ok.
callback_api(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
none = rpc:call(Node, heart, get_callback, []),
M0 = self(),
F0 = ok,
@@ -391,13 +380,13 @@ callback_api(Config) when is_list(Config) ->
{ok, {M2,F2}} = rpc:call(Node, heart, get_callback, []),
ok = rpc:call(Node, heart, set_callback, [M2,F3]),
receive {nodedown, Node} -> ok
- after 5000 -> test_server:fail(node_not_killed)
+ after 5000 -> ct:fail(node_not_killed)
end,
stop_node(Node),
ok.
options_api(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
none = rpc:call(Node, heart, get_options, []),
M0 = self(),
F0 = ok,
@@ -425,15 +414,13 @@ options_api(Config) when is_list(Config) ->
ok.
-dont_drop(suite) ->
%%% Removed as it may crash epmd/distribution in colourful
%%% ways. While we ARE finding out WHY, it would
%%% be nice for others to be able to run the kernel test suite
-%%% without "exploding machines", so thats why I removed it for now.
- [];
-dont_drop(doc) ->
- ["Tests that the heart command does not get dropped when ",
- "set just before halt on very high I/O load."];
+%%% without "exploding machines", so that's why I removed it for now.
+
+%% Tests that the heart command does not get dropped when
+%% set just before halt on very high I/O load.
dont_drop(Config) when is_list(Config) ->
%%% Have to do it some times to make it happen...
[ok,ok,ok,ok,ok,ok,ok,ok,ok,ok] = do_dont_drop(Config,10),
@@ -455,7 +442,7 @@ do_dont_drop(Config,N) ->
Env = [{"HEART_COMMAND", FirstCmd}],
Func = "start_heart_stress",
Arg = NN3 ++ "@" ++ Host ++ " " ++
- filename:join(?config(data_dir, Config), "simple_echo"),
+ filename:join(proplists:get_value(data_dir, Config), "simple_echo"),
start_node_run(Name,Env,Func,Arg),
case wait_for_any_of(list_to_atom(NN2 ++ "@" ++ Host),
list_to_atom(NN3 ++ "@" ++ Host)) of
@@ -488,16 +475,13 @@ wait_for_any_of(N1,N2,Times) ->
end.
-kill_pid(suite) ->
- [];
-kill_pid(doc) ->
- ["Tests that heart kills the old erlang node before executing ",
- "heart command."];
+%% Tests that heart kills the old erlang node before executing
+%% heart command.
kill_pid(Config) when is_list(Config) ->
ok = do_kill_pid(Config).
do_kill_pid(_Config) ->
- Name = heart_test,
+ Name = ?UNIQ_NODE_NAME,
Env = [{"HEART_COMMAND", "nickeNyfikenFarEttJobb"}],
{ok,Node} = start_node_run(Name,Env,suicide_by_heart,[]),
ok = wait_for_node(Node,15),