path: root/lib/common_test
author     Peter Andersson <[email protected]>    2012-07-15 01:48:03 +0200
committer  Peter Andersson <[email protected]>    2012-08-08 14:20:30 +0200
commit     2a3fc026842241b0a1e1b2b1f691bd212499ffe0 (patch)
tree       26734b39e09bcea382413a6570ee3874d4c906a2 /lib/common_test
parent     bb1734e95a5f6a7315819c24bc1fdd799534c787 (diff)
download   otp-2a3fc026842241b0a1e1b2b1f691bd212499ffe0.tar.gz
           otp-2a3fc026842241b0a1e1b2b1f691bd212499ffe0.tar.bz2
           otp-2a3fc026842241b0a1e1b2b1f691bd212499ffe0.zip
Implement support for test case execution break/continue
Diffstat (limited to 'lib/common_test')
-rw-r--r--   lib/common_test/src/ct.erl             87
-rw-r--r--   lib/common_test/src/ct_framework.erl   23
-rw-r--r--   lib/common_test/src/ct_logs.erl         4
-rw-r--r--   lib/common_test/src/ct_run.erl        152
4 files changed, 201 insertions, 65 deletions
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index 571d99029f..4054b441ee 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -66,7 +66,8 @@
capture_start/0, capture_stop/0, capture_get/0, capture_get/1,
fail/1, fail/2, comment/1, comment/2, make_priv_dir/0,
testcases/2, userdata/2, userdata/3,
- timetrap/1, get_timetrap_info/0, sleep/1]).
+ timetrap/1, get_timetrap_info/0, sleep/1,
+ break/1, break/2, continue/0, continue/1]).
%% New API for manipulating with config handlers
-export([add_config/2, remove_config/2]).
@@ -151,7 +152,8 @@ run(TestDirs) ->
%%% {repeat,N} | {duration,DurTime} | {until,StopTime} |
%%% {force_stop,Bool} | {decrypt,DecryptKeyOrFile} |
%%% {refresh_logs,LogDir} | {logopts,LogOpts} | {basic_html,Bool} |
-%%% {ct_hooks, CTHs} | {enable_builtin_hooks,Bool}
+%%% {ct_hooks, CTHs} | {enable_builtin_hooks,Bool} |
+%%% {noinput,Bool}
%%% TestDirs = [string()] | string()
%%% Suites = [string()] | [atom()] | string() | atom()
%%% Cases = [atom()] | atom()
@@ -840,10 +842,11 @@ userdata(TestDir, Suite, Case) when is_atom(Case) ->
%%%-----------------------------------------------------------------
%%% @spec get_status() -> TestStatus | {error,Reason} | no_tests_running
%%% TestStatus = [StatusElem]
-%%% StatusElem = {current,{Suite,TestCase}} | {successful,Successful} |
+%%% StatusElem = {current,TestCaseInfo} | {successful,Successful} |
%%% {failed,Failed} | {skipped,Skipped} | {total,Total}
+%%% TestCaseInfo = {Suite,TestCase} | [{Suite,TestCase}]
%%% Suite = atom()
-%%% TestCase = atom()
+%%% TestCase = atom() |
%%% Successful = integer()
%%% Failed = integer()
%%% Skipped = {UserSkipped,AutoSkipped}
@@ -853,7 +856,8 @@ userdata(TestDir, Suite, Case) when is_atom(Case) ->
%%% Reason = term()
%%%
%%% @doc Returns status of ongoing test. The returned list contains info about
-%%% which test case is currently executing, as well as counters for
+%%% which test case is currently executing (a list of cases when a
+%%% parallel test case group is executing), as well as counters for
%%% successful, failed, skipped, and total test cases so far.
get_status() ->
case get_testdata(curr_tc) of
@@ -878,6 +882,8 @@ get_testdata(Key) ->
Error;
{'EXIT',_Reason} ->
no_tests_running;
+ [CurrTC] when Key == curr_tc ->
+ {ok,CurrTC};
Data ->
{ok,Data}
end.
@@ -1047,3 +1053,74 @@ sleep({seconds,Ss}) ->
sleep(trunc(Ss * 1000));
sleep(Time) ->
test_server:adjusted_sleep(Time).
+
+%%%-----------------------------------------------------------------
+%%% @spec break(Comment) -> ok | {error,Reason}
+%%% Comment = string()
+%%% Reason = {multiple_cases_running,TestCases}
+%%% TestCases = [atom()]
+%%%
+%%% @doc <p>This function will cancel all timetraps and pause the
+%%% execution of the current test case until the user calls the
+%%% <c>continue/0</c> function. It gives the user the opportunity
+%%% to interact with the erlang node running the tests, e.g. for
+%%% debugging purposes or for manually executing a part of the
+%%% test case. If a parallel group is executing, <c>break/2</c>
+%%% should be called instead.</p>
+break(Comment) ->
+ case get_testdata(curr_tc) of
+ {ok,{_,TestCase}} ->
+ test_server:break(?MODULE, Comment);
+ {ok,Cases} when is_list(Cases) ->
+ {error,{multiple_cases_running,
+ [TC || {_,TC} <- Cases]}};
+ Error ->
+ {error,Error}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% @spec break(TestCase, Comment) -> ok | {error,Reason}
+%%% TestCase = atom()
+%%% Comment = string()
+%%% Reason = test_case_not_running
+%%%
+%%% @doc <p>This function works the same way as <c>break/1</c>,
+%%% only the <c>TestCase</c> argument makes it possible to
+%%% pause a test case executing in a parallel group. The
+%%% <c>continue/1</c> function should be used to resume
+%%% execution of <c>TestCase</c>.</p>
+break(TestCase, Comment) ->
+ case get_testdata(curr_tc) of
+ {ok,Cases} when is_list(Cases) ->
+ case lists:keymember(TestCase, 2, Cases) of
+ true ->
+ test_server:break(?MODULE, TestCase, Comment);
+ false ->
+ {error,test_case_not_running}
+ end;
+ {ok,{_,TestCase}} ->
+ test_server:break(?MODULE, TestCase, Comment);
+ Error ->
+ {error,Error}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% @spec continue() -> ok
+%%%
+%%% @doc <p>This function must be called in order to continue after a
+%%% test case (not executing in a parallel group) has called
+%%% <c>break/1</c>.</p>
+continue() ->
+ test_server:continue().
+
+%%%-----------------------------------------------------------------
+%%% @spec continue(TestCase) -> ok
+%%% TestCase = atom()
+%%%
+%%% @doc <p>This function must be called in order to continue after a
+%%% test case has called <c>break/2</c>. If the paused test case,
+%%% <c>TestCase</c>, executes in a parallel group, this
+%%% function - rather than <c>continue/0</c> - must be used
+%%% in order to let the test case proceed.</p>
+continue(TestCase) ->
+ test_server:continue(TestCase).
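
A minimal usage sketch of the break/continue API added above. The suite, case
and helper names (my_SUITE, my_case, start_server/0, run_requests/2) are
illustrative only and not part of this commit:

    %% In my_SUITE.erl
    my_case(Config) ->
        Pid = start_server(),
        %% Cancel all timetraps and pause here; control is handed to the
        %% Erlang shell until the user resumes the run.
        ct:break("inspect the server before the requests are sent"),
        run_requests(Pid, Config).

    %% From the shell, resume with:
    %%   ct:continue().
    %%
    %% If my_case executes in a parallel group, the case must be named:
    %%   ct:break(my_case, "inspect the server"),
    %% and resumed with
    %%   ct:continue(my_case).

Note also that ct:get_status() now reports a list of {Suite,TestCase} tuples
under the current key while a parallel group is executing, instead of a single
tuple.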
diff --git a/lib/common_test/src/ct_framework.erl b/lib/common_test/src/ct_framework.erl
index 11575cd0fb..0740eb7ec9 100644
--- a/lib/common_test/src/ct_framework.erl
+++ b/lib/common_test/src/ct_framework.erl
@@ -71,8 +71,13 @@ init_tc(Mod,Func,Config) ->
{skip,{require_failed_in_suite0,Reason}};
{Suite,{suite0_failed,_}=Failure} ->
{skip,Failure};
- _ ->
- ct_util:set_testdata({curr_tc,{Suite,Func}}),
+ CurrTC ->
+ case CurrTC of
+ undefined ->
+ ct_util:set_testdata({curr_tc,[{Suite,Func}]});
+ Running when is_list(Running) ->
+ ct_util:set_testdata({curr_tc,[{Suite,Func}|Running]})
+ end,
case ct_util:read_suite_data({seq,Suite,Func}) of
undefined ->
init_tc1(Mod,Suite,Func,Config);
@@ -206,7 +211,8 @@ init_tc2(Mod,Suite,Func,SuiteInfo,MergeResult,Config) ->
case catch configure(MergedInfo,MergedInfo,SuiteInfo,
FuncSpec,Config) of
{suite0_failed,Reason} ->
- ct_util:set_testdata({curr_tc,{Mod,{suite0_failed,{require,Reason}}}}),
+ ct_util:set_testdata({curr_tc,{Mod,{suite0_failed,
+ {require,Reason}}}}),
{skip,{require_failed_in_suite0,Reason}};
{error,Reason} ->
{auto_skip,{require_failed,Reason}};
@@ -633,7 +639,16 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) ->
end,
ct_util:reset_silent_connections(),
-
+
+ %% reset the curr_tc state, or delete this TC from the list of
+ %% executing cases (if in a parallel group)
+ case ct_util:get_testdata(curr_tc) of
+ Running = [_,_|_] ->
+ ct_util:set_testdata({curr_tc,lists:delete({Mod,Func}, Running)});
+ [_] ->
+ ct_util:set_testdata({curr_tc,undefined})
+ end,
+
case FinalResult of
{skip,{sequence_failed,_,_}} ->
%% ct_logs:init_tc is never called for a skipped test case
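
The init_tc/end_tc changes above keep curr_tc as a list of currently executing
cases so that parallel groups can be tracked. A sketch of how the stored term
is expected to evolve (suite and case names are illustrative):

    undefined                              %% before any case has started
    [{my_SUITE,tc1}]                       %% init_tc for tc1
    [{my_SUITE,tc2},{my_SUITE,tc1}]        %% tc2 starts in a parallel group
    [{my_SUITE,tc2}]                       %% end_tc deletes {my_SUITE,tc1}
    undefined                              %% end_tc for the last running case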
diff --git a/lib/common_test/src/ct_logs.erl b/lib/common_test/src/ct_logs.erl
index 1ccbdc3718..73a66a8763 100644
--- a/lib/common_test/src/ct_logs.erl
+++ b/lib/common_test/src/ct_logs.erl
@@ -378,11 +378,11 @@ tc_print(Category,Format,Args) ->
ok.
get_heading(default) ->
- io_lib:format("-----------------------------"
+ io_lib:format("\n-----------------------------"
"-----------------------\n~s\n",
[log_timestamp(now())]);
get_heading(Category) ->
- io_lib:format("-----------------------------"
+ io_lib:format("\n-----------------------------"
"-----------------------\n~s ~w\n",
[log_timestamp(now()),Category]).
diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl
index 46aec04ec1..9b9c43a917 100644
--- a/lib/common_test/src/ct_run.erl
+++ b/lib/common_test/src/ct_run.erl
@@ -1428,67 +1428,111 @@ do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
"run ct:start_interactive()\n\n",[]),
{error,interactive_mode};
_Pid ->
- %% save stylesheet info
- ct_util:set_testdata({stylesheet,Opts#opts.stylesheet}),
- %% save logopts
- ct_util:set_testdata({logopts,Opts#opts.logopts}),
- %% enable silent connections
- case Opts#opts.silent_connections of
- [] ->
- Conns = ct_util:override_silence_all_connections(),
- ct_logs:log("Silent connections", "~p", [Conns]);
- Conns when is_list(Conns) ->
- ct_util:override_silence_connections(Conns),
- ct_logs:log("Silent connections", "~p", [Conns]);
- _ ->
- ok
- end,
- log_ts_names(Opts1#opts.testspecs),
- TestSuites = suite_tuples(Tests),
-
- {_TestSuites1,SuiteMakeErrors,AllMakeErrors} =
- case application:get_env(common_test, auto_compile) of
- {ok,false} ->
- {TestSuites1,SuitesNotFound} =
- verify_suites(TestSuites),
- {TestSuites1,SuitesNotFound,SuitesNotFound};
- _ ->
- {SuiteErrs,HelpErrs} = auto_compile(TestSuites),
- {TestSuites,SuiteErrs,SuiteErrs++HelpErrs}
- end,
+ compile_and_run(Tests, Skip, Opts1, Args)
+ end
+ end.
- case continue(AllMakeErrors) of
- true ->
- SavedErrors = save_make_errors(SuiteMakeErrors),
- ct_repeat:log_loop_info(Args),
+compile_and_run(Tests, Skip, Opts, Args) ->
+ %% save stylesheet info
+ ct_util:set_testdata({stylesheet,Opts#opts.stylesheet}),
+ %% save logopts
+ ct_util:set_testdata({logopts,Opts#opts.logopts}),
+ %% enable silent connections
+ case Opts#opts.silent_connections of
+ [] ->
+ Conns = ct_util:override_silence_all_connections(),
+ ct_logs:log("Silent connections", "~p", [Conns]);
+ Conns when is_list(Conns) ->
+ ct_util:override_silence_connections(Conns),
+ ct_logs:log("Silent connections", "~p", [Conns]);
+ _ ->
+ ok
+ end,
+ log_ts_names(Opts#opts.testspecs),
+ TestSuites = suite_tuples(Tests),
+
+ {_TestSuites1,SuiteMakeErrors,AllMakeErrors} =
+ case application:get_env(common_test, auto_compile) of
+ {ok,false} ->
+ {TestSuites1,SuitesNotFound} =
+ verify_suites(TestSuites),
+ {TestSuites1,SuitesNotFound,SuitesNotFound};
+ _ ->
+ {SuiteErrs,HelpErrs} = auto_compile(TestSuites),
+ {TestSuites,SuiteErrs,SuiteErrs++HelpErrs}
+ end,
+
+ case continue(AllMakeErrors) of
+ true ->
+ SavedErrors = save_make_errors(SuiteMakeErrors),
+ ct_repeat:log_loop_info(Args),
+
+ {Tests1,Skip1} = final_tests(Tests,Skip,SavedErrors),
+
+ possibly_spawn(true == proplists:get_value(noinput, Args),
+ Tests1, Skip1, Opts);
+ false ->
+ io:nl(),
+ ct_util:stop(clean),
+ BadMods =
+ lists:foldr(
+ fun({{_,_},Ms}, Acc) ->
+ Ms ++ lists:foldl(
+ fun(M, Acc1) ->
+ lists:delete(M, Acc1)
+ end, Acc, Ms)
+ end, [], AllMakeErrors),
+ {error,{make_failed,BadMods}}
+ end.
- {Tests1,Skip1} = final_tests(Tests,Skip,SavedErrors),
+%% keep the shell as the top controlling process
+possibly_spawn(false, Tests, Skip, Opts) ->
+ TestResult = (catch do_run_test(Tests, Skip, Opts)),
+ case TestResult of
+ {EType,_} = Error when EType == user_error;
+ EType == error ->
+ ct_util:stop(clean),
+ exit(Error);
+ _ ->
+ ct_util:stop(normal),
+ TestResult
+ end;
- R = (catch do_run_test(Tests1, Skip1, Opts1)),
- case R of
- {EType,_} = Error when EType == user_error ;
+%% we must return control to the shell now, so we spawn
+%% a test supervisor process to keep an eye on the test run
+possibly_spawn(true, Tests, Skip, Opts) ->
+ CTUtilSrv = whereis(ct_util_server),
+ Supervisor =
+ fun() ->
+ process_flag(trap_exit, true),
+ link(CTUtilSrv),
+ TestRun =
+ fun() ->
+ TestResult = (catch do_run_test(Tests, Skip, Opts)),
+ case TestResult of
+ {EType,_} = Error when EType == user_error;
EType == error ->
ct_util:stop(clean),
exit(Error);
_ ->
ct_util:stop(normal),
- R
- end;
- false ->
- io:nl(),
- ct_util:stop(clean),
- BadMods =
- lists:foldr(
- fun({{_,_},Ms}, Acc) ->
- Ms ++ lists:foldl(
- fun(M, Acc1) ->
- lists:delete(M, Acc1)
- end, Acc, Ms)
- end, [], AllMakeErrors),
- {error,{make_failed,BadMods}}
- end
- end
- end.
+ exit({ok,TestResult})
+ end
+ end,
+ TestRunPid = spawn_link(TestRun),
+ receive
+ {'EXIT',TestRunPid,{ok,TestResult}} ->
+ io:format(user, "~nCommon Test returned ~p~n~n",
+ [TestResult]);
+ {'EXIT',TestRunPid,Error} ->
+ exit(Error)
+ end
+ end,
+ unlink(CTUtilSrv),
+ SupPid = spawn(Supervisor),
+ io:format(user, "~nTest control handed over to process ~p~n~n",
+ [SupPid]),
+ SupPid.
%% attempt to compile the modules specified in TestSuites
auto_compile(TestSuites) ->
@@ -1900,7 +1944,7 @@ do_run_test(Tests, Skip, Opts) ->
[code:del_path(Dir) || Dir <- AddedToPath],
ok;
Error ->
- Error
+ exit(Error)
end.
delete_dups([S | Suites]) ->
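
With the refactoring above, do_run/4 delegates to compile_and_run/4, which
either runs the tests in the calling process (the default) or, when the new
noinput option is set, hands the run over to a spawned supervisor process and
returns control to the shell. A sketch of starting such a run from the Erlang
shell; the test directory, pid and final result are illustrative:

    1> ct:run_test([{dir,"my_test"}, {noinput,true}]).

    Test control handed over to process <0.85.0>
    ...
    Common Test returned ok

With {noinput,false}, or the option omitted, do_run_test/3 executes in the
calling process as before and the call blocks until the test run is done.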