Diffstat (limited to 'lib/common_test/src/ct_run.erl')
-rw-r--r--  lib/common_test/src/ct_run.erl | 103
1 file changed, 60 insertions, 43 deletions
diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl
index 4d74fd6a80..b37baa9f3e 100644
--- a/lib/common_test/src/ct_run.erl
+++ b/lib/common_test/src/ct_run.erl
@@ -77,7 +77,8 @@
                multiply_timetraps = 1,
                scale_timetraps = false,
                create_priv_dir,
-               testspecs = [],
+               testspec_files = [],
+               current_testspec,
                tests,
                starter}).
 
@@ -225,18 +226,24 @@ finish(Tracing, ExitStatus, Args) ->
     if ExitStatus == interactive_mode ->
            interactive_mode;
        true ->
-           %% it's possible to tell CT to finish execution with a call
-           %% to a different function than the normal halt/1 BIF
-           %% (meant to be used mainly for reading the CT exit status)
-           case get_start_opt(halt_with,
-                              fun([HaltMod,HaltFunc]) ->
-                                      {list_to_atom(HaltMod),
-                                       list_to_atom(HaltFunc)} end,
-                              Args) of
-               undefined ->
-                   halt(ExitStatus);
-               {M,F} ->
-                   apply(M, F, [ExitStatus])
+           case get_start_opt(vts, true, Args) of
+               true ->
+                   %% VTS mode, don't halt the node
+                   ok;
+               _ ->
+                   %% it's possible to tell CT to finish execution with a call
+                   %% to a different function than the normal halt/1 BIF
+                   %% (meant to be used mainly for reading the CT exit status)
+                   case get_start_opt(halt_with,
+                                      fun([HaltMod,HaltFunc]) ->
+                                              {list_to_atom(HaltMod),
+                                               list_to_atom(HaltFunc)} end,
+                                      Args) of
+                       undefined ->
+                           halt(ExitStatus);
+                       {M,F} ->
+                           apply(M, F, [ExitStatus])
+                   end
            end
     end.
 
@@ -244,7 +251,7 @@ script_start1(Parent, Args) ->
     %% read general start flags
     Label = get_start_opt(label, fun([Lbl]) -> Lbl end, Args),
     Profile = get_start_opt(profile, fun([Prof]) -> Prof end, Args),
-    Vts = get_start_opt(vts, true, Args),
+    Vts = get_start_opt(vts, true, undefined, Args),
     Shell = get_start_opt(shell, true, Args),
     Cover = get_start_opt(cover, fun([CoverFile]) -> ?abs(CoverFile) end, Args),
     CoverStop = get_start_opt(cover_stop,
@@ -330,8 +337,8 @@ script_start1(Parent, Args) ->
     Stylesheet = get_start_opt(stylesheet,
                               fun([SS]) -> ?abs(SS) end, Args),
     %% basic_html - used by ct_logs
-    BasicHtml = case proplists:get_value(basic_html, Args) of
-                    undefined ->
+    BasicHtml = case {Vts,proplists:get_value(basic_html, Args)} of
+                    {undefined,undefined} ->
                        application:set_env(common_test, basic_html, false),
                        undefined;
                    _ ->
@@ -364,9 +371,10 @@ script_start1(Parent, Args) ->
                 scale_timetraps = ScaleTT,
                 create_priv_dir = CreatePrivDir,
                 starter = script},
-
+
     %% check if log files should be refreshed or go on to run tests...
     Result = run_or_refresh(Opts, Args),
+
     %% send final results to starting process waiting in script_start/0
     Parent ! {self(), Result}.
 
@@ -485,8 +493,11 @@ execute_one_spec(TS, Opts, Args) ->
     case check_and_install_configfiles(AllConfig, TheLogDir, Opts) of
        ok ->      % read tests from spec
            {Run,Skip} = ct_testspec:prepare_tests(TS, node()),
-           do_run(Run, Skip, Opts#opts{config=AllConfig,
-                                       logdir=TheLogDir}, Args);
+           Result = do_run(Run, Skip, Opts#opts{config=AllConfig,
+                                                logdir=TheLogDir,
+                                                current_testspec=TS}, Args),
+           ct_util:delete_testdata(testspec),
+           Result;
        Error ->
            Error
     end.
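Two behavioural points from the hunks above are worth spelling out: in VTS mode finish/3 now leaves the node running instead of halting it, and outside VTS mode the -halt_with flag still replaces the final halt/1 with a call to a user-supplied M:F(ExitStatus), mainly so the CT exit status can be captured. Below is a minimal sketch of such a callback; the module my_halt and the file name are made up for the illustration, and the only assumption beyond the diff is that the exit status passed in is acceptable to halt/1 and printable with ~w.

    %% Hypothetical -halt_with callback (not part of this patch), illustrating
    %% the code path kept in finish/3 above: ct_run applies M:F with the CT
    %% exit status instead of calling halt/1 itself.
    %% Invoke with:  ct_run ... -halt_with my_halt stop
    -module(my_halt).
    -export([stop/1]).

    stop(ExitStatus) ->
        %% record the status where a wrapper script can pick it up, then halt
        ok = file:write_file("ct_exit_status", io_lib:format("~w~n", [ExitStatus])),
        halt(ExitStatus).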
@@ -577,7 +588,7 @@ combine_test_opts(TS, Specs, Opts) ->
 
     Opts#opts{label = Label,
               profile = Profile,
-              testspecs = Specs,
+              testspec_files = Specs,
               cover = Cover,
               cover_stop = CoverStop,
               logdir = which(logdir, LogDir),
@@ -702,7 +713,7 @@ script_start4(#opts{label = Label, profile = Profile,
                    logopts = LogOpts,
                    verbosity = Verbosity,
                    enable_builtin_hooks = EnableBuiltinHooks,
-                   logdir = LogDir, testspecs = Specs}, _Args) ->
+                   logdir = LogDir, testspec_files = Specs}, _Args) ->
 
     %% label - used by ct_logs
     application:set_env(common_test, test_label, Label),
@@ -757,21 +768,6 @@ script_start4(Opts = #opts{tests = Tests}, Args) ->
 %%% @doc Print usage information for <code>ct_run</code>.
 script_usage() ->
     io:format("\n\nUsage:\n\n"),
-    io:format("Run tests in web based GUI:\n\n"
-              "\tct_run -vts [-browser Browser]"
-              "\n\t[-config ConfigFile1 ConfigFile2 .. ConfigFileN]"
-              "\n\t[-decrypt_key Key] | [-decrypt_file KeyFile]"
-              "\n\t[-dir TestDir1 TestDir2 .. TestDirN] |"
-              "\n\t[-suite Suite [-case Case]]"
-              "\n\t[-logopts LogOpt1 LogOpt2 .. LogOptN]"
-              "\n\t[-verbosity GenVLvl | [CategoryVLvl1 .. CategoryVLvlN]]"
-              "\n\t[-include InclDir1 InclDir2 .. InclDirN]"
-              "\n\t[-no_auto_compile]"
-              "\n\t[-abort_if_missing_suites]"
-              "\n\t[-multiply_timetraps N]"
-              "\n\t[-scale_timetraps]"
-              "\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]"
-              "\n\t[-basic_html]\n\n"),
     io:format("Run tests from command line:\n\n"
               "\tct_run [-dir TestDir1 TestDir2 .. TestDirN] |"
               "\n\t[[-dir TestDir] -suite Suite1 Suite2 .. SuiteN"
@@ -831,7 +827,22 @@ script_usage() ->
     io:format("Run CT in interactive mode:\n\n"
               "\tct_run -shell"
               "\n\t[-config ConfigFile1 ConfigFile2 .. ConfigFileN]"
-              "\n\t[-decrypt_key Key] | [-decrypt_file KeyFile]\n\n").
+              "\n\t[-decrypt_key Key] | [-decrypt_file KeyFile]\n\n"),
+    io:format("Run tests in web based GUI:\n\n"
+              "\tct_run -vts [-browser Browser]"
+              "\n\t[-config ConfigFile1 ConfigFile2 .. ConfigFileN]"
+              "\n\t[-decrypt_key Key] | [-decrypt_file KeyFile]"
+              "\n\t[-dir TestDir1 TestDir2 .. TestDirN] |"
+              "\n\t[-suite Suite [-case Case]]"
+              "\n\t[-logopts LogOpt1 LogOpt2 .. LogOptN]"
+              "\n\t[-verbosity GenVLvl | [CategoryVLvl1 .. CategoryVLvlN]]"
+              "\n\t[-include InclDir1 InclDir2 .. InclDirN]"
+              "\n\t[-no_auto_compile]"
+              "\n\t[-abort_if_missing_suites]"
+              "\n\t[-multiply_timetraps N]"
+              "\n\t[-scale_timetraps]"
+              "\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]"
+              "\n\t[-basic_html]\n\n").
 
 %%%-----------------------------------------------------------------
 %%% @hidden
@@ -1103,7 +1114,7 @@ run_test2(StartOpts) ->
        undefined ->
            case lists:keysearch(prepared_tests, 1, StartOpts) of
                {value,{_,{Run,Skip},Specs}} ->  % use prepared tests
-                   run_prepared(Run, Skip, Opts#opts{testspecs = Specs},
+                   run_prepared(Run, Skip, Opts#opts{testspec_files = Specs},
                                 StartOpts);
                false ->
                    run_dir(Opts, StartOpts)
@@ -1111,11 +1122,11 @@ run_test2(StartOpts) ->
        Specs ->
            Relaxed = get_start_opt(allow_user_terms, value, false, StartOpts),
            %% using testspec(s) as input for test
-           run_spec_file(Relaxed, Opts#opts{testspecs = Specs}, StartOpts)
+           run_spec_file(Relaxed, Opts#opts{testspec_files = Specs}, StartOpts)
     end.
 
 run_spec_file(Relaxed,
-              Opts = #opts{testspecs = Specs},
+              Opts = #opts{testspec_files = Specs},
               StartOpts) ->
     Specs1 = case Specs of
                 [X|_] when is_integer(X) -> [Specs];
@@ -1154,7 +1165,10 @@ run_all_specs([{Specs,TS} | TSs], Opts, StartOpts, TotResult) ->
     log_ts_names(Specs),
     Combined = #opts{config = TSConfig} = combine_test_opts(TS, Specs, Opts),
     AllConfig = merge_vals([Opts#opts.config, TSConfig]),
-    try run_one_spec(TS, Combined#opts{config = AllConfig}, StartOpts) of
+    try run_one_spec(TS,
+                     Combined#opts{config = AllConfig,
+                                   current_testspec=TS},
+                     StartOpts) of
        Result ->
            run_all_specs(TSs, Opts, StartOpts, [Result | TotResult])
     catch
@@ -1399,7 +1413,7 @@ run_testspec2(TestSpec) ->
        case check_and_install_configfiles(
               Opts#opts.config, LogDir1, Opts) of
            ok ->
-               Opts1 = Opts#opts{testspecs = [],
+               Opts1 = Opts#opts{testspec_files = [],
                                  logdir = LogDir1,
                                  include = AllInclude},
                {Run,Skip} = ct_testspec:prepare_tests(TS, node()),
@@ -1706,6 +1720,9 @@ compile_and_run(Tests, Skip, Opts, Args) ->
     ct_util:set_testdata({stylesheet,Opts#opts.stylesheet}),
     %% save logopts
     ct_util:set_testdata({logopts,Opts#opts.logopts}),
+    %% save info about current testspec (testspec record or undefined)
+    ct_util:set_testdata({testspec,Opts#opts.current_testspec}),
+
     %% enable silent connections
     case Opts#opts.silent_connections of
        [] ->
@@ -1720,7 +1737,7 @@ compile_and_run(Tests, Skip, Opts, Args) ->
                    ct_logs:log("Silent connections", "~p", [Conns])
            end
     end,
-    log_ts_names(Opts#opts.testspecs),
+    log_ts_names(Opts#opts.testspec_files),
     TestSuites = suite_tuples(Tests),
     {_TestSuites1,SuiteMakeErrors,AllMakeErrors} =
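Taken together, the testspec-related hunks store the specification that is currently being executed in the ct_util test data under the key testspec: compile_and_run/4 saves it with ct_util:set_testdata({testspec, ...}) (a testspec record, or undefined when no spec is used), and execute_one_spec/3 removes it again with ct_util:delete_testdata(testspec) once the spec has run. The following is a minimal sketch of how that value could be read back while a spec is executing; it assumes ct_util:get_testdata/1 is the read counterpart of set_testdata/1 (only set_testdata/1 and delete_testdata/1 appear in this diff), and the module and function names are made up for the example.

    %% Hypothetical helper, not part of this patch: reads the test
    %% specification stored by compile_and_run/4 while a run is in progress.
    -module(ct_spec_peek).
    -export([current_testspec/0]).

    current_testspec() ->
        %% assumption: ct_util:get_testdata/1 returns the value previously
        %% stored with ct_util:set_testdata/1 for the given key
        case ct_util:get_testdata(testspec) of
            TS when is_tuple(TS), element(1, TS) =:= testspec ->
                {ok, TS};          % a testspec record was stored for this run
            _NoSpec ->
                no_testspec        % undefined (or an error) - no spec in use
        end.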