Diffstat (limited to 'lib')
-rw-r--r-- | lib/common_test/doc/src/cover_chapter.xml | 87
-rw-r--r-- | lib/common_test/src/ct_cover.erl | 32
-rw-r--r-- | lib/common_test/test/common_test.cover | 16
-rw-r--r-- | lib/common_test/test/ct_cover_SUITE.erl | 53
-rw-r--r-- | lib/test_server/doc/src/test_server_ctrl.xml | 93
-rw-r--r-- | lib/test_server/doc/src/ts.xml | 4
-rw-r--r-- | lib/test_server/src/Makefile | 3
-rw-r--r-- | lib/test_server/src/test_server.erl | 11
-rw-r--r-- | lib/test_server/src/test_server_ctrl.erl | 283
-rw-r--r-- | lib/test_server/src/ts.erl | 31
-rw-r--r-- | lib/test_server/test/test_server_SUITE.erl | 100
-rw-r--r-- | lib/test_server/test/test_server_SUITE_data/Makefile.src | 4
-rw-r--r-- | lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl | 10
-rw-r--r-- | lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl | 58
-rw-r--r-- | lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl | 4
15 files changed, 562 insertions, 227 deletions
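In short, the patch lets a cover specification tag modules for accumulated analysis ({cross,[{Tag,Mods}]}) and adds ct_cover:cross_cover_analyse/2 to collect the results. A minimal usage sketch, using only the placeholder names from the documentation changes below (s1, s2, m1, S1LogDir, S2LogDir are illustrative, not real modules or paths):

    %% s1.cover - cover spec for the test run that owns m1
    {incl_mods,[m1]}.

    %% s2.cover - cover spec for a test run that uses m1 but does not own it
    {cross,[{s1,[m1]}]}.

    %% After both test runs are complete (can be run from any node).
    %% S1LogDir and S2LogDir are the <TestName>.logs directories of the two runs.
    %% Level is 'overview' or 'details'.
    ct_cover:cross_cover_analyse(details, [{s1,S1LogDir}, {s2,S2LogDir}]).
    %% Accumulated coverage for m1 is written to cross_cover.html under the
    %% latest run.<timestamp> directory of the s1 test.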
diff --git a/lib/common_test/doc/src/cover_chapter.xml b/lib/common_test/doc/src/cover_chapter.xml index b2e64bfff0..4fa92d5583 100644 --- a/lib/common_test/doc/src/cover_chapter.xml +++ b/lib/common_test/doc/src/cover_chapter.xml
@@ -108,8 +108,8 @@ specifications</seealso>).</p> </section> + <marker id="cover_stop"></marker> <section> - <marker id="cover_stop"></marker> <title>Stopping the cover tool when tests are completed</title> <p>By default the Cover tool is automatically stopped when the tests are completed. This causes the original (non cover
@@ -175,6 +175,11 @@ %% Specific modules to exclude in cover. {excl_mods, Mods}. + + %% Cross cover compilation + %% Tag = atom(), an identifier for a test run + %% Mods = [atom()], modules to compile for accumulated analysis + {cross,[{Tag,Mods}]}. </pre> <p>The <c>incl_dirs_r</c> and <c>excl_dirs_r</c> terms tell Common
@@ -190,6 +195,81 @@ specification file for Common Test).</p> </section> + <marker id="cross_cover"/> + <section> + <title>Cross cover analysis</title> + <p>The cross cover mechanism allows cover analysis of modules + across multiple tests. It is useful if some code, e.g. a library + module, is used by many different tests and the accumulated cover + result is desirable.</p> + + <p>This can of course also be achieved in a more customized way by + using the <c>export</c> parameter in the cover specification and + analysing the result offline, but the cross cover mechanism is a + built-in solution which also provides the logging.</p> + + <p>The mechanism is most easily explained with an example:</p> + + <p>Let's say that there are two systems, <c>s1</c> and <c>s2</c>, + which are tested in separate test runs. System <c>s1</c> contains + a library module <c>m1</c> which is tested by the <c>s1</c> test + run and is included in <c>s1</c>'s cover specification:</p> + +<code type="none"> +s1.cover: + {incl_mods,[m1]}.</code> + + <p>When analysing code coverage, the result for <c>m1</c> can be + seen in the cover log in the <c>s1</c> test result.</p> + + <p>Now, let's imagine that since <c>m1</c> is a library module, it + is also used quite a bit by system <c>s2</c>. The <c>s2</c> test + run does not specifically test <c>m1</c>, but it might still be + interesting to see which parts of <c>m1</c> are actually covered by + the <c>s2</c> tests. To do this, <c>m1</c> could be included also + in <c>s2</c>'s cover specification:</p> + +<code type="none"> +s2.cover: + {incl_mods,[m1]}.</code> + + <p>This would give an entry for <c>m1</c> also in the cover log + for the <c>s2</c> test run. The problem is that this would only + reflect the coverage by <c>s2</c> tests, not the accumulated + result over <c>s1</c> and <c>s2</c>. And this is where the cross + cover mechanism comes in handy.</p> + + <p>If instead the cover specification for <c>s2</c> were like + this:</p> + +<code type="none"> +s2.cover: + {cross,[{s1,[m1]}]}.</code> + + <p>then <c>m1</c> would be cover compiled in the <c>s2</c> test + run, but not shown in the coverage log.
Instead, if + <c>ct_cover:cross_cover_analyse/2</c> is called after both + <c>s1</c> and <c>s2</c> test runs are completed, the accumulated + result for <c>m1</c> would be available in the cross cover log for + the <c>s1</c> test run.</p> + + <p>The call to the analyse function must be like this:</p> + +<code type="none"> +ct_cover:cross_cover_analyse(Level, [{s1,S1LogDir},{s2,S2LogDir}]).</code> + + <p>where <c>S1LogDir</c> and <c>S2LogDir</c> are the directories + named <c><TestName>.logs</c> for each test respectively.</p> + + <p>Note the tags <c>s1</c> and <c>s2</c> which are used in the + cover specification file and in the call to + <c>ct_cover:cross_cover_analyse/2</c>. The point of these are only + to map the modules specified in the cover specification to the log + directory specified in the call to the analyse function. The name + of the tag has no meaning beyond this.</p> + + </section> + <section> <title>Logging</title> <p>To view the result of a code coverage test, follow the @@ -197,6 +277,11 @@ takes you to the code coverage overview page. If you have successfully performed a detailed coverage analysis, you find links to each individual module coverage page here.</p> + + <p>If cross cover analysis has been performed, and there are + accumulated coverage results for the current test, then the - + "Coverdata collected over all tests" link will take you to these + results.</p> </section> </chapter> diff --git a/lib/common_test/src/ct_cover.erl b/lib/common_test/src/ct_cover.erl index d39f50ba00..ae671c750a 100644 --- a/lib/common_test/src/ct_cover.erl +++ b/lib/common_test/src/ct_cover.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2006-2009. All Rights Reserved. +%% Copyright Ericsson AB 2006-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -24,7 +24,7 @@ -module(ct_cover). --export([get_spec/1, add_nodes/1, remove_nodes/1]). +-export([get_spec/1, add_nodes/1, remove_nodes/1, cross_cover_analyse/2]). -include("ct_util.hrl"). @@ -100,6 +100,22 @@ remove_nodes(Nodes) -> %%%----------------------------------------------------------------- +%%% @spec cross_cover_analyse(Level,Tests) -> ok +%%% Level = overview | details +%%% Tests = [{Tag,Dir}] +%%% Tag = atom() +%%% Dir = string() +%%% +%%% @doc Accumulate cover results over multiple tests. +%%% See the chapter about <seealso +%%% marker="cover_chapter#cross_cover">cross cover +%%% analysis</seealso> in the users's guide. +%%% +cross_cover_analyse(Level,Tests) -> + test_server_ctrl:cross_cover_analyse(Level,Tests). + + +%%%----------------------------------------------------------------- %%% @hidden %% Read cover specification file and return the parsed info. 
@@ -249,9 +265,11 @@ get_app_info(App=#cover{app=Name}, [{excl_mods,Name,Mods1}|Terms]) -> Mods = App#cover.excl_mods, get_app_info(App#cover{excl_mods=Mods++Mods1},Terms); -get_app_info(App=#cover{app=Name}, [{cross_apps,Name,AppMods1}|Terms]) -> - AppMods = App#cover.cross, - get_app_info(App#cover{cross=AppMods++AppMods1},Terms); +get_app_info(App=#cover{app=none}, [{cross,Cross}|Terms]) -> + get_app_info(App, [{cross,none,Cross}|Terms]); +get_app_info(App=#cover{app=Name}, [{cross,Name,Cross1}|Terms]) -> + Cross = App#cover.cross, + get_app_info(App#cover{cross=Cross++Cross1},Terms); get_app_info(App=#cover{app=none}, [{src_dirs,Dirs}|Terms]) -> get_app_info(App, [{src_dirs,none,Dirs}|Terms]); @@ -354,10 +372,10 @@ remove_excludes_and_dups(CoverData=#cover{excl_mods=Excl,incl_mods=Incl}) -> files2mods(Info=#cover{excl_mods=ExclFs, incl_mods=InclFs, - cross=CrossFs}) -> + cross=Cross}) -> Info#cover{excl_mods=files2mods1(ExclFs), incl_mods=files2mods1(InclFs), - cross=files2mods1(CrossFs)}. + cross=[{Tag,files2mods1(Fs)} || {Tag,Fs} <- Cross]}. files2mods1([M|Fs]) when is_atom(M) -> [M|files2mods1(Fs)]; diff --git a/lib/common_test/test/common_test.cover b/lib/common_test/test/common_test.cover index 66697854ea..3aa49623e7 100644 --- a/lib/common_test/test/common_test.cover +++ b/lib/common_test/test/common_test.cover @@ -1,10 +1,10 @@ %% -*- erlang -*- {incl_app,common_test,details}. -{cross_apps,common_test,[erl2html2, - test_server, - test_server_ctrl, - test_server_gl, - test_server_h, - test_server_io, - test_server_node, - test_server_sup]}. +{cross,common_test,[{test_server,[erl2html2, + test_server, + test_server_ctrl, + test_server_gl, + test_server_h, + test_server_io, + test_server_node, + test_server_sup]}]}. diff --git a/lib/common_test/test/ct_cover_SUITE.erl b/lib/common_test/test/ct_cover_SUITE.erl index bebfce70d0..cb49dc423f 100644 --- a/lib/common_test/test/ct_cover_SUITE.erl +++ b/lib/common_test/test/ct_cover_SUITE.erl @@ -77,7 +77,8 @@ all() -> slave_start_slave, cover_node_option, ct_cover_add_remove_nodes, - otp_9956 + otp_9956, + cross ]. %%-------------------------------------------------------------------- @@ -161,6 +162,43 @@ otp_9956(Config) -> check_calls(Events,{?suite,otp_9956,1},1), ok. +%% Test cross cover mechanism +cross(Config) -> + {ok,Events1} = run_test(cross1,Config), + check_calls(Events1,1), + + CoverFile2 = create_cover_file(cross1,[{cross,[{cross1,[?mod]}]}],Config), + {ok,Events2} = run_test(cross2,[{cover,CoverFile2}],Config), + check_calls(Events2,1), + + %% Get the log dirs for each test and run cross cover analyse + [D11,D12] = lists:sort(get_run_dirs(Events1)), + [D21,D22] = lists:sort(get_run_dirs(Events2)), + + ct_cover:cross_cover_analyse(details,[{cross1,D11},{cross2,D21}]), + ct_cover:cross_cover_analyse(details,[{cross1,D12},{cross2,D22}]), + + %% Get the cross cover logs and read for each test + [C11,C12,C21,C22] = + [filename:join(D,"cross_cover.html") || D <- [D11,D12,D21,D22]], + + {ok,CrossData} = file:read_file(C11), + {ok,CrossData} = file:read_file(C12), + + {ok,Def} = file:read_file(C21), + {ok,Def} = file:read_file(C22), + + %% A simple test: just check that the test module exists in the + %% log from cross1 test, and that it does not exist in the log + %% from cross2 test. + TestMod = list_to_binary(atom_to_list(?mod)), + {_,_} = binary:match(CrossData,TestMod), + nomatch = binary:match(Def,TestMod), + {_,_} = binary:match(Def, + <<"No cross cover modules exist for this application">>), + + ok. 
+ %%%----------------------------------------------------------------- %%% HELP FUNCTIONS @@ -229,15 +267,18 @@ check_cover(Node) when is_atom(Node) -> false end. +%% Get the log dir "run.<timestamp>" for all (both!) tests +get_run_dirs(Events) -> + [filename:dirname(TCLog) || + {ct_test_support_eh, + {event,tc_logfile,_Node, + {{?suite,init_per_suite},TCLog}}} <- Events]. + %% Check that each coverlog includes N calls to ?mod:foo/0 check_calls(Events,N) -> check_calls(Events,{?mod,foo,0},N). check_calls(Events,MFA,N) -> - CoverLogs = - [filename:join(filename:dirname(TCLog),"all.coverdata") || - {ct_test_support_eh, - {event,tc_logfile,ct@falco, - {{?suite,init_per_suite},TCLog}}} <- Events], + CoverLogs = [filename:join(D,"all.coverdata") || D <- get_run_dirs(Events)], do_check_logs(CoverLogs,MFA,N). do_check_logs([CoverLog|CoverLogs],{Mod,_,_} = MFA,N) -> diff --git a/lib/test_server/doc/src/test_server_ctrl.xml b/lib/test_server/doc/src/test_server_ctrl.xml index 41bc0bcc75..af96f1fe7e 100644 --- a/lib/test_server/doc/src/test_server_ctrl.xml +++ b/lib/test_server/doc/src/test_server_ctrl.xml @@ -427,11 +427,21 @@ Optional, if not given the test server controller node <p>A <c>CoverFile</c> can have the following entries:</p> <code type="none"> {exclude, all | ExcludeModuleList}. -{include, IncludeModuleList}. </code> +{include, IncludeModuleList}. +{cross, CrossCoverInfo}.</code> <p>Note that each line must end with a full stop. <c>ExcludeModuleList</c> and <c>IncludeModuleList</c> are lists of atoms, where each atom is a module name. </p> + + <p><c>CrossCoverInfo</c> is used when collecting cover data + over multiple tests. Modules listed here are compiled, but + they will not be analysed when the test is finished. See + <seealso + marker="#cross_cover_analyse-2">cross_cover_analyse/2</seealso> + for more information about the cross cover mechanism and the + format of <c>CrossCoverInfo</c>. + </p> <p>If both an <c>Application</c> and a <c>CoverFile</c> is given, all modules in the application are cover compiled, except for the modules listed in <c>ExcludeModuleList</c>. The @@ -467,30 +477,71 @@ Optional, if not given the test server controller node </desc> </func> <func> - <name>cross_cover_analyse(Level) -> ok</name> - <fsummary>Analyse cover data collected from all tests</fsummary> + <name>cross_cover_analyse(Level, Tests) -> ok</name> + <fsummary>Analyse cover data collected from multiple tests</fsummary> <type> <v>Level = details | overview</v> + <v>Tests = [{Tag,LogDir}]</v> + <v>Tag = atom()</v> + <d>Test identifier.</d> + <v>LogDir = string()</v> + <d>Log directory for the test identified by <c>Tag</c>. This + can either be the <c>run.<timestamp></c> directory or + the parent directory of this (in which case the latest + <c>run.<timestamp></c> directory is chosen.</d> </type> <desc> - <p>Analyse cover data collected from all tests. The modules - analysed are the ones listed in the cross cover file - <c>cross.cover</c> in the current directory of the test - server.</p> - <p>The modules listed in the <c>cross.cover</c> file are - modules that are heavily used by other applications than the - one they belong to. This function should be run after all - tests are completed, and the result will be stored in a file - called cross_cover.html in the run.<timestamp> - directory of the application the modules belong to. - </p> - <p>The <c>cross.cover</c> file contains elements like this:</p> - <pre> -{App,Modules}. 
</pre> - <p>where <c>App</c> can be an application name or the atom - <c>all</c>. The application (or all applications) will cover - compile the listed <c>Modules</c>. - </p> + <p>Analyse cover data collected from multiple tests. The modules + analysed are the ones listed in <c>cross</c> statements in + the cover files. These are modules that are heavily used by + other tests than the one where they belong or are explicitly + tested. They should then be listed as cross modules in the + cover file for the test where they are used but do not + belong. Se example below.</p> + <p>This function should be run after all tests are completed, + and the result will be stored in a file called + <c>cross_cover.html</c> in the <c>run.<timestamp></c> + directory of the test the modules belong to.</p> + <p>Note that the function can be executed on any node, and it + does not require <c>test_server_ctrl</c> to be started first.</p> + <p>The <c>cross</c> statement in the cover file must be like this:</p> + <code type="none"> +{cross,[{Tag,Modules}]}.</code> + <p>where <c>Tag</c> is the same as <c>Tag</c> in the + <c>Tests</c> parameter to this function and <c>Modules</c> is a + list of module names (atoms).</p> + <p><em>Example:</em></p> + <p>If the module <c>m1</c> belongs to system <c>s1</c> but is + heavily used also in the tests for another system <c>s2</c>, + then the cover files for the two systems' tests could be like + this:</p> +<code type="none"> +s1.cover: + {include,[m1]}. + +s2.cover: + {include,[....]}. % modules belonging to system s2 + {cross,[{s1,[m1]}]}.</code> + <p>When the tests for both <c>s1</c> and <c>s2</c> are completed, run</p> +<code type="none"> +test_server_ctrl:cross_cover_analyse(Level,[{s1,S1LogDir},{s2,S2LogDir}]) +</code> + + <p>and the accumulated cover data for <c>m1</c> will be written to + <c>S1LogDir/[run.<timestamp>/]cross_cover.html</c>.</p> + <p>Note that the <c>m1</c> module will also be presented in the + normal coverage log for <c>s1</c> (due to the include statement in + <c>s1.cover</c>), but that only includes the coverage achieved by the + <c>s1</c> test itself.</p> + <p>The Tag in the <c>cross</c> statement in the cover file has + no other purpose than mapping the list of modules + (<c>[m1]</c> in the example above) to the correct log + directory where it should be included in the + <c>cross_cover.html</c> file (<c>S1LogDir</c> in the example + above). I.e. the value of <c>Tag</c> has no meaning, it + could be <c>foo</c> as well as <c>s1</c> above, as long as + the same <c>Tag</c> is used in the cover file and in the + call to this function.</p> </desc> </func> <func> diff --git a/lib/test_server/doc/src/ts.xml b/lib/test_server/doc/src/ts.xml index 4a2c536e96..82ba3a5017 100644 --- a/lib/test_server/doc/src/ts.xml +++ b/lib/test_server/doc/src/ts.xml @@ -5,7 +5,7 @@ <header> <copyright> <year>2007</year> - <year>2011</year> + <year>2012</year> <holder>Ericsson AB, All Rights Reserved</holder> </copyright> <legalnotice> @@ -450,7 +450,7 @@ This option is mandatory for remote targets <desc> <p>Analyse cover data collected from all tests. 
</p> - <p>See test_server_ctrl:cross_cover_analyse/1 + <p>See test_server_ctrl:cross_cover_analyse/2 </p> </desc> </func> diff --git a/lib/test_server/src/Makefile b/lib/test_server/src/Makefile index 20e7a5942c..3261936472 100644 --- a/lib/test_server/src/Makefile +++ b/lib/test_server/src/Makefile @@ -69,7 +69,6 @@ INTERNAL_HRL_FILES = test_server_internal.hrl TS_HRL_FILES= ts.hrl C_FILES = AUTOCONF_FILES = configure.in conf_vars.in -COVER_FILES = cross.cover PROGRAMS = configure config.sub config.guess install-sh CONFIG = ts.config ts.unix.config ts.win32.config @@ -137,7 +136,7 @@ release_tests_spec: opt $(INSTALL_DATA) $(ERL_FILES) $(TS_ERL_FILES) \ $(HRL_FILES) $(INTERNAL_HRL_FILES) $(TS_HRL_FILES) \ $(TS_TARGET_FILES) \ - $(AUTOCONF_FILES) $(C_FILES) $(COVER_FILES) $(CONFIG) \ + $(AUTOCONF_FILES) $(C_FILES) $(CONFIG) \ "$(RELEASE_PATH)/test_server" $(INSTALL_SCRIPT) $(PROGRAMS) "$(RELEASE_PATH)/test_server" diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl index 4d8fc55e4d..37cd8fac99 100644 --- a/lib/test_server/src/test_server.erl +++ b/lib/test_server/src/test_server.erl @@ -95,7 +95,8 @@ init_purify() -> %% is found, else {error,application_not_found}. cover_compile({none,_Exclude,Include,Cross}) -> - CompileMods = Include++Cross, + CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross), + CompileMods = Include++CrossMods, case length(CompileMods) of 0 -> io:fwrite("WARNING: No modules to cover compile!\n\n",[]), @@ -109,7 +110,8 @@ cover_compile({none,_Exclude,Include,Cross}) -> {ok,Include} end; cover_compile({App,all,Include,Cross}) -> - CompileMods = Include++Cross, + CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross), + CompileMods = Include++CrossMods, case length(CompileMods) of 0 -> io:fwrite("WARNING: No modules to cover compile!\n\n",[]), @@ -127,9 +129,10 @@ cover_compile({App,all,Include,Cross}) -> {ok,Include} end; cover_compile({App,Exclude,Include,Cross}) -> + CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross), case code:lib_dir(App) of {error,bad_name} -> - case Include++Cross of + case Include++CrossMods of [] -> io:format("\nWARNING: Can't find lib_dir for \'~w\'\n" "Not cover compiling!\n\n",[App]), @@ -150,7 +153,7 @@ cover_compile({App,Exclude,Include,Cross}) -> WC = filename:join(EbinDir,"*.beam"), AllMods = module_names(filelib:wildcard(WC)), AnalyseMods = (AllMods ++ Include) -- Exclude, - CompileMods = AnalyseMods ++ Cross, + CompileMods = AnalyseMods ++ CrossMods, case length(CompileMods) of 0 -> io:fwrite("WARNING: No modules to cover compile!\n\n",[]), diff --git a/lib/test_server/src/test_server_ctrl.erl b/lib/test_server/src/test_server_ctrl.erl index a6f370dd1a..c5c57426b4 100644 --- a/lib/test_server/src/test_server_ctrl.erl +++ b/lib/test_server/src/test_server_ctrl.erl @@ -53,8 +53,7 @@ -export([reject_io_reqs/1, get_levels/0, set_levels/3]). -export([multiply_timetraps/1, scale_timetraps/1, get_timetrap_parameters/0]). -export([create_priv_dir/1]). --export([cover/2, cover/3, cover/8, - cross_cover_analyse/2, cross_cover_analyse/3, trc/1, stop_trace/0]). +-export([cover/2, cover/3, cover/8, cross_cover_analyse/2, trc/1, stop_trace/0]). -export([testcase_callback/1]). -export([set_random_seed/1]). -export([kill_slavenodes/0]). @@ -88,14 +87,16 @@ -define(data_dir_suffix, "_data/"). -define(suitelog_name, "suite.log"). -define(coverlog_name, "cover.html"). +-define(raw_coverlog_name, "cover.log"). -define(cross_coverlog_name, "cross_cover.html"). +-define(raw_cross_coverlog_name, "cross_cover.log"). 
+-define(cross_cover_info, "cross_cover.info"). -define(cover_total, "total_cover.log"). -define(unexpected_io_log, "unexpected_io.log"). -define(last_file, "last_name"). -define(last_link, "last_link"). -define(last_test, "last_test"). -define(html_ext, ".html"). --define(cross_cover_file, "cross.cover"). -define(now, erlang:now()). -define(void_fun, fun() -> ok end). @@ -408,7 +409,9 @@ cover(CoverFile, Analyse) -> cover(App, CoverFile, Analyse) -> controller_call({cover,{App,CoverFile},Analyse,true}). cover(App, CoverFile, Exclude, Include, Cross, Export, Analyse, Stop) -> - controller_call({cover,{App,{CoverFile,Exclude,Include,Cross,Export}},Analyse,Stop}). + controller_call({cover, + {App,{CoverFile,Exclude,Include,Cross,Export}}, + Analyse,Stop}). testcase_callback(ModFunc) -> controller_call({testcase_callback,ModFunc}). @@ -4897,33 +4900,52 @@ pinfo(P) -> %% - it does not belong to the application, but is listed in the %% {include,List} part of the App.cover file %% - it does not belong to the application, but is listed in the -%% cross.cover file (in the test_server application) under 'all' -%% or under the tested application. -%% -%% The modules listed in the cross.cover file are modules that are -%% hevily used by other applications than the one they belong -%% to. After all tests are completed, these modules can be analysed -%% with coverage data from all tests - see cross_cover_analyse/1. The -%% result is stored in a file called cross_cover.html in the -%% run.<timestamp> directory of the application the modules belong -%% to. -%% -%% For example, the lists module is listed in cross.cover to be -%% included in all tests. lists belongs to the stdlib -%% application. cross_cover_analyse/1 will create a file named -%% cross_cover.html under the newest stdlib.logs/run.xxx directory, -%% where the coverage result for the lists module from all tests is -%% presented. -%% -%% The lists module is also presented in the normal coverage log -%% for stdlib, but that only includes the coverage achieved by -%% the stdlib tests themselves. -%% -%% The Cross cover file cross.cover contains elements like this: -%% {App,Modules}. -%% where App can be an application name or the atom all. The -%% application (or all applications) shall cover compile the listed -%% Modules. +%% {cross,[{Tag,List}]} part of the App.cover file +%% +%% The modules listed in the 'cross' part of the cover file are +%% modules that are heavily used by other tests than the one where +%% they are explicitly tested. They should then be listed as 'cross' +%% in the cover file for the test where they are used but do not +%% belong. +%% +%% After all tests are completed, the these modules can be analysed +%% with coverage data from all tests where they are compiled - see +%% cross_cover_analyse/2. The result is stored in a file called +%% cross_cover.html in the run.<timestamp> directory of the +%% test the modules belong to. +%% +%% Example: +%% If the module m1 belongs to system s1 but is heavily used also in +%% the tests for another system s2, then the cover files for the two +%% systems could be like this: +%% +%% s1.cover: +%% {include,[m1]}. +%% +%% s2.cover: +%% {include,[....]}. % modules belonging to system s2 +%% {cross,[{s1,[m1]}]}. 
+%% +%% When the tests for both s1 and s2 are completed, run +%% cross_cover_analyse(Level,[{s1,S1LogDir},{s2,S2LogDir}]), and +%% the accumulated cover data for m1 will be written to +%% S1LogDir/[run.<timestamp>/]cross_cover.html +%% +%% S1LogDir and S2LogDir are either the run.<timestamp> directories +%% for the two tests, or the parent directory of these, in which case +%% the latest run.<timestamp> directory will be chosen. +%% +%% Note that the m1 module will also be presented in the normal +%% coverage log for s1 (due to the include statement in s1.cover), but +%% that only includes the coverage achieved by the s1 test itself. +%% +%% The Tag in the 'cross' statement in the cover file has no other +%% purpose than mapping the list of modules ([m1] in the example +%% above) to the correct log directory where it should be included in +%% the cross_cover.html file (S1LogDir in the example above). +%% I.e. the value of the Tag has no meaning, it could be foo as well +%% as s1 above, as long as the same Tag is used in the cover file and +%% in the call to cross_cover_analyse/2. %% Cover compilation @@ -4932,8 +4954,7 @@ cover_compile({App,{_File,Exclude,Include,Cross,_Export}}) -> cover_compile1({App,Exclude,Include,Cross}); cover_compile({App,CoverFile}) -> - Cross = get_cross_modules(App), - {Exclude,Include} = read_cover_file(CoverFile), + {Exclude,Include,Cross} = read_cover_file(CoverFile), cover_compile1({App,Exclude,Include,Cross}). cover_compile1(What) -> @@ -4944,41 +4965,57 @@ cover_compile1(What) -> %% (Exclude), and a list of modules that are not members of the %% application but shall be compiled (Include). read_cover_file(none) -> - {[],[]}; + {[],[],[]}; read_cover_file(CoverFile) -> case file:consult(CoverFile) of {ok,List} -> - case check_cover_file(List, [], []) of - {ok,Exclude,Include} -> {Exclude,Include}; + case check_cover_file(List, [], [], []) of + {ok,Exclude,Include,Cross} -> {Exclude,Include,Cross}; error -> io:fwrite("Faulty format of CoverFile ~p\n", [CoverFile]), - {[],[]} + {[],[],[]} end; {error,Reason} -> io:fwrite("Can't read CoverFile ~p\nReason: ~p\n", [CoverFile,Reason]), - {[],[]} + {[],[],[]} end. -check_cover_file([{exclude,all}|Rest], _, Include) -> - check_cover_file(Rest, all, Include); -check_cover_file([{exclude,Exclude}|Rest], _, Include) -> +check_cover_file([{exclude,all}|Rest], _, Include, Cross) -> + check_cover_file(Rest, all, Include, Cross); +check_cover_file([{exclude,Exclude}|Rest], _, Include, Cross) -> case lists:all(fun(M) -> is_atom(M) end, Exclude) of true -> - check_cover_file(Rest, Exclude, Include); + check_cover_file(Rest, Exclude, Include, Cross); false -> error end; -check_cover_file([{include,Include}|Rest], Exclude, _) -> +check_cover_file([{include,Include}|Rest], Exclude, _, Cross) -> case lists:all(fun(M) -> is_atom(M) end, Include) of true -> - check_cover_file(Rest, Exclude, Include); + check_cover_file(Rest, Exclude, Include, Cross); false -> error end; -check_cover_file([], Exclude, Include) -> - {ok,Exclude,Include}. +check_cover_file([{cross,Cross}|Rest], Exclude, Include, _) -> + case check_cross(Cross) of + true -> + check_cover_file(Rest, Exclude, Include, Cross); + false -> + error + end; +check_cover_file([], Exclude, Include, Cross) -> + {ok,Exclude,Include,Cross}. +check_cross([{Tag,Modules}|Rest]) -> + case lists:all(fun(M) -> is_atom(M) end, [Tag|Modules]) of + true -> + check_cross(Rest); + false -> + false + end; +check_cross([]) -> + true. 
%% Cover analysis, per application @@ -4999,16 +5036,17 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, Stop, TestDir) -> "<p><a href=\"~s\">Coverdata collected over all tests</a></p>", [?cross_coverlog_name]), - {CoverFile,_Included,Excluded} = + {CoverFile,_Included,Excluded,Cross} = case CoverInfo of - {File,Excl,Incl,_Cross,Export} -> + {File,Excl,Incl,Cr,Export} -> cover:export(Export), - {File,Incl,Excl}; + {File,Incl,Excl,Cr}; File -> - {Excl,Incl} = read_cover_file(File), - {File,Incl,Excl} + {Excl,Incl,Cr} = read_cover_file(File), + {File,Incl,Excl,Cr} end, io:fwrite(CoverLog, "<p>CoverFile: <code>~p</code>\n", [CoverFile]), + write_cross_cover_info(TestDir,Cross), case length(cover:imported_modules()) of Imps when Imps > 0 -> @@ -5021,6 +5059,8 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, Stop, TestDir) -> io:fwrite(CoverLog, "<p>Excluded module(s): <code>~p</code>\n", [Excluded]), Coverage = cover_analyse(Analyse, AnalyseMods, Stop), + file:write_file(filename:join(TestDir,?raw_coverlog_name), + term_to_binary(Coverage)), case lists:filter(fun({_M,{_,_,_}}) -> false; (_) -> true @@ -5042,20 +5082,20 @@ cover_analyse(Analyse, AnalyseMods, Stop) -> test_server:cover_analyse({Analyse,TestDir}, AnalyseMods, Stop). -%% Cover analysis, cross application +%% Cover analysis - accumulated over multiple tests %% This can be executed on any node after all tests are finished. -%% Apps = [{App,Dir}] -%% App = atom(), application name -%% Dir = string(), the log directory for App, normally where -%% run.<timestamp> is found. -%% Modules = [atom()], modules that have been cover compiled during tests -%% of other apps than the one they belong to. -cross_cover_analyse(Analyse, Apps) -> - cross_cover_analyse(Analyse, Apps, get_cross_modules()). -cross_cover_analyse(Analyse, Apps, Modules) -> - Apps1 = get_latest_run_dirs(Apps), - Apps2 = add_cross_modules(Modules,Apps1), - CoverdataFiles = get_coverdata_files(Apps2), +%% Analyse = overview | details +%% TagDirs = [{Tag,Dir}] +%% Tag = atom(), identifier +%% Dir = string(), the log directory for Tag, it can be a +%% run.<timestamp> directory or the parent directory of +%% such (in which case the latest run.<timestamp> directory +%% is used) +cross_cover_analyse(Analyse, TagDirs0) -> + TagDirs = get_latest_run_dirs(TagDirs0), + TagMods = get_all_cross_info(TagDirs,[]), + TagDirMods = add_cross_modules(TagMods,TagDirs), + CoverdataFiles = get_coverdata_files(TagDirMods), lists:foreach(fun(CDF) -> cover:import(CDF) end, CoverdataFiles), io:fwrite("Cover analysing...\n", []), DetailsFun = @@ -5065,39 +5105,52 @@ cross_cover_analyse(Analyse, Apps, Modules) -> OutFile = filename:join(Dir, atom_to_list(M) ++ ".CROSS_COVER.html"), - cover:analyse_to_file(M, OutFile, [html]), - {file,OutFile} + case cover:analyse_to_file(M, OutFile, [html]) of + {ok,_} -> + {file,OutFile}; + Error -> + Error + end end; _ -> fun(_,_) -> undefined end end, - Coverage = analyse_apps(Apps2, DetailsFun, []), + Coverage = analyse_tests(TagDirMods, DetailsFun, []), cover:stop(), - write_cross_cover_logs(Coverage,Apps2). + write_cross_cover_logs(Coverage,TagDirMods). -%% For each application from which there are cross cover analysed +write_cross_cover_info(_Dir,[]) -> + ok; +write_cross_cover_info(Dir,Cross) -> + {ok,Fd} = file:open(filename:join(Dir,?cross_cover_info),[write]), + lists:foreach(fun(C) -> io:format(Fd,"~p.~n",[C]) end, Cross), + file:close(Fd). 
+ +%% For each test from which there are cross cover analysed %% modules, write a cross cover log (cross_cover.html). -write_cross_cover_logs([{App,Coverage}|T],Apps) -> - case lists:keyfind(App,1,Apps) of +write_cross_cover_logs([{Tag,Coverage}|T],TagDirMods) -> + case lists:keyfind(Tag,1,TagDirMods) of {_,Dir,Mods} when Mods=/=[] -> + file:write_file(filename:join(Dir,?raw_cross_coverlog_name), + term_to_binary(Coverage)), CoverLogName = filename:join(Dir,?cross_coverlog_name), {ok,CoverLog} = file:open(CoverLogName, [write]), write_coverlog_header(CoverLog), io:fwrite(CoverLog, "<h1>Coverage results for \'~w\' from all tests</h1>\n", - [App]), + [Tag]), write_cover_result_table(CoverLog, Coverage), io:fwrite("Written file ~p\n", [CoverLogName]); _ -> ok end, - write_cross_cover_logs(T,Apps); + write_cross_cover_logs(T,TagDirMods); write_cross_cover_logs([],_) -> io:fwrite("done\n", []). %% Get the latest run.<timestamp> directories -get_latest_run_dirs([{App,Dir}|Apps]) -> - [{App,get_latest_run_dir(Dir)} | get_latest_run_dirs(Apps)]; +get_latest_run_dirs([{Tag,Dir}|Rest]) -> + [{Tag,get_latest_run_dir(Dir)} | get_latest_run_dirs(Rest)]; get_latest_run_dirs([]) -> []. @@ -5116,44 +5169,47 @@ get_latest_dir([_|T],Latest) -> get_latest_dir([],Latest) -> Latest. -%% Associate the cross cover modules with their applications. -add_cross_modules(Mods,Apps)-> - do_add_cross_modules(Mods,[{App,Dir,[]} || {App,Dir} <- Apps]). -do_add_cross_modules([Mod|Mods],Apps)-> - App = get_app(Mod), - NewApps = - case lists:keytake(App,1,Apps) of - {value,{App,Dir,AppMods},Rest} -> - [{App,Dir,lists:umerge([Mod],AppMods)}|Rest]; +get_all_cross_info([{_Tag,Dir}|Rest],Acc) -> + case file:consult(filename:join(Dir,?cross_cover_info)) of + {ok,TagMods} -> + get_all_cross_info(Rest,TagMods++Acc); + _ -> + get_all_cross_info(Rest,Acc) + end; +get_all_cross_info([],Acc) -> + Acc. + +%% Associate the cross cover modules with their log directories +add_cross_modules(TagMods,TagDirs)-> + do_add_cross_modules(TagMods,[{Tag,Dir,[]} || {Tag,Dir} <- TagDirs]). +do_add_cross_modules([{Tag,Mods1}|TagMods],TagDirMods)-> + NewTagDirMods = + case lists:keytake(Tag,1,TagDirMods) of + {value,{Tag,Dir,Mods},Rest} -> + [{Tag,Dir,lists:umerge(lists:sort(Mods1),Mods)}|Rest]; false -> - Apps + TagDirMods end, - do_add_cross_modules(Mods,NewApps); -do_add_cross_modules([],Apps) -> - %% Just to get the modules in the same order as app-only cover log - [{App,Dir,lists:reverse(Mods)} || {App,Dir,Mods} <- Apps]. - -get_app(Module) -> - Beam = code:which(Module), - AppDir = filename:basename(filename:dirname(filename:dirname(Beam))), - [AppStr|_] = string:tokens(AppDir,"-"), - list_to_atom(AppStr). + do_add_cross_modules(TagMods,NewTagDirMods); +do_add_cross_modules([],TagDirMods) -> + %% Just to get the modules in the same order as in the normal cover log + [{Tag,Dir,lists:reverse(Mods)} || {Tag,Dir,Mods} <- TagDirMods]. %% Find all exported coverdata files. -get_coverdata_files(Apps) -> +get_coverdata_files(TagDirMods) -> lists:flatmap( - fun({_,LatestAppDir,_}) -> - filelib:wildcard(filename:join(LatestAppDir,"all.coverdata")) + fun({_,LatestDir,_}) -> + filelib:wildcard(filename:join(LatestDir,"all.coverdata")) end, - Apps). + TagDirMods). -%% For each application, analyse all modules +%% For each test, analyse all modules %% Used for cross cover analysis. 
-analyse_apps([{App,LastTest,Modules}|T], DetailsFun, Acc) -> +analyse_tests([{Tag,LastTest,Modules}|T], DetailsFun, Acc) -> Cov = analyse_modules(LastTest, Modules, DetailsFun, []), - analyse_apps(T, DetailsFun, [{App,Cov}|Acc]); -analyse_apps([], _DetailsFun, Acc) -> + analyse_tests(T, DetailsFun, [{Tag,Cov}|Acc]); +analyse_tests([], _DetailsFun, Acc) -> Acc. %% Analyse each module @@ -5166,27 +5222,6 @@ analyse_modules(_Dir, [], _DetailsFun, Acc) -> Acc. -%% Read the cross cover file (cross.cover) -get_cross_modules() -> - get_cross_modules(all). -get_cross_modules(App) -> - case file:consult(?cross_cover_file) of - {ok,List} -> - get_cross_modules(App, List, []); - _X -> - [] - end. - -get_cross_modules(App, [{_To,Modules}|T], Acc) when App==all-> - get_cross_modules(App, T, Acc ++ Modules); -get_cross_modules(App, [{To,Modules}|T], Acc) when To==App; To==all-> - get_cross_modules(App, T, Acc ++ Modules); -get_cross_modules(App, [_H|T], Acc) -> - get_cross_modules(App, T, Acc); -get_cross_modules(_App, [], Acc) -> - Acc. - - %% Support functions for writing the cover logs (both cross and normal) write_coverlog_header(CoverLog) -> case catch diff --git a/lib/test_server/src/ts.erl b/lib/test_server/src/ts.erl index 115e783070..cfd7161dbd 100644 --- a/lib/test_server/src/ts.erl +++ b/lib/test_server/src/ts.erl @@ -160,8 +160,8 @@ help(installed) -> " the given run options\n", " ts:cross_cover_analyse(Level)\n" " - Used after ts:run with option cover or \n" - " cover_details. Analyses modules specified in\n" - " cross.cover.\n" + " cover_details. Analyses modules specified with\n" + " a 'cross' statement in the cover spec file.\n" " Level can be 'overview' or 'details'.\n", " ts:compile_testcases()~n" " ts:compile_testcases(Apps)~n" @@ -528,8 +528,7 @@ cross_cover_analyse([Level]) -> cross_cover_analyse(Level); cross_cover_analyse(Level) -> Apps = get_last_app_tests(), - Modules = get_cross_modules(Apps,[]), - test_server_ctrl:cross_cover_analyse(Level,Apps,Modules). + test_server_ctrl:cross_cover_analyse(Level,Apps). get_last_app_tests() -> AllTests = filelib:wildcard(filename:join(["*","*_test.logs"])), @@ -558,30 +557,6 @@ get_last_app_tests([Dir|Dirs],RE,Acc) -> get_last_app_tests([],_,Acc) -> Acc. -get_cross_modules([{App,_}|Apps],Acc) -> - Mods = cross_modules(App), - get_cross_modules(Apps,lists:umerge(Mods,Acc)); -get_cross_modules([],Acc) -> - Acc. - -cross_modules(App) -> - case default_coverfile(App) of - none -> - []; - File -> - case catch file:consult(File) of - {ok,CoverSpec} -> - case lists:keyfind(cross_apps,1,CoverSpec) of - false -> - []; - {cross_apps,App,Modules} -> - lists:usort(Modules) - end; - _ -> - [] - end - end. - %%% Implementation. check_and_run(Fun) -> diff --git a/lib/test_server/test/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE.erl index 95a3423fef..fb82a87fd0 100644 --- a/lib/test_server/test/test_server_SUITE.erl +++ b/lib/test_server/test/test_server_SUITE.erl @@ -80,7 +80,7 @@ all() -> [test_server_SUITE, test_server_parallel01_SUITE, test_server_conf02_SUITE, test_server_conf01_SUITE, test_server_skip_SUITE, test_server_shuffle01_SUITE, - test_server_break_SUITE]. + test_server_break_SUITE, test_server_cover_SUITE]. %%-------------------------------------------------------------------- @@ -93,37 +93,95 @@ test_server_SUITE(Config) -> % rpc:call(Node,dbg, tracer,[]), % rpc:call(Node,dbg, p,[all,c]), % rpc:call(Node,dbg, tpl,[test_server_ctrl,x]), - run_test_server_tests("test_server_SUITE", 38, 1, 30, - 19, 9, 1, 11, 2, 25, Config). 
+ run_test_server_tests("test_server_SUITE", + [{test_server_SUITE,skip_case7,"SKIPPED!"}], + 38, 1, 30, 19, 9, 1, 11, 2, 25, Config). test_server_parallel01_SUITE(Config) -> - run_test_server_tests("test_server_parallel01_SUITE", 37, 0, 19, - 19, 0, 0, 0, 0, 37, Config). + run_test_server_tests("test_server_parallel01_SUITE", [], + 37, 0, 19, 19, 0, 0, 0, 0, 37, Config). test_server_shuffle01_SUITE(Config) -> - run_test_server_tests("test_server_shuffle01_SUITE", 130, 0, 0, - 76, 0, 0, 0, 0, 130, Config). + run_test_server_tests("test_server_shuffle01_SUITE", [], + 130, 0, 0, 76, 0, 0, 0, 0, 130, Config). test_server_skip_SUITE(Config) -> - run_test_server_tests("test_server_skip_SUITE", 3, 0, 1, - 0, 0, 1, 3, 0, 0, Config). + run_test_server_tests("test_server_skip_SUITE", [], + 3, 0, 1, 0, 0, 1, 3, 0, 0, Config). test_server_conf01_SUITE(Config) -> - run_test_server_tests("test_server_conf01_SUITE", 24, 0, 12, - 12, 0, 0, 0, 0, 24, Config). + run_test_server_tests("test_server_conf01_SUITE", [], + 24, 0, 12, 12, 0, 0, 0, 0, 24, Config). test_server_conf02_SUITE(Config) -> - run_test_server_tests("test_server_conf02_SUITE", 26, 0, 12, - 12, 0, 0, 0, 0, 26, Config). + run_test_server_tests("test_server_conf02_SUITE", [], + 26, 0, 12, 12, 0, 0, 0, 0, 26, Config). test_server_break_SUITE(Config) -> - D = run_test_server_tests("test_server_break_SUITE", 8, 2, 6, - 4, 0, 0, 0, 2, 6, Config), - D. + run_test_server_tests("test_server_break_SUITE", [], + 8, 2, 6, 4, 0, 0, 0, 2, 6, Config). -run_test_server_tests(SuiteName, NCases, NFail, NExpected, NSucc, +test_server_cover_SUITE(Config) -> + case test_server:is_cover() of + true -> + {skip, "Cover already running"}; + false -> + PrivDir = ?config(priv_dir,Config), + + %% Test suite has two test cases + %% tc1 calls cover_helper:foo/0 + %% tc2 calls cover_helper:bar/0 + %% Each function in cover_helper is one line. + %% + %% First test run skips tc2, so only cover_helper:foo/0 is executed. + %% Cover file specifies to include cover_helper in this test run. + CoverFile1 = filename:join(PrivDir,"t1.cover"), + CoverSpec1 = {include,[cover_helper]}, + file:write_file(CoverFile1,io_lib:format("~p.~n",[CoverSpec1])), + run_test_server_tests("test_server_cover_SUITE", + [{test_server_cover_SUITE,tc2,"SKIPPED!"}], + 4, 0, 2, 1, 1, 0, 1, 0, 3, + CoverFile1, Config), + + %% Next test run skips tc1, so only cover_helper:bar/0 is executed. + %% Cover file specifies cross compilation of cover_helper + CoverFile2 = filename:join(PrivDir,"t2.cover"), + CoverSpec2 = {cross,[{t1,[cover_helper]}]}, + file:write_file(CoverFile2,io_lib:format("~p.~n",[CoverSpec2])), + run_test_server_tests("test_server_cover_SUITE", + [{test_server_cover_SUITE,tc1,"SKIPPED!"}], + 4, 0, 2, 1, 1, 0, 1, 0, 3, CoverFile2, Config), + + %% Cross cover analyse + WorkDir = ?config(work_dir,Config), + WC = filename:join([WorkDir,"test_server_cover_SUITE.logs","run.*"]), + [D2,D1|_] = lists:reverse(lists:sort(filelib:wildcard(WC))), + TagDirs = [{t1,D1},{t2,D2}], + test_server_ctrl:cross_cover_analyse(details,TagDirs), + + %% Check that cover log shows only what is really included + %% in the test and cross cover log show the accumulated + %% result. + {ok,Cover1} = file:read_file(filename:join(D1,"cover.log")), + [{cover_helper,{1,1,_}}] = binary_to_term(Cover1), + {ok,Cover2} = file:read_file(filename:join(D2,"cover.log")), + [] = binary_to_term(Cover2), + {ok,Cross} = file:read_file(filename:join(D1,"cross_cover.log")), + [{cover_helper,{2,0,_}}] = binary_to_term(Cross), + ok + end. 
+ + +run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc, NUsrSkip, NAutoSkip, NActualSkip, NActualFail, NActualSucc, Config) -> + run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc, + NUsrSkip, NAutoSkip, + NActualSkip, NActualFail, NActualSucc, false, Config). + +run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc, + NUsrSkip, NAutoSkip, + NActualSkip, NActualFail, NActualSucc, Cover, Config) -> WorkDir = proplists:get_value(work_dir, Config), ct:log("<a href=\"file://~s\">Test case log files</a>\n", @@ -131,11 +189,17 @@ run_test_server_tests(SuiteName, NCases, NFail, NExpected, NSucc, Node = proplists:get_value(node, Config), {ok,_Pid} = rpc:call(Node,test_server_ctrl, start, []), + case Cover of + false -> + ok; + _ -> + rpc:call(Node,test_server_ctrl,cover,[Cover,details]) + end, rpc:call(Node, test_server_ctrl,add_dir_with_skip, [SuiteName, [proplists:get_value(data_dir,Config)],SuiteName, - [{test_server_SUITE,skip_case7,"SKIPPED!"}]]), + Skip]), until(fun() -> rpc:call(Node,test_server_ctrl,jobs,[]) =:= [] diff --git a/lib/test_server/test/test_server_SUITE_data/Makefile.src b/lib/test_server/test/test_server_SUITE_data/Makefile.src index ec8ddd78b0..c770627f04 100644 --- a/lib/test_server/test/test_server_SUITE_data/Makefile.src +++ b/lib/test_server/test/test_server_SUITE_data/Makefile.src @@ -5,4 +5,6 @@ all: erlc test_server_shuffle01_SUITE.erl erlc test_server_conf02_SUITE.erl erlc test_server_skip_SUITE.erl - erlc test_server_break_SUITE.erl
\ No newline at end of file + erlc test_server_break_SUITE.erl + erlc test_server_cover_SUITE.erl + erlc +debug_info test_server_cover_SUITE_data/cover_helper.erl
\ No newline at end of file diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl index 70e30a3334..d9f009679a 100644 --- a/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl +++ b/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl @@ -41,7 +41,7 @@ init_per_suite(Config) -> spawn(fun break_and_continue_sup/0), Config. -end_per_suite(Config) -> +end_per_suite(_Config) -> ok. init_per_testcase(Case,Config) when Case==break_in_init_tc -> @@ -90,19 +90,19 @@ break_in_end_tc_after_fail(Config) when is_list(Config) -> break_in_end_tc_after_abort(Config) when is_list(Config) -> ?t:adjusted_sleep(2000). % will cause a timetrap timeout -%%%----------------------------------------------------------------- -%%% Internal functions - %% This test case checks that all breaks in previous test cases was %% also continued, and that the break lasted as long as expected. %% The reason for this is that some of the breaks above are in %% end_per_testcase, and failures there will only produce a warning, %% not an error - so this is to catch the error for real. -check_all_breaks(Config) -> +check_all_breaks(Config) when is_list(Config) -> break_and_continue_sup ! {done,self()}, receive {Breaks,Continued} -> check_all_breaks(Breaks,Continued) end. +%%%----------------------------------------------------------------- +%%% Internal functions + check_all_breaks([{From,Case,T,Start}|Breaks],[{From,End}|Continued]) -> Diff = timer:now_diff(End,Start), diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl new file mode 100644 index 0000000000..b1ae70a302 --- /dev/null +++ b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl @@ -0,0 +1,58 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2012. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% +-module(test_server_cover_SUITE). + +-export([all/1, init_per_suite/1, end_per_suite/1]). +-export([init_per_testcase/2, end_per_testcase/2]). +-export([tc1/1, tc2/1]). + +-include_lib("test_server/include/test_server.hrl"). + +all(suite) -> + [tc1,tc2]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_testcase(_Case,Config) -> + Dog = ?t:timetrap({minutes,10}), + [{watchdog, Dog}|Config]. + +end_per_testcase(_Case,Config) -> + Dog=?config(watchdog, Config), + ?t:timetrap_cancel(Dog), + ok. + + +%%%----------------------------------------------------------------- +%%% Test cases +tc1(Config) when is_list(Config) -> + cover_helper:foo(), + ok. + +tc2(Config) when is_list(Config) -> + cover_helper:bar(), + ok. 
+ +%%%----------------------------------------------------------------- +%%% Internal functions + diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl new file mode 100644 index 0000000000..6c74eb4e8a --- /dev/null +++ b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl @@ -0,0 +1,4 @@ +-module(cover_helper). +-compile(export_all). +foo() -> ok. +bar() -> ok. |
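On the test_server side, the new test_server_cover_SUITE above drives the same mechanism through test_server_ctrl. A rough sketch of the analysis step, assuming the t1/t2 tags and cover files used by test_server_SUITE above (D1 and D2 stand for the run.<timestamp> log directories of the two runs; t1.cover held {include,[cover_helper]} and t2.cover held {cross,[{t1,[cover_helper]}]}; only cover/2 and cross_cover_analyse/2 come from the patch itself):

    %% After both runs are done, from any Erlang shell (test_server_ctrl
    %% does not need to be started):
    test_server_ctrl:cross_cover_analyse(details, [{t1,D1}, {t2,D2}]),

    %% The raw cover.log/cross_cover.log files written by this patch are
    %% term_to_binary'd analyse results, so they can be read back directly:
    {ok,Bin} = file:read_file(filename:join(D1, "cross_cover.log")),
    [{cover_helper,{2,0,_}}] = binary_to_term(Bin).  %% accumulated over t1 and t2,
                                                     %% as asserted by the suite above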