Diffstat (limited to 'lib/test_server')
-rw-r--r--  lib/test_server/doc/src/test_server_ctrl.xml                                            93
-rw-r--r--  lib/test_server/doc/src/ts.xml                                                           4
-rw-r--r--  lib/test_server/src/Makefile                                                             7
-rw-r--r--  lib/test_server/src/erl2html2.erl                                                       29
-rw-r--r--  lib/test_server/src/test_server.erl                                                     92
-rw-r--r--  lib/test_server/src/test_server_ctrl.erl                                               318
-rw-r--r--  lib/test_server/src/test_server_sup.erl                                                 28
-rw-r--r--  lib/test_server/src/ts.erl                                                              31
-rw-r--r--  lib/test_server/src/ts_lib.erl                                                           1
-rw-r--r--  lib/test_server/test/test_server_SUITE.erl                                             100
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/Makefile.src                                 4
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl                 10
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl                 58
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl  4
-rw-r--r--  lib/test_server/test/test_server_test_lib.erl                                           19
15 files changed, 512 insertions(+), 286 deletions(-)
diff --git a/lib/test_server/doc/src/test_server_ctrl.xml b/lib/test_server/doc/src/test_server_ctrl.xml
index 41bc0bcc75..af96f1fe7e 100644
--- a/lib/test_server/doc/src/test_server_ctrl.xml
+++ b/lib/test_server/doc/src/test_server_ctrl.xml
@@ -427,11 +427,21 @@ Optional, if not given the test server controller node
<p>A <c>CoverFile</c> can have the following entries:</p>
<code type="none">
{exclude, all | ExcludeModuleList}.
-{include, IncludeModuleList}. </code>
+{include, IncludeModuleList}.
+{cross, CrossCoverInfo}.</code>
<p>Note that each line must end with a full
stop. <c>ExcludeModuleList</c> and <c>IncludeModuleList</c>
are lists of atoms, where each atom is a module name.
</p>
+
+ <p><c>CrossCoverInfo</c> is used when collecting cover data
+ over multiple tests. Modules listed here are compiled, but
+ they will not be analysed when the test is finished. See
+ <seealso
+ marker="#cross_cover_analyse-2">cross_cover_analyse/2</seealso>
+ for more information about the cross cover mechanism and the
+ format of <c>CrossCoverInfo</c>.
+ </p>
<p>If both an <c>Application</c> and a <c>CoverFile</c> is
given, all modules in the application are cover compiled,
except for the modules listed in <c>ExcludeModuleList</c>. The
@@ -467,30 +477,71 @@ Optional, if not given the test server controller node
</desc>
</func>
<func>
- <name>cross_cover_analyse(Level) -> ok</name>
- <fsummary>Analyse cover data collected from all tests</fsummary>
+ <name>cross_cover_analyse(Level, Tests) -> ok</name>
+ <fsummary>Analyse cover data collected from multiple tests</fsummary>
<type>
<v>Level = details | overview</v>
+ <v>Tests = [{Tag,LogDir}]</v>
+ <v>Tag = atom()</v>
+ <d>Test identifier.</d>
+ <v>LogDir = string()</v>
+ <d>Log directory for the test identified by <c>Tag</c>. This
+ can either be the <c>run.&lt;timestamp&gt;</c> directory or
+ the parent directory of this (in which case the latest
+ <c>run.&lt;timestamp&gt;</c> directory is chosen).</d>
</type>
<desc>
- <p>Analyse cover data collected from all tests. The modules
- analysed are the ones listed in the cross cover file
- <c>cross.cover</c> in the current directory of the test
- server.</p>
- <p>The modules listed in the <c>cross.cover</c> file are
- modules that are heavily used by other applications than the
- one they belong to. This function should be run after all
- tests are completed, and the result will be stored in a file
- called cross_cover.html in the run.&lt;timestamp&gt;
- directory of the application the modules belong to.
- </p>
- <p>The <c>cross.cover</c> file contains elements like this:</p>
- <pre>
-{App,Modules}. </pre>
- <p>where <c>App</c> can be an application name or the atom
- <c>all</c>. The application (or all applications) will cover
- compile the listed <c>Modules</c>.
- </p>
+ <p>Analyse cover data collected from multiple tests. The modules
+ analysed are the ones listed in <c>cross</c> statements in
+ the cover files. These are modules that are heavily used by
+ tests other than the one where they belong or are explicitly
+ tested. They should then be listed as cross modules in the
+ cover file for each test where they are used but do not
+ belong. See the example below.</p>
+ <p>This function should be run after all tests are completed,
+ and the result will be stored in a file called
+ <c>cross_cover.html</c> in the <c>run.&lt;timestamp&gt;</c>
+ directory of the test the modules belong to.</p>
+ <p>Note that the function can be executed on any node, and it
+ does not require <c>test_server_ctrl</c> to be started first.</p>
+ <p>The <c>cross</c> statement in the cover file must be like this:</p>
+ <code type="none">
+{cross,[{Tag,Modules}]}.</code>
+ <p>where <c>Tag</c> is the same as <c>Tag</c> in the
+ <c>Tests</c> parameter to this function and <c>Modules</c> is a
+ list of module names (atoms).</p>
+ <p><em>Example:</em></p>
+ <p>If the module <c>m1</c> belongs to system <c>s1</c> but is
+ heavily used also in the tests for another system <c>s2</c>,
+ then the cover files for the two systems' tests could be like
+ this:</p>
+<code type="none">
+s1.cover:
+ {include,[m1]}.
+
+s2.cover:
+ {include,[....]}. % modules belonging to system s2
+ {cross,[{s1,[m1]}]}.</code>
+ <p>When the tests for both <c>s1</c> and <c>s2</c> are completed, run</p>
+<code type="none">
+test_server_ctrl:cross_cover_analyse(Level,[{s1,S1LogDir},{s2,S2LogDir}])
+</code>
+
+ <p>and the accumulated cover data for <c>m1</c> will be written to
+ <c>S1LogDir/[run.&lt;timestamp&gt;/]cross_cover.html</c>.</p>
+ <p>Note that the <c>m1</c> module will also be presented in the
+ normal coverage log for <c>s1</c> (due to the include statement in
+ <c>s1.cover</c>), but that only includes the coverage achieved by the
+ <c>s1</c> test itself.</p>
+ <p>The Tag in the <c>cross</c> statement in the cover file has
+ no other purpose than mapping the list of modules
+ (<c>[m1]</c> in the example above) to the correct log
+ directory where it should be included in the
+ <c>cross_cover.html</c> file (<c>S1LogDir</c> in the example
+ above). That is, the value of <c>Tag</c> has no meaning in
+ itself; it could be <c>foo</c> as well as <c>s1</c>, as long as
+ the same <c>Tag</c> is used in the cover file and in the
+ call to this function.</p>
</desc>
</func>
<func>
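
For illustration, a minimal Erlang sketch of the workflow documented above, using the s1/s2 example. The cover file contents follow the example; the log directory paths are hypothetical placeholders for wherever the two test runs write their logs:

    %% Cover files as in the example above (m2 and m3 are made-up s2 modules).
    ok = file:write_file("s1.cover", "{include,[m1]}.\n"),
    ok = file:write_file("s2.cover", ["{include,[m2,m3]}.\n",
                                      "{cross,[{s1,[m1]}]}.\n"]).

    %% ... run the s1 and s2 tests with cover enabled and these cover files ...

    %% Afterwards, on any node, accumulate the cover data for m1 into the
    %% s1 test's log directory:
    ok = test_server_ctrl:cross_cover_analyse(details,
                                              [{s1,"logs/s1_test.logs"},
                                               {s2,"logs/s2_test.logs"}]).
    %% The accumulated result ends up in the latest run.<timestamp>
    %% directory under logs/s1_test.logs, as cross_cover.html.
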
diff --git a/lib/test_server/doc/src/ts.xml b/lib/test_server/doc/src/ts.xml
index 4a2c536e96..82ba3a5017 100644
--- a/lib/test_server/doc/src/ts.xml
+++ b/lib/test_server/doc/src/ts.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2007</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -450,7 +450,7 @@ This option is mandatory for remote targets
<desc>
<p>Analyse cover data collected from all tests.
</p>
- <p>See test_server_ctrl:cross_cover_analyse/1
+ <p>See test_server_ctrl:cross_cover_analyse/2
</p>
</desc>
</func>
diff --git a/lib/test_server/src/Makefile b/lib/test_server/src/Makefile
index 20e7a5942c..43a03f4e1d 100644
--- a/lib/test_server/src/Makefile
+++ b/lib/test_server/src/Makefile
@@ -69,7 +69,6 @@ INTERNAL_HRL_FILES = test_server_internal.hrl
TS_HRL_FILES= ts.hrl
C_FILES =
AUTOCONF_FILES = configure.in conf_vars.in
-COVER_FILES = cross.cover
PROGRAMS = configure config.sub config.guess install-sh
CONFIG = ts.config ts.unix.config ts.win32.config
@@ -113,10 +112,10 @@ configure: configure.in
# Special Build Targets
# ----------------------------------------------------
$(APP_TARGET): $(APP_SRC) ../vsn.mk
- sed -e 's;%VSN%;$(VSN);' $< > $@
+ $(vsn_verbose)sed -e 's;%VSN%;$(VSN);' $< > $@
$(APPUP_TARGET): $(APPUP_SRC) ../vsn.mk
- sed -e 's;%VSN%;$(VSN);' $< > $@
+ $(vsn_verbose)sed -e 's;%VSN%;$(VSN);' $< > $@
# ----------------------------------------------------
# Release Target
@@ -137,7 +136,7 @@ release_tests_spec: opt
$(INSTALL_DATA) $(ERL_FILES) $(TS_ERL_FILES) \
$(HRL_FILES) $(INTERNAL_HRL_FILES) $(TS_HRL_FILES) \
$(TS_TARGET_FILES) \
- $(AUTOCONF_FILES) $(C_FILES) $(COVER_FILES) $(CONFIG) \
+ $(AUTOCONF_FILES) $(C_FILES) $(CONFIG) \
"$(RELEASE_PATH)/test_server"
$(INSTALL_SCRIPT) $(PROGRAMS) "$(RELEASE_PATH)/test_server"
diff --git a/lib/test_server/src/erl2html2.erl b/lib/test_server/src/erl2html2.erl
index 9c459c05d4..1729257809 100644
--- a/lib/test_server/src/erl2html2.erl
+++ b/lib/test_server/src/erl2html2.erl
@@ -34,11 +34,17 @@ convert(File, Dest) ->
%%
%% FIXME: The colours should *really* be set with
%% stylesheets...
+ Encoding = encoding(File),
Header = ["<!DOCTYPE HTML PUBLIC "
"\"-//W3C//DTD HTML 3.2 Final//EN\">\n"
"<!-- autogenerated by '"++atom_to_list(?MODULE)++"'. -->\n"
"<html>\n"
- "<head><title>", File, "</title></head>\n\n"
+ "<head>\n"
+ "<meta http-equiv=\"Content-Type\" content=\"text/html;"
+ "charset=",
+ Encoding,"\"/>\n"
+ "<title>", File, "</title>\n"
+ "</head>\n\n"
"<body bgcolor=\"white\" text=\"black\""
" link=\"blue\" vlink=\"purple\" alink=\"red\">\n"],
convert(File, Dest, Header).
@@ -55,12 +61,12 @@ convert(File, Dest, Header) ->
case file:open(Dest,[write,raw]) of
{ok,DFd} ->
file:write(DFd,[Header,"<pre>\n"]),
- Lines = build_html(SFd,DFd,Functions),
+ _Lines = build_html(SFd,DFd,Functions),
file:write(DFd,["</pre>\n",footer(),
"</body>\n</html>\n"]),
%% {_, Time2} = statistics(runtime),
%% io:format("Converted ~p lines in ~.2f Seconds.~n",
- %% [Lines, Time2/1000]),
+ %% [_Lines, Time2/1000]),
file:close(SFd),
file:close(DFd),
ok;
@@ -180,3 +186,20 @@ possibly_enhance(Str,false) ->
%%% End of the file
footer() ->
"".
+
+%%%-----------------------------------------------------------------
+%%% Read encoding from source file
+encoding(File) ->
+ Encoding =
+ case epp:read_encoding(File) of
+ none ->
+ epp:default_encoding();
+ E ->
+ E
+ end,
+ html_encoding(Encoding).
+
+html_encoding(latin1) ->
+ "iso-8859-1";
+html_encoding(utf8) ->
+ "utf-8".
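
A usage sketch for the change above, with a hypothetical source file name; the point is that the generated HTML now declares the charset that epp reports for the source, so listings of utf-8 encoded suites render correctly in the browser:

    %% erl2html2 reads the encoding from the source file itself: a
    %% '%% -*- coding: utf-8 -*-' comment makes epp:read_encoding/1 return
    %% utf8, which is emitted as charset=utf-8; a file without a coding
    %% comment falls back to epp:default_encoding() and is emitted as
    %% charset=iso-8859-1 or charset=utf-8 accordingly.
    ok = erl2html2:convert("my_SUITE.erl", "my_SUITE.erl.html").
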
diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl
index 14cdfd391a..37cd8fac99 100644
--- a/lib/test_server/src/test_server.erl
+++ b/lib/test_server/src/test_server.erl
@@ -60,8 +60,6 @@
-include("test_server_internal.hrl").
-include_lib("kernel/include/file.hrl").
--define(pl2a(M), test_server_sup:package_atom(M)).
-
init_target_info() ->
[$.|Emu] = code:objfile_extension(),
{_, OTPRel} = init:script_id(),
@@ -97,7 +95,8 @@ init_purify() ->
%% is found, else {error,application_not_found}.
cover_compile({none,_Exclude,Include,Cross}) ->
- CompileMods = Include++Cross,
+ CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross),
+ CompileMods = Include++CrossMods,
case length(CompileMods) of
0 ->
io:fwrite("WARNING: No modules to cover compile!\n\n",[]),
@@ -111,7 +110,8 @@ cover_compile({none,_Exclude,Include,Cross}) ->
{ok,Include}
end;
cover_compile({App,all,Include,Cross}) ->
- CompileMods = Include++Cross,
+ CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross),
+ CompileMods = Include++CrossMods,
case length(CompileMods) of
0 ->
io:fwrite("WARNING: No modules to cover compile!\n\n",[]),
@@ -129,9 +129,10 @@ cover_compile({App,all,Include,Cross}) ->
{ok,Include}
end;
cover_compile({App,Exclude,Include,Cross}) ->
+ CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross),
case code:lib_dir(App) of
{error,bad_name} ->
- case Include++Cross of
+ case Include++CrossMods of
[] ->
io:format("\nWARNING: Can't find lib_dir for \'~w\'\n"
"Not cover compiling!\n\n",[App]),
@@ -152,7 +153,7 @@ cover_compile({App,Exclude,Include,Cross}) ->
WC = filename:join(EbinDir,"*.beam"),
AllMods = module_names(filelib:wildcard(WC)),
AnalyseMods = (AllMods ++ Include) -- Exclude,
- CompileMods = AnalyseMods ++ Cross,
+ CompileMods = AnalyseMods ++ CrossMods,
case length(CompileMods) of
0 ->
io:fwrite("WARNING: No modules to cover compile!\n\n",[]),
@@ -317,14 +318,21 @@ pmap(Fun,List) ->
do_cover_for_node(Node,CoverFunc) ->
+ do_cover_for_node(Node,CoverFunc,true).
+do_cover_for_node(Node,CoverFunc,StickUnstick) ->
%% In case a slave node is starting another slave node! I.e. this
%% function is executed on a slave node - then the cover function
%% must be executed on the master node. This is for instance the
%% case in test_server's own tests.
MainCoverNode = cover:get_main_node(),
- Sticky = unstick_all_sticky(MainCoverNode,Node),
+ Sticky =
+ if StickUnstick -> unstick_all_sticky(MainCoverNode,Node);
+ true -> ok
+ end,
rpc:call(MainCoverNode,cover,CoverFunc,[Node]),
- stick_all_sticky(Node,Sticky).
+ if StickUnstick -> stick_all_sticky(Node,Sticky);
+ true -> ok
+ end.
unstick_all_sticky(Node) ->
unstick_all_sticky(node(),Node).
@@ -911,7 +919,7 @@ run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit,
put(test_server_logopts, LogOpts),
Where = [{Mod,Func}],
put(test_server_loc, Where),
- FWInitResult = test_server_sup:framework_call(init_tc,[?pl2a(Mod),Func,Args0],
+ FWInitResult = test_server_sup:framework_call(init_tc,[Mod,Func,Args0],
{ok,Args0}),
set_tc_state(running),
{{Time,Value},Loc,Opts} =
@@ -1045,7 +1053,7 @@ do_end_tc_call(Mod, Func, Res, Return) ->
Ref = make_ref(),
if FwMod == "ct_framework" ; FwMod == "undefined"; FwMod == false ->
case test_server_sup:framework_call(
- end_tc, [?pl2a(Mod),Func,Res, Return], ok) of
+ end_tc, [Mod,Func,Res, Return], ok) of
{fail,FWReason} ->
{failed,FWReason};
ok ->
@@ -1060,7 +1068,7 @@ do_end_tc_call(Mod, Func, Res, Return) ->
end;
true ->
case test_server_sup:framework_call(FwMod, end_tc,
- [?pl2a(Mod),Func,Res], Ref) of
+ [Mod,Func,Res], Ref) of
{fail,FWReason} ->
{failed,FWReason};
_Else ->
@@ -1294,11 +1302,11 @@ get_loc(Pid) ->
fw_error_notify(Mod, Func, Args, Error) ->
test_server_sup:framework_call(error_notification,
- [?pl2a(Mod),Func,[Args],
+ [Mod,Func,[Args],
{Error,unknown}]).
fw_error_notify(Mod, Func, Args, Error, Loc) ->
test_server_sup:framework_call(error_notification,
- [?pl2a(Mod),Func,[Args],
+ [Mod,Func,[Args],
{Error,Loc}]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2186,12 +2194,9 @@ start_node(Name, Type, Options) ->
%% Cannot run cover on shielded node or on a node started
%% by a shielded node.
- Cover = case is_cover() of
+ Cover = case is_cover(Node) of
true ->
- not is_shielded(Name)
- andalso same_version(Node)
- andalso proplists:get_value(start_cover,Options,
- true);
+ proplists:get_value(start_cover,Options,true);
false ->
false
end,
@@ -2230,15 +2235,8 @@ wait_for_node(Slave) ->
Result = receive {sync_result,R} -> R end,
case Result of
ok ->
- Cover = case is_cover() of
- true ->
- not is_shielded(Slave) andalso same_version(Slave);
- false ->
- false
- end,
-
net_adm:ping(Slave),
- case Cover of
+ case is_cover(Slave) of
true ->
do_cover_for_node(Slave,start);
_ ->
@@ -2256,12 +2254,9 @@ wait_for_node(Slave) ->
%% Kills a (remote) node.
%% Also inform test_server_ctrl so it can clean up!
stop_node(Slave) ->
- Nocover = is_shielded(Slave) orelse not same_version(Slave),
- case is_cover() of
- true when not Nocover ->
- do_cover_for_node(Slave,flush);
- _ ->
- ok
+ Cover = is_cover(Slave),
+ if Cover -> do_cover_for_node(Slave,flush,false);
+ true -> ok
end,
group_leader() ! {sync_apply,self(),{test_server_ctrl,stop_node,[Slave]}},
Result = receive {sync_result,R} -> R end,
@@ -2273,10 +2268,15 @@ stop_node(Slave) ->
{nodedown, Slave} ->
format(minor, "Stopped slave node: ~p", [Slave]),
format(major, "=node_stop ~p", [Slave]),
+ if Cover -> do_cover_for_node(Slave,stop,false);
+ true -> ok
+ end,
true
after 30000 ->
format("=== WARNING: Node ~p does not seem to terminate.",
[Slave]),
+ erlang:monitor_node(Slave, false),
+ receive {nodedown, Slave} -> ok after 0 -> ok end,
false
end;
{error, _Reason} ->
@@ -2288,9 +2288,27 @@ stop_node(Slave) ->
[Slave]),
case net_adm:ping(Slave)of
pong ->
+ erlang:monitor_node(Slave, true),
slave:stop(Slave),
- true;
+ receive
+ {nodedown, Slave} ->
+ format(minor, "Stopped slave node: ~p", [Slave]),
+ format(major, "=node_stop ~p", [Slave]),
+ if Cover -> do_cover_for_node(Slave,stop,false);
+ true -> ok
+ end,
+ true
+ after 30000 ->
+ format("=== WARNING: Node ~p does not seem to terminate.",
+ [Slave]),
+ erlang:monitor_node(Slave, false),
+ receive {nodedown, Slave} -> ok after 0 -> ok end,
+ false
+ end;
pang ->
+ if Cover -> do_cover_for_node(Slave,stop,false);
+ true -> ok
+ end,
false
end
end.
@@ -2377,6 +2395,14 @@ same_version(Name) ->
OtherVersion = rpc:call(Name, erlang, system_info, [version]),
ThisVersion =:= OtherVersion.
+is_cover(Name) ->
+ case is_cover() of
+ true ->
+ not is_shielded(Name) andalso same_version(Name);
+ false ->
+ false
+ end.
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% temp_name(Stem) -> string()
%% Stem = string()
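
With this change the Cross argument passed to cover_compile/1 is no longer a flat module list but a list of {Tag,Modules} tuples read from the cover file, which is why each clause above flattens it before compiling. A small sketch of that flattening, with made-up tags and module names:

    Include = [some_included_mod],                  % made-up module name
    Cross = [{s1,[m1,m2]},{s2,[m3]}],               % {Tag,Modules} tuples
    CrossMods = lists:flatmap(fun({_Tag,Mods}) -> Mods end, Cross),
    %% CrossMods =:= [m1,m2,m3]
    CompileMods = Include ++ CrossMods.
    %% CompileMods =:= [some_included_mod,m1,m2,m3]
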
diff --git a/lib/test_server/src/test_server_ctrl.erl b/lib/test_server/src/test_server_ctrl.erl
index bc08c12089..c5c57426b4 100644
--- a/lib/test_server/src/test_server_ctrl.erl
+++ b/lib/test_server/src/test_server_ctrl.erl
@@ -53,8 +53,7 @@
-export([reject_io_reqs/1, get_levels/0, set_levels/3]).
-export([multiply_timetraps/1, scale_timetraps/1, get_timetrap_parameters/0]).
-export([create_priv_dir/1]).
--export([cover/2, cover/3, cover/8,
- cross_cover_analyse/2, cross_cover_analyse/3, trc/1, stop_trace/0]).
+-export([cover/2, cover/3, cover/8, cross_cover_analyse/2, trc/1, stop_trace/0]).
-export([testcase_callback/1]).
-export([set_random_seed/1]).
-export([kill_slavenodes/0]).
@@ -88,17 +87,18 @@
-define(data_dir_suffix, "_data/").
-define(suitelog_name, "suite.log").
-define(coverlog_name, "cover.html").
+-define(raw_coverlog_name, "cover.log").
-define(cross_coverlog_name, "cross_cover.html").
+-define(raw_cross_coverlog_name, "cross_cover.log").
+-define(cross_cover_info, "cross_cover.info").
-define(cover_total, "total_cover.log").
-define(unexpected_io_log, "unexpected_io.log").
-define(last_file, "last_name").
-define(last_link, "last_link").
-define(last_test, "last_test").
-define(html_ext, ".html").
--define(cross_cover_file, "cross.cover").
-define(now, erlang:now()).
--define(pl2a(M), test_server_sup:package_atom(M)).
-define(void_fun, fun() -> ok end).
-define(mod_result(X), if X == skip -> skipped;
X == auto_skip -> skipped;
@@ -409,7 +409,9 @@ cover(CoverFile, Analyse) ->
cover(App, CoverFile, Analyse) ->
controller_call({cover,{App,CoverFile},Analyse,true}).
cover(App, CoverFile, Exclude, Include, Cross, Export, Analyse, Stop) ->
- controller_call({cover,{App,{CoverFile,Exclude,Include,Cross,Export}},Analyse,Stop}).
+ controller_call({cover,
+ {App,{CoverFile,Exclude,Include,Cross,Export}},
+ Analyse,Stop}).
testcase_callback(ModFunc) ->
controller_call({testcase_callback,ModFunc}).
@@ -2277,7 +2279,7 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
set_io_buffering(undefined),
{Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, false, SkipMode),
test_server_sup:framework_call(report, [tc_auto_skip,
- {?pl2a(Mod),Func,Comment}]),
+ {Mod,Func,Comment}]),
run_test_cases_loop(Cases, Config, TimetrapData, ParentMode,
delete_status(Ref, Status));
_ ->
@@ -2286,7 +2288,7 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
wait_for_cases(Ref),
{Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, true, SkipMode),
test_server_sup:framework_call(report, [tc_auto_skip,
- {?pl2a(Mod),Func,Comment}]),
+ {Mod,Func,Comment}]),
case CurrIOHandler of
{Ref,_} ->
%% current_io_handler was set by start conf of this
@@ -2303,7 +2305,7 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
%% this is a skipped end conf for a non-parallel group that's not
%% nested under a parallel group
{Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, false, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{?pl2a(Mod),Func,Comment}]),
+ test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
%% Check if this group is auto skipped because of error in the init conf.
%% If so, check if the parent group is a sequence, and if it is, skip
@@ -2334,7 +2336,7 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
%% this is a skipped end conf for a non-parallel group nested under
%% a parallel group (io buffering is active)
{Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, true, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{?pl2a(Mod),Func,Comment}]),
+ test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
case CurrIOHandler of
{Ref,_} ->
%% current_io_handler was set by start conf of this
@@ -2350,7 +2352,7 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
%% this is a skipped start conf for a group which is not nested
%% under a parallel group
{Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, false, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{?pl2a(Mod),Func,Comment}]),
+ test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
run_test_cases_loop(Cases, Config, TimetrapData, [conf(Ref,[])|Mode], Status);
{_,Ref0} when is_reference(Ref0) ->
%% this is a skipped start conf for a group nested under a parallel group
@@ -2361,7 +2363,7 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
ok
end,
{Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, true, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{?pl2a(Mod),Func,Comment}]),
+ test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
run_test_cases_loop(Cases, Config, TimetrapData, [conf(Ref,[])|Mode], Status)
end;
@@ -2369,7 +2371,7 @@ run_test_cases_loop([{auto_skip_case,{Case,Comment},SkipMode}|Cases],
Config, TimetrapData, Mode, Status) ->
{Mod,Func} = skip_case(auto, undefined, get(test_server_case_num)+1,
Case, Comment, is_io_buffered(), SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{?pl2a(Mod),Func,Comment}]),
+ test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
run_test_cases_loop(Cases, Config, TimetrapData, Mode,
update_status(skipped, Mod, Func, Status));
@@ -2385,7 +2387,7 @@ run_test_cases_loop([{skip_case,{conf,Ref,Case,Comment}}|Cases0],
%% skipped start conf
{skip_cases_upto(Ref, Cases0, Comment, conf, Mode),Config}
end,
- test_server_sup:framework_call(report, [tc_user_skip,{?pl2a(Mod),Func,Comment}]),
+ test_server_sup:framework_call(report, [tc_user_skip,{Mod,Func,Comment}]),
run_test_cases_loop(Cases, Config1, TimetrapData, Mode,
update_status(skipped, Mod, Func, Status));
@@ -2393,7 +2395,7 @@ run_test_cases_loop([{skip_case,{Case,Comment}}|Cases],
Config, TimetrapData, Mode, Status) ->
{Mod,Func} = skip_case(user, undefined, get(test_server_case_num)+1,
Case, Comment, is_io_buffered()),
- test_server_sup:framework_call(report, [tc_user_skip,{?pl2a(Mod),Func,Comment}]),
+ test_server_sup:framework_call(report, [tc_user_skip,{Mod,Func,Comment}]),
run_test_cases_loop(Cases, Config, TimetrapData, Mode,
update_status(skipped, Mod, Func, Status));
@@ -3571,7 +3573,7 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
end,
test_server_sup:framework_call(report,
- [tc_start,{{?pl2a(Mod),Func},MinorName}]),
+ [tc_start,{{Mod,Func},MinorName}]),
print_props((RunInit==skip_init), get_props(Mode)),
GroupName = case get_name(Mode) of
@@ -3756,7 +3758,7 @@ progress(skip, CaseNum, Mod, Func, Loc, Reason, Time,
print(major, "=result skipped", []),
print(1, "*** SKIPPED *** ~s",
[get_info_str(Func, CaseNum, get(test_server_cases))]),
- test_server_sup:framework_call(report, [tc_done,{?pl2a(Mod),Func,
+ test_server_sup:framework_call(report, [tc_done,{Mod,Func,
{skipped,Reason1}}]),
ReasonStr = reason_to_string(Reason1),
ReasonStr1 = lists:flatten([string:strip(S,left) ||
@@ -3787,7 +3789,7 @@ progress(failed, CaseNum, Mod, Func, Loc, timetrap_timeout, T,
print(1, "*** FAILED *** ~s",
[get_info_str(Func, CaseNum, get(test_server_cases))]),
test_server_sup:framework_call(report,
- [tc_done,{?pl2a(Mod),Func,
+ [tc_done,{Mod,Func,
{failed,timetrap_timeout}}]),
FormatLastLoc = test_server_sup:format_loc(get_last_loc(Loc)),
ErrorReason = io_lib:format("{timetrap_timeout,~s}", [FormatLastLoc]),
@@ -3813,7 +3815,7 @@ progress(failed, CaseNum, Mod, Func, Loc, {testcase_aborted,Reason}, _T,
print(1, "*** FAILED *** ~s",
[get_info_str(Func, CaseNum, get(test_server_cases))]),
test_server_sup:framework_call(report,
- [tc_done,{?pl2a(Mod),Func,
+ [tc_done,{Mod,Func,
{failed,testcase_aborted}}]),
FormatLastLoc = test_server_sup:format_loc(get_last_loc(Loc)),
ErrorReason = io_lib:format("{testcase_aborted,~s}", [FormatLastLoc]),
@@ -3838,7 +3840,7 @@ progress(failed, CaseNum, Mod, Func, unknown, Reason, Time,
print(major, "=result failed: ~p, ~p", [Reason,unknown]),
print(1, "*** FAILED *** ~s",
[get_info_str(Func, CaseNum, get(test_server_cases))]),
- test_server_sup:framework_call(report, [tc_done,{?pl2a(Mod),Func,
+ test_server_sup:framework_call(report, [tc_done,{Mod,Func,
{failed,Reason}}]),
TimeStr = io_lib:format(if is_float(Time) -> "~.3fs";
true -> "~w"
@@ -3874,7 +3876,7 @@ progress(failed, CaseNum, Mod, Func, Loc, Reason, Time,
print(major, "=result failed: ~p, ~p", [Reason,Loc]),
print(1, "*** FAILED *** ~s",
[get_info_str(Func, CaseNum, get(test_server_cases))]),
- test_server_sup:framework_call(report, [tc_done,{?pl2a(Mod),Func,
+ test_server_sup:framework_call(report, [tc_done,{Mod,Func,
{failed,Reason}}]),
TimeStr = io_lib:format(if is_float(Time) -> "~.3fs";
true -> "~w"
@@ -3899,7 +3901,7 @@ progress(failed, CaseNum, Mod, Func, Loc, Reason, Time,
progress(ok, _CaseNum, Mod, Func, _Loc, RetVal, Time,
Comment0, {St0,St1}) ->
print(minor, "successfully completed test case", []),
- test_server_sup:framework_call(report, [tc_done,{?pl2a(Mod),Func,ok}]),
+ test_server_sup:framework_call(report, [tc_done,{Mod,Func,ok}]),
Comment =
case RetVal of
{comment,RetComment} ->
@@ -4546,7 +4548,7 @@ collect_case_invoke(Mod, Case, MFA, St) ->
end;
_ ->
Suite = test_server_sup:framework_call(get_suite,
- [?pl2a(Mod),Case],
+ [Mod,Case],
[]),
collect_subcases(Mod, Case, MFA, St, Suite)
end.
@@ -4898,33 +4900,52 @@ pinfo(P) ->
%% - it does not belong to the application, but is listed in the
%% {include,List} part of the App.cover file
%% - it does not belong to the application, but is listed in the
-%% cross.cover file (in the test_server application) under 'all'
-%% or under the tested application.
-%%
-%% The modules listed in the cross.cover file are modules that are
-%% hevily used by other applications than the one they belong
-%% to. After all tests are completed, these modules can be analysed
-%% with coverage data from all tests - see cross_cover_analyse/1. The
-%% result is stored in a file called cross_cover.html in the
-%% run.<timestamp> directory of the application the modules belong
-%% to.
-%%
-%% For example, the lists module is listed in cross.cover to be
-%% included in all tests. lists belongs to the stdlib
-%% application. cross_cover_analyse/1 will create a file named
-%% cross_cover.html under the newest stdlib.logs/run.xxx directory,
-%% where the coverage result for the lists module from all tests is
-%% presented.
-%%
-%% The lists module is also presented in the normal coverage log
-%% for stdlib, but that only includes the coverage achieved by
-%% the stdlib tests themselves.
-%%
-%% The Cross cover file cross.cover contains elements like this:
-%% {App,Modules}.
-%% where App can be an application name or the atom all. The
-%% application (or all applications) shall cover compile the listed
-%% Modules.
+%% {cross,[{Tag,List}]} part of the App.cover file
+%%
+%% The modules listed in the 'cross' part of the cover file are
+%% modules that are heavily used by tests other than the one where
+%% they are explicitly tested. They should then be listed as 'cross'
+%% in the cover file for the test where they are used but do not
+%% belong.
+%%
+%% After all tests are completed, these modules can be analysed
+%% with coverage data from all tests where they are compiled - see
+%% cross_cover_analyse/2. The result is stored in a file called
+%% cross_cover.html in the run.<timestamp> directory of the
+%% test the modules belong to.
+%%
+%% Example:
+%% If the module m1 belongs to system s1 but is heavily used also in
+%% the tests for another system s2, then the cover files for the two
+%% systems could be like this:
+%%
+%% s1.cover:
+%% {include,[m1]}.
+%%
+%% s2.cover:
+%% {include,[....]}. % modules belonging to system s2
+%% {cross,[{s1,[m1]}]}.
+%%
+%% When the tests for both s1 and s2 are completed, run
+%% cross_cover_analyse(Level,[{s1,S1LogDir},{s2,S2LogDir}]), and
+%% the accumulated cover data for m1 will be written to
+%% S1LogDir/[run.<timestamp>/]cross_cover.html
+%%
+%% S1LogDir and S2LogDir are either the run.<timestamp> directories
+%% for the two tests, or the parent directory of these, in which case
+%% the latest run.<timestamp> directory will be chosen.
+%%
+%% Note that the m1 module will also be presented in the normal
+%% coverage log for s1 (due to the include statement in s1.cover), but
+%% that only includes the coverage achieved by the s1 test itself.
+%%
+%% The Tag in the 'cross' statement in the cover file has no other
+%% purpose than mapping the list of modules ([m1] in the example
+%% above) to the correct log directory where it should be included in
+%% the cross_cover.html file (S1LogDir in the example above).
+%% That is, the value of the Tag has no meaning in itself; it could
+%% be foo as well as s1 above, as long as the same Tag is used in the
+%% cover file and in the call to cross_cover_analyse/2.
%% Cover compilation
@@ -4933,8 +4954,7 @@ cover_compile({App,{_File,Exclude,Include,Cross,_Export}}) ->
cover_compile1({App,Exclude,Include,Cross});
cover_compile({App,CoverFile}) ->
- Cross = get_cross_modules(App),
- {Exclude,Include} = read_cover_file(CoverFile),
+ {Exclude,Include,Cross} = read_cover_file(CoverFile),
cover_compile1({App,Exclude,Include,Cross}).
cover_compile1(What) ->
@@ -4945,41 +4965,57 @@ cover_compile1(What) ->
%% (Exclude), and a list of modules that are not members of the
%% application but shall be compiled (Include).
read_cover_file(none) ->
- {[],[]};
+ {[],[],[]};
read_cover_file(CoverFile) ->
case file:consult(CoverFile) of
{ok,List} ->
- case check_cover_file(List, [], []) of
- {ok,Exclude,Include} -> {Exclude,Include};
+ case check_cover_file(List, [], [], []) of
+ {ok,Exclude,Include,Cross} -> {Exclude,Include,Cross};
error ->
io:fwrite("Faulty format of CoverFile ~p\n", [CoverFile]),
- {[],[]}
+ {[],[],[]}
end;
{error,Reason} ->
io:fwrite("Can't read CoverFile ~p\nReason: ~p\n",
[CoverFile,Reason]),
- {[],[]}
+ {[],[],[]}
end.
-check_cover_file([{exclude,all}|Rest], _, Include) ->
- check_cover_file(Rest, all, Include);
-check_cover_file([{exclude,Exclude}|Rest], _, Include) ->
+check_cover_file([{exclude,all}|Rest], _, Include, Cross) ->
+ check_cover_file(Rest, all, Include, Cross);
+check_cover_file([{exclude,Exclude}|Rest], _, Include, Cross) ->
case lists:all(fun(M) -> is_atom(M) end, Exclude) of
true ->
- check_cover_file(Rest, Exclude, Include);
+ check_cover_file(Rest, Exclude, Include, Cross);
false ->
error
end;
-check_cover_file([{include,Include}|Rest], Exclude, _) ->
+check_cover_file([{include,Include}|Rest], Exclude, _, Cross) ->
case lists:all(fun(M) -> is_atom(M) end, Include) of
true ->
- check_cover_file(Rest, Exclude, Include);
+ check_cover_file(Rest, Exclude, Include, Cross);
false ->
error
end;
-check_cover_file([], Exclude, Include) ->
- {ok,Exclude,Include}.
+check_cover_file([{cross,Cross}|Rest], Exclude, Include, _) ->
+ case check_cross(Cross) of
+ true ->
+ check_cover_file(Rest, Exclude, Include, Cross);
+ false ->
+ error
+ end;
+check_cover_file([], Exclude, Include, Cross) ->
+ {ok,Exclude,Include,Cross}.
+check_cross([{Tag,Modules}|Rest]) ->
+ case lists:all(fun(M) -> is_atom(M) end, [Tag|Modules]) of
+ true ->
+ check_cross(Rest);
+ false ->
+ false
+ end;
+check_cross([]) ->
+ true.
%% Cover analysis, per application
@@ -5000,16 +5036,17 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, Stop, TestDir) ->
"<p><a href=\"~s\">Coverdata collected over all tests</a></p>",
[?cross_coverlog_name]),
- {CoverFile,_Included,Excluded} =
+ {CoverFile,_Included,Excluded,Cross} =
case CoverInfo of
- {File,Excl,Incl,_Cross,Export} ->
+ {File,Excl,Incl,Cr,Export} ->
cover:export(Export),
- {File,Incl,Excl};
+ {File,Incl,Excl,Cr};
File ->
- {Excl,Incl} = read_cover_file(File),
- {File,Incl,Excl}
+ {Excl,Incl,Cr} = read_cover_file(File),
+ {File,Incl,Excl,Cr}
end,
io:fwrite(CoverLog, "<p>CoverFile: <code>~p</code>\n", [CoverFile]),
+ write_cross_cover_info(TestDir,Cross),
case length(cover:imported_modules()) of
Imps when Imps > 0 ->
@@ -5022,6 +5059,8 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, Stop, TestDir) ->
io:fwrite(CoverLog, "<p>Excluded module(s): <code>~p</code>\n", [Excluded]),
Coverage = cover_analyse(Analyse, AnalyseMods, Stop),
+ file:write_file(filename:join(TestDir,?raw_coverlog_name),
+ term_to_binary(Coverage)),
case lists:filter(fun({_M,{_,_,_}}) -> false;
(_) -> true
@@ -5043,20 +5082,20 @@ cover_analyse(Analyse, AnalyseMods, Stop) ->
test_server:cover_analyse({Analyse,TestDir}, AnalyseMods, Stop).
-%% Cover analysis, cross application
+%% Cover analysis - accumulated over multiple tests
%% This can be executed on any node after all tests are finished.
-%% Apps = [{App,Dir}]
-%% App = atom(), application name
-%% Dir = string(), the log directory for App, normally where
-%% run.<timestamp> is found.
-%% Modules = [atom()], modules that have been cover compiled during tests
-%% of other apps than the one they belong to.
-cross_cover_analyse(Analyse, Apps) ->
- cross_cover_analyse(Analyse, Apps, get_cross_modules()).
-cross_cover_analyse(Analyse, Apps, Modules) ->
- Apps1 = get_latest_run_dirs(Apps),
- Apps2 = add_cross_modules(Modules,Apps1),
- CoverdataFiles = get_coverdata_files(Apps2),
+%% Analyse = overview | details
+%% TagDirs = [{Tag,Dir}]
+%% Tag = atom(), identifier
+%% Dir = string(), the log directory for Tag; it can be a
+%% run.<timestamp> directory or the parent directory of
+%% such (in which case the latest run.<timestamp> directory
+%% is used)
+cross_cover_analyse(Analyse, TagDirs0) ->
+ TagDirs = get_latest_run_dirs(TagDirs0),
+ TagMods = get_all_cross_info(TagDirs,[]),
+ TagDirMods = add_cross_modules(TagMods,TagDirs),
+ CoverdataFiles = get_coverdata_files(TagDirMods),
lists:foreach(fun(CDF) -> cover:import(CDF) end, CoverdataFiles),
io:fwrite("Cover analysing...\n", []),
DetailsFun =
@@ -5066,39 +5105,52 @@ cross_cover_analyse(Analyse, Apps, Modules) ->
OutFile = filename:join(Dir,
atom_to_list(M) ++
".CROSS_COVER.html"),
- cover:analyse_to_file(M, OutFile, [html]),
- {file,OutFile}
+ case cover:analyse_to_file(M, OutFile, [html]) of
+ {ok,_} ->
+ {file,OutFile};
+ Error ->
+ Error
+ end
end;
_ ->
fun(_,_) -> undefined end
end,
- Coverage = analyse_apps(Apps2, DetailsFun, []),
+ Coverage = analyse_tests(TagDirMods, DetailsFun, []),
cover:stop(),
- write_cross_cover_logs(Coverage,Apps2).
+ write_cross_cover_logs(Coverage,TagDirMods).
-%% For each application from which there are cross cover analysed
+write_cross_cover_info(_Dir,[]) ->
+ ok;
+write_cross_cover_info(Dir,Cross) ->
+ {ok,Fd} = file:open(filename:join(Dir,?cross_cover_info),[write]),
+ lists:foreach(fun(C) -> io:format(Fd,"~p.~n",[C]) end, Cross),
+ file:close(Fd).
+
+%% For each test from which there are cross cover analysed
%% modules, write a cross cover log (cross_cover.html).
-write_cross_cover_logs([{App,Coverage}|T],Apps) ->
- case lists:keyfind(App,1,Apps) of
+write_cross_cover_logs([{Tag,Coverage}|T],TagDirMods) ->
+ case lists:keyfind(Tag,1,TagDirMods) of
{_,Dir,Mods} when Mods=/=[] ->
+ file:write_file(filename:join(Dir,?raw_cross_coverlog_name),
+ term_to_binary(Coverage)),
CoverLogName = filename:join(Dir,?cross_coverlog_name),
{ok,CoverLog} = file:open(CoverLogName, [write]),
write_coverlog_header(CoverLog),
io:fwrite(CoverLog,
"<h1>Coverage results for \'~w\' from all tests</h1>\n",
- [App]),
+ [Tag]),
write_cover_result_table(CoverLog, Coverage),
io:fwrite("Written file ~p\n", [CoverLogName]);
_ ->
ok
end,
- write_cross_cover_logs(T,Apps);
+ write_cross_cover_logs(T,TagDirMods);
write_cross_cover_logs([],_) ->
io:fwrite("done\n", []).
%% Get the latest run.<timestamp> directories
-get_latest_run_dirs([{App,Dir}|Apps]) ->
- [{App,get_latest_run_dir(Dir)} | get_latest_run_dirs(Apps)];
+get_latest_run_dirs([{Tag,Dir}|Rest]) ->
+ [{Tag,get_latest_run_dir(Dir)} | get_latest_run_dirs(Rest)];
get_latest_run_dirs([]) ->
[].
@@ -5117,44 +5169,47 @@ get_latest_dir([_|T],Latest) ->
get_latest_dir([],Latest) ->
Latest.
-%% Associate the cross cover modules with their applications.
-add_cross_modules(Mods,Apps)->
- do_add_cross_modules(Mods,[{App,Dir,[]} || {App,Dir} <- Apps]).
-do_add_cross_modules([Mod|Mods],Apps)->
- App = get_app(Mod),
- NewApps =
- case lists:keytake(App,1,Apps) of
- {value,{App,Dir,AppMods},Rest} ->
- [{App,Dir,lists:umerge([Mod],AppMods)}|Rest];
+get_all_cross_info([{_Tag,Dir}|Rest],Acc) ->
+ case file:consult(filename:join(Dir,?cross_cover_info)) of
+ {ok,TagMods} ->
+ get_all_cross_info(Rest,TagMods++Acc);
+ _ ->
+ get_all_cross_info(Rest,Acc)
+ end;
+get_all_cross_info([],Acc) ->
+ Acc.
+
+%% Associate the cross cover modules with their log directories
+add_cross_modules(TagMods,TagDirs)->
+ do_add_cross_modules(TagMods,[{Tag,Dir,[]} || {Tag,Dir} <- TagDirs]).
+do_add_cross_modules([{Tag,Mods1}|TagMods],TagDirMods)->
+ NewTagDirMods =
+ case lists:keytake(Tag,1,TagDirMods) of
+ {value,{Tag,Dir,Mods},Rest} ->
+ [{Tag,Dir,lists:umerge(lists:sort(Mods1),Mods)}|Rest];
false ->
- Apps
+ TagDirMods
end,
- do_add_cross_modules(Mods,NewApps);
-do_add_cross_modules([],Apps) ->
- %% Just to get the modules in the same order as app-only cover log
- [{App,Dir,lists:reverse(Mods)} || {App,Dir,Mods} <- Apps].
-
-get_app(Module) ->
- Beam = code:which(Module),
- AppDir = filename:basename(filename:dirname(filename:dirname(Beam))),
- [AppStr|_] = string:tokens(AppDir,"-"),
- list_to_atom(AppStr).
+ do_add_cross_modules(TagMods,NewTagDirMods);
+do_add_cross_modules([],TagDirMods) ->
+ %% Just to get the modules in the same order as in the normal cover log
+ [{Tag,Dir,lists:reverse(Mods)} || {Tag,Dir,Mods} <- TagDirMods].
%% Find all exported coverdata files.
-get_coverdata_files(Apps) ->
+get_coverdata_files(TagDirMods) ->
lists:flatmap(
- fun({_,LatestAppDir,_}) ->
- filelib:wildcard(filename:join(LatestAppDir,"all.coverdata"))
+ fun({_,LatestDir,_}) ->
+ filelib:wildcard(filename:join(LatestDir,"all.coverdata"))
end,
- Apps).
+ TagDirMods).
-%% For each application, analyse all modules
+%% For each test, analyse all modules
%% Used for cross cover analysis.
-analyse_apps([{App,LastTest,Modules}|T], DetailsFun, Acc) ->
+analyse_tests([{Tag,LastTest,Modules}|T], DetailsFun, Acc) ->
Cov = analyse_modules(LastTest, Modules, DetailsFun, []),
- analyse_apps(T, DetailsFun, [{App,Cov}|Acc]);
-analyse_apps([], _DetailsFun, Acc) ->
+ analyse_tests(T, DetailsFun, [{Tag,Cov}|Acc]);
+analyse_tests([], _DetailsFun, Acc) ->
Acc.
%% Analyse each module
@@ -5167,27 +5222,6 @@ analyse_modules(_Dir, [], _DetailsFun, Acc) ->
Acc.
-%% Read the cross cover file (cross.cover)
-get_cross_modules() ->
- get_cross_modules(all).
-get_cross_modules(App) ->
- case file:consult(?cross_cover_file) of
- {ok,List} ->
- get_cross_modules(App, List, []);
- _X ->
- []
- end.
-
-get_cross_modules(App, [{_To,Modules}|T], Acc) when App==all->
- get_cross_modules(App, T, Acc ++ Modules);
-get_cross_modules(App, [{To,Modules}|T], Acc) when To==App; To==all->
- get_cross_modules(App, T, Acc ++ Modules);
-get_cross_modules(App, [_H|T], Acc) ->
- get_cross_modules(App, T, Acc);
-get_cross_modules(_App, [], Acc) ->
- Acc.
-
-
%% Support functions for writing the cover logs (both cross and normal)
write_coverlog_header(CoverLog) ->
case catch
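
Two on-disk artifacts introduced above tie the pieces together: write_cross_cover_info/2 stores the {Tag,Modules} terms from the cover file as cross_cover.info in the test's run directory (later consulted by get_all_cross_info/2), and the analysed coverage is additionally dumped in raw form with term_to_binary/1 as cover.log and cross_cover.log. A sketch of reading these files back, assuming RunDir is bound to a run.<timestamp> directory:

    %% The cross cover info is a plain term file:
    {ok,CrossInfo} = file:consult(filename:join(RunDir, "cross_cover.info")),
    %% e.g. CrossInfo =:= [{s1,[m1]}]

    %% The raw coverage logs are binary term dumps; each entry is a
    %% {Module,{Covered,NotCovered,_Details}} tuple (this is the shape the
    %% updated test_server_SUITE matches further down):
    {ok,Bin} = file:read_file(filename:join(RunDir, "cover.log")),
    Coverage = binary_to_term(Bin).
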
diff --git a/lib/test_server/src/test_server_sup.erl b/lib/test_server/src/test_server_sup.erl
index c7553cccb5..a6d426887e 100644
--- a/lib/test_server/src/test_server_sup.erl
+++ b/lib/test_server/src/test_server_sup.erl
@@ -28,7 +28,7 @@
get_username/0, get_os_family/0,
hostatom/0, hostatom/1, hoststr/0, hoststr/1,
framework_call/2,framework_call/3,framework_call/4,
- format_loc/1, package_str/1, package_atom/1,
+ format_loc/1,
call_trace/1]).
-include("test_server_internal.hrl").
-define(crash_dump_tar,"crash_dumps.tar.gz").
@@ -553,7 +553,7 @@ format_loc([{Mod,Func,Line}|Rest]) ->
format_loc([{Mod,LineOrFunc}]) ->
format_loc({Mod,LineOrFunc});
format_loc({Mod,Func}) when is_atom(Func) ->
- io_lib:format("{~s,~w}",[package_str(Mod),Func]);
+ io_lib:format("{~w,~w}",[Mod,Func]);
format_loc(Loc) ->
io_lib:format("~p",[Loc]).
@@ -562,15 +562,15 @@ format_loc1([{Mod,Func,Line}]) ->
format_loc1([{Mod,Func,Line}|Rest]) ->
[" ",format_loc1({Mod,Func,Line}),",\n"|format_loc1(Rest)];
format_loc1({Mod,Func,Line}) ->
- ModStr = package_str(Mod),
+ ModStr = atom_to_list(Mod),
case {lists:member(no_src, get(test_server_logopts)),
lists:reverse(ModStr)} of
{false,[$E,$T,$I,$U,$S,$_|_]} ->
- io_lib:format("{~s,~w,<a href=\"~s~s#~w\">~w</a>}",
- [ModStr,Func,downcase(ModStr),?src_listing_ext,
+ io_lib:format("{~w,~w,<a href=\"~s~s#~w\">~w</a>}",
+ [Mod,Func,downcase(ModStr),?src_listing_ext,
Line,Line]);
_ ->
- io_lib:format("{~s,~w,~w}",[ModStr,Func,Line])
+ io_lib:format("{~w,~w,~w}",[Mod,Func,Line])
end.
downcase(S) -> downcase(S, []).
@@ -581,22 +581,6 @@ downcase([C|Rest], Result) ->
downcase([], Result) ->
lists:reverse(Result).
-package_str(Mod) when is_atom(Mod) ->
- atom_to_list(Mod);
-package_str(Mod) when is_list(Mod), is_atom(hd(Mod)) ->
- %% convert [s1,s2] -> "s1.s2"
- [_|M] = lists:flatten(["."++atom_to_list(S) || S <- Mod]),
- M;
-package_str(Mod) when is_list(Mod) ->
- Mod.
-
-package_atom(Mod) when is_atom(Mod) ->
- Mod;
-package_atom(Mod) when is_list(Mod), is_atom(hd(Mod)) ->
- list_to_atom(package_str(Mod));
-package_atom(Mod) when is_list(Mod) ->
- list_to_atom(Mod).
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% call_trace(TraceSpecFile) -> ok
%%
diff --git a/lib/test_server/src/ts.erl b/lib/test_server/src/ts.erl
index 115e783070..cfd7161dbd 100644
--- a/lib/test_server/src/ts.erl
+++ b/lib/test_server/src/ts.erl
@@ -160,8 +160,8 @@ help(installed) ->
" the given run options\n",
" ts:cross_cover_analyse(Level)\n"
" - Used after ts:run with option cover or \n"
- " cover_details. Analyses modules specified in\n"
- " cross.cover.\n"
+ " cover_details. Analyses modules specified with\n"
+ " a 'cross' statement in the cover spec file.\n"
" Level can be 'overview' or 'details'.\n",
" ts:compile_testcases()~n"
" ts:compile_testcases(Apps)~n"
@@ -528,8 +528,7 @@ cross_cover_analyse([Level]) ->
cross_cover_analyse(Level);
cross_cover_analyse(Level) ->
Apps = get_last_app_tests(),
- Modules = get_cross_modules(Apps,[]),
- test_server_ctrl:cross_cover_analyse(Level,Apps,Modules).
+ test_server_ctrl:cross_cover_analyse(Level,Apps).
get_last_app_tests() ->
AllTests = filelib:wildcard(filename:join(["*","*_test.logs"])),
@@ -558,30 +557,6 @@ get_last_app_tests([Dir|Dirs],RE,Acc) ->
get_last_app_tests([],_,Acc) ->
Acc.
-get_cross_modules([{App,_}|Apps],Acc) ->
- Mods = cross_modules(App),
- get_cross_modules(Apps,lists:umerge(Mods,Acc));
-get_cross_modules([],Acc) ->
- Acc.
-
-cross_modules(App) ->
- case default_coverfile(App) of
- none ->
- [];
- File ->
- case catch file:consult(File) of
- {ok,CoverSpec} ->
- case lists:keyfind(cross_apps,1,CoverSpec) of
- false ->
- [];
- {cross_apps,App,Modules} ->
- lists:usort(Modules)
- end;
- _ ->
- []
- end
- end.
-
%%% Implementation.
check_and_run(Fun) ->
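
With the ts change above, the Tag values passed on to test_server_ctrl:cross_cover_analyse/2 are simply the application names that get_last_app_tests/0 finds in the log tree, so the user-facing call is unchanged. Usage sketch:

    %% After ts:run with option cover or cover_details has finished for
    %% the applications of interest, run from the same directory:
    ts:cross_cover_analyse(details).
    %% which now boils down to
    %% test_server_ctrl:cross_cover_analyse(details,
    %%         [{App1,App1LogDir}, {App2,App2LogDir}, ...])
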
diff --git a/lib/test_server/src/ts_lib.erl b/lib/test_server/src/ts_lib.erl
index d9a699ca9f..c0200ab67c 100644
--- a/lib/test_server/src/ts_lib.erl
+++ b/lib/test_server/src/ts_lib.erl
@@ -143,7 +143,6 @@ suite_order(inets) -> 28;
suite_order(asn1) -> 30;
suite_order(os_mon) -> 32;
suite_order(snmp) -> 38;
-suite_order(mnemosyne) -> 40;
suite_order(mnesia_session) -> 42;
suite_order(mnesia) -> 44;
suite_order(system) -> 999; %% IMPORTANT: system SHOULD always be last!
diff --git a/lib/test_server/test/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE.erl
index 95a3423fef..fb82a87fd0 100644
--- a/lib/test_server/test/test_server_SUITE.erl
+++ b/lib/test_server/test/test_server_SUITE.erl
@@ -80,7 +80,7 @@ all() ->
[test_server_SUITE, test_server_parallel01_SUITE,
test_server_conf02_SUITE, test_server_conf01_SUITE,
test_server_skip_SUITE, test_server_shuffle01_SUITE,
- test_server_break_SUITE].
+ test_server_break_SUITE, test_server_cover_SUITE].
%%--------------------------------------------------------------------
@@ -93,37 +93,95 @@ test_server_SUITE(Config) ->
% rpc:call(Node,dbg, tracer,[]),
% rpc:call(Node,dbg, p,[all,c]),
% rpc:call(Node,dbg, tpl,[test_server_ctrl,x]),
- run_test_server_tests("test_server_SUITE", 38, 1, 30,
- 19, 9, 1, 11, 2, 25, Config).
+ run_test_server_tests("test_server_SUITE",
+ [{test_server_SUITE,skip_case7,"SKIPPED!"}],
+ 38, 1, 30, 19, 9, 1, 11, 2, 25, Config).
test_server_parallel01_SUITE(Config) ->
- run_test_server_tests("test_server_parallel01_SUITE", 37, 0, 19,
- 19, 0, 0, 0, 0, 37, Config).
+ run_test_server_tests("test_server_parallel01_SUITE", [],
+ 37, 0, 19, 19, 0, 0, 0, 0, 37, Config).
test_server_shuffle01_SUITE(Config) ->
- run_test_server_tests("test_server_shuffle01_SUITE", 130, 0, 0,
- 76, 0, 0, 0, 0, 130, Config).
+ run_test_server_tests("test_server_shuffle01_SUITE", [],
+ 130, 0, 0, 76, 0, 0, 0, 0, 130, Config).
test_server_skip_SUITE(Config) ->
- run_test_server_tests("test_server_skip_SUITE", 3, 0, 1,
- 0, 0, 1, 3, 0, 0, Config).
+ run_test_server_tests("test_server_skip_SUITE", [],
+ 3, 0, 1, 0, 0, 1, 3, 0, 0, Config).
test_server_conf01_SUITE(Config) ->
- run_test_server_tests("test_server_conf01_SUITE", 24, 0, 12,
- 12, 0, 0, 0, 0, 24, Config).
+ run_test_server_tests("test_server_conf01_SUITE", [],
+ 24, 0, 12, 12, 0, 0, 0, 0, 24, Config).
test_server_conf02_SUITE(Config) ->
- run_test_server_tests("test_server_conf02_SUITE", 26, 0, 12,
- 12, 0, 0, 0, 0, 26, Config).
+ run_test_server_tests("test_server_conf02_SUITE", [],
+ 26, 0, 12, 12, 0, 0, 0, 0, 26, Config).
test_server_break_SUITE(Config) ->
- D = run_test_server_tests("test_server_break_SUITE", 8, 2, 6,
- 4, 0, 0, 0, 2, 6, Config),
- D.
+ run_test_server_tests("test_server_break_SUITE", [],
+ 8, 2, 6, 4, 0, 0, 0, 2, 6, Config).
-run_test_server_tests(SuiteName, NCases, NFail, NExpected, NSucc,
+test_server_cover_SUITE(Config) ->
+ case test_server:is_cover() of
+ true ->
+ {skip, "Cover already running"};
+ false ->
+ PrivDir = ?config(priv_dir,Config),
+
+ %% Test suite has two test cases
+ %% tc1 calls cover_helper:foo/0
+ %% tc2 calls cover_helper:bar/0
+ %% Each function in cover_helper is one line.
+ %%
+ %% First test run skips tc2, so only cover_helper:foo/0 is executed.
+ %% Cover file specifies to include cover_helper in this test run.
+ CoverFile1 = filename:join(PrivDir,"t1.cover"),
+ CoverSpec1 = {include,[cover_helper]},
+ file:write_file(CoverFile1,io_lib:format("~p.~n",[CoverSpec1])),
+ run_test_server_tests("test_server_cover_SUITE",
+ [{test_server_cover_SUITE,tc2,"SKIPPED!"}],
+ 4, 0, 2, 1, 1, 0, 1, 0, 3,
+ CoverFile1, Config),
+
+ %% Next test run skips tc1, so only cover_helper:bar/0 is executed.
+ %% Cover file specifies cross compilation of cover_helper
+ CoverFile2 = filename:join(PrivDir,"t2.cover"),
+ CoverSpec2 = {cross,[{t1,[cover_helper]}]},
+ file:write_file(CoverFile2,io_lib:format("~p.~n",[CoverSpec2])),
+ run_test_server_tests("test_server_cover_SUITE",
+ [{test_server_cover_SUITE,tc1,"SKIPPED!"}],
+ 4, 0, 2, 1, 1, 0, 1, 0, 3, CoverFile2, Config),
+
+ %% Cross cover analyse
+ WorkDir = ?config(work_dir,Config),
+ WC = filename:join([WorkDir,"test_server_cover_SUITE.logs","run.*"]),
+ [D2,D1|_] = lists:reverse(lists:sort(filelib:wildcard(WC))),
+ TagDirs = [{t1,D1},{t2,D2}],
+ test_server_ctrl:cross_cover_analyse(details,TagDirs),
+
+ %% Check that the cover log shows only what is actually included
+ %% in the test, and that the cross cover log shows the accumulated
+ %% result.
+ {ok,Cover1} = file:read_file(filename:join(D1,"cover.log")),
+ [{cover_helper,{1,1,_}}] = binary_to_term(Cover1),
+ {ok,Cover2} = file:read_file(filename:join(D2,"cover.log")),
+ [] = binary_to_term(Cover2),
+ {ok,Cross} = file:read_file(filename:join(D1,"cross_cover.log")),
+ [{cover_helper,{2,0,_}}] = binary_to_term(Cross),
+ ok
+ end.
+
+
+run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc,
NUsrSkip, NAutoSkip,
NActualSkip, NActualFail, NActualSucc, Config) ->
+ run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc,
+ NUsrSkip, NAutoSkip,
+ NActualSkip, NActualFail, NActualSucc, false, Config).
+
+run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc,
+ NUsrSkip, NAutoSkip,
+ NActualSkip, NActualFail, NActualSucc, Cover, Config) ->
WorkDir = proplists:get_value(work_dir, Config),
ct:log("<a href=\"file://~s\">Test case log files</a>\n",
@@ -131,11 +189,17 @@ run_test_server_tests(SuiteName, NCases, NFail, NExpected, NSucc,
Node = proplists:get_value(node, Config),
{ok,_Pid} = rpc:call(Node,test_server_ctrl, start, []),
+ case Cover of
+ false ->
+ ok;
+ _ ->
+ rpc:call(Node,test_server_ctrl,cover,[Cover,details])
+ end,
rpc:call(Node,
test_server_ctrl,add_dir_with_skip,
[SuiteName,
[proplists:get_value(data_dir,Config)],SuiteName,
- [{test_server_SUITE,skip_case7,"SKIPPED!"}]]),
+ Skip]),
until(fun() ->
rpc:call(Node,test_server_ctrl,jobs,[]) =:= []
diff --git a/lib/test_server/test/test_server_SUITE_data/Makefile.src b/lib/test_server/test/test_server_SUITE_data/Makefile.src
index ec8ddd78b0..c770627f04 100644
--- a/lib/test_server/test/test_server_SUITE_data/Makefile.src
+++ b/lib/test_server/test/test_server_SUITE_data/Makefile.src
@@ -5,4 +5,6 @@ all:
erlc test_server_shuffle01_SUITE.erl
erlc test_server_conf02_SUITE.erl
erlc test_server_skip_SUITE.erl
- erlc test_server_break_SUITE.erl
\ No newline at end of file
+ erlc test_server_break_SUITE.erl
+ erlc test_server_cover_SUITE.erl
+ erlc +debug_info test_server_cover_SUITE_data/cover_helper.erl
\ No newline at end of file
diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl
index 70e30a3334..d9f009679a 100644
--- a/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl
+++ b/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl
@@ -41,7 +41,7 @@ init_per_suite(Config) ->
spawn(fun break_and_continue_sup/0),
Config.
-end_per_suite(Config) ->
+end_per_suite(_Config) ->
ok.
init_per_testcase(Case,Config) when Case==break_in_init_tc ->
@@ -90,19 +90,19 @@ break_in_end_tc_after_fail(Config) when is_list(Config) ->
break_in_end_tc_after_abort(Config) when is_list(Config) ->
?t:adjusted_sleep(2000). % will cause a timetrap timeout
-%%%-----------------------------------------------------------------
-%%% Internal functions
-
%% This test case checks that all breaks in previous test cases was
%% also continued, and that the break lasted as long as expected.
%% The reason for this is that some of the breaks above are in
%% end_per_testcase, and failures there will only produce a warning,
%% not an error - so this is to catch the error for real.
-check_all_breaks(Config) ->
+check_all_breaks(Config) when is_list(Config) ->
break_and_continue_sup ! {done,self()},
receive {Breaks,Continued} ->
check_all_breaks(Breaks,Continued)
end.
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
check_all_breaks([{From,Case,T,Start}|Breaks],[{From,End}|Continued]) ->
Diff = timer:now_diff(End,Start),
diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl
new file mode 100644
index 0000000000..b1ae70a302
--- /dev/null
+++ b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl
@@ -0,0 +1,58 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_cover_SUITE).
+
+-export([all/1, init_per_suite/1, end_per_suite/1]).
+-export([init_per_testcase/2, end_per_testcase/2]).
+-export([tc1/1, tc2/1]).
+
+-include_lib("test_server/include/test_server.hrl").
+
+all(suite) ->
+ [tc1,tc2].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(_Case,Config) ->
+ Dog = ?t:timetrap({minutes,10}),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Case,Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+tc1(Config) when is_list(Config) ->
+ cover_helper:foo(),
+ ok.
+
+tc2(Config) when is_list(Config) ->
+ cover_helper:bar(),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl
new file mode 100644
index 0000000000..6c74eb4e8a
--- /dev/null
+++ b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl
@@ -0,0 +1,4 @@
+-module(cover_helper).
+-compile(export_all).
+foo() -> ok.
+bar() -> ok.
diff --git a/lib/test_server/test/test_server_test_lib.erl b/lib/test_server/test/test_server_test_lib.erl
index 4e89abf308..d466aa0110 100644
--- a/lib/test_server/test/test_server_test_lib.erl
+++ b/lib/test_server/test/test_server_test_lib.erl
@@ -83,14 +83,21 @@ start_slave(Config,_Level) ->
post_end_per_testcase(_TC, Config, Return, State) ->
Node = proplists:get_value(node, Config),
- case test_server:is_cover() of
- true ->
- cover:flush(Node);
- false ->
- ok
+ Cover = test_server:is_cover(),
+ if Cover-> cover:flush(Node);
+ true -> ok
end,
+ erlang:monitor_node(Node, true),
slave:stop(Node),
-
+ receive
+ {nodedown, Node} ->
+ if Cover -> cover:stop(Node);
+ true -> ok
+ end
+ after 5000 ->
+ erlang:monitor_node(Node, false),
+ receive {nodedown, Node} -> ok after 0 -> ok end %flush
+ end,
{Return, State}.
%% Parse an .suite log file
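
The same shutdown sequence now appears in three places in this patch (twice in test_server:stop_node/1 and once in post_end_per_testcase/4 above): monitor the node before stopping it, wait for the nodedown message, and on timeout drop the monitor and flush any late message. A generic sketch of the pattern; the helper name is made up:

    %% Illustrative only, not part of the patch.
    stop_and_wait(Node, Timeout) ->
        erlang:monitor_node(Node, true),
        slave:stop(Node),
        receive
            {nodedown, Node} ->
                true
        after Timeout ->
                erlang:monitor_node(Node, false),
                receive {nodedown, Node} -> ok after 0 -> ok end, %flush
                false
        end.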