Diffstat (limited to 'lib/common_test')
-rw-r--r--  lib/common_test/doc/src/common_test_app.xml                     |  11
-rw-r--r--  lib/common_test/doc/src/cover_chapter.xml                       |  87
-rw-r--r--  lib/common_test/doc/src/ct_hooks_chapter.xml                    |  43
-rw-r--r--  lib/common_test/doc/src/notes.xml                               |  50
-rw-r--r--  lib/common_test/doc/src/run_test_chapter.xml                    |   6
-rw-r--r--  lib/common_test/doc/src/write_test_chapter.xml                  |  52
-rw-r--r--  lib/common_test/src/ct_cover.erl                                |  32
-rw-r--r--  lib/common_test/src/ct_netconfc.erl                             |   6
-rw-r--r--  lib/common_test/src/ct_slave.erl                                |  28
-rw-r--r--  lib/common_test/src/cth_surefire.erl                            | 183
-rw-r--r--  lib/common_test/test/Makefile                                   |   3
-rw-r--r--  lib/common_test/test/common_test.cover                          |  16
-rw-r--r--  lib/common_test/test/ct_cover_SUITE.erl                         |  53
-rw-r--r--  lib/common_test/test/ct_surefire_SUITE.erl                      | 351
-rw-r--r--  lib/common_test/test/ct_surefire_SUITE_data/surefire_SUITE.erl  |  92
-rw-r--r--  lib/common_test/test/ct_test_support.erl                        |  31
16 files changed, 921 insertions, 123 deletions
diff --git a/lib/common_test/doc/src/common_test_app.xml b/lib/common_test/doc/src/common_test_app.xml
index b6d4a633cb..151159ad69 100644
--- a/lib/common_test/doc/src/common_test_app.xml
+++ b/lib/common_test/doc/src/common_test_app.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2003</year><year>2012</year>
+ <year>2003</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -170,7 +170,9 @@
<v> UserData = term()</v>
<v> Conns = [atom()]</v>
<v> CSSFile = string()</v>
- <v> CTHs = [CTHModule | {CTHModule, CTHInitArgs} | {CTHModule, CTHInitArgs, CTHPriority}]</v>
+ <v> CTHs = [CTHModule |</v>
+ <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs} |</v>
+ <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs, CTHPriority}]</v>
<v> CTHModule = atom()</v>
<v> CTHInitArgs = term()</v>
</type>
@@ -297,8 +299,9 @@
<v> UserData = term()</v>
<v> Conns = [atom()]</v>
<v> CSSFile = string()</v>
- <v> CTHs = [CTHModule | {CTHModule, CTHInitArgs} |
- {CTHModule, CTHInitArgs, CTHPriority}]</v>
+ <v> CTHs = [CTHModule |</v>
+ <v> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs} |</v>
+ <v> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs, CTHPriority}]</v>
<v> CTHModule = atom()</v>
<v> CTHInitArgs = term()</v>
</type>
diff --git a/lib/common_test/doc/src/cover_chapter.xml b/lib/common_test/doc/src/cover_chapter.xml
index b2e64bfff0..4fa92d5583 100644
--- a/lib/common_test/doc/src/cover_chapter.xml
+++ b/lib/common_test/doc/src/cover_chapter.xml
@@ -108,8 +108,8 @@
specifications</seealso>).</p>
</section>
+ <marker id="cover_stop"></marker>
<section>
- <marker id="cover_stop"></marker>
<title>Stopping the cover tool when tests are completed</title>
<p>By default the Cover tool is automatically stopped when the
tests are completed. This causes the original (non cover
@@ -175,6 +175,11 @@
%% Specific modules to exclude in cover.
{excl_mods, Mods}.
+
+ %% Cross cover compilation
+ %% Tag = atom(), an identifier for a test run
+   %% Mods = [atom()], modules to compile for accumulated analysis
+ {cross,[{Tag,Mods}]}.
</pre>
<p>The <c>incl_dirs_r</c> and <c>excl_dirs_r</c> terms tell Common
@@ -190,6 +195,81 @@
specification file for Common Test).</p>
</section>
+ <marker id="cross_cover"/>
+ <section>
+ <title>Cross cover analysis</title>
+ <p>The cross cover mechanism allows cover analysis of modules
+ across multiple tests. It is useful if some code, e.g. a library
+ module, is used by many different tests and the accumulated cover
+ result is desirable.</p>
+
+    <p>This can of course also be achieved in a more customized way by
+    using the <c>export</c> parameter in the cover specification and
+    analysing the result offline, but the cross cover mechanism is a
+    built-in solution which also provides the logging.</p>
+
+    <p>The mechanism is most easily explained with an example:</p>
+
+ <p>Let's say that there are two systems, <c>s1</c> and <c>s2</c>,
+ which are tested in separate test runs. System <c>s1</c> contains
+ a library module <c>m1</c> which is tested by the <c>s1</c> test
+ run and is included in <c>s1</c>'s cover specification:</p>
+
+<code type="none">
+s1.cover:
+ {incl_mods,[m1]}.</code>
+
+ <p>When analysing code coverage, the result for <c>m1</c> can be
+ seen in the cover log in the <c>s1</c> test result.</p>
+
+ <p>Now, let's imagine that since <c>m1</c> is a library module, it
+ is also used quite a bit by system <c>s2</c>. The <c>s2</c> test
+ run does not specifically test <c>m1</c>, but it might still be
+    interesting to see which parts of <c>m1</c> are actually covered by
+ the <c>s2</c> tests. To do this, <c>m1</c> could be included also
+ in <c>s2</c>'s cover specification:</p>
+
+<code type="none">
+s2.cover:
+ {incl_mods,[m1]}.</code>
+
+    <p>This would also give an entry for <c>m1</c> in the cover log
+ for the <c>s2</c> test run. The problem is that this would only
+ reflect the coverage by <c>s2</c> tests, not the accumulated
+ result over <c>s1</c> and <c>s2</c>. And this is where the cross
+ cover mechanism comes in handy.</p>
+
+ <p>If instead the cover specification for <c>s2</c> was like
+ this:</p>
+
+<code type="none">
+s2.cover:
+ {cross,[{s1,[m1]}]}.</code>
+
+ <p>then <c>m1</c> would be cover compiled in the <c>s2</c> test
+ run, but not shown in the coverage log. Instead, if
+ <c>ct_cover:cross_cover_analyse/2</c> is called after both
+ <c>s1</c> and <c>s2</c> test runs are completed, the accumulated
+ result for <c>m1</c> would be available in the cross cover log for
+ the <c>s1</c> test run.</p>
+
+ <p>The call to the analyse function must be like this:</p>
+
+<code type="none">
+ct_cover:cross_cover_analyse(Level, [{s1,S1LogDir},{s2,S2LogDir}]).</code>
+
+ <p>where <c>S1LogDir</c> and <c>S2LogDir</c> are the directories
+ named <c>&lt;TestName&gt;.logs</c> for each test respectively.</p>
+
+ <p>Note the tags <c>s1</c> and <c>s2</c> which are used in the
+ cover specification file and in the call to
+    <c>ct_cover:cross_cover_analyse/2</c>. The point of these is only
+ to map the modules specified in the cover specification to the log
+ directory specified in the call to the analyse function. The name
+ of the tag has no meaning beyond this.</p>
+
+ </section>
+
<section>
<title>Logging</title>
<p>To view the result of a code coverage test, follow the
@@ -197,6 +277,11 @@
takes you to the code coverage overview page. If you have
successfully performed a detailed coverage analysis, you
find links to each individual module coverage page here.</p>
+
+ <p>If cross cover analysis has been performed, and there are
+      accumulated coverage results for the current test, then the
+ "Coverdata collected over all tests" link will take you to these
+ results.</p>
</section>
</chapter>
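
To tie the cross cover documentation above together, here is a minimal
end-to-end sketch based only on the examples in that chapter; the tags
(s1, s2), the module name (m1) and the log directories are illustrative:

    %% s1.cover - m1 is tested and reported by the s1 test run
    {incl_mods,[m1]}.

    %% s2.cover - cover compile m1 in the s2 test run under the tag s1,
    %% but keep it out of s2's own coverage log
    {cross,[{s1,[m1]}]}.

    %% After both test runs have completed:
    ct_cover:cross_cover_analyse(details,
                                 [{s1,"/tests/s1_test.logs"},
                                  {s2,"/tests/s2_test.logs"}]).

The accumulated result for m1 then appears in the cross cover log of the
s1 test run.
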
diff --git a/lib/common_test/doc/src/ct_hooks_chapter.xml b/lib/common_test/doc/src/ct_hooks_chapter.xml
index 86237f5fc1..fe871eb516 100644
--- a/lib/common_test/doc/src/ct_hooks_chapter.xml
+++ b/lib/common_test/doc/src/ct_hooks_chapter.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2011</year><year>2012</year>
+ <year>2011</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -439,14 +439,14 @@ terminate(State) ->
<table>
<row>
- <cell><em>CTH Name</em></cell>
- <cell><em>Is Built-in</em></cell>
- <cell><em>Description</em></cell>
+ <cell align="left"><em>CTH Name</em></cell>
+ <cell align="left"><em>Is Built-in</em></cell>
+ <cell align="left"><em>Description</em></cell>
</row>
<row>
- <cell>cth_log_redirect</cell>
- <cell>yes</cell>
- <cell>Captures all error_logger and SASL logging events and prints them
+ <cell align="left">cth_log_redirect</cell>
+ <cell align="left">yes</cell>
+ <cell align="left">Captures all error_logger and SASL logging events and prints them
to the current test case log. If an event can not be associated with a
testcase it will be printed in the common test framework log. This will
happen for testcases which are run in parallel and events which occur
@@ -455,14 +455,29 @@ terminate(State) ->
using the normal SASL mechanisms. </cell>
</row>
<row>
- <cell>cth_surefire</cell>
- <cell>no</cell>
- <cell>Captures all test results and outputs them as surefire XML into
- a file. The file which is created is by default called junit_report.xml.
- The name can be by setting the path option for this hook. e.g.
+ <cell align="left">cth_surefire</cell>
+ <cell align="left">no</cell>
+ <cell align="left"><p>Captures all test results and outputs them as surefire
+ XML into a file. The file which is created is by default
+ called junit_report.xml. The file name can be changed by
+ setting the <c>path</c> option for this hook, e.g.</p>
+
<code>-ct_hooks cth_surefire [{path,"/tmp/report.xml"}]</code>
- Surefire XML can forinstance be used by Jenkins to display test
- results.</cell>
+
+ <p>If the <c>url_base</c> option is set, an additional
+ attribute named <c>url</c> will be added to each
+ <c>testsuite</c> and <c>testcase</c> XML element. The value will
+ be constructed from the <c>url_base</c> and a relative path
+ to the test suite or test case log respectively, e.g.</p>
+
+ <code>-ct_hooks cth_surefire [{url_base, "http://myserver.com/"}]</code>
+ <p>will give a url attribute value similar to</p>
+
+ <code>"http://myserver.com/[email protected]_11.19.39/
+x86_64-unknown-linux-gnu.my_test.logs/run.2012-12-12_11.19.39/suite.log.html"</code>
+
+ <p>Surefire XML can for instance be used by Jenkins to display test
+ results.</p></cell>
</row>
</table>
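
As a complement to the command line examples in the table above, the same
cth_surefire options can be given when Common Test is started from Erlang
code; a minimal sketch, where the suite name, report path and URL are
examples only:

    ct:run_test([{suite,"my_SUITE"},
                 {ct_hooks,[{cth_surefire,[{path,"/tmp/report.xml"},
                                           {url_base,"http://myserver.com/"}]}]}]).
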
diff --git a/lib/common_test/doc/src/notes.xml b/lib/common_test/doc/src/notes.xml
index 7e33b71de1..8c3b13951d 100644
--- a/lib/common_test/doc/src/notes.xml
+++ b/lib/common_test/doc/src/notes.xml
@@ -32,6 +32,56 @@
<file>notes.xml</file>
</header>
+<section><title>Common_Test 1.6.3.1</title>
+
+ <section><title>Known Bugs and Problems</title>
+ <list>
+ <item>
+ <p>
+ The following corrections/changes are done in the
+ cth_surefire hook:</p>
+ <p>
+ <list> <item> Earlier there would always be a
+ 'properties' element under the 'testsuites' element. This
+ would exist even if there were no 'property' element
+ inside it. This has been changed so if there are no
+ 'property' elements to display, then there will not be a
+ 'properties' element either. </item> <item> The XML file
+          will now (unless otherwise specified) be stored in the top
+          log directory. Earlier, the default directory would be
+          the current working directory for the Erlang node, which
+ would mostly, but not always, be the top log directory.
+ </item> <item> The 'hostname' attribute in the
+ 'testsuite' element would earlier never have the correct
+ value. This has been corrected. </item> <item> The
+ 'errors' attribute in the 'testsuite' element would
+ earlier display the number of failed testcases. This has
+ been changed and will now always have the value 0, while
+ the 'failures' attribute will show the number of failed
+ testcases. </item> <item> A new attribute 'skipped' is
+ added to the 'testsuite' element. This will display the
+ number of skipped testcases. These would earlier be
+ included in the number of failed test cases. </item>
+ <item> The total number of tests displayed by the 'tests'
+ attribute in the 'testsuite' element would earlier
+ include init/end_per_suite and init/end_per_group. This
+ is no longer the case. The 'tests' attribute will now
+ only count "real" test cases. </item> <item> Earlier,
+ auto skipped test cases would have no value in the 'log'
+ attribute. This is now corrected. </item> <item> A new
+          attribute 'log' is added to the 'testsuite' element.
+ </item> <item> A new option named 'url_base' is added for
+ this hook. If this option is used, a new attribute named
+ 'url' will be added to the 'testcase' and 'testsuite'
+ elements. </item> </list></p>
+ <p>
+ Own Id: OTP-10589</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Common_Test 1.6.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/common_test/doc/src/run_test_chapter.xml b/lib/common_test/doc/src/run_test_chapter.xml
index b804f134c6..d5f5d89e05 100644
--- a/lib/common_test/doc/src/run_test_chapter.xml
+++ b/lib/common_test/doc/src/run_test_chapter.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2003</year><year>2012</year>
+ <year>2003</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -752,7 +752,9 @@
PrivDirOption = auto_per_run | auto_per_tc | manual_per_tc
EventHandlers = atom() | [atom()]
InitArgs = [term()]
- CTHModules = [CTHModule | {CTHModule, CTHInitArgs} | {CTHModule, CTHInitArgs, CTHPriority}]
+ CTHModules = [CTHModule |
+ {CTHModule, CTHInitArgs} |
+ {CTHModule, CTHInitArgs, CTHPriority}]
CTHModule = atom()
CTHInitArgs = term()
Dir = string()
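
For reference, the three CTHModules forms listed above can be mixed freely
in one ct_hooks term; a small sketch with made-up hook module names and
init arguments:

    {ct_hooks, [my_cth,
                {my_other_cth, [{debug,true}]},
                {my_prioritized_cth, [], 100}]}.
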
diff --git a/lib/common_test/doc/src/write_test_chapter.xml b/lib/common_test/doc/src/write_test_chapter.xml
index 248d7de8b6..cc8d913994 100644
--- a/lib/common_test/doc/src/write_test_chapter.xml
+++ b/lib/common_test/doc/src/write_test_chapter.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2003</year><year>2012</year>
+ <year>2003</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -982,38 +982,36 @@
<p>Example:</p>
<pre>
-       Some printouts during test case execution:
-      io:format("1. Standard IO, importance = ~w~n", [?STD_IMPORTANCE]),
-      ct:log("2. Uncategorized, importance = ~w", [?STD_IMPORTANCE]),
-      ct:log(info, "3. Categorized info, importance = ~w", [?STD_IMPORTANCE]]),
-      ct:log(info, ?LOW_IMPORTANCE, "4. Categorized info, importance = ~w", [?LOW_IMPORTANCE]),
-      ct:log(error, "5. Categorized error, importance = ~w", [?HI_IMPORTANCE]),
-      ct:log(error, ?HI_IMPORTANCE, "6. Categorized error, importance = ~w", [?MAX_IMPORTANCE]),
-       If starting the test without specifying any verbosity levels:
-       $ ct_run ...
-       the following gets printed:
-
-       1. Standard IO, importance = 50
-       2. Uncategorized, importance = 50
-       3. Categorized info, importance = 50
-       5. Categorized error, importance = 75
-       6. Categorized error, importance = 99
-
-       If starting the test with:
-
-       $ ct_run -verbosity 1 and info 75
-
-       the following gets printed:
-       3. Categorized info, importance = 50
-       4. Categorized info, importance = 25
-       6. Categorized error, importance = 99
-   </pre>
+       Some printouts during test case execution:
+      io:format("1. Standard IO, importance = ~w~n", [?STD_IMPORTANCE]),
+      ct:log("2. Uncategorized, importance = ~w", [?STD_IMPORTANCE]),
+      ct:log(info, "3. Categorized info, importance = ~w", [?STD_IMPORTANCE]),
+      ct:log(info, ?LOW_IMPORTANCE, "4. Categorized info, importance = ~w", [?LOW_IMPORTANCE]),
+      ct:log(error, "5. Categorized error, importance = ~w", [?HI_IMPORTANCE]),
+      ct:log(error, ?HI_IMPORTANCE, "6. Categorized error, importance = ~w", [?MAX_IMPORTANCE]),
+       If starting the test without specifying any verbosity levels:
+       $ ct_run ...
+       the following gets printed:
+       1. Standard IO, importance = 50
+       2. Uncategorized, importance = 50
+       3. Categorized info, importance = 50
+       5. Categorized error, importance = 75
+       6. Categorized error, importance = 99
+
+       If starting the test with:
+
+       $ ct_run -verbosity 1 and info 75
+
+       the following gets printed:
+       3. Categorized info, importance = 50
+       4. Categorized info, importance = 25
+       6. Categorized error, importance = 99</pre>
<p>How categories can be mapped to CSS tags is documented in the
<seealso marker="run_test_chapter#html_stylesheet">Running Tests</seealso>
diff --git a/lib/common_test/src/ct_cover.erl b/lib/common_test/src/ct_cover.erl
index d39f50ba00..ae671c750a 100644
--- a/lib/common_test/src/ct_cover.erl
+++ b/lib/common_test/src/ct_cover.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,7 @@
-module(ct_cover).
--export([get_spec/1, add_nodes/1, remove_nodes/1]).
+-export([get_spec/1, add_nodes/1, remove_nodes/1, cross_cover_analyse/2]).
-include("ct_util.hrl").
@@ -100,6 +100,22 @@ remove_nodes(Nodes) ->
%%%-----------------------------------------------------------------
+%%% @spec cross_cover_analyse(Level,Tests) -> ok
+%%% Level = overview | details
+%%% Tests = [{Tag,Dir}]
+%%% Tag = atom()
+%%% Dir = string()
+%%%
+%%% @doc Accumulate cover results over multiple tests.
+%%% See the chapter about <seealso
+%%% marker="cover_chapter#cross_cover">cross cover
+%%% analysis</seealso> in the user's guide.
+%%%
+cross_cover_analyse(Level,Tests) ->
+ test_server_ctrl:cross_cover_analyse(Level,Tests).
+
+
+%%%-----------------------------------------------------------------
%%% @hidden
%% Read cover specification file and return the parsed info.
@@ -249,9 +265,11 @@ get_app_info(App=#cover{app=Name}, [{excl_mods,Name,Mods1}|Terms]) ->
Mods = App#cover.excl_mods,
get_app_info(App#cover{excl_mods=Mods++Mods1},Terms);
-get_app_info(App=#cover{app=Name}, [{cross_apps,Name,AppMods1}|Terms]) ->
- AppMods = App#cover.cross,
- get_app_info(App#cover{cross=AppMods++AppMods1},Terms);
+get_app_info(App=#cover{app=none}, [{cross,Cross}|Terms]) ->
+ get_app_info(App, [{cross,none,Cross}|Terms]);
+get_app_info(App=#cover{app=Name}, [{cross,Name,Cross1}|Terms]) ->
+ Cross = App#cover.cross,
+ get_app_info(App#cover{cross=Cross++Cross1},Terms);
get_app_info(App=#cover{app=none}, [{src_dirs,Dirs}|Terms]) ->
get_app_info(App, [{src_dirs,none,Dirs}|Terms]);
@@ -354,10 +372,10 @@ remove_excludes_and_dups(CoverData=#cover{excl_mods=Excl,incl_mods=Incl}) ->
files2mods(Info=#cover{excl_mods=ExclFs,
incl_mods=InclFs,
- cross=CrossFs}) ->
+ cross=Cross}) ->
Info#cover{excl_mods=files2mods1(ExclFs),
incl_mods=files2mods1(InclFs),
- cross=files2mods1(CrossFs)}.
+ cross=[{Tag,files2mods1(Fs)} || {Tag,Fs} <- Cross]}.
files2mods1([M|Fs]) when is_atom(M) ->
[M|files2mods1(Fs)];
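
A usage sketch of the new ct_cover:cross_cover_analyse/2 export above, to
be called after the tests in question have finished; the tags must match
those used in the cover specifications, and the log directories are
examples:

    %% Accumulate and log coverage over the tests tagged s1 and s2
    ct_cover:cross_cover_analyse(details,
                                 [{s1,"/home/tester/s1_test.logs"},
                                  {s2,"/home/tester/s2_test.logs"}]).
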
diff --git a/lib/common_test/src/ct_netconfc.erl b/lib/common_test/src/ct_netconfc.erl
index 11c8235040..1ccbc86d8f 100644
--- a/lib/common_test/src/ct_netconfc.erl
+++ b/lib/common_test/src/ct_netconfc.erl
@@ -1073,7 +1073,8 @@ handle_msg({get_event_streams=Op,Streams,Timeout}, From, State) ->
SimpleXml = encode_rpc_operation(get,[Filter]),
do_send_rpc(Op, SimpleXml, Timeout, From, State).
-handle_msg({ssh_cm, _CM, {data, _Ch, _Type, Data}}, State) ->
+handle_msg({ssh_cm, CM, {data, Ch, _Type, Data}}, State) ->
+ ssh_connection:adjust_window(CM,Ch,size(Data)),
handle_data(Data, State);
handle_msg({ssh_cm, _CM, _SshCloseMsg}, State) ->
%% _SshCloseMsg can probably be one of
@@ -1805,7 +1806,8 @@ get_tag([]) ->
%%% SSH stuff
ssh_receive_data() ->
receive
- {ssh_cm, _CM, {data, _Ch, _Type, Data}} ->
+ {ssh_cm, CM, {data, Ch, _Type, Data}} ->
+ ssh_connection:adjust_window(CM,Ch,size(Data)),
{ok, Data};
{ssh_cm, _CM, {Closed, _Ch}} = X when Closed == closed; Closed == eof ->
{error,X};
diff --git a/lib/common_test/src/ct_slave.erl b/lib/common_test/src/ct_slave.erl
index 58633b7de6..1fd8c04f8b 100644
--- a/lib/common_test/src/ct_slave.erl
+++ b/lib/common_test/src/ct_slave.erl
@@ -449,15 +449,29 @@ wait_for_node_alive(Node, N) ->
% call init:stop on a remote node
do_stop(ENode) ->
- case test_server:is_cover() of
- true ->
- MainCoverNode = cover:get_main_node(),
- rpc:call(MainCoverNode,cover,flush,[ENode]);
- false ->
- ok
+ {Cover,MainCoverNode} =
+ case test_server:is_cover() of
+ true ->
+ Main = cover:get_main_node(),
+ rpc:call(Main,cover,flush,[ENode]),
+ {true,Main};
+ false ->
+ {false,undefined}
end,
spawn(ENode, init, stop, []),
- wait_for_node_dead(ENode, 5).
+ case wait_for_node_dead(ENode, 5) of
+ {ok,ENode} ->
+ if Cover ->
+		    %% To avoid cover being started again if a node
+ %% with the same name is started later.
+ rpc:call(MainCoverNode,cover,stop,[ENode]);
+ true ->
+ ok
+ end,
+ {ok,ENode};
+ Error ->
+ Error
+ end.
% wait N seconds until node is disconnected
wait_for_node_dead(Node, 0) ->
diff --git a/lib/common_test/src/cth_surefire.erl b/lib/common_test/src/cth_surefire.erl
index 76b0f0b5ea..e6eaad8d48 100644
--- a/lib/common_test/src/cth_surefire.erl
+++ b/lib/common_test/src/cth_surefire.erl
@@ -1,3 +1,22 @@
+%%--------------------------------------------------------------------
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%--------------------------------------------------------------------
+
%%% @doc Common Test Framework functions handling test specifications.
%%%
%%% <p>This module creates a junit report of the test run if plugged in
@@ -27,18 +46,28 @@
-export([terminate/1]).
-record(state, { filepath, axis, properties, package, hostname,
- curr_suite, curr_suite_ts, curr_group = [], curr_tc,
- curr_log_dir, timer, tc_log,
+ curr_suite, curr_suite_ts, curr_group = [],
+ curr_log_dir, timer, tc_log, url_base,
test_cases = [],
test_suites = [] }).
--record(testcase, { log, group, classname, name, time, failure, timestamp }).
--record(testsuite, { errors, failures, hostname, name, tests,
+-record(testcase, { log, url, group, classname, name, time, result, timestamp }).
+-record(testsuite, { errors, failures, skipped, hostname, name, tests,
time, timestamp, id, package,
- properties, testcases }).
+ properties, testcases, log, url }).
+
+-define(default_report,"junit_report.xml").
+-define(suite_log,"suite.log.html").
+
+%% Number of dirs from log root to testcase log file.
+%% ct_run.<node>.<timestamp>/<test_name>/run.<timestamp>/<tc_log>.html
+-define(log_depth,3).
id(Opts) ->
- filename:absname(proplists:get_value(path, Opts, "junit_report.xml")).
+ case proplists:get_value(path, Opts) of
+ undefined -> ?default_report;
+ Path -> filename:absname(Path)
+ end.
init(Path, Opts) ->
{ok, Host} = inet:gethostname(),
@@ -47,10 +76,24 @@ init(Path, Opts) ->
package = proplists:get_value(package,Opts),
axis = proplists:get_value(axis,Opts,[]),
properties = proplists:get_value(properties,Opts,[]),
+ url_base = proplists:get_value(url_base,Opts),
timer = now() }.
pre_init_per_suite(Suite,Config,#state{ test_cases = [] } = State) ->
- {Config, init_tc(State#state{ curr_suite = Suite, curr_suite_ts = now() },
+ TcLog = proplists:get_value(tc_logfile,Config),
+ CurrLogDir = filename:dirname(TcLog),
+ Path =
+ case State#state.filepath of
+ ?default_report ->
+ RootDir = get_test_root(TcLog),
+ filename:join(RootDir,?default_report);
+ P ->
+ P
+ end,
+ {Config, init_tc(State#state{ filepath = Path,
+ curr_suite = Suite,
+ curr_suite_ts = now(),
+ curr_log_dir = CurrLogDir},
Config) };
pre_init_per_suite(Suite,Config,State) ->
%% Have to close the previous suite
@@ -59,7 +102,8 @@ pre_init_per_suite(Suite,Config,State) ->
post_init_per_suite(_Suite,Config, Result, State) ->
{Result, end_tc(init_per_suite,Config,Result,State)}.
-pre_end_per_suite(_Suite,Config,State) -> {Config, init_tc(State, Config)}.
+pre_end_per_suite(_Suite,Config,State) ->
+ {Config, init_tc(State, Config)}.
post_end_per_suite(_Suite,Config,Result,State) ->
{Result, end_tc(end_per_suite,Config,Result,State)}.
@@ -71,13 +115,15 @@ pre_init_per_group(Group,Config,State) ->
post_init_per_group(_Group,Config,Result,State) ->
{Result, end_tc(init_per_group,Config,Result,State)}.
-pre_end_per_group(_Group,Config,State) -> {Config, init_tc(State, Config)}.
+pre_end_per_group(_Group,Config,State) ->
+ {Config, init_tc(State, Config)}.
post_end_per_group(_Group,Config,Result,State) ->
NewState = end_tc(end_per_group, Config, Result, State),
{Result, NewState#state{ curr_group = tl(NewState#state.curr_group)}}.
-pre_init_per_testcase(_TC,Config,State) -> {Config, init_tc(State, Config)}.
+pre_init_per_testcase(_TC,Config,State) ->
+ {Config, init_tc(State, Config)}.
post_end_per_testcase(TC,Config,Result,State) ->
{Result, end_tc(TC,Config, Result,State)}.
@@ -88,11 +134,19 @@ on_tc_fail(_TC, Res, State) ->
TCs = State#state.test_cases,
TC = hd(TCs),
NewTC = TC#testcase{
- failure =
+ result =
{fail,lists:flatten(io_lib:format("~p",[Res]))} },
State#state{ test_cases = [NewTC | tl(TCs)]}.
-on_tc_skip(Tc,{Type,_Reason} = Res, State) when Type == tc_auto_skip ->
+on_tc_skip(Tc,{Type,_Reason} = Res, State0) when Type == tc_auto_skip ->
+ TcStr = atom_to_list(Tc),
+ State =
+ case State0#state.test_cases of
+ [#testcase{name=TcStr}|TCs] ->
+ State0#state{test_cases=TCs};
+ _ ->
+ State0
+ end,
do_tc_skip(Res, end_tc(Tc,[],Res,init_tc(State,[])));
on_tc_skip(_Tc, _Res, State = #state{test_cases = []}) ->
State;
@@ -103,7 +157,7 @@ do_tc_skip(Res, State) ->
TCs = State#state.test_cases,
TC = hd(TCs),
NewTC = TC#testcase{
- failure =
+ result =
{skipped,lists:flatten(io_lib:format("~p",[Res]))} },
State#state{ test_cases = [NewTC | tl(TCs)]}.
@@ -117,33 +171,52 @@ end_tc(Func, Config, Res, State) when is_atom(Func) ->
end_tc(atom_to_list(Func), Config, Res, State);
end_tc(Name, _Config, _Res, State = #state{ curr_suite = Suite,
curr_group = Groups,
- timer = TS, tc_log = Log } ) ->
+ curr_log_dir = CurrLogDir,
+ timer = TS,
+ tc_log = Log0,
+ url_base = UrlBase } ) ->
+ Log =
+ case Log0 of
+ "" ->
+ LowerSuiteName = string:to_lower(atom_to_list(Suite)),
+ filename:join(CurrLogDir,LowerSuiteName++"."++Name++".html");
+ _ ->
+ Log0
+ end,
+ Url = make_url(UrlBase,Log),
ClassName = atom_to_list(Suite),
PGroup = string:join([ atom_to_list(Group)||
Group <- lists:reverse(Groups)],"."),
TimeTakes = io_lib:format("~f",[timer:now_diff(now(),TS) / 1000000]),
State#state{ test_cases = [#testcase{ log = Log,
+ url = Url,
timestamp = now_to_string(TS),
classname = ClassName,
group = PGroup,
name = Name,
time = TimeTakes,
- failure = passed }| State#state.test_cases]}.
+ result = passed }|
+ State#state.test_cases],
+ tc_log = ""}. % so old tc_log is not set if next is on_tc_skip
close_suite(#state{ test_cases = [] } = State) ->
State;
-close_suite(#state{ test_cases = TCs } = State) ->
- Total = length(TCs),
- Succ = length(lists:filter(fun(#testcase{ failure = F }) ->
- F == passed
- end,TCs)),
- Fail = Total - Succ,
+close_suite(#state{ test_cases = TCs, url_base = UrlBase } = State) ->
+ {Total,Fail,Skip} = count_tcs(TCs,0,0,0),
TimeTaken = timer:now_diff(now(),State#state.curr_suite_ts) / 1000000,
+ SuiteLog = filename:join(State#state.curr_log_dir,?suite_log),
+ SuiteUrl = make_url(UrlBase,SuiteLog),
Suite = #testsuite{ name = atom_to_list(State#state.curr_suite),
package = State#state.package,
+ hostname = State#state.hostname,
time = io_lib:format("~f",[TimeTaken]),
timestamp = now_to_string(State#state.curr_suite_ts),
- errors = Fail, tests = Total,
- testcases = lists:reverse(TCs) },
+ errors = 0,
+ failures = Fail,
+ skipped = Skip,
+ tests = Total,
+ testcases = lists:reverse(TCs),
+ log = SuiteLog,
+ url = SuiteUrl},
State#state{ test_cases = [],
test_suites = [Suite | State#state.test_suites]}.
@@ -159,14 +232,15 @@ terminate(State) ->
-to_xml(#testcase{ group = Group, classname = CL, log = L, name = N, time = T, timestamp = TS, failure = F}) ->
+to_xml(#testcase{ group = Group, classname = CL, log = L, url = U, name = N, time = T, timestamp = TS, result = R}) ->
["<testcase ",
- [["group=\"",Group,"\""]||Group /= ""]," "
+ [["group=\"",Group,"\" "]||Group /= ""],
"name=\"",N,"\" "
"time=\"",T,"\" "
- "timestamp=\"",TS,"\" "
+ "timestamp=\"",TS,"\" ",
+ [["url=\"",U,"\" "]||U /= undefined],
"log=\"",L,"\">",
- case F of
+ case R of
passed ->
[];
{skipped,Reason} ->
@@ -176,22 +250,29 @@ to_xml(#testcase{ group = Group, classname = CL, log = L, name = N, time = T, ti
["<failure message=\"Test ",N," in ",CL," failed!\" type=\"crash\">",
sanitize(Reason),"</failure>"]
end,"</testcase>"];
-to_xml(#testsuite{ package = P, hostname = H, errors = E, time = Time,
- timestamp = TS, tests = T, name = N, testcases = Cases }) ->
+to_xml(#testsuite{ package = P, hostname = H, errors = E, failures = F,
+ skipped = S, time = Time, timestamp = TS, tests = T, name = N,
+ testcases = Cases, log = Log, url = Url }) ->
["<testsuite ",
[["package=\"",P,"\" "]||P /= undefined],
- [["hostname=\"",P,"\" "]||H /= undefined],
- [["name=\"",N,"\" "]||N /= undefined],
- [["time=\"",Time,"\" "]||Time /= undefined],
- [["timestamp=\"",TS,"\" "]||TS /= undefined],
+ "hostname=\"",H,"\" "
+ "name=\"",N,"\" "
+ "time=\"",Time,"\" "
+ "timestamp=\"",TS,"\" "
"errors=\"",integer_to_list(E),"\" "
- "tests=\"",integer_to_list(T),"\">",
+ "failures=\"",integer_to_list(F),"\" "
+ "skipped=\"",integer_to_list(S),"\" "
+ "tests=\"",integer_to_list(T),"\" ",
+ [["url=\"",Url,"\" "]||Url /= undefined],
+ "log=\"",Log,"\">",
[to_xml(Case) || Case <- Cases],
"</testsuite>"];
to_xml(#state{ test_suites = TestSuites, axis = Axis, properties = Props }) ->
["<testsuites>",properties_to_xml(Axis,Props),
[to_xml(TestSuite) || TestSuite <- TestSuites],"</testsuites>"].
+properties_to_xml([],[]) ->
+ [];
properties_to_xml(Axis,Props) ->
["<properties>",
[["<property name=\"",Name,"\" axis=\"yes\" value=\"",Value,"\" />"] || {Name,Value} <- Axis],
@@ -217,3 +298,37 @@ sanitize([]) ->
now_to_string(Now) ->
{{YY,MM,DD},{HH,Mi,SS}} = calendar:now_to_local_time(Now),
io_lib:format("~p-~2..0B-~2..0BT~2..0B:~2..0B:~2..0B",[YY,MM,DD,HH,Mi,SS]).
+
+make_url(undefined,_) ->
+ undefined;
+make_url(_,[]) ->
+ undefined;
+make_url(UrlBase0,Log) ->
+ UrlBase = string:strip(UrlBase0,right,$/),
+ RelativeLog = get_relative_log_url(Log),
+ string:join([UrlBase,RelativeLog],"/").
+
+get_test_root(Log) ->
+ LogParts = filename:split(Log),
+ filename:join(lists:sublist(LogParts,1,length(LogParts)-?log_depth)).
+
+get_relative_log_url(Log) ->
+ LogParts = filename:split(Log),
+ Start = length(LogParts)-?log_depth,
+ Length = ?log_depth+1,
+ string:join(lists:sublist(LogParts,Start,Length),"/").
+
+count_tcs([#testcase{name=ConfCase}|TCs],Ok,F,S)
+ when ConfCase=="init_per_suite";
+ ConfCase=="end_per_suite";
+ ConfCase=="init_per_group";
+ ConfCase=="end_per_group" ->
+ count_tcs(TCs,Ok,F,S);
+count_tcs([#testcase{result=passed}|TCs],Ok,F,S) ->
+ count_tcs(TCs,Ok+1,F,S);
+count_tcs([#testcase{result={fail,_}}|TCs],Ok,F,S) ->
+ count_tcs(TCs,Ok,F+1,S);
+count_tcs([#testcase{result={skipped,_}}|TCs],Ok,F,S) ->
+ count_tcs(TCs,Ok,F,S+1);
+count_tcs([],Ok,F,S) ->
+ {Ok+F+S,F,S}.
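
To make the ?log_depth arithmetic above concrete, here is a hedged
walk-through of make_url/2 and get_relative_log_url/1 for an invented log
path (node name, test name and timestamps are made up):

    %% With ?log_depth = 3 and, for example,
    Log = "/tests/ct_run.mynode.2013-01-01_10.00.00/mytest.logs/"
          "run.2013-01-01_10.00.00/suite.log.html",
    %% get_relative_log_url(Log) keeps the last ?log_depth+1 = 4 path
    %% components:
    %%   "ct_run.mynode.2013-01-01_10.00.00/mytest.logs/run.2013-01-01_10.00.00/suite.log.html"
    %% and make_url("http://myserver.com/", Log) strips the trailing "/" from
    %% the base and joins the two with "/":
    %%   "http://myserver.com/ct_run.mynode.2013-01-01_10.00.00/mytest.logs/run.2013-01-01_10.00.00/suite.log.html"
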
diff --git a/lib/common_test/test/Makefile b/lib/common_test/test/Makefile
index df816f9a61..d469d03e04 100644
--- a/lib/common_test/test/Makefile
+++ b/lib/common_test/test/Makefile
@@ -56,7 +56,8 @@ MODULES= \
ct_snmp_SUITE \
ct_group_leader_SUITE \
ct_cover_SUITE \
- ct_groups_search_SUITE
+ ct_groups_search_SUITE \
+ ct_surefire_SUITE
ERL_FILES= $(MODULES:%=%.erl)
diff --git a/lib/common_test/test/common_test.cover b/lib/common_test/test/common_test.cover
index 66697854ea..3aa49623e7 100644
--- a/lib/common_test/test/common_test.cover
+++ b/lib/common_test/test/common_test.cover
@@ -1,10 +1,10 @@
%% -*- erlang -*-
{incl_app,common_test,details}.
-{cross_apps,common_test,[erl2html2,
- test_server,
- test_server_ctrl,
- test_server_gl,
- test_server_h,
- test_server_io,
- test_server_node,
- test_server_sup]}.
+{cross,common_test,[{test_server,[erl2html2,
+ test_server,
+ test_server_ctrl,
+ test_server_gl,
+ test_server_h,
+ test_server_io,
+ test_server_node,
+ test_server_sup]}]}.
diff --git a/lib/common_test/test/ct_cover_SUITE.erl b/lib/common_test/test/ct_cover_SUITE.erl
index bebfce70d0..cb49dc423f 100644
--- a/lib/common_test/test/ct_cover_SUITE.erl
+++ b/lib/common_test/test/ct_cover_SUITE.erl
@@ -77,7 +77,8 @@ all() ->
slave_start_slave,
cover_node_option,
ct_cover_add_remove_nodes,
- otp_9956
+ otp_9956,
+ cross
].
%%--------------------------------------------------------------------
@@ -161,6 +162,43 @@ otp_9956(Config) ->
check_calls(Events,{?suite,otp_9956,1},1),
ok.
+%% Test cross cover mechanism
+cross(Config) ->
+ {ok,Events1} = run_test(cross1,Config),
+ check_calls(Events1,1),
+
+ CoverFile2 = create_cover_file(cross1,[{cross,[{cross1,[?mod]}]}],Config),
+ {ok,Events2} = run_test(cross2,[{cover,CoverFile2}],Config),
+ check_calls(Events2,1),
+
+ %% Get the log dirs for each test and run cross cover analyse
+ [D11,D12] = lists:sort(get_run_dirs(Events1)),
+ [D21,D22] = lists:sort(get_run_dirs(Events2)),
+
+ ct_cover:cross_cover_analyse(details,[{cross1,D11},{cross2,D21}]),
+ ct_cover:cross_cover_analyse(details,[{cross1,D12},{cross2,D22}]),
+
+ %% Get the cross cover logs and read for each test
+ [C11,C12,C21,C22] =
+ [filename:join(D,"cross_cover.html") || D <- [D11,D12,D21,D22]],
+
+ {ok,CrossData} = file:read_file(C11),
+ {ok,CrossData} = file:read_file(C12),
+
+ {ok,Def} = file:read_file(C21),
+ {ok,Def} = file:read_file(C22),
+
+ %% A simple test: just check that the test module exists in the
+ %% log from cross1 test, and that it does not exist in the log
+ %% from cross2 test.
+ TestMod = list_to_binary(atom_to_list(?mod)),
+ {_,_} = binary:match(CrossData,TestMod),
+ nomatch = binary:match(Def,TestMod),
+ {_,_} = binary:match(Def,
+ <<"No cross cover modules exist for this application">>),
+
+ ok.
+
%%%-----------------------------------------------------------------
%%% HELP FUNCTIONS
@@ -229,15 +267,18 @@ check_cover(Node) when is_atom(Node) ->
false
end.
+%% Get the log dir "run.<timestamp>" for all (both!) tests
+get_run_dirs(Events) ->
+ [filename:dirname(TCLog) ||
+ {ct_test_support_eh,
+ {event,tc_logfile,_Node,
+ {{?suite,init_per_suite},TCLog}}} <- Events].
+
%% Check that each coverlog includes N calls to ?mod:foo/0
check_calls(Events,N) ->
check_calls(Events,{?mod,foo,0},N).
check_calls(Events,MFA,N) ->
- CoverLogs =
- [filename:join(filename:dirname(TCLog),"all.coverdata") ||
- {ct_test_support_eh,
- {event,tc_logfile,ct@falco,
- {{?suite,init_per_suite},TCLog}}} <- Events],
+ CoverLogs = [filename:join(D,"all.coverdata") || D <- get_run_dirs(Events)],
do_check_logs(CoverLogs,MFA,N).
do_check_logs([CoverLog|CoverLogs],{Mod,_,_} = MFA,N) ->
diff --git a/lib/common_test/test/ct_surefire_SUITE.erl b/lib/common_test/test/ct_surefire_SUITE.erl
new file mode 100644
index 0000000000..69e98cef48
--- /dev/null
+++ b/lib/common_test/test/ct_surefire_SUITE.erl
@@ -0,0 +1,351 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File: ct_surefire_SUITE
+%%%
+%%% Description:
+%%% Test cth_surefire hook
+%%%
+%%%-------------------------------------------------------------------
+-module(ct_surefire_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
+-include_lib("xmerl/include/xmerl.hrl").
+
+-define(eh, ct_test_support_eh).
+
+-define(url_base,"http://my.host.com/").
+
+%%--------------------------------------------------------------------
+%% TEST SERVER CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Description: Since Common Test starts another Test Server
+%% instance, the tests need to be performed on a separate node (or
+%% there will be clashes with logging processes etc).
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config1 = ct_test_support:init_per_suite(Config),
+ Config1.
+
+end_per_suite(Config) ->
+ ct_test_support:end_per_suite(Config).
+
+init_per_testcase(TestCase, Config) ->
+ ct_test_support:init_per_testcase(TestCase, Config).
+
+end_per_testcase(TestCase, Config) ->
+ ct_test_support:end_per_testcase(TestCase, Config).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ default,
+ absolute_path,
+ relative_path,
+ url,
+ logdir
+ ].
+
+%%--------------------------------------------------------------------
+%% TEST CASES
+%%--------------------------------------------------------------------
+
+%%%-----------------------------------------------------------------
+%%%
+default(Config) when is_list(Config) ->
+ run(default,[cth_surefire],Config),
+ PrivDir = ?config(priv_dir,Config),
+ XmlRe = filename:join([PrivDir,"*","junit_report.xml"]),
+ check_xml(default,XmlRe).
+
+absolute_path(Config) when is_list(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Path = filename:join(PrivDir,"abspath.xml"),
+ run(absolute_path,[{cth_surefire,[{path,Path}]}],Config),
+ check_xml(absolute_path,Path).
+
+relative_path(Config) when is_list(Config) ->
+ Path = "relpath.xml",
+ run(relative_path,[{cth_surefire,[{path,Path}]}],Config),
+ PrivDir = ?config(priv_dir,Config),
+ XmlRe = filename:join([PrivDir,"*",Path]),
+ check_xml(relative_path,XmlRe).
+
+url(Config) when is_list(Config) ->
+ Path = "url.xml",
+ run(url,[{cth_surefire,[{url_base,?url_base},
+ {path,Path}]}],Config),
+ PrivDir = ?config(priv_dir,Config),
+ XmlRe = filename:join([PrivDir,"*",Path]),
+ check_xml(url,XmlRe).
+
+logdir(Config) when is_list(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ LogDir = filename:join(PrivDir,"specific_logdir"),
+ file:make_dir(LogDir),
+ Path = "logdir.xml",
+ run(logdir,[{cth_surefire,[{path,Path}]}],Config,[{logdir,LogDir}]),
+ PrivDir = ?config(priv_dir,Config),
+ XmlRe = filename:join([LogDir,"*",Path]),
+ check_xml(logdir,XmlRe).
+
+%%%-----------------------------------------------------------------
+%%% HELP FUNCTIONS
+%%%-----------------------------------------------------------------
+run(Case,CTHs,Config) ->
+ run(Case,CTHs,Config,[]).
+run(Case,CTHs,Config,ExtraOpts) ->
+ DataDir = ?config(data_dir, Config),
+ Suite = filename:join(DataDir, "surefire_SUITE"),
+ {Opts,ERPid} = setup([{suite,Suite},{ct_hooks,CTHs},{label,Case}|ExtraOpts],
+ Config),
+ ok = execute(Case, Opts, ERPid, Config).
+
+setup(Test, Config) ->
+ Opts0 = ct_test_support:get_opts(Config),
+ Opts1 =
+ case lists:keymember(logdir,1,Test) of
+ true -> lists:keydelete(logdir,1,Opts0);
+ false -> Opts0
+ end,
+ Level = ?config(trace_level, Config),
+ EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ Opts = Opts1 ++ [{event_handler,{?eh,EvHArgs}}|Test],
+ ERPid = ct_test_support:start_event_receiver(Config),
+ {Opts,ERPid}.
+
+execute(Name, Opts, ERPid, Config) ->
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(Name,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+
+ TestEvents = events_to_check(Name),
+ ct_test_support:verify_events(TestEvents, Events, Config).
+
+reformat(Events, EH) ->
+ ct_test_support:reformat(Events, EH).
+
+%%%-----------------------------------------------------------------
+%%% TEST EVENTS
+%%%-----------------------------------------------------------------
+events_to_check(Test) ->
+ %% 2 tests (ct:run_test + script_start) is default
+ events_to_check(Test, 2).
+
+events_to_check(_, 0) ->
+ [];
+events_to_check(Test, N) ->
+ test_events(Test) ++ events_to_check(Test, N-1).
+
+test_events(_) ->
+ [{?eh,start_logging,'_'},
+ {?eh,start_info,{1,1,9}},
+ {?eh,tc_start,{surefire_SUITE,init_per_suite}},
+ {?eh,tc_done,{surefire_SUITE,init_per_suite,ok}},
+ {?eh,tc_start,{surefire_SUITE,tc_ok}},
+ {?eh,tc_done,{surefire_SUITE,tc_ok,ok}},
+ {?eh,test_stats,{1,0,{0,0}}},
+ {?eh,tc_start,{surefire_SUITE,tc_fail}},
+ {?eh,tc_done,{surefire_SUITE,tc_fail,
+ {failed,{error,{test_case_failed,"this test should fail"}}}}},
+ {?eh,test_stats,{1,1,{0,0}}},
+ {?eh,tc_start,{surefire_SUITE,tc_skip}},
+ {?eh,tc_done,{surefire_SUITE,tc_skip,{skipped,"this test is skipped"}}},
+ {?eh,test_stats,{1,1,{1,0}}},
+ {?eh,tc_start,{surefire_SUITE,tc_autoskip_require}},
+ {?eh,tc_done,{surefire_SUITE,tc_autoskip_require,
+ {skipped,{require_failed,'_'}}}},
+ {?eh,test_stats,{1,1,{1,1}}},
+ [{?eh,tc_start,{surefire_SUITE,{init_per_group,g,[]}}},
+ {?eh,tc_done,{surefire_SUITE,{init_per_group,g,[]},ok}},
+ {?eh,tc_start,{surefire_SUITE,tc_ok}},
+ {?eh,tc_done,{surefire_SUITE,tc_ok,ok}},
+ {?eh,test_stats,{2,1,{1,1}}},
+ {?eh,tc_start,{surefire_SUITE,tc_fail}},
+ {?eh,tc_done,{surefire_SUITE,tc_fail,
+ {failed,{error,{test_case_failed,"this test should fail"}}}}},
+ {?eh,test_stats,{2,2,{1,1}}},
+ {?eh,tc_start,{surefire_SUITE,tc_skip}},
+ {?eh,tc_done,{surefire_SUITE,tc_skip,{skipped,"this test is skipped"}}},
+ {?eh,test_stats,{2,2,{2,1}}},
+ {?eh,tc_start,{surefire_SUITE,tc_autoskip_require}},
+ {?eh,tc_done,{surefire_SUITE,tc_autoskip_require,
+ {skipped,{require_failed,'_'}}}},
+ {?eh,test_stats,{2,2,{2,2}}},
+ {?eh,tc_start,{surefire_SUITE,{end_per_group,g,[]}}},
+ {?eh,tc_done,{surefire_SUITE,{end_per_group,g,[]},ok}}],
+ [{?eh,tc_start,{surefire_SUITE,{init_per_group,g_fail,[]}}},
+ {?eh,tc_done,{surefire_SUITE,{init_per_group,g_fail,[]},
+ {failed,{error,all_cases_should_be_skipped}}}},
+ {?eh,tc_auto_skip,{surefire_SUITE,tc_ok,
+ {failed,
+ {surefire_SUITE,init_per_group,
+ {'EXIT',all_cases_should_be_skipped}}}}},
+ {?eh,test_stats,{2,2,{2,3}}},
+ {?eh,tc_auto_skip,{surefire_SUITE,end_per_group,
+ {failed,
+ {surefire_SUITE,init_per_group,
+ {'EXIT',all_cases_should_be_skipped}}}}}],
+ {?eh,tc_start,{surefire_SUITE,end_per_suite}},
+ {?eh,tc_done,{surefire_SUITE,end_per_suite,ok}},
+ {?eh,stop_logging,[]}].
+
+
+%%%-----------------------------------------------------------------
+%%% Check generated xml log files
+check_xml(Case,XmlRe) ->
+ case filelib:wildcard(XmlRe) of
+ [] ->
+ ct:fail("No xml files found with regexp ~p~n", [XmlRe]);
+ [_] = Xmls when Case==absolute_path ->
+ do_check_xml(Case,Xmls);
+ [_,_] = Xmls ->
+ do_check_xml(Case,Xmls)
+ end.
+
+%% Allowed structure:
+%% <testsuites>
+%% <testsuite>
+%% <properties>
+%% <property/>
+%% ...
+%% </properties>
+%% <testcase>
+%% [<failure/> | <error/> | <skipped/> ]
+%% </testcase>
+%% ...
+%% </testsuite>
+%% ...
+%% </testsuites>
+do_check_xml(Case,[Xml|Xmls]) ->
+ ct:log("Checking <a href=~p>~s</a>~n",[Xml,Xml]),
+ {E,_} = xmerl_scan:file(Xml),
+ Expected = events_to_result(lists:flatten(test_events(Case))),
+ ParseResult = testsuites(Case,E),
+ ct:log("Expecting: ~p~n",[[Expected]]),
+ ct:log("Actual : ~p~n",[ParseResult]),
+ [Expected] = ParseResult,
+ do_check_xml(Case,Xmls);
+do_check_xml(_,[]) ->
+ ok.
+
+%% Scanning the XML to get the same type of result as events_to_result/1
+testsuites(Case,#xmlElement{name=testsuites,content=TS}) ->
+ %% OTP-10589 - move properties element to <testsuite>
+ false = lists:keytake(properties,#xmlElement.name,TS),
+ testsuite(Case,TS).
+
+testsuite(Case,[#xmlElement{name=testsuite,content=TC,attributes=A}|TS]) ->
+ {ET,EF,ES} = events_to_numbers(lists:flatten(test_events(Case))),
+ {T,E,F,S} = get_numbers_from_attrs(A,false,false,false,false),
+ ct:log("Expecting total:~p, error:~p, failure:~p, skipped:~p~n",[ET,0,EF,ES]),
+ ct:log("Actual total:~p, error:~p, failure:~p, skipped:~p~n",[T,E,F,S]),
+ {ET,0,EF,ES} = {T,E,F,S},
+
+    %% properties should only be there if options were given to the hook
+ false = lists:keytake(properties,#xmlElement.name,TC),
+    %% system-out and system-err are not used by common_test
+ false = lists:keytake('system-out',#xmlElement.name,TC),
+ false = lists:keytake('system-err',#xmlElement.name,TC),
+ R=testcase(Case,TC),
+ [R|testsuite(Case,TS)];
+testsuite(_Case,[]) ->
+ [].
+
+testcase(url=Case,[#xmlElement{name=testcase,attributes=A,content=C}|TC]) ->
+ R = failed_or_skipped(C),
+ case R of
+ [s] ->
+ case lists:keyfind(url,#xmlAttribute.name,A) of
+ false -> ok;
+ #xmlAttribute{value=UrlAttr} ->
+ lists:keyfind(url,#xmlAttribute.name,A),
+ true = lists:prefix(?url_base,UrlAttr)
+ end;
+ _ ->
+ #xmlAttribute{value=UrlAttr} =
+ lists:keyfind(url,#xmlAttribute.name,A),
+ true = lists:prefix(?url_base,UrlAttr)
+ end,
+ [R|testcase(Case,TC)];
+testcase(Case,[#xmlElement{name=testcase,attributes=A,content=C}|TC]) ->
+ false = lists:keyfind(url,#xmlAttribute.name,A),
+ R = failed_or_skipped(C),
+ [R|testcase(Case,TC)];
+testcase(_Case,[]) ->
+ [].
+
+failed_or_skipped([#xmlElement{name=failure}|E]) ->
+ [f|failed_or_skipped(E)];
+failed_or_skipped([#xmlElement{name=error}|E]) ->
+ [e|failed_or_skipped(E)];
+failed_or_skipped([#xmlElement{name=skipped}|E]) ->
+ [s|failed_or_skipped(E)];
+failed_or_skipped([]) ->
+ [].
+
+%% Using the expected events to produce the expected result of the XML scanning.
+%% The result is a list of test suites:
+%% Testsuites = [Testsuite]
+%% Testsuite = [Testcase]
+%% Testcase = [] | [f] | [s], indicating ok, failed and skipped respectively
+events_to_result([{?eh,tc_done,{_Suite,_Case,R}}|E]) ->
+ [result(R)|events_to_result(E)];
+events_to_result([{?eh,tc_auto_skip,_}|E]) ->
+ [[s]|events_to_result(E)];
+events_to_result([_|E]) ->
+ events_to_result(E);
+events_to_result([]) ->
+ [].
+
+result(ok) ->[];
+result({skipped,_}) -> [s];
+result({failed,_}) -> [f].
+
+%% Using the expected events' last test_stats element to produce the
+%% expected numbers of total, error, failed and skipped testcases.
+events_to_numbers(E) ->
+ RevE = lists:reverse(E),
+ {?eh,test_stats,{Ok,F,{US,AS}}} = lists:keyfind(test_stats,2,RevE),
+ {Ok+F+US+AS,F,US+AS}.
+
+get_numbers_from_attrs([#xmlAttribute{name=tests,value=X}|A],false,E,F,S) ->
+ get_numbers_from_attrs(A,list_to_integer(X),E,F,S);
+get_numbers_from_attrs([#xmlAttribute{name=errors,value=X}|A],T,false,F,S) ->
+ get_numbers_from_attrs(A,T,list_to_integer(X),F,S);
+get_numbers_from_attrs([#xmlAttribute{name=failures,value=X}|A],T,E,false,S) ->
+ get_numbers_from_attrs(A,T,E,list_to_integer(X),S);
+get_numbers_from_attrs([#xmlAttribute{name=skipped,value=X}|A],T,E,F,false) ->
+ get_numbers_from_attrs(A,T,E,F,list_to_integer(X));
+get_numbers_from_attrs([_|A],T,E,F,S) ->
+ get_numbers_from_attrs(A,T,E,F,S);
+get_numbers_from_attrs([],T,E,F,S) ->
+ {T,E,F,S}.
diff --git a/lib/common_test/test/ct_surefire_SUITE_data/surefire_SUITE.erl b/lib/common_test/test/ct_surefire_SUITE_data/surefire_SUITE.erl
new file mode 100644
index 0000000000..677aee46c5
--- /dev/null
+++ b/lib/common_test/test/ct_surefire_SUITE_data/surefire_SUITE.erl
@@ -0,0 +1,92 @@
+%%--------------------------------------------------------------------
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%----------------------------------------------------------------------
+%% File: surefire_SUITE.erl
+%%
+%% Description:
+%% This file contains the test cases for cth_surefire.
+%%
+%% @author Support
+%% @doc Test of surefire support in common_test
+%% @end
+%%----------------------------------------------------------------------
+%%----------------------------------------------------------------------
+-module(surefire_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% Default timetrap timeout (set in init_per_testcase).
+-define(default_timeout, ?t:minutes(1)).
+
+all() ->
+ testcases() ++ [{group,g},{group,g_fail}].
+
+groups() ->
+ [{g,testcases()},
+ {g_fail,[tc_ok]}].
+
+testcases() ->
+ [tc_ok,
+ tc_fail,
+ tc_skip,
+ tc_autoskip_require].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_group(g_fail, _Config) ->
+ exit(all_cases_should_be_skipped);
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_Group, Config) ->
+ Config.
+
+init_per_testcase(_Case, Config) ->
+ Dog = test_server:timetrap(?default_timeout),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Case, Config) ->
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+break(_Config) ->
+ test_server:break(""),
+ ok.
+
+tc_ok(_Config) ->
+ ok.
+
+tc_fail(_Config) ->
+ ct:fail("this test should fail").
+
+tc_skip(_Config) ->
+ {skip,"this test is skipped"}.
+
+tc_autoskip_require() ->
+ [{require,whatever}].
+tc_autoskip_require(_Config) ->
+ ct:fail("this test should never be executed - it should be autoskipped").
diff --git a/lib/common_test/test/ct_test_support.erl b/lib/common_test/test/ct_test_support.erl
index e5e2e68fcb..fc572aa82f 100644
--- a/lib/common_test/test/ct_test_support.erl
+++ b/lib/common_test/test/ct_test_support.erl
@@ -117,11 +117,7 @@ end_per_suite(Config) ->
CTNode = proplists:get_value(ct_node, Config),
PrivDir = proplists:get_value(priv_dir, Config),
true = rpc:call(CTNode, code, del_path, [filename:join(PrivDir,"")]),
- case test_server:is_cover() of
- true -> cover:flush(CTNode);
- false -> ok
- end,
- slave:stop(CTNode),
+ slave_stop(CTNode),
ok.
%%%-----------------------------------------------------------------
@@ -152,11 +148,7 @@ end_per_testcase(_TestCase, Config) ->
case wait_for_ct_stop(CTNode) of
%% Common test was not stopped to we restart node.
false ->
- case test_server:is_cover() of
- true -> cover:flush(CTNode);
- false -> ok
- end,
- slave:stop(CTNode),
+ slave_stop(CTNode),
start_slave(Config,proplists:get_value(trace_level,Config)),
{fail, "Could not stop common_test"};
true ->
@@ -1274,3 +1266,22 @@ rm_files([F | Fs]) ->
rm_files([]) ->
ok.
+%%%-----------------------------------------------------------------
+%%%
+slave_stop(Node) ->
+ Cover = test_server:is_cover(),
+ if Cover-> cover:flush(Node);
+ true -> ok
+ end,
+ erlang:monitor_node(Node, true),
+ slave:stop(Node),
+ receive
+ {nodedown, Node} ->
+ if Cover -> cover:stop(Node);
+ true -> ok
+ end
+ after 5000 ->
+ erlang:monitor_node(Node, false),
+ receive {nodedown, Node} -> ok after 0 -> ok end %flush
+ end,
+ ok.