Diffstat (limited to 'lib/common_test')
-rw-r--r-- [-rwxr-xr-x]  lib/common_test/configure.in | 0
-rw-r--r--  lib/common_test/doc/src/cover_chapter.xml | 25
-rw-r--r--  lib/common_test/doc/src/ct_hooks.xml | 50
-rw-r--r--  lib/common_test/doc/src/ct_run.xml | 3
-rw-r--r--  lib/common_test/doc/src/event_handler_chapter.xml | 29
-rw-r--r--  lib/common_test/doc/src/notes.xml | 178
-rw-r--r--  lib/common_test/doc/src/run_test_chapter.xml | 18
-rw-r--r-- [-rwxr-xr-x]  lib/common_test/priv/run_test.in | 0
-rw-r--r--  lib/common_test/src/common_test.app.src | 7
-rw-r--r--  lib/common_test/src/ct.erl | 5
-rw-r--r--  lib/common_test/src/ct_conn_log_h.erl | 9
-rw-r--r--  lib/common_test/src/ct_cover.erl | 30
-rw-r--r--  lib/common_test/src/ct_framework.erl | 107
-rw-r--r--  lib/common_test/src/ct_gen_conn.erl | 7
-rw-r--r--  lib/common_test/src/ct_hooks.erl | 38
-rw-r--r--  lib/common_test/src/ct_logs.erl | 49
-rw-r--r--  lib/common_test/src/ct_netconfc.erl | 27
-rw-r--r--  lib/common_test/src/ct_run.erl | 256
-rw-r--r--  lib/common_test/src/ct_telnet.erl | 334
-rw-r--r--  lib/common_test/src/ct_telnet_client.erl | 137
-rw-r--r--  lib/common_test/src/ct_testspec.erl | 7
-rw-r--r--  lib/common_test/src/ct_util.erl | 25
-rw-r--r--  lib/common_test/src/ct_util.hrl | 1
-rw-r--r--  lib/common_test/src/cth_conn_log.erl | 12
-rw-r--r--  lib/common_test/src/cth_surefire.erl | 4
-rw-r--r--  lib/common_test/src/unix_telnet.erl | 29
-rw-r--r--  lib/common_test/test/Makefile | 3
-rw-r--r--  lib/common_test/test/ct_config_info_SUITE.erl | 4
-rw-r--r--  lib/common_test/test/ct_cover_SUITE.erl | 15
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE.erl | 221
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_local_SUITE.erl | 63
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_SUITE.erl | 75
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_nostop_SUITE.erl | 68
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_test_mod.erl | 4
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE_data/local.spec | 6
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE_data/remote.spec | 6
-rw-r--r--  lib/common_test/test/ct_cover_nomerge_SUITE_data/remote_nostop.spec | 6
-rw-r--r--  lib/common_test/test/ct_error_SUITE.erl | 52
-rw-r--r--  lib/common_test/test/ct_group_info_SUITE.erl | 30
-rw-r--r--  lib/common_test/test/ct_groups_spec_SUITE.erl | 75
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE.erl | 2
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl | 14
-rw-r--r--  lib/common_test/test/ct_master_SUITE.erl | 3
-rw-r--r--  lib/common_test/test/ct_repeat_1_SUITE.erl | 35
-rw-r--r--  lib/common_test/test/ct_repeat_testrun_SUITE.erl | 4
-rw-r--r--  lib/common_test/test/ct_sequence_1_SUITE.erl | 19
-rw-r--r--  lib/common_test/test/ct_skip_SUITE.erl | 118
-rw-r--r--  lib/common_test/test/ct_surefire_SUITE.erl | 2
-rw-r--r--  lib/common_test/test/ct_telnet_SUITE.erl | 60
-rw-r--r--  lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl | 105
-rw-r--r--  lib/common_test/test/ct_test_server_if_1_SUITE.erl | 8
-rw-r--r--  lib/common_test/test/ct_testspec_1_SUITE.erl | 92
-rw-r--r--  lib/common_test/test/telnet_server.erl | 108
-rw-r--r--  lib/common_test/vsn.mk | 2
54 files changed, 1840 insertions, 747 deletions
diff --git a/lib/common_test/configure.in b/lib/common_test/configure.in
index b2e6ad997a..b2e6ad997a 100755..100644
--- a/lib/common_test/configure.in
+++ b/lib/common_test/configure.in
diff --git a/lib/common_test/doc/src/cover_chapter.xml b/lib/common_test/doc/src/cover_chapter.xml
index a215c8c2f3..accb94e1a9 100644
--- a/lib/common_test/doc/src/cover_chapter.xml
+++ b/lib/common_test/doc/src/cover_chapter.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2006</year><year>2013</year>
+ <year>2006</year><year>2014</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -81,10 +81,7 @@
specify that previously exported data should be imported and
included in the analysis for a test (you can specify multiple
import files). This way it is possible to analyse total code coverage
- without necessarily running all tests at once. Note that even if
- you run separate tests in one test run, code coverage data will
- not be passed on from one test to another unless you specify an
- export file for Common Test to use for this purpose.</p>
+ without necessarily running all tests at once.</p>
<p>To activate the code coverage support, you simply specify the
name of the cover specification file as you start Common Test.
@@ -266,10 +263,20 @@ ct_cover:cross_cover_analyse(Level, [{s1,S1LogDir},{s2,S2LogDir}]).</code>
<section>
<title>Logging</title>
- <p>To view the result of a code coverage test, follow the
- "Coverage log" link on the test suite results page. This
- takes you to the code coverage overview page. If you have
- successfully performed a detailed coverage analysis, you
+ <p>To view the result of a code coverage test, click the button
+ labeled "COVER LOG" in the top level index page for the test run.</p>
+
+ <p>Prior to Erlang/OTP 17.1, if your test run consisted of
+ multiple tests, cover would be started and stopped for each test
+ within the test run. Separate logs would be available via the
+ "Coverage log" link on the test suite result pages. These links
+ are still available, but now they all point to the same page as
+ the button on the top level index page. The log contains the
+ accumulated results for the complete test run. See the release
+ notes for more information about this change.</p>
+
+ <p>The button takes you to the code coverage overview page. If you
+ have successfully performed a detailed coverage analysis, you
find links to each individual module coverage page here.</p>
<p>If cross cover analysis has been performed, and there are
diff --git a/lib/common_test/doc/src/ct_hooks.xml b/lib/common_test/doc/src/ct_hooks.xml
index 859ff9df14..cab6dfea51 100644
--- a/lib/common_test/doc/src/ct_hooks.xml
+++ b/lib/common_test/doc/src/ct_hooks.xml
@@ -450,12 +450,15 @@
</func>
<func>
- <name>Module:on_tc_fail(TestcaseName, Reason, CTHState) -&gt;
+ <name>Module:on_tc_fail(TestName, Reason, CTHState) -&gt;
NewCTHState</name>
<fsummary>Called after the CTH scope ends</fsummary>
<type>
- <v>TestcaseName = init_per_suite | end_per_suite |
- init_per_group | end_per_group | atom()</v>
+ <v>TestName = init_per_suite | end_per_suite |
+ {init_per_group,GroupName} | {end_per_group,GroupName} |
+ {FuncName,GroupName} | FuncName</v>
+ <v>FuncName = atom()</v>
+ <v>GroupName = atom()</v>
<v>Reason = term()</v>
<v>CTHState = NewCTHState = term()</v>
</type>
@@ -463,14 +466,16 @@
<desc>
<p> OPTIONAL </p>
- <p>This function is called whenever a testcase fails.
- It is called after the post function has been called for
- the testcase which failed. i.e.
- if init_per_suite fails this function is called after
+ <p>This function is called whenever a test case (or config function)
+ fails. It is called after the post function has been called for
+ the failed test case. I.e. if init_per_suite fails, this function
+ is called after
<seealso marker="#Module:post_init_per_suite-4">
- post_init_per_suite</seealso>, and if a testcase fails it is called
+ post_init_per_suite</seealso>, and if a test case fails, it is called
after <seealso marker="#Module:post_end_per_testcase-4">
- post_end_per_testcase</seealso>.</p>
+ post_end_per_testcase</seealso>. If the failed test case belongs
+ to a test case group, the first argument is a tuple
+ <c>{FuncName,GroupName}</c>, otherwise simply the function name.</p>
<p>The data which comes with the Reason follows the same format as the
<seealso marker="event_handler_chapter#failreason">FailReason
@@ -481,12 +486,14 @@
</func>
<func>
- <name>Module:on_tc_skip(TestcaseName, Reason, CTHState) -&gt;
+ <name>Module:on_tc_skip(TestName, Reason, CTHState) -&gt;
NewCTHState</name>
<fsummary>Called after the CTH scope ends</fsummary>
<type>
- <v>TestcaseName = end_per_suite | {init_per_group,GroupName} |
- {end_per_group,GroupName} | atom()</v>
+ <v>TestName = init_per_suite | end_per_suite |
+ {init_per_group,GroupName} | {end_per_group,GroupName} |
+ {FuncName,GroupName} | FuncName</v>
+ <v>FuncName = atom()</v>
<v>GroupName = atom()</v>
<v>Reason = {tc_auto_skip | tc_user_skip, term()}</v>
<v>CTHState = NewCTHState = term()</v>
@@ -495,14 +502,17 @@
<desc>
<p> OPTIONAL </p>
- <p>This function is called whenever a testcase is skipped.
- It is called after the post function has been called for the
- testcase which was skipped.
- i.e. if init_per_group is skipped this function is called after
- <seealso marker="#Module:post_init_per_suite-4">post_init_per_group
- </seealso>, and if a testcase is skipped it is called after
- <seealso marker="#Module:post_end_per_testcase-4">post_end_per_testcase
- </seealso>.</p>
+ <p>This function is called whenever a test case (or config function)
+ is skipped. It is called after the post function has been called
+ for the skipped test case. I.e. if init_per_group is skipped, this
+ function is called after
+ <seealso marker="#Module:post_init_per_group-4">
+ post_init_per_group</seealso>, and if a test case is skipped,
+ it is called after
+ <seealso marker="#Module:post_end_per_testcase-4">
+ post_end_per_testcase</seealso>. If the skipped test case belongs to a
+ test case group, the first argument is a tuple <c>{FuncName,GroupName}</c>,
+ otherwise simply the function name.</p>
<p>The data which comes with the Reason follows the same format as
<seealso marker="event_handler_chapter#tc_auto_skip">tc_auto_skip
diff --git a/lib/common_test/doc/src/ct_run.xml b/lib/common_test/doc/src/ct_run.xml
index 39259b092a..d8e79ca80e 100644
--- a/lib/common_test/doc/src/ct_run.xml
+++ b/lib/common_test/doc/src/ct_run.xml
@@ -108,6 +108,7 @@
EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
[-include InclDir1 InclDir2 .. InclDirN]
[-no_auto_compile]
+ [-abort_if_missing_suites]
[-muliply_timetraps Multiplier]
[-scale_timetraps]
[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
@@ -144,6 +145,7 @@
EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
[-include InclDir1 InclDir2 .. InclDirN]
[-no_auto_compile]
+ [-abort_if_missing_suites]
[-muliply_timetraps Multiplier]
[-scale_timetraps]
[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
@@ -171,6 +173,7 @@
[-decrypt_key Key] | [-decrypt_file KeyFile]
[-include InclDir1 InclDir2 .. InclDirN]
[-no_auto_compile]
+ [-abort_if_missing_suites]
[-muliply_timetraps Multiplier]
[-scale_timetraps]
[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
diff --git a/lib/common_test/doc/src/event_handler_chapter.xml b/lib/common_test/doc/src/event_handler_chapter.xml
index 47d0ba59fb..45f01c12ec 100644
--- a/lib/common_test/doc/src/event_handler_chapter.xml
+++ b/lib/common_test/doc/src/event_handler_chapter.xml
@@ -227,11 +227,13 @@
<item>
<marker id="tc_auto_skip"></marker>
- <c>#event{name = tc_auto_skip, data = {Suite,Func,Reason}}</c>
+ <c>#event{name = tc_auto_skip, data = {Suite,TestName,Reason}}</c>
<p><c>Suite = atom()</c>, the name of the suite.</p>
- <p><c>Func = atom() | {end_per_group,GroupName}</c>, the name of the test case
- or configuration function.</p>
- <p><c>GroupName = atom()</c>, name of the group.</p>
+ <p><c>TestName = init_per_suite | end_per_suite |
+ {init_per_group,GroupName} | {end_per_group,GroupName} |
+ {FuncName,GroupName} | FuncName</c></p>
+ <p><c>FuncName = atom()</c>, the name of the test case or configuration function.</p>
+ <p><c>GroupName = atom()</c>, the name of the test case group.</p>
<p><c>Reason = {failed,FailReason} |
{require_failed_in_suite0,RequireInfo}</c>,
reason for auto skipping <c>Func</c>.</p>
@@ -252,21 +254,26 @@
<c>init_per_group</c>, a failed <c>require</c> in <c>suite/0</c>, or a failed test case
in a sequence. Note that this event is never received as a result of a test case getting
skipped because of <c>init_per_testcase</c> failing, since that information is carried with
- the <c>tc_done</c> event.
+ the <c>tc_done</c> event. If a failed test case belongs to a test case group, the second
+ data element is a tuple <c>{FuncName,GroupName}</c>, otherwise simply the function name.
</p></item>
-
+
<item>
<marker id="tc_user_skip"></marker>
- <c>#event{name = tc_user_skip, data = {Suite,Func,Comment}}</c>
+ <c>#event{name = tc_user_skip, data = {Suite,TestName,Comment}}</c>
<p><c>Suite = atom()</c>, the name of the suite.</p>
- <p><c>Func = atom() | {end_per_group,GroupName}</c>, the name of the test case
- or configuration function.</p>
- <p><c>GroupName = atom()</c>, name of the group.</p>
+ <p><c>TestName = init_per_suite | end_per_suite |
+ {init_per_group,GroupName} | {end_per_group,GroupName} |
+ {FuncName,GroupName} | FuncName</c></p>
+ <p><c>FuncName = atom()</c>, the name of the test case or configuration function.</p>
+ <p><c>GroupName = atom()</c>, the name of the test case group.</p>
<p><c>Comment = string()</c>, reason for skipping the test case.</p>
<p>This event specifies that a test case has been skipped by the user.
It is only ever received if the skip was declared in a test specification.
Otherwise, user skip information is received as a <c>{skipped,SkipReason}</c>
- result in the <c>tc_done</c> event for the test case.
+ result in the <c>tc_done</c> event for the test case. If a skipped test case belongs
+ to a test case group, the second data element is a tuple <c>{FuncName,GroupName}</c>,
+ otherwise simply the function name.
</p></item>
<item><c>#event{name = test_stats, data = {Ok,Failed,Skipped}}</c>
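
A minimal gen_event sketch of a handler that consumes the extended tc_auto_skip data shown above (the module name demo_evh and the printouts are hypothetical; the #event{} record is assumed to come from common_test's ct_event.hrl):

    -module(demo_evh).
    -behaviour(gen_event).
    -include_lib("common_test/include/ct_event.hrl").
    -export([init/1, handle_event/2, handle_call/2, handle_info/2,
             terminate/2, code_change/3]).

    init(_Args) -> {ok, []}.

    %% The second data element is {FuncName,GroupName} for test cases in a
    %% group, otherwise just the function name.
    handle_event(#event{name = tc_auto_skip,
                        data = {Suite, {FuncName, GroupName}, Reason}}, State) ->
        io:format("~w:~w (group ~w) auto-skipped: ~p~n",
                  [Suite, FuncName, GroupName, Reason]),
        {ok, State};
    handle_event(#event{name = tc_auto_skip,
                        data = {Suite, FuncName, Reason}}, State) ->
        io:format("~w:~w auto-skipped: ~p~n", [Suite, FuncName, Reason]),
        {ok, State};
    handle_event(_Event, State) ->
        {ok, State}.

    handle_call(_Req, State) -> {ok, ok, State}.
    handle_info(_Info, State) -> {ok, State}.
    terminate(_Arg, _State) -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.

The same two-clause pattern applies to tc_user_skip. The handler is installed with the -event_handler flag or the event_handler start option.
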
diff --git a/lib/common_test/doc/src/notes.xml b/lib/common_test/doc/src/notes.xml
index f10d5f85bf..ddfeb0964b 100644
--- a/lib/common_test/doc/src/notes.xml
+++ b/lib/common_test/doc/src/notes.xml
@@ -32,6 +32,184 @@
<file>notes.xml</file>
</header>
+<section><title>Common_Test 1.8</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ The error generated if a test case process received an
+ exit from a linked process while executing
+ init_per_testcase/2, was handled incorrectly by Common
+ Test. The problem has been solved, and Common Test now
+ reports this type of error correctly, with proper error
+ reason and exit location as well.</p>
+ <p>
+ Own Id: OTP-11643</p>
+ </item>
+ <item>
+ <p>
+ Running a parallel test case group with two or more
+ instances of the same test case would result in identical
+ log file names, and one test case instance would
+ overwrite the log file of another. This problem has been
+ solved.</p>
+ <p>
+ Own Id: OTP-11644</p>
+ </item>
+ <item>
+ <p>
+ Application upgrade (appup) files are corrected for the
+ following applications: </p>
+ <p>
+ <c>asn1, common_test, compiler, crypto, debugger,
+ dialyzer, edoc, eldap, erl_docgen, et, eunit, gs, hipe,
+ inets, observer, odbc, os_mon, otp_mibs, parsetools,
+ percept, public_key, reltool, runtime_tools, ssh,
+ syntax_tools, test_server, tools, typer, webtool, wx,
+ xmerl</c></p>
+ <p>
+ A new test utility for testing appup files is added to
+ test_server. This is now used by most applications in
+ OTP.</p>
+ <p>
+ (Thanks to Tobias Schlager)</p>
+ <p>
+ Own Id: OTP-11744</p>
+ </item>
+ <item>
+ <p>
+ The <c>cth_surefire</c> hook would crash in
+ <c>pre_init_per_suite/3</c> if a previous hook returned
+ <c>{skip,Reason}</c> or <c>{fail,Reason}</c> instead of a
+ <c>Config</c> list. This error has been corrected, and
+ <c>cth_surefire</c> will now simply propagate the
+ received <c>InitData</c> value instead.</p>
+ <p>
+ Own Id: OTP-11811</p>
+ </item>
+ <item>
+ <p>
+ Specs of return values are corrected for
+ <c>ct_netconfc:get/2,3</c>,
+ <c>ct_netconfc:get_config/3,4</c>,
+ <c>ct_netconfc:action/2,3</c>,
+ <c>ct_netconfc:send_rpc/2,3</c> and
+ <c>ct_netconfc:send/2,3</c>.</p>
+ <p>
+ Own Id: OTP-11834 Aux Id: seq12574 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ ct_telnet can now log all communication taking place
+ during a telnet session. Previously, only information
+ about ct_telnet operations and commands, as well as
+ explicitly requested data from the server, was logged.</p>
+ <p>
+ Furthermore, a logging mechanism based on an Error Logger
+ event handler and a dedicated Common Test hook,
+ <c>cth_conn_log</c>, now makes it possible to print data
+ for individual connections to separate log files. Please
+ see the <c>ct_telnet</c> reference manual for more
+ information and examples.</p>
+ <p>
+ Important note: A new argument, <c>ConnName</c>, has been
+ added to the <c>unix_telnet:connect/5</c> callback
+ function. This forces users that use private ct_telnet
+ callback modules to update their code according to
+ <c>unix_telnet:connect/6</c>. Please see the
+ <c>unix_telnet</c> reference manual and source code
+ module for details.</p>
+ <p>
+ Own Id: OTP-11440 Aux Id: seq12457 </p>
+ </item>
+ <item>
+ <p>
+ A new timeout option has been introduced for the
+ <c>ct_telnet:expect/3</c> function. With
+ <c>{total_timeout,Time}</c> it's possible to set a time
+ limit for the complete expect operation. After
+ <c>Time</c> milliseconds, <c>expect/3</c> returns
+ <c>{error,timeout}</c>. The default value, used if
+ <c>total_timeout</c> is not specified, is infinity (i.e.
+ no time limit). Please see the <c>ct_telnet</c> reference
+ manual for more information.</p>
+ <p>
+ Own Id: OTP-11689</p>
+ </item>
+ <item>
+ <p>
+ Some function specs are corrected or moved and some edoc
+ comments are corrected in order to allow use of edoc.
+ (Thanks to Pierre Fenoll)</p>
+ <p>
+ Own Id: OTP-11702</p>
+ </item>
+ <item>
+ <p>
+ Test case group name information has been added to the
+ data sent with <c>tc_user_skip</c> and
+ <c>tc_auto_skip</c> event messages, as well as the data
+ passed in calls to the CT Hook functions
+ <c>on_tc_skip/3</c> and <c>on_tc_fail/3</c>. The
+ modification only affects the function name
+ element/argument. This value remains an atom if the test
+ case in question does not belong to a test case group.
+ Otherwise a tuple <c>{FuncName,GroupName}</c>
+ (<c>{atom(),atom()}</c>) is passed instead.</p>
+ <p>
+ Note that this change may (depending on the patterns used
+ for matching) require modifications of user event
+ handlers and hook modules. Please see the Event Handling
+ chapter in the Common Test User's Guide, and the
+ reference manual for <c>ct_hooks</c>, for details.</p>
+ <p>
+ Note also that the Test Server framework callback
+ function <c>report/2</c> has been modified. This change
+ only affects users with test frameworks interfacing Test
+ Server rather than Common Test. See the
+ <c>test_server_ctrl</c> reference manual for details.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-11732 Aux Id: seq12541 </p>
+ </item>
+ <item>
+ <p>
+ If Common Test can't prompt the user to abort or continue
+ the test run when one or more test suites fail to
+ compile, a new option,
+ <c>{abort_if_missing_suites,Bool}</c>, can be used to
+ specify whether it should proceed with the test run, or
+ stop execution. The default value of <c>Bool</c> is
+ <c>false</c> (i.e. to proceed even if suites are
+ missing).</p>
+ <p>
+ Own Id: OTP-11769</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Known Bugs and Problems</title>
+ <list>
+ <item>
+ <p>
+ common_test: Fix problems reported by Dialyzer.</p>
+ <p>
+ Own Id: OTP-11525</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Common_Test 1.7.4</title>
<section><title>Fixed Bugs and Malfunctions</title>
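
The total_timeout option described in the 1.8 notes above could be used roughly like this in a test case (a sketch; server1 is assumed to be a telnet target made available via the config file and ct:require):

    login_prompt(_Config) ->
        {ok, Conn} = ct_telnet:open(server1),
        %% give the whole expect operation at most 5 seconds;
        %% on expiry it returns {error,timeout}
        Result = ct_telnet:expect(Conn, ["login:"], [{total_timeout,5000}]),
        ct_telnet:close(Conn),
        case Result of
            {ok, _Match}    -> ok;
            {error, Reason} -> ct:fail(Reason)
        end.
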
diff --git a/lib/common_test/doc/src/run_test_chapter.xml b/lib/common_test/doc/src/run_test_chapter.xml
index a4a77ee400..864f82cb63 100644
--- a/lib/common_test/doc/src/run_test_chapter.xml
+++ b/lib/common_test/doc/src/run_test_chapter.xml
@@ -59,7 +59,15 @@
<p>If compilation should fail for one or more suites, the compilation errors
are printed to tty and the operator is asked if the test run should proceed
without the missing suites, or be aborted. If the operator chooses to proceed,
- it is noted in the HTML log which tests have missing suites.</p>
+ it is noted in the HTML log which tests have missing suites. If Common Test is
+ unable to prompt the user after compilation failure (if Common Test doesn't
+ control stdin), the test run will proceed automatically without the missing
+ suites. This behaviour can however be modified with the
+ <c><![CDATA[ct_run]]></c> flag <c><![CDATA[-abort_if_missing_suites]]></c>,
+ or the <c><![CDATA[ct:run_test/1]]></c> option
+ <c><![CDATA[{abort_if_missing_suites,TrueOrFalse}]]></c>. If
+ <c><![CDATA[abort_if_missing_suites]]></c> is set (to true), the test run
+ will stop immediately if some suites fail to compile.</p>
<p>Any help module (i.e. regular Erlang module with name not ending with
"_SUITE") that resides in the same test object directory as a suite
@@ -167,6 +175,7 @@
<seealso marker="ct_hooks_chapter#builtin_cths">Built-in Common Test Hooks</seealso>. Default is <c>true</c>.</item>
<item><c><![CDATA[-include]]></c>, specifies include directories (see above).</item>
<item><c><![CDATA[-no_auto_compile]]></c>, disables the automatic test suite compilation feature (see above).</item>
+ <item><c><![CDATA[-abort_if_missing_suites]]></c>, aborts the test run if one or more suites fail to compile (see above).</item>
<item><c><![CDATA[-multiply_timetraps <n>]]></c>, extends <seealso marker="write_test_chapter#timetraps">timetrap
timeout</seealso> values.</item>
<item><c><![CDATA[-scale_timetraps <bool>]]></c>, enables automatic <seealso marker="write_test_chapter#timetraps">timetrap
@@ -589,8 +598,8 @@
Common Test will either execute one test run per specification file, or
join the files and perform all tests within one single test run. The first
behaviour is the default one. The latter requires that the start
- flag/option <c>join_suites</c> is provided, e.g.
- <c>run_test -spec ./my_tests1.ts ./my_tests2.ts -join_suites</c>.</p>
+ flag/option <c>join_specs</c> is provided, e.g.
+ <c>run_test -spec ./my_tests1.ts ./my_tests2.ts -join_specs</c>.</p>
<p>Joining a number of specifications, or running them separately, can
also be accomplished with (and may be combined with) test specification
@@ -744,6 +753,9 @@
{auto_compile, Bool},
{auto_compile, NodeRefs, Bool},
+ {abort_if_missing_suites, Bool},
+ {abort_if_missing_suites, NodeRefs, Bool},
+
{config, ConfigFiles}.
{config, ConfigDir, ConfigBaseNames}.
{config, NodeRefs, ConfigFiles}.
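
A short sketch of the new option in use (the directory name is hypothetical); the same thing can be done from the OS shell with ct_run -dir ./my_tests -abort_if_missing_suites:

    %% stop immediately if any suite in ./my_tests fails to compile
    ct:run_test([{dir, "./my_tests"},
                 {abort_if_missing_suites, true}]).
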
diff --git a/lib/common_test/priv/run_test.in b/lib/common_test/priv/run_test.in
index 1508751e4f..1508751e4f 100755..100644
--- a/lib/common_test/priv/run_test.in
+++ b/lib/common_test/priv/run_test.in
diff --git a/lib/common_test/src/common_test.app.src b/lib/common_test/src/common_test.app.src
index 18c1dec784..e28751fb59 100644
--- a/lib/common_test/src/common_test.app.src
+++ b/lib/common_test/src/common_test.app.src
@@ -62,5 +62,10 @@
ct_master,
ct_master_logs]},
{applications, [kernel,stdlib]},
- {env, []}]}.
+ {env, []},
+ {runtime_dependencies,["xmerl-1.3.7","webtool-0.8.10","tools-2.6.14",
+ "test_server-3.7","stdlib-2.0","ssh-3.0.1",
+ "snmp-4.25.1","sasl-2.4","runtime_tools-1.8.14",
+ "kernel-3.0","inets-5.10","erts-6.0",
+ "debugger-4.0","crypto-3.3","compiler-5.0"]}]}.
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index e6732f7fc7..85afdc7834 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -150,7 +150,8 @@ run(TestDirs) ->
%%% {silent_connections,Conns} | {stylesheet,CSSFile} |
%%% {cover,CoverSpecFile} | {cover_stop,Bool} | {step,StepOpts} |
%%% {event_handler,EventHandlers} | {include,InclDirs} |
-%%% {auto_compile,Bool} | {create_priv_dir,CreatePrivDir} |
+%%% {auto_compile,Bool} | {abort_if_missing_suites,Bool} |
+%%% {create_priv_dir,CreatePrivDir} |
%%% {multiply_timetraps,M} | {scale_timetraps,Bool} |
%%% {repeat,N} | {duration,DurTime} | {until,StopTime} |
%%% {force_stop,ForceStop} | {decrypt,DecryptKeyOrFile} |
@@ -772,7 +773,7 @@ comment(Format, Args) when is_list(Format), is_list(Args) ->
send_html_comment(Comment) ->
Html = "<font color=\"green\">" ++ Comment ++ "</font>",
- ct_util:set_testdata({comment,Html}),
+ ct_util:set_testdata({{comment,group_leader()},Html}),
test_server:comment(Html).
%%%-----------------------------------------------------------------
diff --git a/lib/common_test/src/ct_conn_log_h.erl b/lib/common_test/src/ct_conn_log_h.erl
index d733df27dc..cff02a46d9 100644
--- a/lib/common_test/src/ct_conn_log_h.erl
+++ b/lib/common_test/src/ct_conn_log_h.erl
@@ -204,13 +204,8 @@ pretty_head({{{Y,Mo,D},{H,Mi,S}},MicroS},ConnMod,Text0) ->
micro2milli(MicroS)]).
pretty_title(#conn_log{client=Client}=Info) ->
- case actionstr(Info) of
- {no_server,Action} ->
- io_lib:format("= Client ~w ~s ",[Client,Action]);
- Action ->
- io_lib:format("= Client ~w ~s ~ts ",[Client,Action,
- serverstr(Info)])
- end.
+ io_lib:format("= Client ~w ~s ~ts ",
+ [Client,actionstr(Info),serverstr(Info)]).
actionstr(#conn_log{action=send}) -> "----->";
actionstr(#conn_log{action=cmd}) -> "----->";
diff --git a/lib/common_test/src/ct_cover.erl b/lib/common_test/src/ct_cover.erl
index ae671c750a..cf2860ae25 100644
--- a/lib/common_test/src/ct_cover.erl
+++ b/lib/common_test/src/ct_cover.erl
@@ -47,18 +47,21 @@ add_nodes(Nodes) ->
undefined ->
{error,cover_not_running};
_ ->
- {File,Nodes0,Import,Export,AppInfo} = ct_util:get_testdata(cover),
+ Nodes0 = cover:which_nodes(),
Nodes1 = [Node || Node <- Nodes,
lists:member(Node,Nodes0) == false],
ct_logs:log("COVER INFO",
"Adding nodes to cover test: ~w", [Nodes1]),
case cover:start(Nodes1) of
- Result = {ok,_} ->
- ct_util:set_testdata({cover,{File,Nodes1++Nodes0,
- Import,Export,AppInfo}}),
-
+ Result = {ok,StartedNodes} ->
+ ct_logs:log("COVER INFO",
+ "Successfully added nodes to cover test: ~w",
+ [StartedNodes]),
Result;
Error ->
+ ct_logs:log("COVER INFO",
+ "Failed to add nodes to cover test: ~tp",
+ [Error]),
Error
end
end.
@@ -81,19 +84,20 @@ remove_nodes(Nodes) ->
undefined ->
{error,cover_not_running};
_ ->
- {File,Nodes0,Import,Export,AppInfo} = ct_util:get_testdata(cover),
+ Nodes0 = cover:which_nodes(),
ToRemove = [Node || Node <- Nodes, lists:member(Node,Nodes0)],
ct_logs:log("COVER INFO",
- "Removing nodes from cover test: ~w", [ToRemove]),
+ "Removing nodes from cover test: ~w", [ToRemove]),
case cover:stop(ToRemove) of
ok ->
- Nodes1 = lists:foldl(fun(N,Deleted) ->
- lists:delete(N,Deleted)
- end, Nodes0, ToRemove),
- ct_util:set_testdata({cover,{File,Nodes1,
- Import,Export,AppInfo}}),
+ ct_logs:log("COVER INFO",
+ "Successfully removed nodes from cover test.",
+ []),
ok;
Error ->
+ ct_logs:log("COVER INFO",
+ "Failed to remove nodes from cover test: ~tp",
+ [Error]),
Error
end
end.
@@ -149,7 +153,7 @@ get_spec_test(File) ->
{value,{_,[Exp]}} ->
filename:absname(Exp);
_ ->
- []
+ undefined
end,
Nodes =
case lists:keysearch(nodes, 1, Terms) of
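
The reworked add_nodes/1 and remove_nodes/1 above take the current node set from cover:which_nodes() instead of from cached test data; usage from a test case is unchanged. A sketch, assuming the node name has been stored under the hypothetical config key cover_node earlier in the suite:

    dynamic_cover_node(Config) ->
        Node = proplists:get_value(cover_node, Config),
        {ok, _Started} = ct_cover:add_nodes([Node]),
        %% ... run code on Node that should be included in the coverage ...
        ok = ct_cover:remove_nodes([Node]).
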
diff --git a/lib/common_test/src/ct_framework.erl b/lib/common_test/src/ct_framework.erl
index 580588fbd2..e8ea7992b4 100644
--- a/lib/common_test/src/ct_framework.erl
+++ b/lib/common_test/src/ct_framework.erl
@@ -249,8 +249,8 @@ init_tc2(Mod,Suite,Func,SuiteInfo,MergeResult,Config) ->
end
end.
-ct_suite_init(Suite, Func, PostInitHook, Config) when is_list(Config) ->
- case ct_hooks:init_tc(Suite, Func, Config) of
+ct_suite_init(Suite, FuncSpec, PostInitHook, Config) when is_list(Config) ->
+ case ct_hooks:init_tc(Suite, FuncSpec, Config) of
NewConfig when is_list(NewConfig) ->
PostInitHookResult = do_post_init_hook(PostInitHook, NewConfig),
{ok, [PostInitHookResult ++ NewConfig]};
@@ -657,13 +657,21 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) ->
_ ->
ok
end,
- ct_util:delete_testdata(comment),
+ if Func == end_per_group; Func == end_per_suite ->
+ %% clean up any saved comments
+ ct_util:match_delete_testdata({comment,'_'});
+ true ->
+ %% attempt to delete any saved comment for this TC
+ case process_info(TCPid, group_leader) of
+ {group_leader,TCGL} ->
+ ct_util:delete_testdata({comment,TCGL});
+ _ ->
+ ok
+ end
+ end,
ct_util:delete_suite_data(last_saved_config),
- FuncSpec = case group_or_func(Func,Args) of
- {_,_GroupName,_} = Group -> Group;
- _ -> Func
- end,
+ FuncSpec = group_or_func(Func,Args),
{Result1,FinalNotify} =
case ct_hooks:end_tc(
@@ -853,7 +861,7 @@ error_notification(Mod,Func,_Args,{Error,Loc}) ->
_ ->
%% this notification comes from the test case process, so
%% we can add error info to comment with test_server:comment/1
- case ct_util:get_testdata(comment) of
+ case ct_util:get_testdata({comment,group_leader()}) of
undefined ->
test_server:comment(ErrorHtml);
Comment ->
@@ -1236,38 +1244,7 @@ report(What,Data) ->
ct_logs:make_all_suites_index({TestName,RunDir}),
ok;
tests_start ->
- case ct_util:get_testdata(cover) of
- undefined ->
- ok;
- {_CovFile,_CovNodes,CovImport,CovExport,_CovAppData} ->
- %% Always import cover data from files specified by CovImport
- %% if no CovExport defined. If CovExport is defined, only
- %% import from CovImport files initially, then use CovExport
- %% to pass coverdata between proceeding tests (in the same run).
- Imps =
- case CovExport of
- [] -> % don't export data between tests
- CovImport;
- _ ->
- case filelib:is_file(CovExport) of
- true ->
- [CovExport];
- false ->
- CovImport
- end
- end,
- lists:foreach(
- fun(Imp) ->
- case cover:import(Imp) of
- ok ->
- ok;
- {error,Reason} ->
- ct_logs:log("COVER INFO",
- "Importing cover data from: ~ts fails! "
- "Reason: ~p", [Imp,Reason])
- end
- end, Imps)
- end;
+ ok;
tests_done ->
ok;
severe_error ->
@@ -1277,28 +1254,35 @@ report(What,Data) ->
ct_util:set_testdata({What,Data}),
ok;
tc_start ->
- %% Data = {{Suite,Func},LogFileName}
+ %% Data = {{Suite,{Func,GroupName}},LogFileName}
+ Data1 = case Data of
+ {{Suite,{Func,undefined}},LFN} -> {{Suite,Func},LFN};
+ _ -> Data
+ end,
ct_event:sync_notify(#event{name=tc_logfile,
node=node(),
- data=Data}),
+ data=Data1}),
ok;
tc_done ->
- {_Suite,Case,Result} = Data,
+ {Suite,{Func,GrName},Result} = Data,
+ Data1 = if GrName == undefined -> {Suite,Func,Result};
+ true -> Data
+ end,
case Result of
{failed, _} ->
- ct_hooks:on_tc_fail(What, Data);
+ ct_hooks:on_tc_fail(What, Data1);
{skipped,{failed,{_,init_per_testcase,_}}} ->
- ct_hooks:on_tc_skip(tc_auto_skip, Data);
+ ct_hooks:on_tc_skip(tc_auto_skip, Data1);
{skipped,{require_failed,_}} ->
- ct_hooks:on_tc_skip(tc_auto_skip, Data);
+ ct_hooks:on_tc_skip(tc_auto_skip, Data1);
{skipped,_} ->
- ct_hooks:on_tc_skip(tc_user_skip, Data);
+ ct_hooks:on_tc_skip(tc_user_skip, Data1);
{auto_skipped,_} ->
- ct_hooks:on_tc_skip(tc_auto_skip, Data);
+ ct_hooks:on_tc_skip(tc_auto_skip, Data1);
_Else ->
ok
end,
- case {Case,Result} of
+ case {Func,Result} of
{init_per_suite,_} ->
ok;
{end_per_suite,_} ->
@@ -1327,20 +1311,17 @@ report(What,Data) ->
tc_user_skip ->
%% test case or config function specified as skipped in testspec,
%% or init config func for suite/group has returned {skip,Reason}
- %% Data = {Suite,Case,Comment} |
- %% {Suite,{GroupConfigFunc,GroupName},Comment}
+ %% Data = {Suite,{Func,GroupName},Comment}
{Func,Data1} = case Data of
- {Suite,{ConfigFunc,undefined},Cmt} ->
- {ConfigFunc,{Suite,ConfigFunc,Cmt}};
- {_,{ConfigFunc,_},_} -> {ConfigFunc,Data};
- {_,Case,_} -> {Case,Data}
+ {Suite,{F,undefined},Comment} ->
+ {F,{Suite,F,Comment}};
+ D = {_,{F,_},_} ->
+ {F,D}
end,
-
ct_event:sync_notify(#event{name=tc_user_skip,
node=node(),
data=Data1}),
ct_hooks:on_tc_skip(What, Data1),
-
if Func /= init_per_suite, Func /= init_per_group,
Func /= end_per_suite, Func /= end_per_group ->
add_to_stats(user_skipped);
@@ -1350,13 +1331,12 @@ report(What,Data) ->
tc_auto_skip ->
%% test case skipped because of error in config function, or
%% config function skipped because of error in info function
- %% Data = {Suite,Case,Comment} |
- %% {Suite,{GroupConfigFunc,GroupName},Comment}
+ %% Data = {Suite,{Func,GroupName},Comment}
{Func,Data1} = case Data of
- {Suite,{ConfigFunc,undefined},Cmt} ->
- {ConfigFunc,{Suite,ConfigFunc,Cmt}};
- {_,{ConfigFunc,_},_} -> {ConfigFunc,Data};
- {_,Case,_} -> {Case,Data}
+ {Suite,{F,undefined},Comment} ->
+ {F,{Suite,F,Comment}};
+ D = {_,{F,_},_} ->
+ {F,D}
end,
%% this test case does not have a log, so printouts
%% from event handlers should end up in the main log
@@ -1364,7 +1344,6 @@ report(What,Data) ->
node=node(),
data=Data1}),
ct_hooks:on_tc_skip(What, Data1),
-
if Func /= end_per_suite,
Func /= end_per_group ->
add_to_stats(auto_skipped);
diff --git a/lib/common_test/src/ct_gen_conn.erl b/lib/common_test/src/ct_gen_conn.erl
index 078d6b1a44..56082086f6 100644
--- a/lib/common_test/src/ct_gen_conn.erl
+++ b/lib/common_test/src/ct_gen_conn.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -307,7 +307,8 @@ call(Pid, Msg, Timeout) ->
end.
return({To,Ref},Result) ->
- To ! {Ref, Result}.
+ To ! {Ref, Result},
+ ok.
init_gen(Parent,Opts) ->
process_flag(trap_exit,true),
@@ -344,7 +345,7 @@ loop(Opts) ->
link(NewPid),
put(conn_pid,NewPid),
loop(Opts#gen_opts{conn_pid=NewPid,
- cb_state=NewState});
+ cb_state=NewState});
Error ->
ct_util:unregister_connection(self()),
log("Reconnect failed. Giving up!",
diff --git a/lib/common_test/src/ct_hooks.erl b/lib/common_test/src/ct_hooks.erl
index e845e9e908..df4c98d9d1 100644
--- a/lib/common_test/src/ct_hooks.erl
+++ b/lib/common_test/src/ct_hooks.erl
@@ -64,11 +64,16 @@ terminate(Hooks) ->
%% @doc Called as each test case is started. This includes all configuration
%% tests.
--spec init_tc(Mod :: atom(), Func :: atom(), Args :: list()) ->
+-spec init_tc(Mod :: atom(),
+ FuncSpec :: atom() |
+ {ConfigFunc :: init_per_group | end_per_group,
+ GroupName :: atom(),
+ Properties :: list()},
+ Args :: list()) ->
NewConfig :: proplists:proplist() |
- {skip, Reason :: term()} |
- {auto_skip, Reason :: term()} |
- {fail, Reason :: term()}.
+ {skip, Reason :: term()} |
+ {auto_skip, Reason :: term()} |
+ {fail, Reason :: term()}.
init_tc(Mod, init_per_suite, Config) ->
Info = try proplists:get_value(ct_hooks, Mod:suite(),[]) of
@@ -82,8 +87,8 @@ init_tc(Mod, init_per_suite, Config) ->
call(fun call_generic/3, Config ++ Info, [pre_init_per_suite, Mod]);
init_tc(Mod, end_per_suite, Config) ->
call(fun call_generic/3, Config, [pre_end_per_suite, Mod]);
-init_tc(Mod, {init_per_group, GroupName, Opts}, Config) ->
- maybe_start_locker(Mod, GroupName, Opts),
+init_tc(Mod, {init_per_group, GroupName, Properties}, Config) ->
+ maybe_start_locker(Mod, GroupName, Properties),
call(fun call_generic/3, Config, [pre_init_per_group, GroupName]);
init_tc(_Mod, {end_per_group, GroupName, _}, Config) ->
call(fun call_generic/3, Config, [pre_end_per_group, GroupName]);
@@ -93,15 +98,18 @@ init_tc(_Mod, TC, Config) ->
%% @doc Called as each test case is completed. This includes all configuration
%% tests.
-spec end_tc(Mod :: atom(),
- Func :: atom(),
+ FuncSpec :: atom() |
+ {ConfigFunc :: init_per_group | end_per_group,
+ GroupName :: atom(),
+ Properties :: list()},
Args :: list(),
Result :: term(),
- Resturn :: term()) ->
+ Return :: term()) ->
NewConfig :: proplists:proplist() |
- {skip, Reason :: term()} |
- {auto_skip, Reason :: term()} |
- {fail, Reason :: term()} |
- ok | '$ct_no_change'.
+ {skip, Reason :: term()} |
+ {auto_skip, Reason :: term()} |
+ {fail, Reason :: term()} |
+ ok | '$ct_no_change'.
end_tc(Mod, init_per_suite, Config, _Result, Return) ->
call(fun call_generic/3, Return, [post_init_per_suite, Mod, Config],
@@ -112,18 +120,20 @@ end_tc(Mod, end_per_suite, Config, Result, _Return) ->
end_tc(_Mod, {init_per_group, GroupName, _}, Config, _Result, Return) ->
call(fun call_generic/3, Return, [post_init_per_group, GroupName, Config],
'$ct_no_change');
-end_tc(Mod, {end_per_group, GroupName, Opts}, Config, Result, _Return) ->
+end_tc(Mod, {end_per_group, GroupName, Properties}, Config, Result, _Return) ->
Res = call(fun call_generic/3, Result,
[post_end_per_group, GroupName, Config], '$ct_no_change'),
- maybe_stop_locker(Mod, GroupName,Opts),
+ maybe_stop_locker(Mod, GroupName, Properties),
Res;
end_tc(_Mod, TC, Config, Result, _Return) ->
call(fun call_generic/3, Result, [post_end_per_testcase, TC, Config],
'$ct_no_change').
+%% Case = TestCase | {TestCase,GroupName}
on_tc_skip(How, {Suite, Case, Reason}) ->
call(fun call_cleanup/3, {How, Reason}, [on_tc_skip, Suite, Case]).
+%% Case = TestCase | {TestCase,GroupName}
on_tc_fail(_How, {Suite, Case, Reason}) ->
call(fun call_cleanup/3, Reason, [on_tc_fail, Suite, Case]).
diff --git a/lib/common_test/src/ct_logs.erl b/lib/common_test/src/ct_logs.erl
index a4ad65c0a4..43eabb18d5 100644
--- a/lib/common_test/src/ct_logs.erl
+++ b/lib/common_test/src/ct_logs.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -62,6 +62,7 @@
-define(totals_name, "totals.info").
-define(log_cache_name, "ct_log_cache").
-define(misc_io_log, "misc_io.log.html").
+-define(coverlog_name, "cover.html"). % must be same as in test_server_ctrl
-define(table_color1,"#ADD8E6").
-define(table_color2,"#E4F0FE").
@@ -1304,7 +1305,8 @@ total_row(Success, Fail, UserSkip, AutoSkip, NotBuilt, All) ->
"<td align=right>",integer_to_list(AllSkip),
" (",UserSkipStr,"/",AutoSkipStr,")</td>\n",
"<td align=right><b>",integer_to_list(NotBuilt),"<b></td>\n",
- AllInfo, "</tr>\n</tfoot>\n"].
+ AllInfo, "</tr>\n",
+ xhtml("","</tfoot>\n")].
not_built(_BaseName,_LogDir,_All,[]) ->
0;
@@ -1368,6 +1370,19 @@ index_header(Label, StartTime) ->
format_time(StartTime),
{[],[1],[2,3,4,5]})
end,
+ Cover =
+ case filelib:is_regular(?abs(?coverlog_name)) of
+ true ->
+ xhtml(["<p><a href=\"",?coverlog_name,
+ "\">Cover Log</a></p><br>\n"],
+ ["<br />"
+ "<div id=\"button_holder\" class=\"btn\">\n"
+ "<a href=\"",?coverlog_name,
+ "\">COVER LOG</a>\n</div><br /><br />"]);
+ false ->
+ xhtml("<br>\n", "<br /><br /><br />\n")
+ end,
+
[Head |
["<center>\n",
xhtml(["<p><a href=\"",?ct_log_name,
@@ -1375,8 +1390,8 @@ index_header(Label, StartTime) ->
["<br />"
"<div id=\"button_holder\" class=\"btn\">\n"
"<a href=\"",?ct_log_name,
- "\">COMMON TEST FRAMEWORK LOG</a>\n</div>"]),
- xhtml("<br>\n", "<br /><br /><br />\n"),
+ "\">COMMON TEST FRAMEWORK LOG</a>\n</div><br>\n"]),
+ Cover,
xhtml(["<table border=\"3\" cellpadding=\"5\" "
"bgcolor=\"",?table_color3,"\">\n"],
["<table id=\"",?sortable_table_name,"\">\n",
@@ -1519,7 +1534,8 @@ all_suites_index_footer() ->
xhtml("<br><br>\n", "<br /><br />\n") | footer()].
all_runs_index_footer() ->
- ["</tbody>\n</table>\n",
+ [xhtml("", "</tbody>\n"),
+ "</table>\n",
"</center>\n",
xhtml("<br><br>\n", "<br /><br />\n") | footer()].
@@ -1676,7 +1692,7 @@ config_table(Vars) ->
config_table_header() ->
[
xhtml(["<h2>Configuration</h2>\n"
- "<table border=\"3\" cellpadding=\"5\" bgcolor=\"",?table_color1,"\"\n"],
+ "<table border=\"3\" cellpadding=\"5\" bgcolor=\"",?table_color1,"\">\n"],
["<h4>CONFIGURATION</h4>\n",
"<table id=\"",?sortable_table_name,"\">\n",
"<thead>\n"]),
@@ -1692,7 +1708,7 @@ config_table1([{Key,Value}|Vars]) ->
"<td>", io_lib:format("~p",[Value]), "</td>\n</tr>\n"]) |
config_table1(Vars)];
config_table1([]) ->
- ["</tbody>\n</table>\n"].
+ [xhtml("","</tbody>\n"),"</table>\n"].
make_all_runs_index(When) ->
@@ -1842,14 +1858,27 @@ dir_diff_all_runs(LogDirs=[Dir|Dirs], Cached=[CElem|CElems],
LatestInCache, AllRunsDirs) ->
DirDate = datestr_from_dirname(Dir),
if DirDate > LatestInCache ->
- %% Dir is a new run entry
+ %% Dir is a new run entry (not cached)
dir_diff_all_runs(Dirs, Cached, LatestInCache,
[Dir|AllRunsDirs]);
DirDate == LatestInCache, CElems /= [] ->
- %% Dir is an existing run entry
+ %% Dir is an existing (cached) run entry
+
+ %% Only add the cached element instead of Dir if the totals
+ %% are "non-empty" (a test might be executing on a different
+ %% node and results haven't been saved yet)
+ ElemToAdd =
+ case CElem of
+ {_CDir,{_NodeStr,_Label,_Logs,{0,0,0,0,0}},_IxLink} ->
+ %% "empty" element in cache - this could be an
+ %% incomplete test and should be checked again
+ Dir;
+ _ ->
+ CElem
+ end,
dir_diff_all_runs(Dirs, CElems,
datestr_from_dirname(element(1,hd(CElems))),
- [CElem|AllRunsDirs]);
+ [ElemToAdd|AllRunsDirs]);
DirDate == LatestInCache, CElems == [] ->
%% we're done, Dirs must all be new
lists:reverse(Dirs)++[CElem|AllRunsDirs];
diff --git a/lib/common_test/src/ct_netconfc.erl b/lib/common_test/src/ct_netconfc.erl
index 35920ec1dc..a3861dc745 100644
--- a/lib/common_test/src/ct_netconfc.erl
+++ b/lib/common_test/src/ct_netconfc.erl
@@ -1,7 +1,7 @@
%%----------------------------------------------------------------------
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2012-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2012-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -536,7 +536,7 @@ send(Client, SimpleXml) ->
Client :: client(),
SimpleXml :: simple_xml(),
Timeout :: timeout(),
- Result :: ok | {error,error_reason()}.
+ Result :: simple_xml() | {error,error_reason()}.
%% @doc Send an XML document to the server.
%%
%% The given XML document is sent as is to the server. This function
@@ -556,7 +556,7 @@ send_rpc(Client, SimpleXml) ->
Client :: client(),
SimpleXml :: simple_xml(),
Timeout :: timeout(),
- Result :: ok | {error,error_reason()}.
+ Result :: [simple_xml()] | {error,error_reason()}.
%% @doc Send a Netconf <code>rpc</code> request to the server.
%%
%% The given XML document is wrapped in a valid Netconf
@@ -635,7 +635,7 @@ get(Client, Filter) ->
Client :: client(),
Filter :: simple_xml() | xpath(),
Timeout :: timeout(),
- Result :: {ok,simple_xml()} | {error,error_reason()}.
+ Result :: {ok,[simple_xml()]} | {error,error_reason()}.
%% @doc Get data.
%%
%% This operation returns both configuration and state data from the
@@ -661,7 +661,7 @@ get_config(Client, Source, Filter) ->
Source :: netconf_db(),
Filter :: simple_xml() | xpath(),
Timeout :: timeout(),
- Result :: {ok,simple_xml()} | {error,error_reason()}.
+ Result :: {ok,[simple_xml()]} | {error,error_reason()}.
%% @doc Get configuration data.
%%
%% To be able to access another source than `running', the server
@@ -759,7 +759,7 @@ action(Client,Action) ->
Client :: client(),
Action :: simple_xml(),
Timeout :: timeout(),
- Result :: {ok,simple_xml()} | {error,error_reason()}.
+ Result :: {ok,[simple_xml()]} | {error,error_reason()}.
%% @doc Execute an action.
%%
%% @end
@@ -1334,7 +1334,7 @@ handle_data(NewData,#state{connection=Connection,buff=Buff} = State) ->
%% first answer
P=#pending{tref=TRef,caller=Caller} =
lists:last(Pending),
- timer:cancel(TRef),
+ _ = timer:cancel(TRef),
Reason1 = {failed_to_parse_received_data,Reason},
ct_gen_conn:return(Caller,{error,Reason1}),
lists:delete(P,Pending)
@@ -1454,7 +1454,7 @@ decode({Tag,Attrs,_}=E, #state{connection=Connection,pending=Pending}=State) ->
{noreply,State#state{hello_status = {error,Reason}}}
end;
#pending{tref=TRef,caller=Caller} ->
- timer:cancel(TRef),
+ _ = timer:cancel(TRef),
case decode_hello(E) of
{ok,SessionId,Capabilities} ->
ct_gen_conn:return(Caller,ok),
@@ -1482,7 +1482,7 @@ decode({Tag,Attrs,_}=E, #state{connection=Connection,pending=Pending}=State) ->
case [P || P = #pending{msg_id=undefined,op=undefined} <- Pending] of
[#pending{tref=TRef,
caller=Caller}] ->
- timer:cancel(TRef),
+ _ = timer:cancel(TRef),
ct_gen_conn:return(Caller,E),
{noreply,State#state{pending=[]}};
_ ->
@@ -1504,7 +1504,7 @@ get_msg_id(Attrs) ->
decode_rpc_reply(MsgId,{_,Attrs,Content0}=E,#state{pending=Pending} = State) ->
case lists:keytake(MsgId,#pending.msg_id,Pending) of
{value, #pending{tref=TRef,op=Op,caller=Caller}, Pending1} ->
- timer:cancel(TRef),
+ _ = timer:cancel(TRef),
Content = forward_xmlns_attr(Attrs,Content0),
{CallerReply,{ServerReply,State2}} =
do_decode_rpc_reply(Op,Content,State#state{pending=Pending1}),
@@ -1519,7 +1519,7 @@ decode_rpc_reply(MsgId,{_,Attrs,Content0}=E,#state{pending=Pending} = State) ->
msg_id=undefined,
op=undefined,
caller=Caller}] ->
- timer:cancel(TRef),
+ _ = timer:cancel(TRef),
ct_gen_conn:return(Caller,E),
{noreply,State#state{pending=[]}};
_ ->
@@ -1862,10 +1862,7 @@ ssh_open(#options{host=Host,timeout=Timeout,port=Port,ssh=SshOpts,name=Name}) ->
end;
{error, Reason} ->
ssh:close(CM),
- {error,{ssh,could_not_open_channel,Reason}};
- Other ->
- %% Bug in ssh?? got {closed,0} here once...
- {error,{ssh,unexpected_from_session_channel,Other}}
+ {error,{ssh,could_not_open_channel,Reason}}
end;
{error,Reason} ->
{error,{ssh,could_not_connect_to_server,Reason}}
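
The spec corrections above align the documented return values with what the functions already return, e.g. {ok,[simple_xml()]} (a list) rather than {ok,simple_xml()} for get/2,3, get_config/3,4 and action/2,3. A sketch, assuming Client is a handle from ct_netconfc:open/1,2 and a server implementing the standard netconf-monitoring model:

    Filter = {'netconf-state',
              [{xmlns, "urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring"}],
              [{schemas, []}]},
    {ok, Results} = ct_netconfc:get(Client, Filter),
    %% Results is a list of simple_xml() elements
    true = is_list(Results).
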
diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl
index 7c797be03e..00d0aab507 100644
--- a/lib/common_test/src/ct_run.erl
+++ b/lib/common_test/src/ct_run.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -71,6 +71,7 @@
enable_builtin_hooks,
include = [],
auto_compile,
+ abort_if_missing_suites,
silent_connections = [],
stylesheet,
multiply_timetraps = 1,
@@ -246,9 +247,11 @@ script_start1(Parent, Args) ->
Vts = get_start_opt(vts, true, Args),
Shell = get_start_opt(shell, true, Args),
Cover = get_start_opt(cover, fun([CoverFile]) -> ?abs(CoverFile) end, Args),
- CoverStop = get_start_opt(cover_stop, fun([CS]) -> list_to_atom(CS) end, Args),
+ CoverStop = get_start_opt(cover_stop,
+ fun([CS]) -> list_to_atom(CS) end, Args),
LogDir = get_start_opt(logdir, fun([LogD]) -> LogD end, Args),
- LogOpts = get_start_opt(logopts, fun(Os) -> [list_to_atom(O) || O <- Os] end,
+ LogOpts = get_start_opt(logopts,
+ fun(Os) -> [list_to_atom(O) || O <- Os] end,
[], Args),
Verbosity = verbosity_args2opts(Args),
MultTT = get_start_opt(multiply_timetraps,
@@ -311,6 +314,12 @@ script_start1(Parent, Args) ->
application:set_env(common_test, auto_compile, false),
{false,[]}
end,
+
+ %% abort test run if some suites can't be compiled
+ AbortIfMissing = get_start_opt(abort_if_missing_suites,
+ fun([]) -> true;
+ ([Bool]) -> list_to_atom(Bool)
+ end, false, Args),
%% silent connections
SilentConns =
get_start_opt(silent_connections,
@@ -347,6 +356,7 @@ script_start1(Parent, Args) ->
ct_hooks = CTHooks,
enable_builtin_hooks = EnableBuiltinHooks,
auto_compile = AutoCompile,
+ abort_if_missing_suites = AbortIfMissing,
include = IncludeDirs,
silent_connections = SilentConns,
stylesheet = Stylesheet,
@@ -551,6 +561,9 @@ combine_test_opts(TS, Specs, Opts) ->
ACBool
end,
+ AbortIfMissing = choose_val(Opts#opts.abort_if_missing_suites,
+ TSOpts#opts.abort_if_missing_suites),
+
BasicHtml =
case choose_val(Opts#opts.basic_html,
TSOpts#opts.basic_html) of
@@ -578,6 +591,7 @@ combine_test_opts(TS, Specs, Opts) ->
enable_builtin_hooks = EnableBuiltinHooks,
stylesheet = Stylesheet,
auto_compile = AutoCompile,
+ abort_if_missing_suites = AbortIfMissing,
include = AllInclude,
multiply_timetraps = MultTT,
scale_timetraps = ScaleTT,
@@ -753,6 +767,7 @@ script_usage() ->
"\n\t[-verbosity GenVLvl | [CategoryVLvl1 .. CategoryVLvlN]]"
"\n\t[-include InclDir1 InclDir2 .. InclDirN]"
"\n\t[-no_auto_compile]"
+ "\n\t[-abort_if_missing_suites]"
"\n\t[-multiply_timetraps N]"
"\n\t[-scale_timetraps]"
"\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]"
@@ -775,6 +790,7 @@ script_usage() ->
"\n\t[-ct_hooks CTHook1 CTHook2 .. CTHookN]"
"\n\t[-include InclDir1 InclDir2 .. InclDirN]"
"\n\t[-no_auto_compile]"
+ "\n\t[-abort_if_missing_suites]"
"\n\t[-multiply_timetraps N]"
"\n\t[-scale_timetraps]"
"\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]"
@@ -799,6 +815,7 @@ script_usage() ->
"\n\t[-ct_hooks CTHook1 CTHook2 .. CTHookN]"
"\n\t[-include InclDir1 InclDir2 .. InclDirN]"
"\n\t[-no_auto_compile]"
+ "\n\t[-abort_if_missing_suites]"
"\n\t[-multiply_timetraps N]"
"\n\t[-scale_timetraps]"
"\n\t[-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]"
@@ -1026,6 +1043,10 @@ run_test2(StartOpts) ->
{ACBool,[]}
end,
+ %% abort test run if some suites can't be compiled
+ AbortIfMissing = get_start_opt(abort_if_missing_suites, value, false,
+ StartOpts),
+
%% decrypt config file
case proplists:get_value(decrypt, StartOpts) of
undefined ->
@@ -1067,6 +1088,7 @@ run_test2(StartOpts) ->
ct_hooks = CTHooks,
enable_builtin_hooks = EnableBuiltinHooks,
auto_compile = AutoCompile,
+ abort_if_missing_suites = AbortIfMissing,
include = Include,
silent_connections = SilentConns,
stylesheet = Stylesheet,
@@ -1401,6 +1423,7 @@ get_data_for_node(#testspec{label = Labels,
ct_hooks = CTHooks,
enable_builtin_hooks = EnableBuiltinHooks,
auto_compile = ACs,
+ abort_if_missing_suites = AiMSs,
include = Incl,
multiply_timetraps = MTs,
scale_timetraps = STs,
@@ -1435,6 +1458,7 @@ get_data_for_node(#testspec{label = Labels,
EvHandlers = [{H,A} || {N,H,A} <- EvHs, N==Node],
FiltCTHooks = [Hook || {N,Hook} <- CTHooks, N==Node],
AutoCompile = proplists:get_value(Node, ACs),
+ AbortIfMissing = proplists:get_value(Node, AiMSs),
Include = [I || {N,I} <- Incl, N==Node],
#opts{label = Label,
profile = Profile,
@@ -1451,6 +1475,7 @@ get_data_for_node(#testspec{label = Labels,
ct_hooks = FiltCTHooks,
enable_builtin_hooks = EnableBuiltinHooks,
auto_compile = AutoCompile,
+ abort_if_missing_suites = AbortIfMissing,
include = Include,
multiply_timetraps = MT,
scale_timetraps = ST,
@@ -1621,7 +1646,7 @@ do_run(Tests, Misc, LogDir, LogOpts) when is_list(Misc),
do_run(Tests, [], Opts#opts{logdir = LogDir}, []);
do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
- #opts{label = Label, profile = Profile, cover = Cover,
+ #opts{label = Label, profile = Profile,
verbosity = VLvls} = Opts,
%% label - used by ct_logs
TestLabel =
@@ -1645,22 +1670,6 @@ do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
non_existing ->
{error,no_path_to_test_server};
_ ->
- Opts1 = if Cover == undefined ->
- Opts;
- true ->
- case ct_cover:get_spec(Cover) of
- {error,Reason} ->
- exit({error,Reason});
- CoverSpec ->
- CoverStop =
- case Opts#opts.cover_stop of
- undefined -> true;
- Stop -> Stop
- end,
- Opts#opts{coverspec = CoverSpec,
- cover_stop = CoverStop}
- end
- end,
%% This env variable is used by test_server to determine
%% which framework it runs under.
case os:getenv("TEST_SERVER_FRAMEWORK") of
@@ -1686,7 +1695,7 @@ do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
_Pid ->
ct_util:set_testdata({starter,Opts#opts.starter}),
compile_and_run(Tests, Skip,
- Opts1#opts{verbosity=Verbosity}, Args)
+ Opts#opts{verbosity=Verbosity}, Args)
end
end.
@@ -1722,8 +1731,8 @@ compile_and_run(Tests, Skip, Opts, Args) ->
{SuiteErrs,HelpErrs} = auto_compile(TestSuites),
{TestSuites,SuiteErrs,SuiteErrs++HelpErrs}
end,
-
- case continue(AllMakeErrors) of
+
+ case continue(AllMakeErrors, Opts#opts.abort_if_missing_suites) of
true ->
SavedErrors = save_make_errors(SuiteMakeErrors),
ct_repeat:log_loop_info(Args),
@@ -2047,9 +2056,9 @@ final_skip([Skip|Skips], Final) ->
final_skip([], Final) ->
lists:reverse(Final).
-continue([]) ->
+continue([], _) ->
true;
-continue(_MakeErrors) ->
+continue(_MakeErrors, AbortIfMissingSuites) ->
io:nl(),
OldGl = group_leader(),
case set_group_leader_same_as_shell() of
@@ -2077,26 +2086,26 @@ continue(_MakeErrors) ->
true
end;
false -> % no shell process to use
- true
+ not AbortIfMissingSuites
end.
set_group_leader_same_as_shell() ->
%%! Locate the shell process... UGLY!!!
GS2or3 = fun(P) ->
- case process_info(P,initial_call) of
- {initial_call,{group,server,X}} when X == 2 ; X == 3 ->
- true;
- _ ->
- false
- end
- end,
+ case process_info(P,initial_call) of
+ {initial_call,{group,server,X}} when X == 2 ; X == 3 ->
+ true;
+ _ ->
+ false
+ end
+ end,
case [P || P <- processes(), GS2or3(P),
- true == lists:keymember(shell,1,
- element(2,process_info(P,dictionary)))] of
- [GL|_] ->
- group_leader(GL, self());
- [] ->
- false
+ true == lists:keymember(shell,1,
+ element(2,process_info(P,dictionary)))] of
+ [GL|_] ->
+ group_leader(GL, self());
+ [] ->
+ false
end.
check_and_add([{TestDir0,M,_} | Tests], Added, PA) ->
@@ -2121,67 +2130,11 @@ check_and_add([{TestDir0,M,_} | Tests], Added, PA) ->
check_and_add([], _, PA) ->
{ok,PA}.
-do_run_test(Tests, Skip, Opts) ->
+do_run_test(Tests, Skip, Opts0) ->
case check_and_add(Tests, [], []) of
{ok,AddedToPath} ->
ct_util:set_testdata({stats,{0,0,{0,0}}}),
- ct_util:set_testdata({cover,undefined}),
test_server_ctrl:start_link(local),
- case Opts#opts.coverspec of
- CovData={CovFile,
- CovNodes,
- _CovImport,
- CovExport,
- #cover{app = CovApp,
- level = CovLevel,
- excl_mods = CovExcl,
- incl_mods = CovIncl,
- cross = CovCross,
- src = _CovSrc}} ->
- ct_logs:log("COVER INFO",
- "Using cover specification file: ~ts~n"
- "App: ~w~n"
- "Cross cover: ~w~n"
- "Including ~w modules~n"
- "Excluding ~w modules",
- [CovFile,CovApp,CovCross,
- length(CovIncl),length(CovExcl)]),
-
- %% cover export file will be used for export and import
- %% between tests so make sure it doesn't exist initially
- case filelib:is_file(CovExport) of
- true ->
- DelResult = file:delete(CovExport),
- ct_logs:log("COVER INFO",
- "Warning! "
- "Export file ~ts already exists. "
- "Deleting with result: ~p",
- [CovExport,DelResult]);
- false ->
- ok
- end,
-
- %% tell test_server which modules should be cover compiled
- %% note that actual compilation is done when tests start
- test_server_ctrl:cover(CovApp, CovFile, CovExcl, CovIncl,
- CovCross, CovExport, CovLevel,
- Opts#opts.cover_stop),
- %% save cover data (used e.g. to add nodes dynamically)
- ct_util:set_testdata({cover,CovData}),
- %% start cover on specified nodes
- if (CovNodes /= []) and (CovNodes /= undefined) ->
- ct_logs:log("COVER INFO",
- "Nodes included in cover "
- "session: ~w",
- [CovNodes]),
- cover:start(CovNodes);
- true ->
- ok
- end,
- true;
- _ ->
- false
- end,
%% let test_server expand the test tuples and count no of cases
{Suites,NoOfCases} = count_test_cases(Tests, Skip),
@@ -2206,24 +2159,31 @@ do_run_test(Tests, Skip, Opts) ->
end,
%% if the verbosity level is set lower than ?STD_IMPORTANCE, tell
%% test_server to ignore stdout printouts to the test case log file
- case proplists:get_value(default, Opts#opts.verbosity) of
+ case proplists:get_value(default, Opts0#opts.verbosity) of
VLvl when is_integer(VLvl), (?STD_IMPORTANCE < (100-VLvl)) ->
test_server_ctrl:reject_io_reqs(true);
_Lower ->
ok
end,
- test_server_ctrl:multiply_timetraps(Opts#opts.multiply_timetraps),
- test_server_ctrl:scale_timetraps(Opts#opts.scale_timetraps),
+ test_server_ctrl:multiply_timetraps(Opts0#opts.multiply_timetraps),
+ test_server_ctrl:scale_timetraps(Opts0#opts.scale_timetraps),
test_server_ctrl:create_priv_dir(choose_val(
- Opts#opts.create_priv_dir,
+ Opts0#opts.create_priv_dir,
auto_per_run)),
+
+ {ok,LogDir} = ct_logs:get_log_dir(true),
+ {TsCoverInfo,Opts} = maybe_start_cover(Opts0, LogDir),
+
ct_event:notify(#event{name=start_info,
node=node(),
data={NoOfTests,NoOfSuites,NoOfCases}}),
CleanUp = add_jobs(Tests, Skip, Opts, []),
unlink(whereis(test_server_ctrl)),
catch test_server_ctrl:wait_finish(),
+
+ maybe_stop_cover(Opts, TsCoverInfo, LogDir),
+
%% check if last testcase has left a "dead" trace window
%% behind, and if so, kill it
case ct_util:get_testdata(interpret) of
@@ -2256,6 +2216,102 @@ do_run_test(Tests, Skip, Opts) ->
exit(Error)
end.
+maybe_start_cover(Opts=#opts{cover=Cover,cover_stop=CoverStop0},LogDir) ->
+ if Cover == undefined ->
+ {undefined,Opts};
+ true ->
+ case ct_cover:get_spec(Cover) of
+ {error,Reason} ->
+ exit({error,Reason});
+ CoverSpec ->
+ CoverStop =
+ case CoverStop0 of
+ undefined -> true;
+ Stop -> Stop
+ end,
+ start_cover(Opts#opts{coverspec=CoverSpec,
+ cover_stop=CoverStop},
+ LogDir)
+ end
+ end.
+
+start_cover(Opts=#opts{coverspec=CovData,cover_stop=CovStop},LogDir) ->
+ {CovFile,
+ CovNodes,
+ CovImport,
+ _CovExport,
+ #cover{app = CovApp,
+ level = CovLevel,
+ excl_mods = CovExcl,
+ incl_mods = CovIncl,
+ cross = CovCross,
+ src = _CovSrc}} = CovData,
+ ct_logs:log("COVER INFO",
+ "Using cover specification file: ~ts~n"
+ "App: ~w~n"
+ "Cross cover: ~w~n"
+ "Including ~w modules~n"
+ "Excluding ~w modules",
+ [CovFile,CovApp,CovCross,
+ length(CovIncl),length(CovExcl)]),
+
+ %% Tell test_server to print a link in its coverlog
+ %% pointing to the real coverlog which will be written in
+ %% maybe_stop_cover/2
+ test_server_ctrl:cover({log,LogDir}),
+
+ %% Cover compile all modules
+ {ok,TsCoverInfo} = test_server_ctrl:cover_compile(CovApp,CovFile,
+ CovExcl,CovIncl,
+ CovCross,CovLevel,
+ CovStop),
+ ct_logs:log("COVER INFO",
+ "Compilation completed - test_server cover info: ~tp",
+ [TsCoverInfo]),
+
+ %% start cover on specified nodes
+ if (CovNodes /= []) and (CovNodes /= undefined) ->
+ ct_logs:log("COVER INFO",
+ "Nodes included in cover "
+ "session: ~w",
+ [CovNodes]),
+ cover:start(CovNodes);
+ true ->
+ ok
+ end,
+ lists:foreach(
+ fun(Imp) ->
+ case cover:import(Imp) of
+ ok ->
+ ok;
+ {error,Reason} ->
+ ct_logs:log("COVER INFO",
+ "Importing cover data from: ~ts fails! "
+ "Reason: ~p", [Imp,Reason])
+ end
+ end, CovImport),
+ {TsCoverInfo,Opts}.
+
+maybe_stop_cover(_,undefined,_) ->
+ ok;
+maybe_stop_cover(#opts{coverspec=CovData},TsCoverInfo,LogDir) ->
+ {_CovFile,
+ _CovNodes,
+ _CovImport,
+ CovExport,
+ _AppData} = CovData,
+ case CovExport of
+ undefined -> ok;
+ _ ->
+ ct_logs:log("COVER INFO","Exporting cover data to ~tp",[CovExport]),
+ cover:export(CovExport)
+ end,
+ ct_logs:log("COVER INFO","Analysing cover data to ~tp",[LogDir]),
+ test_server_ctrl:cover_analyse(TsCoverInfo,LogDir),
+ ct_logs:log("COVER INFO","Analysis completed.",[]),
+ ok.
+
+
delete_dups([S | Suites]) ->
Suites1 = lists:delete(S, Suites),
[S | delete_dups(Suites1)];
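For reference, the cover specification consumed by ct_cover:get_spec/1 above is an ordinary file of
Erlang terms. A minimal sketch, with hypothetical module and file names:

    %% my_app.cover (passed via the {cover,"my_app.cover"} run option)
    {incl_mods, [my_module]}.
    {export, "my_app.coverdata"}.
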
diff --git a/lib/common_test/src/ct_telnet.erl b/lib/common_test/src/ct_telnet.erl
index b4d82a53cf..3b2652d06c 100644
--- a/lib/common_test/src/ct_telnet.erl
+++ b/lib/common_test/src/ct_telnet.erl
@@ -46,100 +46,80 @@
%%
%% == Logging ==
%%
-%% `ct_telnet' can be configured to uses the `error_logger' for logging telnet
-%% traffic. A special purpose error handler is implemented in
-%% `ct_conn_log_h'. To use this error handler, add the `cth_conn_log'
-%% hook in your test suite, e.g:
-%%
+%% By default, `ct_telnet' prints information about performed operations
+%% and commands, and their corresponding results, to the test case HTML log.
+%% Text strings sent from the telnet server that are not explicitly
+%% received by means of a `ct_telnet' function, such as `expect/3',
+%% are not printed to the HTML log.
+%% `ct_telnet' may however be configured to use a special purpose event handler,
+%% implemented in `ct_conn_log_h', for logging <b>all</b> telnet traffic.
+%% To use this handler, you need to install a Common Test hook named
+%% `cth_conn_log'. Example (using the test suite info function):
%%
%% ```
%% suite() ->
-%% [{ct_hooks, [{cth_conn_log, [{conn_mod(),hook_options()}]}]}].
-%%'''
+%% [{ct_hooks, [{cth_conn_log, [{conn_mod(),hook_options()}]}]}].
+%% '''
%%
%% `conn_mod()' is the name of the common_test module implementing
%% the connection protocol, i.e. `ct_telnet'.
%%
-%% The hook option `log_type' specifies the type of logging:
-%%
-%% <dl>
-%% <dt>`raw'</dt>
-%% <dd>The sent and received telnet data is logged to a separate
-%% text file as is, without any formatting. A link to the file is
-%% added to the test case HTML log.</dd>
+%% The `cth_conn_log' hook performs unformatted logging of telnet data to
+%% a separate text file. All telnet communication is captured and printed,
+%% including arbitrary data sent from the server. The link to this text file
+%% can be found at the top of the test case HTML log.
%%
-%% <dt>`html (default)'</dt>
-%% <dd>The sent and received telnet traffic is pretty printed
-%% directly in the test case HTML log.</dd>
-%%
-%% <dt>`silent'</dt>
-%% <dd>Telnet traffic is not logged.</dd>
-%% </dl>
-%%
-%% By default, all telnet traffic is logged in one single log
-%% file. However, it is possible to have different connections logged
-%% in separate files. To do this, use the hook option `hosts' and
-%% list the names of the servers/connections that will be used in the
-%% suite. Note that the connections must be named for this to work
+%% By default, data for all telnet connections is logged in one common
+%% file (named `default'), which can get messy if, for example, multiple telnet
+%% sessions are running in parallel. It is therefore possible to create a
+%% separate log file for each connection. To configure this, use the hook
+%% option `hosts' and list the names of the servers/connections that will be
+%% used in the suite. Note that the connections must be named for this to work
%% (see the `open' function below).
%%
-%% The `hosts' option has no effect if `log_type' is set to `html' or
-%% `silent'.
-%%
-%% The hook options can also be specified in a configuration file with
-%% the configuration variable `ct_conn_log':
+%% The hook option named `log_type' may be used to change the `cth_conn_log'
+%% behaviour. The default value of this option is `raw', which results in the
+%% behaviour described above. If the value is set to `html', all telnet
+%% communication is printed to the test case HTML log instead.
%%
-%% ```
-%% {ct_conn_log,[{conn_mod(),hook_options()}]}.
-%% '''
-%%
-%% For example:
+%% All `cth_conn_log' hook options described above can also be specified in
+%% a configuration file with the configuration variable `ct_conn_log'. Example:
%%
%% ```
-%% {ct_conn_log,[{ct_telnet,[{log_type,raw},
-%% {hosts,[key_or_name()]}]}]}
+%% {ct_conn_log, [{ct_telnet,[{log_type,raw},
+%% {hosts,[key_or_name()]}]}]}
%% '''
%%
%% <b>Note</b> that hook options specified in a configuration file
-%% will overwrite any hardcoded hook options in the test suite.
+%% will overwrite any hardcoded hook options in the test suite!
%%
-%% === Logging example 1 ===
+%% === Logging example ===
%%
-%% The following `ct_hooks' statement will cause raw printing of
-%% telnet traffic to separate logs for the connections named
-%% `server1' and `server2'. Any other connections will be logged
-%% to default telnet log.
+%% The following `ct_hooks' statement will cause printing of telnet traffic
+%% to separate logs for the connections named `server1' and `server2'.
+%% Traffic for any other connections will be logged in the default telnet log.
%%
%% ```
%% suite() ->
-%% [{ct_hooks, [{cth_conn_log, [{ct_telnet,[{log_type,raw}},
-%% {hosts,[server1,server2]}]}
-%% ]}]}].
+%% [{ct_hooks,
+%% [{cth_conn_log, [{ct_telnet,[{hosts,[server1,server2]}]}]}]}].
%%'''
%%
-%% === Logging example 2 ===
-%%
-%% The following configuration file will cause raw logging of all
-%% telnet traffic into one single text file.
+%% As previously explained, the above specification could also be provided
+%% by means of an entry like this in a configuration file:
%%
%% ```
-%% {ct_conn_log,[{ct_telnet,[{log_type,raw}]}]}.
+%% {ct_conn_log, [{ct_telnet,[{hosts,[server1,server2]}]}]}.
%% '''
%%
-%% The `ct_hooks' statement must look like this:
+%% in which case the `ct_hooks' statement in the test suite may simply look
+%% like this:
%%
%% ```
%% suite() ->
-%% [{ct_hooks, [{cth_conn_log, []}]}].
+%% [{ct_hooks, [{cth_conn_log, []}]}].
%% '''
%%
-%% The same `ct_hooks' statement without the configuration file would
-%% cause HTML logging of all telnet connections into the test case
-%% HTML log.
-%%
-%% <b>Note</b> that if the `cth_conn_log' hook is not added, telnet
-%% traffic is still logged in the test case HTML log file (on the legacy
-%% `ct_telnet' format).
%% @end
%% @type connection_type() = telnet | ts1 | ts2
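To tie the separate per-connection logs described above to named connections, a suite typically
requires and opens the connections by name. A sketch with illustrative names:

    suite() ->
        [{require,server1,{unix,[telnet]}},
         {require,server2,{unix,[telnet]}},
         {ct_hooks,[{cth_conn_log,[{ct_telnet,[{hosts,[server1,server2]}]}]}]}].

    init_per_suite(Config) ->
        {ok,_} = ct_telnet:open(server1),
        {ok,_} = ct_telnet:open(server2),
        Config.
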
@@ -205,6 +185,7 @@ open(Name) ->
%%% Name = target_name()
%%% ConnType = ct_telnet:connection_type()
%%% Handle = ct_telnet:handle()
+%%% Reason = term()
%%%
%%% @doc Open a telnet connection to the specified target host.
open(Name,ConnType) ->
@@ -234,6 +215,7 @@ open(KeyOrName,ConnType,TargetMod) ->
%%% TargetMod = atom()
%%% Extra = term()
%%% Handle = handle()
+%%% Reason = term()
%%%
%%% @doc Open a telnet connection to the specified target host.
%%%
@@ -281,13 +263,22 @@ open(KeyOrName,ConnType,TargetMod,Extra) ->
end,
log(undefined,open,"Connecting to ~p(~p)",
[KeyOrName,Addr1]),
- ct_gen_conn:start(KeyOrName,full_addr(Addr1,ConnType),
- {TargetMod,KeepAlive,Extra},?MODULE)
+ Reconnect =
+ case ct:get_config({telnet_settings,reconnection_attempts}) of
+ 0 -> false;
+ _ -> true
+ end,
+ ct_gen_conn:start(full_addr(Addr1,ConnType),
+ {TargetMod,KeepAlive,Extra},
+ ?MODULE, [{name,KeyOrName},
+ {reconnect,Reconnect},
+ {old,true}])
end.
%%%-----------------------------------------------------------------
%%% @spec close(Connection) -> ok | {error,Reason}
-%%% Connection = ct_telnet:connection()
+%%% Connection = ct_telnet:connection()
+%%% Reason = term()
%%%
%%% @doc Close the telnet connection and stop the process managing it.
%%%
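The reconnection check above reads the telnet_settings config variable; a config file entry such as
the following (an illustrative value) disables automatic reconnection:

    {telnet_settings, [{reconnection_attempts, 0}]}.
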
@@ -300,7 +291,7 @@ close(Connection) ->
{ok,Pid} ->
log(undefined,close,"Connection closed, handle: ~w",[Pid]),
case ct_gen_conn:stop(Pid) of
- {error,{process_down,Pid,noproc}} ->
+ {error,{process_down,Pid,_}} ->
{error,already_closed};
Result ->
Result
@@ -322,6 +313,7 @@ cmd(Connection,Cmd) ->
%%% Cmd = string()
%%% Timeout = integer()
%%% Data = [string()]
+%%% Reason = term()
%%% @doc Send a command via telnet and wait for prompt.
cmd(Connection,Cmd,Timeout) ->
case get_handle(Connection) of
@@ -342,6 +334,7 @@ cmdf(Connection,CmdFormat,Args) ->
%%% Args = list()
%%% Timeout = integer()
%%% Data = [string()]
+%%% Reason = term()
%%% @doc Send a telnet command and wait for prompt
%%% (uses a format string and list of arguments to build the command).
cmdf(Connection,CmdFormat,Args,Timeout) when is_list(Args) ->
@@ -352,6 +345,7 @@ cmdf(Connection,CmdFormat,Args,Timeout) when is_list(Args) ->
%%% @spec get_data(Connection) -> {ok,Data} | {error,Reason}
%%% Connection = ct_telnet:connection()
%%% Data = [string()]
+%%% Reason = term()
%%% @doc Get all data which has been received by the telnet client
%%% since last command was sent.
get_data(Connection) ->
@@ -366,6 +360,7 @@ get_data(Connection) ->
%%% @spec send(Connection,Cmd) -> ok | {error,Reason}
%%% Connection = ct_telnet:connection()
%%% Cmd = string()
+%%% Reason = term()
%%% @doc Send a telnet command and return immediately.
%%%
%%% <p>The resulting output from the command can be read with
@@ -383,6 +378,7 @@ send(Connection,Cmd) ->
%%% Connection = ct_telnet:connection()
%%% CmdFormat = string()
%%% Args = list()
+%%% Reason = term()
%%% @doc Send a telnet command and return immediately (uses a format
%%% string and a list of arguments to build the command).
sendf(Connection,CmdFormat,Args) when is_list(Args) ->
@@ -405,9 +401,11 @@ expect(Connection,Patterns) ->
%%% Prompt = string()
%%% Tag = term()
%%% Opts = [Opt]
-%%% Opt = {timeout,Timeout} | repeat | {repeat,N} | sequence |
-%%% {halt,HaltPatterns} | ignore_prompt | no_prompt_check
-%%% Timeout = integer()
+%%% Opt = {idle_timeout,IdleTimeout} | {total_timeout,TotalTimeout} |
+%%% repeat | {repeat,N} | sequence | {halt,HaltPatterns} |
+%%% ignore_prompt | no_prompt_check
+%%% IdleTimeout = infinity | integer()
+%%% TotalTimeout = infinity | integer()
%%% N = integer()
%%% HaltPatterns = Patterns
%%% MatchList = [Match]
@@ -433,11 +431,16 @@ expect(Connection,Patterns) ->
%%% will also include the matched <code>Tag</code>. Else, only
%%% <code>RxMatch</code> is returned.</p>
%%%
-%%% <p>The <code>timeout</code> option indicates that the function
+%%% <p>The <code>idle_timeout</code> option indicates that the function
%%% shall return if the telnet client is idle (i.e. if no data is
-%%% received) for more than <code>Timeout</code> milliseconds. Default
+%%% received) for more than <code>IdleTimeout</code> milliseconds. Default
%%% timeout is 10 seconds.</p>
%%%
+%%% <p>The <code>total_timeout</code> option sets a time limit for
+%%% the complete expect operation. After <code>TotalTimeout</code>
+%%% milliseconds, <code>{error,timeout}</code> is returned. The default
+%%% value is <code>infinity</code> (i.e. no time limit).</p>
+%%%
%%% <p>The function will always return when a prompt is found, unless
%%% any of the <code>ignore_prompt</code> or
%%% <code>no_prompt_check</code> options are used, in which case it
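As a usage sketch of the new options (connection name and pattern are hypothetical), the following
waits at most 5 seconds between received chunks and at most 30 seconds for the whole operation:

    {ok,_Match} = ct_telnet:expect(server1, ["login:"],
                                   [{idle_timeout,5000},
                                    {total_timeout,30000}]).
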
@@ -570,14 +573,14 @@ handle_msg({cmd,Cmd,Timeout},State) ->
State#state.buffer,
prompt,
State#state.prx,
- [{timeout,2000}]);
+ [{idle_timeout,2000}]);
{ip,false} ->
silent_teln_expect(State#state.name,
State#state.teln_pid,
State#state.buffer,
prompt,
State#state.prx,
- [{timeout,200}]);
+ [{idle_timeout,200}]);
{ip,true} ->
ok
end,
@@ -601,11 +604,12 @@ handle_msg({cmd,Cmd,Timeout},State) ->
end_gen_log(),
{Return,State#state{buffer=NewBuffer,prompt=Prompt}};
handle_msg({send,Cmd},State) ->
- log(State,send,"Cmd: ~p",[Cmd]),
-
+ start_gen_log(heading(send,State#state.name)),
+ log(State,send,"Sending: ~p",[Cmd]),
+
debug_cont_gen_log("Throwing Buffer:",[]),
debug_log_lines(State#state.buffer),
-
+
case {State#state.type,State#state.prompt} of
{ts,_} ->
silent_teln_expect(State#state.name,
@@ -613,18 +617,19 @@ handle_msg({send,Cmd},State) ->
State#state.buffer,
prompt,
State#state.prx,
- [{timeout,2000}]);
+ [{idle_timeout,2000}]);
{ip,false} ->
silent_teln_expect(State#state.name,
State#state.teln_pid,
State#state.buffer,
prompt,
State#state.prx,
- [{timeout,200}]);
+ [{idle_timeout,200}]);
{ip,true} ->
ok
end,
ct_telnet_client:send_data(State#state.teln_pid,Cmd),
+ end_gen_log(),
{ok,State#state{buffer=[],prompt=false}};
handle_msg(get_data,State) ->
start_gen_log(heading(get_data,State#state.name)),
@@ -752,8 +757,8 @@ check_if_prompt_was_reached(Data,_) when is_list(Data) ->
check_if_prompt_was_reached(_,_) ->
false.
-%%% @hidden
-%% Functions for logging ct_telnet reports and telnet data
+%%%-----------------------------------------------------------------
+%%% Functions for logging ct_telnet reports and telnet data
heading(Action,undefined) ->
io_lib:format("~w ~w",[?MODULE,Action]);
@@ -763,6 +768,8 @@ heading(Action,Name) ->
force_log(State,Action,String,Args) ->
log(State,Action,String,Args,true).
+%%%-----------------------------------------------------------------
+%%% @hidden
log(State,Action,String,Args) when is_record(State, state) ->
log(State,Action,String,Args,false);
log(Name,Action,String,Args) when is_atom(Name) ->
@@ -770,6 +777,8 @@ log(Name,Action,String,Args) when is_atom(Name) ->
log(TelnPid,Action,String,Args) when is_pid(TelnPid) ->
log(#state{teln_pid=TelnPid},Action,String,Args,false).
+%%%-----------------------------------------------------------------
+%%% @hidden
log(undefined,String,Args) ->
log(#state{},undefined,String,Args,false);
log(Name,String,Args) when is_atom(Name) ->
@@ -777,72 +786,73 @@ log(Name,String,Args) when is_atom(Name) ->
log(TelnPid,String,Args) when is_pid(TelnPid) ->
log(#state{teln_pid=TelnPid},undefined,String,Args).
+%%%-----------------------------------------------------------------
+%%% @hidden
log(#state{name=Name,teln_pid=TelnPid,host=Host,port=Port},
Action,String,Args,ForcePrint) ->
Name1 = if Name == undefined -> get({ct_telnet_pid2name,TelnPid});
true -> Name
end,
Silent = get(silent),
- case ct_util:get_testdata({cth_conn_log,?MODULE}) of
- Result when Result /= undefined, Result /= silent, Silent /= true ->
- {PrintHeader,PreBR} = if Action==undefined ->
- {false,""};
- true ->
- {true,"\n"}
- end,
- error_logger:info_report(#conn_log{header=PrintHeader,
- client=self(),
- conn_pid=TelnPid,
- address={Host,Port},
- name=Name1,
- action=Action,
- module=?MODULE},
- {PreBR++String,Args});
- Result when Result /= undefined ->
- ok;
- _ when Action == open; Action == close; Action == reconnect;
- Action == info; Action == error ->
- ct_gen_conn:log(heading(Action,Name1),String,Args);
- _ when ForcePrint == false ->
- case ct_util:is_silenced(telnet) of
- true ->
- ok;
- false ->
- ct_gen_conn:cont_log(String,Args)
+
+ if Action == general_io ->
+ case ct_util:get_testdata({cth_conn_log,?MODULE}) of
+ HookMode when HookMode /= undefined, HookMode /= silent,
+ Silent /= true ->
+ error_logger:info_report(#conn_log{header=false,
+ client=self(),
+ conn_pid=TelnPid,
+ address={Host,Port},
+ name=Name1,
+ action=Action,
+ module=?MODULE},
+ {String,Args});
+ _ -> %% hook inactive or silence requested
+ ok
end;
- _ when ForcePrint == true ->
- case ct_util:is_silenced(telnet) of
- true ->
- %% call log/3 now instead of cont_log/2 since
- %% start_gen_log/1 will not have been previously called
+
+ true ->
+ if Action == open; Action == close; Action == reconnect;
+ Action == info; Action == error ->
ct_gen_conn:log(heading(Action,Name1),String,Args);
- false ->
- ct_gen_conn:cont_log(String,Args)
+
+ ForcePrint == false ->
+ case ct_util:is_silenced(telnet) of
+ true ->
+ ok;
+ false ->
+ ct_gen_conn:cont_log(String,Args)
+ end;
+
+ ForcePrint == true ->
+ case ct_util:is_silenced(telnet) of
+ true ->
+ %% call log/3 now instead of cont_log/2 since
+ %% start_gen_log/1 will not have been previously
+ %% called
+ ct_gen_conn:log(heading(Action,Name1),String,Args);
+ false ->
+ ct_gen_conn:cont_log(String,Args)
+ end
end
end.
+%%%-----------------------------------------------------------------
+%%% @hidden
start_gen_log(Heading) ->
- case ct_util:get_testdata({cth_conn_log,?MODULE}) of
- undefined ->
- %% check if output is suppressed
- case ct_util:is_silenced(telnet) of
- true -> ok;
- false -> ct_gen_conn:start_log(Heading)
- end;
- _ ->
- ok
+ %% check if output is suppressed
+ case ct_util:is_silenced(telnet) of
+ true -> ok;
+ false -> ct_gen_conn:start_log(Heading)
end.
+%%%-----------------------------------------------------------------
+%%% @hidden
end_gen_log() ->
- case ct_util:get_testdata({cth_conn_log,?MODULE}) of
- undefined ->
- %% check if output is suppressed
- case ct_util:is_silenced(telnet) of
- true -> ok;
- false -> ct_gen_conn:end_log()
- end;
- _ ->
- ok
+ %% check if output is suppressed
+ case ct_util:is_silenced(telnet) of
+ true -> ok;
+ false -> ct_gen_conn:end_log()
end.
%%% @hidden
@@ -863,14 +873,13 @@ teln_cmd(Pid,Cmd,Prx,Timeout) ->
teln_receive_until_prompt(Pid,Prx,Timeout).
teln_get_all_data(Pid,Prx,Data,Acc,LastLine) ->
- case check_for_prompt(Prx,lists:reverse(LastLine) ++ Data) of
+ case check_for_prompt(Prx,LastLine++Data) of
{prompt,Lines,_PromptType,Rest} ->
teln_get_all_data(Pid,Prx,Rest,[Lines|Acc],[]);
{noprompt,Lines,LastLine1} ->
case ct_telnet_client:get_data(Pid) of
{ok,[]} ->
- {ok,lists:reverse(lists:append([Lines|Acc])),
- lists:reverse(LastLine1)};
+ {ok,lists:reverse(lists:append([Lines|Acc])),LastLine1};
{ok,Data1} ->
teln_get_all_data(Pid,Prx,Data1,[Lines|Acc],LastLine1)
end
@@ -879,7 +888,8 @@ teln_get_all_data(Pid,Prx,Data,Acc,LastLine) ->
%% Expect options record
-record(eo,{teln_pid,
prx,
- timeout,
+ idle_timeout,
+ total_timeout,
haltpatterns=[],
seq=false,
repeat=false,
@@ -921,11 +931,12 @@ teln_expect(Name,Pid,Data,Pattern0,Prx,Opts) ->
Seq = get_seq(Opts),
Pattern = convert_pattern(Pattern0,Seq),
- Timeout = get_timeout(Opts),
+ {IdleTimeout,TotalTimeout} = get_timeouts(Opts),
EO = #eo{teln_pid=Pid,
prx=Prx,
- timeout=Timeout,
+ idle_timeout=IdleTimeout,
+ total_timeout=TotalTimeout,
seq=Seq,
haltpatterns=HaltPatterns,
prompt_check=PromptCheck},
@@ -964,11 +975,22 @@ rm_dupl([P|Ps],Acc) ->
rm_dupl([],Acc) ->
lists:reverse(Acc).
-get_timeout(Opts) ->
- case lists:keysearch(timeout,1,Opts) of
- {value,{timeout,T}} -> T;
- false -> ?DEFAULT_TIMEOUT
- end.
+get_timeouts(Opts) ->
+ {case lists:keysearch(idle_timeout,1,Opts) of
+ {value,{_,T}} ->
+ T;
+ false ->
+ %% this check is for backwards compatibility (pre CT v1.8)
+ case lists:keysearch(timeout,1,Opts) of
+ {value,{_,T}} -> T;
+ false -> ?DEFAULT_TIMEOUT
+ end
+ end,
+ case lists:keysearch(total_timeout,1,Opts) of
+ {value,{_,T}} -> T;
+ false -> infinity
+ end}.
+
get_repeat(Opts) ->
case lists:keysearch(repeat,1,Opts) of
{value,{repeat,N}} when is_integer(N) ->
@@ -1010,7 +1032,8 @@ repeat_expect(Name,Pid,Data,Pattern,Acc,EO) ->
{error,Reason}
end.
-teln_expect1(Name,Pid,Data,Pattern,Acc,EO) ->
+teln_expect1(Name,Pid,Data,Pattern,Acc,EO=#eo{idle_timeout=IdleTO,
+ total_timeout=TotalTO}) ->
ExpectFun = case EO#eo.seq of
true -> fun() ->
seq_expect(Name,Pid,Data,Pattern,Acc,EO)
@@ -1027,12 +1050,12 @@ teln_expect1(Name,Pid,Data,Pattern,Acc,EO) ->
NotFinished ->
%% Get more data
Fun = fun() -> get_data1(EO#eo.teln_pid) end,
- case ct_gen_conn:do_within_time(Fun, EO#eo.timeout) of
- {error,Reason} ->
+ case timer:tc(ct_gen_conn, do_within_time, [Fun, IdleTO]) of
+ {_,{error,Reason}} ->
%% A timeout will occur when the telnet connection
- %% is idle for EO#eo.timeout milliseconds.
+ %% is idle for EO#eo.idle_timeout milliseconds.
{error,Reason};
- {ok,Data1} ->
+ {_,{ok,Data1}} when TotalTO == infinity ->
case NotFinished of
{nomatch,Rest} ->
%% One expect
@@ -1040,6 +1063,21 @@ teln_expect1(Name,Pid,Data,Pattern,Acc,EO) ->
{continue,Patterns1,Acc1,Rest} ->
%% Sequence
teln_expect1(Name,Pid,Rest++Data1,Patterns1,Acc1,EO)
+ end;
+ {Elapsed,{ok,Data1}} ->
+ TVal = trunc(TotalTO - (Elapsed/1000)),
+ if TVal =< 0 ->
+ {error,timeout};
+ true ->
+ EO1 = EO#eo{total_timeout = TVal},
+ case NotFinished of
+ {nomatch,Rest} ->
+ %% One expect
+ teln_expect1(Name,Pid,Rest++Data1,Pattern,[],EO1);
+ {continue,Patterns1,Acc1,Rest} ->
+ %% Sequence
+ teln_expect1(Name,Pid,Rest++Data1,Patterns1,Acc1,EO1)
+ end
end
end
end.
@@ -1299,7 +1337,7 @@ teln_receive_until_prompt(Pid,Prx,Timeout) ->
teln_receive_until_prompt(Pid,Prx,Acc,LastLine) ->
{ok,Data} = ct_telnet_client:get_data(Pid),
- case check_for_prompt(Prx,LastLine ++ Data) of
+ case check_for_prompt(Prx,LastLine++Data) of
{prompt,Lines,PromptType,Rest} ->
Return = lists:reverse(lists:append([Lines|Acc])),
{ok,Return,PromptType,Rest};
diff --git a/lib/common_test/src/ct_telnet_client.erl b/lib/common_test/src/ct_telnet_client.erl
index 2cbcba9c77..ce30dcb74b 100644
--- a/lib/common_test/src/ct_telnet_client.erl
+++ b/lib/common_test/src/ct_telnet_client.erl
@@ -32,7 +32,9 @@
-module(ct_telnet_client).
--export([open/1, open/2, open/3, open/4, close/1]).
+%% -define(debug, true).
+
+-export([open/2, open/3, open/4, open/5, close/1]).
-export([send_data/2, get_data/1]).
-define(TELNET_PORT, 23).
@@ -64,20 +66,23 @@
-define(TERMINAL_TYPE, 24).
-define(WINDOW_SIZE, 31).
--record(state,{get_data, keep_alive=true}).
+-record(state,{conn_name, get_data, keep_alive=true, log_pos=1}).
-open(Server) ->
- open(Server, ?TELNET_PORT, ?OPEN_TIMEOUT, true).
+open(Server, ConnName) ->
+ open(Server, ?TELNET_PORT, ?OPEN_TIMEOUT, true, ConnName).
-open(Server, Port) ->
- open(Server, Port, ?OPEN_TIMEOUT, true).
+open(Server, Port, ConnName) ->
+ open(Server, Port, ?OPEN_TIMEOUT, true, ConnName).
-open(Server, Port, Timeout) ->
- open(Server, Port, Timeout, true).
+open(Server, Port, Timeout, ConnName) ->
+ open(Server, Port, Timeout, true, ConnName).
-open(Server, Port, Timeout, KeepAlive) ->
+open(Server, Port, Timeout, KeepAlive, ConnName) ->
Self = self(),
- Pid = spawn(fun() -> init(Self, Server, Port, Timeout, KeepAlive) end),
+ Pid = spawn(fun() ->
+ init(Self, Server, Port, Timeout,
+ KeepAlive, ConnName)
+ end),
receive
{open,Pid} ->
{ok,Pid};
@@ -86,29 +91,34 @@ open(Server, Port, Timeout, KeepAlive) ->
end.
close(Pid) ->
- Pid ! close.
+ Pid ! {close,self()},
+ receive closed -> ok
+ after 5000 -> ok
+ end.
send_data(Pid, Data) ->
Pid ! {send_data, Data++"\n"},
ok.
get_data(Pid) ->
- Pid ! {get_data, self()},
+ Pid ! {get_data,self()},
receive
{data,Data} ->
- {ok, Data}
+ {ok,Data}
end.
%%%-----------------------------------------------------------------
%%% Internal functions
-init(Parent, Server, Port, Timeout, KeepAlive) ->
+init(Parent, Server, Port, Timeout, KeepAlive, ConnName) ->
case gen_tcp:connect(Server, Port, [list,{packet,0}], Timeout) of
{ok,Sock} ->
- dbg("Connected to: ~p (port: ~w, keep_alive: ~w)\n", [Server,Port,KeepAlive]),
- send([?IAC,?DO,?SUPPRESS_GO_AHEAD], Sock),
+ dbg("~p connected to: ~p (port: ~w, keep_alive: ~w)\n",
+ [ConnName,Server,Port,KeepAlive]),
+ send([?IAC,?DO,?SUPPRESS_GO_AHEAD], Sock, ConnName),
Parent ! {open,self()},
- loop(#state{get_data=10, keep_alive=KeepAlive}, Sock, []),
+ loop(#state{conn_name=ConnName, get_data=10, keep_alive=KeepAlive},
+ Sock, []),
gen_tcp:close(Sock);
Error ->
Parent ! {Error,self()}
@@ -118,6 +128,13 @@ loop(State, Sock, Acc) ->
receive
{tcp_closed,_} ->
dbg("Connection closed\n", []),
+ Data = lists:reverse(lists:append(Acc)),
+ dbg("Printing queued messages: ~tp",[Data]),
+ ct_telnet:log(State#state.conn_name,
+ general_io, "~ts",
+ [lists:sublist(Data,
+ State#state.log_pos,
+ length(Data))]),
receive
{get_data,Pid} ->
Pid ! closed
@@ -125,11 +142,11 @@ loop(State, Sock, Acc) ->
ok
end;
{tcp,_,Msg0} ->
- dbg("tcp msg: ~p~n",[Msg0]),
+ dbg("tcp msg: ~tp~n",[Msg0]),
Msg = check_msg(Sock,Msg0,[]),
loop(State, Sock, [Msg | Acc]);
{send_data,Data} ->
- send(Data, Sock),
+ send(Data, Sock, State#state.conn_name),
loop(State, Sock, Acc);
{get_data,Pid} ->
NewState =
@@ -144,54 +161,100 @@ loop(State, Sock, Acc) ->
end;
_ ->
Data = lists:reverse(lists:append(Acc)),
- dbg("get_data ~p\n",[Data]),
+ Len = length(Data),
+ dbg("get_data ~tp\n",[Data]),
+ ct_telnet:log(State#state.conn_name,
+ general_io, "~ts",
+ [lists:sublist(Data,
+ State#state.log_pos,
+ Len)]),
Pid ! {data,Data},
- State
+ State#state{log_pos = 1}
end,
loop(NewState, Sock, []);
{get_data_delayed,Pid} ->
NewState =
case State of
#state{keep_alive = true, get_data = 0} ->
- if Acc == [] -> send([?IAC,?NOP], Sock);
+ if Acc == [] -> send([?IAC,?NOP], Sock,
+ State#state.conn_name);
true -> ok
end,
State#state{get_data=10};
_ ->
State
end,
- NewAcc =
+ {NewAcc,Pos} =
case erlang:is_process_alive(Pid) of
- true ->
+ true when Acc /= [] ->
Data = lists:reverse(lists:append(Acc)),
- dbg("get_data_delayed ~p\n",[Data]),
+ Len = length(Data),
+ dbg("get_data_delayed ~tp\n",[Data]),
+ ct_telnet:log(State#state.conn_name,
+ general_io, "~ts",
+ [lists:sublist(Data,
+ State#state.log_pos,
+ Len)]),
Pid ! {data,Data},
- [];
+ {[],1};
+ true when Acc == [] ->
+ dbg("get_data_delayed nodata\n",[]),
+ Pid ! {data,[]},
+ {[],1};
false ->
- Acc
+ {Acc,NewState#state.log_pos}
end,
- loop(NewState, Sock, NewAcc);
- close ->
+ loop(NewState#state{log_pos=Pos}, Sock, NewAcc);
+ {close,Pid} ->
dbg("Closing connection\n", []),
+ if Acc == [] ->
+ ok;
+ true ->
+ Data = lists:reverse(lists:append(Acc)),
+ dbg("Printing queued messages: ~tp",[Data]),
+ ct_telnet:log(State#state.conn_name,
+ general_io, "~ts",
+ [lists:sublist(Data,
+ State#state.log_pos,
+ length(Data))])
+ end,
gen_tcp:close(Sock),
- ok
+ Pid ! closed
after wait(State#state.keep_alive,?IDLE_TIMEOUT) ->
- if
- Acc == [] -> send([?IAC,?NOP], Sock);
- true -> ok
- end,
- loop(State, Sock, Acc)
+ Data = lists:reverse(lists:append(Acc)),
+ case Data of
+ [] ->
+ send([?IAC,?NOP], Sock, State#state.conn_name),
+ loop(State, Sock, Acc);
+ _ when State#state.log_pos == length(Data)+1 ->
+ loop(State, Sock, Acc);
+ _ ->
+ dbg("Idle timeout, printing ~tp\n",[Data]),
+ Len = length(Data),
+ ct_telnet:log(State#state.conn_name,
+ general_io, "~ts",
+ [lists:sublist(Data,
+ State#state.log_pos,
+ Len)]),
+ loop(State#state{log_pos = Len+1}, Sock, Acc)
+ end
end.
wait(true, Time) -> Time;
wait(false, _) -> infinity.
-send(Data, Sock) ->
+send(Data, Sock, ConnName) ->
case Data of
[?IAC|_] = Cmd ->
cmd_dbg(Cmd);
_ ->
- dbg("Sending: ~p\n", [Data])
+ dbg("Sending: ~tp\n", [Data]),
+ try io_lib:format("[~w] ~ts", [?MODULE,Data]) of
+ Str ->
+ ct_telnet:log(ConnName, general_io, Str, [])
+ catch
+ _:_ -> ok
+ end
end,
gen_tcp:send(Sock, Data),
ok.
diff --git a/lib/common_test/src/ct_testspec.erl b/lib/common_test/src/ct_testspec.erl
index c07ea323e6..10a9bdac67 100644
--- a/lib/common_test/src/ct_testspec.erl
+++ b/lib/common_test/src/ct_testspec.erl
@@ -1120,8 +1120,9 @@ should_be_added(Tag,Node,_Data,Spec) ->
%% list terms *without* possible duplicates here
Tag == logdir; Tag == logopts;
Tag == basic_html; Tag == label;
- Tag == auto_compile; Tag == stylesheet;
- Tag == verbosity; Tag == silent_connections ->
+ Tag == auto_compile; Tag == abort_if_missing_suites;
+ Tag == stylesheet; Tag == verbosity;
+ Tag == silent_connections ->
lists:keymember(ref2node(Node,Spec#testspec.nodes),1,
read_field(Spec,Tag)) == false;
%% for terms *with* possible duplicates
@@ -1496,6 +1497,8 @@ valid_terms() ->
{include,3},
{auto_compile,2},
{auto_compile,3},
+ {abort_if_missing_suites,2},
+ {abort_if_missing_suites,3},
{stylesheet,2},
{stylesheet,3},
{suites,3},
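The new term registered above allows a test specification entry such as (value chosen for
illustration):

    {abort_if_missing_suites, true}.
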
diff --git a/lib/common_test/src/ct_util.erl b/lib/common_test/src/ct_util.erl
index f5eb3a72f0..56027586d1 100644
--- a/lib/common_test/src/ct_util.erl
+++ b/lib/common_test/src/ct_util.erl
@@ -37,7 +37,7 @@
save_suite_data_async/3, save_suite_data_async/2,
read_suite_data/1,
delete_suite_data/0, delete_suite_data/1, match_delete_suite_data/1,
- delete_testdata/0, delete_testdata/1,
+ delete_testdata/0, delete_testdata/1, match_delete_testdata/1,
set_testdata/1, get_testdata/1, get_testdata/2,
set_testdata_async/1, update_testdata/2, update_testdata/3,
set_verbosity/1, get_verbosity/1]).
@@ -270,6 +270,9 @@ delete_testdata() ->
delete_testdata(Key) ->
call({delete_testdata, Key}).
+match_delete_testdata(KeyPat) ->
+ call({match_delete_testdata, KeyPat}).
+
update_testdata(Key, Fun) ->
update_testdata(Key, Fun, []).
@@ -361,7 +364,25 @@ loop(Mode,TestData,StartDir) ->
{{delete_testdata,Key},From} ->
TestData1 = lists:keydelete(Key,1,TestData),
return(From,ok),
- loop(From,TestData1,StartDir);
+ loop(From,TestData1,StartDir);
+ {{match_delete_testdata,{Key1,Key2}},From} ->
+ %% handles keys with 2 elements
+ TestData1 =
+ lists:filter(fun({Key,_}) when not is_tuple(Key) ->
+ true;
+ ({Key,_}) when tuple_size(Key) =/= 2 ->
+ true;
+ ({{_,KeyB},_}) when Key1 == '_' ->
+ KeyB =/= Key2;
+ ({{KeyA,_},_}) when Key2 == '_' ->
+ KeyA =/= Key1;
+ (_) when Key1 == '_' ; Key2 == '_' ->
+ false;
+ (_) ->
+ true
+ end, TestData),
+ return(From,ok),
+ loop(From,TestData1,StartDir);
{{set_testdata,New = {Key,_Val}},From} ->
TestData1 = lists:keydelete(Key,1,TestData),
return(From,ok),
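With the wildcard matching above, related entries can be cleared in one call; for example (the key
shape is taken from the ct_telnet changes in this diff):

    ok = ct_util:match_delete_testdata({cth_conn_log, '_'}).
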
diff --git a/lib/common_test/src/ct_util.hrl b/lib/common_test/src/ct_util.hrl
index a82d58cc42..845bb55486 100644
--- a/lib/common_test/src/ct_util.hrl
+++ b/lib/common_test/src/ct_util.hrl
@@ -48,6 +48,7 @@
release_shell=false,
include=[],
auto_compile=[],
+ abort_if_missing_suites=[],
stylesheet=[],
multiply_timetraps=[],
scale_timetraps=[],
diff --git a/lib/common_test/src/cth_conn_log.erl b/lib/common_test/src/cth_conn_log.erl
index a731c8054c..1e60f2751e 100644
--- a/lib/common_test/src/cth_conn_log.erl
+++ b/lib/common_test/src/cth_conn_log.erl
@@ -91,16 +91,18 @@ merge_log_info([{Mod,ConfOpts}|ConfList],HookList) ->
{value,{_,HookOpts},HL1} ->
{ConfOpts ++ HookOpts, HL1} % ConfOpts overwrites HookOpts!
end,
- [{Mod,get_log_opts(Opts)} | merge_log_info(ConfList,HookList1)];
+ [{Mod,get_log_opts(Mod,Opts)} | merge_log_info(ConfList,HookList1)];
merge_log_info([],HookList) ->
- [{Mod,get_log_opts(Opts)} || {Mod,Opts} <- HookList].
+ [{Mod,get_log_opts(Mod,Opts)} || {Mod,Opts} <- HookList].
-get_log_opts(Opts) ->
- LogType = proplists:get_value(log_type,Opts,html),
+get_log_opts(Mod,Opts) ->
+ DefaultLogType = if Mod == ct_telnet -> raw;
+ true -> html
+ end,
+ LogType = proplists:get_value(log_type,Opts,DefaultLogType),
Hosts = proplists:get_value(hosts,Opts,[]),
{LogType,Hosts}.
-
pre_init_per_testcase(TestCase,Config,CthState) ->
Logs =
lists:map(
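With this change, ct_telnet connections default to raw logging; a suite can still opt back into
HTML logging by overriding log_type, for example:

    suite() ->
        [{ct_hooks,[{cth_conn_log,[{ct_telnet,[{log_type,html}]}]}]}].
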
diff --git a/lib/common_test/src/cth_surefire.erl b/lib/common_test/src/cth_surefire.erl
index 7ed2018bdf..bb12171ea7 100644
--- a/lib/common_test/src/cth_surefire.erl
+++ b/lib/common_test/src/cth_surefire.erl
@@ -79,6 +79,10 @@ init(Path, Opts) ->
url_base = proplists:get_value(url_base,Opts),
timer = now() }.
+pre_init_per_suite(Suite,SkipOrFail,State) when is_tuple(SkipOrFail) ->
+ {SkipOrFail, init_tc(State#state{curr_suite = Suite,
+ curr_suite_ts = now()},
+ SkipOrFail) };
pre_init_per_suite(Suite,Config,#state{ test_cases = [] } = State) ->
TcLog = proplists:get_value(tc_logfile,Config),
CurrLogDir = filename:dirname(TcLog),
diff --git a/lib/common_test/src/unix_telnet.erl b/lib/common_test/src/unix_telnet.erl
index e049c3bf39..10666b979d 100644
--- a/lib/common_test/src/unix_telnet.erl
+++ b/lib/common_test/src/unix_telnet.erl
@@ -17,8 +17,8 @@
%% %CopyrightEnd%
%%
-%%% @doc Callback module for ct_telnet for talking telnet
-%%% to a unix host.
+%%% @doc Callback module for ct_telnet, for connecting to a telnet
+%%% server on a unix host.
%%%
%%% <p>It requires the following entry in the config file:</p>
%%% <pre>
@@ -28,15 +28,15 @@
%%% {password,Password},
%%% {keep_alive,Bool}]}. % optional</pre>
%%%
-%%% <p>To talk telnet to the host specified by
+%%% <p>To communicate via telnet to the host specified by
%%% <code>HostNameOrIpAddress</code>, use the interface functions in
-%%% <code>ct</code>, e.g. <code>open(Name), cmd(Name,Cmd), ...</code>.</p>
+%%% <code>ct_telnet</code>, e.g. <code>open(Name), cmd(Name,Cmd), ...</code>.</p>
%%%
%%% <p><code>Name</code> is the name you allocated to the unix host in
%%% your <code>require</code> statement. E.g.</p>
-%%% <pre> suite() -> [{require,Name,{unix,[telnet,username,password]}}].</pre>
+%%% <pre> suite() -> [{require,Name,{unix,[telnet]}}].</pre>
%%% <p>or</p>
-%%% <pre> ct:require(Name,{unix,[telnet,username,password]}).</pre>
+%%% <pre> ct:require(Name,{unix,[telnet]}).</pre>
%%%
%%% <p>The "keep alive" activity (i.e. that Common Test sends NOP to the server
%%% every 10 seconds if the connection is idle) may be enabled or disabled for one
@@ -62,20 +62,18 @@
-define(prx,"login: |Password: |\\\$ |> ").
%%%-----------------------------------------------------------------
-%%% @hidden
%%% @spec get_prompt_regexp() -> PromptRegexp
%%% PromptRegexp = ct_telnet:prompt_regexp()
%%%
%%% @doc Callback for ct_telnet.erl.
%%%
-%%% <p>Return the prompt regexp for telnet connections to the
-%%% interwatch instrument.</p>
+%%% <p>Return a suitable regexp string that will match common
+%%% prompts for users on unix hosts.</p>
get_prompt_regexp() ->
?prx.
%%%-----------------------------------------------------------------
-%%% @hidden
%%% @spec connect(ConnName,Ip,Port,Timeout,KeepAlive,Extra) ->
%%% {ok,Handle} | {error,Reason}
%%% ConnName = ct:target_name()
@@ -83,14 +81,15 @@ get_prompt_regexp() ->
%%% Port = integer()
%%% Timeout = integer()
%%% KeepAlive = bool()
-%%% Extra = {Username,Password}
+%%% Extra = ct:target_name() | {Username,Password}
%%% Username = string()
%%% Password = string()
%%% Handle = ct_telnet:handle()
+%%% Reason = term()
%%%
%%% @doc Callback for ct_telnet.erl.
%%%
-%%% <p>Setup telnet connection to a UNIX host.</p>
+%%% <p>Set up a telnet connection to a unix host.</p>
connect(ConnName,Ip,Port,Timeout,KeepAlive,Extra) ->
case Extra of
{Username,Password} ->
@@ -109,7 +108,7 @@ connect(ConnName,Ip,Port,Timeout,KeepAlive,Extra) ->
connect1(Name,Ip,Port,Timeout,KeepAlive,Username,Password) ->
start_gen_log("unix_telnet connect"),
Result =
- case ct_telnet_client:open(Ip,Port,Timeout,KeepAlive) of
+ case ct_telnet_client:open(Ip,Port,Timeout,KeepAlive,Name) of
{ok,Pid} ->
case ct_telnet:silent_teln_expect(Name,Pid,[],
[prompt],?prx,[]) of
@@ -143,13 +142,13 @@ connect1(Name,Ip,Port,Timeout,KeepAlive,Username,Password) ->
{ok,[{prompt,_OtherPrompt1},{prompt,_OtherPrompt2}],_} ->
{ok,Pid};
Error ->
- log(Name,error,
+ log(Name,conn_error,
"Did not get expected prompt from ~p:~p\n~p\n",
[Ip,Port,Error]),
{error,Error}
end;
Error ->
- log(Name,error,
+ log(Name,conn_error,
"Could not open telnet connection to ~p:~p\n~p\n",
[Ip,Port,Error]),
Error
diff --git a/lib/common_test/test/Makefile b/lib/common_test/test/Makefile
index 085f19d023..a0ac47f12a 100644
--- a/lib/common_test/test/Makefile
+++ b/lib/common_test/test/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2008-2013. All Rights Reserved.
+# Copyright Ericsson AB 2008-2014. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -61,6 +61,7 @@ MODULES= \
ct_snmp_SUITE \
ct_group_leader_SUITE \
ct_cover_SUITE \
+ ct_cover_nomerge_SUITE \
ct_groups_search_SUITE \
ct_surefire_SUITE \
ct_telnet_SUITE
diff --git a/lib/common_test/test/ct_config_info_SUITE.erl b/lib/common_test/test/ct_config_info_SUITE.erl
index 8f2f0eb75f..9c242a41df 100644
--- a/lib/common_test/test/ct_config_info_SUITE.erl
+++ b/lib/common_test/test/ct_config_info_SUITE.erl
@@ -125,7 +125,7 @@ test_events(config_info) ->
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g1,[]}}},
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g1,[]},
{failed,{timetrap_timeout,350}}}},
- {?eh,tc_auto_skip,{config_info_1_SUITE,t11,
+ {?eh,tc_auto_skip,{config_info_1_SUITE,{t11,g1},
{failed,{config_info_1_SUITE,init_per_group,{timetrap_timeout,350}}}}},
{?eh,tc_auto_skip,{config_info_1_SUITE,{end_per_group,g1},
{failed,{config_info_1_SUITE,init_per_group,
@@ -142,7 +142,7 @@ test_events(config_info) ->
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g4,[]}}},
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g4,[]},
{failed,{timetrap_timeout,400}}}},
- {?eh,tc_auto_skip,{config_info_1_SUITE,t41,
+ {?eh,tc_auto_skip,{config_info_1_SUITE,{t41,g4},
{failed,{config_info_1_SUITE,init_per_group,
{timetrap_timeout,400}}}}},
{?eh,tc_auto_skip,{config_info_1_SUITE,{end_per_group,g4},
diff --git a/lib/common_test/test/ct_cover_SUITE.erl b/lib/common_test/test/ct_cover_SUITE.erl
index ec2680f664..47080b5577 100644
--- a/lib/common_test/test/ct_cover_SUITE.erl
+++ b/lib/common_test/test/ct_cover_SUITE.erl
@@ -172,8 +172,8 @@ cross(Config) ->
check_calls(Events2,1),
%% Get the log dirs for each test and run cross cover analyse
- [D11,D12] = lists:sort(get_run_dirs(Events1)),
- [D21,D22] = lists:sort(get_run_dirs(Events2)),
+ [D11,D12] = lists:sort(get_log_dirs(Events1)),
+ [D21,D22] = lists:sort(get_log_dirs(Events2)),
ct_cover:cross_cover_analyse(details,[{cross1,D11},{cross2,D21}]),
ct_cover:cross_cover_analyse(details,[{cross1,D12},{cross2,D22}]),
@@ -267,18 +267,17 @@ check_cover(Node) when is_atom(Node) ->
false
end.
-%% Get the log dir "run.<timestamp>" for all (both!) tests
-get_run_dirs(Events) ->
- [filename:dirname(TCLog) ||
+%% Get the log dir "ct_run.<timestamp>" for all (both!) tests
+get_log_dirs(Events) ->
+ [LogDir ||
{ct_test_support_eh,
- {event,tc_logfile,_Node,
- {{?suite,init_per_suite},TCLog}}} <- Events].
+ {event,start_logging,_Node,LogDir}} <- Events].
%% Check that each coverlog includes N calls to ?mod:foo/0
check_calls(Events,N) ->
check_calls(Events,{?mod,foo,0},N).
check_calls(Events,MFA,N) ->
- CoverLogs = [filename:join(D,"all.coverdata") || D <- get_run_dirs(Events)],
+ CoverLogs = [filename:join(D,"all.coverdata") || D <- get_log_dirs(Events)],
do_check_logs(CoverLogs,MFA,N).
do_check_logs([CoverLog|CoverLogs],{Mod,_,_} = MFA,N) ->
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE.erl b/lib/common_test/test/ct_cover_nomerge_SUITE.erl
new file mode 100644
index 0000000000..8e2ee1b500
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE.erl
@@ -0,0 +1,221 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2014. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File: ct_cover_nomerge_SUITE
+%%%
+%%% Description:
+%%% Test code cover analysis support when merge_tests=false
+%%%
+%%%-------------------------------------------------------------------
+-module(ct_cover_nomerge_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
+-define(eh, ct_test_support_eh).
+-define(mod, cover_test_mod).
+
+%%--------------------------------------------------------------------
+%% TEST SERVER CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Description: Since Common Test starts another Test Server
+%% instance, the tests need to be performed on a separate node (or
+%% there will be clashes with logging processes etc).
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ case test_server:is_cover() of
+ true ->
+ {skip,"Test server is running cover already - skipping"};
+ false ->
+ ct_test_support:init_per_suite(Config)
+ end.
+
+end_per_suite(Config) ->
+ ct_test_support:end_per_suite(Config).
+
+init_per_testcase(TestCase, Config) ->
+ ct_test_support:init_per_testcase(TestCase, Config).
+
+end_per_testcase(TestCase, Config) ->
+ try apply(?MODULE,TestCase,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ct_test_support:end_per_testcase(TestCase, Config).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ local,
+ remote,
+ remote_nostop
+ ].
+
+%%--------------------------------------------------------------------
+%% TEST CASES
+%%--------------------------------------------------------------------
+
+local(Config) ->
+ DataDir = ?config(data_dir, Config),
+ Spec = filename:join(DataDir, "local.spec"),
+ CoverSpec = [{incl_mods,[?mod]}],
+ CoverFile = create_cover_file(local,CoverSpec,Config),
+ {Opts,ERPid} = setup([{spec,Spec},{label,local},{cover,CoverFile}], Config),
+ {ok,Events} = execute(local, local, Opts, ERPid, Config),
+ false = check_cover(Config),
+ check_calls(Events,2),
+ ok.
+
+remote(Config) ->
+ DataDir = ?config(data_dir, Config),
+ Spec = filename:join(DataDir, "remote.spec"),
+ %% extending some timers for slow test hosts
+ {ok,Node} = ct_slave:start(ct_nomerge,[{boot_timeout,15},
+ {init_timeout,15},
+ {startup_timeout,15}]),
+
+ CoverSpec = [{nodes,[Node]},
+ {incl_mods,[?mod]}],
+ CoverFile = create_cover_file(remote,CoverSpec,Config),
+ {Opts,ERPid} = setup([{spec,Spec},{label,remote},{cover,CoverFile}], Config),
+ {ok,Events} = execute(remote, remote, Opts, ERPid, Config),
+ false = check_cover(Config),
+ check_calls(Events,2),
+ ok.
+remote(cleanup,_Config) ->
+ {ok,_} = ct_slave:stop(ct_nomerge),
+ ok.
+
+remote_nostop(Config) ->
+ DataDir = ?config(data_dir, Config),
+ Spec = filename:join(DataDir, "remote_nostop.spec"),
+ %% extending some timers for slow test hosts
+ {ok,Node} = ct_slave:start(ct_nomerge,[{boot_timeout,15},
+ {init_timeout,15},
+ {startup_timeout,15}]),
+
+ CoverSpec = [{nodes,[Node]},
+ {incl_mods,[?mod]}],
+ CoverFile = create_cover_file(remote_nostop,CoverSpec,Config),
+ {Opts,ERPid} = setup([{spec,Spec},{label,remote_nostop},
+ {cover,CoverFile},{cover_stop,false}],
+ Config),
+ {ok,Events} = execute(remote_nostop, remote_nostop, Opts, ERPid, Config),
+ {true,[Node],[cover_test_mod]} = check_cover(Config),
+ check_calls(Events,2),
+ ok.
+remote_nostop(cleanup,Config) ->
+ CtNode = ?config(ct_node,Config),
+ ok = rpc:call(CtNode,cover,stop,[]),
+ {ok,_} = ct_slave:stop(ct_nomerge),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% HELP FUNCTIONS
+%%%-----------------------------------------------------------------
+setup(Test, Config) ->
+ Opts0 = ct_test_support:get_opts(Config),
+ Level = ?config(trace_level, Config),
+ EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ Opts = Opts0 ++ [{event_handler,{?eh,EvHArgs}}|Test],
+ ERPid = ct_test_support:start_event_receiver(Config),
+ {Opts,ERPid}.
+
+execute(Name, Testcase, Opts, ERPid, Config) ->
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(Name,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+ TestEvents = events_to_check(Testcase),
+ R = ct_test_support:verify_events(TestEvents, Events, Config),
+ {R,Events}.
+
+reformat(Events, EH) ->
+ ct_test_support:reformat(Events, EH).
+
+events_to_check(local) ->
+ events_to_check1(cover_nomerge_local_SUITE);
+events_to_check(remote) ->
+ events_to_check1(cover_nomerge_remote_SUITE);
+events_to_check(remote_nostop) ->
+ events_to_check1(cover_nomerge_remote_nostop_SUITE).
+events_to_check1(Suite) ->
+ OneTest =
+ [{?eh,start_logging,{'DEF','RUNDIR'}}] ++
+ [{?eh,tc_done,{Suite,t1,ok}}] ++
+ [{?eh,tc_done,{Suite,t2,ok}}] ++
+ [{?eh,stop_logging,[]}],
+
+ %% 2 tests (ct:run_test + script_start) are run by default
+ OneTest ++ OneTest.
+
+check_cover(Config) when is_list(Config) ->
+ CTNode = proplists:get_value(ct_node, Config),
+ check_cover(CTNode);
+check_cover(Node) when is_atom(Node) ->
+ case rpc:call(Node,test_server,is_cover,[]) of
+ true ->
+ {true,
+ rpc:call(Node,cover,which_nodes,[]),
+ rpc:call(Node,cover,modules,[])};
+ false ->
+ false
+ end.
+
+%% Get the log dir "ct_run.<timestamp>" for all (both!) tests
+get_log_dirs(Events) ->
+ [LogDir ||
+ {ct_test_support_eh,
+ {event,start_logging,_Node,LogDir}} <- Events].
+
+%% Check that each coverlog includes N calls to ?mod:foo/0
+check_calls(Events,N) ->
+ check_calls(Events,{?mod,foo,0},N).
+check_calls(Events,MFA,N) ->
+ CoverLogs = [filename:join(D,"all.coverdata") || D <- get_log_dirs(Events)],
+ do_check_logs(CoverLogs,MFA,N).
+
+do_check_logs([CoverLog|CoverLogs],{Mod,_,_} = MFA,N) ->
+ {ok,_} = cover:start(),
+ ok = cover:import(CoverLog),
+ {ok,Calls} = cover:analyse(Mod,calls,function),
+ ok = cover:stop(),
+ {MFA,N} = lists:keyfind(MFA,1,Calls),
+ do_check_logs(CoverLogs,MFA,N);
+do_check_logs([],_,_) ->
+ ok.
+
+create_cover_file(Filename,Terms,Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ File = filename:join(PrivDir,Filename) ++ ".cover",
+ {ok,Fd} = file:open(File,[write]),
+ lists:foreach(fun(Term) ->
+ file:write(Fd,io_lib:format("~p.~n",[Term]))
+ end,Terms),
+ ok = file:close(Fd),
+ File.
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_local_SUITE.erl b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_local_SUITE.erl
new file mode 100644
index 0000000000..e1fe3b5fc9
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_local_SUITE.erl
@@ -0,0 +1,63 @@
+%%--------------------------------------------------------------------
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2014. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%----------------------------------------------------------------------
+-module(cover_nomerge_local_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% Default timetrap timeout (set in init_per_testcase).
+-define(default_timeout, ?t:minutes(1)).
+
+suite() ->
+ [].
+
+all() ->
+ [t1,t2].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_testcase(_Case, Config) ->
+ Dog = test_server:timetrap(?default_timeout),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Case, Config) ->
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+break(_Config) ->
+ test_server:break(""),
+ ok.
+
+t1(_Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ ok = cover_test_mod:foo(),
+ ok.
+
+t2(_Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ ok = cover_test_mod:foo(),
+ ok.
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_SUITE.erl b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_SUITE.erl
new file mode 100644
index 0000000000..a77ae0c2db
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_SUITE.erl
@@ -0,0 +1,75 @@
+%%--------------------------------------------------------------------
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2014. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%----------------------------------------------------------------------
+-module(cover_nomerge_remote_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% Default timetrap timeout (set in init_per_testcase).
+-define(default_timeout, ?t:minutes(1)).
+
+suite() ->
+ [].
+
+all() ->
+ [t1,t2].
+
+init_per_suite(Config) ->
+ {ok,Host} = inet:gethostname(),
+ Node = list_to_atom("ct_nomerge@"++Host),
+ pong = net_adm:ping(Node),
+
+%% Including this line, and excluding the equivalent line in end_per_suite,
+%% makes the test fail every now and then with missing data. Why?
+%% ct_cover:remove_nodes([Node]),
+ ct_cover:add_nodes([Node]),
+ [{node,Node}|Config].
+
+end_per_suite(Config) ->
+ Node = ?config(node,Config),
+ ct_cover:remove_nodes([Node]),
+ Config.
+
+init_per_testcase(_Case, Config) ->
+ Dog = test_server:timetrap(?default_timeout),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Case, Config) ->
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+break(_Config) ->
+ test_server:break(""),
+ ok.
+
+t1(Config) ->
+ Node = ?config(node,Config),
+ cover_compiled = rpc:call(Node, code, which, [cover_test_mod]),
+ ok = rpc:call(Node, cover_test_mod, foo, []),
+ ok.
+
+t2(Config) ->
+ Node = ?config(node,Config),
+ cover_compiled = rpc:call(Node, code, which, [cover_test_mod]),
+ ok = rpc:call(Node, cover_test_mod, foo, []),
+ ok.
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_nostop_SUITE.erl b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_nostop_SUITE.erl
new file mode 100644
index 0000000000..0b3159f2c3
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_nomerge_remote_nostop_SUITE.erl
@@ -0,0 +1,68 @@
+%%--------------------------------------------------------------------
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2014. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%----------------------------------------------------------------------
+-module(cover_nomerge_remote_nostop_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% Default timetrap timeout (set in init_per_testcase).
+-define(default_timeout, ?t:minutes(1)).
+
+suite() ->
+ [].
+
+all() ->
+ [t1,t2].
+
+init_per_suite(Config) ->
+ {ok,Host} = inet:gethostname(),
+ Node = list_to_atom("ct_nomerge@"++Host),
+ pong = net_adm:ping(Node),
+ [{node,Node}|Config].
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_testcase(_Case, Config) ->
+ Dog = test_server:timetrap(?default_timeout),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Case, Config) ->
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+break(_Config) ->
+ test_server:break(""),
+ ok.
+
+t1(Config) ->
+ Node = ?config(node,Config),
+ cover_compiled = rpc:call(Node, code, which, [cover_test_mod]),
+ ok = rpc:call(Node, cover_test_mod, foo, []),
+ ok.
+
+t2(Config) ->
+ Node = ?config(node,Config),
+ cover_compiled = rpc:call(Node, code, which, [cover_test_mod]),
+ ok = rpc:call(Node, cover_test_mod, foo, []),
+ ok.
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_test_mod.erl b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_test_mod.erl
new file mode 100644
index 0000000000..d4f69452c3
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE_data/cover_test_mod.erl
@@ -0,0 +1,4 @@
+-module(cover_test_mod).
+-compile(export_all).
+foo() ->
+ ok.
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE_data/local.spec b/lib/common_test/test/ct_cover_nomerge_SUITE_data/local.spec
new file mode 100644
index 0000000000..893c48b010
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE_data/local.spec
@@ -0,0 +1,6 @@
+{merge_tests,false}.
+
+{alias,dir,"."}.
+
+{cases, dir, cover_nomerge_local_SUITE, [t1]}.
+{cases, dir, cover_nomerge_local_SUITE, [t2]}.
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE_data/remote.spec b/lib/common_test/test/ct_cover_nomerge_SUITE_data/remote.spec
new file mode 100644
index 0000000000..78c4332270
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE_data/remote.spec
@@ -0,0 +1,6 @@
+{merge_tests,false}.
+
+{alias,dir,"."}.
+
+{cases, dir, cover_nomerge_remote_SUITE, [t1]}.
+{cases, dir, cover_nomerge_remote_SUITE, [t2]}.
diff --git a/lib/common_test/test/ct_cover_nomerge_SUITE_data/remote_nostop.spec b/lib/common_test/test/ct_cover_nomerge_SUITE_data/remote_nostop.spec
new file mode 100644
index 0000000000..049f586c72
--- /dev/null
+++ b/lib/common_test/test/ct_cover_nomerge_SUITE_data/remote_nostop.spec
@@ -0,0 +1,6 @@
+{merge_tests,false}.
+
+{alias,dir,"."}.
+
+{cases, dir, cover_nomerge_remote_nostop_SUITE, [t1]}.
+{cases, dir, cover_nomerge_remote_nostop_SUITE, [t2]}.
diff --git a/lib/common_test/test/ct_error_SUITE.erl b/lib/common_test/test/ct_error_SUITE.erl
index 194e7d42ae..ecf231529a 100644
--- a/lib/common_test/test/ct_error_SUITE.erl
+++ b/lib/common_test/test/ct_error_SUITE.erl
@@ -369,8 +369,8 @@ test_events(cfg_error) ->
{'EXIT',init_per_suite_fails}}}}},
{?eh,test_stats,{0,0,{0,1}}},
{?eh,tc_auto_skip,
- {cfg_error_1_SUITE,tc2,{failed,{cfg_error_1_SUITE,init_per_suite,
- {'EXIT',init_per_suite_fails}}}}},
+ {cfg_error_1_SUITE,{tc2,g1},{failed,{cfg_error_1_SUITE,init_per_suite,
+ {'EXIT',init_per_suite_fails}}}}},
{?eh,test_stats,{0,0,{0,2}}},
{?eh,tc_auto_skip,
{cfg_error_1_SUITE,end_per_suite,{failed,{cfg_error_1_SUITE,init_per_suite,
@@ -386,7 +386,7 @@ test_events(cfg_error) ->
{'EXIT',{{badmatch,[1,2]},'_'}}}}}},
{?eh,test_stats,{0,0,{0,3}}},
{?eh,tc_auto_skip,
- {cfg_error_2_SUITE,tc2,
+ {cfg_error_2_SUITE,{tc2,g1},
{failed,{cfg_error_2_SUITE,init_per_suite,
{'EXIT',{{badmatch,[1,2]},'_'}}}}}},
{?eh,test_stats,{0,0,{0,4}}},
@@ -403,7 +403,7 @@ test_events(cfg_error) ->
{failed,{cfg_error_3_SUITE,init_per_suite,{timetrap_timeout,2000}}}}},
{?eh,test_stats,{0,0,{0,5}}},
{?eh,tc_auto_skip,
- {cfg_error_3_SUITE,tc2,
+ {cfg_error_3_SUITE,{tc2,g1},
{failed,{cfg_error_3_SUITE,init_per_suite,{timetrap_timeout,2000}}}}},
{?eh,test_stats,{0,0,{0,6}}},
{?eh,tc_auto_skip,
@@ -417,7 +417,7 @@ test_events(cfg_error) ->
{failed,{cfg_error_4_SUITE,init_per_suite,bad_return}}}},
{?eh,test_stats,{0,0,{0,7}}},
{?eh,tc_auto_skip,
- {cfg_error_4_SUITE,tc2,
+ {cfg_error_4_SUITE,{tc2,g1},
{failed,{cfg_error_4_SUITE,init_per_suite,bad_return}}}},
{?eh,test_stats,{0,0,{0,8}}},
{?eh,tc_auto_skip,
@@ -431,7 +431,7 @@ test_events(cfg_error) ->
{failed,{cfg_error_5_SUITE,init_per_suite,bad_return}}}},
{?eh,test_stats,{0,0,{0,9}}},
{?eh,tc_auto_skip,
- {cfg_error_5_SUITE,tc2,
+ {cfg_error_5_SUITE,{tc2,g1},
{failed,{cfg_error_5_SUITE,init_per_suite,bad_return}}}},
{?eh,test_stats,{0,0,{0,10}}},
{?eh,tc_auto_skip,
@@ -477,7 +477,7 @@ test_events(cfg_error) ->
{cfg_error_8_SUITE,{init_per_group,g1,[]},
{failed,{error,{init_per_group_fails,g1}}}}},
{?eh,tc_auto_skip,
- {cfg_error_8_SUITE,tc1,
+ {cfg_error_8_SUITE,{tc1,g1},
{failed,{cfg_error_8_SUITE,init_per_group,
{'EXIT',{init_per_group_fails,g1}}}}}},
{?eh,test_stats,{4,0,{0,11}}},
@@ -489,7 +489,7 @@ test_events(cfg_error) ->
[{?eh,tc_start,{cfg_error_8_SUITE,{init_per_group,g2,[]}}},
{?eh,tc_done,{cfg_error_8_SUITE,{init_per_group,g2,[]},
{failed,{timetrap_timeout,2000}}}},
- {?eh,tc_auto_skip,{cfg_error_8_SUITE,tc1,
+ {?eh,tc_auto_skip,{cfg_error_8_SUITE,{tc1,g2},
{failed,{cfg_error_8_SUITE,init_per_group,
{timetrap_timeout,2000}}}}},
{?eh,test_stats,{4,0,{0,12}}},
@@ -502,7 +502,7 @@ test_events(cfg_error) ->
{cfg_error_8_SUITE,{init_per_group,g3,[]},
{failed,{error,{{badmatch,42},'_'}}}}},
{?eh,tc_auto_skip,
- {cfg_error_8_SUITE,tc1,
+ {cfg_error_8_SUITE,{tc1,g3},
{failed,{cfg_error_8_SUITE,init_per_group,
{'EXIT',{{badmatch,42},'_'}}}}}},
{?eh,test_stats,{4,0,{0,13}}},
@@ -528,7 +528,7 @@ test_events(cfg_error) ->
{?eh,tc_done,{cfg_error_8_SUITE,{init_per_group,g6,[]},
{failed,{error,{sub_group_failed,g6}}}}},
{?eh,tc_auto_skip,
- {cfg_error_8_SUITE,tc2,
+ {cfg_error_8_SUITE,{tc2,g6},
{failed,{cfg_error_8_SUITE,init_per_group,
{'EXIT',{sub_group_failed,g6}}}}}},
{?eh,test_stats,{6,0,{0,14}}},
@@ -1111,11 +1111,11 @@ test_events(timetrap_fun_group) ->
[{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g4,[]}}},
{?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g4,[]},
{user_timetrap_error,{kaboom,'_'}}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc0,g4},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{0,11,{0,1}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,g4},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{0,11,{0,2}}},
@@ -1126,11 +1126,11 @@ test_events(timetrap_fun_group) ->
[{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g5,[]}}},
{?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g5,[]},
{user_timetrap_error,{kaboom,'_'}}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc0,g5},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{0,11,{0,3}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,g5},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{0,11,{0,4}}},
@@ -1141,11 +1141,11 @@ test_events(timetrap_fun_group) ->
[{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g6,[]}}},
{?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g6,[]},
{failed,{timetrap_timeout,{'$approx',500}}}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc0,g6},
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}},
{?eh,test_stats,{0,11,{0,5}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,g6},
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}},
{?eh,test_stats,{0,11,{0,6}}},
@@ -1294,11 +1294,11 @@ test_events(timetrap_fun_group) ->
[{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg4,[parallel]}}},
{?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg4,[parallel]},
{user_timetrap_error,{kaboom,'_'}}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc0,pg4},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{4,26,{0,7}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,pg4},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{4,26,{0,8}}},
@@ -1310,11 +1310,11 @@ test_events(timetrap_fun_group) ->
[{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg5,[parallel]}}},
{?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg5,[parallel]},
{user_timetrap_error,{kaboom,'_'}}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc0,pg5},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{4,26,{0,9}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,pg5},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{4,26,{0,10}}},
@@ -1326,11 +1326,11 @@ test_events(timetrap_fun_group) ->
[{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg6,[parallel]}}},
{?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg6,[parallel]},
{failed,{timetrap_timeout,{'$approx',500}}}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc0,pg6},
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}},
{?eh,test_stats,{4,26,{0,11}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,pg6},
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}},
{?eh,test_stats,{4,26,{0,12}}},
@@ -1407,10 +1407,10 @@ test_events(timetrap_fun_group) ->
{?eh,tc_done,{timetrap_8_SUITE,tc0,
{user_timetrap_error,{kaboom,'_'}}}},
{?eh,test_stats,{9,31,{0,12}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc1,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc1,sg1},
{failed,{timetrap_8_SUITE,tc0}}}},
{?eh,test_stats,{9,31,{0,13}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,sg1},
{failed,{timetrap_8_SUITE,tc0}}}},
{?eh,test_stats,{9,31,{0,14}}},
{?eh,tc_start,{timetrap_8_SUITE,{end_per_group,sg1,[sequence]}}},
@@ -1425,10 +1425,10 @@ test_events(timetrap_fun_group) ->
{?eh,tc_done,{timetrap_8_SUITE,tc0,
{failed,{timetrap_timeout,{'$approx',1000}}}}},
{?eh,test_stats,{10,32,{0,14}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc1,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc1,sg2},
{failed,{timetrap_8_SUITE,tc0}}}},
{?eh,test_stats,{10,32,{0,15}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{tc2,sg2},
{failed,{timetrap_8_SUITE,tc0}}}},
{?eh,test_stats,{10,32,{0,16}}},
{?eh,tc_start,{timetrap_8_SUITE,{end_per_group,sg2,[sequence]}}},
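
The expected-event updates throughout this suite follow one pattern: for a test case that belongs to a group, the tc_auto_skip (and tc_user_skip) event data now carries {TestCase,GroupName} rather than the bare test case name. As a hedged sketch, an event handler of your own could match the new shape like this (the handler module and the log format are illustrative; only the tuple shape comes from the events above):

%% Assumes -include_lib("common_test/include/ct_event.hrl") for #event{}.
handle_event(#event{name = tc_auto_skip,
                    data = {Suite, {TestCase, Group}, Reason}}, State) ->
    io:format("~w:~w (group ~w) auto-skipped: ~p~n",
              [Suite, TestCase, Group, Reason]),
    {ok, State};
handle_event(_Event, State) ->
    {ok, State}.
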
diff --git a/lib/common_test/test/ct_group_info_SUITE.erl b/lib/common_test/test/ct_group_info_SUITE.erl
index e7bc5baaa1..83ac7dbbcf 100644
--- a/lib/common_test/test/ct_group_info_SUITE.erl
+++ b/lib/common_test/test/ct_group_info_SUITE.erl
@@ -273,7 +273,7 @@ test_events(timetrap_all) ->
{init_per_group,g11,[]},
{auto_skipped,{group0_failed,bad_return_value}}}},
{?eh,tc_auto_skip,
- {group_timetrap_1_SUITE,t111,{group0_failed,bad_return_value}}},
+ {group_timetrap_1_SUITE,{t111,g11},{group0_failed,bad_return_value}}},
{?eh,test_stats,{0,13,{0,1}}},
{?eh,tc_auto_skip,{group_timetrap_1_SUITE,
{end_per_group,g11},
@@ -431,7 +431,7 @@ test_events(timetrap_all_no_ips) ->
{?eh,tc_done,{group_timetrap_2_SUITE,
{init_per_group,g11,[]},
{auto_skipped,{group0_failed,bad_return_value}}}},
- {?eh,tc_auto_skip,{group_timetrap_2_SUITE,t111,
+ {?eh,tc_auto_skip,{group_timetrap_2_SUITE,{t111,g11},
{group0_failed,bad_return_value}}},
{?eh,test_stats,{0,13,{0,1}}},
{?eh,tc_auto_skip,{group_timetrap_2_SUITE,
@@ -512,7 +512,7 @@ test_events(timetrap_all_no_ipg) ->
{?eh,tc_done,{ct_framework,
{init_per_group,g11,[{suite,group_timetrap_3_SUITE}]},
{auto_skipped,{group0_failed,bad_return_value}}}},
- {?eh,tc_auto_skip,{group_timetrap_3_SUITE,t111,{group0_failed,bad_return_value}}},
+ {?eh,tc_auto_skip,{group_timetrap_3_SUITE,{t111,g11},{group0_failed,bad_return_value}}},
{?eh,test_stats,{0,13,{0,1}}},
{?eh,tc_auto_skip,{ct_framework,{end_per_group,g11},
{group0_failed,bad_return_value}}}],
@@ -551,7 +551,7 @@ test_events(require) ->
{?eh,tc_done,{group_require_1_SUITE,{init_per_group,g4,[]},
{auto_skipped,{require_failed,
{name_in_use,common2_alias,common2}}}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,t41,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{t41,g4},
{require_failed,
{name_in_use,common2_alias,common2}}}},
{?eh,test_stats,{4,0,{0,1}}},
@@ -580,7 +580,7 @@ test_events(require) ->
{init_per_group,g8,[]},
{auto_skipped,{require_failed,
{not_available,non_existing}}}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,t81,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{t81,g8},
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,{end_per_group,g8},
@@ -604,7 +604,7 @@ test_events(require) ->
{?eh,tc_done,{group_require_1_SUITE,
{init_per_group,g11,[]},
{auto_skipped,{group0_failed,bad_return_value}}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,t111,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{t111,g11},
{group0_failed,bad_return_value}}},
{?eh,test_stats,{9,0,{0,4}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,
@@ -646,7 +646,7 @@ test_events(require_default) ->
{?eh,tc_done,{group_require_1_SUITE,
{init_per_group,g4,[]},
{auto_skipped,{require_failed,{not_available,common3}}}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,t41,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{t41,g4},
{require_failed,{not_available,common3}}}},
{?eh,test_stats,{4,0,{0,1}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,{end_per_group,g4},
@@ -674,7 +674,7 @@ test_events(require_default) ->
{init_per_group,g8,[]},
{auto_skipped,{require_failed,
{not_available,non_existing}}}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,t81,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{t81,g8},
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,{end_per_group,g8},
@@ -699,7 +699,7 @@ test_events(require_default) ->
{?eh,tc_done,{group_require_1_SUITE,
{init_per_group,g11,[]},
{auto_skipped,{group0_failed,bad_return_value}}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,t111,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{t111,g11},
{group0_failed,bad_return_value}}},
{?eh,test_stats,{9,0,{0,4}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,
@@ -740,7 +740,7 @@ test_events(require_no_ips) ->
{?eh,tc_done,{group_require_2_SUITE,{init_per_group,g4,[]},
{auto_skipped,{require_failed,
{name_in_use,common2_alias,common2}}}}},
- {?eh,tc_auto_skip,{group_require_2_SUITE,t41,
+ {?eh,tc_auto_skip,{group_require_2_SUITE,{t41,g4},
{require_failed,{name_in_use,common2_alias,common2}}}},
{?eh,test_stats,{4,0,{0,1}}},
{?eh,tc_auto_skip,{group_require_2_SUITE,{end_per_group,g4},
@@ -768,7 +768,7 @@ test_events(require_no_ips) ->
{init_per_group,g8,[]},
{auto_skipped,{require_failed,
{not_available,non_existing}}}}},
- {?eh,tc_auto_skip,{group_require_2_SUITE,t81,
+ {?eh,tc_auto_skip,{group_require_2_SUITE,{t81,g8},
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
{?eh,tc_auto_skip,{group_require_2_SUITE,{end_per_group,g8},
@@ -792,7 +792,7 @@ test_events(require_no_ips) ->
{?eh,tc_done,{group_require_2_SUITE,
{init_per_group,g11,[]},
{auto_skipped,{group0_failed,bad_return_value}}}},
- {?eh,tc_auto_skip,{group_require_2_SUITE,t111,
+ {?eh,tc_auto_skip,{group_require_2_SUITE,{t111,g11},
{group0_failed,bad_return_value}}},
{?eh,test_stats,{9,0,{0,4}}},
{?eh,tc_auto_skip,{group_require_2_SUITE,
@@ -831,7 +831,7 @@ test_events(require_no_ipg) ->
[{?eh,tc_start,{ct_framework,{init_per_group,g4,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{init_per_group,g4,[{suite,group_require_3_SUITE}]},
{auto_skipped,{require_failed,{name_in_use,common2_alias,common2}}}}},
- {?eh,tc_auto_skip,{group_require_3_SUITE,t41,
+ {?eh,tc_auto_skip,{group_require_3_SUITE,{t41,g4},
{require_failed,{name_in_use,common2_alias,common2}}}},
{?eh,test_stats,{4,0,{0,1}}},
{?eh,tc_auto_skip,{ct_framework,{end_per_group,g4},
@@ -857,7 +857,7 @@ test_events(require_no_ipg) ->
[{?eh,tc_start,{ct_framework,{init_per_group,g8,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{init_per_group,g8,[{suite,group_require_3_SUITE}]},
{auto_skipped,{require_failed,{not_available,non_existing}}}}},
- {?eh,tc_auto_skip,{group_require_3_SUITE,t81,
+ {?eh,tc_auto_skip,{group_require_3_SUITE,{t81,g8},
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
{?eh,tc_auto_skip,{ct_framework,{end_per_group,g8},
@@ -879,7 +879,7 @@ test_events(require_no_ipg) ->
[{?eh,tc_start,{ct_framework,{init_per_group,g11,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{init_per_group,g11,[{suite,group_require_3_SUITE}]},
{auto_skipped,{group0_failed,bad_return_value}}}},
- {?eh,tc_auto_skip,{group_require_3_SUITE,t111,{group0_failed,bad_return_value}}},
+ {?eh,tc_auto_skip,{group_require_3_SUITE,{t111,g11},{group0_failed,bad_return_value}}},
{?eh,test_stats,{9,0,{0,4}}},
{?eh,tc_auto_skip,{ct_framework,{end_per_group,g11},
{group0_failed,bad_return_value}}}],
diff --git a/lib/common_test/test/ct_groups_spec_SUITE.erl b/lib/common_test/test/ct_groups_spec_SUITE.erl
index 5a6d5ac0ac..de4ab77229 100644
--- a/lib/common_test/test/ct_groups_spec_SUITE.erl
+++ b/lib/common_test/test/ct_groups_spec_SUITE.erl
@@ -246,7 +246,8 @@ test_events(override_with_all) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g1,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t11,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t12,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t13,{failed,{groups_spec_1_SUITE,t12}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t13,g1},
+ {failed,{groups_spec_1_SUITE,t12}}}},
{?eh,test_stats,{3,2,{0,1}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g1,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g1,[sequence]},ok}}],
@@ -327,19 +328,27 @@ test_events(override_with_all) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g3,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t31,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t32,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t33,{failed,{groups_spec_1_SUITE,t32}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t33,g3},
+ {failed,{groups_spec_1_SUITE,t32}}}},
{?eh,test_stats,{14,9,{0,2}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]},ok}}],
{?eh,tc_done,{groups_spec_1_SUITE,t22,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t41,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t51,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t52,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t53,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t54,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t42,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t23,{failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t41,g4},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t51,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t52,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t53,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t54,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t42,g4},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t23,g2},
+ {failed,{groups_spec_1_SUITE,t22}}}},
{?eh,test_stats,{14,10,{0,9}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g2,[sequence]}}},
@@ -355,7 +364,8 @@ test_events(override_with_all) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g3,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t31,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t32,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t33,{failed,{groups_spec_1_SUITE,t32}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t33,g3},
+ {failed,{groups_spec_1_SUITE,t32}}}},
{?eh,test_stats,{16,11,{0,10}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]},ok}}],
@@ -372,8 +382,10 @@ test_events(override_with_all) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g5,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t51,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t52,{failed,{timetrap_timeout,2000}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t53,{failed,{groups_spec_1_SUITE,t52}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t54,{failed,{groups_spec_1_SUITE,t52}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t53,g5},
+ {failed,{groups_spec_1_SUITE,t52}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t54,g5},
+ {failed,{groups_spec_1_SUITE,t52}}}},
{?eh,test_stats,{18,13,{0,12}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g5,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g5,[sequence]},ok}}],
@@ -417,7 +429,8 @@ test_events(override_with_spec) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g1,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t11,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t12,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t13,{failed,{groups_spec_1_SUITE,t12}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t13,g1},
+ {failed,{groups_spec_1_SUITE,t12}}}},
{?eh,test_stats,{3,2,{0,1}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g1,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g1,[sequence]},ok}}],
@@ -493,18 +506,26 @@ test_events(override_with_spec) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g3,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t31,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t32,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t33,{failed,{groups_spec_1_SUITE,t32}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t33,g3},
+ {failed,{groups_spec_1_SUITE,t32}}}},
{?eh,test_stats,{14,9,{0,2}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]},ok}}],
{?eh,tc_done,{groups_spec_1_SUITE,t22,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t41,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t51,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t52,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t53,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t54,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t42,{failed,{groups_spec_1_SUITE,t22}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t23,{failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t41,g4},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t51,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t52,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t53,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t54,g5},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t42,g4},
+ {failed,{groups_spec_1_SUITE,t22}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t23,g2},
+ {failed,{groups_spec_1_SUITE,t22}}}},
{?eh,test_stats,{14,10,{0,9}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g2,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g2,[sequence]},ok}}],
@@ -521,7 +542,8 @@ test_events(override_with_spec) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g3,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t31,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t32,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t33,{failed,{groups_spec_1_SUITE,t32}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t33,g3},
+ {failed,{groups_spec_1_SUITE,t32}}}},
{?eh,test_stats,{16,11,{0,10}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g3,[sequence]},ok}}],
@@ -535,8 +557,10 @@ test_events(override_with_spec) ->
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g5,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t51,ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t52,{failed,{timetrap_timeout,2000}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t53,{failed,{groups_spec_1_SUITE,t52}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t54,{failed,{groups_spec_1_SUITE,t52}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t53,g5},
+ {failed,{groups_spec_1_SUITE,t52}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t54,g5},
+ {failed,{groups_spec_1_SUITE,t52}}}},
{?eh,test_stats,{18,13,{0,12}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g5,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g5,[sequence]},ok}}],
@@ -555,7 +579,8 @@ test_events(override_with_spec) ->
[{?eh,tc_start,{groups_spec_1_SUITE,{init_per_group,g1,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{init_per_group,g1,[sequence]},ok}},
{?eh,tc_done,{groups_spec_1_SUITE,t12,{failed,{error,crashes}}}},
- {?eh,tc_auto_skip,{groups_spec_1_SUITE,t13,{failed,{groups_spec_1_SUITE,t12}}}},
+ {?eh,tc_auto_skip,{groups_spec_1_SUITE,{t13,g1},
+ {failed,{groups_spec_1_SUITE,t12}}}},
{?eh,test_stats,{19,15,{0,13}}},
{?eh,tc_start,{groups_spec_1_SUITE,{end_per_group,g1,[sequence]}}},
{?eh,tc_done,{groups_spec_1_SUITE,{end_per_group,g1,[sequence]},ok}}],
diff --git a/lib/common_test/test/ct_hooks_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE.erl
index b5855da9df..c8fc4bd59b 100644
--- a/lib/common_test/test/ct_hooks_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE.erl
@@ -786,7 +786,7 @@ test_events(skip_pre_end_cth) ->
{?eh,cth,{'_',post_end_per_group,[group1,'$proplist','_',[]]}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,{end_per_group,group1,[]},
{skipped,"Test skip"}}}],
- {?eh,cth,{'_',on_tc_skip,[end_per_group,
+ {?eh,cth,{'_',on_tc_skip,[{end_per_group,group1},
{tc_user_skip,{skipped,"Test skip"}},
[]]}},
{?eh,tc_start,{ct_scope_per_group_cth_SUITE,end_per_suite}},
diff --git a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl
index 9ee2a90896..6caac7e447 100644
--- a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl
+++ b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/empty_cth.erl
@@ -229,9 +229,9 @@ post_end_per_testcase(TC,Config,Return,State) ->
%% This function should be used for extra cleanup which might be needed.
%% It is not possible to modify the config or the status of the test run.
-spec on_tc_fail(TC :: init_per_suite | end_per_suite |
- init_per_group | end_per_group | atom(),
- Reason :: term(), State :: #state{}) ->
- NewState :: #state{}.
+ init_per_group | end_per_group | atom() |
+ {Function :: atom(), GroupName :: atom()},
+ Reason :: term(), State :: #state{}) -> NewState :: #state{}.
on_tc_fail(TC, Reason, State) ->
gen_event:notify(
?CT_EVMGR_REF, #event{ name = cth, node = node(),
@@ -243,11 +243,11 @@ on_tc_fail(TC, Reason, State) ->
%% or due to an init function failing. Test case can be
%% end_per_suite, init_per_group, end_per_group and the actual test cases.
-spec on_tc_skip(TC :: end_per_suite |
- init_per_group | end_per_group | atom(),
+ init_per_group | end_per_group | atom() |
+ {Function :: atom(), GroupName :: atom()},
{tc_auto_skip, {failed, {Mod :: atom(), Function :: atom(), Reason :: term()}}} |
- {tc_user_skip, {skipped, Reason :: term()}},
- State :: #state{}) ->
- NewState :: #state{}.
+ {tc_user_skip, {skipped, Reason :: term()}},
+ State :: #state{}) -> NewState :: #state{}.
on_tc_skip(TC, Reason, State) ->
gen_event:notify(
?CT_EVMGR_REF, #event{ name = cth, node = node(),
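
The widened specs above mean that on_tc_fail/3 and on_tc_skip/3 can now be called with {TestCase,GroupName} as well as with a bare function name. A minimal sketch of a hook clause handling both forms (the logging is illustrative; only the argument shapes are taken from the spec above):

%% Sketch: accept both the old atom form and the new {TestCase,Group} form.
on_tc_skip({TC, Group}, Reason, State) when is_atom(TC), is_atom(Group) ->
    ct:log("~w in group ~w skipped: ~p", [TC, Group, Reason]),
    State;
on_tc_skip(TC, Reason, State) when is_atom(TC) ->
    ct:log("~w skipped: ~p", [TC, Reason]),
    State.
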
diff --git a/lib/common_test/test/ct_master_SUITE.erl b/lib/common_test/test/ct_master_SUITE.erl
index 7408cbe376..e90513f888 100644
--- a/lib/common_test/test/ct_master_SUITE.erl
+++ b/lib/common_test/test/ct_master_SUITE.erl
@@ -81,7 +81,8 @@ end_per_testcase(TestCase, Config) ->
ct_test_support:end_per_testcase(TestCase, Config).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() -> [{timetrap,{seconds,60}},
+ {ct_hooks,[ts_install_cth]}].
all() ->
[ct_master_test].
diff --git a/lib/common_test/test/ct_repeat_1_SUITE.erl b/lib/common_test/test/ct_repeat_1_SUITE.erl
index 98eaa28763..e37aeb196c 100644
--- a/lib/common_test/test/ct_repeat_1_SUITE.erl
+++ b/lib/common_test/test/ct_repeat_1_SUITE.erl
@@ -225,7 +225,7 @@ test_events(repeat_cs_and_grs) ->
{?eh,test_stats,{3,1,{0,0}}},
[{?eh,tc_done,{repeat_1_SUITE,{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}},
{?eh,test_stats,{3,1,{0,1}}},
@@ -247,7 +247,7 @@ test_events(repeat_cs_and_grs) ->
{?eh,test_stats,{7,2,{0,1}}},
[{?eh,tc_done,{repeat_1_SUITE,{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}},
{?eh,test_stats,{7,2,{0,2}}},
@@ -269,7 +269,7 @@ test_events(repeat_seq) ->
ok}},
{?eh,test_stats,{1,0,{0,0}}},
{?eh,test_stats,{1,1,{0,0}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_2,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_2,repeat_seq_1},
{failed,{repeat_1_SUITE,tc_fail_1}}}},
{?eh,test_stats,{1,1,{0,1}}},
{?eh,tc_done,{repeat_1_SUITE,
@@ -291,7 +291,7 @@ test_events(repeat_seq) ->
{?eh,tc_done,{repeat_1_SUITE,
{end_per_group,gr_fail_result,[]},
{return_group_result,failed}}}],
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_2,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_2,repeat_seq_2},
{group_result,gr_fail_result,failed}}},
{?eh,test_stats,{4,2,{0,3}}},
{?eh,tc_done,{repeat_1_SUITE,
@@ -315,7 +315,7 @@ test_events(repeat_seq) ->
{failed,
{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_2,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_2,repeat_seq_3},
{group_result,gr_fail_init,failed}}},
{?eh,test_stats,{7,2,{0,6}}},
{?eh,tc_done,{repeat_1_SUITE,
@@ -329,12 +329,13 @@ test_events(repeat_seq) ->
[{?eh,tc_done,{repeat_1_SUITE,
{init_per_group,repeat_seq_4,[sequence,{repeat,2}]},
ok}},
+ {?eh,tc_done,{repeat_1_SUITE,tc_fail_1,'_'}},
{?eh,test_stats,{8,3,{0,8}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,
- tc_ok_1,{failed,{repeat_1_SUITE,tc_fail_1}}}},
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,gr_ok_1},
+ {failed,{repeat_1_SUITE,tc_fail_1}}}},
{?eh,test_stats,{8,3,{0,9}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,
- tc_ok_1,{failed,{repeat_1_SUITE,tc_fail_1}}}},
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,repeat_seq_4},
+ {failed,{repeat_1_SUITE,tc_fail_1}}}},
{?eh,test_stats,{8,3,{0,10}}},
{?eh,tc_done,{repeat_1_SUITE,
{end_per_group,repeat_seq_4,[sequence,{repeat,2}]},
@@ -764,7 +765,7 @@ test_events(repeat_gr_until_any_fail) ->
{init_per_group,gr_ok_then_fail_init,[]},
{failed,{error,failing_this_time}}}},
{?eh,tc_auto_skip,
- {repeat_1_SUITE,tc_ok_1,
+ {repeat_1_SUITE,{tc_ok_1,gr_ok_then_fail_init},
{failed,
{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}},
@@ -963,7 +964,7 @@ test_events(repeat_gr_until_all_ok) ->
[{?eh,tc_done,{repeat_1_SUITE,
{init_per_group,gr_fail_init_then_ok,[]},
{failed,{error,failing_this_time}}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,gr_fail_init_then_ok},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}},
{?eh,test_stats,{7,1,{0,1}}},
@@ -1237,10 +1238,10 @@ test_events(repeat_seq_until_any_fail) ->
{?eh,tc_done,{repeat_1_SUITE,tc_ok_then_fail_1,
{failed,{error,failing_this_time}}}},
{?eh,test_stats,{15,1,{0,0}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_2,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_2,repeat_seq_until_any_fail_3},
{failed,{repeat_1_SUITE,tc_ok_then_fail_1}}}},
{?eh,test_stats,{15,1,{0,1}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,gr_ok_1},
{failed,{repeat_1_SUITE,tc_ok_then_fail_1}}}},
{?eh,test_stats,{15,1,{0,2}}},
{?eh,tc_done,{repeat_1_SUITE,
@@ -1264,10 +1265,10 @@ test_events(repeat_seq_until_any_fail) ->
[{?eh,tc_done,{repeat_1_SUITE,
{end_per_group,gr_ok_then_fail_result,[]},
{return_group_result,failed}}}],
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,gr_ok_1},
{group_result,gr_ok_then_fail_result,failed}}},
{?eh,test_stats,{19,1,{0,3}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,repeat_seq_until_any_fail_4},
{group_result,gr_ok_then_fail_result,failed}}},
{?eh,test_stats,{19,1,{0,4}}},
{?eh,tc_done,{repeat_1_SUITE,
@@ -1296,10 +1297,10 @@ test_events(repeat_seq_until_any_fail) ->
{?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_ok_then_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}}],
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,gr_ok_2},
{group_result,gr_ok_then_fail_init,failed}}},
{?eh,test_stats,{24,1,{0,6}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{tc_ok_1,repeat_seq_until_any_fail_5},
{group_result,gr_ok_then_fail_init,failed}}},
{?eh,test_stats,{24,1,{0,7}}},
{?eh,tc_done,{repeat_1_SUITE,
diff --git a/lib/common_test/test/ct_repeat_testrun_SUITE.erl b/lib/common_test/test/ct_repeat_testrun_SUITE.erl
index bb2aba2c5a..b6f285322d 100644
--- a/lib/common_test/test/ct_repeat_testrun_SUITE.erl
+++ b/lib/common_test/test/ct_repeat_testrun_SUITE.erl
@@ -343,9 +343,9 @@ skip_first_tc1(Suite) ->
{?eh,tc_done,{Suite,tc2,?skipped}},
{?eh,test_stats,{'_',0,{0,1}}},
{?eh,tc_done,{Suite,{init_per_group,g,[]},?skipped}},
- {?eh,tc_auto_skip,{Suite,tc1,?skip_reason}},
+ {?eh,tc_auto_skip,{Suite,{tc1,g},?skip_reason}},
{?eh,test_stats,{'_',0,{0,2}}},
- {?eh,tc_auto_skip,{Suite,tc2,?skip_reason}},
+ {?eh,tc_auto_skip,{Suite,{tc2,g},?skip_reason}},
{?eh,test_stats,{'_',0,{0,3}}},
{?eh,tc_auto_skip,{Suite,{end_per_group,g},?skip_reason}},
{?eh,tc_done,{Suite,tc2,?skipped}},
diff --git a/lib/common_test/test/ct_sequence_1_SUITE.erl b/lib/common_test/test/ct_sequence_1_SUITE.erl
index 8c87236838..5a775a1117 100644
--- a/lib/common_test/test/ct_sequence_1_SUITE.erl
+++ b/lib/common_test/test/ct_sequence_1_SUITE.erl
@@ -185,7 +185,8 @@ test_events(subgroup_return_fail) ->
{?eh,tc_done,{subgroups_1_SUITE,{end_per_group,return_fail,[]},
{return_group_result,failed}}}],
{?eh,tc_auto_skip,
- {subgroups_1_SUITE,ok_tc,{group_result,return_fail,failed}}},
+ {subgroups_1_SUITE,{ok_tc,ok_group},
+ {group_result,return_fail,failed}}},
{?eh,test_stats,{0,1,{0,1}}},
{?eh,tc_start,
{subgroups_1_SUITE,{end_per_group,subgroup_return_fail,[sequence]}}},
@@ -208,14 +209,15 @@ test_events(subgroup_init_fail) ->
[{?eh,tc_start,{subgroups_1_SUITE,{init_per_group,fail_init,[]}}},
{?eh,tc_done,{subgroups_1_SUITE,{init_per_group,fail_init,[]},
{failed,{error,init_per_group_fails_on_purpose}}}},
- {?eh,tc_auto_skip,{subgroups_1_SUITE,ok_tc,
+ {?eh,tc_auto_skip,{subgroups_1_SUITE,{ok_tc,fail_init},
{failed,{subgroups_1_SUITE,init_per_group,
{'EXIT',init_per_group_fails_on_purpose}}}}},
{?eh,test_stats,{0,0,{0,1}}},
{?eh,tc_auto_skip,{subgroups_1_SUITE,{end_per_group,fail_init},
{failed,{subgroups_1_SUITE,init_per_group,
{'EXIT',init_per_group_fails_on_purpose}}}}}],
- {?eh,tc_auto_skip,{subgroups_1_SUITE,ok_tc,{group_result,fail_init,failed}}},
+ {?eh,tc_auto_skip,{subgroups_1_SUITE,{ok_tc,ok_group},
+ {group_result,fail_init,failed}}},
{?eh,test_stats,{0,0,{0,2}}},
{?eh,tc_start,{subgroups_1_SUITE,{end_per_group,subgroup_init_fail,[sequence]}}},
{?eh,tc_done,{subgroups_1_SUITE,
@@ -237,7 +239,8 @@ test_events(subgroup_after_failed_case) ->
{?eh,tc_start,{subgroups_1_SUITE,failing_tc}},
{?eh,tc_done,{subgroups_1_SUITE,failing_tc,{failed,{error,{{badmatch,3},'_'}}}}},
{?eh,test_stats,{0,1,{0,0}}},
- {?eh,tc_auto_skip,{subgroups_1_SUITE,ok_tc,{failed,{subgroups_1_SUITE,failing_tc}}}},
+ {?eh,tc_auto_skip,{subgroups_1_SUITE,{ok_tc,ok_group},
+ {failed,{subgroups_1_SUITE,failing_tc}}}},
{?eh,test_stats,{0,1,{0,1}}},
{?eh,tc_start,{subgroups_1_SUITE,
{end_per_group,subgroup_after_failed_case,[sequence]}}},
@@ -265,7 +268,8 @@ test_events(case_after_subgroup_return_fail) ->
{?eh,tc_start,{subgroups_1_SUITE,{end_per_group,return_fail,[]}}},
{?eh,tc_done,{subgroups_1_SUITE,{end_per_group,return_fail,[]},
{return_group_result,failed}}}],
- {?eh,tc_auto_skip,{subgroups_1_SUITE,ok_tc,{group_result,return_fail,failed}}},
+ {?eh,tc_auto_skip,{subgroups_1_SUITE,{ok_tc,case_after_subgroup_return_fail},
+ {group_result,return_fail,failed}}},
{?eh,test_stats,{0,1,{0,1}}},
{?eh,tc_start,{subgroups_1_SUITE,
{end_per_group,case_after_subgroup_return_fail,[sequence]}}},
@@ -289,7 +293,7 @@ test_events(case_after_subgroup_fail_init) ->
{?eh,tc_done,{subgroups_1_SUITE,
{init_per_group,fail_init,[]},
{failed,{error,init_per_group_fails_on_purpose}}}},
- {?eh,tc_auto_skip,{subgroups_1_SUITE,ok_tc,
+ {?eh,tc_auto_skip,{subgroups_1_SUITE,{ok_tc,fail_init},
{failed,
{subgroups_1_SUITE,init_per_group,
{'EXIT',init_per_group_fails_on_purpose}}}}},
@@ -300,7 +304,8 @@ test_events(case_after_subgroup_fail_init) ->
{'EXIT',init_per_group_fails_on_purpose}}}}}],
{?eh,tc_auto_skip,
- {subgroups_1_SUITE,ok_tc,{group_result,fail_init,failed}}},
+ {subgroups_1_SUITE,{ok_tc,case_after_subgroup_fail_init},
+ {group_result,fail_init,failed}}},
{?eh,test_stats,{0,0,{0,2}}},
{?eh,tc_start,{subgroups_1_SUITE,
{end_per_group,case_after_subgroup_fail_init,[sequence]}}},
diff --git a/lib/common_test/test/ct_skip_SUITE.erl b/lib/common_test/test/ct_skip_SUITE.erl
index b0a6c839a2..6fb803b928 100644
--- a/lib/common_test/test/ct_skip_SUITE.erl
+++ b/lib/common_test/test/ct_skip_SUITE.erl
@@ -153,10 +153,10 @@ testspec_skip(Config) when is_list(Config) ->
{skip_groups, TestDir, user_skip_6_SUITE, psub1, "SKIPPED"}],
{Opts,ERPid} = setup_testspec([{ts1,TestSpec1},
- {ts2,TestSpec2},
- {ts3,TestSpec3},
- {ts4,TestSpec4},
- {ts5,TestSpec5}], Config),
+ {ts2,TestSpec2},
+ {ts3,TestSpec3},
+ {ts4,TestSpec4},
+ {ts5,TestSpec5}], Config),
ok = ct_test_support:run(Opts, Config),
@@ -234,8 +234,8 @@ test_events(auto_skip) ->
{?eh,tc_done,
{auto_skip_2_SUITE,init_per_suite,{failed,{error,init_per_suite_failed}}}},
{?eh,tc_auto_skip,
- {auto_skip_2_SUITE,tc1,{failed,{auto_skip_2_SUITE,init_per_suite,
- {'EXIT',init_per_suite_failed}}}}},
+ {auto_skip_2_SUITE,{tc1,g1},{failed,{auto_skip_2_SUITE,init_per_suite,
+ {'EXIT',init_per_suite_failed}}}}},
{?eh,test_stats,{0,0,{0,3}}},
{?eh,tc_auto_skip,
{auto_skip_2_SUITE,end_per_suite,{failed,{auto_skip_2_SUITE,init_per_suite,
@@ -274,12 +274,12 @@ test_events(auto_skip) ->
{?eh,tc_done,
{auto_skip_5_SUITE,{init_per_group,g1,[]},{failed,{error,{group,g1,failed}}}}},
{?eh,tc_auto_skip,
- {auto_skip_5_SUITE,tc1,{failed,{auto_skip_5_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}},
+ {auto_skip_5_SUITE,{tc1,g1},{failed,{auto_skip_5_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,6}}},
{?eh,tc_auto_skip,
- {auto_skip_5_SUITE,tc2,{failed,{auto_skip_5_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}},
+ {auto_skip_5_SUITE,{tc2,g1},{failed,{auto_skip_5_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,7}}},
{?eh,tc_auto_skip,
{auto_skip_5_SUITE,{end_per_group,g1},
@@ -295,20 +295,20 @@ test_events(auto_skip) ->
{?eh,tc_done,
{auto_skip_6_SUITE,{init_per_group,g1,[]},{failed,{error,{group,g1,failed}}}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,tc1,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}},
+ {auto_skip_6_SUITE,{tc1,g1},{failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,8}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,tc3,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}},
+ {auto_skip_6_SUITE,{tc3,g2},{failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,9}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,tc4,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}},
+ {auto_skip_6_SUITE,{tc4,g2},{failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,10}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,tc2,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}},
+ {auto_skip_6_SUITE,{tc2,g1},{failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,11}}},
{?eh,tc_auto_skip,
{auto_skip_6_SUITE,{end_per_group,g1},
@@ -324,12 +324,12 @@ test_events(auto_skip) ->
{?eh,tc_done,{auto_skip_6_SUITE,{init_per_group,g4,[]},
{failed,{error,{group,g4,failed}}}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,tc3,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g4,failed}}}}}},
+ {auto_skip_6_SUITE,{tc3,g4},{failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g4,failed}}}}}},
{?eh,test_stats,{3,0,{0,12}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,tc4,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g4,failed}}}}}},
+ {auto_skip_6_SUITE,{tc4,g4},{failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g4,failed}}}}}},
{?eh,test_stats,{3,0,{0,13}}},
{?eh,tc_auto_skip,
{auto_skip_6_SUITE,{end_per_group,g4},
@@ -498,13 +498,13 @@ test_events(auto_skip) ->
[{suite,auto_skip_12_SUITE}]},
{auto_skipped,
{require_failed,{not_available,unknown_variable_g1}}}}},
- {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc1,
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,{tc1,g1},
{require_failed,{not_available,unknown_variable_g1}}}},
{?eh,test_stats,{10,0,{0,25}}},
- {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc2,
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,{tc2,g1},
{require_failed,{not_available,unknown_variable_g1}}}},
{?eh,test_stats,{10,0,{0,26}}},
- {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc3,
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,{tc3,g2},
{require_failed,{not_available,unknown_variable_g1}}}},
{?eh,test_stats,{10,0,{0,27}}},
{?eh,tc_auto_skip,{ct_framework,{end_per_group,g1},
@@ -516,13 +516,13 @@ test_events(auto_skip) ->
[{suite,auto_skip_12_SUITE}]},
{auto_skipped,
{require_failed,{not_available,unknown_variable_g1}}}}},
- {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc1,
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,{tc1,g1},
{require_failed,{not_available,unknown_variable_g1}}}},
{?eh,test_stats,{10,0,{0,28}}},
- {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc2,
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,{tc2,g1},
{require_failed,{not_available,unknown_variable_g1}}}},
{?eh,test_stats,{10,0,{0,29}}},
- {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc3,
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,{tc3,g2},
{require_failed,{not_available,unknown_variable_g1}}}},
{?eh,test_stats,{10,0,{0,30}}},
{?eh,tc_auto_skip,{ct_framework,{end_per_group,g1},
@@ -544,7 +544,7 @@ test_events(auto_skip) ->
[{suite,auto_skip_12_SUITE}]},
{auto_skipped,
{require_failed,{not_available,unknown_variable_g4}}}}},
- {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc3,
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,{tc3,g4},
{require_failed,{not_available,unknown_variable_g4}}}},
{?eh,test_stats,{12,0,{0,31}}},
{?eh,tc_auto_skip,{ct_framework,{end_per_group,g4},
@@ -574,10 +574,10 @@ test_events(user_skip) ->
{user_skip_1_SUITE,tc1,"Whole suite skipped"}},
{?eh,test_stats,{0,0,{1,0}}},
{?eh,tc_user_skip,
- {user_skip_1_SUITE,tc2,"Whole suite skipped"}},
+ {user_skip_1_SUITE,{tc2,g1},"Whole suite skipped"}},
{?eh,test_stats,{0,0,{2,0}}},
{?eh,tc_user_skip,
- {user_skip_1_SUITE,tc3,"Whole suite skipped"}},
+ {user_skip_1_SUITE,{tc3,g1},"Whole suite skipped"}},
{?eh,test_stats,{0,0,{3,0}}},
{?eh,tc_user_skip,
{user_skip_1_SUITE,tc4,"Whole suite skipped"}},
@@ -638,9 +638,9 @@ test_events(user_skip) ->
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g1,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g1,[]},{skipped,"Group skipped"}}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc1,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc1,g1},"Group skipped"}},
{?eh,test_stats,{3,0,{10,0}}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc2,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc2,g1},"Group skipped"}},
{?eh,test_stats,{3,0,{11,0}}},
{?eh,tc_user_skip,{user_skip_4_SUITE,{end_per_group,g1},"Group skipped"}}],
@@ -657,10 +657,10 @@ test_events(user_skip) ->
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g3,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g3,[]},{skipped,"Group skipped"}}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc5,"Group skipped"}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc6,"Group skipped"}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc7,"Group skipped"}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc8,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc5,g3},"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc6,g4},"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc7,g4},"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc8,g3},"Group skipped"}},
{?eh,test_stats,{5,0,{15,0}}},
{?eh,tc_user_skip,{user_skip_4_SUITE,{end_per_group,g3},"Group skipped"}}],
@@ -671,9 +671,9 @@ test_events(user_skip) ->
{?eh,test_stats,{6,0,{15,0}}},
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g6,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g6,[]},{skipped,"Group skipped"}}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc10,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc10,g6},"Group skipped"}},
{?eh,test_stats,{6,0,{16,0}}},
- {?eh,tc_user_skip,{user_skip_4_SUITE,tc11,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{tc11,g6},"Group skipped"}},
{?eh,test_stats,{6,0,{17,0}}},
{?eh,tc_user_skip,{user_skip_4_SUITE,{end_per_group,g6},"Group skipped"}}],
{?eh,tc_start,{user_skip_4_SUITE,{end_per_group,g5,[]}}},
@@ -687,9 +687,9 @@ test_events(user_skip) ->
{skipped,{bad,'Whole suite skipped'}}}},
{?eh,tc_user_skip,{user_skip_5_SUITE,tc1,{bad,'Whole suite skipped'}}},
{?eh,test_stats,{6,0,{18,0}}},
- {?eh,tc_user_skip,{user_skip_5_SUITE,tc2,{bad,'Whole suite skipped'}}},
+ {?eh,tc_user_skip,{user_skip_5_SUITE,{tc2,g1},{bad,'Whole suite skipped'}}},
{?eh,test_stats,{6,0,{19,0}}},
- {?eh,tc_user_skip,{user_skip_5_SUITE,tc3,{bad,'Whole suite skipped'}}},
+ {?eh,tc_user_skip,{user_skip_5_SUITE,{tc3,g1},{bad,'Whole suite skipped'}}},
{?eh,test_stats,{6,0,{20,0}}},
{?eh,tc_user_skip,{user_skip_5_SUITE,tc4,{bad,'Whole suite skipped'}}},
{?eh,test_stats,{6,0,{21,0}}},
@@ -700,10 +700,10 @@ test_events(user_skip) ->
{?eh,tc_done,{user_skip_6_SUITE,
{init_per_group,ptop1,[parallel]},
{skipped,"Top group skipped"}}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc1,"Top group skipped"}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc3,"Top group skipped"}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc4,"Top group skipped"}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc2,"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc1,ptop1},"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc3,psub1},"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc4,psub1},"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc2,ptop1},"Top group skipped"}},
{?eh,tc_user_skip,{user_skip_6_SUITE,{end_per_group,ptop1},
"Top group skipped"}}]},
@@ -718,8 +718,8 @@ test_events(user_skip) ->
{?eh,tc_done,{user_skip_6_SUITE,
{init_per_group,psub2,[parallel]},
{skipped,"Sub group skipped"}}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc3,"Sub group skipped"}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc4,"Sub group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc3,psub2},"Sub group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc4,psub2},"Sub group skipped"}},
{?eh,tc_user_skip,{user_skip_6_SUITE,{end_per_group,psub2},
"Sub group skipped"}}]},
@@ -745,14 +745,14 @@ test_events(testspec_skip) ->
{user_skip_7_SUITE,{init_per_group,ptop1,[parallel]}}},
{?eh,tc_done,
{user_skip_7_SUITE,{init_per_group,ptop1,[parallel]},ok}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc1,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc1,ptop1},"SKIPPED"}},
{?eh,test_stats,{0,0,{1,0}}},
{parallel,
[{?eh,tc_start,
{user_skip_7_SUITE,{init_per_group,psub1,[parallel]}}},
{?eh,tc_done,
{user_skip_7_SUITE,{init_per_group,psub1,[parallel]},ok}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc3,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc3,psub1},"SKIPPED"}},
{?eh,tc_start,{user_skip_7_SUITE,tc4}},
{?eh,tc_done,{user_skip_7_SUITE,tc4,ok}},
{?eh,test_stats,{1,0,{2,0}}},
@@ -778,13 +778,13 @@ test_events(testspec_skip) ->
{?eh,tc_start,{ct_framework,init_per_suite}},
{?eh,tc_done,{ct_framework,init_per_suite,ok}},
{?eh,tc_user_skip,{user_skip_7_SUITE,{init_per_group,ptop1},"SKIPPED"}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc1,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc1,ptop1},"SKIPPED"}},
{?eh,test_stats,{0,0,{1,0}}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc3,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc3,psub1},"SKIPPED"}},
{?eh,test_stats,{0,0,{2,0}}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc4,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc4,psub1},"SKIPPED"}},
{?eh,test_stats,{0,0,{3,0}}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc2,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc2,ptop1},"SKIPPED"}},
{?eh,test_stats,{0,0,{4,0}}},
{?eh,tc_user_skip,{user_skip_7_SUITE,{end_per_group,ptop1},"SKIPPED"}},
{?eh,tc_start,{ct_framework,end_per_suite}},
@@ -804,8 +804,8 @@ test_events(testspec_skip) ->
{user_skip_7_SUITE,{init_per_group,ptop1,[parallel]},ok}},
{?eh,tc_user_skip,
{user_skip_7_SUITE,{init_per_group,psub1},"SKIPPED"}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc3,"SKIPPED"}},
- {?eh,tc_user_skip,{user_skip_7_SUITE,tc4,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc3,psub1},"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{tc4,psub1},"SKIPPED"}},
{?eh,test_stats,{0,0,{2,0}}},
{?eh,tc_user_skip,{user_skip_7_SUITE,{end_per_group,psub1},"SKIPPED"}},
{?eh,tc_start,{user_skip_7_SUITE,tc1}},
@@ -837,13 +837,13 @@ test_events(testspec_skip) ->
{?eh,tc_done,{user_skip_6_SUITE,
{init_per_group,ptop1,[parallel]},
{skipped,"Top group skipped"}}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc1,"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc1,ptop1},"Top group skipped"}},
{?eh,test_stats,{0,0,{1,0}}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc3,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc3,psub1},"SKIPPED"}},
{?eh,test_stats,{0,0,{2,0}}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc4,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc4,psub1},"SKIPPED"}},
{?eh,test_stats,{0,0,{3,0}}},
- {?eh,tc_user_skip,{user_skip_6_SUITE,tc2,"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{tc2,ptop1},"Top group skipped"}},
{?eh,test_stats,{0,0,{4,0}}},
{?eh,tc_user_skip,
{user_skip_6_SUITE,{end_per_group,ptop1},"Top group skipped"}}]},
diff --git a/lib/common_test/test/ct_surefire_SUITE.erl b/lib/common_test/test/ct_surefire_SUITE.erl
index c5e44682b0..db7a0be915 100644
--- a/lib/common_test/test/ct_surefire_SUITE.erl
+++ b/lib/common_test/test/ct_surefire_SUITE.erl
@@ -205,7 +205,7 @@ test_events(_) ->
[{?eh,tc_start,{surefire_SUITE,{init_per_group,g_fail,[]}}},
{?eh,tc_done,{surefire_SUITE,{init_per_group,g_fail,[]},
{failed,{error,all_cases_should_be_skipped}}}},
- {?eh,tc_auto_skip,{surefire_SUITE,tc_ok,
+ {?eh,tc_auto_skip,{surefire_SUITE,{tc_ok,g_fail},
{failed,
{surefire_SUITE,init_per_group,
{'EXIT',all_cases_should_be_skipped}}}}},
diff --git a/lib/common_test/test/ct_telnet_SUITE.erl b/lib/common_test/test/ct_telnet_SUITE.erl
index acce4eca14..84e69c2b54 100644
--- a/lib/common_test/test/ct_telnet_SUITE.erl
+++ b/lib/common_test/test/ct_telnet_SUITE.erl
@@ -72,19 +72,32 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
ct_test_support:end_per_suite(Config).
-init_per_testcase(TestCase, Config) when TestCase=/=unix_telnet->
+init_per_testcase(TestCase, Config) when TestCase /= unix_telnet ->
+ ct:pal("Testcase ~p starting!", [TestCase]),
TS = telnet_server:start([{port,?erl_telnet_server_port},
{users,[{?erl_telnet_server_user,
?erl_telnet_server_pwd}]}]),
ct_test_support:init_per_testcase(TestCase, [{telnet_server,TS}|Config]);
init_per_testcase(TestCase, Config) ->
- ct_test_support:init_per_testcase(TestCase, Config).
-
+ ct:pal("Testcase ~p starting. Checking connection to telnet server...",
+ [TestCase]),
+ ct:require(testconn, {unix,[telnet]}),
+ case {os:type(),ct_telnet:open(testconn)} of
+ {_,{ok,Handle}} ->
+ ok = ct_telnet:close(Handle),
+ ct:pal("Connection ok, starting tests!", []),
+ ct_test_support:init_per_testcase(TestCase, Config);
+ {{unix,_},{error,Reason}} ->
+ ct:fail("No connection to telnet server! Reason: ~tp", [Reason]);
+ {_,{error,Reason}} ->
+ {skip,{no_access_to_telnet_server,Reason}}
+ end.
+
+end_per_testcase(TestCase, Config) when TestCase /= unix_telnet ->
+ ct:pal("Stopping the telnet_server now!", []),
+ telnet_server:stop(?config(telnet_server,Config)),
+ ct_test_support:end_per_testcase(TestCase, Config);
end_per_testcase(TestCase, Config) ->
- case ?config(telnet_server,Config) of
- undefined -> ok;
- TS -> telnet_server:stop(TS)
- end,
ct_test_support:end_per_testcase(TestCase, Config).
@@ -167,28 +180,39 @@ telnet_config(unix_telnet, legacy) ->
{ct_conn_log,[]}];
%% LogType same as GroupName
telnet_config(unix_telnet, LogType) ->
+ LogTypeTerm = if LogType == raw -> [];
+ true -> [{log_type,LogType}]
+ end,
[{unix, ct:get_config(unix)},
{ct_conn_log,
- [{ct_telnet,[{log_type,LogType},
- {hosts,[telnet_server_conn1,
- telnet_server_conn2,
- telnet_server_conn3,
- telnet_server_conn4]}]}]}];
+ [{ct_telnet, LogTypeTerm ++
+ [{hosts,[telnet_server_conn1,
+ telnet_server_conn2,
+ telnet_server_conn3,
+ telnet_server_conn4]}]}]}];
telnet_config(_, LogType) ->
+ LogTypeTerm = if LogType == raw -> [];
+ true -> [{log_type,LogType}]
+ end,
[{unix,[{telnet,"localhost"},
{port, ?erl_telnet_server_port},
{username,?erl_telnet_server_user},
{password,?erl_telnet_server_pwd},
- {keep_alive,true}]} |
+ {keep_alive,true}]},
+ {telnet_settings, [{connect_timeout,10000},
+ {command_timeout,10000},
+ {reconnection_attempts,0},
+ {reconnection_interval,0},
+ {keep_alive,true}]} |
if LogType == legacy ->
[{ct_conn_log,[]}];
true ->
[{ct_conn_log,
- [{ct_telnet,[{log_type,LogType},
- {hosts,[telnet_server_conn1,
- telnet_server_conn2,
- telnet_server_conn3,
- telnet_server_conn4]}]}]}]
+ [{ct_telnet, LogTypeTerm ++
+ [{hosts,[telnet_server_conn1,
+ telnet_server_conn2,
+ telnet_server_conn3,
+ telnet_server_conn4]}]}]}]
end].
%%%-----------------------------------------------------------------
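
The telnet_config/2 changes above add a telnet_settings term and omit the log_type term for the raw case. The generated terms correspond to what a user would otherwise put in a Common Test configuration file; a sketch with placeholder port and credentials (the host name and timeout values mirror the diff, the rest are examples only):

%% telnet.cfg - illustrative values only
{unix, [{telnet, "localhost"},
        {port, 4000},
        {username, "user"},
        {password, "secret"},
        {keep_alive, true}]}.

{telnet_settings, [{connect_timeout, 10000},
                   {command_timeout, 10000},
                   {reconnection_attempts, 0},
                   {reconnection_interval, 0},
                   {keep_alive, true}]}.
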
diff --git a/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl b/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl
index 8d142e85a8..c0f79d0f10 100644
--- a/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl
+++ b/lib/common_test/test/ct_telnet_SUITE_data/ct_telnet_own_server_SUITE.erl
@@ -16,11 +16,14 @@ suite() ->
].
all() ->
- [expect,
+ [
+ expect,
expect_repeat,
expect_sequence,
expect_error_prompt,
- expect_error_timeout,
+ expect_error_timeout1,
+ expect_error_timeout2,
+ expect_error_timeout3,
no_prompt_check,
no_prompt_check_repeat,
no_prompt_check_sequence,
@@ -28,7 +31,11 @@ all() ->
ignore_prompt,
ignore_prompt_repeat,
ignore_prompt_sequence,
- ignore_prompt_timeout].
+ ignore_prompt_timeout,
+ large_string,
+ server_speaks,
+ server_disconnects
+ ].
groups() ->
[].
@@ -85,13 +92,34 @@ expect_error_prompt(_) ->
%% Check that expect returns after the idle timeout, even if the
%% expected pattern is received - as long as no newline or prompt is
%% received, it will not match.
-expect_error_timeout(_) ->
+expect_error_timeout1(_) ->
{ok, Handle} = ct_telnet:open(telnet_server_conn1),
ok = ct_telnet:send(Handle, "echo_no_prompt xxx"),
{error,timeout} = ct_telnet:expect(Handle, ["xxx"], [{timeout,1000}]),
ok = ct_telnet:close(Handle),
ok.
+expect_error_timeout2(_) ->
+ {ok, Handle} = ct_telnet:open(telnet_server_conn1),
+ ok = ct_telnet:send(Handle, "echo_no_prompt xxx"),
+ {error,timeout} = ct_telnet:expect(Handle, ["xxx"], [{idle_timeout,1000},
+ {total_timeout,infinity}]),
+ ok = ct_telnet:close(Handle),
+ ok.
+
+%% Check that if the server loops and the pattern does not match, the
+%% operation can be aborted
+expect_error_timeout3(_) ->
+ {ok, Handle} = ct_telnet:open(telnet_server_conn1),
+ ok = ct_telnet:send(Handle, "echo_loop 5000 xxx"),
+ {error,timeout} = ct_telnet:expect(Handle, ["yyy"],
+ [{idle_timeout,infinity},
+ {total_timeout,3000}]),
+ ok = ct_telnet:send(Handle, "echo ayt"),
+ {ok,["ayt"]} = ct_telnet:expect(Handle, ["ayt"]),
+ ok = ct_telnet:close(Handle),
+ ok.
+
%% expect with ignore_prompt option should not return even if a prompt
%% is found. The pattern after the prompt (here "> ") can be matched.
ignore_prompt(_) ->
@@ -188,3 +216,72 @@ no_prompt_check_timeout(_) ->
{timeout,1000}]),
ok = ct_telnet:close(Handle),
ok.
+
+%% Check that it's possible to receive multiple chunks of data sent from
+%% the server with one get_data call
+large_string(_) ->
+ {ok, Handle} = ct_telnet:open(telnet_server_conn1),
+ String = "abcd efgh ijkl mnop qrst uvwx yz ",
+ BigString = lists:flatmap(fun(S) -> S end,
+ [String || _ <- lists:seq(1,10)]),
+ VerifyStr = [C || C <- BigString, C/=$ ],
+
+ {ok,Data} = ct_telnet:cmd(Handle, "echo_sep "++BigString),
+ ct:log("[CMD] Received ~w chars: ~s", [length(lists:flatten(Data)),Data]),
+ VerifyStr = [C || C <- lists:flatten(Data), C/=$ , C/=$\r, C/=$\n, C/=$>],
+
+ %% Test #1: With a long sleep value, all data gets buffered and
+ %% ct_telnet can receive it with one single request to ct_telnet_client.
+ %% Test #2: With a short sleep value, ct_telnet needs multiple calls to
+ %% ct_telnet_client to collect the data. This iterative operation should
+ %% yield the same result as the single request case.
+
+ ok = ct_telnet:send(Handle, "echo_sep "++BigString),
+ timer:sleep(1000),
+ {ok,Data1} = ct_telnet:get_data(Handle),
+ ct:log("[GET DATA #1] Received ~w chars: ~s",
+ [length(lists:flatten(Data1)),Data1]),
+ VerifyStr = [C || C <- lists:flatten(Data1), C/=$ , C/=$\r, C/=$\n, C/=$>],
+
+ ok = ct_telnet:send(Handle, "echo_sep "++BigString),
+ timer:sleep(50),
+ {ok,Data2} = ct_telnet:get_data(Handle),
+ ct:log("[GET DATA #2] Received ~w chars: ~s", [length(lists:flatten(Data2)),Data2]),
+ VerifyStr = [C || C <- lists:flatten(Data2), C/=$ , C/=$\r, C/=$\n, C/=$>],
+
+ ok = ct_telnet:close(Handle),
+ ok.
+
+%% The server says things. Manually check that it gets printed correctly
+%% in the general IO log.
+server_speaks(_) ->
+ {ok, Handle} = ct_telnet:open(telnet_server_conn1),
+ ok = ct_telnet:send(Handle, "echo_no_prompt This is the first message\r\n"),
+ ok = ct_telnet:send(Handle, "echo_no_prompt This is the second message\r\n"),
+ %% let ct_telnet_client get an idle timeout
+ timer:sleep(15000),
+ ok = ct_telnet:send(Handle, "echo_no_prompt This is the third message\r\n"),
+ {ok,_} = ct_telnet:expect(Handle, ["the"], [no_prompt_check]),
+ {error,timeout} = ct_telnet:expect(Handle, ["the"], [no_prompt_check,
+ {timeout,1000}]),
+ ok = ct_telnet:send(Handle, "echo_no_prompt This is the fourth message\r\n"),
+ %% give the server time to respond
+ timer:sleep(2000),
+ %% closing the connection should print last message in log
+ ok = ct_telnet:close(Handle),
+ ok.
+
+%% Let the server close the connection. Make sure buffered data gets printed
+%% to the general IO log.
+server_disconnects(_) ->
+ {ok, Handle} = ct_telnet:open(telnet_server_conn1),
+ ok = ct_telnet:send(Handle, "disconnect_after 1500"),
+ %% wait until the get_data operation (triggered by send/2) times out
+ %% before sending the msg
+ timer:sleep(500),
+ ok = ct_telnet:send(Handle, "echo_no_prompt This is the message\r\n"),
+ %% when the server closes the connection, the last message should be
+ %% printed in the log
+ timer:sleep(3000),
+ _ = ct_telnet:close(Handle),
+ ok.
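
The added timeout cases above exercise two separate limits for ct_telnet:expect/3: idle_timeout, the longest the call waits for new incoming data, and total_timeout, an upper bound on the whole expect operation even when data keeps arriving. A condensed sketch, reusing the connection name and the test server commands from the suite above:

{ok, H} = ct_telnet:open(telnet_server_conn1),
ok = ct_telnet:send(H, "echo_no_prompt xxx"),
%% idle_timeout: give up when nothing new has been received for 1 second
{error, timeout} = ct_telnet:expect(H, ["yyy"], [{idle_timeout, 1000}]),
ok = ct_telnet:send(H, "echo_loop 5000 xxx"),
%% total_timeout: abort the whole call after 3 seconds, even though the
%% server keeps sending non-matching data
{error, timeout} = ct_telnet:expect(H, ["yyy"],
                                    [{idle_timeout, infinity},
                                     {total_timeout, 3000}]),
ok = ct_telnet:close(H).
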
diff --git a/lib/common_test/test/ct_test_server_if_1_SUITE.erl b/lib/common_test/test/ct_test_server_if_1_SUITE.erl
index 9882fa980c..b6ef3062d4 100644
--- a/lib/common_test/test/ct_test_server_if_1_SUITE.erl
+++ b/lib/common_test/test/ct_test_server_if_1_SUITE.erl
@@ -168,7 +168,7 @@ test_events(ts_if_1) ->
{?eh,tc_start,{ts_if_1_SUITE,tc4}},
{?eh,tc_done,{ts_if_1_SUITE,tc4,{failed,{error,failed_on_purpose}}}},
{?eh,test_stats,{1,3,{0,2}}},
- {?eh,tc_auto_skip,{ts_if_1_SUITE,tc5,{failed,{ts_if_1_SUITE,tc4}}}},
+ {?eh,tc_auto_skip,{ts_if_1_SUITE,{tc5,seq2},{failed,{ts_if_1_SUITE,tc4}}}},
{?eh,test_stats,{1,3,{0,3}}},
{?eh,tc_start,{ts_if_1_SUITE,{end_per_group,seq2,[sequence]}}},
{?eh,tc_done,{ts_if_1_SUITE,{end_per_group,seq2,[sequence]},ok}}],
@@ -199,7 +199,7 @@ test_events(ts_if_1) ->
[{?eh,tc_start,{ts_if_1_SUITE,{init_per_group,g1,[]}}},
{?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g1,[]},
{skipped,g1_got_skipped}}},
- {?eh,tc_user_skip,{ts_if_1_SUITE,gtc1,g1_got_skipped}},
+ {?eh,tc_user_skip,{ts_if_1_SUITE,{gtc1,g1},g1_got_skipped}},
{?eh,test_stats,{1,4,{3,6}}},
{?eh,tc_user_skip,{ts_if_1_SUITE,{end_per_group,g1},g1_got_skipped}}],
@@ -208,7 +208,7 @@ test_events(ts_if_1) ->
{?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g2,[parallel]},ok}},
[{?eh,tc_start,{ts_if_1_SUITE,{init_per_group,g3,[]}}},
{?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g3,[]},{skipped,g3_got_skipped}}},
- {?eh,tc_user_skip,{ts_if_1_SUITE,gtc2,g3_got_skipped}},
+ {?eh,tc_user_skip,{ts_if_1_SUITE,{gtc2,g3},g3_got_skipped}},
{?eh,test_stats,{1,4,{4,6}}},
{?eh,tc_user_skip,{ts_if_1_SUITE,{end_per_group,g3},g3_got_skipped}}],
{?eh,tc_start,{ts_if_1_SUITE,{end_per_group,g2,[parallel]}}},
@@ -279,7 +279,7 @@ test_events(ts_if_1) ->
{init_per_group,g1,[]},
{auto_skipped,{group0_failed,bad_return_value}}}},
{?eh,tc_auto_skip,
- {ts_if_7_SUITE,tc2,{group0_failed,bad_return_value}}},
+ {ts_if_7_SUITE,{tc2,g1},{group0_failed,bad_return_value}}},
{?eh,test_stats,{2,7,{4,11}}},
{?eh,tc_auto_skip,
{ts_if_7_SUITE,{end_per_group,g1},{group0_failed,bad_return_value}}},
diff --git a/lib/common_test/test/ct_testspec_1_SUITE.erl b/lib/common_test/test/ct_testspec_1_SUITE.erl
index 187b5e6d3a..c2670316b6 100644
--- a/lib/common_test/test/ct_testspec_1_SUITE.erl
+++ b/lib/common_test/test/ct_testspec_1_SUITE.erl
@@ -763,35 +763,35 @@ test_events(skip_all_groups) ->
{?eh,start_info,{1,1,12}},
{?eh,tc_start,{groups_11_SUITE,init_per_suite}},
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1a},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1a,test_group_1a},"SKIPPED!"}},
{?eh,test_stats,{0,0,{1,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1b,test_group_1a},"SKIPPED!"}},
{?eh,test_stats,{0,0,{2,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1a},"SKIPPED!"}},
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1b},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1a,test_group_1b},"SKIPPED!"}},
{?eh,test_stats,{0,0,{3,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1b,test_group_1b},"SKIPPED!"}},
{?eh,test_stats,{0,0,{4,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1b},"SKIPPED!"}},
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_2},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_2a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_2a,test_group_2},"SKIPPED!"}},
{?eh,test_stats,{0,0,{5,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_3a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_3a,test_group_3},"SKIPPED!"}},
{?eh,test_stats,{0,0,{6,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_3b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_3b,test_group_3},"SKIPPED!"}},
{?eh,test_stats,{0,0,{7,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_2b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_2b,test_group_2},"SKIPPED!"}},
{?eh,test_stats,{0,0,{8,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_2},"SKIPPED!"}},
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_4},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_5a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_5a,test_group_5},"SKIPPED!"}},
{?eh,test_stats,{0,0,{9,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_7a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_7a,test_group_7},"SKIPPED!"}},
{?eh,test_stats,{0,0,{10,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_7b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_7b,test_group_7},"SKIPPED!"}},
{?eh,test_stats,{0,0,{11,0}}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_5b,test_group_5},"SKIPPED!"}},
{?eh,test_stats,{0,0,{12,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_4},"SKIPPED!"}},
{?eh,tc_start,{groups_11_SUITE,end_per_suite}},
@@ -826,17 +826,17 @@ test_events(skip_group) ->
{?eh,tc_done,{groups_11_SUITE,{end_per_group,test_group_1a,[]},ok}}],
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1b},
"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1a,test_group_1b},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1b,test_group_1b},"SKIPPED!"}},
{?eh,test_stats,{2,0,{2,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1b},
"SKIPPED!"}},
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_2},
"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_2a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_3a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_3b,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_2b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_2a,test_group_2},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_3a,test_group_3},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_3b,test_group_3},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_2b,test_group_2},"SKIPPED!"}},
{?eh,test_stats,{2,0,{6,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_2},
"SKIPPED!"}},
@@ -864,15 +864,15 @@ test_events(skip_group_all_testcases) ->
{?eh,tc_start,{groups_11_SUITE,init_per_suite}},
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1a},
"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1a,test_group_1a},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1b,test_group_1a},"SKIPPED!"}},
{?eh,test_stats,{0,0,{2,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1a},
"SKIPPED!"}},
{?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1b},
"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1a,test_group_1b},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1b,test_group_1b},"SKIPPED!"}},
{?eh,test_stats,{0,0,{4,0}}},
{?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1b},
"SKIPPED!"}},
@@ -901,13 +901,13 @@ test_events(skip_group_testcase) ->
{?eh,tc_start,{groups_11_SUITE,{init_per_group,test_group_1a,[]}}},
{?eh,tc_start,{groups_11_SUITE,testcase_1a}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1b,test_group_1a},"SKIPPED!"}},
{?eh,test_stats,{1,0,{1,0}}},
{?eh,tc_done,{groups_11_SUITE,{end_per_group,test_group_1a,[]},'_'}},
{?eh,tc_start,{groups_11_SUITE,{init_per_group,test_group_1b,[]}}},
{?eh,tc_start,{groups_11_SUITE,testcase_1b}},
- {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{testcase_1a,test_group_1b},"SKIPPED!"}},
{?eh,test_stats,{2,0,{2,0}}},
{?eh,tc_done,{groups_11_SUITE,{end_per_group,test_group_1b,[]},'_'}},
@@ -1045,8 +1045,8 @@ test_events(skip_subgroup) ->
{?eh,tc_user_skip,{groups_12_SUITE,
{init_per_group,test_group_8},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8a,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8b,test_group_8},"SKIPPED!"}},
{?eh,tc_user_skip,{groups_12_SUITE,
{end_per_group,test_group_8},"SKIPPED!"}},
@@ -1146,12 +1146,12 @@ test_events(skip_subgroup_all_testcases) ->
{?eh,tc_done,{groups_12_SUITE,{init_per_group,test_group_4,[]},ok}},
{?eh,tc_user_skip,{groups_12_SUITE,
{init_per_group,test_group_5},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_5a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_7a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_7b,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_5a,test_group_5},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7a,test_group_7},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7b,test_group_7},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8a,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8b,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_5b,test_group_5},"SKIPPED!"}},
{?eh,test_stats,{0,0,{6,0}}},
{?eh,tc_user_skip,{groups_12_SUITE,
{end_per_group,test_group_5},"SKIPPED!"}},
@@ -1240,9 +1240,9 @@ test_events(skip_subgroup_testcase) ->
{?eh,tc_done,
{groups_12_SUITE,{init_per_group,test_group_6,[parallel]},ok}},
[{?eh,tc_start,{groups_12_SUITE,{init_per_group,test_group_7,'_'}}},
- {?eh,tc_user_skip, {groups_12_SUITE,testcase_7a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7a,test_group_7},"SKIPPED!"}},
{?eh,test_stats,{1,0,{1,0}}},
- {?eh,tc_user_skip, {groups_12_SUITE,testcase_7b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7b,test_group_7},"SKIPPED!"}},
{?eh,test_stats,{1,0,{2,0}}},
{?eh,tc_start,{groups_12_SUITE,{end_per_group,test_group_7,'_'}}}],
{shuffle,
@@ -1281,22 +1281,22 @@ test_events(sub_skipped_by_top) ->
{?eh,tc_start,{groups_12_SUITE,init_per_suite}},
{?eh,tc_user_skip,{groups_12_SUITE,{init_per_group,test_group_4},
"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_5a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_7a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_7b,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_5a,test_group_5},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7a,test_group_7},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7b,test_group_7},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8a,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8b,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_5b,test_group_5},"SKIPPED!"}},
{?eh,tc_user_skip,{groups_12_SUITE,
{end_per_group,test_group_4},"SKIPPED!"}},
{?eh,tc_user_skip,{groups_12_SUITE,
{init_per_group,test_group_4},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_5a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_7a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_7b,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_5a,test_group_5},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7a,test_group_7},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_7b,test_group_7},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8a,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_8b,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,{testcase_5b,test_group_5},"SKIPPED!"}},
{?eh,test_stats,{0,0,{12,0}}},
{?eh,tc_user_skip,{groups_12_SUITE,
{end_per_group,test_group_4},"SKIPPED!"}},
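The updated expectations above reflect that tc_user_skip and tc_auto_skip events now carry {Testcase,GroupName} rather than just the testcase name when the skip happens inside a group. An event handler consuming these events has to accept both shapes; the clause below is a hedged sketch (the handler clause layout and the io:format reporting are assumptions for illustration), using the #event{} record from ct_event.hrl.

-include_lib("common_test/include/ct_event.hrl").

%% Sketch of an event handler clause that accepts both the old Testcase-only
%% and the new {Testcase,Group} data formats for skip events.
handle_event(#event{name=Name, data={Suite,TcOrTcGroup,Reason}}, State)
  when Name =:= tc_user_skip; Name =:= tc_auto_skip ->
    case TcOrTcGroup of
        {Tc,Group} when is_atom(Tc), is_atom(Group) ->
            io:format("~w:~w (group ~w) skipped: ~p~n",
                      [Suite,Tc,Group,Reason]);
        Tc when is_atom(Tc) ->
            io:format("~w:~w skipped: ~p~n", [Suite,Tc,Reason])
    end,
    {ok,State};
handle_event(_Event, State) ->
    {ok,State}.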
diff --git a/lib/common_test/test/telnet_server.erl b/lib/common_test/test/telnet_server.erl
index 1760100d8e..1d341d6106 100644
--- a/lib/common_test/test/telnet_server.erl
+++ b/lib/common_test/test/telnet_server.erl
@@ -51,32 +51,51 @@ stop(Pid) ->
init(Opts) ->
Port = proplists:get_value(port,Opts),
Users = proplists:get_value(users,Opts,[]),
- {ok, LSock} = gen_tcp:listen(Port, [list, {packet, 0},
- {active, true}]),
+ {ok, LSock} = listen(5, Port, [list, {packet, 0},
+ {active, true},
+ {reuseaddr,true}]),
State = #state{listen=LSock,users=Users},
accept(State),
- ok = gen_tcp:close(LSock).
+ ok = gen_tcp:close(LSock),
+ dbg("telnet_server closed the listen socket ~p\n", [LSock]),
+ timer:sleep(1000),
+ ok.
+
+listen(0, _Port, _Opts) ->
+ {error,eaddrinuse};
+listen(Retries, Port, Opts) ->
+ case gen_tcp:listen(Port, Opts) of
+ {error,eaddrinuse} ->
+ dbg("Listen port not released, trying again..."),
+ timer:sleep(5000),
+ listen(Retries-1, Port, Opts);
+ Ok = {ok,_LSock} ->
+ Ok;
+ Error ->
+ exit(Error)
+ end.
accept(#state{listen=LSock}=State) ->
Server = self(),
Acceptor = spawn_link(fun() -> do_accept(LSock,Server) end),
receive
{Acceptor,Sock} when is_port(Sock) ->
+ dbg("Connected to client on socket ~p\n", [Sock]),
case init_client(State#state{client=Sock}) of
stopped ->
- io:format("[telnet_server] telnet_server stopped\n"),
+ dbg("telnet_server stopped\n"),
ok;
R ->
- io:format("[telnet_server] connection to client"
- "closed with reason ~p~n",[R]),
+ dbg("Connection to client "
+ "closed with reason ~p~n",[R]),
accept(State)
end;
{Acceptor,closed} ->
- io:format("[telnet_server] listen socket closed unexpectedly, "
- "terminating telnet_server\n"),
+ dbg("Listen socket closed unexpectedly, "
+ "terminating telnet_server\n"),
ok;
stop ->
- io:format("[telnet_server] telnet_server stopped\n"),
+ dbg("telnet_server stopped\n"),
ok
end.
@@ -97,19 +116,21 @@ init_client(#state{client=Sock}=State) ->
dbg("Server sending: ~p~n",["login: "]),
R = case gen_tcp:send(Sock,"login: ") of
ok ->
- loop(State);
+ loop(State, 1);
Error ->
Error
end,
_ = gen_tcp:close(Sock),
R.
-loop(State) ->
+loop(State, N) ->
receive
{tcp,_,Data} ->
try handle_data(Data,State) of
{ok,State1} ->
- loop(State1)
+ loop(State1, N);
+ closed ->
+ closed
catch
throw:Error ->
Error
@@ -118,6 +139,11 @@ loop(State) ->
closed;
{tcp_error,_,Error} ->
{error,tcp,Error};
+ disconnect ->
+ Sock = State#state.client,
+ dbg("Server closing connection on socket ~p~n", [Sock]),
+ ok = gen_tcp:close(Sock),
+ closed;
stop ->
stopped
end.
@@ -130,10 +156,16 @@ handle_data(Data,State) ->
case get_line(Data,[]) of
{Line,Rest} ->
WholeLine = lists:flatten(lists:reverse(State#state.buffer,Line)),
- {ok,State1} = do_handle_data(WholeLine,State),
- case Rest of
- [] -> {ok,State1};
- _ -> handle_data(Rest,State1)
+ case do_handle_data(WholeLine,State) of
+ {ok,State1} ->
+ case Rest of
+ [] -> {ok,State1};
+ _ -> handle_data(Rest,State1)
+ end;
+ {close,State1} ->
+ dbg("Server closing connection~n",[]),
+ gen_tcp:close(State1#state.client),
+ closed
end;
false ->
{ok,State#state{buffer=[Data|State#state.buffer]}}
@@ -163,22 +195,42 @@ do_handle_data(Data,#state{authorized=false}=State) ->
check_user(Data,State);
do_handle_data(Data,#state{authorized={user,_}}=State) ->
check_pwd(Data,State);
-do_handle_data("echo "++ Data,State) ->
+do_handle_data("echo " ++ Data,State) ->
send(Data++"\r\n> ",State),
{ok,State};
-do_handle_data("echo_no_prompt "++ Data,State) ->
+do_handle_data("echo_sep " ++ Data,State) ->
+ Msgs = string:tokens(Data," "),
+ lists:foreach(fun(Msg) ->
+ send(Msg,State),
+ timer:sleep(10)
+ end, Msgs),
+ send("\r\n> ",State),
+ {ok,State};
+do_handle_data("echo_no_prompt " ++ Data,State) ->
send(Data,State),
{ok,State};
-do_handle_data("echo_ml "++ Data,State) ->
+do_handle_data("echo_ml " ++ Data,State) ->
Lines = string:tokens(Data," "),
ReturnData = string:join(Lines,"\n"),
send(ReturnData++"\r\n> ",State),
{ok,State};
-do_handle_data("echo_ml_no_prompt "++ Data,State) ->
+do_handle_data("echo_ml_no_prompt " ++ Data,State) ->
Lines = string:tokens(Data," "),
ReturnData = string:join(Lines,"\n"),
send(ReturnData,State),
{ok,State};
+do_handle_data("echo_loop " ++ Data,State) ->
+ [TStr|Lines] = string:tokens(Data," "),
+ ReturnData = string:join(Lines,"\n"),
+ send_loop(list_to_integer(TStr),ReturnData,State),
+ {ok,State};
+do_handle_data("disconnect_after " ++WaitStr,State) ->
+ Wait = list_to_integer(string:strip(WaitStr,right,$\n)),
+ dbg("Server will close connection in ~w ms...", [Wait]),
+ erlang:send_after(Wait,self(),disconnect),
+ {ok,State};
+do_handle_data("disconnect" ++_,State) ->
+ {close,State};
do_handle_data([],State) ->
send("> ",State),
{ok,State};
@@ -212,6 +264,20 @@ send(Data,State) ->
throw({error,send,Error})
end.
+send_loop(T,Data,State) ->
+ dbg("Server sending ~p in loop for ~w ms...~n",[Data,T]),
+ send_loop(now(),T,Data,State).
+
+send_loop(T0,T,Data,State) ->
+ ElapsedMS = trunc(timer:now_diff(now(),T0)/1000),
+ if ElapsedMS >= T ->
+ ok;
+ true ->
+ send(Data,State),
+ timer:sleep(500),
+ send_loop(T0,T,Data,State)
+ end.
+
get_line([$\r,$\n|Rest],Acc) ->
{lists:reverse(Acc),Rest};
get_line([$\r,0|Rest],Acc) ->
@@ -226,4 +292,4 @@ get_line([],_) ->
dbg(_F) ->
dbg(_F,[]).
dbg(_F,_A) ->
- io:format("[telnet_server] "++_F,_A).
+ io:format("[telnet_server] " ++ _F,_A).
diff --git a/lib/common_test/vsn.mk b/lib/common_test/vsn.mk
index 568405b110..f8a5aab686 100644
--- a/lib/common_test/vsn.mk
+++ b/lib/common_test/vsn.mk
@@ -1 +1 @@
-COMMON_TEST_VSN = 1.7.4
+COMMON_TEST_VSN = 1.8