Diffstat (limited to 'lib/common_test')
-rw-r--r--  lib/common_test/doc/src/Makefile | 43
-rw-r--r--  lib/common_test/doc/src/basics_chapter.xml | 230
-rw-r--r--  lib/common_test/doc/src/common_test_app.xml | 776
-rw-r--r--  lib/common_test/doc/src/config_file_chapter.xml | 558
-rw-r--r--  lib/common_test/doc/src/cover_chapter.xml | 349
-rw-r--r--  lib/common_test/doc/src/ct.xml | 1412
-rw-r--r--  lib/common_test/doc/src/ct_cover.xml | 106
-rw-r--r--  lib/common_test/doc/src/ct_ftp.xml | 277
-rw-r--r--  lib/common_test/doc/src/ct_hooks.xml | 860
-rw-r--r--  lib/common_test/doc/src/ct_hooks_chapter.xml | 691
-rw-r--r--  lib/common_test/doc/src/ct_master.xml | 220
-rw-r--r--  lib/common_test/doc/src/ct_master_chapter.xml | 291
-rw-r--r--  lib/common_test/doc/src/ct_netconfc.xml | 1039
-rw-r--r--  lib/common_test/doc/src/ct_property_test.xml | 116
-rw-r--r--  lib/common_test/doc/src/ct_rpc.xml | 220
-rw-r--r--  lib/common_test/doc/src/ct_run.xml | 297
-rw-r--r--  lib/common_test/doc/src/ct_slave.xml | 221
-rw-r--r--  lib/common_test/doc/src/ct_snmp.xml | 523
-rw-r--r--  lib/common_test/doc/src/ct_ssh.xml | 1150
-rw-r--r--  lib/common_test/doc/src/ct_telnet.xml | 601
-rw-r--r--  lib/common_test/doc/src/dependencies_chapter.xml | 382
-rw-r--r--  lib/common_test/doc/src/event_handler_chapter.xml | 384
-rw-r--r--  lib/common_test/doc/src/example_chapter.xml | 910
-rw-r--r--  lib/common_test/doc/src/getting_started_chapter.xml | 286
-rw-r--r--  lib/common_test/doc/src/install_chapter.xml | 25
-rw-r--r--  lib/common_test/doc/src/introduction.xml | 75
-rw-r--r--  lib/common_test/doc/src/notes.xml | 85
-rw-r--r--  lib/common_test/doc/src/part.xml | 33
-rw-r--r--  lib/common_test/doc/src/ref_man.xml | 38
-rw-r--r--  lib/common_test/doc/src/run_test_chapter.xml | 2230
-rw-r--r--  lib/common_test/doc/src/test_structure_chapter.xml | 146
-rw-r--r--  lib/common_test/doc/src/unix_telnet.xml | 134
-rw-r--r--  lib/common_test/doc/src/why_test_chapter.xml | 43
-rw-r--r--  lib/common_test/doc/src/write_test_chapter.xml | 1363
-rw-r--r--  lib/common_test/include/ct.hrl | 8
-rw-r--r--  lib/common_test/src/Makefile | 13
-rw-r--r--  lib/common_test/src/common_test.app.src | 36
-rw-r--r--  lib/common_test/src/ct.erl | 75
-rw-r--r--  lib/common_test/src/ct_config.erl | 3
-rw-r--r--  lib/common_test/src/ct_conn_log_h.erl | 46
-rw-r--r--  lib/common_test/src/ct_framework.erl | 33
-rw-r--r--  lib/common_test/src/ct_logs.erl | 239
-rw-r--r--  lib/common_test/src/ct_make.erl | 7
-rw-r--r--  lib/common_test/src/ct_master_logs.erl | 13
-rw-r--r--  lib/common_test/src/ct_release_test.erl | 27
-rw-r--r--  lib/common_test/src/ct_run.erl | 4
-rw-r--r--  lib/common_test/src/ct_util.erl | 6
-rw-r--r--  lib/common_test/src/cth_conn_log.erl | 2
-rw-r--r--  lib/common_test/src/erl2html2.erl | 311
-rw-r--r--  lib/common_test/src/test_server.erl | 2783
-rw-r--r--  lib/common_test/src/test_server_ctrl.erl | 5692
-rw-r--r--  lib/common_test/src/test_server_gl.erl | 350
-rw-r--r--  lib/common_test/src/test_server_internal.hrl | 60
-rw-r--r--  lib/common_test/src/test_server_io.erl | 452
-rw-r--r--  lib/common_test/src/test_server_node.erl | 766
-rw-r--r--  lib/common_test/src/test_server_sup.erl | 943
-rw-r--r--  lib/common_test/test/Makefile | 13
-rw-r--r--  lib/common_test/test/common_test.cover | 7
-rw-r--r--  lib/common_test/test/ct_event_handler_SUITE_data/eh_A.erl | 2
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE.erl | 2
-rw-r--r--  lib/common_test/test/ct_master_SUITE.erl | 2
-rw-r--r--  lib/common_test/test/ct_misc_1_SUITE.erl | 1
-rw-r--r--  lib/common_test/test/ct_release_test_SUITE.erl | 190
-rw-r--r--  lib/common_test/test/ct_release_test_SUITE_data/release_test_SUITE.erl | 118
-rw-r--r--  lib/common_test/test/ct_test_support.erl | 12
-rw-r--r--  lib/common_test/test/ct_test_support_eh.erl | 2
-rw-r--r--  lib/common_test/test/erl2html2_SUITE.erl | 277
-rw-r--r--  lib/common_test/test/erl2html2_SUITE_data/Makefile.src | 2
-rw-r--r--  lib/common_test/test/erl2html2_SUITE_data/header1.hrl | 4
-rw-r--r--  lib/common_test/test/erl2html2_SUITE_data/include/header2.hrl | 0
-rw-r--r--  lib/common_test/test/erl2html2_SUITE_data/include/header3.hrl | 1
-rw-r--r--  lib/common_test/test/erl2html2_SUITE_data/m1.erl | 52
-rw-r--r--  lib/common_test/test/test_server_SUITE.erl | 449
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/Makefile.src | 11
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_SUITE.erl | 514
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_SUITE_data/dummy_file | 1
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_break_SUITE.erl | 149
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_conf01_SUITE.erl | 188
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_conf02_SUITE.erl | 295
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE.erl | 59
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl | 4
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_parallel01_SUITE.erl | 519
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_shuffle01_SUITE.erl | 477
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_skip_SUITE.erl | 43
-rw-r--r--  lib/common_test/test/test_server_SUITE_data/test_server_unicode_SUITE.erl | 82
-rw-r--r--  lib/common_test/test/test_server_test_lib.erl | 217
-rw-r--r--  lib/common_test/test/test_server_test_lib.hrl | 23
-rw-r--r--  lib/common_test/test_server/Makefile | 96
-rw-r--r--  lib/common_test/test_server/conf_vars.in | 25
-rw-r--r--  lib/common_test/test_server/configure.in | 509
-rw-r--r--  lib/common_test/test_server/cross.cover | 20
-rw-r--r--  lib/common_test/test_server/ts.config | 46
-rw-r--r--  lib/common_test/test_server/ts.erl | 1019
-rw-r--r--  lib/common_test/test_server/ts.hrl | 38
-rw-r--r--  lib/common_test/test_server/ts.unix.config | 6
-rw-r--r--  lib/common_test/test_server/ts.win32.config | 8
-rw-r--r--  lib/common_test/test_server/ts_autoconf_win32.erl | 256
-rw-r--r--  lib/common_test/test_server/ts_benchmark.erl | 87
-rw-r--r--  lib/common_test/test_server/ts_erl_config.erl | 403
-rw-r--r--  lib/common_test/test_server/ts_install.erl | 465
-rw-r--r--  lib/common_test/test_server/ts_install_cth.erl | 270
-rw-r--r--  lib/common_test/test_server/ts_lib.erl | 372
-rw-r--r--  lib/common_test/test_server/ts_make.erl | 114
-rw-r--r--  lib/common_test/test_server/ts_run.erl | 455
-rw-r--r--  lib/common_test/vsn.mk | 2
105 files changed, 30819 insertions, 5071 deletions
diff --git a/lib/common_test/doc/src/Makefile b/lib/common_test/doc/src/Makefile
index cedddd75ac..a0c89b1222 100644
--- a/lib/common_test/doc/src/Makefile
+++ b/lib/common_test/doc/src/Makefile
@@ -36,26 +36,24 @@ RELSYSDIR = $(RELEASE_PATH)/lib/$(APPLICATION)-$(VSN)
# Target Specs
# ----------------------------------------------------
-# REMEMBER: links to HTML files for these modules in ref_man.xml
-CT_MODULES = \
- ct \
- ct_master \
- ct_cover \
- ct_telnet \
- ct_ftp \
- ct_ssh \
- ct_rpc \
- ct_snmp \
- unix_telnet \
- ct_slave \
- ct_property_test \
- ct_netconfc
-
CT_XML_FILES = $(CT_MODULES:=.xml)
XML_APPLICATION_FILES = ref_man.xml
XML_REF1_FILES = ct_run.xml
-XML_REF3_FILES = $(CT_XML_FILES) ct_hooks.xml
+# REMEMBER: links to HTML files for these modules in ref_man.xml
+XML_REF3_FILES = ct.xml \
+ ct_master.xml \
+ ct_cover.xml \
+ ct_telnet.xml \
+ ct_ftp.xml \
+ ct_ssh.xml \
+ ct_rpc.xml \
+ ct_snmp.xml \
+ unix_telnet.xml \
+ ct_slave.xml \
+ ct_property_test.xml \
+ ct_netconfc.xml \
+ ct_hooks.xml
XML_REF6_FILES = common_test_app.xml
XML_PART_FILES = part.xml
@@ -80,8 +78,6 @@ XML_CHAPTER_FILES = \
notes.xml \
notes_history.xml
-MAKE_EDOC = make_edoc
-
BOOK_FILES = book.xml
GIF_FILES = \
@@ -92,7 +88,7 @@ GIF_FILES = \
INSTALL_NOTES = ../../notes.html
XML_FILES=$(XML_APPLICATION_FILES) $(XML_REF1_FILES) $(XML_REF3_FILES) $(XML_REF6_FILES) \
- $(XML_PART_FILES) $(XML_CHAPTER_FILES) $(XML_REF_FILES) $(BOOK_FILES)
+ $(XML_PART_FILES) $(XML_CHAPTER_FILES) $(BOOK_FILES)
# ----------------------------------------------------
@@ -119,19 +115,11 @@ DVIPS_FLAGS +=
# Targets
# ----------------------------------------------------
-CT_SRC_DIR = $(ERL_TOP)/../internal_tools/common_test/src
-
$(HTMLDIR)/%.gif: %.gif
$(INSTALL_DATA) $< $@
docs: pdf html man
-$(CT_XML_FILES): %.xml: ../../src/%.erl
- escript $(DOCGEN)/priv/bin/xml_from_edoc.escript -preprocess true -i $(XMERL_DIR)/include \
- -i ../../../test_server/include -i ../../include \
- -i ../../../../erts/lib/kernel/include -i ../../../../lib/kernel/include \
- -i ../../../../erts/lib/snmp/include -i ../../../../lib/snmp/include ../../src/$(@:%.xml=%.erl)
-
$(TOP_PDF_FILE): $(XML_FILES)
pdf: $(TOP_PDF_FILE)
@@ -146,7 +134,6 @@ man: $(MAN6_FILES) $(MAN3_FILES) $(MAN1_FILES)
debug opt:
clean clean_docs:
- rm -f $(CT_XML_FILES)
rm -rf $(HTMLDIR)/*
rm -f $(MAN1DIR)/*
rm -f $(MAN3DIR)/*
diff --git a/lib/common_test/doc/src/basics_chapter.xml b/lib/common_test/doc/src/basics_chapter.xml
index a01e3a9272..1a5a686fa0 100644
--- a/lib/common_test/doc/src/basics_chapter.xml
+++ b/lib/common_test/doc/src/basics_chapter.xml
@@ -31,74 +31,74 @@
</header>
<marker id="basics"></marker>
<section>
- <title>Introduction</title>
+ <title>General</title>
- <p>The <em>Common Test</em> framework (CT) is a tool which supports
- implementation and automated execution of test cases towards arbitrary
- types of target systems. The CT framework is based on the OTP Test
- Server and it's the main tool being used in all testing- and verification
- activities that are part of Erlang/OTP system development- and maintenance.
+ <p>The <c>Common Test</c> framework is a tool that supports
+ implementation and automated execution of test cases for any
+ type of target system. <c>Common Test</c> is the main tool used
+ in all testing and verification activities that are part of Erlang/OTP
+ system development and maintenance.
</p>
- <p>Test cases can be executed individually or in batches. Common Test
- also features a distributed testing mode with central control and logging
- (a feature that makes it possible to test multiple systems independently in
- one common session, useful e.g. for running automated large-scale regression
- tests).
+ <p>Test cases can be executed individually or in batches. <c>Common Test</c>
+ also features a distributed testing mode with central control and logging.
+ With this feature, multiple systems can be tested independently in
+ one common session. This is useful, for example, when running automated
+ large-scale regression tests.
</p>
<p>
- The SUT (System Under Test) may consist of one or several target
- nodes. CT contains a generic test server which, together with
- other test utilities, is used to perform test case execution.
- It is possible to start the tests from a GUI or from the OS- or
+ The System Under Test (SUT) can consist of one or more target
+ nodes. <c>Common Test</c> contains a generic test server that,
+ together with other test utilities, is used to perform test case execution.
+ The tests can be started from a GUI, from the OS shell, or from an
Erlang shell. <em>Test suites</em> are files (Erlang
modules) that contain the <em>test cases</em> (Erlang functions)
to be executed. <em>Support modules</em> provide functions
- that the test cases utilize in order to carry out the tests.
+ that the test cases use to do the tests.
</p>
- <p>In a black-box testing scenario, CT based test programs connect to
- the target system(s) via standard O&amp;M and CLI protocols. CT
+ <p>In a black-box testing scenario, <c>Common Test</c>-based test programs connect to
+ the target system(s) through standard O&amp;M and CLI protocols. <c>Common Test</c>
provides implementations of, and wrapper interfaces to, some of these
- protocols (most of which exist as stand-alone components and
+ protocols (most of which exist as standalone components and
applications in OTP). The wrappers simplify configuration and add
- verbosity for logging purposes. CT will be continously extended with
- useful support modules. (Note however that it's
- a straightforward task to use any arbitrary Erlang/OTP component
- for testing purposes with Common Test, without needing a CT wrapper
- for it. It's as simple as calling Erlang functions). There
- are a number of target independent interfaces supported in CT, such as
- Generic Telnet, FTP, etc, which can be specialized or used
- directly for controlling instruments, traffic load generators, etc.
+ verbosity for logging purposes. <c>Common Test</c> is continuously extended with
+ useful support modules. However, notice that it is
+ a straightforward task to use any Erlang/OTP component
+ for testing purposes with <c>Common Test</c>, without needing a <c>Common Test</c>
+ wrapper for it. It is as simple as calling Erlang functions. A number of
+ target-independent interfaces are supported in <c>Common Test</c>, such as
+ Generic Telnet and FTP. These can be specialized or used
+ directly for controlling instruments, traffic load generators, and so on.
</p>
- <p>Common Test is also a very useful tool for white-box testing Erlang
- code (e.g. module testing), since the test programs can call exported Erlang
- functions directly and there's very little overhead required for
+ <p><c>Common Test</c> is also a very useful tool for white-box testing Erlang
+ code (for example, module testing), as the test programs can call exported Erlang
+ functions directly. There is very little overhead required for
implementing basic test suites and executing simple tests. For black-box
- testing Erlang software, Erlang RPC as well as standard O&amp;M interfaces
- can for example be used.
+ testing Erlang software, Erlang RPC and standard O&amp;M interfaces
+ can be used, for example.
</p>
- <p>A test case can handle several connections towards one or
- several target systems, instruments and traffic generators in
- parallel in order to perform the necessary actions for a
- test. The handling of many connections in parallel is one of
- the major strengths of Common Test (thanks to the efficient
- support for concurrency in the Erlang runtime system - which CT users
- can take great advantage of!).
+ <p>A test case can handle several connections to one or
+ more target systems, instruments, and traffic generators in
+ parallel to perform the necessary actions for a test.
+ The handling of many connections in parallel is one of
+ the major strengths of <c>Common Test</c>, thanks to the efficient
+ support for concurrency in the Erlang runtime system, which <c>Common Test</c>
+ users can take great advantage of.
</p>
</section>
<section>
<title>Test Suite Organisation</title>
<p>
- The test suites are organized in test directories and each test suite
- may have a separate data directory. Typically, these files and directories
- are version controlled similarly to other forms of source code (possibly by
- means of a version control system like GIT or Subversion). However, CT does
- not itself put any requirements on (or has any form of awareness of)
+ Test suites are organized in test directories and each test suite
+ can have a separate data directory. Typically, these files and directories
+ are version-controlled similarly to other forms of source code (possibly by
+ a version control system like GIT or Subversion). However, <c>Common Test</c>
+ does not itself put any requirements on (or have any awareness of)
possible file and directory versions.
</p>
</section>
@@ -109,8 +109,8 @@
Support libraries contain functions that are useful for all test suites,
or for test suites in a specific functional area or subsystem.
In addition to the general support libraries provided by the
- CT framework, and the various libraries and applications provided by
- Erlang/OTP, there might also be a need for customized (user specific)
+ <c>Common Test</c> framework, and the various libraries and applications provided by
+ Erlang/OTP, there can also be a need for customized (user specific)
support libraries.
</p>
</section>
@@ -121,118 +121,122 @@
Testing is performed by running test suites (sets of test cases) or
individual test cases. A test suite is implemented as an Erlang module named
<c><![CDATA[<suite_name>_SUITE.erl]]></c> which contains a number of test cases.
- A test case is an Erlang function which tests one or more things.
- The test case is the smallest unit that the CT test server deals with.
+ A test case is an Erlang function that tests one or more things.
+ The test case is the smallest unit that the <c>Common Test</c> test server deals with.
</p>
<p>
- Subsets of test cases, called test case groups, may also be defined. A test case
+ Subsets of test cases, called test case groups, can also be defined. A test case
group can have execution properties associated with it. Execution properties
- specify whether the test cases in the group should be executed in
- random order, in parallel, in sequence, and if the execution of the group
- should be repeated. Test case groups may also be nested (i.e. a group may,
- besides test cases, contain sub-groups).
+ specify if the test cases in the group are to be executed in
+ random order, in parallel, or in sequence, and if the execution of the group
+ is to be repeated. Test case groups can also be nested (that is, a group can,
+ besides test cases, contain subgroups).
</p>
<p>
- Besides test cases and groups, the test suite may also contain configuration
+ Besides test cases and groups, the test suite can also contain configuration
functions. These functions are meant to be used for setting up (and verifying)
- environment and state on the SUT (and/or the CT host node), required for
- the tests to execute correctly. Examples of operations: Opening a connection
- to the SUT, initializing a database, running an installation script, etc.
- Configuration may be performed per suite, per test case group and per
- individual test case.
+ environment and state in the SUT (and/or the <c>Common Test</c> host node),
+ required for the tests to execute correctly. Examples of operations are:
+ Opening a connection to the SUT, initializing a database, running an installation
+ script, and so on. Configuration can be performed per suite, per test case group,
+ and per individual test case.
</p>
<p>
The test suite module must conform to a
<seealso marker="common_test">callback interface</seealso>
- specified by the CT test server. See the
- <seealso marker="write_test_chapter#intro">Writing Test Suites</seealso> chapter
- for more information.
+ specified by the <c>Common Test</c> test server. For details, see section
+ <seealso marker="write_test_chapter#intro">Writing Test Suites</seealso>.
</p>
<p>
A test case is considered successful if it returns to the caller, no matter
- what the returned value is. A few return values have special meaning however
- (such as <c>{skip,Reason}</c> which indicates that the test case is skipped,
- <c>{comment,Comment}</c> which prints a comment in the log for the test case and
- <c>{save_config,Config}</c> which makes the CT test server pass <c>Config</c> to
- the next test case).
+ what the returned value is. However, a few return values have special meaning
+ as follows:</p>
+ <list type="bulleted">
+ <item><c>{skip,Reason}</c> indicates that the test case is skipped.</item>
+ <item><c>{comment,Comment}</c> prints a comment in the log for the test case.</item>
+ <item><c>{save_config,Config}</c> makes the <c>Common Test</c> test server pass
+ <c>Config</c> to the next test case.</item>
+ </list>
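
As a quick illustration of these special return values, a test case could end like the sketch below (the configuration key and log texts are hypothetical):

    %% Sketch only: skips the case when a hypothetical 'backend'
    %% config key is missing, otherwise adds a comment to the HTML log.
    backend_check(Config) ->
        case proplists:get_value(backend, Config) of
            undefined -> {skip, "no backend configured"};
            _Backend  -> {comment, "backend present"}
        end.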
+ <p>
A test case failure is specified as a runtime error (a crash), no matter what
the reason for termination is. If you use Erlang pattern matching effectively,
- you can take advantage of this property. The result will be concise and
+ you can take advantage of this property. The result is concise and
readable test case functions that look much more like scripts than actual programs.
- Simple example:
+ A simple example:
</p>
<pre>
- session(_Config) ->
- {started,ServerId} = my_server:start(),
- {clients,[]} = my_server:get_clients(ServerId),
- MyId = self(),
- connected = my_server:connect(ServerId, MyId),
- {clients,[MyId]} = my_server:get_clients(ServerId),
- disconnected = my_server:disconnect(ServerId, MyId),
- {clients,[]} = my_server:get_clients(ServerId),
- stopped = my_server:stop(ServerId).
- </pre>
+ session(_Config) ->
+ {started,ServerId} = my_server:start(),
+ {clients,[]} = my_server:get_clients(ServerId),
+ MyId = self(),
+ connected = my_server:connect(ServerId, MyId),
+ {clients,[MyId]} = my_server:get_clients(ServerId),
+ disconnected = my_server:disconnect(ServerId, MyId),
+ {clients,[]} = my_server:get_clients(ServerId),
+ stopped = my_server:stop(ServerId).</pre>
<p>
As a test suite runs, all information (including output to <c>stdout</c>) is
- recorded in several different log files. A minimum of information is displayed
+ recorded in many different log files. A minimum of information is displayed
in the user console (only start and stop information, plus a note
for each failed test case).
</p>
<p>
The result from each test case is recorded in a dedicated HTML log file, created
for the particular test run. An overview page displays each test case represented
- by row in a table showing total execution time, whether the case was successful,
- failed or skipped, plus an optional user comment. (For a failed test case, the
- reason for termination is also printed in the comment field). The overview page
+ by a table row showing total execution time, if the case was successful,
+ failed, or skipped, plus an optional user comment. For a failed test case, the
+ reason for termination is also printed in the comment field. The overview page
has a link to each test case log file, providing simple navigation with any standard
HTML browser.
</p>
</section>
<section>
+<marker id="External_Interfaces"></marker>
<title>External Interfaces</title>
<p>
- The CT test server requires that the test suite defines and exports the
+ The <c>Common Test</c> test server requires that the test suite defines and exports the
following mandatory or optional callback functions:
</p>
<taglist>
- <tag>all()</tag>
- <item>Returns a list of all test cases and groups in the suite. (Mandatory)</item>
- <tag>suite()</tag>
- <item>Info function used to return properties for the suite. (Optional)</item>
- <tag>groups()</tag>
- <item>For declaring test case groups. (Optional)</item>
- <tag>init_per_suite(Config)</tag>
- <item>Suite level configuration function, executed before the first
- test case. (Optional)</item>
- <tag>end_per_suite(Config)</tag>
- <item>Suite level configuration function, executed after the last
- test case. (Optional)</item>
- <tag>group(GroupName)</tag>
- <item>Info function used to return properties for a test case group. (Optional)</item>
- <tag>init_per_group(GroupName, Config)</tag>
- <item>Configuration function for a group, executed before the first
- test case. (Optional)</item>
- <tag>end_per_group(GroupName, Config)</tag>
- <item>Configuration function for a group, executed after the last
- test case. (Optional)</item>
- <tag>init_per_testcase(TestCase, Config)</tag>
- <item>Configuration function for a testcase, executed before each
- test case. (Optional)</item>
- <tag>end_per_testcase(TestCase, Config)</tag>
- <item>Configuration function for a testcase, executed after each
- test case. (Optional)</item>
+ <tag><c>all()</c></tag>
+ <item><p>Returns a list of all test cases and groups in the suite. (Mandatory)</p></item>
+ <tag><c>suite()</c></tag>
+ <item><p>Information function used to return properties for the suite. (Optional)</p></item>
+ <tag><c>groups()</c></tag>
+ <item><p>For declaring test case groups. (Optional)</p></item>
+ <tag><c>init_per_suite(Config)</c></tag>
+ <item><p>Suite level configuration function, executed before the first
+ test case. (Optional)</p></item>
+ <tag><c>end_per_suite(Config)</c></tag>
+ <item><p>Suite level configuration function, executed after the last
+ test case. (Optional)</p></item>
+ <tag><c>group(GroupName)</c></tag>
+ <item><p>Information function used to return properties for a test case group. (Optional)</p></item>
+ <tag><c>init_per_group(GroupName, Config)</c></tag>
+ <item><p>Configuration function for a group, executed before the first
+ test case. (Optional)</p></item>
+ <tag><c>end_per_group(GroupName, Config)</c></tag>
+ <item><p>Configuration function for a group, executed after the last
+ test case. (Optional)</p></item>
+ <tag><c>init_per_testcase(TestCase, Config)</c></tag>
+ <item><p>Configuration function for a testcase, executed before each
+ test case. (Optional)</p></item>
+ <tag><c>end_per_testcase(TestCase, Config)</c></tag>
+ <item><p>Configuration function for a testcase, executed after each
+ test case. (Optional)</p></item>
</taglist>
<p>
- For each test case the CT test server expects these functions:
+ For each test case, the <c>Common Test</c> test server expects the
+ following functions:
</p>
<taglist>
<tag>Testcasename()</tag>
- <item>Info function that returns a list of test case properties. (Optional)</item>
+ <item><p>Information function that returns a list of test case properties. (Optional)</p></item>
<tag>Testcasename(Config)</tag>
- <item>The actual test case function.</item>
+ <item><p>The test case function.</p></item>
</taglist>
</section>
</chapter>
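Assembled into a module, the callback interface above yields a minimal suite skeleton along these lines (a sketch; the module and test case names are invented for illustration):

    -module(example_SUITE).
    -include_lib("common_test/include/ct.hrl").
    -export([all/0, suite/0, init_per_suite/1, end_per_suite/1,
             init_per_testcase/2, end_per_testcase/2, my_case/1]).

    suite() -> [{timetrap, {seconds, 30}}].    %% optional info function
    all() -> [my_case].                        %% mandatory
    init_per_suite(Config) -> Config.          %% optional, runs first
    end_per_suite(_Config) -> ok.              %% optional, runs last
    init_per_testcase(_TC, Config) -> Config.  %% optional, before each case
    end_per_testcase(_TC, _Config) -> ok.      %% optional, after each case

    my_case(Config) ->
        true = is_list(Config).                %% any crash fails the case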
diff --git a/lib/common_test/doc/src/common_test_app.xml b/lib/common_test/doc/src/common_test_app.xml
index cc554eb84e..10c93e2ed1 100644
--- a/lib/common_test/doc/src/common_test_app.xml
+++ b/lib/common_test/doc/src/common_test_app.xml
@@ -33,89 +33,84 @@
<file>common_test_app.sgml</file>
</header>
<module>common_test</module>
- <modulesummary>A framework for automated testing of arbitrary target nodes</modulesummary>
+ <modulesummary>A framework for automated testing of any target nodes.</modulesummary>
<description>
- <p>The <em>Common Test</em> framework is an environment for
+ <p>The <c>Common Test</c> framework is an environment for
implementing and performing automatic and semi-automatic execution of
- test cases.
+ test cases.</p>
- Common Test uses the OTP Test Server as engine for test case
- execution and logging.</p>
-
- <p>In brief, Common Test supports:</p>
+ <p>In brief, <c>Common Test</c> supports:</p>
<list>
- <item>Automated execution of test suites (sets of test cases).</item>
- <item>Logging of the events during execution.</item>
- <item>HTML presentation of test suite results.</item>
- <item>HTML presentation of test suite code.</item>
- <item>Support functions for test suite authors.</item>
- <item>Step by step execution of test cases.</item>
+ <item>Automated execution of test suites (sets of test cases)</item>
+ <item>Logging of events during execution</item>
+ <item>HTML presentation of test suite results</item>
+ <item>HTML presentation of test suite code</item>
+ <item>Support functions for test suite authors</item>
+ <item>Step-by-step execution of test cases</item>
</list>
-
- <p>The following sections describe the mandatory and optional test suite
- functions Common Test will call during test execution. For more details
- see <seealso marker="write_test_chapter">Common Test User's
- Guide.</seealso> </p>
-
+
+ <p>The following section describes the mandatory and optional test suite
+ functions that <c>Common Test</c> calls during test execution.
+ For more details, see section
+ <seealso marker="write_test_chapter">Writing Test Suites</seealso>
+ in the User's Guide.</p>
+
</description>
<section>
- <title>TEST CASE CALLBACK FUNCTIONS</title>
+ <title>Test Case Callback Functions</title>
<p>The following functions define the callback interface
for a test suite.</p>
</section>
-
+
<funcs>
<func>
<name>Module:all() -> Tests | {skip,Reason} </name>
<fsummary>Returns the list of all test case groups and test cases
in the module.</fsummary>
<type>
- <v>Tests = [TestCase | {group,GroupName} |
- {group,GroupName,Properties} |
- {group,GroupName,Properties,SubGroups}]</v>
+ <v>Tests = [TestCase | {group,GroupName} | {group,GroupName,Properties} | {group,GroupName,Properties,SubGroups}]</v>
<v>TestCase = atom()</v>
<v>GroupName = atom()</v>
- <v>Properties = [parallel | sequence | Shuffle | {RepeatType,N}] |
- default</v>
- <v>SubGroups = [{GroupName,Properties} |
- {GroupName,Properties,SubGroups}]</v>
+ <v>Properties = [parallel | sequence | Shuffle | {RepeatType,N}] | default</v>
+ <v>SubGroups = [{GroupName,Properties} | {GroupName,Properties,SubGroups}]</v>
<v>Shuffle = shuffle | {shuffle,Seed}</v>
<v>Seed = {integer(),integer(),integer()}</v>
- <v>RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
- repeat_until_any_ok | repeat_until_any_fail</v>
+ <v>RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail | repeat_until_any_ok | repeat_until_any_fail</v>
<v>N = integer() | forever</v>
<v>Reason = term()</v>
</type>
-
+
<desc>
- <p> MANDATORY </p>
-
- <p>This function must return the list of all test cases and test
- case groups in the test suite module that are to be executed.
- This list also specifies the order the cases and groups will
- be executed by Common Test. A test case is represented by an atom,
+ <p>MANDATORY</p>
+
+ <p>Returns the list of all test cases and test case groups in the
+ test suite module to be executed. This list also specifies the
+ order the cases and groups are executed by <c>Common Test</c>.
+ A test case is represented by an atom,
the name of the test case function. A test case group is
represented by a <c>group</c> tuple, where <c>GroupName</c>,
- an atom, is the name of the group (defined in <seealso marker="#Module:groups-0"><c>groups/0</c></seealso>).
- Execution properties for groups may also be specified, both
- for a top level group and for any of its sub-groups.
- Group execution properties specified here, will override
- properties in the group definition (see <seealso marker="#Module:groups-0"><c>groups/0</c></seealso>).
+ an atom, is the name of the group (defined in
+ <seealso marker="#Module:groups-0"><c>groups/0</c></seealso>).
+ Execution properties for groups can also be specified, both
+ for a top-level group and for any of its subgroups.
+ Group execution properties specified here override
+ properties in the group definition (see
+ <seealso marker="#Module:groups-0"><c>groups/0</c></seealso>).
(With value <c>default</c>, the group definition properties
- will be used).</p>
-
- <p> If <c>{skip,Reason}</c> is returned, all test cases
- in the module will be skipped, and the <c>Reason</c> will
- be printed on the HTML result page.</p>
-
- <p>For details on groups, see
- <seealso marker="write_test_chapter#test_case_groups">Test case
- groups</seealso> in the User's Guide.</p>
-
+ are used).</p>
+
+ <p>If <c>{skip,Reason}</c> is returned, all test cases
+ in the module are skipped and <c>Reason</c>
+ is printed on the HTML result page.</p>
+
+ <p>For details on groups, see section
+ <seealso marker="write_test_chapter#test_case_groups">Test Case
+ Groups</seealso> in the User's Guide.</p>
+
</desc>
</func>
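
For example, an all/0 function mixing plain test cases with groups, including a property override, could be written as sketched below (group and case names are hypothetical):

    all() ->
        [start_stop,                       %% plain test case
         {group, db_tests},                %% properties taken from groups/0
         {group, net_tests, [parallel]}].  %% overrides the group definition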
@@ -123,25 +118,24 @@
<name>Module:groups() -> GroupDefs</name>
<fsummary>Returns a list of test case group definitions.</fsummary>
<type>
- <v>GroupDefs = [Group]</v>
- <v>Group = {GroupName,Properties,GroupsAndTestCases}</v>
- <v>GroupName = atom()</v>
- <v>Properties = [parallel | sequence | Shuffle | {RepeatType,N}]</v>
- <v>GroupsAndTestCases = [Group | {group,GroupName} | TestCase]</v>
- <v>TestCase = atom()</v>
- <v>Shuffle = shuffle | {shuffle,Seed}</v>
- <v>Seed = {integer(),integer(),integer()}</v>
- <v>RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
- repeat_until_any_ok | repeat_until_any_fail</v>
- <v>N = integer() | forever</v>
+ <v>GroupDefs = [Group]</v>
+ <v>Group = {GroupName,Properties,GroupsAndTestCases}</v>
+ <v>GroupName = atom()</v>
+ <v>Properties = [parallel | sequence | Shuffle | {RepeatType,N}]</v>
+ <v>GroupsAndTestCases = [Group | {group,GroupName} | TestCase]</v>
+ <v>TestCase = atom()</v>
+ <v>Shuffle = shuffle | {shuffle,Seed}</v>
+ <v>Seed = {integer(),integer(),integer()}</v>
+ <v>RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail | repeat_until_any_ok | repeat_until_any_fail</v>
+ <v>N = integer() | forever</v>
</type>
-
+
<desc>
- <p> OPTIONAL </p>
-
- <p>Function for defining test case groups. Please see
- <seealso marker="write_test_chapter#test_case_groups">Test case
- groups</seealso> in the User's Guide for details.</p>
+ <p>OPTIONAL</p>
+
+ <p>Defines test case groups. For details, see section
+ <seealso marker="write_test_chapter#test_case_groups">Test Case
+ Groups</seealso> in the User's Guide.</p>
</desc>
</func>
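
A groups/0 definition with execution properties and a nested subgroup might look as follows (a sketch with invented names):

    groups() ->
        [{db_tests, [sequence], [db_init, db_read, db_clear]},
         {net_tests, [shuffle, {repeat, 2}],
          [ping_case, {group, tcp_cases}]},           %% nested subgroup
         {tcp_cases, [parallel], [send_case, recv_case]}].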
@@ -150,75 +144,71 @@
<fsummary>Test suite info function (providing default data
for the suite).</fsummary>
<type>
- <v> Info = {timetrap,Time} | {require,Required} |
- {require,Name,Required} | {userdata,UserData} |
- {silent_connections,Conns} | {stylesheet,CSSFile} |
- {ct_hooks, CTHs}</v>
- <v> Time = TimeVal | TimeFunc</v>
- <v> TimeVal = MilliSec | {seconds,integer()} | {minutes,integer()} |
- {hours,integer()}</v>
- <v> TimeFunc = {Mod,Func,Args} | Fun</v>
- <v> MilliSec = integer()</v>
- <v> Mod = atom()</v>
- <v> Func = atom()</v>
- <v> Args = list()</v>
- <v> Fun = fun()</v>
- <v> Required = Key | {Key,SubKeys} | {Key,SubKey} | {Key,SubKey,SubKeys}</v>
- <v> Key = atom()</v>
- <v> SubKeys = SubKey | [SubKey]</v>
- <v> SubKey = atom()</v>
- <v> Name = atom()</v>
- <v> UserData = term()</v>
- <v> Conns = [atom()]</v>
- <v> CSSFile = string()</v>
- <v> CTHs = [CTHModule |</v>
- <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs} |</v>
- <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs, CTHPriority}]</v>
- <v> CTHModule = atom()</v>
- <v> CTHInitArgs = term()</v>
+ <v>Info = {timetrap,Time} | {require,Required} | {require,Name,Required} | {userdata,UserData} | {silent_connections,Conns} | {stylesheet,CSSFile} | {ct_hooks, CTHs}</v>
+ <v>Time = TimeVal | TimeFunc</v>
+ <v>TimeVal = MilliSec | {seconds,integer()} | {minutes,integer()} | {hours,integer()}</v>
+ <v>TimeFunc = {Mod,Func,Args} | Fun</v>
+ <v>MilliSec = integer()</v>
+ <v>Mod = atom()</v>
+ <v>Func = atom()</v>
+ <v>Args = list()</v>
+ <v>Fun = fun()</v>
+ <v>Required = Key | {Key,SubKeys} | {Key,SubKey} | {Key,SubKey,SubKeys}</v>
+ <v>Key = atom()</v>
+ <v>SubKeys = SubKey | [SubKey]</v>
+ <v>SubKey = atom()</v>
+ <v>Name = atom()</v>
+ <v>UserData = term()</v>
+ <v>Conns = [atom()]</v>
+ <v>CSSFile = string()</v>
+ <v>CTHs = [CTHModule |</v>
+ <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs} |</v>
+ <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs, CTHPriority}]</v>
+ <v>CTHModule = atom()</v>
+ <v>CTHInitArgs = term()</v>
</type>
<desc>
-
- <p> OPTIONAL </p>
-
- <p>This is the test suite info function. It is supposed to
- return a list of tagged tuples that specify various properties
- related to the execution of this test suite (common for all
- test cases in the suite).</p>
-
- <p>The <c>timetrap</c> tag sets the maximum time each
- test case is allowed to execute (including <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
- and <seealso marker="#Module:end_per_testcase-2"><c>end_per_testcase/2</c></seealso>). If the timetrap time is
- exceeded, the test case fails with reason
+
+ <p>OPTIONAL</p>
+
+ <p>The test suite information function. Returns a list of tagged
+ tuples specifying various properties related to the execution of
+ this test suite (common for all test cases in the suite).</p>
+
+ <p>Tag <c>timetrap</c> sets the maximum time that each
+ test case is allowed to execute (including
+ <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
+ and
+ <seealso marker="#Module:end_per_testcase-2"><c>end_per_testcase/2</c></seealso>).
+ If the timetrap time is exceeded, the test case fails with reason
<c>timetrap_timeout</c>. A <c>TimeFunc</c> function can be used to
- set a new timetrap by returning a <c>TimeVal</c>. It may also be
- used to trigger a timetrap timeout by, at some point, returning a
- value other than a <c>TimeVal</c>. (See the
- <seealso marker="write_test_chapter#timetraps">User's Guide</seealso>
- for details).
- </p>
-
- <p>The <c>require</c> tag specifies configuration variables
- that are required by test cases (and/or configuration functions)
+ set a new timetrap by returning a <c>TimeVal</c>. It can also be
+ used to trigger a timetrap time-out by, at some point, returning a
+ value other than a <c>TimeVal</c>. For details, see section
+ <seealso marker="write_test_chapter#timetraps">Timetrap Time-Outs</seealso>
+ in the User's Guide.</p>
+
+ <p>Tag <c>require</c> specifies configuration variables
+ required by test cases (or configuration functions)
in the suite. If the required configuration variables are not found
- in any of the configuration files, all test cases are skipped. For more
- information about the 'require' functionality, see the
- reference manual for the function
- <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso>.</p>
+ in any of the configuration files, all test cases are skipped.
+ For details about the <c>require</c> functionality, see function
+ <seealso marker="ct#require-1"><c>ct:require/1,2</c></seealso>.</p>
- <p>With <c>userdata</c>, it is possible for the user to
- specify arbitrary test suite related information which can be
- read by calling <seealso marker="ct#userdata-2"><c>ct:userdata/2</c></seealso>.</p>
+ <p>With <c>userdata</c>, the user can
+ specify any test suite-related information, which can be
+ read by calling
+ <seealso marker="ct#userdata-2"><c>ct:userdata/2</c></seealso>.</p>
- <p>The <c>ct_hooks</c> tag specifies which
+ <p>Tag <c>ct_hooks</c> specifies the
<seealso marker="ct_hooks_chapter">Common Test Hooks</seealso>
- are to be run together with this suite.</p>
-
- <p>Other tuples than the ones defined will simply be ignored.</p>
+ to be run with this suite.</p>
- <p>For more information about the test suite info function,
- see <seealso marker="write_test_chapter#suite">Test
- suite info function</seealso> in the User's Guide.</p>
+ <p>Other tuples than the ones defined are ignored.</p>
+
+ <p>For details about the test suite information function, see section
+ <seealso marker="write_test_chapter#suite">Test
+ Suite Information Function</seealso> in the User's Guide.</p>
</desc>
</func>
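
A suite/0 function combining several of these tags could look like the sketch below (the hook module and configuration variable names are hypothetical):

    suite() ->
        [{timetrap, {minutes, 2}},            %% max time per test case
         {require, db_host},                  %% must exist in a config file
         {userdata, [{owner, "test team"}]},  %% readable via ct:userdata/2
         {ct_hooks, [my_hook_module]}].       %% hypothetical CT hook module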
@@ -227,129 +217,133 @@
{skip_and_save,Reason,SaveConfig}</name>
<fsummary>Test suite initializations.</fsummary>
<type>
- <v> Config = NewConfig = SaveConfig = [{Key,Value}]</v>
- <v> Key = atom()</v>
- <v> Value = term()</v>
- <v> Reason = term()</v>
+ <v>Config = NewConfig = SaveConfig = [{Key,Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
<desc>
-
- <p> OPTIONAL </p>
-
+
+ <p>OPTIONAL</p>
+
<p>This configuration function is called as the first function in the
- suite. It typically contains initializations which are common for
- all test cases in the suite, and which shall only be done
- once. The <c>Config</c> parameter is the configuration data
- which can be modified here. Whatever is returned from this
- function is given as <c>Config</c> to all configuration functions
- and test cases in the suite. If <c>{skip,Reason}</c>
- is returned, all test cases in the suite will be skipped
- and <c>Reason</c> printed in the overview log for the suite.</p>
- <p>For information on <c>save_config</c> and <c>skip_and_save</c>,
- please see
- <seealso marker="dependencies_chapter#save_config">Dependencies
- between Test Cases and Suites</seealso> in the User's Guide.</p>
- </desc>
+ suite. It typically contains initializations that are common for
+ all test cases in the suite, and that must only be done
+ once. Parameter <c>Config</c> is the configuration data
+ that can be modified. Whatever is returned from this
+ function is specified as <c>Config</c> to all configuration functions
+ and test cases in the suite.</p>
+
+ <p>If <c>{skip,Reason}</c>
+ is returned, all test cases in the suite are skipped
+ and <c>Reason</c> is printed in the overview log for the suite.</p>
+
+ <p>For information on <c>save_config</c> and <c>skip_and_save</c>,
+ see section
+ <seealso marker="dependencies_chapter#save_config">Saving
+ Configuration Data</seealso> in the User's Guide.</p>
+ </desc>
</func>
-
+
<func>
<name>Module:end_per_suite(Config) -> term() |
{save_config,SaveConfig}</name>
- <fsummary>Test suite finalization. </fsummary>
+ <fsummary>Test suite finalization.</fsummary>
<type>
- <v> Config = SaveConfig = [{Key,Value}]</v>
- <v> Key = atom()</v>
- <v> Value = term()</v>
+ <v>Config = SaveConfig = [{Key,Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
</type>
-
+
<desc>
- <p> OPTIONAL </p>
+ <p>OPTIONAL</p>
<p>This function is called as the last test case in the
suite. It is meant to be used for cleaning up after
- <seealso marker="#Module:init_per_suite-1"><c>init_per_suite/1</c></seealso>.
- For information on <c>save_config</c>, please see
- <seealso marker="dependencies_chapter#save_config">Dependencies
- between Test Cases and Suites</seealso> in the User's Guide.</p>
+ <seealso marker="#Module:init_per_suite-1"><c>init_per_suite/1</c></seealso>.</p>
+ <p>For information on <c>save_config</c>, see section
+ <seealso marker="dependencies_chapter#save_config">Saving
+ Configuration Data</seealso> in the User's Guide.</p>
</desc>
</func>
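
A typical pairing of these two functions opens a shared resource once for the whole suite and closes it afterwards, as sketched here (my_db is a hypothetical helper module; ?config/2 comes from ct.hrl):

    init_per_suite(Config) ->
        {ok, Conn} = my_db:connect(),     %% hypothetical helper
        [{db_conn, Conn} | Config].       %% made available to all cases

    end_per_suite(Config) ->
        ok = my_db:disconnect(?config(db_conn, Config)).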
<func>
<name>Module:group(GroupName) -> [Info] </name>
- <fsummary>Test case group info function (providing default data
- for a test case group, i.e. its test cases and sub-groups).</fsummary>
+ <fsummary>Test case group information function (providing default data
+ for a test case group, that is, its test cases and
+ subgroups).</fsummary>
<type>
- <v> Info = {timetrap,Time} | {require,Required} |
- {require,Name,Required} | {userdata,UserData} |
- {silent_connections,Conns} | {stylesheet,CSSFile} |
- {ct_hooks, CTHs}</v>
- <v> Time = TimeVal | TimeFunc</v>
- <v> TimeVal = MilliSec | {seconds,integer()} | {minutes,integer()} |
- {hours,integer()}</v>
- <v> TimeFunc = {Mod,Func,Args} | Fun</v>
- <v> MilliSec = integer()</v>
- <v> Mod = atom()</v>
- <v> Func = atom()</v>
- <v> Args = list()</v>
- <v> Fun = fun()</v>
- <v> Required = Key | {Key,SubKeys} | {Key,Subkey} | {Key,Subkey,SubKeys}</v>
- <v> Key = atom()</v>
- <v> SubKeys = SubKey | [SubKey]</v>
- <v> SubKey = atom()</v>
- <v> Name = atom()</v>
- <v> UserData = term()</v>
- <v> Conns = [atom()]</v>
- <v> CSSFile = string()</v>
- <v> CTHs = [CTHModule |</v>
- <v> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs} |</v>
- <v> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs, CTHPriority}]</v>
- <v> CTHModule = atom()</v>
- <v> CTHInitArgs = term()</v>
- </type>
- <desc>
-
- <p> OPTIONAL </p>
-
- <p>This is the test case group info function. It is supposed to
+ <v>Info = {timetrap,Time} | {require,Required} | {require,Name,Required} | {userdata,UserData} | {silent_connections,Conns} | {stylesheet,CSSFile} | {ct_hooks, CTHs}</v>
+ <v>Time = TimeVal | TimeFunc</v>
+ <v>TimeVal = MilliSec | {seconds,integer()} | {minutes,integer()} | {hours,integer()}</v>
+ <v>TimeFunc = {Mod,Func,Args} | Fun</v>
+ <v>MilliSec = integer()</v>
+ <v>Mod = atom()</v>
+ <v>Func = atom()</v>
+ <v>Args = list()</v>
+ <v>Fun = fun()</v>
+ <v>Required = Key | {Key,SubKeys} | {Key,SubKey} | {Key,SubKey,SubKeys}</v>
+ <v>Key = atom()</v>
+ <v>SubKeys = SubKey | [SubKey]</v>
+ <v>SubKey = atom()</v>
+ <v>Name = atom()</v>
+ <v>UserData = term()</v>
+ <v>Conns = [atom()]</v>
+ <v>CSSFile = string()</v>
+ <v>CTHs = [CTHModule |</v>
+ <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs} |</v>
+ <v>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{CTHModule, CTHInitArgs, CTHPriority}]</v>
+ <v>CTHModule = atom()</v>
+ <v>CTHInitArgs = term()</v>
+ </type>
+ <desc>
+
+ <p>OPTIONAL</p>
+
+ <p>The test case group information function. It is supposed to
return a list of tagged tuples that specify various properties
- related to the execution of a test case group (i.e. its test cases
- and sub-groups). Properties set by
+ related to the execution of a test case group (that is, its test
+ cases and subgroups). Properties set by
<seealso marker="#Module:group-1"><c>group/1</c></seealso> override
- properties with the same key that have been previously set by
+ properties with the same key that have been set previously by
<seealso marker="#Module:suite-0"><c>suite/0</c></seealso>.</p>
- <p>The <c>timetrap</c> tag sets the maximum time each
- test case is allowed to execute (including <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
- and <seealso marker="#Module:end_per_testcase-2"><c>end_per_testcase/2</c></seealso>). If the timetrap time is
+ <p>Tag <c>timetrap</c> sets the maximum time that each
+ test case is allowed to execute (including
+ <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
+ and
+ <seealso marker="#Module:end_per_testcase-2"><c>end_per_testcase/2</c></seealso>).
+ If the timetrap time is
exceeded, the test case fails with reason
<c>timetrap_timeout</c>. A <c>TimeFunc</c> function can be used to
- set a new timetrap by returning a <c>TimeVal</c>. It may also be
- used to trigger a timetrap timeout by, at some point, returning a
- value other than a <c>TimeVal</c>. (See the
- <seealso marker="write_test_chapter#timetraps">User's Guide</seealso>
- for details).</p>
+ set a new timetrap by returning a <c>TimeVal</c>. It can also be
+ used to trigger a timetrap time-out by, at some point, returning a
+ value other than a <c>TimeVal</c>. For details, see section
+ <seealso marker="write_test_chapter#timetraps">Timetrap
+ Time-Outs</seealso> in the User's Guide.</p>
- <p>The <c>require</c> tag specifies configuration variables
- that are required by test cases (and/or configuration functions)
+ <p>Tag <c>require</c> specifies configuration variables
+ required by test cases (or configuration functions)
in the suite. If the required configuration variables are not found
- in any of the configuration files, all test cases in this group are skipped.
- For more information about the 'require' functionality, see the
- reference manual for the function
- <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso>.</p>
+ in any of the configuration files, all test cases in this group are
+ skipped. For details about the <c>require</c> functionality, see
+ function
+ <seealso marker="ct#require-1"><c>ct:require/1,2</c></seealso>.</p>
- <p>With <c>userdata</c>, it is possible for the user to
- specify arbitrary test case group related information which can be
- read by calling <seealso marker="ct#userdata-2"><c>ct:userdata/2</c></seealso>.</p>
+ <p>With <c>userdata</c>, the user can
+ specify any test case group related information that can be
+ read by calling
+ <seealso marker="ct#userdata-2"><c>ct:userdata/2</c></seealso>.</p>
- <p>The <c>ct_hooks</c> tag specifies which
+ <p>Tag <c>ct_hooks</c> specifies the
<seealso marker="ct_hooks_chapter">Common Test Hooks</seealso>
- are to be run together with this suite.</p>
-
- <p>Other tuples than the ones defined will simply be ignored.</p>
+ to be run with this suite.</p>
- <p>For more information about the test case group info function,
- see <seealso marker="write_test_chapter#suite">Test
- case group info function</seealso> in the User's Guide.</p>
+ <p>Other tuples than the ones defined are ignored.</p>
+
+ <p>For details about the test case group information function,
+ see section <seealso marker="write_test_chapter#group_info">Group
+ Information Function</seealso> in the User's Guide.</p>
</desc>
</func>
@@ -358,59 +352,66 @@
{skip,Reason}</name>
<fsummary>Test case group initializations.</fsummary>
<type>
- <v> GroupName = atom()</v>
- <v> Config = NewConfig = [{Key,Value}]</v>
- <v> Key = atom()</v>
- <v> Value = term()</v>
- <v> Reason = term()</v>
+ <v>GroupName = atom()</v>
+ <v>Config = NewConfig = [{Key,Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
<desc>
-
- <p> OPTIONAL </p>
-
+
+ <p>OPTIONAL</p>
+
<p>This configuration function is called before execution of a
- test case group. It typically contains initializations which are
- common for all test cases and sub-groups in the group, and which
- shall only be performed once. <c>GroupName</c> is the name of the
- group, as specified in the group definition (see <seealso marker="#Module:groups-0"><c>groups/0</c></seealso>). The
- <c>Config</c> parameter is the configuration data which can be modified
- here. The return value of this function is given as <c>Config</c>
- to all test cases and sub-groups in the group. If <c>{skip,Reason}</c>
- is returned, all test cases in the group will be skipped and
- <c>Reason</c> printed in the overview log for the group.</p>
-
- <p>For information about test case groups, please see
- <seealso marker="write_test_chapter#test_case_groups">Test case
- groups</seealso> chapter in the User's Guide.</p>
+ test case group. It typically contains initializations that are
+ common for all test cases and subgroups in the group, and that
+ must only be performed once. <c>GroupName</c> is the name of the
+ group, as specified in the group definition (see
+ <seealso marker="#Module:groups-0"><c>groups/0</c></seealso>).
+ Parameter <c>Config</c> is the configuration data that can be
+ modified.
+ The return value of this function is given as <c>Config</c>
+ to all test cases and subgroups in the group.</p>
+
+ <p>If <c>{skip,Reason}</c>
+ is returned, all test cases in the group are skipped and
+ <c>Reason</c> is printed in the overview log for the group.</p>
+
+ <p>For information about test case groups, see section
+ <seealso marker="write_test_chapter#test_case_groups">Test Case
+ Groups</seealso> in the User's Guide.</p>
</desc>
</func>
-
+
<func>
<name>Module:end_per_group(GroupName, Config) -> term() |
{return_group_result,Status}</name>
<fsummary>Test case group finalization.</fsummary>
<type>
- <v> GroupName = atom()</v>
- <v> Config = [{Key,Value}]</v>
- <v> Key = atom()</v>
- <v> Value = term()</v>
- <v> Status = ok | skipped | failed</v>
+ <v>GroupName = atom()</v>
+ <v>Config = [{Key,Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Status = ok | skipped | failed</v>
</type>
-
+
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called after the execution of a test case group is finished.
- It is meant to be used for cleaning up after <seealso marker="#Module:init_per_group-2"><c>init_per_group/2</c></seealso>.
- By means of <c>{return_group_result,Status}</c>, it is possible to return a
- status value for a nested sub-group. The status can be retrieved in
- <seealso marker="#Module:end_per_group-2"><c>end_per_group/2</c></seealso> for the group on the level above. The status will also
- be used by Common Test for deciding if execution of a group should proceed in
- case the property <c>sequence</c> or <c>repeat_until_*</c> is set.</p>
-
- <p>For more information about test case groups, please see
- <seealso marker="write_test_chapter#test_case_groups">Test case
- groups</seealso> chapter in the User's Guide.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called after the execution of a test case group
+ is finished. It is meant to be used for cleaning up after
+ <seealso marker="#Module:init_per_group-2"><c>init_per_group/2</c></seealso>.
+ A status value for a nested subgroup can be returned with
+ <c>{return_group_result,Status}</c>. The status can be retrieved in
+ <seealso marker="#Module:end_per_group-2"><c>end_per_group/2</c></seealso>
+ for the group on the level above. The status is also used by
+ <c>Common Test</c> for deciding if execution of a group is to
+ proceed if property <c>sequence</c> or <c>repeat_until_*</c>
+ is set.</p>
+
+ <p>For details about test case groups, see section
+ <seealso marker="write_test_chapter#test_case_groups">Test Case
+ Groups</seealso> in the User's Guide.</p>
</desc>
</func>
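
As an illustration, init_per_group/2 and end_per_group/2 can match on the group name, and a subgroup can report its status upwards (a sketch; the group name is hypothetical):

    init_per_group(db_tests, Config) ->
        [{db_mode, group} | Config];      %% group-specific setup
    init_per_group(_Group, Config) ->
        Config.

    end_per_group(db_tests, _Config) ->
        {return_group_result, ok};        %% retrievable one level above
    end_per_group(_Group, _Config) ->
        ok.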
@@ -424,168 +425,173 @@
<v> Value = term()</v>
<v> Reason = term()</v>
</type>
- <desc>
-
+ <desc>
+
<p>OPTIONAL</p>
-
- <p>This function is called before each test case. The
- <c>TestCase</c> argument is the name of the test case, and
+
+ <p>This function is called before each test case. Argument
+ <c>TestCase</c> is the test case name, and
<c>Config</c> (list of key-value tuples) is the configuration
- data that can be modified here. The <c>NewConfig</c> list returned
+ data that can be modified. The <c>NewConfig</c> list returned
from this function is given as <c>Config</c> to the test case.
If <c>{fail,Reason}</c> is returned, the test case is
- marked as failed without being executed. If <c>{skip,Reason}</c> is
- returned, the test case will be skipped and <c>Reason</c> printed
- in the overview log for the suite.</p>
+ marked as failed without being executed.</p>
+
+ <p>If <c>{skip,Reason}</c> is returned, the test case is skipped
+ and <c>Reason</c> is printed in the overview log for the suite.</p>
</desc>
</func>
-
+
<func>
<name>Module:end_per_testcase(TestCase, Config) -> term() | {fail,Reason} | {save_config,SaveConfig}</name>
<fsummary>Test case finalization.</fsummary>
<type>
- <v> TestCase = atom()</v>
- <v> Config = SaveConfig = [{Key,Value}]</v>
- <v> Key = atom()</v>
- <v> Value = term()</v>
- <v> Reason = term()</v>
+ <v>TestCase = atom()</v>
+ <v>Config = SaveConfig = [{Key,Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
<desc>
-
- <p> OPTIONAL </p>
-
- <p> This function is called after each test case, and can be used
- to clean up after <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso> and the test case.
- Any return value (besides <c>{fail,Reason}</c> and <c>{save_config,SaveConfig}</c>)
- is ignored. By returning <c>{fail,Reason}</c>, <c>TestCase</c> will be marked as
- failed (even though it was actually successful in the sense that it returned
- a value instead of terminating). For information on <c>save_config</c>, please see
- <seealso marker="dependencies_chapter#save_config">Dependencies between
- Test Cases and Suites</seealso> in the User's Guide</p>
+
+ <p>OPTIONAL</p>
+
+ <p>This function is called after each test case, and can be used
+ to clean up after
+ <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
+ and the test case. Any return value (besides <c>{fail,Reason}</c>
+ and <c>{save_config,SaveConfig}</c>) is ignored. By returning
+ <c>{fail,Reason}</c>, <c>TestCase</c> is marked as faulty (even
+ though it was successful in the sense that it returned
+ a value instead of terminating).</p>
+
+ <p>For information on <c>save_config</c>, see section
+ <seealso marker="dependencies_chapter#save_config">Saving
+ Configuration Data</seealso> in the User's Guide.</p>
</desc>
</func>
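
A sketch of a matching per-test-case init/end pair, including the {fail,Reason} return (the directory name is hypothetical):

    init_per_testcase(_TC, Config) ->
        ok = file:make_dir("ct_case_tmp"),    %% hypothetical per-case dir
        [{tmp_dir, "ct_case_tmp"} | Config].

    end_per_testcase(_TC, Config) ->
        case file:del_dir(proplists:get_value(tmp_dir, Config)) of
            ok              -> ok;
            {error, Reason} -> {fail, Reason} %% marks the case as failed
        end.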
-
+
<func>
<name>Module:Testcase() -> [Info] </name>
- <fsummary>Test case info function. </fsummary>
+ <fsummary>Test case information function.</fsummary>
<type>
- <v> Info = {timetrap,Time} | {require,Required} |
- {require,Name,Required} | {userdata,UserData} |
- {silent_connections,Conns}</v>
- <v> Time = TimeVal | TimeFunc</v>
- <v> TimeVal = MilliSec | {seconds,integer()} | {minutes,integer()} |
- {hours,integer()}</v>
- <v> TimeFunc = {Mod,Func,Args} | Fun</v>
- <v> MilliSec = integer()</v>
- <v> Mod = atom()</v>
- <v> Func = atom()</v>
- <v> Args = list()</v>
- <v> Fun = fun()</v>
- <v> Required = Key | {Key,SubKeys} | {Key,Subkey} | {Key,Subkey,SubKeys}</v>
- <v> Key = atom()</v>
- <v> SubKeys = SubKey | [SubKey]</v>
- <v> SubKey = atom()</v>
- <v> Name = atom()</v>
- <v> UserData = term()</v>
- <v> Conns = [atom()]</v>
+ <v>Info = {timetrap,Time} | {require,Required} | {require,Name,Required} | {userdata,UserData} | {silent_connections,Conns}</v>
+ <v>Time = TimeVal | TimeFunc</v>
+ <v>TimeVal = MilliSec | {seconds,integer()} | {minutes,integer()} | {hours,integer()}</v>
+ <v>TimeFunc = {Mod,Func,Args} | Fun</v>
+ <v>MilliSec = integer()</v>
+ <v>Mod = atom()</v>
+ <v>Func = atom()</v>
+ <v>Args = list()</v>
+ <v>Fun = fun()</v>
+ <v>Required = Key | {Key,SubKeys} | {Key,SubKey} | {Key,SubKey,SubKeys}</v>
+ <v>Key = atom()</v>
+ <v>SubKeys = SubKey | [SubKey]</v>
+ <v>SubKey = atom()</v>
+ <v>Name = atom()</v>
+ <v>UserData = term()</v>
+ <v>Conns = [atom()]</v>
</type>
-
- <desc>
+
+ <desc>
<p>OPTIONAL</p>
-
- <p>This is the test case info function. It is supposed to
+
+ <p>The test case information function. It is supposed to
return a list of tagged tuples that specify various properties
related to the execution of this particular test case.
- Properties set by <seealso marker="#Module:Testcase-0"><c>Testcase/0</c></seealso> override
- properties that have been previously set for the test case
- by <seealso marker="#Module:group-1"><c>group/1</c></seealso> or <seealso marker="#Module:suite-0"><c>suite/0</c></seealso>.</p>
-
- <p>The <c>timetrap</c> tag sets the maximum time the
+ Properties set by
+ <seealso marker="#Module:Testcase-0"><c>Testcase/0</c></seealso>
+ override properties set previously for the test case by
+ <seealso marker="#Module:group-1"><c>group/1</c></seealso> or
+ <seealso marker="#Module:suite-0"><c>suite/0</c></seealso>.</p>
+
+ <p>Tag <c>timetrap</c> sets the maximum time that the
test case is allowed to execute. If the timetrap time is
- exceeded, the test case fails with reason
- <c>timetrap_timeout</c>. <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
- and <seealso marker="#Module:end_per_testcase-2"><c>end_per_testcase/2</c></seealso> are included in the
- timetrap time. A <c>TimeFunc</c> function can be used to
- set a new timetrap by returning a <c>TimeVal</c>. It may also be
- used to trigger a timetrap timeout by, at some point, returning a
- value other than a <c>TimeVal</c>. (See the
- <seealso marker="write_test_chapter#timetraps">User's Guide</seealso>
- for details).</p>
+ exceeded, the test case fails with reason <c>timetrap_timeout</c>.
+ <seealso marker="#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
+ and
+ <seealso marker="#Module:end_per_testcase-2"><c>end_per_testcase/2</c></seealso>
+ are included in the timetrap time.
+ A <c>TimeFunc</c> function can be used to
+ set a new timetrap by returning a <c>TimeVal</c>. It can also be
+ used to trigger a timetrap time-out by, at some point, returning a
+ value other than a <c>TimeVal</c>. For details, see section
+ <seealso marker="write_test_chapter#timetraps">Timetrap
+ Time-Outs</seealso> in the User's Guide.</p>
- <p>The <c>require</c> tag specifies configuration variables
- that are required by the test case (and/or <c>init/end_per_testcase/2</c>).
+ <p>Tag <c>require</c> specifies configuration variables
+ that are required by the test case (or <c>init_per_testcase/2</c>
+ or <c>end_per_testcase/2</c>).
If the required configuration variables are not found in any of the
- configuration files, the test case is skipped. For more
- information about the 'require' functionality, see the
- reference manual for the function
- <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso>.</p>
-
- <p>If <c>timetrap</c> and/or <c>require</c> is not set, the
- default values specified by <seealso marker="#Module:suite-0"><c>suite/0</c></seealso> (or
- <seealso marker="#Module:group-1"><c>group/1</c></seealso>) will be used.</p>
-
- <p>With <c>userdata</c>, it is possible for the user to
- specify arbitrary test case related information which can be
- read by calling <seealso marker="ct#userdata-3"><c>ct:userdata/3</c></seealso>.</p>
-
- <p>Other tuples than the ones defined will simply be ignored.</p>
+ configuration files, the test case is skipped. For details about
+ the <c>require</c> functionality, see function
+ <seealso marker="ct#require-1"><c>ct:require/1,2</c></seealso>.</p>
+
+ <p>If <c>timetrap</c> or <c>require</c> is not set, the
+ default values specified by
+ <seealso marker="#Module:suite-0"><c>suite/0</c></seealso> (or
+ <seealso marker="#Module:group-1"><c>group/1</c></seealso>) are used.</p>
+
+ <p>With <c>userdata</c>, the user can specify any test case-related
+ information that can be read by calling
+ <seealso marker="ct#userdata-3"><c>ct:userdata/3</c></seealso>.</p>
+
+           <p>Tuples other than the ones defined are ignored.</p>
- <p>For more information about the test case info function,
- see <seealso marker="write_test_chapter#info_function">Test
- case info function</seealso> in the User's Guide.</p>
+ <p>For details about the test case information function, see section
+ <seealso marker="write_test_chapter#info_function">Test
+ Case Information Function</seealso> in the User's Guide.</p>
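+
+         <p>For example, an information function for a hypothetical
+           test case <c>my_case</c>, combining some of the tags above,
+           can look as follows:</p>
+
+         <pre>
+ my_case() ->
+     [{timetrap,{seconds,30}},
+      {require,ftp_host},
+      {userdata,{doc,"Transfers a file over FTP"}}].</pre>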
</desc>
</func>
-
-
+
<func>
<name>Module:Testcase(Config) -> term() | {skip,Reason} | {comment,Comment} | {save_config,SaveConfig} | {skip_and_save,Reason,SaveConfig} | exit() </name>
- <fsummary>A test case</fsummary>
+ <fsummary>A test case.</fsummary>
<type>
- <v> Config = SaveConfig = [{Key,Value}]</v>
- <v> Key = atom()</v>
- <v> Value = term()</v>
- <v> Reason = term()</v>
- <v> Comment = string()</v>
+ <v>Config = SaveConfig = [{Key,Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
+ <v>Comment = string()</v>
</type>
-
+
<desc>
- <p> MANDATORY </p>
-
- <p>This is the implementation of a test case. Here you must
- call the functions you want to test, and do whatever you
- need to check the result. If something fails, make sure the
- function causes a runtime error, or call <seealso marker="ct#fail-1"><c>ct:fail/1/2</c></seealso>
+ <p>MANDATORY</p>
+
+ <p>The implementation of a test case. Call the functions to test and
+ check the result. If something fails, ensure the
+ function causes a runtime error or call
+ <seealso marker="ct#fail-1"><c>ct:fail/1,2</c></seealso>
(which also causes the test case process to terminate).</p>
-
- <p>Elements from the <c>Config</c> list can e.g. be read
- with <c>proplists:get_value/2</c> (or the macro <c>?config</c>
- defined in <c>ct.hrl</c>).</p>
- <p>You can return <c>{skip,Reason}</c> if you decide not to
- run the test case after all. <c>Reason</c> will then be
- printed in 'Comment' field on the HTML result page.</p>
-
- <p>You can return <c>{comment,Comment}</c> if you wish to
- print some information in the 'Comment' field on the HTML
- result page.</p>
-
- <p>If the function returns anything else, the test case is
- considered successful. (The return value always gets printed
- in the test case log file).</p>
+ <p>Elements from the <c>Config</c> list can, for example, be read
+ with <c>proplists:get_value/2</c> in <c>STDLIB</c>
+ (or the macro <c>?config</c> defined in <c>ct.hrl</c>).</p>
+
+ <p>If you decide not to run the test case after all, return
+ <c>{skip,Reason}</c>. <c>Reason</c> is then
+ printed in field <c>Comment</c> on the HTML result page.</p>
+
+ <p>To print some information in field <c>Comment</c> on the HTML
+ result page, return <c>{comment,Comment}</c>.</p>
- <p>For more information about test case implementation, please
- see <seealso marker="write_test_chapter#test_cases">Test
- cases</seealso> in the User's Guide.</p>
+ <p>If the function returns anything else, the test case is
+ considered successful. The return value always gets printed
+ in the test case log file.</p>
- <p>For information on <c>save_config</c> and <c>skip_and_save</c>, please see
- <seealso marker="dependencies_chapter#save_config">Dependencies between
- Test Cases and Suites</seealso> in the User's Guide.</p>
+ <p>For details about test case implementation, see section
+ <seealso marker="write_test_chapter#test_cases">Test Cases</seealso>
+ in the User's Guide.</p>
+
+ <p>For information on <c>save_config</c> and <c>skip_and_save</c>,
+ see section
+ <seealso marker="dependencies_chapter#save_config">Saving
+ Configuration Data</seealso> in the User's Guide.</p>
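+
+         <p>For example, a sketch of a test case that fails through a
+           runtime error (a failed match) on an unexpected result, and
+           otherwise prints a comment on the HTML result page. The
+           function <c>sut:get_status/0</c> is a hypothetical call into
+           the system under test:</p>
+
+         <pre>
+ my_case(_Config) ->
+     %% A result other than ok causes a badmatch runtime error,
+     %% which fails the test case
+     ok = sut:get_status(),
+     {comment,"Status check passed"}.</pre>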
</desc>
</func>
-
+
</funcs>
</erlref>
-
diff --git a/lib/common_test/doc/src/config_file_chapter.xml b/lib/common_test/doc/src/config_file_chapter.xml
index c7fd6e0b28..62ebfccb98 100644
--- a/lib/common_test/doc/src/config_file_chapter.xml
+++ b/lib/common_test/doc/src/config_file_chapter.xml
@@ -35,18 +35,18 @@
<section>
<title>General</title>
- <p>To avoid hard coding data values related to the test and/or SUT (System
- Under Test) in the test suites, the data may instead be specified by means
- of configuration files or strings that Common Test reads before
+ <p>To avoid hard-coding data values related to the test and/or System
+ Under Test (SUT) in the test suites, the data can instead be specified through
+ configuration files or strings that <c>Common Test</c> reads before
the start of a test run. External configuration data makes it possible to
- change test properties without having to modify the actual test suites
- using the data. Examples of configuration data:</p>
+ change test properties without modifying the test suites
+      using the data. Examples of configuration data follow:</p>
- <list>
+ <list type="bulleted">
<item>Addresses to the test plant or other instruments</item>
<item>User login information</item>
<item>Names of files needed by the test</item>
- <item>Names of programs that should be executed during the test</item>
+ <item>Names of programs to be executed during the test</item>
<item>Any other variable needed by the test</item>
</list>
@@ -57,154 +57,150 @@
<p>A configuration file can contain any number of elements of the type:</p>
<pre>
- {CfgVarName,Value}.</pre>
+ {CfgVarName,Value}.</pre>
<p>where</p>
<pre>
- CfgVarName = atom()
- Value = term() | [{CfgVarName,Value}]</pre>
+ CfgVarName = atom()
+ Value = term() | [{CfgVarName,Value}]</pre>
</section>
<section>
- <title>Requiring and reading configuration data</title>
+ <title>Requiring and Reading Configuration Data</title>
<marker id="require_config_data"></marker>
<p>In a test suite, one must <em>require</em> that a configuration
- variable (<c>CfgVarName</c> in the definition above) exists before
- attempting to read the associated value in a test case or config function.</p>
-
- <p><c>require</c> is an assert statement that can be part of the <seealso
- marker="write_test_chapter#suite">test suite info function</seealso> or
- <seealso marker="write_test_chapter#info_function">test case info
- function</seealso>. If the required variable is not available, the
- test is skipped (unless a default value has been specified, see the
- <seealso marker="write_test_chapter#info_function">test case info
- function</seealso> chapter for details). There is also a function
- <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso> which can be called from a test case
- in order to check if a specific variable is available. The return
+ variable (<c>CfgVarName</c> in the previous definition) exists before
+ attempting to read the associated value in a test case or configuration function.</p>
+
+ <p><c>require</c> is an assert statement, which can be part of the <seealso
+ marker="write_test_chapter#suite">Test Suite Information Function</seealso> or
+ <seealso marker="write_test_chapter#info_function">Test Case Information
+ Function</seealso>. If the required variable is unavailable, the
+ test is skipped (unless a default value has been specified, see section
+ <seealso marker="write_test_chapter#info_function">Test Case Information
+ Function</seealso> for details). Also, function
+ <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso> can be called
+ from a test case to check if a specific variable is available. The return
value from this function must be checked explicitly and appropriate
- action be taken depending on the result (e.g. to skip the test case
- if the variable in question doesn't exist).</p>
+ action be taken depending on the result (for example, to skip the test case
+ if the variable in question does not exist).</p>
- <p>A <c>require</c> statement in the test suite info- or test case
- info-list should look like this:
+    <p>A <c>require</c> statement in the test suite information function or test case
+    information list looks like
<c>{require,CfgVarName}</c> or <c>{require,AliasName,CfgVarName}</c>.
The arguments <c>AliasName</c> and <c>CfgVarName</c> are the same as the
- arguments to <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso> which are described in the
- reference manual for <seealso marker="ct">ct</seealso>.
+ arguments to <seealso marker="ct#require-1"><c>ct:require/1,2</c></seealso>.
<c>AliasName</c> becomes an alias for the configuration variable,
and can be used as reference to the configuration data value.
- The configuration variable may be associated with an
- arbitrary number of alias names, but each name must be unique within
- the same test suite. There are two main uses for alias names:</p>
- <list>
- <item>They may be introduced to identify connections (see below).</item>
- <item>They may used to help adapt configuration data to a test suite
+ The configuration variable can be associated with any
+ number of alias names, but each name must be unique within
+    the same test suite. The two main uses for alias names are as follows:</p>
+ <list type="bulleted">
+ <item>To identify connections (described later).</item>
+ <item>To help adapt configuration data to a test suite
(or test case) and improve readability.</item>
</list>
- <p>To read the value of a config variable, use the function
- <seealso marker="ct#get_config-1"><c>get_config/1/2/3</c></seealso>
- which is also described in the reference
- manual for <seealso marker="ct">ct</seealso>.</p>
- <p>Example:</p>
+ <p>To read the value of a configuration variable, use function
+ <seealso marker="ct#get_config-1"><c>get_config/1,2,3</c></seealso>.
+ </p>
+ <p><em>Example:</em></p>
<pre>
- suite() ->
- [{require, domain, 'CONN_SPEC_DNS_SUFFIX'}].
+ suite() ->
+ [{require, domain, 'CONN_SPEC_DNS_SUFFIX'}].
- ...
-
- testcase(Config) ->
- Domain = ct:get_config(domain),
- ...</pre>
+ ...
+
+ testcase(Config) ->
+ Domain = ct:get_config(domain),
+ ...</pre>
</section>
<section>
- <title>Using configuration variables defined in multiple files</title>
+ <title>Using Configuration Variables Defined in Multiple Files</title>
<p>If a configuration variable is defined in multiple files and you
- want to access all possible values, you may use the <seealso marker="ct#get_config-3"><c>ct:get_config/3</c></seealso>
- function and specify <c>all</c> in the options list. The values will then
- be returned in a list and the order of the elements corresponds to the order
- that the config files were specified at startup. Please see
- the <seealso marker="ct">ct</seealso> reference manual for details.</p>
+ want to access all possible values, use function
+ <seealso marker="ct#get_config-3"><c>ct:get_config/3</c></seealso>
+ and specify <c>all</c> in the options list. The values are then
+ returned in a list and the order of the elements corresponds to the order
+ that the configuration files were specified at startup.</p>
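+    <p>For example, if a hypothetical variable <c>host</c> is defined
+      in more than one configuration file, all values can be read
+      as follows:</p>
+    <pre>
+    testcase(Config) ->
+        AllHosts = ct:get_config(host, undefined, [all]),
+        ...</pre>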
</section>
<section>
- <title>Encrypted configuration files</title>
+ <title>Encrypted Configuration Files</title>
<marker id="encrypted_config_files"></marker>
- <p>It is possible to encrypt configuration files containing sensitive data
- if these files must be stored in open and shared directories.</p>
- <p>Call <seealso marker="ct#encrypt_config_file-2"><c>ct:encrypt_config_file/2/3</c></seealso> to have Common Test encrypt a
- specified file using the DES3 function in the OTP <c>crypto</c> application.
- The encrypted file can then be used as a regular configuration file,
- in combination with other encrypted files or normal text files. The key
- for decrypting the configuration file must be provided when running the test,
- however. This can be done by means of the <c>decrypt_key</c> or
- <c>decrypt_file</c> flag/option, or a key file in a predefined location.</p>
+ <p>Configuration files containing sensitive data can be encrypted
+ if they must be stored in open and shared directories.</p>
+ <p>To have <c>Common Test</c> encrypt a
+      specified file using the DES3 algorithm in application <c>Crypto</c>,
+      call <seealso marker="ct#encrypt_config_file-2"><c>ct:encrypt_config_file/2,3</c></seealso>.
+ The encrypted file can then be used as a regular configuration file
+ in combination with other encrypted files or normal text files. However, the
+ key for decrypting the configuration file must be provided when running the test.
+ This can be done with flag/option <c>decrypt_key</c> or
+ <c>decrypt_file</c>, or a key file in a predefined location.</p>
- <p>Common Test also provides decryption functions,
- <seealso marker="ct#decrypt_config_file-2"><c>ct:decrypt_config_file/2/3</c></seealso>, for recreating the original text
- files.</p>
-
- <p>Please see the <seealso marker="ct">ct</seealso> reference manual for
- more information.</p>
+ <p><c>Common Test</c> also provides decryption functions,
+ <seealso marker="ct#decrypt_config_file-2"><c>ct:decrypt_config_file/2,3</c></seealso>,
+ for recreating the original text files.</p>
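+
+    <p>For example, encrypting and later decrypting a configuration file
+      from the Erlang shell, with a hypothetical file name and key, can
+      look as follows:</p>
+    <pre>
+    1> ct:encrypt_config_file("ftp.cfg", "ftp.cfg.crypt", {key,"secret-key"}).
+    ok
+    2> ct:decrypt_config_file("ftp.cfg.crypt", "ftp.cfg", {key,"secret-key"}).
+    ok</pre>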
</section>
<section>
- <title>Opening connections by using configuration data</title>
- <p>There are two different methods for opening a connection
- by means of the support functions in e.g. <seealso marker="ct_ssh"><c>ct_ssh</c></seealso>, <seealso marker="ct_ftp"><c>ct_ftp</c></seealso>,
- and <seealso marker="ct_telnet"><c>ct_telnet</c></seealso>:</p>
- <list>
+ <title>Opening Connections Using Configuration Data</title>
+ <p>Two different methods for opening a connection using the support functions
+ in, for example, <seealso marker="ct_ssh"><c>ct_ssh</c></seealso>,
+ <seealso marker="ct_ftp"><c>ct_ftp</c></seealso>, and
+ <seealso marker="ct_telnet"><c>ct_telnet</c></seealso> follows:</p>
+ <list type="bulleted">
<item>Using a configuration target name (an alias) as reference.</item>
<item>Using the configuration variable as reference.</item>
</list>
<p>When a target name is used for referencing the configuration data
- (that specifies the connection to be opened), the same name may be used
+ (that specifies the connection to be opened), the same name can be used
as connection identity in all subsequent calls related to the connection
- (also for closing it). It's only possible to have one open connection
- per target name. If attempting to open a new connection using a name
- already associated with an open connection, Common Test will
- return the already existing handle so that the previously opened connection
- will be used. This is a practical feature since it makes it possible to
+ (also for closing it). Only one open connection per target name
+ is possible. If you attempt to open a new connection using a name
+ already associated with an open connection, <c>Common Test</c>
+ returns the already existing handle so the previously opened connection
+ is used. This feature makes it possible to
call the function for opening a particular connection whenever
- useful. An action like this will not necessarily open any new
- connections unless it's required (which could be the case if e.g. the
- previous connection has been closed unexpectedly by the server).
- Another benefit of using named connections is that it's not
- necessary to pass handle references around in the suite for these
- connections.
+ useful. An action like this does not necessarily open any new
+ connections unless it is required (which could be the case if, for example,
+ the previous connection has been closed unexpectedly by the server).
+ Using named connections also removes the need to pass handle references
+ around in the suite for these connections.
</p>
<p>When a configuration variable name is used as reference to the data
specifying the connection, the handle returned as a result of opening
the connection must be used in all subsequent calls (also for closing
the connection). Repeated calls to the open function with the same
- variable name as reference will result in multiple connections
- being opened. This can be useful e.g. if a test case needs to open
+ variable name as reference results in multiple connections being opened.
+ This can be useful, for example, if a test case needs to open
multiple connections to the same server on the target node (using the
same configuration data for each connection).
</p>
</section>
<section>
- <title>User specific configuration data formats</title>
+ <title>User-Specific Configuration Data Formats</title>
- <p>It is possible for the user to specify configuration data on a
+    <p>The user can specify configuration data in a
different format than key-value tuples in a text file, as described
- so far. The data can e.g. be read from arbitrary files, fetched from
- the web over http, or requested from a user specific process.
- To support this, Common Test provides a callback module plugin
+ so far. The data can, for example, be read from any files, fetched from
+ the web over HTTP, or requested from a user-specific process.
+ To support this, <c>Common Test</c> provides a callback module plugin
mechanism to handle configuration data.</p>
<section>
- <title>Default callback modules for handling configuration data</title>
- <p>The Common Test application includes default callback modules
- for handling configuration data specified in standard config files
- (see above) and in xml files:</p>
- <list>
+ <title>Default Callback Modules for Handling Configuration Data</title>
+ <p><c>Common Test</c> includes default callback modules
+ for handling configuration data specified in standard configuration files
+ (described earlier) and in XML files as follows:</p>
+ <list type="bulleted">
<item>
<c>ct_config_plain</c> - for reading configuration files with
- key-value tuples (standard format). This handler will be used to
+ key-value tuples (standard format). This handler is used to
parse configuration files if no user callback is specified.
</item>
<item>
@@ -215,59 +211,59 @@
</section>
<section>
- <title>Using XML configuration files</title>
- <p>This is an example of an XML configuration file:</p>
- <pre><![CDATA[
-<config>
+ <title>Using XML Configuration Files</title>
+ <p>An example of an XML configuration file follows:</p>
+ <pre>
+ <![CDATA[
+ <config>
<ftp_host>
<ftp>"targethost"</ftp>
<username>"tester"</username>
<password>"letmein"</password>
</ftp_host>
<lm_directory>"/test/loadmodules"</lm_directory>
-</config>]]></pre>
+ </config>]]></pre>
- <p>This configuration file, once read, will produce the same configuration
+ <p>Once read, this file produces the same configuration
variables as the following text file:</p>
<pre>
-{ftp_host, [{ftp,"targethost"},
- {username,"tester"},
- {password,"letmein"}]}.
+ {ftp_host, [{ftp,"targethost"},
+ {username,"tester"},
+ {password,"letmein"}]}.
-{lm_directory, "/test/loadmodules"}.</pre>
+ {lm_directory, "/test/loadmodules"}.</pre>
</section>
<section>
- <title>How to implement a user specific handler</title>
+ <title>Implement a User-Specific Handler</title>
- <p>The user specific handler can be written to handle special
+ <p>The user-specific handler can be written to handle special
configuration file formats. The parameter can be either file
- name(s) or configuration string(s) (the empty list is valid).</p>
+ names or configuration strings (the empty list is valid).</p>
<p>The callback module implementing the handler is responsible for
- checking correctness of configuration strings.</p>
+ checking the correctness of configuration strings.</p>
- <p>To perform validation of the configuration strings, the callback module
- should have the following function exported:</p>
+ <p>To validate the configuration strings, the callback module
+ is to have function <c>Callback:check_parameter/1</c> exported.</p>
- <p><c>Callback:check_parameter/1</c></p>
- <p>The input argument will be passed from Common Test, as defined in the test
- specification or given as an option to <c>ct_run</c> or <c>ct:run_test</c>.</p>
+ <p>The input argument is passed from <c>Common Test</c>, as defined in the test
+ specification, or specified as an option to <c>ct_run</c> or <c>ct:run_test</c>.</p>
- <p>The return value should be any of the following values indicating if given
+ <p>The return value is to be any of the following values, indicating if the specified
configuration parameter is valid:</p>
- <list>
+ <list type="bulleted">
<item>
- <c>{ok, {file, FileName}}</c> - parameter is a file name and
- the file exists,
+ <c>{ok, {file, FileName}}</c> - the parameter is a file name and
+ the file exists.
</item>
<item>
- <c>{ok, {config, ConfigString}}</c> - parameter is a config string
- and it is correct,
+ <c>{ok, {config, ConfigString}}</c> - the parameter is a configuration string
+ and it is correct.
</item>
<item>
- <c>{error, {nofile, FileName}}</c> - there is no file with the given
- name in the current directory,
+ <c>{error, {nofile, FileName}}</c> - there is no file with the specified
+ name in the current directory.
</item>
<item>
<c>{error, {wrong_config, ConfigString}}</c> - the configuration string
@@ -275,196 +271,196 @@
</item>
</list>
- <p>To perform reading of configuration data - initially before the tests
- start, or as a result of data being reloaded during test execution -
- the following function should be exported from the callback module:</p>
-
- <p><c>Callback:read_config/1</c></p>
+ <p>The function <c>Callback:read_config/1</c> is to be exported from the
+ callback module to read configuration data, initially before the tests
+ start, or as a result of data being reloaded during test execution.
+ The input argument is the same as for function <c>check_parameter/1</c>.</p>
- <p>The input argument is the same as for the <c>check_parameter/1</c> function.</p>
- <p>The return value should be either:</p>
+ <p>The return value is to be either of the following:</p>
- <list>
+ <list type="bulleted">
<item>
- <c>{ok, Config}</c> - if the configuration variables are read successfully,
+ <c>{ok, Config}</c> - if the configuration variables are read successfully.
</item>
<item>
<c>{error, {Error, ErrorDetails}}</c> - if the callback module fails to
- proceed with the given configuration parameters.
+ proceed with the specified configuration parameters.
</item>
</list>
<p><c>Config</c> is the proper Erlang key-value list, with possible
- key-value sublists as values, like for the configuration file
- example above:</p>
+ key-value sublists as values, like the earlier configuration file
+ example:</p>
<pre>
- [{ftp_host, [{ftp, "targethost"}, {username, "tester"}, {password, "letmein"}]},
- {lm_directory, "/test/loadmodules"}]</pre>
+ [{ftp_host, [{ftp, "targethost"}, {username, "tester"}, {password, "letmein"}]},
+ {lm_directory, "/test/loadmodules"}]</pre>
</section>
</section>
<section>
- <title>Examples of configuration data handling</title>
+ <title>Examples of Configuration Data Handling</title>
- <p>A config file for using the FTP client to access files on a remote
- host could look like this:</p>
+ <p>A configuration file for using the FTP client to access files on a remote
+ host can look as follows:</p>
<pre>
- {ftp_host, [{ftp,"targethost"},
- {username,"tester"},
- {password,"letmein"}]}.
+ {ftp_host, [{ftp,"targethost"},
+ {username,"tester"},
+ {password,"letmein"}]}.
- {lm_directory, "/test/loadmodules"}.</pre>
+ {lm_directory, "/test/loadmodules"}.</pre>
- <p>The XML version shown in the chapter above can also be used, but it should be
+ <p>The XML version shown earlier can also be used, but it is to be
explicitly specified that the <c>ct_config_xml</c> callback module is to be
- used by Common Test.</p>
+ used by <c>Common Test</c>.</p>
- <p>Example of how to assert that the configuration data is available and
- use it for an FTP session:</p>
+ <p>The following is an example of how to assert that the configuration data is available
+ and can be used for an FTP session:</p>
<pre>
- init_per_testcase(ftptest, Config) ->
- {ok,_} = ct_ftp:open(ftp),
- Config.
-
- end_per_testcase(ftptest, _Config) ->
- ct_ftp:close(ftp).
-
- ftptest() ->
- [{require,ftp,ftp_host},
- {require,lm_directory}].
-
- ftptest(Config) ->
- Remote = filename:join(ct:get_config(lm_directory), "loadmodX"),
- Local = filename:join(?config(priv_dir,Config), "loadmodule"),
- ok = ct_ftp:recv(ftp, Remote, Local),
- ...</pre>
+ init_per_testcase(ftptest, Config) ->
+ {ok,_} = ct_ftp:open(ftp),
+ Config.
+
+ end_per_testcase(ftptest, _Config) ->
+ ct_ftp:close(ftp).
+
+ ftptest() ->
+ [{require,ftp,ftp_host},
+ {require,lm_directory}].
+
+ ftptest(Config) ->
+ Remote = filename:join(ct:get_config(lm_directory), "loadmodX"),
+ Local = filename:join(?config(priv_dir,Config), "loadmodule"),
+ ok = ct_ftp:recv(ftp, Remote, Local),
+ ...</pre>
- <p>An example of how the above functions could be rewritten
- if necessary to open multiple connections to the FTP server:</p>
+ <p>The following is an example of how the functions in the previous example
+ can be rewritten if it is necessary to open multiple connections to the
+ FTP server:</p>
<pre>
- init_per_testcase(ftptest, Config) ->
- {ok,Handle1} = ct_ftp:open(ftp_host),
- {ok,Handle2} = ct_ftp:open(ftp_host),
- [{ftp_handles,[Handle1,Handle2]} | Config].
-
- end_per_testcase(ftptest, Config) ->
- lists:foreach(fun(Handle) -> ct_ftp:close(Handle) end,
- ?config(ftp_handles,Config)).
-
- ftptest() ->
- [{require,ftp_host},
- {require,lm_directory}].
-
- ftptest(Config) ->
- Remote = filename:join(ct:get_config(lm_directory), "loadmodX"),
- Local = filename:join(?config(priv_dir,Config), "loadmodule"),
- [Handle | MoreHandles] = ?config(ftp_handles,Config),
- ok = ct_ftp:recv(Handle, Remote, Local),
- ...</pre>
+ init_per_testcase(ftptest, Config) ->
+ {ok,Handle1} = ct_ftp:open(ftp_host),
+ {ok,Handle2} = ct_ftp:open(ftp_host),
+ [{ftp_handles,[Handle1,Handle2]} | Config].
+
+ end_per_testcase(ftptest, Config) ->
+ lists:foreach(fun(Handle) -> ct_ftp:close(Handle) end,
+ ?config(ftp_handles,Config)).
+
+ ftptest() ->
+ [{require,ftp_host},
+ {require,lm_directory}].
+
+ ftptest(Config) ->
+ Remote = filename:join(ct:get_config(lm_directory), "loadmodX"),
+ Local = filename:join(?config(priv_dir,Config), "loadmodule"),
+ [Handle | MoreHandles] = ?config(ftp_handles,Config),
+ ok = ct_ftp:recv(Handle, Remote, Local),
+ ...</pre>
</section>
<section>
- <title>Example of user specific configuration handler</title>
- <p>A simple configuration handling driver which will ask an external server for
- configuration data can be implemented this way:</p>
+ <title>Example of User-Specific Configuration Handler</title>
+ <p>A simple configuration handling driver, asking an external server for
+ configuration data, can be implemented as follows:</p>
<pre>
--module(config_driver).
--export([read_config/1, check_parameter/1]).
-
-read_config(ServerName)->
- ServerModule = list_to_atom(ServerName),
- ServerModule:start(),
- ServerModule:get_config().
-
-check_parameter(ServerName)->
- ServerModule = list_to_atom(ServerName),
- case code:is_loaded(ServerModule) of
- {file, _}->
- {ok, {config, ServerName}};
- false->
- case code:load_file(ServerModule) of
- {module, ServerModule}->
- {ok, {config, ServerName}};
- {error, nofile}->
- {error, {wrong_config, "File not found: " ++ ServerName ++ ".beam"}}
- end
- end.</pre>
-
- <p>The configuration string for this driver may be "config_server", if the
- config_server.erl module below is compiled and exists in the code path
+ -module(config_driver).
+ -export([read_config/1, check_parameter/1]).
+
+ read_config(ServerName)->
+ ServerModule = list_to_atom(ServerName),
+ ServerModule:start(),
+ ServerModule:get_config().
+
+ check_parameter(ServerName)->
+ ServerModule = list_to_atom(ServerName),
+ case code:is_loaded(ServerModule) of
+ {file, _}->
+ {ok, {config, ServerName}};
+ false->
+ case code:load_file(ServerModule) of
+ {module, ServerModule}->
+ {ok, {config, ServerName}};
+ {error, nofile}->
+ {error, {wrong_config, "File not found: " ++ ServerName ++ ".beam"}}
+ end
+ end.</pre>
+
+ <p>The configuration string for this driver can be <c>config_server</c>, if the
+ <c>config_server.erl</c> module that follows is compiled and exists in the code path
during test execution:</p>
<pre>
--module(config_server).
--export([start/0, stop/0, init/1, get_config/0, loop/0]).
-
--define(REGISTERED_NAME, ct_test_config_server).
-
-start()->
- case whereis(?REGISTERED_NAME) of
- undefined->
- spawn(?MODULE, init, [?REGISTERED_NAME]),
- wait();
- _Pid->
- ok
- end,
- ?REGISTERED_NAME.
-
-init(Name)->
- register(Name, self()),
- loop().
-
-get_config()->
- call(self(), get_config).
-
-stop()->
- call(self(), stop).
-
-call(Client, Request)->
- case whereis(?REGISTERED_NAME) of
- undefined->
- {error, {not_started, Request}};
- Pid->
- Pid ! {Client, Request},
- receive
- Reply->
- {ok, Reply}
- after 4000->
- {error, {timeout, Request}}
- end
- end.
-
-loop()->
- receive
- {Pid, stop}->
- Pid ! ok;
- {Pid, get_config}->
- {D,T} = erlang:localtime(),
- Pid !
- [{localtime, [{date, D}, {time, T}]},
- {node, erlang:node()},
- {now, erlang:now()},
- {config_server_pid, self()},
- {config_server_vsn, ?vsn}],
- ?MODULE:loop()
- end.
-
-wait()->
- case whereis(?REGISTERED_NAME) of
- undefined->
- wait();
- _Pid->
- ok
- end.</pre>
-
- <p>In this example, the handler also provides the ability to dynamically reload
- configuration variables. If <c>ct:reload_config(localtime)</c> is called from
+ -module(config_server).
+ -export([start/0, stop/0, init/1, get_config/0, loop/0]).
+
+ -define(REGISTERED_NAME, ct_test_config_server).
+
+ start()->
+ case whereis(?REGISTERED_NAME) of
+ undefined->
+ spawn(?MODULE, init, [?REGISTERED_NAME]),
+ wait();
+ _Pid->
+ ok
+ end,
+ ?REGISTERED_NAME.
+
+ init(Name)->
+ register(Name, self()),
+ loop().
+
+ get_config()->
+ call(self(), get_config).
+
+ stop()->
+ call(self(), stop).
+
+ call(Client, Request)->
+ case whereis(?REGISTERED_NAME) of
+ undefined->
+ {error, {not_started, Request}};
+ Pid->
+ Pid ! {Client, Request},
+ receive
+ Reply->
+ {ok, Reply}
+ after 4000->
+ {error, {timeout, Request}}
+ end
+ end.
+
+ loop()->
+ receive
+ {Pid, stop}->
+ Pid ! ok;
+ {Pid, get_config}->
+ {D,T} = erlang:localtime(),
+ Pid !
+ [{localtime, [{date, D}, {time, T}]},
+ {node, erlang:node()},
+ {now, erlang:now()},
+ {config_server_pid, self()},
+ {config_server_vsn, ?vsn}],
+ ?MODULE:loop()
+ end.
+
+ wait()->
+ case whereis(?REGISTERED_NAME) of
+ undefined->
+ wait();
+ _Pid->
+ ok
+ end.</pre>
+
+    <p>Here, the handler also provides for dynamic reloading of
+ configuration variables. If
+ <seealso marker="ct#reload_config-1"><c>ct:reload_config(localtime)</c></seealso> is called from
the test case function, all variables loaded with <c>config_driver:read_config/1</c>
- will be updated with their latest values, and the new value for variable
- <c>localtime</c> will be returned.</p>
+ are updated with their latest values, and the new value for variable
+ <c>localtime</c> is returned.</p>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/cover_chapter.xml b/lib/common_test/doc/src/cover_chapter.xml
index f164fff0ad..094aa7d80c 100644
--- a/lib/common_test/doc/src/cover_chapter.xml
+++ b/lib/common_test/doc/src/cover_chapter.xml
@@ -33,256 +33,251 @@
<section>
<marker id="cover"></marker>
<title>General</title>
- <p>Although Common Test was created primarly for the purpose of
- black box testing, nothing prevents it from working perfectly as
- a white box testing tool as well. This is especially true when
+ <p>Although <c>Common Test</c> was created primarily for
+ black-box testing, nothing prevents it from working perfectly as
+ a white-box testing tool as well. This is especially true when
the application to test is written in Erlang. Then the test
- ports are easily realized by means of Erlang function calls.</p>
+ ports are easily realized with Erlang function calls.</p>
- <p>When white box testing an Erlang application, it is useful to
- be able to measure the code coverage of the test. Common Test
+ <p>When white-box testing an Erlang application, it is useful to
+ be able to measure the code coverage of the test. <c>Common Test</c>
provides simple access to the OTP Cover tool for this
- purpose. Common Test handles all necessary communication with
- the Cover tool (starting, compiling, analysing, etc). All the
- Common Test user needs to do is to specify the extent of the
+ purpose. <c>Common Test</c> handles all necessary communication with
+    the Cover tool (starting, compiling, analyzing, and so on).
+ The <c>Common Test</c> user only needs to specify the extent of the
code coverage analysis.</p>
</section>
<section>
- <title>Usage</title>
- <p>To specify what modules should be included
- in the code coverage test, you provide a cover specification
- file. Using this file you can point out specific modules or
- specify directories that contain modules which should all be
- included in the analysis. You can also, in the same fashion,
- specify modules that should be excluded from the analysis.</p>
+ <title>Use</title>
+ <p>To specify the modules to be included in the code coverage test,
+ provide a cover specification file. With this file you can point
+ out specific modules or specify directories containing modules to be
+ included in the analysis. You can also specify modules to be excluded
+ from the analysis.</p>
<p>If you are testing a distributed Erlang application, it is
likely that code you want included in the code coverage analysis
- gets executed on an Erlang node other than the one Common Test
- is running on. If this is the case you need to specify these
- other nodes in the cover specification file or add them
- dynamically to the code coverage set of nodes. See the
- <c>ct_cover</c> page in the reference manual for details on the
- latter.</p>
+    gets executed on an Erlang node other than the one <c>Common Test</c>
+ is running on. If so, you must specify these other nodes in the
+ cover specification file or add them dynamically to the code coverage
+ set of nodes. For details on the latter, see module
+ <seealso marker="ct_cover"><c>ct_cover</c></seealso>.</p>
<p>In the cover specification file you can also specify your
required level of the code coverage analysis; <c>details</c> or
<c>overview</c>. In detailed mode, you get a coverage overview
- page, showing you per module and total coverage percentages, as
- well as one HTML file printed for each module included in the
- analysis that shows exactly what parts of the code have been
+ page, showing per module and total coverage percentages.
+ You also get an HTML file printed for each module included in the
+ analysis showing exactly what parts of the code have been
executed during the test. In overview mode, only the code
- coverage overview page gets printed.</p>
+ coverage overview page is printed.</p>
<p>You can choose to export and import code coverage data between
tests. If you specify the name of an export file in the cover
- specification file, Common Test will export collected coverage
- data to this file at the end of the test. You may similarly
- specify that previously exported data should be imported and
- included in the analysis for a test (you can specify multiple
- import files). This way it is possible to analyse total code coverage
- without necessarily running all tests at once.</p>
-
- <p>To activate the code coverage support, you simply specify the
- name of the cover specification file as you start Common Test.
- This you do either by using the <c>-cover</c> flag with <c>ct_run</c>.
- Example:</p>
-
- <p><c>$ ct_run -dir $TESTOBJS/db -cover $TESTOBJS/db/config/db.coverspec</c></p>
+ specification file, <c>Common Test</c> exports collected coverage
+ data to this file at the end of the test. You can similarly
+ specify previously exported data to be imported and
+ included in the analysis for a test (multiple import files can be specified).
+ This way, the total code coverage can be analyzed without necessarily
+ running all tests at once.</p>
+
+ <p>To activate the code coverage support, specify the name of the cover
+ specification file as you start <c>Common Test</c>.
+ Do this by using flag <c>-cover</c> with
+ <seealso marker="ct_run"><c>ct_run</c></seealso>,
+ for example:</p>
+ <pre>
+ $ ct_run -dir $TESTOBJS/db -cover $TESTOBJS/db/config/db.coverspec</pre>
- <p>You may also pass the cover specification file name in a
- call to <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>, by adding a <c>{cover,CoverSpec}</c>
- tuple to the <c>Opts</c> argument. Also, you can of course
- enable code coverage in your test specifications (read
- more in the chapter about
- <seealso marker="run_test_chapter#test_specifications">using test
- specifications</seealso>).</p>
+ <p>You can also pass the cover specification file name in a
+ call to <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>,
+ by adding a <c>{cover,CoverSpec}</c> tuple to argument <c>Opts</c>.</p>
+ <p>You can also enable code coverage in your test specifications (see section
+ <seealso marker="run_test_chapter#test_specifications">Test Specifications</seealso>
+ in section Running Tests and Analyzing Results).</p>
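+    <p>For example, a call from the Erlang shell, with a hypothetical
+      test directory and cover specification file, can look as follows:</p>
+      <pre>
+    1> ct:run_test([{dir,"db"}, {cover,"db/config/db.coverspec"}]).</pre>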
</section>
<section>
<marker id="cover_stop"></marker>
- <title>Stopping the cover tool when tests are completed</title>
- <p>By default the Cover tool is automatically stopped when the
- tests are completed. This causes the original (non cover
- compiled) modules to be loaded back in to the test node. If a
- process at this point is still running old code of any of the
+ <title>Stopping the Cover Tool When Tests Are Completed</title>
+ <p>By default, the Cover tool is automatically stopped when the
+ tests are completed. This causes the original (non-cover
+ compiled) modules to be loaded back into the test node. If a
+ process at this point still runs old code of any of the
modules that are cover compiled, meaning that it has not done
any fully qualified function call after the cover compilation,
- the process will now be killed. To avoid this it is possible to
- set the value of the <c>cover_stop</c> option to
- <c>false</c>. This means that the modules will stay cover
- compiled, and it is therefore only recommended if the erlang
- node(s) under test is terminated after the test is completed
- or if cover can be manually stopped.</p>
-
- <p>The option can be set by using the <c>-cover_stop</c> flag with
- <c>ct_run</c>, by adding <c>{cover_stop,true|false}</c> to the
- Opts argument to <seealso
- marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>, or by adding
- a <c>cover_stop</c> term in your test specification (see chapter
- about <seealso
- marker="run_test_chapter#test_specifications">test
- specifications</seealso>).</p>
+ the process is killed. To avoid this, set the value of option
+ <c>cover_stop</c> to <c>false</c>. This means that the
+ modules stay cover compiled. Therefore, this is only recommended
+ if the Erlang nodes under test are terminated after the test is
+ completed, or if cover can be manually stopped.</p>
+
+ <p>The option can be set by using flag <c>-cover_stop</c> with
+ <c>ct_run</c>, by adding <c>{cover_stop,true|false}</c> to argument
+ <c>Opts</c> to
+ <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>,
+ or by adding a <c>cover_stop</c> term in the test specification (see section
+ <seealso marker="run_test_chapter#test_specifications">Test Specifications</seealso>
+ in section Running Tests and Analyzing Results).</p>
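+
+    <p>For example, to keep the modules cover compiled after the test run
+      (reusing the hypothetical paths from the earlier example):</p>
+      <pre>
+    $ ct_run -dir $TESTOBJS/db -cover $TESTOBJS/db/config/db.coverspec -cover_stop false</pre>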
</section>
<section>
- <title>The cover specification file</title>
- <p>These are the terms allowed in a cover specification file:</p>
+ <title>The Cover Specification File</title>
+ <p>The following terms are allowed in a cover specification file:</p>
<pre>
- %% List of Nodes on which cover will be active during test.
- %% Nodes = [atom()]
- {nodes, Nodes}.
-
- %% Files with previously exported cover data to include in analysis.
- %% CoverDataFiles = [string()]
- {import, CoverDataFiles}.
-
- %% Cover data file to export from this session.
- %% CoverDataFile = string()
- {export, CoverDataFile}.
-
- %% Cover analysis level.
- %% Level = details | overview
- {level, Level}.
-
- %% Directories to include in cover.
- %% Dirs = [string()]
- {incl_dirs, Dirs}.
-
- %% Directories, including subdirectories, to include.
- {incl_dirs_r, Dirs}.
-
- %% Specific modules to include in cover.
- %% Mods = [atom()]
- {incl_mods, Mods}.
-
- %% Directories to exclude in cover.
- {excl_dirs, Dirs}.
-
- %% Directories, including subdirectories, to exclude.
- {excl_dirs_r, Dirs}.
-
- %% Specific modules to exclude in cover.
- {excl_mods, Mods}.
-
- %% Cross cover compilation
- %% Tag = atom(), an identifier for a test run
- %% Mod = [atom()], modules to compile for accumulated analysis
- {cross,[{Tag,Mods}]}.
- </pre>
-
- <p>The <c>incl_dirs_r</c> and <c>excl_dirs_r</c> terms tell Common
- Test to search the given directories recursively and include
- or exclude any module found during the search. The
- <c>incl_dirs</c> and <c>excl_dirs</c> terms result in a
- non-recursive search for modules (i.e. only modules found in
- the given directories are included or excluded).</p>
- <p><em>Note:</em> Directories containing Erlang modules that are
- to be included in a code coverage test must exist in the code
- server path, or the cover tool will fail to recompile the modules.
- (It is not sufficient to specify these directories in the cover
- specification file for Common Test).</p>
+ %% List of Nodes on which cover will be active during test.
+ %% Nodes = [atom()]
+ {nodes, Nodes}.
+
+ %% Files with previously exported cover data to include in analysis.
+ %% CoverDataFiles = [string()]
+ {import, CoverDataFiles}.
+
+ %% Cover data file to export from this session.
+ %% CoverDataFile = string()
+ {export, CoverDataFile}.
+
+ %% Cover analysis level.
+ %% Level = details | overview
+ {level, Level}.
+
+ %% Directories to include in cover.
+ %% Dirs = [string()]
+ {incl_dirs, Dirs}.
+
+ %% Directories, including subdirectories, to include.
+ {incl_dirs_r, Dirs}.
+
+ %% Specific modules to include in cover.
+ %% Mods = [atom()]
+ {incl_mods, Mods}.
+
+ %% Directories to exclude in cover.
+ {excl_dirs, Dirs}.
+
+ %% Directories, including subdirectories, to exclude.
+ {excl_dirs_r, Dirs}.
+
+ %% Specific modules to exclude in cover.
+ {excl_mods, Mods}.
+
+ %% Cross cover compilation
+ %% Tag = atom(), an identifier for a test run
+ %% Mod = [atom()], modules to compile for accumulated analysis
+ {cross,[{Tag,Mods}]}.</pre>
+
+ <p>The terms <c>incl_dirs_r</c> and <c>excl_dirs_r</c> tell <c>Common
+ Test</c> to search the specified directories recursively and include
+ or exclude any module found during the search. The terms
+ <c>incl_dirs</c> and <c>excl_dirs</c> result in a
+ non-recursive search for modules (that is, only modules found in
+ the specified directories are included or excluded).</p>
+ <note><p>Directories containing Erlang modules to be included in a code
+ coverage test must exist in the code server path. Otherwise,
+ the Cover tool fails to recompile the modules. It is not sufficient to
+ specify these directories in the cover specification file for
+ <c>Common Test</c>.</p></note>
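+
+    <p>For example, a minimal specification file requesting a detailed
+      analysis of two hypothetical modules can look as follows:</p>
+      <pre>
+    {level, details}.
+    {incl_mods, [my_db_mod, my_lib_mod]}.</pre>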
</section>
<section>
<marker id="cross_cover"/>
- <title>Cross cover analysis</title>
+ <title>Cross Cover Analysis</title>
<p>The cross cover mechanism allows cover analysis of modules
- across multiple tests. It is useful if some code, e.g. a library
- module, is used by many different tests and the accumulated cover
- result is desirable.</p>
+ across multiple tests. It is useful if some code, for example, a
+ library module, is used by many different tests and the accumulated
+ cover result is desirable.</p>
- <p>This can of course also be achieved in a more customized way by
- using the <c>export</c> parameter in the cover specification and
- analysing the result off line, but the cross cover mechanism is a
- build in solution which also provides the logging.</p>
+ <p>This can also be achieved in a more customized way by
+ using parameter <c>export</c> in the cover specification and
+    analyzing the result offline. However, the cross cover mechanism is a
+ built-in solution that also provides logging.</p>
- <p>The mechanism is easiest explained via an example:</p>
+ <p>The mechanism is easiest explained by an example:</p>
- <p>Let's say that there are two systems, <c>s1</c> and <c>s2</c>,
- which are tested in separate test runs. System <c>s1</c> contains
- a library module <c>m1</c> which is tested by the <c>s1</c> test
- run and is included in <c>s1</c>'s cover specification:</p>
+ <p>Assume that there are two systems, <c>s1</c> and <c>s2</c>,
+ that are tested in separate test runs. System <c>s1</c> contains
+ a library module <c>m1</c> tested by test run <c>s1</c> and
+ is included in the cover specification of <c>s1</c> as follows:</p>
<code type="none">
-s1.cover:
- {incl_mods,[m1]}.</code>
+ s1.cover:
+ {incl_mods,[m1]}.</code>
<p>When analysing code coverage, the result for <c>m1</c> can be
seen in the cover log in the <c>s1</c> test result.</p>
- <p>Now, let's imagine that since <c>m1</c> is a library module, it
- is also used quite a bit by system <c>s2</c>. The <c>s2</c> test
- run does not specifically test <c>m1</c>, but it might still be
- interesting to see which parts of <c>m1</c> is actually covered by
- the <c>s2</c> tests. To do this, <c>m1</c> could be included also
- in <c>s2</c>'s cover specification:</p>
+ <p>Now, imagine that as <c>m1</c> is a library module, it
+ is also often used by system <c>s2</c>. Test run <c>s2</c>
+ does not specifically test <c>m1</c>, but it can still be
+      interesting to see which parts of <c>m1</c> are covered
+ by the <c>s2</c> tests. To do this, <c>m1</c> can be included also
+ in the cover specification of <c>s2</c> as follows:</p>
<code type="none">
-s2.cover:
- {incl_mods,[m1]}.</code>
+ s2.cover:
+ {incl_mods,[m1]}.</code>
- <p>This would give an entry for <c>m1</c> also in the cover log
- for the <c>s2</c> test run. The problem is that this would only
- reflect the coverage by <c>s2</c> tests, not the accumulated
- result over <c>s1</c> and <c>s2</c>. And this is where the cross
+ <p>This gives an entry for <c>m1</c> also in the cover log
+ for test run <c>s2</c>. The problem is that this only
+ reflects the coverage by <c>s2</c> tests, not the accumulated
+ result over <c>s1</c> and <c>s2</c>. This is where the cross
cover mechanism comes in handy.</p>
- <p>If instead the cover specification for <c>s2</c> was like
- this:</p>
+ <p>If instead the cover specification for <c>s2</c> is like
+ the following:</p>
<code type="none">
-s2.cover:
- {cross,[{s1,[m1]}]}.</code>
+ s2.cover:
+ {cross,[{s1,[m1]}]}.</code>
- <p>then <c>m1</c> would be cover compiled in the <c>s2</c> test
- run, but not shown in the coverage log. Instead, if
- <c>ct_cover:cross_cover_analyse/2</c> is called after both
- <c>s1</c> and <c>s2</c> test runs are completed, the accumulated
- result for <c>m1</c> would be available in the cross cover log for
- the <c>s1</c> test run.</p>
+ <p>Then <c>m1</c> is cover compiled in test run <c>s2</c>,
+ but not shown in the coverage log. Instead, if
+ <seealso marker="ct_cover#cross_cover_analyse-2"><c>ct_cover:cross_cover_analyse/2</c></seealso>
+ is called after both <c>s1</c> and <c>s2</c> test runs are completed,
+ the accumulated result for <c>m1</c> is available in the cross cover
+ log for test run <c>s1</c>.</p>
- <p>The call to the analyse function must be like this:</p>
+ <p>The call to the analyze function must be as follows:</p>
<code type="none">
-ct_cover:cross_cover_analyse(Level, [{s1,S1LogDir},{s2,S2LogDir}]).</code>
+ ct_cover:cross_cover_analyse(Level, [{s1,S1LogDir},{s2,S2LogDir}]).</code>
- <p>where <c>S1LogDir</c> and <c>S2LogDir</c> are the directories
+ <p>Here, <c>S1LogDir</c> and <c>S2LogDir</c> are the directories
named <c>&lt;TestName&gt;.logs</c> for each test respectively.</p>
- <p>Note the tags <c>s1</c> and <c>s2</c> which are used in the
+ <p>Notice the tags <c>s1</c> and <c>s2</c>, which are used in the
cover specification file and in the call to
- <c>ct_cover:cross_cover_analyse/2</c>. The point of these are only
+ <c>ct_cover:cross_cover_analyse/2</c>. The purpose of these is only
to map the modules specified in the cover specification to the log
- directory specified in the call to the analyse function. The name
- of the tag has no meaning beyond this.</p>
+ directory specified in the call to the analyze function. The tag name
+ has no meaning beyond this.</p>
</section>
<section>
<title>Logging</title>
<p>To view the result of a code coverage test, click the button
- labled "COVER LOG" in the top level index page for the test run.</p>
+ labeled "COVER LOG" in the top-level index page for the test run.</p>
- <p>Prior to Erlang/OTP 17.1, if your test run consisted of
+ <p>Before Erlang/OTP 17.1, if your test run consisted of
multiple tests, cover would be started and stopped for each test
- within the test run. Separate logs would be available via the
+ within the test run. Separate logs would be available through the
"Coverage log" link on the test suite result pages. These links
are still available, but now they all point to the same page as
- the button on the top level index page. The log contains the
- accumulated results for the complete test run. See the release
- notes for more information about this change.</p>
+ the button on the top-level index page. The log contains the
+ accumulated results for the complete test run. For details about
+ this change, see the release notes.</p>
- <p>The buttonc takes you to the code coverage overview page. If you
- have successfully performed a detailed coverage analysis, you
- find links to each individual module coverage page here.</p>
+ <p>The button takes you to the code coverage overview page. If you
+ have successfully performed a detailed coverage analysis,
+ links to each individual module coverage page are found here.</p>
- <p>If cross cover analysis has been performed, and there are
- accumulated coverage results for the current test, then the -
- "Coverdata collected over all tests" link will take you to these
+ <p>If cross cover analysis is performed, and there are
+ accumulated coverage results for the current test, the link
+ "Coverdata collected over all tests" takes you to these
results.</p>
</section>
diff --git a/lib/common_test/doc/src/ct.xml b/lib/common_test/doc/src/ct.xml
new file mode 100644
index 0000000000..a87be21d73
--- /dev/null
+++ b/lib/common_test/doc/src/ct.xml
@@ -0,0 +1,1412 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct.xml</file>
+ </header>
+ <module>ct</module>
+ <modulesummary>Main user interface for the Common Test framework.</modulesummary>
+
+ <description>
+
+ <p>Main user interface for the <c>Common Test</c> framework.</p>
+
+ <p>This module implements the command-line interface for running
+ tests and basic functions for <c>Common Test</c> case issues, such as
+ configuration and logging.</p>
+
+ <p><em>Test Suite Support Macros</em></p>
+
+ <p>The <c>config</c> macro is defined in <c>ct.hrl</c>. This macro is
+ to be used to retrieve information from the <c>Config</c> variable sent
+    to all test cases. It is used with two arguments: the first is the name
+    of the configuration variable to retrieve, and the second is the
+    <c>Config</c> variable supplied to the test case.</p>
+
+ <p>Possible configuration variables include:</p>
+
+ <list type="bulleted">
+ <item><p><c>data_dir</c> - Data file directory</p></item>
+ <item><p><c>priv_dir</c> - Scratch file directory</p></item>
+    <item><p>Whatever is added by
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite/1</c></seealso>
+ or
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
+ in the test suite.</p></item>
+ </list>
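+
+    <p>For example, a sketch of a test case reading the data directory
+      from <c>Config</c> using the macro:</p>
+
+    <pre>
+ my_case(Config) ->
+     DataDir = ?config(data_dir, Config),
+     ...</pre>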
+
+ </description>
+
+ <section>
+ <title>Data Types</title>
+ <marker id="types"/>
+ <taglist>
+
+ <tag><c>handle() = pid()</c></tag>
+ <item><marker id="type-handle"/>
+ <p>The identity (handle) of a connection.</p></item>
+
+ <tag><c>target_name() = atom()</c></tag>
+ <item><marker id="type-target_name"/>
+ <p>A name and association to configuration data introduced
+ through a require statement, or a call to
+ <seealso marker="#require-2"><c>ct:require/2</c></seealso>,
+ for example,
+ <c>ct:require(mynodename,{node,[telnet]})</c>.</p></item>
+
+ </taglist>
+ </section>
+
+ <funcs>
+ <func>
+ <name>abort_current_testcase(Reason) -&gt; ok | {error, ErrorReason}</name>
+ <fsummary>Aborts the currently executing test case.</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ <v>ErrorReason = no_testcase_running | parallel_group</v>
+ </type>
+ <desc><marker id="abort_current_testcase-1"/>
+ <p>Aborts the currently executing test case. The user must know with
+ certainty which test case is currently executing. The function is
+ therefore only safe to call from a function that has been called
+ (or synchronously invoked) by the test case.</p>
+
+ <p><c>Reason</c>, the reason for aborting the test case, is printed
+ in the test case log.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>add_config(Callback, Config) -&gt; ok | {error, Reason}</name>
+ <fsummary>Loads configuration variables using the specified callback
+ module and configuration string.</fsummary>
+ <type>
+ <v>Callback = atom()</v>
+ <v>Config = string()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="add_config-2"/>
+ <p>Loads configuration variables using the specified callback module and
+ configuration string. The callback module is to be either loaded or
+      present in the code path. Loaded configuration variables can later
+ be removed using function
+ <seealso marker="#remove_config-2"><c>ct:remove_config/2</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name>break(Comment) -&gt; ok | {error, Reason}</name>
+ <fsummary>Cancels any active timetrap and pause the execution of the
+ current test case until the user calls function continue/0.</fsummary>
+ <type>
+ <v>Comment = string()</v>
+ <v>Reason = {multiple_cases_running, TestCases} | 'enable break with release_shell option'</v>
+ <v>TestCases = [atom()]</v>
+ </type>
+ <desc><marker id="break-1"/>
+ <p>Cancels any active timetrap and pauses the execution of the
+ current test case until the user calls function <c>continue/0</c>.
+ The user can then interact with the Erlang node running the tests,
+ for example, for debugging purposes or for manually executing a
+ part of the test case. If a parallel group is executing,
+ <seealso marker="#break-2"><c>ct:break/2</c></seealso> is to be
+ called instead.</p>
+ <p>A cancelled timetrap is not automatically reactivated after the
+      break, but must be started explicitly with
+ <seealso marker="#timetrap-1"><c>ct:timetrap/1</c></seealso>.</p>
+ <p>In order for the break/continue functionality to work, <c>Common
+ Test</c> must release the shell process controlling <c>stdin</c>.
+ This is done by setting start option <c>release_shell</c>
+ to <c>true</c>. For details, see section
+ <seealso marker="run_test_chapter#erlang_shell_or_program">Running
+ Tests from the Erlang Shell or from an Erlang Program</seealso>
+ in the User's Guide.</p>
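+
+      <p><em>Example</em> (an illustrative sketch, assuming the test was
+      started with <c>release_shell</c> set to <c>true</c>):</p>
+
+      <pre>
+   %% In the test case:
+   ok = ct:break("Inspect the node, then run ct:continue()")
+   %% Execution resumes here after ct:continue() is called from
+   %% the Erlang shell.</pre>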
+ </desc>
+ </func>
+
+ <func>
+ <name>break(TestCase, Comment) -&gt; ok | {error, Reason}</name>
+ <fsummary>Works the same way as break/1, only argument TestCase makes it
+ possible to pause a test case executing in a parallel group.</fsummary>
+ <type>
+ <v>TestCase = atom()</v>
+ <v>Comment = string()</v>
+ <v>Reason = 'test case not running' | 'enable break with release_shell option'</v>
+ </type>
+ <desc><marker id="break-2"/>
+ <p>Works the same way as
+ <seealso marker="#break-1"><c>ct:break/1</c></seealso>, only
+ argument <c>TestCase</c> makes it possible to pause a test case
+ executing in a parallel group. Function
+ <seealso marker="#continue-1"><c>ct:continue/1</c></seealso> is to
+ be used to resume execution of <c>TestCase</c>.</p>
+
+ <p>For details, see
+ <seealso marker="#break/1"><c>ct:break/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>capture_get() -&gt; ListOfStrings</name>
+ <fsummary>Equivalent to capture_get([default]).</fsummary>
+ <type>
+ <v>ListOfStrings = [string()]</v>
+ </type>
+ <desc><marker id="capture_get-0"/>
+ <p>Equivalent to
+ <seealso marker="#capture_get-1">ct:capture_get([default])</seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>capture_get(ExclCategories) -&gt; ListOfStrings</name>
+ <fsummary>Returns and purges the list of text strings buffered during
+ the latest session of capturing printouts to stdout.</fsummary>
+ <type>
+ <v>ExclCategories = [atom()]</v>
+ <v>ListOfStrings = [string()]</v>
+ </type>
+ <desc><marker id="capture_get-1"/>
+ <p>Returns and purges the list of text strings buffered during the
+ latest session of capturing printouts to <c>stdout</c>. Log
+ categories that are to be ignored in <c>ListOfStrings</c> can be
+ specified with <c>ExclCategories</c>.
+ If <c>ExclCategories = []</c>, no filtering takes place.</p>
+
+ <p>See also
+ <seealso marker="#capture_start-0"><c>ct:capture_start/0</c></seealso>,
+ <seealso marker="#capture_stop-0"><c>ct:capture_stop/0</c></seealso>,
+ <seealso marker="#log-3"><c>ct:log/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>capture_start() -&gt; ok</name>
+ <fsummary>Starts capturing all text strings printed to stdout
+ during execution of the test case.</fsummary>
+ <desc><marker id="capture_start-0"/>
+ <p>Starts capturing all text strings printed to <c>stdout</c>
+ during execution of the test case.</p>
+
+ <p>See also
+ <seealso marker="#capture_get-1"><c>ct:capture_get/1</c></seealso>,
+ <seealso marker="#capture_stop-0"><c>ct:capture_stop/0</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>capture_stop() -&gt; ok</name>
+ <fsummary>Stops capturing text strings (a session started with
+ capture_start/0).</fsummary>
+ <desc><marker id="capture_stop-0"/>
+ <p>Stops capturing text strings (a session started with
+ <c>capture_start/0</c>).</p>
+
+ <p>See also
+ <seealso marker="#capture_get-1"><c>ct:capture_get/1</c></seealso>,
+ <seealso marker="#capture_start-0"><c>ct:capture_start/0</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>comment(Comment) -&gt; ok</name>
+ <fsummary>Prints the specified Comment in the comment field in the
+ table on the test suite result page.</fsummary>
+ <type>
+ <v>Comment = term()</v>
+ </type>
+ <desc><marker id="comment-1"/>
+ <p>Prints the specified <c>Comment</c> in the comment field in the
+ table on the test suite result page.</p>
+
+ <p>If called several times, only the last comment is printed. The
+ test case return value <c>{comment,Comment}</c> overwrites the
+ string set by this function.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>comment(Format, Args) -&gt; ok</name>
+ <fsummary>Prints the formatted string in the comment field in the
+ table on the test suite result page.</fsummary>
+ <type>
+ <v>Format = string()</v>
+ <v>Args = list()</v>
+ </type>
+ <desc><marker id="comment-2"/>
+ <p>Prints the formatted string in the comment field in the table
+ on the test suite result page.</p>
+
+ <p>Arguments <c>Format</c> and <c>Args</c> are used in a call to
+ <c>io_lib:format/2</c> to create the comment string. The behavior
+ of <c>comment/2</c> is otherwise the same as function
+ <seealso marker="#comment-1"><c>ct:comment/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>continue() -&gt; ok</name>
+ <fsummary>This function must be called to continue after a test
+ case (not executing in a parallel group) has called break/1.</fsummary>
+ <desc><marker id="continue-0"/>
+ <p>This function must be called to continue after a test case
+ (not executing in a parallel group) has called function
+ <seealso marker="#break-1"><c>ct:break/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>continue(TestCase) -&gt; ok</name>
+ <fsummary>This function must be called to continue after a test case
+ has called break/2.</fsummary>
+ <type>
+ <v>TestCase = atom()</v>
+ </type>
+ <desc><marker id="continue-1"/>
+ <p>This function must be called to continue after a test case has
+ called <seealso marker="#break-2"><c>ct:break/2</c></seealso>.
+ If the paused test case, <c>TestCase</c>, executes in a parallel
+ group, this function, rather than <c>continue/0</c>, must be used
+ to let the test case proceed.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>decrypt_config_file(EncryptFileName, TargetFileName) -&gt; ok | {error, Reason}</name>
+ <fsummary>Decrypts EncryptFileName, previously generated with
+ encrypt_config_file/2,3.</fsummary>
+ <type>
+ <v>EncryptFileName = string()</v>
+ <v>TargetFileName = string()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="decrypt_config_file-2"/>
+ <p>Decrypts <c>EncryptFileName</c>, previously generated with
+ <seealso marker="#encrypt_config_file-2"><c>ct:encrypt_config_file/2,3</c></seealso>.
+      The original file contents are saved in the target file. The
+ encryption key, a string, must be available in a text file named
+ <c>.ct_config.crypt</c>, either in the current directory, or the
+ home directory of the user (it is searched for in that order).</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>decrypt_config_file(EncryptFileName, TargetFileName, KeyOrFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Decrypts EncryptFileName, previously generated with
+ encrypt_config_file/2,3.</fsummary>
+ <type>
+ <v>EncryptFileName = string()</v>
+ <v>TargetFileName = string()</v>
+ <v>KeyOrFile = {key, string()} | {file, string()}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="decrypt_config_file-3"/>
+ <p>Decrypts <c>EncryptFileName</c>, previously generated with
+ <seealso marker="#encrypt_config_file-2"><c>ct:encrypt_config_file/2,3</c></seealso>.
+      The original file contents are saved in the target file. The key
+ must have the same value as that used for encryption.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>encrypt_config_file(SrcFileName, EncryptFileName) -&gt; ok | {error, Reason}</name>
+ <fsummary>Encrypts the source configuration file with DES3 and saves the
+ result in file EncryptFileName.</fsummary>
+ <type>
+ <v>SrcFileName = string()</v>
+ <v>EncryptFileName = string()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="encrypt_config_file-2"/>
+ <p>Encrypts the source configuration file with DES3 and saves the result
+ in file <c>EncryptFileName</c>. The key, a string, must be
+ available in a text file named <c>.ct_config.crypt</c>, either
+ in the current directory, or the home directory of the user (it
+ is searched for in that order).</p>
+
+ <p>For information about using encrypted configuration files when
+ running tests, see section
+ <seealso marker="config_file_chapter#encrypted_config_files">Encrypted
+ Configuration Files</seealso> in the User's Guide.</p>
+
+ <p>For details on DES3 encryption/decryption, see application
+ <seealso marker="crypto:index"><c>Crypto</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>encrypt_config_file(SrcFileName, EncryptFileName, KeyOrFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Encrypts the source configuration file with DES3 and saves the
+ result in the target file EncryptFileName.</fsummary>
+ <type>
+ <v>SrcFileName = string()</v>
+ <v>EncryptFileName = string()</v>
+ <v>KeyOrFile = {key, string()} | {file, string()}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="encrypt_config_file-3"/>
+ <p>Encrypts the source configuration file with DES3 and saves the result
+ in the target file <c>EncryptFileName</c>. The encryption key
+ to use is either the value in <c>{key,Key}</c> or the value
+ stored in the file specified by <c>{file,File}</c>.</p>
+
+ <p>For information about using encrypted configuration files when
+ running tests, see section
+ <seealso marker="config_file_chapter#encrypted_config_files">Encrypted
+ Configuration Files</seealso> in the User's Guide.</p>
+
+ <p>For details on DES3 encryption/decryption, see application
+ <seealso marker="crypto:index"><c>Crypto</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>fail(Reason) -&gt; ok</name>
+ <fsummary>Terminates a test case with the specified error
+ Reason.</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="fail-1"/>
+ <p>Terminates a test case with the specified error <c>Reason</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>fail(Format, Args) -&gt; ok</name>
+ <fsummary>Terminates a test case with an error message specified by
+ a format string and a list of values (used as arguments to
+ io_lib:format/2).</fsummary>
+ <type>
+ <v>Format = string()</v>
+ <v>Args = list()</v>
+ </type>
+ <desc><marker id="fail-2"/>
+ <p>Terminates a test case with an error message specified by a
+ format string and a list of values (used as arguments to
+ <c>io_lib:format/2</c>).</p>
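+
+      <p><em>Example</em> (an illustrative sketch, assuming <c>Value</c>
+      is bound in the test case):</p>
+
+      <pre>
+   ct:fail("Unexpected value: ~p", [Value])</pre>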
+ </desc>
+ </func>
+
+ <func>
+ <name>get_config(Required) -&gt; Value</name>
+ <fsummary>Equivalent to get_config(Required, undefined, []).</fsummary>
+ <desc><marker id="get_config-1"/>
+ <p>Equivalent to <seealso marker="#get_config-3"><c>ct:get_config(Required,
+ undefined, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_config(Required, Default) -&gt; Value</name>
+ <fsummary>Equivalent to get_config(Required, Default, []).</fsummary>
+ <desc><marker id="get_config-2"/>
+ <p>Equivalent to <seealso marker="#get_config-3"><c>ct:get_config(Required,
+ Default, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_config(Required, Default, Opts) -&gt; ValueOrElement</name>
+ <fsummary>Reads configuration data values.</fsummary>
+ <type>
+ <v>Required = KeyOrName | {KeyOrName, SubKey} | {KeyOrName, SubKey, SubKey}</v>
+ <v>KeyOrName = atom()</v>
+ <v>SubKey = atom()</v>
+ <v>Default = term()</v>
+ <v>Opts = [Opt] | []</v>
+ <v>Opt = element | all</v>
+ <v>ValueOrElement = term() | Default</v>
+ </type>
+ <desc><marker id="get_config-3"/>
+ <p>Reads configuration data values.</p>
+
+ <p>Returns the matching values or configuration elements, given a
+ configuration variable key or its associated name (if one has been
+ specified with
+ <seealso marker="#require-2"><c>ct:require/2</c></seealso>
+ or a <c>require</c> statement).</p>
+
+ <p><em>Example:</em></p>
+
+ <p>Given the following configuration file:</p>
+
+ <pre>
+ {unix,[{telnet,IpAddr},
+ {user,[{username,Username},
+ {password,Password}]}]}.</pre>
+
+ <p>Then:</p>
+
+ <pre>
+ ct:get_config(unix,Default) -&gt; [{telnet,IpAddr},
+ {user, [{username,Username}, {password,Password}]}]
+ ct:get_config({unix,telnet},Default) -&gt; IpAddr
+ ct:get_config({unix,user,username},Default) -&gt; Username
+ ct:get_config({unix,ftp},Default) -&gt; Default
+ ct:get_config(unknownkey,Default) -&gt; Default</pre>
+
+ <p>If a configuration variable key has been associated with a name (by
+ <seealso marker="#require-2"><c>ct:require/2</c></seealso>
+ or a <c>require</c> statement), the name can be used instead
+ of the key to read the value:</p>
+
+ <pre>
+ ct:require(myuser,{unix,user}) -&gt; ok.
+ ct:get_config(myuser,Default) -&gt; [{username,Username}, {password,Password}]</pre>
+
+ <p>If a configuration variable is defined in multiple files, use option
+ <c>all</c> to access all possible values. The values are returned
+      in a list. The order of the elements corresponds to the order in
+      which the configuration files were specified at startup.</p>
+
+      <p>If configuration elements (key-value tuples) are to be returned
+      as the result instead of values, use option <c>element</c>. The
+      returned elements are then of the form <c>{Required,Value}</c>.</p>
+
+ <p>See also
+ <seealso marker="#get_config-1"><c>ct:get_config/1</c></seealso>,
+ <seealso marker="#get_config-2"><c>ct:get_config/2</c></seealso>,
+ <seealso marker="#require-1"><c>ct:require/1</c></seealso>,
+ <seealso marker="#require-2"><c>ct:require/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_event_mgr_ref() -&gt; EvMgrRef</name>
+ <fsummary>Gets a reference to the <c>Common Test</c> event manager.</fsummary>
+ <type>
+ <v>EvMgrRef = atom()</v>
+ </type>
+ <desc><marker id="get_event_mgr_ref-0"/>
+ <p>Gets a reference to the <c>Common Test</c> event manager.
+ The reference can be used to, for example, add a user-specific
+ event handler while tests are running.</p>
+
+ <p><em>Example:</em></p>
+
+ <pre>
+ gen_event:add_handler(ct:get_event_mgr_ref(), my_ev_h, [])</pre>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_status() -&gt; TestStatus | {error, Reason} | no_tests_running</name>
+ <fsummary>Returns status of ongoing test.</fsummary>
+ <type>
+ <v>TestStatus = [StatusElem]</v>
+ <v>StatusElem = {current, TestCaseInfo} | {successful, Successful} | {failed, Failed} | {skipped, Skipped} | {total, Total}</v>
+ <v>TestCaseInfo = {Suite, TestCase} | [{Suite, TestCase}]</v>
+ <v>Suite = atom()</v>
+ <v>TestCase = atom()</v>
+ <v>Successful = integer()</v>
+ <v>Failed = integer()</v>
+ <v>Skipped = {UserSkipped, AutoSkipped}</v>
+ <v>UserSkipped = integer()</v>
+ <v>AutoSkipped = integer()</v>
+ <v>Total = integer()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="get_status-0"/>
+ <p>Returns status of ongoing test. The returned list contains
+ information about which test case is executing (a list of cases
+ when a parallel test case group is executing), as well as
+ counters for successful, failed, skipped, and total test cases
+ so far.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_target_name(Handle) -&gt; {ok, TargetName} | {error, Reason}</name>
+ <fsummary>Returns the name of the target that the specified connection
+ belongs to.</fsummary>
+ <type>
+ <v>Handle = handle()</v>
+ <v>TargetName = target_name()</v>
+ </type>
+ <desc><marker id="get_target_name-1"/>
+ <p>Returns the name of the target that the specified connection
+ belongs to.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_testspec_terms() -&gt; TestSpecTerms | undefined</name>
+ <fsummary>Gets a list of all test specification terms used to
+ configure and run this test.</fsummary>
+ <type>
+ <v>TestSpecTerms = [{Tag, Value}]</v>
+ <v>Value = [term()]</v>
+ </type>
+ <desc><marker id="get_testspec_terms-0"/>
+ <p>Gets a list of all test specification terms used to configure
+ and run this test.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_testspec_terms(Tags) -&gt; TestSpecTerms | undefined</name>
+ <fsummary>Reads one or more terms from the test specification used to
+ configure and run this test.</fsummary>
+ <type>
+ <v>Tags = [Tag] | Tag</v>
+ <v>Tag = atom()</v>
+ <v>TestSpecTerms = [{Tag, Value}] | {Tag, Value}</v>
+ <v>Value = [{Node, term()}] | [term()]</v>
+ <v>Node = atom()</v>
+ </type>
+ <desc><marker id="get_testspec_terms-1"/>
+ <p>Reads one or more terms from the test specification used to
+ configure and run this test. <c>Tag</c> is any valid test
+ specification tag, for example, <c>label</c>, <c>config</c>, or
+ <c>logdir</c>. User-specific terms are also available to read if
+ option <c>allow_user_terms</c> is set.</p>
+ <p>All value tuples returned, except user terms, have the node
+      name as the first element.</p>
+ <p>To read test terms, use <c>Tag = tests</c> (rather than
+ <c>suites</c>, <c>groups</c>, or <c>cases</c>). <c>Value</c> is
+      then the list of <em>all</em> tests of the form
+ <c>[{Node,Dir,[{TestSpec,GroupsAndCases1},...]},...]</c>, where
+ <c>GroupsAndCases = [{Group,[Case]}] | [Case]</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_timetrap_info() -&gt; {Time, Scale}</name>
+ <fsummary>Reads information about the timetrap set for the current
+ test case.</fsummary>
+ <type>
+ <v>Time = integer() | infinity</v>
+ <v>Scale = true | false</v>
+ </type>
+ <desc><marker id="get_timetrap_info-0"/>
+ <p>Reads information about the timetrap set for the current test
+ case. <c>Scale</c> indicates if <c>Common Test</c> will attempt
+      to compensate timetraps automatically for runtime delays
+      introduced by tools such as <c>cover</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>install(Opts) -&gt; ok | {error, Reason}</name>
+ <fsummary>Installs configuration files and event handlers.</fsummary>
+ <type>
+ <v>Opts = [Opt]</v>
+ <v>Opt = {config, ConfigFiles} | {event_handler, Modules} | {decrypt, KeyOrFile}</v>
+ <v>ConfigFiles = [ConfigFile]</v>
+ <v>ConfigFile = string()</v>
+ <v>Modules = [atom()]</v>
+ <v>KeyOrFile = {key, Key} | {file, KeyFile}</v>
+ <v>Key = string()</v>
+ <v>KeyFile = string()</v>
+ </type>
+ <desc><marker id="install-1"/>
+ <p>Installs configuration files and event handlers.</p>
+
+ <p>Run this function once before the first test.</p>
+
+ <p><em>Example:</em></p>
+
+ <pre>
+ install([{config,["config_node.ctc","config_user.ctc"]}])</pre>
+
+ <p>This function is automatically run by program <c>ct_run</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>listenv(Telnet) -&gt; [Env]</name>
+ <fsummary>Performs command listenv on the specified Telnet connection
+ and returns the result as a list of key-value pairs.</fsummary>
+ <type>
+ <v>Telnet = term()</v>
+ <v>Env = {Key, Value}</v>
+ <v>Key = string()</v>
+ <v>Value = string()</v>
+ </type>
+ <desc><marker id="listenv-1"/>
+ <p>Performs command <c>listenv</c> on the specified Telnet connection
+ and returns the result as a list of key-value pairs.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>log(Format) -&gt; ok</name>
+ <fsummary>Equivalent to log(default, 50, Format, [], []).</fsummary>
+ <desc><marker id="log-1"/>
+ <p>Equivalent to
+ <seealso marker="#log-5"><c>ct:log(default, 50, Format, [], [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>log(X1, X2) -&gt; ok</name>
+ <fsummary>Equivalent to log(Category, Importance, Format,
+ FormatArgs, []).</fsummary>
+ <type>
+ <v>X1 = Category | Importance | Format</v>
+ <v>X2 = Format | FormatArgs</v>
+ </type>
+ <desc><marker id="log-2"/>
+ <p>Equivalent to <seealso marker="#log-5"><c>ct:log(Category,
+ Importance, Format, FormatArgs, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>log(X1, X2, X3) -&gt; ok</name>
+ <fsummary>Equivalent to log(Category, Importance, Format,
+ FormatArgs, Opts).</fsummary>
+ <type>
+ <v>X1 = Category | Importance</v>
+ <v>X2 = Importance | Format</v>
+ <v>X3 = Format | FormatArgs | Opts</v>
+ </type>
+ <desc><marker id="log-3"/>
+ <p>Equivalent to <seealso marker="#log-5"><c>ct:log(Category,
+ Importance, Format, FormatArgs, Opts)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>log(X1, X2, X3, X4) -&gt; ok</name>
+ <fsummary>Equivalent to log(Category, Importance, Format,
+ FormatArgs, Opts).</fsummary>
+ <type>
+ <v>X1 = Category | Importance</v>
+ <v>X2 = Importance | Format</v>
+ <v>X3 = Format | FormatArgs</v>
+ <v>X4 = FormatArgs | Opts</v>
+ </type>
+ <desc><marker id="log-4"/>
+ <p>Equivalent to <seealso marker="#log-5"><c>ct:log(Category,
+ Importance, Format, FormatArgs, Opts)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>log(Category, Importance, Format, FormatArgs, Opts) -&gt; ok</name>
+ <fsummary>Prints from a test case to the log file.</fsummary>
+ <type>
+ <v>Category = atom()</v>
+ <v>Importance = integer()</v>
+ <v>Format = string()</v>
+ <v>FormatArgs = list()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = no_css | esc_chars</v>
+ </type>
+ <desc><marker id="log-5"/>
+ <p>Prints from a test case to the log file.</p>
+
+ <p>This function is meant for printing a string directly from a
+ test case to the test case log file.</p>
+
+ <p>Default <c>Category</c> is <c>default</c>,
+ default <c>Importance</c> is <c>?STD_IMPORTANCE</c>,
+ and default value for <c>FormatArgs</c> is <c>[]</c>.</p>
+
+ <p>For details on <c>Category</c>, <c>Importance</c> and the <c>no_css</c>
+ option, see section <seealso marker="write_test_chapter#logging">
+ Logging - Categories and Verbosity Levels</seealso> in the User's Guide.</p>
+
+      <p><c>Common Test</c> will not escape special HTML characters (&lt;, &gt; and &amp;)
+ in the text printed with this function, unless the <c>esc_chars</c>
+ option is used.</p>
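+
+      <p><em>Example</em> (an illustrative sketch, assuming <c>Msg</c> is
+      bound in the test case):</p>
+
+      <pre>
+   ct:log(info, ?STD_IMPORTANCE, "Received: ~p", [Msg], [esc_chars]).</pre>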
+ </desc>
+ </func>
+
+ <func>
+ <name>make_priv_dir() -&gt; ok | {error, Reason}</name>
+ <fsummary>If the test has been started with option create_priv_dir
+ set to manual_per_tc, in order for the test case to use the private
+ directory, it must first create it by calling this function.</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="make_priv_dir-0"/>
+ <p>If the test is started with option <c>create_priv_dir</c>
+ set to <c>manual_per_tc</c>, in order for the test case to use
+ the private directory, it must first create it by calling this
+ function.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>notify(Name, Data) -&gt; ok</name>
+ <fsummary>Sends an asynchronous notification of type Name with Data
+ to the <c>Common Test</c> event manager.</fsummary>
+ <type>
+ <v>Name = atom()</v>
+ <v>Data = term()</v>
+ </type>
+ <desc><marker id="notify-2"/>
+ <p>Sends an asynchronous notification of type <c>Name</c> with
+      <c>Data</c> to the <c>Common Test</c> event manager. This can later be
+ caught by any installed event manager.</p>
+
+ <p>See also
+ <seealso marker="stdlib:gen_event"><c>stdlib:gen_event(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pal(Format) -&gt; ok</name>
+ <fsummary>Equivalent to pal(default, 50, Format, []).</fsummary>
+ <desc><marker id="pal-1"/>
+ <p>Equivalent to
+ <seealso marker="#pal-4"><c>ct:pal(default, 50, Format,
+ [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pal(X1, X2) -&gt; ok</name>
+ <fsummary>Equivalent to pal(Category, Importance, Format,
+ FormatArgs).</fsummary>
+ <type>
+ <v>X1 = Category | Importance | Format</v>
+ <v>X2 = Format | FormatArgs</v>
+ </type>
+ <desc><marker id="pal-2"/>
+ <p>Equivalent to <seealso marker="#pal-4"><c>ct:pal(Category,
+ Importance, Format, FormatArgs)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pal(X1, X2, X3) -&gt; ok</name>
+ <fsummary>Equivalent to pal(Category, Importance, Format,
+ FormatArgs).</fsummary>
+ <type>
+ <v>X1 = Category | Importance</v>
+ <v>X2 = Importance | Format</v>
+ <v>X3 = Format | FormatArgs</v>
+ </type>
+ <desc><marker id="pal-3"/>
+ <p>Equivalent to <seealso marker="#pal-4"><c>ct:pal(Category,
+ Importance, Format, FormatArgs)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pal(Category, Importance, Format, FormatArgs) -&gt; ok</name>
+ <fsummary>Prints and logs from a test case.</fsummary>
+ <type>
+ <v>Category = atom()</v>
+ <v>Importance = integer()</v>
+ <v>Format = string()</v>
+ <v>FormatArgs = list()</v>
+ </type>
+ <desc><marker id="pal-4"/>
+ <p>Prints and logs from a test case.</p>
+
+ <p>This function is meant for printing a string from a test case,
+ both to the test case log file and to the console.</p>
+
+ <p>Default <c>Category</c> is <c>default</c>,
+ default <c>Importance</c> is <c>?STD_IMPORTANCE</c>,
+ and default value for <c>FormatArgs</c> is <c>[]</c>.</p>
+
+ <p>For details on <c>Category</c> and <c>Importance</c>, see section
+ <seealso marker="write_test_chapter#logging">Logging - Categories
+ and Verbosity Levels</seealso> in the User's Guide.</p>
+
+ <p>Note that special characters in the text (&lt;, &gt; and &amp;) will
+      be escaped by <c>Common Test</c> before the text is printed to the log
+ file.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>parse_table(Data) -&gt; {Heading, Table}</name>
+ <fsummary>Parses the printout from an SQL table and returns a list of
+ tuples.</fsummary>
+ <type>
+ <v>Data = [string()]</v>
+ <v>Heading = tuple()</v>
+ <v>Table = [tuple()]</v>
+ </type>
+ <desc><marker id="parse_table-1"/>
+ <p>Parses the printout from an SQL table and returns a list of
+ tuples.</p>
+
+ <p>The printout to parse is typically the result of a <c>select</c>
+ command in SQL. The returned <c>Table</c> is a list of tuples,
+ where each tuple is a row in the table.</p>
+
+ <p><c>Heading</c> is a tuple of strings representing the headings
+ of each column in the table.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>print(Format) -&gt; ok</name>
+ <fsummary>Equivalent to print(default, 50, Format, []).</fsummary>
+ <desc><marker id="print-1"/>
+ <p>Equivalent to <seealso marker="#print-4"><c>ct:print(default,
+ 50, Format, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>print(X1, X2) -&gt; ok</name>
+ <fsummary>Equivalent to print(Category, Importance, Format,
+ FormatArgs).</fsummary>
+ <type>
+ <v>X1 = Category | Importance | Format</v>
+ <v>X2 = Format | FormatArgs</v>
+ </type>
+ <desc><marker id="print-2"/>
+ <p>Equivalent to <seealso marker="#print-4"><c>ct:print(Category,
+ Importance, Format, FormatArgs)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>print(X1, X2, X3) -&gt; ok</name>
+ <fsummary>Equivalent to print(Category, Importance, Format,
+ FormatArgs).</fsummary>
+ <type>
+ <v>X1 = Category | Importance</v>
+ <v>X2 = Importance | Format</v>
+ <v>X3 = Format | FormatArgs</v>
+ </type>
+ <desc><marker id="print-3"/>
+ <p>Equivalent to <seealso marker="#print-4"><c>ct:print(Category,
+ Importance, Format, FormatArgs)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>print(Category, Importance, Format, FormatArgs) -&gt; ok</name>
+ <fsummary>Prints from a test case to the console.</fsummary>
+ <type>
+ <v>Category = atom()</v>
+ <v>Importance = integer()</v>
+ <v>Format = string()</v>
+ <v>FormatArgs = list()</v>
+ </type>
+ <desc><marker id="print-4"/>
+ <p>Prints from a test case to the console.</p>
+
+ <p>This function is meant for printing a string from a test case to
+ the console.</p>
+
+ <p>Default <c>Category</c> is <c>default</c>,
+ default <c>Importance</c> is <c>?STD_IMPORTANCE</c>,
+ and default value for <c>FormatArgs</c> is <c>[]</c>.</p>
+
+ <p>For details on <c>Category</c> and <c>Importance</c>, see section
+ <seealso marker="write_test_chapter#logging">Logging - Categories
+ and Verbosity Levels</seealso> in the User's Guide.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>reload_config(Required) -&gt; ValueOrElement</name>
+ <fsummary>Reloads configuration file containing specified configuration
+ key.</fsummary>
+ <type>
+ <v>Required = KeyOrName | {KeyOrName, SubKey} | {KeyOrName, SubKey, SubKey}</v>
+ <v>KeyOrName = atom()</v>
+ <v>SubKey = atom()</v>
+ <v>ValueOrElement = term()</v>
+ </type>
+ <desc><marker id="reload_config-1"/>
+ <p>Reloads configuration file containing specified configuration key.</p>
+
+ <p>This function updates the configuration data from which the
+ specified configuration variable was read, and returns the (possibly)
+ new value of this variable.</p>
+
+ <p>If some variables were present in the configuration, but are
+ not loaded using this function, they are removed from the
+ configuration table together with their aliases.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>remove_config(Callback, Config) -&gt; ok</name>
+ <fsummary>Removes configuration variables (together with
+ their aliases) that were loaded with specified callback module and
+ configuration string.</fsummary>
+ <type>
+ <v>Callback = atom()</v>
+ <v>Config = string()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="remove_config-2"/>
+      <p>Removes configuration variables (together with their aliases)
+ that were loaded with specified callback module and configuration
+ string.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>require(Required) -&gt; ok | {error, Reason}</name>
+ <fsummary>Checks if the required configuration is available.</fsummary>
+ <type>
+ <v>Required = Key | {Key, SubKeys} | {Key, SubKey, SubKeys}</v>
+ <v>Key = atom()</v>
+ <v>SubKeys = SubKey | [SubKey]</v>
+ <v>SubKey = atom()</v>
+ </type>
+ <desc><marker id="require-1"/>
+ <p>Checks if the required configuration is available. Arbitrarily
+ deep tuples can be specified as <c>Required</c>. Only the last
+ element of the tuple can be a list of <c>SubKey</c>s.</p>
+
+ <p><em>Example 1.</em> Require the variable <c>myvar</c>:</p>
+
+ <pre>
+ ok = ct:require(myvar).</pre>
+
+ <p>In this case the configuration file must at least contain:</p>
+
+ <pre>
+ {myvar,Value}.</pre>
+
+ <p><em>Example 2.</em> Require key <c>myvar</c> with subkeys
+ <c>sub1</c> and <c>sub2</c>:</p>
+
+ <pre>
+ ok = ct:require({myvar,[sub1,sub2]}).</pre>
+
+ <p>In this case the configuration file must at least contain:</p>
+
+ <pre>
+ {myvar,[{sub1,Value},{sub2,Value}]}.</pre>
+
+      <p><em>Example 3.</em> Require key <c>myvar</c> with subkey
+      <c>sub1</c> with subsubkey <c>sub2</c>:</p>
+
+ <pre>
+ ok = ct:require({myvar,sub1,sub2}).</pre>
+
+ <p>In this case the configuration file must at least contain:</p>
+
+ <pre>
+ {myvar,[{sub1,[{sub2,Value}]}]}.</pre>
+
+ <p>See also
+ <seealso marker="#get_config-1"><c>ct:get_config/1</c></seealso>,
+ <seealso marker="#get_config-2"><c>ct:get_config/2</c></seealso>,
+ <seealso marker="#get_config-3"><c>ct:get_config/3</c></seealso>,
+ <seealso marker="#require-2"><c>ct:require/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>require(Name, Required) -&gt; ok | {error, Reason}</name>
+ <fsummary>Checks if the required configuration is available and gives
+ it a name.</fsummary>
+ <type>
+ <v>Name = atom()</v>
+ <v>Required = Key | {Key, SubKey} | {Key, SubKey, SubKey}</v>
+ <v>SubKey = Key</v>
+ <v>Key = atom()</v>
+ </type>
+ <desc><marker id="require-2"/>
+ <p>Checks if the required configuration is available and gives it a
+ name. The semantics for <c>Required</c> is the same as in
+ <seealso marker="#require-1"><c>ct:require/1</c></seealso> except
+ that a list of <c>SubKey</c>s cannot be specified.</p>
+
+ <p>If the requested data is available, the subentry is associated
+ with <c>Name</c> so that the value of the element can be read with
+ <seealso marker="#get_config-1"><c>ct:get_config/1,2</c></seealso>
+ provided <c>Name</c> is used instead of the whole <c>Required</c>
+ term.</p>
+
+ <p><em>Example:</em></p>
+
+ <p>Require one node with a Telnet connection and an FTP connection.
+ Name the node <c>a</c>:</p>
+
+ <pre>
+ ok = ct:require(a,{machine,node}).</pre>
+
+ <p>All references to this node can then use the node name. For
+      example, a file is fetched over FTP as follows:</p>
+
+ <pre>
+   ok = ct_ftp:get(a,RemoteFile,LocalFile).</pre>
+
+ <p>For this to work, the configuration file must at least contain:</p>
+
+ <pre>
+ {machine,[{node,[{telnet,IpAddr},{ftp,IpAddr}]}]}.</pre>
+
+ <note><p>The behavior of this function changed radically in
+      <c>Common Test</c> 1.6.2. To keep some backwards compatibility,
+ it is still possible to do:<br/>
+ <c>ct:require(a,{node,[telnet,ftp]}).</c><br/>
+ This associates the name <c>a</c> with the top-level <c>node</c>
+ entry. For this to work, the configuration file must at least
+ contain:<br/>
+ <c>{node,[{telnet,IpAddr},{ftp,IpAddr}]}.</c></p>
+ </note>
+
+ <p>See also
+ <seealso marker="#get_config-1"><c>ct:get_config/1</c></seealso>,
+ <seealso marker="#get_config-2"><c>ct:get_config/2</c></seealso>,
+ <seealso marker="#get_config-3"><c>ct:get_config/3</c></seealso>,
+ <seealso marker="#require-1"><c>ct:require/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run(TestDirs) -&gt; Result</name>
+ <fsummary>Runs all test cases in all suites in the specified
+ directories.</fsummary>
+ <type>
+ <v>TestDirs = TestDir | [TestDir]</v>
+ </type>
+ <desc><marker id="run-1"/>
+ <p>Runs all test cases in all suites in the specified directories.</p>
+
+ <p>See also <seealso marker="#run-3"><c>ct:run/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run(TestDir, Suite) -&gt; Result</name>
+ <fsummary>Runs all test cases in the specified suite.</fsummary>
+ <desc><marker id="run-2"/>
+ <p>Runs all test cases in the specified suite.</p>
+
+ <p>See also <seealso marker="#run-3"><c>ct:run/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run(TestDir, Suite, Cases) -&gt; Result</name>
+ <fsummary>Runs the specified test cases.</fsummary>
+ <type>
+ <v>TestDir = string()</v>
+ <v>Suite = atom()</v>
+ <v>Cases = atom() | [atom()]</v>
+ <v>Result = [TestResult] | {error, Reason}</v>
+ </type>
+ <desc><marker id="run-3"/>
+ <p>Runs the specified test cases.</p>
+
+ <p>Requires that
+ <seealso marker="#install-1"><c>ct:install/1</c></seealso> has been
+ run first.</p>
+
+      <p>Suite files (<c>*_SUITE.erl</c>) must be stored in <c>TestDir</c>
+ or <c>TestDir/test</c>. All suites are compiled when the test is
+ run.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run_test(Opts) -&gt; Result</name>
+ <fsummary>Runs tests as specified by the combination of options in
+ Opts.</fsummary>
+ <type>
+ <v>Opts = [OptTuples]</v>
+ <v>OptTuples = {dir, TestDirs} | {suite, Suites} | {group, Groups} | {testcase, Cases} | {spec, TestSpecs} | {join_specs, Bool} | {label, Label} | {config, CfgFiles} | {userconfig, UserConfig} | {allow_user_terms, Bool} | {logdir, LogDir} | {silent_connections, Conns} | {stylesheet, CSSFile} | {cover, CoverSpecFile} | {cover_stop, Bool} | {step, StepOpts} | {event_handler, EventHandlers} | {include, InclDirs} | {auto_compile, Bool} | {abort_if_missing_suites, Bool} | {create_priv_dir, CreatePrivDir} | {multiply_timetraps, M} | {scale_timetraps, Bool} | {repeat, N} | {duration, DurTime} | {until, StopTime} | {force_stop, ForceStop} | {decrypt, DecryptKeyOrFile} | {refresh_logs, LogDir} | {logopts, LogOpts} | {verbosity, VLevels} | {basic_html, Bool} | {ct_hooks, CTHs} | {enable_builtin_hooks, Bool} | {release_shell, Bool}</v>
+ <v>TestDirs = [string()] | string()</v>
+ <v>Suites = [string()] | [atom()] | string() | atom()</v>
+ <v>Cases = [atom()] | atom()</v>
+ <v>Groups = GroupNameOrPath | [GroupNameOrPath]</v>
+ <v>GroupNameOrPath = [atom()] | atom() | all</v>
+ <v>TestSpecs = [string()] | string()</v>
+ <v>Label = string() | atom()</v>
+ <v>CfgFiles = [string()] | string()</v>
+ <v>UserConfig = [{CallbackMod, CfgStrings}] | {CallbackMod, CfgStrings}</v>
+ <v>CallbackMod = atom()</v>
+ <v>CfgStrings = [string()] | string()</v>
+ <v>LogDir = string()</v>
+ <v>Conns = all | [atom()]</v>
+ <v>CSSFile = string()</v>
+ <v>CoverSpecFile = string()</v>
+ <v>StepOpts = [StepOpt] | []</v>
+ <v>StepOpt = config | keep_inactive</v>
+ <v>EventHandlers = EH | [EH]</v>
+ <v>EH = atom() | {atom(), InitArgs} | {[atom()], InitArgs}</v>
+ <v>InitArgs = [term()]</v>
+ <v>InclDirs = [string()] | string()</v>
+ <v>CreatePrivDir = auto_per_run | auto_per_tc | manual_per_tc</v>
+ <v>M = integer()</v>
+ <v>N = integer()</v>
+ <v>DurTime = string(HHMMSS)</v>
+ <v>StopTime = string(YYMoMoDDHHMMSS) | string(HHMMSS)</v>
+ <v>ForceStop = skip_rest | Bool</v>
+ <v>DecryptKeyOrFile = {key, DecryptKey} | {file, DecryptFile}</v>
+ <v>DecryptKey = string()</v>
+ <v>DecryptFile = string()</v>
+ <v>LogOpts = [LogOpt]</v>
+ <v>LogOpt = no_nl | no_src</v>
+ <v>VLevels = VLevel | [{Category, VLevel}]</v>
+ <v>VLevel = integer()</v>
+ <v>Category = atom()</v>
+ <v>CTHs = [CTHModule | {CTHModule, CTHInitArgs}]</v>
+ <v>CTHModule = atom()</v>
+ <v>CTHInitArgs = term()</v>
+ <v>Result = {Ok, Failed, {UserSkipped, AutoSkipped}} | TestRunnerPid | {error, Reason}</v>
+ <v>Ok = integer()</v>
+ <v>Failed = integer()</v>
+ <v>UserSkipped = integer()</v>
+ <v>AutoSkipped = integer()</v>
+ <v>TestRunnerPid = pid()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="run_test-1"/>
+ <p>Runs tests as specified by the combination of options in
+ <c>Opts</c>. The options are the same as those used with program
+ <c>ct_run</c>, see <seealso marker="ct_run#ct_run">Run Tests from
+ Command Line</seealso> in the <c>ct_run</c> manual page.</p>
+      <p>Here a <c>TestDir</c> can be used to specify the path to a
+ <c>Suite</c>. Option <c>testcase</c> corresponds to option
+ <c>-case</c> in program <c>ct_run</c>. Configuration files
+ specified in <c>Opts</c> are installed automatically at startup.</p>
+
+ <p><c>TestRunnerPid</c> is returned if <c>release_shell == true</c>.
+ For details, see
+ <seealso marker="#break-1"><c>ct:break/1</c></seealso>.</p>
+
+ <p><c>Reason</c> indicates the type of error encountered.</p>
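+
+      <p><em>Example</em> (an illustrative sketch; the directory names
+      are hypothetical):</p>
+
+      <pre>
+   {Ok,Failed,{UserSkipped,AutoSkipped}} =
+       ct:run_test([{dir,"./test"},{logdir,"./logs"}]).</pre>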
+ </desc>
+ </func>
+
+ <func>
+ <name>run_testspec(TestSpec) -&gt; Result</name>
+ <fsummary>Runs a test specified by TestSpec.</fsummary>
+ <type>
+ <v>TestSpec = [term()]</v>
+ <v>Result = {Ok, Failed, {UserSkipped, AutoSkipped}} | {error, Reason}</v>
+ <v>Ok = integer()</v>
+ <v>Failed = integer()</v>
+ <v>UserSkipped = integer()</v>
+ <v>AutoSkipped = integer()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="run_testspec-1"/>
+ <p>Runs a test specified by <c>TestSpec</c>. The same terms are used
+ as in test specification files.</p>
+
+ <p><c>Reason</c> indicates the type of error encountered.</p>
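+
+      <p><em>Example</em> (an illustrative sketch; the suite name and
+      directories are hypothetical):</p>
+
+      <pre>
+   ct:run_testspec([{logdir,"./logs"},
+                    {suites,"./test",[my_SUITE]}]).</pre>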
+ </desc>
+ </func>
+
+ <func>
+ <name>sleep(Time) -&gt; ok</name>
+ <fsummary>This function, similar to timer:sleep/1, suspends the
+ test case for a specified time.</fsummary>
+ <type>
+ <v>Time = {hours, Hours} | {minutes, Mins} | {seconds, Secs} | Millisecs | infinity</v>
+ <v>Hours = integer()</v>
+ <v>Mins = integer()</v>
+ <v>Secs = integer()</v>
+ <v>Millisecs = integer() | float()</v>
+ </type>
+ <desc><marker id="sleep-1"/>
+ <p>This function, similar to <c>timer:sleep/1</c> in <c>STDLIB</c>,
+ suspends the test case for a specified time.
+      However, this function also multiplies <c>Time</c> by the
+ <c>multiply_timetraps</c> value (if set) and under certain
+ circumstances also scales up the time automatically if
+ <c>scale_timetraps</c> is set to <c>true</c> (default is
+ <c>false</c>).</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>start_interactive() -&gt; ok</name>
+ <fsummary>Starts <c>Common Test</c> in interactive mode.</fsummary>
+ <desc><marker id="start_interactive-0"/>
+ <p>Starts <c>Common Test</c> in interactive mode.</p>
+
+ <p>From this mode, all test case support functions can be executed
+ directly from the Erlang shell. The interactive mode can also be
+ started from the OS command line with <c>ct_run -shell
+ [-config File...]</c>.</p>
+
+ <p>If any functions (for example, Telnet or FTP) using
+ "required configuration data" are to be called from the Erlang shell,
+ configuration data must first be required with
+ <seealso marker="#require-2"><c>ct:require/2</c></seealso>.</p>
+
+ <p><em>Example:</em></p>
+
+ <pre>
+ &gt; ct:require(unix_telnet, unix).
+ ok
+ &gt; ct_telnet:open(unix_telnet).
+ {ok,&lt;0.105.0&gt;}
+ &gt; ct_telnet:cmd(unix_telnet, "ls .").
+ {ok,["ls","file1 ...",...]}</pre>
+ </desc>
+ </func>
+
+ <func>
+ <name>step(TestDir, Suite, Case) -&gt; Result</name>
+ <fsummary>Steps through a test case with the debugger.</fsummary>
+ <type>
+ <v>Case = atom()</v>
+ </type>
+ <desc><marker id="step-3"/>
+ <p>Steps through a test case with the debugger.</p>
+
+ <p>See also <seealso marker="#run-3"><c>ct:run/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>step(TestDir, Suite, Case, Opts) -&gt; Result</name>
+ <fsummary>Steps through a test case with the debugger.</fsummary>
+ <type>
+ <v>Case = atom()</v>
+ <v>Opts = [Opt] | []</v>
+ <v>Opt = config | keep_inactive</v>
+ </type>
+ <desc><marker id="step-4"/>
+ <p>Steps through a test case with the debugger. If option
+      <c>config</c> has been specified, breakpoints are also set on
+ the configuration functions in <c>Suite</c>.</p>
+
+ <p>See also <seealso marker="#run-3"><c>ct:run/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>stop_interactive() -&gt; ok</name>
+ <fsummary>Exits the interactive mode.</fsummary>
+ <desc><marker id="stop_interactive-0"/>
+ <p>Exits the interactive mode.</p>
+
+ <p>See also
+ <seealso marker="#start_interactive-0"><c>ct:start_interactive/0</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name>sync_notify(Name, Data) -&gt; ok</name>
+ <fsummary>Sends a synchronous notification of type Name with Data to
+ the <c>Common Test</c> event manager.</fsummary>
+ <type>
+ <v>Name = atom()</v>
+ <v>Data = term()</v>
+ </type>
+ <desc><marker id="sync_notify-2"/>
+ <p>Sends a synchronous notification of type <c>Name</c> with
+ <c>Data</c> to the <c>Common Test</c> event manager. This can later be
+ caught by any installed event manager.</p>
+
+ <p>See also
+ <seealso marker="stdlib:gen_event"><c>stdlib:gen_event(3)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name>testcases(TestDir, Suite) -&gt; Testcases | {error, Reason}</name>
+ <fsummary>Returns all test cases in the specified suite.</fsummary>
+ <type>
+ <v>TestDir = string()</v>
+ <v>Suite = atom()</v>
+ <v>Testcases = list()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="testcases-2"/>
+ <p>Returns all test cases in the specified suite.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>timetrap(Time) -&gt; ok</name>
+ <fsummary>Sets a new timetrap for the running test case.</fsummary>
+ <type>
+ <v>Time = {hours, Hours} | {minutes, Mins} | {seconds, Secs} | Millisecs | infinity | Func</v>
+ <v>Hours = integer()</v>
+ <v>Mins = integer()</v>
+ <v>Secs = integer()</v>
+ <v>Millisecs = integer() | float()</v>
+ <v>Func = {M, F, A} | function()</v>
+ <v>M = atom()</v>
+ <v>F = atom()</v>
+ <v>A = list()</v>
+ </type>
+ <desc><marker id="timetrap-1"/>
+ <p>Sets a new timetrap for the running test case.</p>
+
+ <p>If the argument is <c>Func</c>, the timetrap is triggered when
+ this function returns. <c>Func</c> can also return a new
+ <c>Time</c> value, which in that case is the value for the new
+ timetrap.</p>
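+
+      <p><em>Example</em> (an illustrative sketch):</p>
+
+      <pre>
+   %% Set a plain one-minute timetrap:
+   ct:timetrap({minutes,1}),
+   %% Or let a fun decide: after 10 seconds the fun returns a new
+   %% time, which becomes the new timetrap:
+   ct:timetrap(fun() -&gt; timer:sleep(10000), {seconds,30} end).</pre>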
+ </desc>
+ </func>
+
+ <func>
+ <name>userdata(TestDir, Suite) -&gt; SuiteUserData | {error, Reason}</name>
+ <fsummary>Returns any data specified with tag userdata in the list of
+ tuples returned from Suite:suite/0.</fsummary>
+ <type>
+ <v>TestDir = string()</v>
+ <v>Suite = atom()</v>
+ <v>SuiteUserData = [term()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="userdata-2"/>
+ <p>Returns any data specified with tag <c>userdata</c> in the list
+ of tuples returned from
+ <seealso marker="common_test#Module:suite-0"><c>suite/0</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+      <name>userdata(TestDir, Suite, GroupOrCase) -&gt; TCUserData | {error, Reason}</name>
+ <fsummary>Returns any data specified with tag userdata in the list of
+ tuples returned from Suite:group(GroupName) or Suite:Case().</fsummary>
+ <type>
+ <v>TestDir = string()</v>
+ <v>Suite = atom()</v>
+ <v>GroupOrCase = {group, GroupName} | atom()</v>
+ <v>GroupName = atom()</v>
+ <v>TCUserData = [term()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="userdata-3"/>
+ <p>Returns any data specified with tag <c>userdata</c> in the list
+ of tuples returned from <c>Suite:group(GroupName)</c> or
+ <c>Suite:Case()</c>.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_cover.xml b/lib/common_test/doc/src/ct_cover.xml
new file mode 100644
index 0000000000..be09c08a68
--- /dev/null
+++ b/lib/common_test/doc/src/ct_cover.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_cover</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_cover.xml</file>
+ </header>
+ <module>ct_cover</module>
+ <modulesummary>Common Test framework code coverage support module.
+ </modulesummary>
+
+<description>
+
+ <p><c>Common Test</c> framework code coverage support module.</p>
+
+ <p>This module exports help functions for performing code coverage
+ analysis.</p>
+
+</description>
+
+ <funcs>
+ <func>
+ <name>add_nodes(Nodes) -&gt; {ok, StartedNodes} | {error, Reason}</name>
+      <fsummary>Adds nodes to the current cover test (only works if cover support
+ is active).</fsummary>
+ <type>
+ <v>Nodes = [atom()]</v>
+ <v>StartedNodes = [atom()]</v>
+ <v>Reason = cover_not_running | not_main_node</v>
+ </type>
+ <desc><marker id="add_nodes-1"/>
+      <p>Adds nodes to the current cover test. Notice that this only works if
+ cover support is active.</p>
+
+      <p>To have an effect, this function is to be called from
+ <c>init_per_suite/1</c> (see
+ <seealso marker="common_test"><c>common_test</c></seealso>)
+ before any tests are performed.</p>
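+
+      <p><em>Example</em> (an illustrative sketch; the node name is
+      hypothetical):</p>
+
+      <pre>
+   init_per_suite(Config) -&gt;
+       {ok,_Started} = ct_cover:add_nodes(['node1@host1']),
+       Config.</pre>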
+ </desc>
+ </func>
+
+ <func>
+ <name>cross_cover_analyse(Level, Tests) -&gt; ok</name>
+ <fsummary>Accumulates cover results over multiple tests.</fsummary>
+ <type>
+ <v>Level = overview | details</v>
+ <v>Tests = [{Tag, Dir}]</v>
+ <v>Tag = atom()</v>
+ <v>Dir = string()</v>
+ </type>
+ <desc><marker id="cross_cover_analyse-2"/>
+ <p>Accumulates cover results over multiple tests. See section
+ <seealso marker="cover_chapter#cross_cover">Cross Cover
+      Analysis</seealso> in the User's Guide.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>remove_nodes(Nodes) -&gt; ok | {error, Reason}</name>
+ <fsummary>Removes nodes from the current cover test.</fsummary>
+ <type>
+ <v>Nodes = [atom()]</v>
+ <v>Reason = cover_not_running | not_main_node</v>
+ </type>
+ <desc><marker id="remove_nodes-1"/>
+ <p>Removes nodes from the current cover test.</p>
+
+      <p>Call this function to stop the cover test on nodes previously
+ added with
+ <seealso marker="#add_nodes-1"><c>ct_cover:add_nodes/1</c></seealso>.
+      Results on the remote nodes are transferred to the <c>Common Test</c>
+ node.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_ftp.xml b/lib/common_test/doc/src/ct_ftp.xml
new file mode 100644
index 0000000000..0598dcbe3e
--- /dev/null
+++ b/lib/common_test/doc/src/ct_ftp.xml
@@ -0,0 +1,277 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_ftp</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_ftp.xml</file>
+ </header>
+ <module>ct_ftp</module>
+ <modulesummary>FTP client module (based on the FTP support of the Inets
+ application).</modulesummary>
+
+ <description>
+
+ <p>FTP client module (based on the FTP support of the <c>Inets</c>
+ application).</p>
+
+ </description>
+
+ <section>
+ <title>Data Types</title>
+ <marker id="types"/>
+ <taglist>
+ <tag><c>connection() = handle() | target_name()</c></tag>
+ <item><marker id="type-connection"/>
+ <p>For <c>target_name</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+
+      <tag><c>handle() = ct:handle()</c></tag>
+ <item><marker id="type-handle"/>
+ <p>Handle for a specific FTP connection, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+ </taglist>
+ </section>
+
+ <funcs>
+ <func>
+ <name>cd(Connection, Dir) -&gt; ok | {error, Reason}</name>
+ <fsummary>Changes directory on remote host.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Dir = string()</v>
+ </type>
+ <desc><marker id="cd-2"/>
+ <p>Changes directory on remote host.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>close(Connection) -&gt; ok | {error, Reason}</name>
+ <fsummary>Closes the FTP connection.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ </type>
+ <desc><marker id="close-1"/>
+ <p>Closes the FTP connection.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>delete(Connection, File) -&gt; ok | {error, Reason}</name>
+ <fsummary>Deletes a file on remote host.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>File = string()</v>
+ </type>
+ <desc><marker id="delete-2"/>
+ <p>Deletes a file on remote host.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get(KeyOrName, RemoteFile, LocalFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Opens an FTP connection and fetches a file from the remote
+ host.</fsummary>
+ <type>
+ <v>KeyOrName = Key | Name</v>
+ <v>Key = atom()</v>
+ <v>Name = target_name()</v>
+ <v>RemoteFile = string()</v>
+ <v>LocalFile = string()</v>
+ </type>
+ <desc><marker id="get-3"/>
+ <p>Opens an FTP connection and fetches a file from the remote
+ host.</p>
+
+ <p><c>RemoteFile</c> and <c>LocalFile</c> must be absolute paths.</p>
+
+ <p>The configuration file must be as for
+ <seealso marker="#put-3"><c>ct_ftp:put/3</c></seealso>.</p>
+
+ <p>For <c>target_name</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p>
+
+ <p>See also
+ <seealso marker="ct#require-2"><c>ct:require/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>ls(Connection, Dir) -&gt; {ok, Listing} | {error, Reason}</name>
+ <fsummary>Lists directory Dir.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Dir = string()</v>
+ <v>Listing = string()</v>
+ </type>
+ <desc><marker id="ls-2"/>
+ <p>Lists directory <c>Dir</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(KeyOrName) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Opens an FTP connection to the specified node.</fsummary>
+ <type>
+ <v>KeyOrName = Key | Name</v>
+ <v>Key = atom()</v>
+ <v>Name = target_name()</v>
+ <v>Handle = handle()</v>
+ </type>
+ <desc><marker id="open-1"/>
+ <p>Opens an FTP connection to the specified node.</p>
+
+ <p>You can open a connection for a particular <c>Name</c> and use the
+      same name as a reference for all subsequent operations.
+ If you want
+ the connection to be associated with <c>Handle</c> instead (if you,
+ for example, need to open multiple connections to a host), use
+ <c>Key</c>, the configuration variable name, to specify the target.
+ A connection without an associated target name can only be closed
+ with the handle value.</p>
+
+ <p>For information on how to create a new <c>Name</c>, see
+ <seealso marker="ct#require-2"><c>ct:require/2</c></seealso>.</p>
+
+ <p>For <c>target_name</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>put(KeyOrName, LocalFile, RemoteFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Opens an FTP connection and sends a file to the remote
+ host.</fsummary>
+ <type>
+ <v>KeyOrName = Key | Name</v>
+ <v>Key = atom()</v>
+ <v>Name = target_name()</v>
+ <v>LocalFile = string()</v>
+ <v>RemoteFile = string()</v>
+ </type>
+ <desc><marker id="put-3"/>
+ <p>Opens an FTP connection and sends a file to the remote host.</p>
+
+ <p><c>LocalFile</c> and <c>RemoteFile</c> must be absolute paths.</p>
+
+ <p>For <c>target_name</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p>
+
+ <p>If the target host is a "special" node, the FTP address must be
+ specified in the configuration file as follows:</p>
+
+ <pre>
+ {node,[{ftp,IpAddr}]}.</pre>
+
+ <p>If the target host is something else, for example, a UNIX host,
+ the configuration file must also include the username and password
+ (both strings):</p>
+
+ <pre>
+ {unix,[{ftp,IpAddr},
+ {username,Username},
+ {password,Password}]}.</pre>
+
+ <p>See also
+ <seealso marker="ct#require-2"><c>ct:require/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>recv(Connection, RemoteFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Fetches a file over FTP.</fsummary>
+ <desc><marker id="recv-2"/>
+ <p>Fetches a file over FTP.</p>
+
+ <p>The file gets the same name on the local host.</p>
+
+ <p>See also <seealso marker="#recv-3"><c>ct_ftp:recv/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>recv(Connection, RemoteFile, LocalFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Fetches a file over FTP.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>RemoteFile = string()</v>
+ <v>LocalFile = string()</v>
+ </type>
+ <desc><marker id="recv-3"/>
+ <p>Fetches a file over FTP.</p>
+
+ <p>The file is named <c>LocalFile</c> on the local host.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(Connection, LocalFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Sends a file over FTP.</fsummary>
+ <desc><marker id="send-2"/>
+ <p>Sends a file over FTP.</p>
+
+ <p>The file gets the same name on the remote host.</p>
+
+ <p>See also
+ <seealso marker="#send-3"><c>ct_ftp:send/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(Connection, LocalFile, RemoteFile) -&gt; ok | {error, Reason}</name>
+ <fsummary>Sends a file over FTP.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>LocalFile = string()</v>
+ <v>RemoteFile = string()</v>
+ </type>
+ <desc><marker id="send-3"/>
+ <p>Sends a file over FTP.</p>
+
+ <p>The file is named <c>RemoteFile</c> on the remote host.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>type(Connection, Type) -&gt; ok | {error, Reason}</name>
+ <fsummary>Changes the file transfer type.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Type = ascii | binary</v>
+ </type>
+ <desc><marker id="type-2"/>
+ <p>Changes the file transfer type.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_hooks.xml b/lib/common_test/doc/src/ct_hooks.xml
index a9f9450dd7..12ec3bcec3 100644
--- a/lib/common_test/doc/src/ct_hooks.xml
+++ b/lib/common_test/doc/src/ct_hooks.xml
@@ -1,5 +1,4 @@
<?xml version="1.0" encoding="UTF-8" ?>
-
<!DOCTYPE erlref SYSTEM "erlref.dtd">
<erlref>
@@ -12,7 +11,7 @@
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
@@ -34,539 +33,572 @@
<file>ct_hooks.sgml</file>
</header>
<module>ct_hooks</module>
- <modulesummary>A callback interface on top of Common Test</modulesummary>
+ <modulesummary>A callback interface on top of Common Test.</modulesummary>
<description>
- <p>The <em>Common Test Hook</em> (henceforth called CTH) framework allows
- extensions of the default behaviour of Common Test by means of callbacks
- before and after all test suite calls. It is meant for advanced users of
- Common Test which want to abstract out behaviour which is common to
- multiple test suites. </p>
+ <p>The <em>Common Test Hook (CTH)</em> framework allows extensions of the
+ default behavior of <c>Common Test</c> by callbacks before and after all
+ test suite calls. It is intended for advanced users of <c>Common Test</c>
+ who want to abstract out behavior that is common to multiple test suites.
+ </p>
- <p>In brief, Common Test Hooks allows you to:</p>
+ <p>In brief, CTH allows you to:</p>
- <list>
- <item>Manipulate the runtime config before each suite
- configuration call</item>
- <item>Manipulate the return of all suite configuration calls and in
- extension the result of the test themselves.</item>
+ <list type="bulleted">
+ <item><p>Manipulate the runtime configuration before each suite
+ configuration call.</p></item>
+      <item><p>Manipulate the return of all suite configuration calls and by
+        extension the result of the tests themselves.</p></item>
</list>
<p>The following sections describe the mandatory and optional CTH
- functions Common Test will call during test execution. For more details
- see <seealso marker="ct_hooks_chapter">Common Test Hooks</seealso> in
- the User's Guide.</p>
+ functions that <c>Common Test</c> calls during test execution.
+ For more details, see section
+ <seealso marker="ct_hooks_chapter">Common Test Hooks</seealso> in the
+ User's Guide.</p>
- <p>For information about how to add a CTH to your suite see
- <seealso marker="ct_hooks_chapter#installing">Installing a CTH
- </seealso> in the User's Guide.</p>
+ <p>For information about how to add a CTH to your suite, see section
+ <seealso marker="ct_hooks_chapter#installing">Installing a CTH</seealso>
+ in the User's Guide.</p>
+
+ <note><p>For a minimal example of a CTH, see section
+ <seealso marker="ct_hooks_chapter#example">Example CTH</seealso>
+ in the User's Guide.</p></note>
- <note><p>See the
- <seealso marker="ct_hooks_chapter#example">Example CTH</seealso>
- in the User's Guide for a minimal example of a CTH. </p></note>
-
</description>
<section>
- <title>CALLBACK FUNCTIONS</title>
- <p>The following functions define the callback interface
- for a Common Test Hook.</p>
+ <title>Callback Functions</title>
+ <p>The following functions define the callback interface for a CTH.</p>
</section>
<funcs>
<func>
- <name>Module:init(Id, Opts) -&gt; {ok, State} |
- {ok, State, Priority}</name>
- <fsummary>Initiates the Common Test Hook</fsummary>
+ <name>Module:init(Id, Opts) -&gt; {ok, State} | {ok, State, Priority}</name>
+ <fsummary>Initiates the Common Test Hook.</fsummary>
<type>
- <v>Id = reference() | term()</v>
- <v>Opts = term()</v>
- <v>State = term()</v>
- <v>Priority = integer()</v>
+ <v>Id = reference() | term()</v>
+ <v>Opts = term()</v>
+ <v>State = term()</v>
+ <v>Priority = integer()</v>
</type>
-
<desc>
- <p> MANDATORY </p>
-
- <p>Always called before any other callback function.
- Use this to initiate any common state.
- It should return a state for this CTH.</p>
-
- <p><c>Id</c> is the return value of
- <seealso marker="#Module:id-1">id/1</seealso>, or a <c>reference</c>
- (created using
- <seealso marker="erts:erlang#make_ref-0">make_ref/0</seealso>)
- if <seealso marker="#Module:id-1">id/1</seealso> is not implemented.
- </p>
-
- <p><c>Priority</c> is the relative priority of this hook. Hooks with a
- lower priority will be executed first. If no priority is given,
- it will be set to 0. </p>
-
- <p>For details about when init is called see
- <seealso marker="ct_hooks_chapter#scope">scope</seealso>
- in the User's Guide.</p>
-
+ <p>MANDATORY</p>
+
+ <p>This function is always called before any other callback function.
+ Use it to initiate any common state. It is to return a state for
+ this CTH.</p>
+
+ <p><c>Id</c> is either the return value of
+ <seealso marker="#Module:id-1"><c>ct_hooks:id/1</c></seealso>,
+ or a <c>reference</c> (created using
+ <seealso marker="erts:erlang#make_ref-0">erlang:make_ref/0</seealso>
+ in <c>ERTS</c>) if
+ <seealso marker="#Module:id-1"><c>ct_hooks:id/1</c></seealso>
+ is not implemented.</p>
+
+ <p><c>Priority</c> is the relative priority of this hook. Hooks with a
+ lower priority are executed first. If no priority is specified, it
+ is set to <c>0</c>.</p>
+
+ <p>For details about when <c>init</c> is called, see section
+ <seealso marker="ct_hooks_chapter#scope">CTH Scope</seealso>
+ in the User's Guide.</p>
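+
+        <p>A minimal sketch of an <c>init/2</c> implementation (using
+          <c>Opts</c> as the state and priority <c>10</c> is only an
+          example choice):</p>
+
+        <code>
+ init(_Id, Opts) -&gt;
+     %% Keep the installation options as the CTH state and run this
+     %% hook before hooks with a higher priority value.
+     {ok, Opts, 10}.</code>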
</desc>
</func>
<func>
- <name>Module:pre_init_per_suite(SuiteName, InitData, CTHState) -&gt;
- Result</name>
- <fsummary>Called before init_per_suite</fsummary>
+ <name>Module:pre_init_per_suite(SuiteName, InitData, CTHState) -&gt; Result</name>
+ <fsummary>Called before init_per_suite.</fsummary>
<type>
- <v>SuiteName = atom()</v>
- <v>InitData = Config | SkipOrFail</v>
- <v>Config = NewConfig = [{Key,Value}]</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {Return, NewCTHState}</v>
- <v>Return = NewConfig | SkipOrFail</v>
- <v>SkipOrFail = {fail, Reason} | {skip, Reason}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>SuiteName = atom()</v>
+ <v>InitData = Config | SkipOrFail</v>
+ <v>Config = NewConfig = [{Key,Value}]</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {Return, NewCTHState}</v>
+ <v>Return = NewConfig | SkipOrFail</v>
+ <v>SkipOrFail = {fail, Reason} | {skip, Reason}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called before
- <seealso marker="common_test#Module:init_per_suite-1">
- init_per_suite</seealso> if it exists.
- It typically contains initialization/logging which needs to be done
- before init_per_suite is called.
- If <c>{skip,Reason}</c> or <c>{fail,Reason}</c> is returned,
- init_per_suite and all test cases of the suite will be skipped and
- Reason printed in the overview log of the suite.</p>
-
- <p><c>SuiteName</c> is the name of the suite to be run.</p>
-
- <p><c>InitData</c> is the original config list of the test suite, or
- a <c>SkipOrFail</c> tuple if a previous CTH has returned this.</p>
-
- <p><c>CTHState</c> is the current internal state of the CTH.</p>
-
- <p><c>Return</c> is the result of the init_per_suite function.
- If it is <c>{skip,Reason}</c> or <c>{fail,Reason}</c>
- <seealso marker="common_test#Module:init_per_suite-1">init_per_suite
- </seealso> will never be called, instead the initiation is considered
- to be skipped/failed respectively. If a <c>NewConfig</c> list
- is returned, <seealso marker="common_test#Module:init_per_suite-1">
- init_per_suite</seealso> will be called with that <c>NewConfig</c> list.
- See <seealso marker="ct_hooks_chapter#pre">
- Pre Hooks</seealso> in the User's Guide for more details.</p>
-
-
- <p>Note that this function is only called if the CTH has been added
- before init_per_suite is run, see
- <seealso marker="ct_hooks_chapter#scope">CTH Scoping</seealso>
- in the User's Guide for details.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called before
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>
+ if it exists. It typically contains initialization/logging that must
+ be done before <c>init_per_suite</c> is called. If
+ <c>{skip,Reason}</c> or <c>{fail,Reason}</c> is returned,
+ <c>init_per_suite</c> and all test cases of the suite are skipped
+          and <c>Reason</c> is printed in the overview log of the suite.</p>
+
+ <p><c>SuiteName</c> is the name of the suite to be run.</p>
+
+ <p><c>InitData</c> is the original configuration list of the test
+ suite, or a <c>SkipOrFail</c> tuple if a previous CTH has returned
+ this.</p>
+
+ <p><c>CTHState</c> is the current internal state of the CTH.</p>
+
+ <p><c>Return</c> is the result of the <c>init_per_suite</c> function.
+ If it is <c>{skip,Reason}</c> or <c>{fail,Reason}</c>,
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>
+ is never called, instead the initiation is considered to be
+ skipped or failed, respectively. If a <c>NewConfig</c> list is
+ returned,
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>
+ is called with that <c>NewConfig</c> list. For more details, see
+ section <seealso marker="ct_hooks_chapter#pre">Pre Hooks</seealso>
+ in the User's Guide.</p>
+
+        <p>This function is called only if the CTH is added before
+          <c>init_per_suite</c> is run. For details, see section
+ <seealso marker="ct_hooks_chapter#scope">CTH Scope</seealso>
+ in the User's Guide.</p>
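+
+        <p>A sketch of a hook that skips a whole suite unless a resource
+          is available (<c>resource_available/0</c> is a hypothetical
+          helper):</p>
+
+        <code>
+ pre_init_per_suite(_SuiteName, Config, CTHState) -&gt;
+     case resource_available() of
+         true  -&gt; {Config, CTHState};
+         false -&gt; {{skip, "resource not available"}, CTHState}
+     end.</code>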
</desc>
</func>
-
+
<func>
- <name>Module:post_init_per_suite(SuiteName, Config, Return, CTHState) -&gt;
- Result</name>
- <fsummary>Called after init_per_suite</fsummary>
+ <name>Module:post_init_per_suite(SuiteName, Config, Return, CTHState) -&gt; Result</name>
+ <fsummary>Called after init_per_suite.</fsummary>
<type>
- <v>SuiteName = atom()</v>
- <v>Config = [{Key,Value}]</v>
- <v>Return = NewReturn = Config | SkipOrFail | term()</v>
- <v>SkipOrFail = {fail, Reason} | {skip, Reason} | term()</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewReturn, NewCTHState}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>SuiteName = atom()</v>
+ <v>Config = [{Key,Value}]</v>
+ <v>Return = NewReturn = Config | SkipOrFail | term()</v>
+ <v>SkipOrFail = {fail, Reason} | {skip, Reason} | term()</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewReturn, NewCTHState}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called after
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>
+          if it exists. It typically contains extra checks to ensure that
+          all the correct dependencies are started.</p>
+
+ <p><c>Return</c> is what
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>
+ returned, that is, <c>{fail,Reason}</c>, <c>{skip,Reason}</c>, a
+ <c>Config</c> list, or a term describing how
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>
+ failed.</p>
+
+ <p><c>NewReturn</c> is the possibly modified return value of
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>.
+ To recover from a failure in
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>,
+ return <c>ConfigList</c> with the <c>tc_status</c> element removed.
+ For more details, see
+ <seealso marker="ct_hooks_chapter#post"> Post Hooks</seealso> in
+ section "Manipulating Tests" in the User's Guide.</p>
+
+ <p><c>CTHState</c> is the current internal state of the CTH.</p>
+
+ <p>This function is called only if the CTH is added before or in
+ <c>init_per_suite</c>. For details, see section
+ <seealso marker="ct_hooks_chapter#scope">CTH Scope</seealso>
+ in the User's Guide.</p>
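+
+        <p>A sketch of recovering from a crashing <c>init_per_suite</c>
+          (the <c>{'EXIT',{_,_}}</c> crash format is assumed to match that
+          of test cases; whether recovery is safe must be decided case by
+          case):</p>
+
+        <code>
+ post_init_per_suite(_SuiteName, Config, {'EXIT',{_,_}}, CTHState) -&gt;
+     %% Recover from the crash: continue with the original
+     %% configuration, with the tc_status element removed.
+     {proplists:delete(tc_status, Config), CTHState};
+ post_init_per_suite(_SuiteName, _Config, Return, CTHState) -&gt;
+     {Return, CTHState}.</code>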
+ </desc>
+ </func>
- <p>This function is called after
- <seealso marker="common_test#Module:init_per_suite-1">
- init_per_suite</seealso> if it exists. It typically contains extra
- checks to make sure that all the correct dependencies have
- been started correctly.</p>
-
- <p><c>Return</c> is what
- <seealso marker="common_test#Module:init_per_suite-1">init_per_suite
- </seealso> returned, i.e. {fail,Reason}, {skip,Reason}, a <c>Config</c>
- list or a term describing how
- <seealso marker="common_test#Module:init_per_suite-1">init_per_suite
- </seealso> failed.</p>
-
- <p><c>NewReturn</c> is the possibly modified return value of
- <seealso marker="common_test#Module:init_per_suite-1">init_per_suite
- </seealso>. It is here possible to recover from a failure in
- <seealso marker="common_test#Module:init_per_suite-1">init_per_suite
- </seealso> by returning the <c>ConfigList</c> with the <c>tc_status</c>
- element removed. See <seealso marker="ct_hooks_chapter#post">
- Post Hooks</seealso> in the User's Guide for more details.</p>
-
- <p><c>CTHState</c> is the current internal state of the CTH.</p>
-
- <p>Note that this function is only called if the CTH has been added
- before or in init_per_suite, see
- <seealso marker="ct_hooks_chapter#scope">CTH Scoping</seealso>
- in the User's Guide for details.</p>
+ <func>
+ <name>Module:pre_init_per_group(GroupName, InitData, CTHState) -&gt; Result</name>
+ <fsummary>Called before init_per_group.</fsummary>
+ <type>
+ <v>GroupName = atom()</v>
+ <v>InitData = Config | SkipOrFail</v>
+ <v>Config = NewConfig = [{Key,Value}]</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>OPTIONAL</p>
+
+ <p>This function is called before
+ <seealso marker="common_test#Module:init_per_group-2"><c>init_per_group</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:pre_init_per_suite-3"><c>pre_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:init_per_group-2"><c>init_per_group</c></seealso>
+ instead.</p>
</desc>
</func>
-
+
<func>
- <name>Module:pre_init_per_group(GroupName, InitData, CTHState) -&gt;
- Result</name>
- <fsummary>Called before init_per_group</fsummary>
+ <name>Module:post_init_per_group(GroupName, Config, Return, CTHState) -&gt; Result</name>
+ <fsummary>Called after init_per_group.</fsummary>
<type>
- <v>GroupName = atom()</v>
- <v>InitData = Config | SkipOrFail</v>
- <v>Config = NewConfig = [{Key,Value}]</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>GroupName = atom()</v>
+ <v>Config = [{Key,Value}]</v>
+ <v>Return = NewReturn = Config | SkipOrFail | term()</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewReturn, NewCTHState}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called before
- <seealso marker="common_test#Module:init_per_group-2">
- init_per_group</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:pre_init_per_suite-3">
- pre_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:init_per_group-2">
- init_per_group</seealso> instead.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called after
+ <seealso marker="common_test#Module:init_per_group-2"><c>init_per_group</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:post_init_per_suite-4"><c>post_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:init_per_group-2"><c>init_per_group</c></seealso>
+ instead.</p>
</desc>
</func>
-
+
<func>
- <name>Module:post_init_per_group(GroupName, Config, Return, CTHState) -&gt;
- Result</name>
- <fsummary>Called after init_per_group</fsummary>
+ <name>Module:pre_init_per_testcase(TestcaseName, InitData, CTHState) -&gt; Result</name>
+ <fsummary>Called before init_per_testcase.</fsummary>
<type>
- <v>GroupName = atom()</v>
- <v>Config = [{Key,Value}]</v>
- <v>Return = NewReturn = Config | SkipOrFail | term()</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewReturn, NewCTHState}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>TestcaseName = atom()</v>
+ <v>InitData = Config | SkipOrFail</v>
+ <v>Config = NewConfig = [{Key,Value}]</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called after
- <seealso marker="common_test#Module:init_per_group-2">
- init_per_group</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:post_init_per_suite-4">
- post_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:init_per_group-2">
- init_per_group</seealso> instead.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called before
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:pre_init_per_suite-3"><c>pre_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase</c></seealso>
+ instead.</p>
+
+        <p>CTHs cannot be added here at this time. That feature can
+          possibly be added in a later release, but it would currently
+          break backward compatibility.</p>
</desc>
</func>
<func>
- <name>Module:pre_init_per_testcase(TestcaseName, InitData, CTHState) -&gt;
- Result</name>
- <fsummary>Called before init_per_testcase</fsummary>
+ <name>Module:post_init_per_testcase(TestcaseName, Config, Return, CTHState) -&gt; Result</name>
+ <fsummary>Called after init_per_testcase.</fsummary>
<type>
- <v>TestcaseName = atom()</v>
- <v>InitData = Config | SkipOrFail</v>
- <v>Config = NewConfig = [{Key,Value}]</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>TestcaseName = atom()</v>
+ <v>Config = [{Key,Value}]</v>
+ <v>Return = NewReturn = Config | SkipOrFail | term()</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewReturn, NewCTHState}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called before
- <seealso marker="common_test#Module:init_per_testcase-2">
- init_per_testcase</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:pre_init_per_suite-3">
- pre_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:init_per_testcase-2">
- init_per_testcase</seealso> function instead.</p>
-
- <p>Note that it is not possible to add CTH's here right now,
- that feature might be added later,
- but it would right now break backwards compatibility.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called after
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:post_init_per_suite-4"><c>post_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name>Module:post_end_per_testcase(TestcaseName, Config, Return, CTHState)
- -&gt; Result</name>
- <fsummary>Called after end_per_testcase</fsummary>
+ <name>Module:pre_end_per_testcase(TestcaseName, InitData, CTHState) -&gt; Result</name>
+ <fsummary>Called before end_per_testcase.</fsummary>
<type>
- <v>TestcaseName = atom()</v>
- <v>Config = [{Key,Value}]</v>
- <v>Return = NewReturn = Config | SkipOrFail | term()</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewReturn, NewCTHState}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>TestcaseName = atom()</v>
+ <v>InitData = Config</v>
+ <v>Config = NewConfig = [{Key,Value}]</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewConfig, NewCTHState}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called after
- <seealso marker="common_test#Module:end_per_testcase-2">
- end_per_testcase</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:post_init_per_suite-4">
- post_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:end_per_testcase-2">
- end_per_testcase</seealso> function instead.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called before
+ <seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:pre_end_per_suite-3"><c>pre_end_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso>
+ instead.</p>
+
+        <p>This function cannot change the result of the test case by
+          returning skip or fail tuples, but it can insert items in
+          <c>Config</c> that can be read in <c>end_per_testcase/2</c>
+          or in <c>post_end_per_testcase/4</c>.</p>
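+
+        <p>For example (the <c>cleanup_info</c> key is hypothetical):</p>
+
+        <code>
+ pre_end_per_testcase(_TC, Config, CTHState) -&gt;
+     %% Pass extra data on to end_per_testcase/2 and
+     %% post_end_per_testcase/4.
+     {[{cleanup_info, [stop_db]} | Config], CTHState}.</code>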
</desc>
</func>
<func>
- <name>Module:pre_end_per_group(GroupName, EndData, CTHState) -&gt;
- Result</name>
- <fsummary>Called before end_per_group</fsummary>
+ <name>Module:post_end_per_testcase(TestcaseName, Config, Return, CTHState) -&gt; Result</name>
+ <fsummary>Called after end_per_testcase.</fsummary>
<type>
- <v>GroupName = atom()</v>
- <v>EndData = Config | SkipOrFail</v>
- <v>Config = NewConfig = [{Key,Value}]</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>TestcaseName = atom()</v>
+ <v>Config = [{Key,Value}]</v>
+ <v>Return = NewReturn = Config | SkipOrFail | term()</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewReturn, NewCTHState}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called before
- <seealso marker="common_test#Module:end_per_group-2">
- end_per_group</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:pre_init_per_suite-3">
- pre_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:end_per_group-2">
- end_per_group</seealso> function instead.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called after
+ <seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:post_end_per_suite-4"><c>post_end_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso>
+ instead.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>Module:pre_end_per_group(GroupName, EndData, CTHState) -&gt; Result</name>
+ <fsummary>Called before end_per_group.</fsummary>
+ <type>
+ <v>GroupName = atom()</v>
+ <v>EndData = Config | SkipOrFail</v>
+ <v>Config = NewConfig = [{Key,Value}]</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>OPTIONAL</p>
+
+ <p>This function is called before
+ <seealso marker="common_test#Module:end_per_group-2"><c>end_per_group</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:pre_init_per_suite-3"><c>pre_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:end_per_group-2"><c>end_per_group</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name>Module:post_end_per_group(GroupName, Config, Return, CTHState) -&gt;
- Result</name>
- <fsummary>Called after end_per_group</fsummary>
+ <name>Module:post_end_per_group(GroupName, Config, Return, CTHState) -&gt; Result</name>
+ <fsummary>Called after end_per_group.</fsummary>
<type>
- <v>GroupName = atom()</v>
- <v>Config = [{Key,Value}]</v>
- <v>Return = NewReturn = Config | SkipOrFail | term()</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewReturn, NewCTHState}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>GroupName = atom()</v>
+ <v>Config = [{Key,Value}]</v>
+ <v>Return = NewReturn = Config | SkipOrFail | term()</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewReturn, NewCTHState}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called after
- <seealso marker="common_test#Module:end_per_group-2">
- end_per_group</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:post_init_per_suite-4">
- post_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:end_per_group-2">
- end_per_group</seealso> function instead.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called after
+ <seealso marker="common_test#Module:end_per_group-2"><c>end_per_group</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:post_init_per_suite-4"><c>post_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:end_per_group-2">end_per_group</seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name>Module:pre_end_per_suite(SuiteName, EndData, CTHState) -&gt;
- Result</name>
- <fsummary>Called before end_per_suite</fsummary>
+ <name>Module:pre_end_per_suite(SuiteName, EndData, CTHState) -&gt; Result</name>
+ <fsummary>Called before end_per_suite.</fsummary>
<type>
- <v>SuiteName = atom()</v>
- <v>EndData = Config | SkipOrFail</v>
- <v>Config = NewConfig = [{Key,Value}]</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>SuiteName = atom()</v>
+ <v>EndData = Config | SkipOrFail</v>
+ <v>Config = NewConfig = [{Key,Value}]</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewConfig | SkipOrFail, NewCTHState}</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called before
- <seealso marker="common_test#Module:end_per_suite-1">
- end_per_suite</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:pre_init_per_suite-3">
- pre_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:end_per_suite-1">
- end_per_suite</seealso> function instead.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called before
+ <seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:pre_init_per_suite-3"><c>pre_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name>Module:post_end_per_suite(SuiteName, Config, Return, CTHState) -&gt;
- Result</name>
- <fsummary>Called after end_per_suite</fsummary>
+ <name>Module:post_end_per_suite(SuiteName, Config, Return, CTHState) -&gt; Result</name>
+ <fsummary>Called after end_per_suite.</fsummary>
<type>
- <v>SuiteName = atom()</v>
- <v>Config = [{Key,Value}]</v>
- <v>Return = NewReturn = Config | SkipOrFail | term()</v>
- <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
- <v>CTHState = NewCTHState = term()</v>
- <v>Result = {NewReturn, NewCTHState}</v>
- <v>Key = atom()</v>
- <v>Value = term()</v>
- <v>Reason = term()</v>
+ <v>SuiteName = atom()</v>
+ <v>Config = [{Key,Value}]</v>
+ <v>Return = NewReturn = Config | SkipOrFail | term()</v>
+ <v>SkipOrFail = {fail,Reason} | {skip, Reason}</v>
+ <v>CTHState = NewCTHState = term()</v>
+ <v>Result = {NewReturn, NewCTHState}</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>Reason = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called after
- <seealso marker="common_test#Module:end_per_suite-1">
- end_per_suite</seealso> if it exists. It behaves the same way as
- <seealso marker="ct_hooks#Module:post_init_per_suite-4">
- post_init_per_suite</seealso>, but for the
- <seealso marker="common_test#Module:end_per_suite-1">
- end_per_suite</seealso> function instead.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called after
+ <seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite</c></seealso>
+ if it exists. It behaves the same way as
+ <seealso marker="ct_hooks#Module:post_init_per_suite-4"><c>post_init_per_suite</c></seealso>,
+ but for function
+ <seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name>Module:on_tc_fail(TestName, Reason, CTHState) -&gt;
- NewCTHState</name>
- <fsummary>Called after the CTH scope ends</fsummary>
+ <name>Module:on_tc_fail(TestName, Reason, CTHState) -&gt; NewCTHState</name>
+ <fsummary>Called after the CTH scope ends.</fsummary>
<type>
- <v>TestName = init_per_suite | end_per_suite |
- {init_per_group,GroupName} | {end_per_group,GroupName} |
- {FuncName,GroupName} | FuncName</v>
- <v>FuncName = atom()</v>
- <v>GroupName = atom()</v>
- <v>Reason = term()</v>
- <v>CTHState = NewCTHState = term()</v>
+ <v>TestName = init_per_suite | end_per_suite | {init_per_group,GroupName} | {end_per_group,GroupName} | {FuncName,GroupName} | FuncName</v>
+ <v>FuncName = atom()</v>
+ <v>GroupName = atom()</v>
+ <v>Reason = term()</v>
+ <v>CTHState = NewCTHState = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called whenever a test case (or config function)
- fails. It is called after the post function has been called for
- the failed test case. I.e. if init_per_suite fails, this function
- is called after
- <seealso marker="#Module:post_init_per_suite-4">
- post_init_per_suite</seealso>, and if a test case fails, it is called
- after <seealso marker="#Module:post_end_per_testcase-4">
- post_end_per_testcase</seealso>. If the failed test case belongs
- to a test case group, the first argument is a tuple
- <c>{FuncName,GroupName}</c>, otherwise simply the function name.</p>
-
- <p>The data which comes with the Reason follows the same format as the
- <seealso marker="event_handler_chapter#failreason">FailReason
- </seealso> in the <seealso marker="event_handler_chapter#tc_done">tc_done</seealso> event.
- See <seealso marker="event_handler_chapter#events">Event Handling
- </seealso> in the User's Guide for details.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called whenever a test case (or configuration
+ function) fails. It is called after the post function is called
+ for the failed test case, that is:</p>
+
+ <list type="bulleted">
+ <item><p>If <c>init_per_suite</c> fails, this function is called after
+ <seealso marker="#Module:post_init_per_suite-4"><c>post_init_per_suite</c></seealso>.</p></item>
+          <item><p>If a test case fails, this function is called after
+ <seealso marker="#Module:post_end_per_testcase-4"><c>post_end_per_testcase</c></seealso>.</p></item>
+ </list>
+
+ <p>If the failed test case belongs to a test case group, the first
+ argument is a tuple <c>{FuncName,GroupName}</c>, otherwise only
+ the function name.</p>
+
+ <p>The data that comes with <c>Reason</c> follows the same format as
+ <seealso marker="event_handler_chapter#failreason"><c>FailReason</c></seealso>
+ in event
+ <seealso marker="event_handler_chapter#tc_done"><c>tc_done</c></seealso>.
+ For details, see section
+ <seealso marker="event_handler_chapter#events">Event Handling</seealso>
+ in the User's Guide.</p>
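+
+        <p>A minimal sketch that only logs the failure:</p>
+
+        <code>
+ on_tc_fail(TestName, Reason, CTHState) -&gt;
+     ct:pal("~p failed: ~p", [TestName, Reason]),
+     CTHState.</code>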
</desc>
</func>
<func>
- <name>Module:on_tc_skip(TestName, Reason, CTHState) -&gt;
- NewCTHState</name>
- <fsummary>Called after the CTH scope ends</fsummary>
+ <name>Module:on_tc_skip(TestName, Reason, CTHState) -&gt; NewCTHState</name>
+ <fsummary>Called after the CTH scope ends.</fsummary>
<type>
- <v>TestName = init_per_suite | end_per_suite |
- {init_per_group,GroupName} | {end_per_group,GroupName} |
- {FuncName,GroupName} | FuncName</v>
- <v>FuncName = atom()</v>
- <v>GroupName = atom()</v>
- <v>Reason = {tc_auto_skip | tc_user_skip, term()}</v>
- <v>CTHState = NewCTHState = term()</v>
+ <v>TestName = init_per_suite | end_per_suite | {init_per_group,GroupName} | {end_per_group,GroupName} | {FuncName,GroupName} | FuncName</v>
+ <v>FuncName = atom()</v>
+ <v>GroupName = atom()</v>
+ <v>Reason = {tc_auto_skip | tc_user_skip, term()}</v>
+ <v>CTHState = NewCTHState = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>This function is called whenever a test case (or config function)
- is skipped. It is called after the post function has been called
- for the skipped test case. I.e. if init_per_group is skipped, this
- function is called after
- <seealso marker="#Module:post_init_per_group-4">
- post_init_per_group</seealso>, and if a test case is skipped,
- it is called after
- <seealso marker="#Module:post_end_per_testcase-4">
- post_end_per_testcase</seealso>. If the skipped test case belongs to a
- test case group, the first argument is a tuple <c>{FuncName,GroupName}</c>,
- otherwise simply the function name.</p>
-
- <p>The data which comes with the Reason follows the same format as
- <seealso marker="event_handler_chapter#tc_auto_skip">tc_auto_skip
- </seealso> and <seealso marker="event_handler_chapter#tc_user_skip">
- tc_user_skip</seealso> events.
- See <seealso marker="event_handler_chapter#events">Event Handling
- </seealso> in the User's Guide for details.</p>
+ <p>OPTIONAL</p>
+
+ <p>This function is called whenever a test case (or configuration
+ function) is skipped. It is called after the post function is called
+ for the skipped test case, that is:</p>
+
+ <list type="bulleted">
+ <item><p>If <c>init_per_group</c> is skipped, this function is
+ called after
+ <seealso marker="#Module:post_init_per_group-4"><c>post_init_per_group</c></seealso>.</p></item>
+ <item><p>If a test case is skipped, this function is called after
+ <seealso marker="#Module:post_end_per_testcase-4"><c>post_end_per_testcase</c></seealso>.</p></item>
+ </list>
+
+ <p>If the skipped test case belongs to a test case group, the first
+ argument is a tuple <c>{FuncName,GroupName}</c>, otherwise only
+ the function name.</p>
+
+ <p>The data that comes with <c>Reason</c> follows the same format as
+ events
+ <seealso marker="event_handler_chapter#tc_auto_skip"><c>tc_auto_skip</c></seealso>
+ and
+ <seealso marker="event_handler_chapter#tc_user_skip"><c>tc_user_skip</c></seealso>
+ For details, see section
+ <seealso marker="event_handler_chapter#events">Event Handling</seealso>
+ in the User's Guide.</p>
</desc>
</func>
<func>
<name>Module:terminate(CTHState)</name>
- <fsummary>Called after the CTH scope ends</fsummary>
+ <fsummary>Called after the CTH scope ends.</fsummary>
<type>
- <v>CTHState = term()</v>
+ <v>CTHState = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
+ <p>OPTIONAL</p>
- <p>This function is called at the end of a CTH's
- <seealso marker="ct_hooks_chapter#scope">scope</seealso>.
- </p>
+ <p>This function is called at the end of a CTH
+ <seealso marker="ct_hooks_chapter#scope">scope</seealso>.</p>
</desc>
</func>
<func>
<name>Module:id(Opts) -&gt; Id</name>
- <fsummary>Called before the init function of a CTH</fsummary>
+ <fsummary>Called before the init function of a CTH.</fsummary>
<type>
- <v>Opts = term()</v>
- <v>Id = term()</v>
+ <v>Opts = term()</v>
+ <v>Id = term()</v>
</type>
-
<desc>
- <p> OPTIONAL </p>
-
- <p>The <c>Id</c> is used to uniquely identify a CTH instance,
- if two CTH's return the same <c>Id</c> the second CTH is ignored
- and subsequent calls to the CTH will only be made to the first
- instance. For more information see
- <seealso marker="ct_hooks_chapter#installing">Installing a CTH
- </seealso> in the User's Guide.
- </p>
-
- <p>This function should NOT have any side effects as it might
- be called multiple times by Common Test.</p>
+ <p>OPTIONAL</p>
- <p>If not implemented the CTH will act as if this function returned a
- call to <c>make_ref/0</c>.</p>
- </desc>
+ <p>The <c>Id</c> identifies a CTH instance uniquely. If two CTHs return
+ the same <c>Id</c>, the second CTH is ignored and subsequent calls to
+ the CTH are only made to the first instance. For details, see section
+ <seealso marker="ct_hooks_chapter#installing">Installing a CTH</seealso>
+ in the User's Guide.</p>
+
+ <p>This function is <em>not</em> to have any side effects, as it can
+ be called multiple times by <c>Common Test</c>.</p>
+
+ <p>If not implemented, the CTH acts as if this function returned a call
+ to <c>make_ref/0</c>.</p>
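+
+        <p>A common sketch is to return the module name, so that at most
+          one instance of the CTH is ever installed:</p>
+
+        <code>
+ id(_Opts) -&gt;
+     ?MODULE.</code>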
+ </desc>
</func>
-
</funcs>
</erlref>
diff --git a/lib/common_test/doc/src/ct_hooks_chapter.xml b/lib/common_test/doc/src/ct_hooks_chapter.xml
index 3905e23dcc..3eb1945a61 100644
--- a/lib/common_test/doc/src/ct_hooks_chapter.xml
+++ b/lib/common_test/doc/src/ct_hooks_chapter.xml
@@ -34,28 +34,28 @@
<marker id="general"></marker>
<title>General</title>
<p>
- The <em>Common Test Hook</em> (henceforth called CTH) framework allows
- extensions of the default behaviour of Common Test by means of hooks
- before and after all test suite calls. CTHs allow advanced Common Test
- users to abstract out behaviour which is common to multiple test suites
- without littering all test suites with library calls. Some example
- usages are: logging, starting and monitoring external systems,
- building C files needed by the tests and much more!</p>
-
- <p>In brief, Common Test Hooks allows you to:</p>
-
- <list>
- <item>Manipulate the runtime config before each suite
- configuration call</item>
- <item>Manipulate the return of all suite configuration calls and in
- extension the result of the test themselves.</item>
+ The <em>Common Test Hook (CTH)</em> framework allows
+ extensions of the default behavior of <c>Common Test</c> using hooks
+ before and after all test suite calls. CTHs allow advanced <c>Common Test</c>
+ users to abstract out behavior that is common to multiple test suites
+        without littering all test suites with library calls. This can be used
+        for logging, starting and monitoring external systems,
+        building C files needed by the tests, and so on.</p>
+
+ <p>In brief, CTH allows you to do the following:</p>
+
+ <list type="bulleted">
+ <item>Manipulate the runtime configuration before each suite
+ configuration call.</item>
+      <item>Manipulate the return of all suite configuration calls, and by
+        extension, the result of the tests themselves.</item>
</list>
- <p>The following sections describe how to use CTHs, when they are run
- and how to manipulate your test results in a CTH</p>
+ <p>The following sections describe how to use CTHs, when they are run,
+ and how to manipulate the test results in a CTH.</p>
- <warning><p>When executing within a CTH all timetraps are shutoff. So
- if your CTH never returns, the entire test run will be stalled!</p>
+ <warning><p>When executing within a CTH, all timetraps are shut off. So
+ if your CTH never returns, the entire test run is stalled.</p>
</warning>
</section>
@@ -63,144 +63,148 @@
<section>
<marker id="installing"></marker>
<title>Installing a CTH</title>
- <p>There are multiple ways to install a CTH in your test run. You can do it
- for all tests in a run, for specific test suites and for specific groups
+ <p>A CTH can be installed in multiple ways in your test run. You can do it
+ for all tests in a run, for specific test suites, and for specific groups
within a test suite. If you want a CTH to be present in all test suites
- within your test run there are three different ways to accomplish that.
+ within your test run, there are three ways to accomplish that, as follows:
</p>
- <list>
+ <list type="bulleted">
<item>Add <c>-ct_hooks</c> as an argument to
<seealso marker="run_test_chapter#ct_run">ct_run</seealso>.
- To add multiple CTHs using this method append them to each other
- using the keyword <c>and</c>, i.e.
+ To add multiple CTHs using this method, append them to each other
+ using the keyword <c>and</c>, that is,
<c>ct_run -ct_hooks cth1 [{debug,true}] and cth2 ...</c>.</item>
- <item>Add the <c>ct_hooks</c> tag to your
+ <item>Add tag <c>ct_hooks</c> to your
<seealso marker="run_test_chapter#test_specifications">
- Test Specification</seealso></item>
- <item>Add the <c>ct_hooks</c> tag to your call to
- <seealso marker="ct#run_test-1">ct:run_test/1</seealso></item>
+ Test Specification</seealso>.</item>
+ <item>Add tag <c>ct_hooks</c> to your call to
+ <seealso marker="ct#run_test-1">ct:run_test/1</seealso>.</item>
</list>
- <p>You can also add CTHs within a test suite. This is done by returning
- <c>{ct_hooks,[CTH]}</c> in the config list from
+ <p>CTHs can also be added within a test suite. This is done by returning
+ <c>{ct_hooks,[CTH]}</c> in the configuration list from
<seealso marker="common_test#Module:suite-0">suite/0</seealso>,
<seealso marker="common_test#Module:init_per_suite-1">
- init_per_suite/1</seealso> or
+ init_per_suite/1</seealso>, or
<seealso marker="common_test#Module:init_per_group-2">
- init_per_group/2</seealso>. <c>CTH</c> in this case can be either
- only the module name of the CTH or a tuple with the module name and the
- initial arguments and optionally the hook priority of the CTH. Eg:
- <c>{ct_hooks,[my_cth_module]}</c> or
- <c>{ct_hooks,[{my_cth_module,[{debug,true}]}]}</c> or
- <c>{ct_hooks,[{my_cth_module,[{debug,true}],500}]}</c>
- </p>
+ init_per_group/2</seealso>.</p>
+
+ <p>In this case, <c>CTH</c> can either be only the module name of the CTH
+ or a tuple with the module name and the initial arguments, and optionally
+ the hook priority of the CTH. For example, one of the following:</p>
+ <list type="bulleted">
+ <item><c>{ct_hooks,[my_cth_module]}</c></item>
+ <item><c>{ct_hooks,[{my_cth_module,[{debug,true}]}]}</c></item>
+ <item><c>{ct_hooks,[{my_cth_module,[{debug,true}],500}]}</c></item>
+ </list>
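+
+    <p>For example, returned from <c>suite/0</c> (the module name
+      <c>my_cth_module</c> is a placeholder):</p>
+
+    <code>
+ suite() -&gt;
+     [{ct_hooks,[{my_cth_module,[{debug,true}]}]}].</code>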
<section>
<title>Overriding CTHs</title>
- <p>By default each installation of a CTH will cause a new instance of it
- to be activated. This can cause problems if you want to be able to
- override CTHs in test specifications while still having them in the
- suite info function. The
+ <p>By default, each installation of a CTH causes a new instance of it
+ to be activated. This can cause problems if you want to override
+ CTHs in test specifications while still having them in the
+ suite information function. The
<seealso marker="ct_hooks#Module:id-1">id/1</seealso>
callback exists to address this problem. By returning the same
- <c>id</c> in both places, Common Test knows that this CTH
- has already been installed and will not try to install it again.</p>
+ <c>id</c> in both places, <c>Common Test</c> knows that this CTH
+ is already installed and does not try to install it again.</p>
</section>
<section>
- <title>CTH Execution order</title>
- <p>By default each CTH installed will be executed in the order which
+ <title>CTH Execution Order</title>
+    <p>By default, the CTHs installed are executed in the order in which
they are installed for init calls, and then reversed for end calls.
- This is not always wanted so common_test allows
+ This is not always desired, so <c>Common Test</c> allows
the user to specify a priority for each hook. The priority can either
- be specified in the CTH <seealso marker="ct_hooks#Module:init-2">init/2
- </seealso> function or when installing the hook. The priority given at
- installation will override the priority returned by the CTH. </p>
+ be specified in the CTH function
+ <seealso marker="ct_hooks#Module:init-2">init/2</seealso> or when
+ installing the hook. The priority specified at installation overrides the
+ priority returned by the CTH.</p>
</section>
</section>
<section>
<marker id="scope"/>
<title>CTH Scope</title>
- <p>Once the CTH is installed into a certain test run it will be there until
+    <p>Once the CTH is installed into a certain test run, it remains there until
its scope is expired. The scope of a CTH depends on when it is
- installed.
- The <seealso marker="ct_hooks#Module:init-2">init/2</seealso> is
- called at the beginning of the scope and the
- <seealso marker="ct_hooks#Module:terminate-1">terminate/1
- </seealso> function is called when the scope ends.</p>
+ installed, see the following table.
+ Function <seealso marker="ct_hooks#Module:init-2">init/2</seealso> is
+ called at the beginning of the scope and function
+ <seealso marker="ct_hooks#Module:terminate-1">terminate/1</seealso>
+ is called when the scope ends.</p>
<table>
<row>
- <cell><em>CTH Installed in</em></cell>
+ <cell><em>CTH installed in</em></cell>
<cell><em>CTH scope begins before</em></cell>
<cell><em>CTH scope ends after</em></cell>
</row>
<row>
<cell><seealso marker="run_test_chapter#ct_run">ct_run</seealso></cell>
- <cell>the first test suite is to be run.</cell>
- <cell>the last test suite has been run.</cell>
+ <cell>the first test suite is to be run</cell>
+ <cell>the last test suite has been run</cell>
</row>
<row>
<cell><seealso marker="ct#run_test-1">ct:run_test</seealso></cell>
- <cell>the first test suite is to be run.</cell>
- <cell>the last test suite has been run.</cell>
+        <cell>the first test suite is to be run</cell>
+ <cell>the last test suite has been run</cell>
</row>
<row>
<cell><seealso marker="run_test_chapter#test_specifications">
Test Specification</seealso></cell>
- <cell>the first test suite is to be run.</cell>
- <cell>the last test suite has been run.</cell>
+        <cell>the first test suite is to be run</cell>
+ <cell>the last test suite has been run</cell>
</row>
<row>
<cell><seealso marker="common_test#Module:suite-0">suite/0
</seealso></cell>
<cell><seealso marker="ct_hooks#Module:pre_init_per_suite-3">
- pre_init_per_suite/3</seealso> is called.</cell>
+ pre_init_per_suite/3</seealso> is called</cell>
<cell><seealso marker="ct_hooks#Module:post_end_per_suite-4">
- post_end_per_suite/4</seealso> has been called for that test suite.</cell>
+ post_end_per_suite/4</seealso> has been called for that test suite</cell>
</row>
<row>
<cell><seealso marker="common_test#Module:init_per_suite-1">
init_per_suite/1</seealso></cell>
<cell><seealso marker="ct_hooks#Module:post_init_per_suite-4">
- post_init_per_suite/4</seealso> is called.</cell>
+ post_init_per_suite/4</seealso> is called</cell>
<cell><seealso marker="ct_hooks#Module:post_end_per_suite-4">
- post_end_per_suite/4</seealso> has been called for that test suite.</cell>
+ post_end_per_suite/4</seealso> has been called for that test suite</cell>
</row>
<row>
<cell><seealso marker="common_test#Module:init_per_group-2">
init_per_group/2</seealso></cell>
<cell><seealso marker="ct_hooks#Module:post_init_per_group-4">
- post_init_per_group/4</seealso> is called.</cell>
+ post_init_per_group/4</seealso> is called</cell>
<cell><seealso marker="ct_hooks#Module:post_end_per_suite-4">
- post_end_per_group/4</seealso> has been called for that group.</cell>
+ post_end_per_group/4</seealso> has been called for that group</cell>
</row>
<tcaption>Scope of a CTH</tcaption>
</table>
<section>
<title>CTH Processes and Tables</title>
- <p>CTHs are run with the same process scoping as normal test suites
- i.e. a different process will execute the init_per_suite hooks then the
- init_per_group or per_testcase hooks. So if you want to spawn a
- process in the CTH you cannot link with the CTH process as it will exit
- after the post hook ends. Also if you for some reason need an ETS
- table with your CTH, you will have to spawn a process which handles
- it.</p>
+ <p>CTHs are run with the same process scoping as normal test suites,
+      that is, a different process executes the <c>init_per_suite</c> hooks than the
+      <c>init_per_group</c> or <c>per_testcase</c> hooks. So if you want to spawn a
+ process in the CTH, you cannot link with the CTH process, as it exits
+ after the post hook ends. Also, if you for some reason need an ETS
+ table with your CTH, you must spawn a process that handles it.</p>
</section>
<section>
- <title>External configuration data and Logging</title>
- <p>It's possible in the CTH to read configuration data values
- by calling <seealso marker="ct#get_config-1"><c>ct:get_config/1/2/3</c></seealso> (as explained in the
- <seealso marker="config_file_chapter#require_config_data">
- External configuration data</seealso>
- chapter). The config variables in question must, as always, first have been
- <c>required</c> by means of a suite-, group-, or test case info function,
- or the <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso> function. Note that the latter can also be used
- in CT hook functions.</p>
- <p>The CT hook functions may call any of the logging functions available
+ <title>External Configuration Data and Logging</title>
+ <p>Configuration data values in the CTH can be read
+ by calling
+ <seealso marker="ct#get_config-1"><c>ct:get_config/1,2,3</c></seealso>
+ (as explained in section
+ <seealso marker="config_file_chapter#require_config_data">Requiring and Reading Configuration Data</seealso>).
+ The configuration variables in question must, as always, first have been
+ required by a suite-, group-, or test case information function,
+      or by function <seealso marker="ct#require-1"><c>ct:require/1,2</c></seealso>.
+ The latter can also be used in CT hook functions.</p>
+ <p>The CT hook functions can call any logging function
in the <c>ct</c> interface to print information to the log files, or to
add comments in the suite overview page.
</p>
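+
+    <p>A sketch of a hook that reads and logs a required configuration
+      variable (the name <c>ftp_host</c> is hypothetical):</p>
+
+    <code>
+ pre_init_per_suite(_Suite, Config, CTHState) -&gt;
+     ok = ct:require(ftp_host),
+     ct:log("FTP host config: ~p", [ct:get_config(ftp_host)]),
+     {Config, CTHState}.</code>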
@@ -209,306 +213,327 @@
</section>
<section>
- <marker id="manipulating"/>
- <title>Manipulating tests</title>
- <p>It is through CTHs possible to manipulate the results of tests and
- configuration functions. The main purpose of doing this with CTHs is to
- allow common patterns to be abstracted out from test test suites and applied to
- multiple test suites without duplicating any code. All of the callback
- functions for a CTH follow a common interface, this interface is
- described below.</p>
-
- <p>Common Test will always call all available hook functions, even pre- and post
- hooks for configuration functions that are not implemented in the suite.
+ <marker id="manipulating"/>
+ <title>Manipulating Tests</title>
+ <p>Through CTHs the results of tests and configuration functions can be manipulated.
+      The main purpose of doing this with CTHs is to allow common
+ patterns to be abstracted out from test suites and applied to
+ multiple test suites without duplicating any code. All the callback
+ functions for a CTH follow a common interface described hereafter.</p>
+
+ <p><c>Common Test</c> always calls all available hook functions, even pre-
+ and post hooks for configuration functions that are not implemented in the suite.
For example, <c>pre_init_per_suite(x_SUITE, ...)</c> and
- <c>post_init_per_suite(x_SUITE, ...)</c> will be called for test suite
- <c>x_SUITE</c>, even if it doesn't export <c>init_per_suite/1</c>. This feature
- makes it possible to use hooks as configuration fallbacks, or even
- completely replace all configuration functions with hook functions.</p>
+ <c>post_init_per_suite(x_SUITE, ...)</c> are called for test suite
+ <c>x_SUITE</c>, even if it does not export <c>init_per_suite/1</c>.
+      With this feature, hooks can be used as configuration fallbacks, and all
+ configuration functions can be replaced with hook functions.</p>
<section>
<marker id="pre"/>
<title>Pre Hooks</title>
<p>
- It is possible in a CTH to hook in behaviour before
- <seealso marker="common_test#Module:init_per_suite-1">init_per_suite</seealso>,
- <seealso marker="common_test#Module:init_per_suite-1">init_per_group</seealso>,
- <seealso marker="common_test#Module:init_per_suite-1">init_per_testcase</seealso>,
- <seealso marker="common_test#Module:init_per_suite-1">end_per_group</seealso> and
- <seealso marker="common_test#Module:init_per_suite-1">end_per_suite</seealso>.
+ In a CTH, the behavior can be hooked in before the following functions:</p>
+
+ <list type="bulleted">
+ <item><seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso></item>
+ <item><seealso marker="common_test#Module:init_per_group-2"><c>init_per_group</c></seealso></item>
+ <item><seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase</c></seealso></item>
+ <item><seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso></item>
+ <item><seealso marker="common_test#Module:end_per_group-2"><c>end_per_group</c></seealso></item>
+ <item><seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite</c></seealso></item>
+ </list>
+
+ <p>
This is done in the CTH functions called pre_&lt;name of function&gt;.
- All of these functions take the same three arguments: <c>Name</c>,
- <c>Config</c> and <c>CTHState</c>. The return value of the CTH function
- is always a combination of an result for the suite/group/test and an
- updated <c>CTHState</c>. If you want the test suite to continue on
- executing you should return the config list which you want the test to
- use as the result. If you for some reason want to skip/fail the test,
- return a tuple with <c>skip</c> or <c>fail</c> and a reason as the
- result. Example:
- </p>
- <code>pre_init_per_suite(SuiteName, Config, CTHState) -&gt;
- case db:connect() of
- {error,_Reason} -&gt;
- {{fail, "Could not connect to DB"}, CTHState};
- {ok, Handle} -&gt;
- {[{db_handle, Handle} | Config], CTHState#state{ handle = Handle }}
- end.</code>
- <note><p>If using multiple CTHs, the first part of the return tuple will be
- used as input for the next CTH. So in the case above the next CTH might
+ These functions take the same three arguments, <c>Name</c>,
+ <c>Config</c>, and <c>CTHState</c>. The return value of the CTH function
+ is always a combination of a result for the suite/group/test and an
+ updated <c>CTHState</c>.</p>
+
+      <p>To let the test suite continue executing, return the configuration
+ list that you want the test to use as the result. To skip or
+ fail the test, return a tuple with <c>skip</c> or <c>fail</c>, and a reason
+ as the result.</p>
+
+ <p><em>Example:</em></p>
+ <code>
+ pre_init_per_suite(SuiteName, Config, CTHState) -&gt;
+ case db:connect() of
+ {error,_Reason} -&gt;
+ {{fail, "Could not connect to DB"}, CTHState};
+ {ok, Handle} -&gt;
+ {[{db_handle, Handle} | Config], CTHState#state{ handle = Handle }}
+ end.</code>
+
+ <note><p>If you use multiple CTHs, the first part of the return tuple is
+ used as input for the next CTH. So in the previous example the next CTH can
get <c>{fail,Reason}</c> as the second parameter. If you have many CTHs
- which interact, it might be a good idea to not let each CTH return
- <c>fail</c> or <c>skip</c>. Instead return that an action should be taken
- through the <c>Config</c> list and implement a CTH which at the end takes
- the correct action.</p></note>
+ interacting, do not let each CTH return <c>fail</c> or <c>skip</c>.
+ Instead, return that an action is to be taken through the <c>Config</c>
+ list and implement a CTH that, at the end, takes the correct action.</p></note>
</section>
<section>
<marker id="post"/>
<title>Post Hooks</title>
- <p>It is also possible in a CTH to hook in behaviour after
- <seealso marker="common_test#Module:init_per_suite-1">init_per_suite</seealso>,
- <seealso marker="common_test#Module:init_per_suite-1">init_per_group</seealso>,
- <seealso marker="common_test#Module:init_per_suite-1">end_per_testcase</seealso>,
- <seealso marker="common_test#Module:init_per_suite-1">end_per_group</seealso> and
- <seealso marker="common_test#Module:init_per_suite-1">end_per_suite</seealso>.
- This is done in the CTH functions called post_&lt;name of function&gt;.
- All of these function take the same four arguments: <c>Name</c>,
- <c>Config</c>, <c>Return</c> and <c>CTHState</c>. <c>Config</c> in this
+ <p>In a CTH, behavior can be hooked in after the following functions:</p>
+ <list type="bulleted">
+ <item><seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso></item>
+ <item><seealso marker="common_test#Module:init_per_group-2"><c>init_per_group</c></seealso></item>
+ <item><seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase</c></seealso></item>
+ <item><seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso></item>
+ <item><seealso marker="common_test#Module:end_per_group-2"><c>end_per_group</c></seealso></item>
+ <item><seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite</c></seealso></item>
+ </list>
+
+ <p>
+ This is done in the CTH functions called <c>post_&lt;name of function&gt;</c>.
+ These functions take the same four arguments, <c>Name</c>,
+ <c>Config</c>, <c>Return</c>, and <c>CTHState</c>. <c>Config</c> in this
case is the same <c>Config</c> as the testcase is called with.
<c>Return</c> is the value returned by the testcase. If the testcase
- failed by crashing, <c>Return</c> will be
+ fails by crashing, <c>Return</c> is
<c>{'EXIT',{{Error,Reason},Stacktrace}}</c>.</p>
- <p>The return value of the CTH function is always a combination of an
+ <p>The return value of the CTH function is always a combination of a
result for the suite/group/test and an updated <c>CTHState</c>. If
- you want the callback to not affect the outcome of the test you should
+ you do not want the callback to affect the outcome of the test,
return the <c>Return</c> data as it is given to the CTH. You can also
- modify the result of the test. By returning the <c>Config</c> list
- with the <c>tc_status</c> element removed you can recover from a test
+ modify the test result. By returning the <c>Config</c> list
+ with element <c>tc_status</c> removed, you can recover from a test
failure. As in all the pre hooks, it is also possible to fail/skip
- the test case in the post hook. Example: </p>
-
- <code>post_end_per_testcase(_TC, Config, {'EXIT',{_,_}}, CTHState) -&gt;
- case db:check_consistency() of
- true ->
- %% DB is good, pass the test.
- {proplists:delete(tc_status, Config), CTHState};
- false ->
- %% DB is not good, mark as skipped instead of failing
- {{skip, "DB is inconsisten!"}, CTHState}
- end;
-post_end_per_testcase(_TC, Config, Return, CTHState) -&gt;
- %% Do nothing if tc does not crash.
- {Return, CTHState}.</code>
-
- <note><p>Recovering from a testcase failure using CTHs should only be done as
- a last resort. If used wrongly it could become very difficult to
- determine which tests pass or fail in a test run</p></note>
+ the test case in the post hook.</p>
+
+ <p><em>Example:</em></p>
+ <code>
+ post_end_per_testcase(_TC, Config, {'EXIT',{_,_}}, CTHState) -&gt;
+ case db:check_consistency() of
+ true ->
+ %% DB is good, pass the test.
+ {proplists:delete(tc_status, Config), CTHState};
+ false ->
+ %% DB is not good, mark as skipped instead of failing
+ {{skip, "DB is inconsisten!"}, CTHState}
+ end;
+ post_end_per_testcase(_TC, Config, Return, CTHState) -&gt;
+ %% Do nothing if tc does not crash.
+ {Return, CTHState}.</code>
+
+ <note><p>Recover from a test case failure using CTHs only as a last resort.
+ If used wrongly, it can become very difficult to determine which tests
+ pass or fail in a test run.</p></note>
</section>
<section>
- <marker id="skip_n_fail"/>
- <title>Skip and Fail hooks</title>
+ <title>Skip and Fail Hooks</title>
<p>
After any post hook has been executed for all installed CTHs,
<seealso marker="ct_hooks#Module:on_tc_fail-3">on_tc_fail</seealso>
- or <seealso marker="ct_hooks#Module:on_tc_fail-3">on_tc_skip</seealso>
- might be called if the testcase failed or was skipped
- respectively. You cannot affect the outcome of the tests any further at
- this point.
+ or <seealso marker="ct_hooks#Module:on_tc_skip-3">on_tc_skip</seealso>
+ is called if the testcase failed or was skipped, respectively.
+ You cannot affect the outcome of the tests any further at this point.
</p>
</section>
</section>
<section>
- <marker id="synchronizing"/>
- <title>Synchronizing external user applications with Common Test</title>
+ <marker id="synchronizing"/>
+ <title>Synchronizing External User Applications with Common Test</title>
<p>CTHs can be used to synchronize test runs with external user applications.
- The init function may e.g. start and/or communicate with an application that
- has the purpose of preparing the SUT for an upcoming test run, or maybe
+ The init function can, for example, start and/or communicate with an application that
+ has the purpose of preparing the SUT for an upcoming test run, or
initialize a database for saving test data to during the test run. The
- terminate function may similarly order such an application to reset the SUT
+ terminate function can similarly order such an application to reset the SUT
after the test run, and/or tell the application to finish active sessions
and terminate.
Any system error- or progress reports generated during the init- or
- termination stage will be saved in the
- <seealso marker="run_test_chapter#pre_post_test_io_log">Pre-
- and post test I/O log</seealso>. (This is also true for any printouts made
+ termination stage are saved in the
+ <seealso marker="run_test_chapter#pre_post_test_io_log">Pre- and Post Test I/O Log</seealso>.
+ (This is also true for any printouts made
with <c>ct:log/2</c> and <c>ct:pal/2</c>).</p>
- <p>In order to ensure that Common Test doesn't start executing tests, or
+
+ <p>To ensure that <c>Common Test</c> does not start executing tests, or
closes its log files and shuts down, before the external application
- is ready for it, Common Test may be synchronized with the application.
- During startup and shutdown, Common Test can be suspended, simply by
+ is ready for it, <c>Common Test</c> can be synchronized with the application.
+ During startup and shutdown, <c>Common Test</c> can be suspended, simply by
having a CTH evaluate a <c>receive</c> expression in the init- or terminate
function. The macros <c>?CT_HOOK_INIT_PROCESS</c> (the process executing the hook
init function) and <c>?CT_HOOK_TERMINATE_PROCESS</c> (the process executing
- the hook terminate function), each specifies the name of the correct Common Test
- process to send a message to in order to return from the <c>receive</c>.
+ the hook terminate function) each specifies the name of the correct <c>Common Test</c>
+ process to send a message to in order to make the <c>receive</c> expression return.
These macros are defined in <c>ct.hrl</c>.
</p>
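+
+ <p>The following is a minimal sketch of such a hook init function. It
+ assumes an external application registered as <c>external_app</c> that
+ prepares the SUT and replies with <c>sut_ready</c> (both names are
+ hypothetical):</p>
+
+ <code>
+ -include_lib("common_test/include/ct.hrl").
+
+ init(_Id, _Opts) -&gt;
+     %% Tell the external application which process to notify, then
+     %% suspend Common Test until the SUT is ready.
+     external_app ! {prepare_sut, ?CT_HOOK_INIT_PROCESS},
+     receive
+         sut_ready -&gt; {ok, []}
+     end.</code>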
</section>
<section>
- <marker id="example"/>
+ <marker id="example"/>
<title>Example CTH</title>
- <p>The CTH below will log information about a test run into a format
- parseable by <seealso marker="kernel:file#consult-1">file:consult/1</seealso>.
+ <p>The following CTH logs information about a test run into a format
+ parseable by <seealso marker="kernel:file#consult-1">file:consult/1</seealso>
+ (in <c>Kernel</c>):
</p>
- <code>%%% @doc Common Test Example Common Test Hook module.
--module(example_cth).
-
-%% Callbacks
--export([id/1]).
--export([init/2]).
-
--export([pre_init_per_suite/3]).
--export([post_init_per_suite/4]).
--export([pre_end_per_suite/3]).
--export([post_end_per_suite/4]).
-
--export([pre_init_per_group/3]).
--export([post_init_per_group/4]).
--export([pre_end_per_group/3]).
--export([post_end_per_group/4]).
-
--export([pre_init_per_testcase/3]).
--export([post_end_per_testcase/4]).
-
--export([on_tc_fail/3]).
--export([on_tc_skip/3]).
-
--export([terminate/1]).
-
--record(state, { file_handle, total, suite_total, ts, tcs, data }).
-
-%% @doc Return a unique id for this CTH.
-id(Opts) ->
- proplists:get_value(filename, Opts, "/tmp/file.log").
-
-%% @doc Always called before any other callback function. Use this to initiate
-%% any common state.
-init(Id, Opts) ->
- {ok,D} = file:open(Id,[write]),
- {ok, #state{ file_handle = D, total = 0, data = [] }}.
-
-%% @doc Called before init_per_suite is called.
-pre_init_per_suite(Suite,Config,State) ->
- {Config, State#state{ suite_total = 0, tcs = [] }}.
-
-%% @doc Called after init_per_suite.
-post_init_per_suite(Suite,Config,Return,State) ->
- {Return, State}.
-
-%% @doc Called before end_per_suite.
-pre_end_per_suite(Suite,Config,State) ->
- {Config, State}.
-
-%% @doc Called after end_per_suite.
-post_end_per_suite(Suite,Config,Return,State) ->
- Data = {suites, Suite, State#state.suite_total, lists:reverse(State#state.tcs)},
- {Return, State#state{ data = [Data | State#state.data] ,
- total = State#state.total + State#state.suite_total } }.
-
-%% @doc Called before each init_per_group.
-pre_init_per_group(Group,Config,State) ->
- {Config, State}.
-
-%% @doc Called after each init_per_group.
-post_init_per_group(Group,Config,Return,State) ->
- {Return, State}.
-
-%% @doc Called after each end_per_group.
-pre_end_per_group(Group,Config,State) ->
- {Config, State}.
-
-%% @doc Called after each end_per_group.
-post_end_per_group(Group,Config,Return,State) ->
- {Return, State}.
-
-%% @doc Called before each test case.
-pre_init_per_testcase(TC,Config,State) ->
- {Config, State#state{ ts = now(), total = State#state.suite_total + 1 } }.
-
-%% @doc Called after each test case.
-post_end_per_testcase(TC,Config,Return,State) ->
- TCInfo = {testcase, TC, Return, timer:now_diff(now(), State#state.ts)},
- {Return, State#state{ ts = undefined, tcs = [TCInfo | State#state.tcs] } }.
-
-%% @doc Called after post_init_per_suite, post_end_per_suite, post_init_per_group,
-%% post_end_per_group and post_end_per_testcase if the suite, group or test case failed.
-on_tc_fail(TC, Reason, State) ->
- State.
-
-%% @doc Called when a test case is skipped by either user action
-%% or due to an init function failing.
-on_tc_skip(TC, Reason, State) ->
- State.
-
-%% @doc Called when the scope of the CTH is done
-terminate(State) ->
- io:format(State#state.file_handle, "~p.~n",
- [{test_run, State#state.total, State#state.data}]),
- file:close(State#state.file_handle),
- ok.</code>
+ <code>
+ %%% @doc Common Test Example Common Test Hook module.
+ -module(example_cth).
+
+ %% Callbacks
+ -export([id/1]).
+ -export([init/2]).
+
+ -export([pre_init_per_suite/3]).
+ -export([post_init_per_suite/4]).
+ -export([pre_end_per_suite/3]).
+ -export([post_end_per_suite/4]).
+
+ -export([pre_init_per_group/3]).
+ -export([post_init_per_group/4]).
+ -export([pre_end_per_group/3]).
+ -export([post_end_per_group/4]).
+
+ -export([pre_init_per_testcase/3]).
+ -export([post_init_per_testcase/4]).
+ -export([pre_end_per_testcase/3]).
+ -export([post_end_per_testcase/4]).
+
+ -export([on_tc_fail/3]).
+ -export([on_tc_skip/3]).
+
+ -export([terminate/1]).
+
+ -record(state, { file_handle, total, suite_total, ts, tcs, data }).
+
+ %% @doc Return a unique id for this CTH.
+ id(Opts) ->
+ proplists:get_value(filename, Opts, "/tmp/file.log").
+
+ %% @doc Always called before any other callback function. Use this to initiate
+ %% any common state.
+ init(Id, Opts) ->
+ {ok,D} = file:open(Id,[write]),
+ {ok, #state{ file_handle = D, total = 0, data = [] }}.
+
+ %% @doc Called before init_per_suite is called.
+ pre_init_per_suite(Suite,Config,State) ->
+ {Config, State#state{ suite_total = 0, tcs = [] }}.
+
+ %% @doc Called after init_per_suite.
+ post_init_per_suite(Suite,Config,Return,State) ->
+ {Return, State}.
+
+ %% @doc Called before end_per_suite.
+ pre_end_per_suite(Suite,Config,State) ->
+ {Config, State}.
+
+ %% @doc Called after end_per_suite.
+ post_end_per_suite(Suite,Config,Return,State) ->
+ Data = {suites, Suite, State#state.suite_total, lists:reverse(State#state.tcs)},
+ {Return, State#state{ data = [Data | State#state.data] ,
+ total = State#state.total + State#state.suite_total } }.
+
+ %% @doc Called before each init_per_group.
+ pre_init_per_group(Group,Config,State) ->
+ {Config, State}.
+
+ %% @doc Called after each init_per_group.
+ post_init_per_group(Group,Config,Return,State) ->
+ {Return, State}.
+
+ %% @doc Called before each end_per_group.
+ pre_end_per_group(Group,Config,State) ->
+ {Config, State}.
+
+ %% @doc Called after each end_per_group.
+ post_end_per_group(Group,Config,Return,State) ->
+ {Return, State}.
+
+ %% @doc Called before each init_per_testcase.
+ pre_init_per_testcase(TC,Config,State) ->
+ {Config, State#state{ ts = now(), suite_total = State#state.suite_total + 1 } }.
+
+ %% @doc Called after each init_per_testcase (immediately before the test case).
+ post_init_per_testcase(TC,Config,Return,State) ->
+ {Return, State}.
+
+ %% @doc Called before each end_per_testcase (immediately after the test case).
+ pre_end_per_testcase(TC,Config,State) ->
+ {Config, State}.
+
+ %% @doc Called after each end_per_testcase.
+ post_end_per_testcase(TC,Config,Return,State) ->
+ TCInfo = {testcase, TC, Return, timer:now_diff(now(), State#state.ts)},
+ {Return, State#state{ ts = undefined, tcs = [TCInfo | State#state.tcs] } }.
+
+ %% @doc Called after post_init_per_suite, post_end_per_suite, post_init_per_group,
+ %% post_end_per_group and post_end_per_testcase if the suite, group or test case failed.
+ on_tc_fail(TC, Reason, State) ->
+ State.
+
+ %% @doc Called when a test case is skipped by either user action
+ %% or due to an init function failing.
+ on_tc_skip(TC, Reason, State) ->
+ State.
+
+ %% @doc Called when the scope of the CTH is done.
+ terminate(State) ->
+ io:format(State#state.file_handle, "~p.~n",
+ [{test_run, State#state.total, State#state.data}]),
+ file:close(State#state.file_handle),
+ ok.</code>
</section>
<section>
- <marker id="builtin_cths"/>
- <title>Built-in CTHs</title>
- <p>Common Test is delivered with a couple of general purpose CTHs that
- can be enabled by the user to provide some generic testing functionality.
- Some of these are enabled by default when starting running common_test,
- they can be disabled by setting <c>enable_builtin_hooks</c> to
- <c>false</c> on the command line or in the test specification. In the
- table below there is a list of all current CTHs which are delivered with
- Common Test.</p>
-
- <table>
- <row>
- <cell align="left"><em>CTH Name</em></cell>
- <cell align="left"><em>Is Built-in</em></cell>
- <cell align="left"><em>Description</em></cell>
- </row>
- <row>
- <cell align="left">cth_log_redirect</cell>
- <cell align="left">yes</cell>
- <cell align="left">Captures all error_logger and SASL logging events and prints them
- to the current test case log. If an event can not be associated with a
- testcase it will be printed in the common test framework log. This will
- happen for testcases which are run in parallel and events which occur
- inbetween testcases. You can configure the level of
- <seealso marker="sasl:sasl_app">SASL</seealso> events report
- using the normal SASL mechanisms. </cell>
- </row>
- <row>
- <cell align="left">cth_surefire</cell>
- <cell align="left">no</cell>
- <cell align="left"><p>Captures all test results and outputs them as surefire
- XML into a file. The file which is created is by default
- called junit_report.xml. The file name can be changed by
- setting the <c>path</c> option for this hook, e.g.</p>
-
- <code>-ct_hooks cth_surefire [{path,"/tmp/report.xml"}]</code>
-
- <p>If the <c>url_base</c> option is set, an additional
- attribute named <c>url</c> will be added to each
- <c>testsuite</c> and <c>testcase</c> XML element. The value will
- be constructed from the <c>url_base</c> and a relative path
- to the test suite or test case log respectively, e.g.</p>
-
- <code>-ct_hooks cth_surefire [{url_base, "http://myserver.com/"}]</code>
- <p>will give a url attribute value similar to</p>
-
- <code>"http://myserver.com/[email protected]_11.19.39/
-x86_64-unknown-linux-gnu.my_test.logs/run.2012-12-12_11.19.39/suite.log.html"</code>
-
- <p>Surefire XML can for instance be used by Jenkins to display test
- results.</p></cell>
- </row>
- </table>
+ <marker id="builtin_cths"/>
+ <title>Built-In CTHs</title>
+ <p><c>Common Test</c> is delivered with some general-purpose CTHs that
+ can be enabled by the user to provide generic testing functionality.
+ Some of these CTHs are enabled by default when <c>Common Test</c> is started.
+ They can be disabled by setting <c>enable_builtin_hooks</c> to
+ <c>false</c> on the command line or in the test specification
+ (see the example after the following list). The following
+ two CTHs are delivered with <c>Common Test</c>:</p>
+
+ <taglist>
+ <tag><c>cth_log_redirect</c></tag>
+ <item>
+ <p>Built-in</p>
+ <p>Captures all <c>error_logger</c> and <c>SASL</c> logging
+ events and prints them to the current test case log. If an event cannot be
+ associated with a test case, it is printed in the <c>Common Test</c> framework log.
+ This happens for test cases running in parallel and events occurring
+ in-between test cases. You can configure the level of
+ <seealso marker="sasl:sasl_app"><c>SASL</c></seealso> events report
+ using the normal <c>SASL</c> mechanisms.</p>
+ </item>
+ <tag><c>cth_surefire</c></tag>
+ <item>
+ <p>Not built-in</p>
+ <p>Captures all test results and outputs them as surefire
+ XML into a file. The created file is by default
+ called <c>junit_report.xml</c>. The file name can be changed by
+ setting option <c>path</c> for this hook, for example:</p>
+
+ <p><c>-ct_hooks cth_surefire [{path,"/tmp/report.xml"}]</c></p>
+
+ <p>If option <c>url_base</c> is set, an extra
+ attribute named <c>url</c> is added to each
+ <c>testsuite</c> and <c>testcase</c> XML element. The value
+ is constructed from <c>url_base</c> and a relative path
+ to the test suite or test case log, respectively, for example:</p>
+
+ <p><c>-ct_hooks cth_surefire [{url_base, "http://myserver.com/"}]</c></p>
+
+ <p>gives a URL attribute value similar to</p>
+
+ <p><c>"http://myserver.com/[email protected]_11.19.39/
+x86_64-unknown-linux-gnu.my_test.logs/run.2012-12-12_11.19.39/suite.log.html"</c></p>
+
+ <p>Surefire XML can, for example, be used by Jenkins to display test
+ results.</p>
+ </item>
+ </taglist>
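+
+ <p>For example, to disable the built-in CTHs, add the following term to
+ the test specification:</p>
+
+ <code>
+ {enable_builtin_hooks, false}.</code>
+
+ <p>The command-line flag <c>-enable_builtin_hooks false</c> to
+ <c>ct_run</c> has the same effect.</p>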
</section>
diff --git a/lib/common_test/doc/src/ct_master.xml b/lib/common_test/doc/src/ct_master.xml
new file mode 100644
index 0000000000..06f9b04f1b
--- /dev/null
+++ b/lib/common_test/doc/src/ct_master.xml
@@ -0,0 +1,220 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_master</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_master.xml</file>
+ </header>
+ <module>ct_master</module>
+ <modulesummary>Distributed test execution control for Common Test.</modulesummary>
+
+<description>
+
+ <p>Distributed test execution control for <c>Common Test</c>.</p>
+
+ <p>This module exports functions for running <c>Common Test</c> nodes on
+ multiple hosts in parallel.</p>
+
+</description>
+
+ <funcs>
+ <func>
+ <name>abort() -&gt; ok</name>
+ <fsummary>Stops all running tests.</fsummary>
+ <desc><marker id="abort-0"/>
+ <p>Stops all running tests.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>abort(Nodes) -&gt; ok</name>
+ <fsummary>Stops tests on specified nodes.</fsummary>
+ <type>
+ <v>Nodes = atom() | [atom()]</v>
+ </type>
+ <desc><marker id="abort-1"/>
+ <p>Stops tests on specified nodes.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>basic_html(Bool) -&gt; ok</name>
+ <fsummary>If set to true, the ct_master logs are written in a primitive
+ HTML format, not using the <c>Common Test</c> CSS style sheet.</fsummary>
+ <type>
+ <v>Bool = true | false</v>
+ </type>
+ <desc><marker id="basic_html-1"/>
+ <p>If set to <c>true</c>, the <c>ct_master</c> logs are written in a
+ primitive HTML format, not using the <c>Common Test</c> CSS style
+ sheet.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_event_mgr_ref() -&gt; MasterEvMgrRef</name>
+ <fsummary>Gets a reference to the <c>Common Test</c> master event
+ manager.</fsummary>
+ <type>
+ <v>MasterEvMgrRef = atom()</v>
+ </type>
+ <desc><marker id="get_event_mgr_ref-0"/>
+ <p>Gets a reference to the <c>Common Test</c> master event manager.
+ The reference can be used to, for example, add a user-specific
+ event handler while tests are running.</p>
+
+ <p><em>Example:</em></p>
+
+ <pre>
+ gen_event:add_handler(ct_master:get_event_mgr_ref(), my_ev_h, [])</pre>
+ </desc>
+ </func>
+
+ <func>
+ <name>progress() -&gt; [{Node, Status}]</name>
+ <fsummary>Returns test progress.</fsummary>
+ <type>
+ <v>Node = atom()</v>
+ <v>Status = finished_ok | ongoing | aborted | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="progress-0"/>
+ <p>Returns test progress. If <c>Status</c> is <c>ongoing</c>, tests
+ are running on the node and are not yet finished.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run(TestSpecs) -&gt; ok</name>
+ <fsummary>Equivalent to run(TestSpecs, false, [], []).</fsummary>
+ <type>
+ <v>TestSpecs = string() | [SeparateOrMerged]</v>
+ <v>SeparateOrMerged = string() | [string()]</v>
+ </type>
+ <desc><marker id="run-1"/>
+ <p>Equivalent to <seealso marker="#run-4"><c>ct_master:run(TestSpecs,
+ false, [], [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run(TestSpecs, InclNodes, ExclNodes) -&gt; ok</name>
+ <fsummary>Equivalent to run(TestSpecs, false, InclNodes, ExclNodes).
+ </fsummary>
+ <type>
+ <v>TestSpecs = string() | [SeparateOrMerged]</v>
+ <v>SeparateOrMerged = string() | [string()]</v>
+ <v>InclNodes = [atom()]</v>
+ <v>ExclNodes = [atom()]</v>
+ </type>
+ <desc><marker id="run-3"/>
+ <p>Equivalent to <seealso marker="#run-4"><c>ct_master:run(TestSpecs,
+ false, InclNodes, ExclNodes)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run(TestSpecs, AllowUserTerms, InclNodes, ExclNodes) -&gt; ok</name>
+ <fsummary>Tests are spawned on the nodes as specified in TestSpecs.
+ </fsummary>
+ <type>
+ <v>TestSpecs = string() | [SeparateOrMerged]</v>
+ <v>SeparateOrMerged = string() | [string()]</v>
+ <v>AllowUserTerms = bool()</v>
+ <v>InclNodes = [atom()]</v>
+ <v>ExclNodes = [atom()]</v>
+ </type>
+ <desc><marker id="run-4"/>
+ <p>Tests are spawned on the nodes as specified in <c>TestSpecs</c>.
+ Each specification in <c>TestSpecs</c> is handled separately.
+ However, it is also possible to specify a list of specifications to
+ be merged into one specification before the tests are executed. Any
+ test without a particular node specification is also executed on
+ the nodes in <c>InclNodes</c>. Nodes in the <c>ExclNodes</c> list
+ are excluded from the test.</p>
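+
+ <p><em>Example</em> (specification file and node names are
+ hypothetical):</p>
+
+ <pre>
+ ct_master:run(["ts1.spec"], false, [ct_node@host_x], [ct_node@host_y])</pre>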
+ </desc>
+ </func>
+
+ <func>
+ <name>run_on_node(TestSpecs, Node) -&gt; ok</name>
+ <fsummary>Equivalent to run_on_node(TestSpecs, false, Node).</fsummary>
+ <type>
+ <v>TestSpecs = string() | [SeparateOrMerged]</v>
+ <v>SeparateOrMerged = string() | [string()]</v>
+ <v>Node = atom()</v>
+ </type>
+ <desc><marker id="run_on_node-2"/>
+ <p>Equivalent to
+ <seealso marker="#run_on_node-3"><c>ct_master:run_on_node(TestSpecs,
+ false, Node)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run_on_node(TestSpecs, AllowUserTerms, Node) -&gt; ok</name>
+ <fsummary>Tests are spawned on Node according to TestSpecs.</fsummary>
+ <type>
+ <v>TestSpecs = string() | [SeparateOrMerged]</v>
+ <v>SeparateOrMerged = string() | [string()]</v>
+ <v>AllowUserTerms = bool()</v>
+ <v>Node = atom()</v>
+ </type>
+ <desc><marker id="run_on_node-3"/>
+ <p>Tests are spawned on <c>Node</c> according to <c>TestSpecs</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>run_test(Node, Opts) -&gt; ok</name>
+ <fsummary>Tests are spawned on Node using ct:run_test/1.</fsummary>
+ <type>
+ <v>Node = atom()</v>
+ <v>Opts = [OptTuples]</v>
+ <v>OptTuples = {config, CfgFiles} | {dir, TestDirs} | {suite, Suites} | {testcase, Cases} | {spec, TestSpecs} | {allow_user_terms, Bool} | {logdir, LogDir} | {event_handler, EventHandlers} | {silent_connections, Conns} | {cover, CoverSpecFile} | {cover_stop, Bool} | {userconfig, UserCfgFiles}</v>
+ <v>CfgFiles = string() | [string()]</v>
+ <v>TestDirs = string() | [string()]</v>
+ <v>Suites = atom() | [atom()]</v>
+ <v>Cases = atom() | [atom()]</v>
+ <v>TestSpecs = string() | [string()]</v>
+ <v>LogDir = string()</v>
+ <v>EventHandlers = EH | [EH]</v>
+ <v>EH = atom() | {atom(), InitArgs} | {[atom()], InitArgs}</v>
+ <v>InitArgs = [term()]</v>
+ <v>Conns = all | [atom()]</v>
+ </type>
+ <desc><marker id="run_test-2"/>
+ <p>Tests are spawned on <c>Node</c> using
+ <seealso marker="ct:run_test-1"><c>ct:run_test/1</c></seealso></p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_master_chapter.xml b/lib/common_test/doc/src/ct_master_chapter.xml
index 16492f39b8..99555164e0 100644
--- a/lib/common_test/doc/src/ct_master_chapter.xml
+++ b/lib/common_test/doc/src/ct_master_chapter.xml
@@ -22,7 +22,7 @@
</legalnotice>
- <title>Using Common Test for Large Scale Testing</title>
+ <title>Using Common Test for Large-Scale Testing</title>
<prepared>Peter Andersson</prepared>
<docno></docno>
<date></date>
@@ -33,217 +33,220 @@
<section>
<marker id="general"></marker>
<title>General</title>
- <p>Large scale automated testing requires running multiple independent
+ <p>Large-scale automated testing requires running multiple independent
test sessions in parallel. This is accomplished by running
- a number of Common Test nodes on one or more hosts, testing
- different target systems. Configuring, starting and controlling the
+ some <c>Common Test</c> nodes on one or more hosts, testing
+ different target systems. Configuring, starting, and controlling the
test nodes independently can be a cumbersome operation. To aid
- this kind of automated large scale testing, CT offers a master test
- node component, CT Master, that handles central configuration and control
- in a system of distributed CT nodes.</p>
+ this kind of automated large-scale testing, <c>Common Test</c> offers a master
+ test node component, <c>Common Test</c> Master, which handles central configuration and control
+ in a system of distributed <c>Common Test</c> nodes.</p>
- <p>The CT Master server runs on one dedicated Erlang node and uses distributed
- Erlang to communicate with any number of CT test nodes, each hosting a regular
- CT server. Test specifications are used as input to specify what to test on which
+ <p>The <c>Common Test</c> Master server runs on one dedicated Erlang node and uses distributed
+ Erlang to communicate with any number of <c>Common Test</c> test nodes, each hosting a regular
+ <c>Common Test</c> server. Test specifications are used as input to specify what to test on which
test nodes, using what configuration.</p>
- <p>The CT Master server writes progress information to HTML log files similarly
- to the regular CT server. The logs contain test statistics and links to the
- log files written by each independent CT server.</p>
+ <p>The <c>Common Test</c> Master server writes progress information to HTML log files similarly
+ to the regular <c>Common Test</c> server. The logs contain test statistics and links to the
+ log files written by each independent <c>Common Test</c> server.</p>
- <p>The CT master API is exported by the <c>ct_master</c> module.</p>
+ <p>The <c>Common Test</c> Master API is exported by module
+ <seealso marker="ct_master"><c>ct_master</c></seealso>.</p>
</section>
<section>
- <title>Usage</title>
- <p>CT Master requires all test nodes to be on the same network and share a common
- file system. As of this date, CT Master can not start test nodes
- automatically. The nodes must have been started in advance for CT Master to be
+ <title>Use</title>
+ <p><c>Common Test</c> Master requires all test nodes to be on the same network and share a common
+ file system. <c>Common Test</c> Master cannot start test nodes
+ automatically. The nodes must be started in advance for <c>Common Test</c> Master to be
able to start test sessions on them.</p>
- <p>Tests are started by calling:</p>
-
- <p><c>ct_master:run(TestSpecs)</c> or
- <c>ct_master:run(TestSpecs, InclNodes, ExclNodes)</c></p>
+ <p>Tests are started by calling
+ <seealso marker="ct_master#run-1"><c>ct_master:run(TestSpecs)</c></seealso> or
+ <seealso marker="ct_master#run-3"><c>ct_master:run(TestSpecs, InclNodes, ExclNodes)</c></seealso></p>
<p><c>TestSpecs</c> is either the name of a test specification file (string) or a list
- of test specifications. In case of a list, the specifications will be handled (and
+ of test specifications. If it is a list, the specifications are handled (and
the corresponding tests executed) in sequence. An element in a <c>TestSpecs</c> list
- can also be list of test specifications. The specifications in such a list will be
- merged into one combined specification prior to test execution. For example:</p>
-
- <p><c>ct_master:run(["ts1","ts2",["ts3","ts4"]])</c></p>
+ can also be a list of test specifications. The specifications in such a list are
+ merged into one combined specification before test execution.</p>
- <p>means first the tests specified by "ts1" will run, then the tests specified by "ts2"
+ <p><em>Example:</em></p>
+ <pre>
+ ct_master:run(["ts1","ts2",["ts3","ts4"]])</pre>
+
+ <p>Here, the tests specified by "ts1" run first, then the tests specified by "ts2",
and finally the tests specified by both "ts3" and "ts4".</p>
- <p>The <c>InclNodes</c> argument to <c>run/3</c> is a list of node names. The <c>run/3</c>
- function runs the tests in <c>TestSpecs</c> just like <c>run/1</c> but will also
- take any test in <c>TestSpecs</c> that's not explicitly tagged with a particular
- node name and execute it on the nodes listed in <c>InclNodes</c>. By using <c>run/3</c>
- this way it is possible to use any test specification, with or without node information,
- in a large scale test environment! <c>ExclNodes</c> is a list of nodes that should be
- excluded from the test. I.e. tests that have been specified in the test specification
- to run on a particular node will not be performed if that node is at runtime
- listed in <c>ExclNodes</c>.</p>
-
- <p>If CT Master fails initially to connect to any of the test nodes specified in a
- test specification or in the <c>InclNodes</c> list, the operator will be prompted with
- the option to either start over again (after manually checking the status of the
- node(s) in question), to run without the missing nodes, or to abort the operation.</p>
+ <p>The <c>InclNodes</c> argument to <c>run/3</c> is a list of node names. Function
+ <c>run/3</c> runs the tests in <c>TestSpecs</c> just like <c>run/1</c>, but also
+ takes any test in <c>TestSpecs</c> that is not explicitly tagged with a particular
+ node name, and executes it on the nodes listed in <c>InclNodes</c>. By using <c>run/3</c>
+ this way, any test specification can be used, with or without node information,
+ in a large-scale test environment.</p>
+
+ <p><c>ExclNodes</c> is a list of nodes to be
+ excluded from the test. That is, tests that are specified in the test specification
+ to run on a particular node are not performed if that node is
+ listed in <c>ExclNodes</c> at runtime.</p>
- <p>When tests start, CT Master prints information to console about the nodes that are
- involved. CT Master also reports when tests finish, successfully or unsuccessfully. If
- connection is lost to a node, the test on that node is considered finished. CT Master
- will not attempt to reestablish contact with the failing node. At any time to get the
- current status of the test nodes, call the function:</p>
+ <p>If <c>Common Test</c> Master fails initially to connect to any of the test nodes specified in a
+ test specification or in the <c>InclNodes</c> list, the operator is prompted with
+ the option to either start over again (after manually checking the status of the
+ nodes in question), to run without the missing nodes, or to abort the operation.</p>
- <p><c>ct_master:progress()</c></p>
+ <p>When tests start, <c>Common Test</c> Master displays information on the console about the involved nodes.
+ <c>Common Test</c> Master also reports when tests finish, successfully or unsuccessfully. If
+ connection is lost to a node, the test on that node is considered finished. <c>Common Test</c> Master
+ does not attempt to re-establish contact with the failing node.</p>
- <p>To stop one or more tests, use:</p>
+ <p>At any time, to get the current status of the test nodes, call function
+ <seealso marker="ct_master#progress-0"><c>ct_master:progress()</c></seealso>.</p>
- <p><c>ct_master:abort()</c> (stop all) or <c>ct_master:abort(Nodes)</c></p>
+ <p>To stop one or more tests, use function
+ <seealso marker="ct_master#abort-0"><c>ct_master:abort()</c></seealso> (to stop all) or
+ <seealso marker="ct_master#abort-1"><c>ct_master:abort(Nodes)</c></seealso>.</p>
- <p>For detailed information about the <c>ct_master</c> API, please see the
- <seealso marker="ct_master">manual page</seealso> for this module.</p>
+ <p>For details about the <c>Common Test</c> Master API, see module
+ <seealso marker="ct_master"><c>ct_master</c></seealso>.</p>
</section>
<section>
<marker id="test_specifications"></marker>
<title>Test Specifications</title>
- <p>The test specifications used as input to CT Master are fully compatible with the
- specifications used as input to the regular CT server. The syntax is described in the
- <seealso marker="run_test_chapter#test_specifications">Running Test Suites</seealso>
- chapter.</p>
+ <p>The test specifications used as input to <c>Common Test</c> Master are fully compatible with the
+ specifications used as input to the regular <c>Common Test</c> server. The syntax is described in section
+ <seealso marker="run_test_chapter#test_specifications">Test Specifications</seealso>
+ in section Running Tests and Analyzing Results.</p>
<p>All test specification terms can have a <c>NodeRefs</c> element. This element
specifies which node or nodes a configuration operation or a test is to be executed
- on. <c>NodeRefs</c> is defined as:</p>
+ on. <c>NodeRefs</c> is defined as follows:</p>
<p><c>NodeRefs = all_nodes | [NodeRef] | NodeRef</c></p>
-
- <p>where</p>
<p><c>NodeRef = NodeAlias | node() | master</c></p>
<p>A <c>NodeAlias</c> (<c>atom()</c>) is used in a test specification as a
- reference to a node name (so the actual node name only needs to be declared once,
- which can of course also be achieved using constants).
- The alias is declared with a <c>node</c> term:</p>
+ reference to a node name (so the node name only needs to be declared once,
+ which also can be achieved using constants).
+ The alias is declared with a <c>node</c> term as follows:</p>
<p><c>{node, NodeAlias, NodeName}</c></p>
- <p>If <c>NodeRefs</c> has the value <c>all_nodes</c>, the operation or test will
- be performed on all given test nodes. (Declaring a term without a <c>NodeRefs</c>
- element actually has the same effect). If <c>NodeRefs</c> has the value
- <c>master</c>, the operation is only performed on the CT Master node (namely set
+ <p>If <c>NodeRefs</c> has the value <c>all_nodes</c>, the operation or test
+ is performed on all specified test nodes. (Declaring a term without a <c>NodeRefs</c>
+ element has the same effect). If <c>NodeRefs</c> has the value
+ <c>master</c>, the operation is only performed on the <c>Common Test</c> Master node (namely set
the log directory or install an event handler).</p>
- <p>Consider the example in the
- <seealso marker="run_test_chapter#test_specifications">Running Test Suites</seealso>
- chapter, now extended with node information and intended to be executed by the
- CT Master:</p>
+ <p>Consider the example in section
+ <seealso marker="run_test_chapter#test_specifications">Test Specifications</seealso>
+ in section Running Tests and Analyzing Results,
+ now extended with node information and intended to be executed by
+ <c>Common Test</c> Master:</p>
<pre>
- {define, 'Top', "/home/test"}.
- {define, 'T1', "'Top'/t1"}.
- {define, 'T2', "'Top'/t2"}.
- {define, 'T3', "'Top'/t3"}.
- {define, 'CfgFile', "config.cfg"}.
- {define, 'Node', ct_node}.
-
- {node, node1, 'Node@host_x'}.
- {node, node2, 'Node@host_y'}.
-
- {logdir, master, "'Top'/master_logs"}.
- {logdir, "'Top'/logs"}.
-
- {config, node1, "'T1'/'CfgFile'"}.
- {config, node2, "'T2'/'CfgFile'"}.
- {config, "'T3'/'CfgFile'"}.
-
- {suites, node1, 'T1', all}.
- {skip_suites, node1, 'T1', [t1B_SUITE,t1D_SUITE], "Not implemented"}.
- {skip_cases, node1, 'T1', t1A_SUITE, [test3,test4], "Irrelevant"}.
- {skip_cases, node1, 'T1', t1C_SUITE, [test1], "Ignore"}.
-
- {suites, node2, 'T2', [t2B_SUITE,t2C_SUITE]}.
- {cases, node2, 'T2', t2A_SUITE, [test4,test1,test7]}.
-
- {skip_suites, 'T3', all, "Not implemented"}.</pre>
+ {define, 'Top', "/home/test"}.
+ {define, 'T1', "'Top'/t1"}.
+ {define, 'T2', "'Top'/t2"}.
+ {define, 'T3', "'Top'/t3"}.
+ {define, 'CfgFile', "config.cfg"}.
+ {define, 'Node', ct_node}.
+
+ {node, node1, 'Node@host_x'}.
+ {node, node2, 'Node@host_y'}.
+
+ {logdir, master, "'Top'/master_logs"}.
+ {logdir, "'Top'/logs"}.
+
+ {config, node1, "'T1'/'CfgFile'"}.
+ {config, node2, "'T2'/'CfgFile'"}.
+ {config, "'T3'/'CfgFile'"}.
+
+ {suites, node1, 'T1', all}.
+ {skip_suites, node1, 'T1', [t1B_SUITE,t1D_SUITE], "Not implemented"}.
+ {skip_cases, node1, 'T1', t1A_SUITE, [test3,test4], "Irrelevant"}.
+ {skip_cases, node1, 'T1', t1C_SUITE, [test1], "Ignore"}.
+
+ {suites, node2, 'T2', [t2B_SUITE,t2C_SUITE]}.
+ {cases, node2, 'T2', t2A_SUITE, [test4,test1,test7]}.
+
+ {skip_suites, 'T3', all, "Not implemented"}.</pre>
<p>This example specifies the same tests as the original example. But
- now if started with a call to <c>ct_master:run(TestSpecName)</c>, the
- t1 test will be executed on node <c>ct_node@host_x</c> (node1), the
- t2 test on <c>ct_node@host_y</c> (node2) and the t3 test on both
- node1 and node2. The t1 config file will only be read on
- node1 and the t2 config file only on node2, while the t3 config file
- will be read on both node1 and node2. Both test nodes will write log
- files to the same directory. (The CT Master node will however use a
- different log directory than the test nodes).</p>
+ now if started with a call to <c>ct_master:run(TestSpecName)</c>, test
+ <c>t1</c> is executed on node <c>ct_node@host_x</c> (<c>node1</c>), test
+ <c>t2</c> on <c>ct_node@host_y</c> (<c>node2</c>) and test <c>t3</c>
+ on both <c>node1</c> and <c>node2</c>. Configuration file <c>t1</c> is only read on
+ <c>node1</c> and configuration file <c>t2</c> only on <c>node2</c>, while the
+ configuration file <c>t3</c> is read on both <c>node1</c> and <c>node2</c>.
+ Both test nodes write log files to the same directory. (However, the <c>Common Test</c> Master
+ node uses a different log directory than the test nodes.)</p>
<p>If the test session is instead started with a call to
<c>ct_master:run(TestSpecName, [ct_node@host_z], [ct_node@host_x])</c>,
- the result is that the t1 test does not run on
- <c>ct_node@host_x</c> (or any other node) while the t3 test runs on
+ the result is that test <c>t1</c> does not run on
+ <c>ct_node@host_x</c> (or any other node) while test <c>t3</c> runs on both
<c>ct_node@host_y</c> and <c>ct_node@host_z</c>.</p>
<p>A nice feature is that a test specification that includes node
- information can still be used as input to the regular Common Test server
- (as described in the
- <seealso marker="run_test_chapter#test_specifications">Running Test Suites</seealso>
- chapter). The result is that any test specified to run on a node with the same
- name as the Common Test node in question (typically <c>ct@somehost</c> if started
- with the <c>ct_run</c> program), will be performed. Tests without explicit
- node association will always be performed too of course!</p>
+ information can still be used as input to the regular <c>Common Test</c> server
+ (as described in section
+ <seealso marker="run_test_chapter#test_specifications">Test Specifications</seealso>).
+ The result is that any test specified to run on a node with the same
+ name as the <c>Common Test</c> node in question (typically <c>ct@somehost</c> if started
+ with the <c>ct_run</c> program), is performed. Tests without explicit
+ node association are always performed too, of course.</p>
</section>
<section>
- <title>Automatic startup of test target nodes</title>
+ <title>Automatic Startup of Test Target Nodes</title>
<marker id="ct_slave"></marker>
- <p>It is possible to automatically start, and perform initial actions, on
- test target nodes by using the test specification term <c>init</c>.</p>
- <p>Currently, two sub-terms are supported, <c>node_start</c> and <c>eval</c>.</p>
- <p>Example:</p>
+ <p>Test target nodes can be started automatically, and initial actions
+ performed on them, using the test specification term <c>init</c>.</p>
+ <p>Two subterms are supported: <c>node_start</c> and <c>eval</c>.</p>
+ <p><em>Example:</em></p>
<pre>
- {node, node1, node1@host1}.
- {node, node2, node1@host2}.
- {node, node3, node2@host2}.
- {node, node4, node1@host3}.
- {init, node1, [{node_start, [{callback_module, my_slave_callback}]}]}.
- {init, [node2, node3], {node_start, [{username, "ct_user"}, {password, "ct_password"}]}}.
- {init, node4, {eval, {module, function, []}}}.</pre>
+ {node, node1, node1@host1}.
+ {node, node2, node1@host2}.
+ {node, node3, node2@host2}.
+ {node, node4, node1@host3}.
+ {init, node1, [{node_start, [{callback_module, my_slave_callback}]}]}.
+ {init, [node2, node3], {node_start, [{username, "ct_user"}, {password, "ct_password"}]}}.
+ {init, node4, {eval, {module, function, []}}}.</pre>
<p>This test specification declares that <c>node1@host1</c> is to be started using
the user callback function <c>callback_module:my_slave_callback/0</c>, and nodes
- <c>node1@host2</c> and <c>node2@host2</c> will be started with the default callback
- module <c>ct_slave</c>. The given user name and password is used to log into remote
- host <c>host2</c>. Also, the function <c>module:function/0</c> will be evaluated on
- <c>node1@host3</c>, and the result of this call will be printed to the log.</p>
+ <c>node1@host2</c> and <c>node2@host2</c> are to be started with the default callback
+ module <c>ct_slave</c>. The specified username and password are used to log on to remote
+ host <c>host2</c>. Also, function <c>module:function/0</c> is evaluated on
+ <c>node1@host3</c>, and the result of this call is printed to the log.</p>
- <p>The default <seealso marker="ct_slave">ct_slave</seealso> callback module,
- which is part of the Common Test application, has the following features:
+ <p>The default callback module, <seealso marker="ct_slave"><c>ct_slave</c></seealso>,
+ has the following features:
</p>
- <list>
+ <list type="bulleted">
<item>Starting Erlang target nodes on local or remote hosts
- (ssh is used for communication).
+ (application <c>SSH</c> is used for communication).
</item>
- <item>Ability to start an Erlang emulator with additional flags
+ <item>Ability to start an Erlang emulator with more flags
(any flags supported by <c>erl</c> are supported).
</item>
- <item>Supervision of a node being started by means of internal callback
- functions. Used to prevent hanging nodes. (Configurable).
+ <item>Supervision of a node being started using internal callback
+ functions. Used to prevent hanging nodes. (Configurable.)
</item>
- <item>Monitoring of the master node by the slaves. A slave node may be
- stopped in case the master node terminates. (Configurable).
+ <item>Monitoring of the master node by the slaves. A slave node can be
+ stopped if the master node terminates. (Configurable.)
</item>
- <item>Execution of user functions after a slave node is started.
- Functions can be given as a list of {Module, Function, Arguments} tuples.
+ <item>Execution of user functions after a slave node is started. Functions can
+ be specified as a list of <c>{Module, Function, Arguments}</c> tuples.
</item>
</list>
- <p>Note that it is possible to specify an <c>eval</c> term for the node as well
- as <c>startup_functions</c> in the <c>node_start</c> options list. In this
- case first the node will be started, then the <c>startup_functions</c> are
+ <note><p>An <c>eval</c> term for the node and
+ <c>startup_functions</c> in the <c>node_start</c> options list can be specified.
+ In this case, the node is started first, then the <c>startup_functions</c> are
executed, and finally functions specified with <c>eval</c> are called.
- </p>
+ </p></note>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/ct_netconfc.xml b/lib/common_test/doc/src/ct_netconfc.xml
new file mode 100644
index 0000000000..d8c82c7f2c
--- /dev/null
+++ b/lib/common_test/doc/src/ct_netconfc.xml
@@ -0,0 +1,1039 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_netconfc</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_netconfc.xml</file>
+ </header>
+ <module>ct_netconfc</module>
+ <modulesummary>NETCONF client module.</modulesummary>
+
+<description>
+
+ <p>NETCONF client module.</p>
+
+ <p>The NETCONF client is compliant with RFC 4741 NETCONF Configuration
+ Protocol and RFC 4742 Using the NETCONF Configuration Protocol over
+ Secure Shell (SSH).</p>
+
+ <p>For each server to test against, the following entry can be added to a
+ configuration file:</p>
+
+ <pre>
+ {server_id(),options()}.</pre>
+
+ <p>The <c>server_id()</c> or an associated <c>target_name()</c> (see
+ module <seealso marker="ct"><c>ct</c></seealso>) must then be used
+ in calls to
+ <seealso marker="#open-2"><c>ct_netconfc:open/2</c></seealso>.</p>
+
+ <p>If no configuration exists for a server, a session can still be
+ opened by calling
+ <seealso marker="#open-2"><c>ct_netconfc:open/2</c></seealso> with
+ all necessary options specified in the call. The first argument to
+ <seealso marker="#open-2"><c>ct_netconfc:open/2</c></seealso> can
+ then be any atom.</p>
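+
+ <p>The following is an example of such a configuration entry (server id,
+ host, and credentials are hypothetical):</p>
+
+ <pre>
+ {nc_server1,[{ssh,"10.0.0.1"},{port,830},{user,"nc_user"},{password,"nc_pwd"}]}.</pre>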
+
+ </description>
+
+ <section>
+ <marker id="Logging"/>
+ <title>Logging</title>
+ <p>The NETCONF client uses <c>error_logger</c> for logging of NETCONF
+ traffic. A special purpose error handler is implemented in
+ <c>ct_conn_log_h</c>. To use this error handler, add the
+ <c>cth_conn_log</c> hook in the test suite, for example:</p>
+
+ <pre>
+ suite() -&gt;
+ [{ct_hooks, [{cth_conn_log, [{conn_mod(),hook_options()}]}]}].</pre>
+
+ <p><c>conn_mod()</c> is the name of the <c>Common Test</c> module
+ implementing the connection protocol, for example, <c>ct_netconfc</c>.</p>
+
+ <p>Hook option <c>log_type</c> specifies the type of logging:</p>
+
+ <taglist>
+ <tag><c>raw</c></tag>
+ <item><p>The sent and received NETCONF data is logged to a separate
+ text file "as is" without any formatting. A link to the file is
+ added to the test case HTML log.</p></item>
+
+ <tag><c>pretty</c></tag>
+ <item><p>The sent and received NETCONF data is logged to a separate
+ text file with XML data nicely indented. A link to the file is
+ added to the test case HTML log.</p></item>
+
+ <tag><c>html (default)</c></tag>
+ <item><p>The sent and received NETCONF traffic is pretty printed
+ directly in the test case HTML log.</p></item>
+
+ <tag><c>silent</c></tag>
+ <item><p>NETCONF traffic is not logged.</p></item>
+ </taglist>
+
+ <p>By default, all NETCONF traffic is logged in one single log file.
+ However, different connections can be logged in separate files.
+ To do this, use hook option <c>hosts</c> and list the names of the
+ servers/connections to be used in the suite. The connections
+ must be named for this to work, that is, they must be opened with
+ <seealso marker="#open-2"><c>ct_netconfc:open/2</c></seealso>.</p>
+
+ <p>Option <c>hosts</c> has no effect if <c>log_type</c> is set to
+ <c>html</c> or <c>silent</c>.</p>
+
+ <p>The hook options can also be specified in a configuration file with
+ configuration variable <c>ct_conn_log</c>:</p>
+
+ <pre>
+ {ct_conn_log,[{conn_mod(),hook_options()}]}.</pre>
+
+ <p>For example:</p>
+
+ <pre>
+ {ct_conn_log,[{ct_netconfc,[{log_type,pretty},
+ {hosts,[key_or_name()]}]}]}</pre>
+
+ <note>
+ <p>Hook options specified in a configuration file overwrite the
+ hard-coded hook options in the test suite.</p>
+ </note>
+
+ <p><em>Logging Example 1:</em></p>
+ <marker id="Logging_example_1"/>
+
+ <p>The following <c>ct_hooks</c> statement causes pretty printing of
+ NETCONF traffic to separate logs for the connections named
+ <c>nc_server1</c> and <c>nc_server2</c>. Any other connections are
+ logged to the default NETCONF log.</p>
+
+ <pre>
+ suite() -&gt;
+ [{ct_hooks, [{cth_conn_log, [{ct_netconfc,[{log_type,pretty},
+ {hosts,[nc_server1,nc_server2]}]}
+ ]}]}].</pre>
+
+ <p>Connections must be opened as follows:</p>
+
+ <pre>
+ open(nc_server1,[...]),
+ open(nc_server2,[...]).</pre>
+
+ <p><em>Logging Example 2:</em></p>
+ <marker id="Logging_example_2"/>
+
+ <p>The following configuration file causes raw logging of all NETCONF
+ traffic into one single text file:</p>
+
+ <pre>
+ {ct_conn_log,[{ct_netconfc,[{log_type,raw}]}]}.</pre>
+
+ <p>The <c>ct_hooks</c> statement must look as follows:</p>
+
+ <pre>
+ suite() -&gt;
+ [{ct_hooks, [{cth_conn_log, []}]}].</pre>
+
+ <p>The same <c>ct_hooks</c> statement without the configuration file
+ would cause HTML logging of all NETCONF connections into the test
+ case HTML log.</p>
+ </section>
+
+ <section>
+ <marker id="Notifications"/>
+ <title>Notifications</title>
+
+ <p>The NETCONF client is also compliant with RFC 5277 NETCONF Event
+ Notifications, which defines a mechanism for an asynchronous message
+ notification delivery service for the NETCONF protocol.</p>
+
+ <p>Specific functions to support this are
+ <seealso marker="#create_subscription-6"><c>ct_netconfc:create_subscription/6</c></seealso>
+ and
+ <seealso marker="#get_event_streams-3"><c>ct_netconfc:get_event_streams/3</c></seealso>.
+ (The functions also exist with other arities.)</p>
+ </section>
+
+ <section>
+ <title>Data Types</title>
+ <marker id="types"/>
+ <taglist>
+ <tag><c>client() = handle() | key_or_name()</c></tag>
+ <item><marker id="type-client"/>
+ <p>For <c>handle()</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+
+ <tag><c>error_reason() = term()</c></tag>
+ <item><marker id="type-error_reason"/> </item>
+ <tag><c>event_time() = {eventTime, xml_attributes(), [xs_datetime()]}</c></tag>
+ <item><marker id="type-event_time"/> </item>
+
+ <tag><c>handle() = term()</c></tag>
+ <item><marker id="type-handle"/>
+ <p>Opaque reference for a connection (NETCONF session). For more
+ information, see module <seealso marker="ct"><c>ct</c></seealso>.</p>
+ </item>
+
+ <tag><c>host() = <seealso marker="kernel:inet#type-hostname"><c>inet:hostname()</c></seealso> | <seealso marker="kernel:inet#type-ip_address"><c>inet:ip_address()</c></seealso></c></tag>
+ <item><marker id="type-host"/></item>
+
+ <tag><c>key_or_name() = server_id() | target_name()</c></tag>
+ <item><marker id="type-key_or_name"/>
+ <p>For <c>target_name</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+
+ <tag><c>netconf_db() = running | startup | candidate</c></tag>
+ <item><marker id="type-netconf_db"/> </item>
+
+ <tag><c>notification() = {notification, xml_attributes(), notification_content()}</c></tag>
+ <item><marker id="type-notification"/> </item>
+
+ <tag><c>notification_content() = [event_time() | simple_xml()]</c></tag>
+ <item><marker id="type-notification_content"/> </item>
+
+ <tag><c>option() = {ssh, host()} | {port, <seealso marker="kernel:inet#type-port_number"><c>inet:port_number()</c></seealso>} | {timeout, timeout()} | SshConnectOption</c></tag>
+ <item><marker id="type-option"/>
+
+ <p><c>SshConnectOption</c> is any valid option to
+ <seealso marker="ssh:ssh#connect-3"><c>ssh:connect/3,4</c></seealso>.
+ Common options used are <c>user</c>, <c>password</c>
+ and <c>user_dir</c>. The <c>SshConnectOptions</c> are
+ verified by the SSH application.</p></item>
+
+ <tag><c>options() = [option()]</c></tag>
+ <item><marker id="type-options"/>
+ <p>Options used for setting up an SSH connection to a NETCONF
+ server.</p></item>
+
+ <tag><c>server_id() = atom()</c></tag>
+ <item><marker id="type-server_id"/>
+ <p>The identity of a server, specified in a configuration
+ file.</p></item>
+
+ <tag><c>simple_xml() = {xml_tag(), xml_attributes(), xml_content()} | {xml_tag(), xml_content()} | xml_tag()</c></tag>
+ <item><marker id="type-simple_xml"/>
+ <p>This type is further described in application
+ <seealso marker="xmerl:index"><c>xmerl</c></seealso>.</p></item>
+
+ <tag><c>stream_data() = {description, string()} | {replaySupport, string()} | {replayLogCreationTime, string()} | {replayLogAgedTime, string()}</c></tag>
+ <item><marker id="type-stream_data"/>
+ <p>For details about the data format for the string values, see
+ "XML Schema for Event Notifications" in RFC 5277.</p></item>
+
+ <tag><c>stream_name() = string()</c></tag>
+ <item><marker id="type-stream_name"/> </item>
+
+ <tag><c>streams() = [{stream_name(), [stream_data()]}]</c></tag>
+ <item><marker id="type-streams"/> </item>
+
+ <tag><c>xml_attribute_tag() = atom()</c></tag>
+ <item><marker id="type-xml_attribute_tag"/> </item>
+
+ <tag><c>xml_attribute_value() = string()</c></tag>
+ <item><marker id="type-xml_attribute_value"/> </item>
+
+ <tag><c>xml_attributes() = [{xml_attribute_tag(), xml_attribute_value()}]</c></tag>
+ <item><marker id="type-xml_attributes"/> </item>
+
+ <tag><c>xml_content() = [simple_xml() | iolist()]</c></tag>
+ <item><marker id="type-xml_content"/> </item>
+
+ <tag><c>xml_tag() = atom()</c></tag>
+ <item><marker id="type-xml_tag"/> </item>
+
+ <tag><c>xpath() = {xpath, string()}</c></tag>
+ <item><marker id="type-xpath"/> </item>
+
+ <tag><c>xs_datetime() = string()</c></tag>
+ <item><marker id="type-xs_datetime"/>
+ <p>This date and time identifier has the same format as the XML type
+ <c>dateTime</c> and is compliant with RFC 3339, Date and Time on
+ the Internet: Timestamps. The format is as follows:</p>
+ <pre>
+ [-]CCYY-MM-DDThh:mm:ss[.s][Z|(+|-)hh:mm]</pre>
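+ <p>For example: <c>"2012-12-12T11:19:39+01:00"</c></p>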
+ </item>
+ </taglist>
+ </section>
+
+ <funcs>
+ <func>
+ <name>action(Client, Action) -&gt; Result</name>
+ <fsummary>Equivalent to action(Client, Action, infinity).</fsummary>
+ <desc><marker id="action-2"/>
+ <p>Equivalent to
+ <seealso marker="#action-3"><c>ct_netconfc:action(Client, Action,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>action(Client, Action, Timeout) -&gt; Result</name>
+ <fsummary>Executes an action.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Action = simple_xml()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {ok, [simple_xml()]} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="action-3"/>
+ <p>Executes an action. If the return type is void, <c>ok</c> is
+ returned instead of <c>{ok,[simple_xml()]}</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>close_session(Client) -&gt; Result</name>
+ <fsummary>Equivalent to close_session(Client, infinity).</fsummary>
+ <desc><marker id="close_session-1"/>
+ <p>Equivalent to
+ <seealso marker="#close_session-2"><c>ct_netconfc:close_session(Client,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>close_session(Client, Timeout) -&gt; Result</name>
+ <fsummary>Requests graceful termination of the session associated with
+ the client.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="close_session-2"/>
+ <p>Requests graceful termination of the session associated with the
+ client.</p>
+
+ <p>When a NETCONF server receives a <c>close-session</c> request, it
+ gracefully closes the session. The server releases any locks and
+ resources associated with the session and gracefully closes any
+ associated connections. Any NETCONF requests received after a
+ <c>close-session</c> request are ignored.</p>
+ </desc>
+ </func>
+
+ <func>
+      <name>copy_config(Client, Target, Source) -&gt; Result</name>
+      <fsummary>Equivalent to copy_config(Client, Target, Source,
+        infinity).</fsummary>
+      <desc><marker id="copy_config-3"/>
+        <p>Equivalent to
+          <seealso marker="#copy_config-4"><c>ct_netconfc:copy_config(Client,
+          Target, Source, infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>copy_config(Client, Target, Source, Timeout) -&gt; Result</name>
+ <fsummary>Copies configuration data.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Target = netconf_db()</v>
+ <v>Source = netconf_db()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="copy_config-4"/>
+ <p>Copies configuration data.</p>
+
+        <p>Which source and target options can be issued depends on the
+          capabilities supported by the server. That is, <c>:candidate</c>
+          and/or <c>:startup</c> must be advertised.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>create_subscription(Client) -&gt; term()</name>
+ <fsummary>Creates a subscription for event notifications.</fsummary>
+ <desc><marker id="create_subscription-1"/></desc>
+ </func>
+
+ <func>
+ <name>create_subscription(Client, Timeout) -&gt; term()</name>
+ <fsummary>Creates a subscription for event notifications.</fsummary>
+ <desc><marker id="create_subscription-2"/></desc>
+ </func>
+
+ <func>
+ <name>create_subscription(Client, Stream, Timeout) -&gt; term()</name>
+ <fsummary>Creates a subscription for event notifications.</fsummary>
+ <desc><marker id="create_subscription-3"/></desc>
+ </func>
+
+ <func>
+ <name>create_subscription(Client, StartTime, StopTime, Timeout) -&gt; term()</name>
+ <fsummary>Creates a subscription for event notifications.</fsummary>
+ <desc><marker id="create_subscription-4"/></desc>
+ </func>
+
+ <func>
+ <name>create_subscription(Client, Stream, StartTime, StopTime, Timeout) -&gt; term()</name>
+ <fsummary>Creates a subscription for event notifications.</fsummary>
+ <desc><marker id="create_subscription-5"/></desc>
+ </func>
+
+ <func>
+ <name>create_subscription(Client, Stream, Filter, StartTime, StopTime, Timeout) -&gt; Result</name>
+ <fsummary>Creates a subscription for event notifications.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Stream = stream_name()</v>
+ <v>Filter = simple_xml() | [simple_xml()]</v>
+ <v>StartTime = xs_datetime()</v>
+ <v>StopTime = xs_datetime()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="create_subscription-6"/>
+ <p>Creates a subscription for event notifications.</p>
+
+ <p>This function sets up a subscription for NETCONF event
+ notifications of the specified stream type, matching the specified
+ filter. The calling process receives notifications as messages of
+ type <c>notification()</c>.</p>
+
+ <taglist>
+ <tag><c>Stream</c></tag>
+          <item><p>Optional parameter that indicates which stream of events
+            is of interest. If not present, events in the default NETCONF
+            stream are sent.</p></item>
+ <tag><c>Filter</c></tag>
+ <item><p>Optional parameter that indicates which subset of all
+ possible events is of interest. The parameter format is the
+ same as that of the filter parameter in the NETCONF protocol
+ operations. If not present, all events not precluded by other
+ parameters are sent.</p></item>
+ <tag><c>StartTime</c></tag>
+ <item><p>Optional parameter used to trigger the replay feature and
+ indicate that the replay is to start at the time specified.
+ If <c>StartTime</c> is not present, this is not a replay
+ subscription.</p>
+ <p>It is not valid to specify start times that are later than
+ the current time. If <c>StartTime</c> is specified earlier
+ than the log can support, the replay begins with the earliest
+ available notification.</p>
+            <p>This parameter is of type <c>dateTime</c> and compliant with
+              RFC 3339. Implementations must support time zones.</p></item>
+ <tag><c>StopTime</c></tag>
+ <item><p>Optional parameter used with the optional replay feature
+ to indicate the newest notifications of interest. If
+            <c>StopTime</c> is not present, the notifications continue
+ until the subscription is terminated.</p>
+ <p>Must be used with and be later than <c>StartTime</c>. Values
+ of <c>StopTime</c> in the future are valid. This parameter is
+            of type <c>dateTime</c> and compliant with RFC 3339.
+ Implementations must support time zones.</p></item>
+ </taglist>
+
+ <p>For more details about the event notification mechanism, see
+ RFC 5277.</p>
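+
+        <p>The following is a minimal sketch of a replay subscription on a
+          hypothetical stream named <c>"NETCONF"</c> (the client handle,
+          time values, and time-out are assumptions for illustration):</p>
+
+        <pre>
+ ok = ct_netconfc:create_subscription(Client, "NETCONF",
+                                      "2015-03-24T12:00:00Z",
+                                      "2015-03-24T13:00:00Z",
+                                      5000),
+ receive
+     {notification, _Attrs, _Content} = Notification -&gt;
+         ct:log("Got ~p", [Notification])
+ end.</pre>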
+ </desc>
+ </func>
+
+ <func>
+ <name>delete_config(Client, Target) -&gt; Result</name>
+ <fsummary>Equivalent to delete_config(Client, Target,
+ infinity).</fsummary>
+ <desc><marker id="delete_config-2"/>
+ <p>Equivalent to
+ <seealso marker="#delete_config-3"><c>ct_netconfc:delete_config(Client, Target, infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>delete_config(Client, Target, Timeout) -&gt; Result</name>
+ <fsummary>Deletes configuration data.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Target = startup | candidate</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="delete_config-3"/>
+ <p>Deletes configuration data.</p>
+
+ <p>The running configuration cannot be deleted and <c>:candidate</c>
+ or <c>:startup</c> must be advertised by the server.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>edit_config(Client, Target, Config) -&gt; Result</name>
+ <fsummary>Equivalent to edit_config(Client, Target, Config, [],
+ infinity).</fsummary>
+ <desc><marker id="edit_config-3"/>
+ <p>Equivalent to
+ <seealso marker="#edit_config-5"><c>ct_netconfc:edit_config(Client,
+ Target, Config, [], infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>edit_config(Client, Target, Config, OptParamsOrTimeout) -&gt; Result</name>
+ <fsummary>If OptParamsOrTimeout is a time-out value, this function is
+ equivalent to ct_netconfc:edit_config(Client, Target, Config, [],
+ Timeout).</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Target = netconf_db()</v>
+ <v>Config = simple_xml()</v>
+ <v>OptParamsOrTimeout = [simple_xml()] | timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="edit_config-4"/>
+ <p>If <c>OptParamsOrTimeout</c> is a time-out value, this function is
+ equivalent to
+ <seealso marker="#edit_config-5"><c>ct_netconfc:edit_config(Client,
+ Target, Config, [], Timeout)</c></seealso>.</p>
+
+ <p>If <c>OptParamsOrTimeout</c> is a list of simple XML, this
+ function is equivalent to
+ <seealso marker="#edit_config-5"><c>ct_netconfc:edit_config(Client,
+ Target, Config, OptParams, infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>edit_config(Client, Target, Config, OptParams, Timeout) -&gt; Result</name>
+ <fsummary>Edits configuration data.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Target = netconf_db()</v>
+ <v>Config = simple_xml()</v>
+ <v>OptParams = [simple_xml()]</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="edit_config-5"/>
+ <p>Edits configuration data.</p>
+
+        <p>Only the <c>running</c> target is available, unless the server
+          includes <c>:candidate</c> or <c>:startup</c> in its list of
+          capabilities.</p>
+
+ <p><c>OptParams</c> can be used for specifying optional parameters
+ (<c>default-operation</c>, <c>test-option</c>, or
+ <c>error-option</c>) to be added to the <c>edit-config</c>
+ request. The value must be a list containing valid simple XML,
+ for example:</p>
+
+ <pre>
+ [{'default-operation', ["none"]},
+ {'error-option', ["rollback-on-error"]}]</pre>
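+
+        <p>As an illustration, a complete call could look as follows (the
+          client handle and the XML content are hypothetical):</p>
+
+        <pre>
+ Config = {server, [{name, ["myserver"]}]},
+ OptParams = [{'default-operation', ["none"]},
+              {'error-option', ["rollback-on-error"]}],
+ ok = ct_netconfc:edit_config(Client, running, Config, OptParams, 5000).</pre>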
+ </desc>
+ </func>
+
+ <func>
+ <name>get(Client, Filter) -&gt; Result</name>
+ <fsummary>Equivalent to get(Client, Filter, infinity).</fsummary>
+ <desc><marker id="get-2"/>
+ <p>Equivalent to
+ <seealso marker="#get-3"><c>ct_netconfc:get(Client, Filter,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get(Client, Filter, Timeout) -&gt; Result</name>
+ <fsummary>Gets data.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Filter = simple_xml() | xpath()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = {ok, [simple_xml()]} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="get-3"/>
+ <p>Gets data.</p>
+
+ <p>This operation returns both configuration and state data from the
+ server.</p>
+
+ <p>Filter type <c>xpath</c> can be used only if the server supports
+ <c>:xpath</c>.</p>
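+
+        <p>For example, a sketch of a call using an XPath filter (the client
+          handle, path, and time-out are assumptions for illustration):</p>
+
+        <pre>
+ {ok, Data} = ct_netconfc:get(Client, {xpath, "/interfaces/interface"}, 5000).</pre>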
+ </desc>
+ </func>
+
+ <func>
+ <name>get_capabilities(Client) -&gt; Result</name>
+ <fsummary>Equivalent to get_capabilities(Client, infinity).</fsummary>
+ <desc><marker id="get_capabilities-1"/>
+ <p>Equivalent to
+ <seealso marker="#get_capabilities-2"><c>ct_netconfc:get_capabilities(Client,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_capabilities(Client, Timeout) -&gt; Result</name>
+ <fsummary>Returns the server side capabilities.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = [string()] | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="get_capabilities-2"/>
+ <p>Returns the server side capabilities.</p>
+
+ <p>The following capability identifiers, defined in RFC 4741 NETCONF
+ Configuration Protocol, can be returned:</p>
+
+ <list>
+ <item><p><c>"urn:ietf:params:netconf:base:1.0"</c></p></item>
+ <item><p><c>"urn:ietf:params:netconf:capability:writable-running:1.0"</c></p></item>
+ <item><p><c>"urn:ietf:params:netconf:capability:candidate:1.0"</c></p></item>
+ <item><p><c>"urn:ietf:params:netconf:capability:confirmed-commit:1.0"</c></p></item>
+ <item><p><c>"urn:ietf:params:netconf:capability:rollback-on-error:1.0"</c></p></item>
+ <item><p><c>"urn:ietf:params:netconf:capability:startup:1.0"</c></p></item>
+ <item><p><c>"urn:ietf:params:netconf:capability:url:1.0"</c></p></item>
+ <item><p><c>"urn:ietf:params:netconf:capability:xpath:1.0"</c></p></item>
+ </list>
+
+        <p>More identifiers can exist, for example, server-side namespaces.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_config(Client, Source, Filter) -&gt; Result</name>
+ <fsummary>Equivalent to get_config(Client, Source, Filter,
+ infinity).</fsummary>
+ <desc><marker id="get_config-3"/>
+ <p>Equivalent to
+ <seealso marker="#get_config-4"><c>ct_netconfc:get_config(Client, Source, Filter, infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_config(Client, Source, Filter, Timeout) -&gt; Result</name>
+ <fsummary>Gets configuration data.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Source = netconf_db()</v>
+ <v>Filter = simple_xml() | xpath()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = {ok, [simple_xml()]} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="get_config-4"/>
+ <p>Gets configuration data.</p>
+
+ <p>To be able to access another source than <c>running</c>, the
+ server must advertise <c>:candidate</c> and/or <c>:startup</c>.</p>
+
+ <p>Filter type <c>xpath</c> can be used only if the server supports
+ <c>:xpath</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_event_streams(Client, Timeout) -&gt; Result</name>
+ <fsummary>Equivalent to get_event_streams(Client, [], Timeout).</fsummary>
+ <desc><marker id="get_event_streams-2"/>
+ <p>Equivalent to
+ <seealso marker="#get_event_streams-3"><c>ct_netconfc:get_event_streams(Client,
+ [], Timeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_event_streams(Client, Streams, Timeout) -&gt; Result</name>
+ <fsummary>Sends a request to get the specified event streams.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Streams = [stream_name()]</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = {ok, streams()} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="get_event_streams-3"/>
+ <p>Sends a request to get the specified event streams.</p>
+
+ <p><c>Streams</c> is a list of stream names. The following filter is
+ sent to the NETCONF server in a <c>get</c> request:</p>
+
+ <pre>
+ &lt;netconf xmlns="urn:ietf:params:xml:ns:netmod:notification"&gt;
+ &lt;streams&gt;
+ &lt;stream&gt;
+ &lt;name&gt;StreamName1&lt;/name&gt;
+ &lt;/stream&gt;
+ &lt;stream&gt;
+ &lt;name&gt;StreamName2&lt;/name&gt;
+ &lt;/stream&gt;
+ ...
+ &lt;/streams&gt;
+ &lt;/netconf&gt;</pre>
+
+ <p>If <c>Streams</c> is an empty list, <em>all</em> streams are
+ requested by sending the following filter:</p>
+
+ <pre>
+ &lt;netconf xmlns="urn:ietf:params:xml:ns:netmod:notification"&gt;
+ &lt;streams/&gt;
+ &lt;/netconf&gt;</pre>
+
+ <p>If more complex filtering is needed, use
+ <seealso marker="#get-2"><c>ct_netconfc:get/2</c></seealso> or
+ <seealso marker="#get-3"><c>ct_netconfc:get/3</c></seealso> and
+ specify the exact filter according to "XML Schema for Event
+ Notifications" in RFC 5277.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_session_id(Client) -&gt; Result</name>
+ <fsummary>Equivalent to get_session_id(Client, infinity).</fsummary>
+ <desc><marker id="get_session_id-1"/>
+ <p>Equivalent to
+ <seealso marker="#get_session_id-2"><c>ct_netconfc:get_session_id(Client,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_session_id(Client, Timeout) -&gt; Result</name>
+ <fsummary>Returns the session Id associated with the specified
+ client.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = pos_integer() | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="get_session_id-2"/>
+ <p>Returns the session Id associated with the specified client.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>hello(Client) -&gt; Result</name>
+ <fsummary>Equivalent to hello(Client, [], infinity).</fsummary>
+ <desc><marker id="hello-1"/>
+ <p>Equivalent to
+ <seealso marker="#hello-3"><c>ct_netconfc:hello(Client, [],
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>hello(Client, Timeout) -&gt; Result</name>
+ <fsummary>Equivalent to hello(Client, [], Timeout).</fsummary>
+ <desc><marker id="hello-2"/>
+ <p>Equivalent to
+ <seealso marker="#hello-3"><c>ct_netconfc:hello(Client, [],
+ Timeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>hello(Client, Options, Timeout) -&gt; Result</name>
+ <fsummary>Exchanges hello messages with the server.</fsummary>
+ <type>
+ <v>Client = handle()</v>
+ <v>Options = [{capability, [string()]}]</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="hello-3"/>
+ <p>Exchanges <c>hello</c> messages with the server.</p>
+
+ <p>Adds optional capabilities and sends a <c>hello</c> message to the
+ server and waits for the return.</p>
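+
+        <p>A minimal sketch of opening a session without <c>hello</c> and
+          then sending it explicitly (the host, credentials, capability
+          string, and time-out are hypothetical):</p>
+
+        <pre>
+ {ok, Handle} = ct_netconfc:only_open([{ssh, "10.0.0.1"}, {port, 830},
+                                       {user, "admin"}, {password, "secret"}]),
+ ok = ct_netconfc:hello(Handle, [{capability, ["urn:example:capability:1.0"]}],
+                        5000).</pre>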
+ </desc>
+ </func>
+
+ <func>
+ <name>kill_session(Client, SessionId) -&gt; Result</name>
+ <fsummary>Equivalent to kill_session(Client, SessionId,
+ infinity).</fsummary>
+ <desc><marker id="kill_session-2"/>
+ <p>Equivalent to
+ <seealso marker="#kill_session-3"><c>ct_netconfc:kill_session(Client,
+SessionId, infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>kill_session(Client, SessionId, Timeout) -&gt; Result</name>
+ <fsummary>Forces termination of the session associated with the supplied
+ session Id.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>SessionId = pos_integer()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="kill_session-3"/>
+ <p>Forces termination of the session associated with the supplied
+ session Id.</p>
+
+ <p>The server side must abort any ongoing operations, release any
+ locks and resources associated with the session, and close any
+ associated connections.</p>
+
+        <p>If the server is in the confirmed commit phase, the configuration
+          is restored to its state before entering the confirmed commit
+          phase. Otherwise, no configuration rollback is performed.</p>
+
+ <p>If the specified <c>SessionId</c> is equal to the current session
+ Id, an error is returned.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>lock(Client, Target) -&gt; Result</name>
+ <fsummary>Equivalent to lock(Client, Target, infinity).</fsummary>
+ <desc><marker id="lock-2"/>
+ <p>Equivalent to
+ <seealso marker="#lock-3"><c>ct_netconfc:lock(Client, Target,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>lock(Client, Target, Timeout) -&gt; Result</name>
+      <fsummary>Locks the configuration target.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Target = netconf_db()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="lock-3"/>
+ <p>Unlocks the configuration target.</p>
+
+ <p>Which target parameters that can be used depends on if
+ <c>:candidate</c> and/or <c>:startup</c> are supported by the
+ server. If successfull, the configuration system of the device is
+ unavailable to other clients (NETCONF, CORBA, SNMP, and so on).
+ Locks are intended to be short-lived.</p>
+
+ <p>Operation
+ <seealso marker="#kill_session-2"><c>ct_netconfc:kill_session/2</c></seealso>
+ or
+ <seealso marker="#kill_session-3"><c>ct_netconfc:kill_session/3</c></seealso>
+ can be used to force the release of a lock owned by another NETCONF
+ session. How this is achieved by the server side is
+ implementation-specific.</p>
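+
+        <p>A sketch of the typical lock, edit, unlock sequence (the client
+          handle, configuration content, and time-outs are hypothetical):</p>
+
+        <pre>
+ ok = ct_netconfc:lock(Client, running, 5000),
+ ok = ct_netconfc:edit_config(Client, running, Config, 5000),
+ ok = ct_netconfc:unlock(Client, running, 5000).</pre>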
+ </desc>
+ </func>
+
+ <func>
+ <name>only_open(Options) -&gt; Result</name>
+ <fsummary>Opens a NETCONF session, but does not send hello.</fsummary>
+ <type>
+ <v>Options = options()</v>
+ <v>Result = {ok, handle()} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="only_open-1"/>
+ <p>Opens a NETCONF session, but does not send <c>hello</c>.</p>
+
+ <p>As <seealso marker="#open-1"><c>ct_netconfc:open/1</c></seealso>,
+ but does not send a <c>hello</c> message.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>only_open(KeyOrName, ExtraOptions) -&gt; Result</name>
+      <fsummary>Opens a named NETCONF session, but does not send
+        hello.</fsummary>
+ <type>
+ <v>KeyOrName = key_or_name()</v>
+ <v>ExtraOptions = options()</v>
+ <v>Result = {ok, handle()} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="only_open-2"/>
+        <p>Opens a named NETCONF session, but does not send <c>hello</c>.</p>
+
+ <p>As <seealso marker="#open-2"><c>ct_netconfc:open/2</c></seealso>,
+ but does not send a <c>hello</c> message.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(Options) -&gt; Result</name>
+ <fsummary>Opens a NETCONF session and exchanges hello messages.</fsummary>
+ <type>
+ <v>Options = options()</v>
+ <v>Result = {ok, handle()} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="open-1"/>
+ <p>Opens a NETCONF session and exchanges <c>hello</c> messages.</p>
+
+ <p>If the server options are specified in a configuration file,
+ or if a named client is needed for logging purposes (see section
+ <seealso marker="#Logging">Logging</seealso> in this module), use
+ <seealso marker="#open-2"><c>ct_netconfc:open/2</c></seealso>
+ instead.</p>
+
+ <p>The opaque <c>handle()</c> reference returned from this
+ function is required as client identifier when calling any other
+ function in this module.</p>
+
+ <p>Option <c>timeout</c> (milliseconds) is used when setting up the
+ SSH connection and when waiting for the <c>hello</c> message from
+ the server. It is not used for any other purposes during the
+ lifetime of the connection.</p>
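+
+        <p>For example, a sketch with hypothetical host and credentials:</p>
+
+        <pre>
+ {ok, Handle} = ct_netconfc:open([{ssh, "10.0.0.1"}, {port, 830},
+                                  {user, "admin"}, {password, "secret"}]).</pre>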
+ </desc>
+ </func>
+
+ <func>
+ <name>open(KeyOrName, ExtraOptions) -&gt; Result</name>
+ <fsummary>Opens a named NETCONF session and exchanges hello
+ messages.</fsummary>
+ <type>
+ <v>KeyOrName = key_or_name()</v>
+ <v>ExtraOptions = options()</v>
+ <v>Result = {ok, handle()} | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="open-2"/>
+ <p>Opens a named NETCONF session and exchanges <c>hello</c>
+ messages.</p>
+
+ <p>If <c>KeyOrName</c> is a configured <c>server_id()</c> or a
+ <c>target_name()</c> associated with such an Id, then the options
+ for this server are fetched from the configuration file.</p>
+
+ <p>Argument <c>ExtraOptions</c> is added to the options found in the
+ configuration file. If the same options are specified, the values
+ from the configuration file overwrite <c>ExtraOptions</c>.</p>
+
+ <p>If the server is not specified in a configuration file, use
+ <seealso marker="#open-1"><c>ct_netconfc:open/1</c></seealso>
+ instead.</p>
+
+ <p>The opaque <c>handle()</c> reference returned from this
+ function can be used as client identifier when calling any other
+ function in this module. However, if <c>KeyOrName</c> is a
+ <c>target_name()</c>, that is, if the server is named through a
+ call to <seealso marker="ct#require-2"><c>ct:require/2</c></seealso>
+ or a <c>require</c> statement in the test suite, then this name can
+ be used instead of <c>handle()</c>.</p>
+
+ <p>Option <c>timeout</c> (milliseconds) is used when setting up the
+ SSH connection and when waiting for the <c>hello</c> message from
+ the server. It is not used for any other purposes during the
+ lifetime of the connection.</p>
+
+ <p>See also
+ <seealso marker="ct#require-2"><c>ct:require/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(Client, SimpleXml) -&gt; Result</name>
+ <fsummary>Equivalent to send(Client, SimpleXml, infinity).</fsummary>
+ <desc><marker id="send-2"/>
+ <p>Equivalent to
+ <seealso marker="#send-3"><c>ct_netconfc:send(Client, SimpleXml,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(Client, SimpleXml, Timeout) -&gt; Result</name>
+ <fsummary>Sends an XML document to the server.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>SimpleXml = simple_xml()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = simple_xml() | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="send-3"/>
+ <p>Sends an XML document to the server.</p>
+
+ <p>The specified XML document is sent "as is" to the server. This
+ function can be used for sending XML documents that cannot be
+ expressed by other interface functions in this module.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send_rpc(Client, SimpleXml) -&gt; Result</name>
+ <fsummary>Equivalent to send_rpc(Client, SimpleXml, infinity).</fsummary>
+ <desc><marker id="send_rpc-2"/>
+ <p>Equivalent to
+ <seealso marker="#send_rpc-3"><c>ct_netconfc:send_rpc(Client,
+ SimpleXml, infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send_rpc(Client, SimpleXml, Timeout) -&gt; Result</name>
+ <fsummary>Sends a NETCONF rpc request to the server.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>SimpleXml = simple_xml()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = [simple_xml()] | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="send_rpc-3"/>
+ <p>Sends a NETCONF <c>rpc</c> request to the server.</p>
+
+ <p>The specified XML document is wrapped in a valid NETCONF <c>rpc</c>
+ request and sent to the server. The <c>message-id</c> and namespace
+ attributes are added to element <c>rpc</c>.</p>
+
+ <p>This function can be used for sending <c>rpc</c> requests that
+ cannot be expressed by other interface functions in this module.</p>
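+
+        <p>For example, a sketch sending a hypothetical request as simple XML
+          (the tag, content, and time-out are assumptions for
+          illustration):</p>
+
+        <pre>
+ SimpleXml = {'get-schema', [{identifier, ["ietf-interfaces"]}]},
+ Reply = ct_netconfc:send_rpc(Client, SimpleXml, 5000).</pre>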
+ </desc>
+ </func>
+
+ <func>
+ <name>unlock(Client, Target) -&gt; Result</name>
+ <fsummary>Equivalent to unlock(Client, Target, infinity).</fsummary>
+ <desc><marker id="unlock-2"/>
+ <p>Equivalent to
+ <seealso marker="#unlock-3"><c>ct_netconfc:unlock(Client, Target,
+ infinity)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unlock(Client, Target, Timeout) -&gt; Result</name>
+ <fsummary>Unlocks the configuration target.</fsummary>
+ <type>
+ <v>Client = client()</v>
+ <v>Target = netconf_db()</v>
+ <v>Timeout = timeout()</v>
+ <v>Result = ok | {error, error_reason()}</v>
+ </type>
+ <desc><marker id="unlock-3"/>
+ <p>Unlocks the configuration target.</p>
+
+        <p>If the client has earlier acquired a lock through
+ <seealso marker="#lock-2"><c>ct_netconfc:lock/2</c></seealso> or
+ <seealso marker="#lock-3"><c>ct_netconfc:lock/3</c></seealso>, this
+ operation releases the associated lock. To access another target
+ than <c>running</c>, the server must support <c>:candidate</c>
+ and/or <c>:startup</c>.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_property_test.xml b/lib/common_test/doc/src/ct_property_test.xml
new file mode 100644
index 0000000000..2e9bd1969c
--- /dev/null
+++ b/lib/common_test/doc/src/ct_property_test.xml
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_property_test</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_property_test.xml</file>
+ </header>
+ <module>ct_property_test</module>
+ <modulesummary>EXPERIMENTAL support in Common Test for calling
+ property-based tests.</modulesummary>
+
+ <description>
+
+ <p>EXPERIMENTAL support in <c>Common Test</c> for calling property-based
+ tests.</p>
+
+    <p>This module is a first step towards running property-based tests in
+      the <c>Common Test</c> framework. A property testing tool such as
+      QuickCheck or PropEr is assumed to be installed.</p>
+
+ <p>The idea is to have a <c>Common Test</c> test suite calling a property
+ testing tool with special property test suites as defined by that tool.
+ The usual Erlang application directory structure is assumed. The tests
+ are collected in the <c>test</c> directory of the application. The
+ <c>test</c> directory has a subdirectory <c>property_test</c>, where
+ everything needed for the property tests is collected.</p>
+
+ <p>A typical <c>Common Test</c> test suite using <c>ct_property_test</c>
+ is organized as follows:</p>
+
+ <pre>
+ -include_lib("common_test/include/ct.hrl").
+
+ all() -&gt; [prop_ftp_case].
+
+ init_per_suite(Config) -&gt;
+ ct_property_test:init_per_suite(Config).
+
+ %%%---- test case
+ prop_ftp_case(Config) -&gt;
+ ct_property_test:quickcheck(
+ ftp_simple_client_server:prop_ftp(Config),
+ Config
+ ).</pre>
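+
+    <p>A minimal sketch of the corresponding property module in
+      <c>test/property_test</c> could look as follows. The module and
+      property names match the example above; the include and the trivial
+      property body assume that PropEr is the installed tool (other tools
+      use their own include files):</p>
+
+    <pre>
+ -module(ftp_simple_client_server).
+ -export([prop_ftp/1]).
+ -include_lib("proper/include/proper.hrl").
+
+ prop_ftp(_Config) -&gt;
+     ?FORALL(I, integer(), is_integer(I)).</pre>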
+
+ <warning>
+ <p>This is experimental code that can be changed or removed anytime
+ without any warning.</p>
+ </warning>
+
+ </description>
+
+ <funcs>
+ <func>
+ <name>init_per_suite(Config) -&gt; Config | {skip, Reason}</name>
+ <fsummary>Initializes Config for property testing.</fsummary>
+ <desc><marker id="init_per_suite-1"/>
+ <p>Initializes <c>Config</c> for property testing.</p>
+
+        <p>This function checks whether support is available for
+          QuickCheck, PropEr, or Triq. The options
+          <c>{property_dir,AbsPath}</c> and <c>{property_test_tool,Tool}</c>
+          are set in the <c>Config</c> returned.</p>
+
+ <p>The function is intended to be called in function
+ <c>init_per_suite</c> in the test suite.</p>
+
+ <p>The property tests are assumed to be in subdirectory
+ <c>property_test</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>quickcheck(Property, Config) -&gt; true | {fail, Reason}</name>
+ <fsummary>Calls quickcheck and returns the result in a form suitable for
+ Common Test.</fsummary>
+ <desc><marker id="quickcheck-2"/>
+ <p>Calls quickcheck and returns the result in a form suitable for
+ <c>Common Test</c>.</p>
+
+ <p>This function is intended to be called in the test cases in the
+ test suite.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_rpc.xml b/lib/common_test/doc/src/ct_rpc.xml
new file mode 100644
index 0000000000..0169727581
--- /dev/null
+++ b/lib/common_test/doc/src/ct_rpc.xml
@@ -0,0 +1,220 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_rpc</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_rpc.xml</file>
+ </header>
+ <module>ct_rpc</module>
+ <modulesummary>Common Test specific layer on Erlang/OTP rpc.</modulesummary>
+
+ <description>
+
+ <p><c>Common Test</c> specific layer on Erlang/OTP <c>rpc</c>.</p>
+
+ </description>
+
+ <funcs>
+ <func>
+ <name>app_node(App, Candidates) -&gt; NodeName</name>
+ <fsummary>From a set of candidate nodes determines which of them is
+ running the application App.</fsummary>
+ <type>
+ <v>App = atom()</v>
+ <v>Candidates = [NodeName]</v>
+ <v>NodeName = atom()</v>
+ </type>
+ <desc><marker id="app_node-2"/>
+        <p>From a set of candidate nodes determines which of them is running
+          the application <c>App</c>. If none of the candidate nodes is
+          running <c>App</c>, the function causes the test case calling it
+          to fail. This function is the same as calling
+          <c>app_node(App, Candidates, true)</c>.</p>
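+
+        <p>For example (the node names are hypothetical):</p>
+
+        <pre>
+ Node = ct_rpc:app_node(snmp, ['node1@host1', 'node2@host2']).</pre>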
+ </desc>
+ </func>
+
+ <func>
+ <name>app_node(App, Candidates, FailOnBadRPC) -&gt; NodeName</name>
+ <fsummary>Same as app_node/2, except that argument FailOnBadRPC
+ determines if the search for a candidate node is to stop if
+ badrpc is received at some point.</fsummary>
+ <type>
+ <v>App = atom()</v>
+ <v>Candidates = [NodeName]</v>
+ <v>NodeName = atom()</v>
+ <v>FailOnBadRPC = true | false</v>
+ </type>
+ <desc><marker id="app_node-3"/>
+ <p>Same as
+ <seealso marker="#app_node-2"><c>ct_rpc:app_node/2</c></seealso>,
+ except that argument <c>FailOnBadRPC</c> determines if the search
+ for a candidate node is to stop if <c>badrpc</c> is received at
+ some point.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>app_node(App, Candidates, FailOnBadRPC, Cookie) -&gt; NodeName</name>
+ <fsummary>Same as app_node/2, except that argument FailOnBadRPC
+ determines if the search for a candidate node is to stop if badrpc is
+ received at some point.</fsummary>
+ <type>
+ <v>App = atom()</v>
+ <v>Candidates = [NodeName]</v>
+ <v>NodeName = atom()</v>
+ <v>FailOnBadRPC = true | false</v>
+ <v>Cookie = atom()</v>
+ </type>
+ <desc><marker id="app_node-4"/>
+ <p>Same as
+ <seealso marker="#app_node-2"><c>ct_rpc:app_node/2</c></seealso>,
+ except that argument <c>FailOnBadRPC</c> determines if the search
+ for a candidate node is to stop if <c>badrpc</c> is received at
+ some point.</p>
+
+ <p>The cookie on the client node is set to <c>Cookie</c> for this
+ <c>rpc</c> operation (used to match the server node cookie).</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>call(Node, Module, Function, Args) -&gt; term() | {badrpc, Reason}</name>
+ <fsummary>Same as call(Node, Module, Function, Args, infinity).</fsummary>
+ <desc><marker id="call-4"/>
+ <p>Same as <c>call(Node, Module, Function, Args, infinity)</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>call(Node, Module, Function, Args, TimeOut) -&gt; term() | {badrpc, Reason}</name>
+ <fsummary>Evaluates apply(Module, Function, Args) on the node
+ Node.</fsummary>
+ <type>
+ <v>Node = NodeName | {Fun, FunArgs}</v>
+ <v>Fun = function()</v>
+ <v>FunArgs = term()</v>
+ <v>NodeName = atom()</v>
+ <v>Module = atom()</v>
+ <v>Function = atom()</v>
+ <v>Args = [term()]</v>
+ <v>Reason = timeout | term()</v>
+ </type>
+ <desc><marker id="call-5"/>
+ <p>Evaluates <c>apply(Module, Function, Args)</c> on the node
+ <c>Node</c>. Returns either whatever <c>Function</c> returns, or
+ <c>{badrpc, Reason}</c> if the remote procedure call fails. If
+ <c>Node</c> is <c>{Fun, FunArgs}</c>, applying <c>Fun</c> to
+ <c>FunArgs</c> is to return a node name.</p>
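+
+        <p>For example, a sketch with a hypothetical node name and
+          time-out:</p>
+
+        <pre>
+ Apps = ct_rpc:call('node1@host1', application, which_applications, [],
+                    5000).</pre>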
+ </desc>
+ </func>
+
+ <func>
+ <name>call(Node, Module, Function, Args, TimeOut, Cookie) -&gt; term() | {badrpc, Reason}</name>
+ <fsummary>Evaluates apply(Module, Function, Args) on the node
+ Node.</fsummary>
+ <type>
+ <v>Node = NodeName | {Fun, FunArgs}</v>
+ <v>Fun = function()</v>
+ <v>FunArgs = term()</v>
+ <v>NodeName = atom()</v>
+ <v>Module = atom()</v>
+ <v>Function = atom()</v>
+ <v>Args = [term()]</v>
+ <v>Reason = timeout | term()</v>
+ <v>Cookie = atom()</v>
+ </type>
+ <desc><marker id="call-6"/>
+ <p>Evaluates <c>apply(Module, Function, Args)</c> on the node
+ <c>Node</c>. Returns either whatever <c>Function</c> returns, or
+ <c>{badrpc, Reason}</c> if the remote procedure call fails. If
+ <c>Node</c> is <c>{Fun, FunArgs}</c>, applying <c>Fun</c> to
+ <c>FunArgs</c> is to return a node name.</p>
+
+ <p>The cookie on the client node is set to <c>Cookie</c> for this
+ <c>rpc</c> operation (used to match the server node cookie).</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>cast(Node, Module, Function, Args) -&gt; ok</name>
+ <fsummary>Evaluates apply(Module, Function, Args) on the node
+ Node.</fsummary>
+ <type>
+ <v>Node = NodeName | {Fun, FunArgs}</v>
+ <v>Fun = function()</v>
+ <v>FunArgs = term()</v>
+ <v>NodeName = atom()</v>
+ <v>Module = atom()</v>
+ <v>Function = atom()</v>
+ <v>Args = [term()]</v>
+ <v>Reason = timeout | term()</v>
+ </type>
+ <desc><marker id="cast-4"/>
+ <p>Evaluates <c>apply(Module, Function, Args)</c> on the node
+ <c>Node</c>. No response is delivered and the process that makes
+          the call is not suspended until the evaluation is completed, as it
+          is in the case of <c>call/4,5,6</c>. If <c>Node</c> is
+ <c>{Fun, FunArgs}</c>, applying <c>Fun</c> to <c>FunArgs</c> is to
+ return a node name.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>cast(Node, Module, Function, Args, Cookie) -&gt; ok</name>
+ <fsummary>Evaluates apply(Module, Function, Args) on the node
+ Node.</fsummary>
+ <type>
+ <v>Node = NodeName | {Fun, FunArgs}</v>
+ <v>Fun = function()</v>
+ <v>FunArgs = term()</v>
+ <v>NodeName = atom()</v>
+ <v>Module = atom()</v>
+ <v>Function = atom()</v>
+ <v>Args = [term()]</v>
+ <v>Reason = timeout | term()</v>
+ <v>Cookie = atom()</v>
+ </type>
+ <desc><marker id="cast-5"/>
+ <p>Evaluates <c>apply(Module, Function, Args)</c> on the node
+ <c>Node</c>. No response is delivered and the process that makes
+          the call is not suspended until the evaluation is completed, as it
+          is in the case of <c>call/4,5,6</c>. If <c>Node</c> is
+ <c>{Fun, FunArgs}</c>, applying <c>Fun</c> to <c>FunArgs</c> is to
+ return a node name.</p>
+
+ <p>The cookie on the client node is set to <c>Cookie</c> for this
+ <c>rpc</c> operation (used to match the server node cookie).</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_run.xml b/lib/common_test/doc/src/ct_run.xml
index 5518bb039b..d0ecc38564 100644
--- a/lib/common_test/doc/src/ct_run.xml
+++ b/lib/common_test/doc/src/ct_run.xml
@@ -34,178 +34,189 @@
</header>
<com>ct_run</com>
<comsummary>Program used for starting Common Test from the
- OS command line.
- </comsummary>
+ OS command line.</comsummary>
<description>
<p>The <c>ct_run</c> program is automatically installed with Erlang/OTP
- and Common Test (please see the Installation chapter in the Common
- Test User's Guide for more information). The program accepts a number
- of different start flags. Some flags trigger <c>ct_run</c>
- to start the Common Test application and pass on data to it. Some
- flags start an Erlang node prepared for running Common Test in a
- particular mode.</p>
-
- <p>There is an interface function that corresponds to this program,
- called <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>, for starting Common Test from the Erlang
- shell (or an Erlang program). Please see the <c>ct</c> man page for
- details.</p>
+ and the <c>Common Test</c> application (for more information, see
+ section <seealso marker="install_chapter">Installation</seealso>
+ in the User's Guide). The program accepts different start flags.
+ Some flags trigger <c>ct_run</c> to start <c>Common Test</c> and
+ pass on data to it. Some flags start an Erlang node prepared for
+ running <c>Common Test</c> in a particular mode.</p>
+
+ <p>The interface function
+ <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>,
+ corresponding to the <c>ct_run</c> program, is used for starting
+ <c>Common Test</c> from the Erlang shell (or an Erlang program).
+ For details, see the <seealso marker="ct"><c>ct</c></seealso>
+ manual page.</p>
<p><c>ct_run</c> also accepts Erlang emulator flags. These are used
- when <c>ct_run</c> calls <c>erl</c> to start the Erlang node
- (making it possible to e.g. add directories to the code server path,
- change the cookie on the node, start additional applications, etc).</p>
-
- <p>With the optional flag:</p>
- <pre>-erl_args</pre>
- <p>it's possible to divide the options on the <c>ct_run</c> command line into
- two groups, one that Common Test should process (those preceding <c>-erl_args</c>),
- and one it should completely ignore and pass on directly to the emulator
- (those following <c>-erl_args</c>). Options preceding <c>-erl_args</c> that Common Test
- doesn't recognize, also get passed on to the emulator untouched.
- By means of <c>-erl_args</c> the user may specify flags with the same name, but
+ when <c>ct_run</c> calls <c>erl</c> to start the Erlang node (this
+ makes it possible to add directories to the code server path,
+ change the cookie on the node, start more applications, and so on).</p>
+
+ <p>With the optional flag <c>-erl_args</c>, options on the <c>ct_run</c>
+ command line can be divided into two groups:</p>
+
+ <list type="bulleted">
+ <item>One group that <c>Common Test</c> is to process (those
+ preceding <c>-erl_args</c>).</item>
+ <item>One group that <c>Common Test</c> is to ignore and pass on
+ directly to the emulator (those following <c>-erl_args</c>).</item>
+ </list>
+
+ <p>Options preceding <c>-erl_args</c> that <c>Common Test</c>
+ does not recognize are also passed on to the emulator untouched.
+ By <c>-erl_args</c> the user can specify flags with the same name, but
with different destinations, on the <c>ct_run</c> command line.</p>
- <p>If <c>-pa</c> or <c>-pz</c> flags are specified in the Common Test group of options
- (preceding <c>-erl_args</c>), relative directories will be converted to
- absolute and re-inserted into the code path by Common Test (to avoid
- problems loading user modules when Common Test changes working directory
- during test runs). Common Test will however ignore <c>-pa</c> and <c>-pz</c> flags
- following <c>-erl_args</c> on the command line. These directories are added
- to the code path normally (i.e. on specified form)</p>
-
- <p>Exit status is set before the program ends. Value <c>0</c> indicates a successful
- test result, <c>1</c> indicates one or more failed or auto-skipped test cases, and
- <c>2</c> indicates test execution failure.</p>
-
- <p>If <c>ct_run</c> is called with option:</p>
- <pre>-help</pre>
- <p>it prints all valid start flags to stdout.</p>
- </description>
+ <p>If flags <c>-pa</c> or <c>-pz</c> are specified in the
+ <c>Common Test</c> group of options (preceding <c>-erl_args</c>),
+ relative directories are converted to absolute and reinserted into
+ the code path by <c>Common Test</c>. This is to avoid problems
+ loading user modules when <c>Common Test</c> changes working directory
+ during test runs. However, <c>Common Test</c> ignores flags <c>-pa</c>
+ and <c>-pz</c> following <c>-erl_args</c> on the command line. These
+      directories are added to the code path normally (that is, in the
+      form they are specified).</p>
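+
+    <p>For example, in the following hypothetical command line the first
+      <c>-pa</c> is processed by <c>Common Test</c>, and the second, which
+      follows <c>-erl_args</c>, is passed on to the emulator:</p>
+
+    <pre>
+ ct_run -dir ./my_tests -pa ./test_ebin -erl_args -pa ./extra_ebin</pre>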
+
+ <p>Exit status is set before the program ends. Value <c>0</c> indicates
+ a successful test result, <c>1</c> indicates one or more failed or
+ auto-skipped test cases, and <c>2</c> indicates test execution failure.</p>
+
+ <p>If <c>ct_run</c> is called with option <c>-help</c>, it prints all
+ valid start flags to <c>stdout</c>.</p>
+ </description>
<section>
<marker id="ct_run"></marker>
- <title>Run tests from command line</title>
+ <title>Run Tests from Command Line</title>
<pre>
- ct_run -dir TestDir1 TestDir2 .. TestDirN |
- [-dir TestDir] -suite Suite1 Suite2 .. SuiteN
- [-group Groups1 Groups2 .. GroupsN] [-case Case1 Case2 .. CaseN]
- [-step [config | keep_inactive]]
- [-config ConfigFile1 ConfigFile2 .. ConfigFileN]
- [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
- ConfigString2 and .. CallbackModuleN ConfigStringN]
- [-decrypt_key Key] | [-decrypt_file KeyFile]
- [-label Label]
- [-logdir LogDir]
- [-logopts LogOpts]
- [-verbosity GenVLevel | [Category1 VLevel1 and
- Category2 VLevel2 and .. CategoryN VLevelN]]
- [-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
- [-stylesheet CSSFile]
- [-cover CoverCfgFile]
- [-cover_stop Bool]
- [-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
- [-event_handler_init EvHandler1 InitArg1 and
- EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
- [-include InclDir1 InclDir2 .. InclDirN]
- [-no_auto_compile]
- [-abort_if_missing_suites]
- [-muliply_timetraps Multiplier]
- [-scale_timetraps]
- [-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
- [-repeat N] |
- [-duration HHMMSS [-force_stop [skip_rest]]] |
- [-until [YYMoMoDD]HHMMSS [-force_stop [skip_rest]]]
- [-basic_html]
- [-ct_hooks CTHModule1 CTHOpts1 and CTHModule2 CTHOpts2 and ..
- CTHModuleN CTHOptsN]
- [-exit_status ignore_config]
- [-help]
- </pre>
+ ct_run -dir TestDir1 TestDir2 .. TestDirN |
+ [-dir TestDir] -suite Suite1 Suite2 .. SuiteN
+ [-group Groups1 Groups2 .. GroupsN] [-case Case1 Case2 .. CaseN]
+ [-step [config | keep_inactive]]
+ [-config ConfigFile1 ConfigFile2 .. ConfigFileN]
+ [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
+ ConfigString2 and .. CallbackModuleN ConfigStringN]
+ [-decrypt_key Key] | [-decrypt_file KeyFile]
+ [-label Label]
+ [-logdir LogDir]
+ [-logopts LogOpts]
+ [-verbosity GenVLevel | [Category1 VLevel1 and
+ Category2 VLevel2 and .. CategoryN VLevelN]]
+ [-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
+ [-stylesheet CSSFile]
+ [-cover CoverCfgFile]
+ [-cover_stop Bool]
+ [-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
+ [-event_handler_init EvHandler1 InitArg1 and
+ EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
+ [-include InclDir1 InclDir2 .. InclDirN]
+ [-no_auto_compile]
+ [-abort_if_missing_suites]
+    [-multiply_timetraps Multiplier]
+ [-scale_timetraps]
+ [-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
+ [-repeat N] |
+ [-duration HHMMSS [-force_stop [skip_rest]]] |
+ [-until [YYMoMoDD]HHMMSS [-force_stop [skip_rest]]]
+ [-basic_html]
+ [-ct_hooks CTHModule1 CTHOpts1 and CTHModule2 CTHOpts2 and ..
+ CTHModuleN CTHOptsN]
+ [-exit_status ignore_config]
+ [-help]</pre>
</section>
+
<section>
- <title>Run tests using test specification</title>
+ <title>Run Tests using Test Specification</title>
<pre>
- ct_run -spec TestSpec1 TestSpec2 .. TestSpecN
- [-join_specs]
- [-config ConfigFile1 ConfigFile2 .. ConfigFileN]
- [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
- ConfigString2 and .. and CallbackModuleN ConfigStringN]
- [-decrypt_key Key] | [-decrypt_file KeyFile]
- [-label Label]
- [-logdir LogDir]
- [-logopts LogOpts]
- [-verbosity GenVLevel | [Category1 VLevel1 and
- Category2 VLevel2 and .. CategoryN VLevelN]]
- [-allow_user_terms]
- [-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
- [-stylesheet CSSFile]
- [-cover CoverCfgFile]
- [-cover_stop Bool]
- [-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
- [-event_handler_init EvHandler1 InitArg1 and
- EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
- [-include InclDir1 InclDir2 .. InclDirN]
- [-no_auto_compile]
- [-abort_if_missing_suites]
- [-muliply_timetraps Multiplier]
- [-scale_timetraps]
- [-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
- [-repeat N] |
- [-duration HHMMSS [-force_stop [skip_rest]]] |
- [-until [YYMoMoDD]HHMMSS [-force_stop [skip_rest]]]
- [-basic_html]
- [-ct_hooks CTHModule1 CTHOpts1 and CTHModule2 CTHOpts2 and ..
- CTHModuleN CTHOptsN]
- [-exit_status ignore_config]
- </pre>
+ ct_run -spec TestSpec1 TestSpec2 .. TestSpecN
+ [-join_specs]
+ [-config ConfigFile1 ConfigFile2 .. ConfigFileN]
+ [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
+ ConfigString2 and .. and CallbackModuleN ConfigStringN]
+ [-decrypt_key Key] | [-decrypt_file KeyFile]
+ [-label Label]
+ [-logdir LogDir]
+ [-logopts LogOpts]
+ [-verbosity GenVLevel | [Category1 VLevel1 and
+ Category2 VLevel2 and .. CategoryN VLevelN]]
+ [-allow_user_terms]
+ [-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
+ [-stylesheet CSSFile]
+ [-cover CoverCfgFile]
+ [-cover_stop Bool]
+ [-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
+ [-event_handler_init EvHandler1 InitArg1 and
+ EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
+ [-include InclDir1 InclDir2 .. InclDirN]
+ [-no_auto_compile]
+ [-abort_if_missing_suites]
+    [-multiply_timetraps Multiplier]
+ [-scale_timetraps]
+ [-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
+ [-repeat N] |
+ [-duration HHMMSS [-force_stop [skip_rest]]] |
+ [-until [YYMoMoDD]HHMMSS [-force_stop [skip_rest]]]
+ [-basic_html]
+ [-ct_hooks CTHModule1 CTHOpts1 and CTHModule2 CTHOpts2 and ..
+ CTHModuleN CTHOptsN]
+ [-exit_status ignore_config]</pre>
</section>
+
<section>
- <title>Run tests in web based GUI</title>
+ <title>Run Tests in Web-Based GUI</title>
<pre>
- ct_run -vts [-browser Browser]
- [-dir TestDir1 TestDir2 .. TestDirN] |
- [[dir TestDir] -suite Suite [[-group Group] [-case Case]]]
- [-config ConfigFile1 ConfigFile2 .. ConfigFileN]
- [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
- ConfigString2 and .. and CallbackModuleN ConfigStringN]
- [-logopts LogOpts]
- [-verbosity GenVLevel | [Category1 VLevel1 and
- Category2 VLevel2 and .. CategoryN VLevelN]]
- [-decrypt_key Key] | [-decrypt_file KeyFile]
- [-include InclDir1 InclDir2 .. InclDirN]
- [-no_auto_compile]
- [-abort_if_missing_suites]
- [-muliply_timetraps Multiplier]
- [-scale_timetraps]
- [-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
- [-basic_html]</pre>
+ ct_run -vts [-browser Browser]
+ [-dir TestDir1 TestDir2 .. TestDirN] |
+    [[-dir TestDir] -suite Suite [[-group Group] [-case Case]]]
+ [-config ConfigFile1 ConfigFile2 .. ConfigFileN]
+ [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
+ ConfigString2 and .. and CallbackModuleN ConfigStringN]
+ [-logopts LogOpts]
+ [-verbosity GenVLevel | [Category1 VLevel1 and
+ Category2 VLevel2 and .. CategoryN VLevelN]]
+ [-decrypt_key Key] | [-decrypt_file KeyFile]
+ [-include InclDir1 InclDir2 .. InclDirN]
+ [-no_auto_compile]
+ [-abort_if_missing_suites]
+    [-multiply_timetraps Multiplier]
+ [-scale_timetraps]
+ [-create_priv_dir auto_per_run | auto_per_tc | manual_per_tc]
+ [-basic_html]</pre>
</section>
+
<section>
- <title>Refresh the HTML index files</title>
+ <title>Refresh HTML Index Files</title>
<pre>
- ct_run -refresh_logs [-logdir LogDir] [-basic_html]</pre>
+ ct_run -refresh_logs [-logdir LogDir] [-basic_html]</pre>
</section>
+
<section>
- <title>Run CT in interactive mode</title>
+ <title>Run Common Test in Interactive Mode</title>
<pre>
- ct_run -shell
- [-config ConfigFile1 ConfigFile2 ... ConfigFileN]
- [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
- ConfigString2 and .. and CallbackModuleN ConfigStringN]
- [-decrypt_key Key] | [-decrypt_file KeyFile]</pre>
+ ct_run -shell
+ [-config ConfigFile1 ConfigFile2 ... ConfigFileN]
+ [-userconfig CallbackModule1 ConfigString1 and CallbackModule2
+ ConfigString2 and .. and CallbackModuleN ConfigStringN]
+ [-decrypt_key Key] | [-decrypt_file KeyFile]</pre>
</section>
+
<section>
- <title>Start a Common Test Master node</title>
+ <title>Start a Common Test Master Node</title>
<pre>
- ct_run -ctmaster</pre>
+ ct_run -ctmaster</pre>
</section>
<section>
- <title>See also</title>
- <p>Please read the <seealso marker="run_test_chapter">Running Test Suites</seealso>
- chapter in the Common Test User's Guide for information about the meaning of the
- different start flags.</p>
+ <title>See Also</title>
+ <p>For information about the start flags, see section
+ <seealso marker="run_test_chapter">Running Tests and Analyzing
+ Results</seealso> in the User's Guide.</p>
</section>
</comref>
+
diff --git a/lib/common_test/doc/src/ct_slave.xml b/lib/common_test/doc/src/ct_slave.xml
new file mode 100644
index 0000000000..44a7b7873f
--- /dev/null
+++ b/lib/common_test/doc/src/ct_slave.xml
@@ -0,0 +1,221 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_slave</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_slave.xml</file>
+ </header>
+ <module>ct_slave</module>
+ <modulesummary>Common Test framework functions for starting and stopping
+ nodes for Large-Scale Testing.</modulesummary>
+
+ <description>
+
+ <p><c>Common Test</c> framework functions for starting and stopping nodes
+ for Large-Scale Testing.</p>
+
+ <p>This module exports functions used by the <c>Common Test</c>
+ Master to start and stop "slave" nodes. It is the default callback
+ module for the <c>{init, node_start}</c> term in the Test
+ Specification.</p>
+
+ </description>
+
+ <funcs>
+ <func>
+ <name>start(Node) -&gt; Result</name>
+ <fsummary>Starts an Erlang node with name Node on the local
+ host.</fsummary>
+ <type>
+ <v>Node = atom()</v>
+ <v>Result = {ok, NodeName} | {error, Reason, NodeName}</v>
+ <v>Reason = already_started | started_not_connected | boot_timeout | init_timeout | startup_timeout | not_alive</v>
+ <v>NodeName = atom()</v>
+ </type>
+ <desc><marker id="start-1"/>
+ <p>Starts an Erlang node with name <c>Node</c> on the local host.</p>
+
+ <p>See also
+ <seealso marker="#start-3"><c>ct_slave:start/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>start(HostOrNode, NodeOrOpts) -&gt; Result</name>
+ <fsummary>Starts an Erlang node with default options on a specified
+ host, or on the local host with specified options.</fsummary>
+ <type>
+ <v>HostOrNode = atom()</v>
+ <v>NodeOrOpts = atom() | list()</v>
+ <v>Result = {ok, NodeName} | {error, Reason, NodeName}</v>
+ <v>Reason = already_started | started_not_connected | boot_timeout | init_timeout | startup_timeout | not_alive</v>
+ <v>NodeName = atom()</v>
+ </type>
+ <desc><marker id="start-2"/>
+ <p>Starts an Erlang node with default options on a specified host, or
+ on the local host with specified options. That is, the call is
+ interpreted as <c>start(Host, Node)</c> when the second argument is
+ atom-valued and <c>start(Node, Opts)</c> when it is list-valued.</p>
+
+ <p>See also
+ <seealso marker="#start-3"><c>ct_slave:start/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>start(Host, Node, Opts) -&gt; Result</name>
+ <fsummary>Starts an Erlang node with name Node on host Host as
+ specified by the combination of options in Opts.</fsummary>
+ <type>
+ <v>Node = atom()</v>
+ <v>Host = atom()</v>
+ <v>Opts = [OptTuples]</v>
+ <v>OptTuples = {username, Username} | {password, Password} | {boot_timeout, BootTimeout} | {init_timeout, InitTimeout} | {startup_timeout, StartupTimeout} | {startup_functions, StartupFunctions} | {monitor_master, Monitor} | {kill_if_fail, KillIfFail} | {erl_flags, ErlangFlags} | {env, [{EnvVar, Value}]}</v>
+ <v>Username = string()</v>
+ <v>Password = string()</v>
+ <v>BootTimeout = integer()</v>
+ <v>InitTimeout = integer()</v>
+ <v>StartupTimeout = integer()</v>
+ <v>StartupFunctions = [StartupFunctionSpec]</v>
+ <v>StartupFunctionSpec = {Module, Function, Arguments}</v>
+ <v>Module = atom()</v>
+ <v>Function = atom()</v>
+ <v>Arguments = [term]</v>
+ <v>Monitor = bool()</v>
+ <v>KillIfFail = bool()</v>
+ <v>ErlangFlags = string()</v>
+ <v>EnvVar = string()</v>
+ <v>Value = string()</v>
+ <v>Result = {ok, NodeName} | {error, Reason, NodeName}</v>
+ <v>Reason = already_started | started_not_connected | boot_timeout | init_timeout | startup_timeout | not_alive</v>
+ <v>NodeName = atom()</v>
+ </type>
+ <desc><marker id="start-3"/>
+ <p>Starts an Erlang node with name <c>Node</c> on host <c>Host</c> as
+ specified by the combination of options in <c>Opts</c>.</p>
+
+ <p>Options <c>Username</c> and <c>Password</c> are used to log on to the
+ remote host <c>Host</c>. <c>Username</c>, if omitted, defaults to
+ the current username. <c>Password</c> is empty by default.</p>
+
+        <p>The functions specified in option <c>startup_functions</c> are
+          executed after startup of the node. Notice that all used modules
+          are to be present in the code path on <c>Host</c>.</p>
+
+ <p>The time-outs are applied as follows:</p>
+
+ <taglist>
+ <tag><c>BootTimeout</c></tag>
+ <item><p>The time to start the Erlang node, in seconds. Defaults to
+ 3 seconds. If the node is not pingable within this time, the result
+ <c>{error, boot_timeout, NodeName}</c> is returned.</p></item>
+ <tag><c>InitTimeout</c></tag>
+          <item><p>The time to wait for the node to call the internal
+            callback function informing the master about a successful startup.
+ Defaults to 1 second. In case of a timed out message, the result
+ <c>{error, init_timeout, NodeName}</c> is returned.</p></item>
+ <tag><c>StartupTimeout</c></tag>
+          <item><p>The time to wait for the node to finish running
+            <c>StartupFunctions</c>. Defaults to 1 second. If this time-out
+ occurs, the result <c>{error, startup_timeout, NodeName}</c> is
+ returned.</p></item>
+ </taglist>
+
+ <p><em>Options:</em></p>
+
+ <taglist>
+ <tag><c>monitor_master</c></tag>
+ <item><p>Specifies if the slave node is to be stopped if the
+ master node stops. Defaults to <c>false</c>.</p></item>
+ <tag><c>kill_if_fail</c></tag>
+ <item><p>Specifies if the slave node is to be killed if a time-out
+ occurs during initialization or startup. Defaults to <c>true</c>.
+            Notice that the node can still be alive if the boot time-out
+            occurred, but it is not killed in this case.</p></item>
+          <tag><c>erl_flags</c></tag>
+ <item><p>Specifies which flags are added to the parameters of the
+ executable <c>erl</c>.</p></item>
+ <tag><c>env</c></tag>
+          <item><p>Specifies a list of environment variables that extend
+            the environment of the started node.</p></item>
+ </taglist>
+
+ <p><em>Special return values:</em></p>
+
+ <list type="bulleted">
+        <item><p><c>{error, already_started, NodeName}</c> if the node
+         with the specified name is already started on the specified
+         host.</p></item>
+ <item><p><c>{error, started_not_connected, NodeName}</c> if the
+ node is started, but not connected to the master node.</p></item>
+ <item><p><c>{error, not_alive, NodeName}</c> if the node on which
+ <seealso marker="#start-3"><c>ct_slave:start/3</c></seealso> is
+ called, is not alive. Notice that <c>NodeName</c> is the name of
+ the current node in this case.</p></item>
+ </list>
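+
+       <p>The following is a hypothetical example of starting a slave
+        node with explicit time-outs and a startup function (host, node,
+        path, and environment values are illustrative only):</p>
+
+       <pre>
+   Opts = [{boot_timeout, 10}, {init_timeout, 3}, {startup_timeout, 3},
+           {startup_functions, [{code, add_patha, ["/tmp/mylib/ebin"]}]},
+           {monitor_master, true},
+           {env, [{"MY_TEST_VAR", "1"}]}],
+   {ok, _NodeName} = ct_slave:start(myhost, mynode, Opts).</pre>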
+ </desc>
+ </func>
+
+ <func>
+ <name>stop(Node) -&gt; Result</name>
+ <fsummary>Stops the running Erlang node with name Node on the local
+ host.</fsummary>
+ <type>
+ <v>Node = atom()</v>
+ <v>Result = {ok, NodeName} | {error, Reason, NodeName}</v>
+      <v>Reason = not_started | not_connected | stop_timeout</v>
+      <v>NodeName = atom()</v>
+ </type>
+ <desc><marker id="stop-1"/>
+ <p>Stops the running Erlang node with name <c>Node</c> on the local
+ host.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>stop(Host, Node) -&gt; Result</name>
+ <fsummary>Stops the running Erlang node with name Node on host
+ Host.</fsummary>
+ <type>
+ <v>Host = atom()</v>
+ <v>Node = atom()</v>
+ <v>Result = {ok, NodeName} | {error, Reason, NodeName}</v>
+ <v>Reason = not_started | not_connected | stop_timeout</v>
+ <v>NodeName = atom()</v>
+ </type>
+ <desc><marker id="stop-2"/>
+ <p>Stops the running Erlang node with name <c>Node</c> on host
+ <c>Host</c>.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_snmp.xml b/lib/common_test/doc/src/ct_snmp.xml
new file mode 100644
index 0000000000..d001fb24ec
--- /dev/null
+++ b/lib/common_test/doc/src/ct_snmp.xml
@@ -0,0 +1,523 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_snmp</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_snmp.xml</file>
+ </header>
+ <module>ct_snmp</module>
+ <modulesummary>Common Test user interface module for the SNMP application.</modulesummary>
+
+ <description>
+
+ <p><c>Common Test</c> user interface module for the <c>SNMP</c>
+ application.</p>
+
+ <p>The purpose of this module is to simplify SNMP configuration for the
+ test case writer. Many test cases can use default values for common
+ operations and then no SNMP configuration files need to be supplied.
+ When it is necessary to change particular configuration parameters, a
+ subset of the relevant SNMP configuration files can be passed to
+ <c>ct_snmp</c> by <c>Common Test</c> configuration files. For more
+ specialized configuration parameters, a simple SNMP configuration file
+ can be placed in the test suite data directory. To simplify the test
+ suite, <c>Common Test</c> keeps track of some of the SNMP manager
+ information. This way the test suite does not have to handle as many
+    input parameters as if it had to interface with the OTP SNMP manager
+ directly.</p>
+
+ <p><em>Configurable SNMP Manager and Agent Parameters:</em></p>
+
+ <p>Manager configuration:</p>
+
+ <taglist>
+     <tag><c>{start_manager, boolean()}</c></tag>
+ <item><p>Optional. Default is <c>true</c>.</p></item>
+ <tag><c>{users, [{user_name(), [call_back_module(), user_data()]}]}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{usm_users, [{usm_user_name(), [usm_config()]}]}</c></tag>
+ <item><p>Optional. SNMPv3 only.</p></item>
+     <tag><c>{managed_agents, [{agent_name(), [user_name(), agent_ip(), agent_port(), [agent_config()]]}]}</c></tag>
+ <item><p><c>managed_agents</c> is optional.</p></item>
+ <tag><c>{max_msg_size, integer()}</c></tag>
+ <item><p>Optional. Default is <c>484</c>.</p></item>
+ <tag><c>{mgr_port, integer()}</c></tag>
+ <item><p>Optional. Default is <c>5000</c>.</p></item>
+     <tag><c>{engine_id, string()}</c></tag>
+ <item><p>Optional. Default is <c>"mgrEngine"</c>.</p></item>
+ </taglist>
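+
+    <p>For example, a manager configuration entry in a <c>Common Test</c>
+     configuration file can look as follows, using the configuration
+     variable <c>snmp</c> from the <c>require</c> example later in this
+     description (all names, addresses, and port numbers are illustrative
+     only):</p>
+
+  <pre>
+   {snmp, [{start_manager, true},
+           {users, [{user1, [snmp_user_cb, []]}]},
+           {managed_agents, [{agent1, [user1, {192,168,0,1}, 4000, []]}]},
+           {mgr_port, 5000}]}.</pre>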
+
+ <p>Agent configuration:</p>
+
+ <taglist>
+ <tag><c>{start_agent, boolean()}</c></tag>
+ <item><p>Optional. Default is <c>false</c>.</p></item>
+ <tag><c>{agent_sysname, string()}</c></tag>
+ <item><p>Optional. Default is <c>"ct_test"</c>.</p></item>
+ <tag><c>{agent_manager_ip, manager_ip()}</c></tag>
+ <item><p>Optional. Default is <c>localhost</c>.</p></item>
+ <tag><c>{agent_vsns, list()}</c></tag>
+ <item><p>Optional. Default is <c>[v2]</c>.</p></item>
+ <tag><c>{agent_trap_udp, integer()}</c></tag>
+ <item><p>Optional. Default is <c>5000</c>.</p></item>
+ <tag><c>{agent_udp, integer()}</c></tag>
+ <item><p>Optional. Default is <c>4000</c>.</p></item>
+ <tag><c>{agent_notify_type, atom()}</c></tag>
+ <item><p>Optional. Default is <c>trap</c>.</p></item>
+ <tag><c>{agent_sec_type, sec_type()}</c></tag>
+ <item><p>Optional. Default is <c>none</c>.</p></item>
+ <tag><c>{agent_passwd, string()}</c></tag>
+ <item><p>Optional. Default is <c>""</c>.</p></item>
+ <tag><c>{agent_engine_id, string()}</c></tag>
+ <item><p>Optional. Default is <c>"agentEngine"</c>.</p></item>
+     <tag><c>{agent_max_msg_size, integer()}</c></tag>
+ <item><p>Optional. Default is <c>484</c>.</p></item>
+ </taglist>
+
+    <p>The following parameters represent the SNMP configuration files
+ <c>context.conf</c>, <c>standard.conf</c>, <c>community.conf</c>,
+ <c>vacm.conf</c>, <c>usm.conf</c>, <c>notify.conf</c>,
+ <c>target_addr.conf</c>, and <c>target_params.conf</c>. Notice that
+ all values in <c>agent.conf</c> can be modified by the parameters
+ listed above. All these configuration files have default values set by
+    the <c>SNMP</c> application. These values can be overridden by supplying
+    a list of valid configuration values, or a file located in the test
+    suite data directory that produces a list of valid configuration
+    values when function <c>file:consult/1</c> is applied to it.</p>
+
+ <taglist>
+ <tag><c>{agent_contexts, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{agent_community, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{agent_sysinfo, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{agent_vacm, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{agent_usm, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{agent_notify_def, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{agent_target_address_def, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ <tag><c>{agent_target_param_def, [term()] | {data_dir_file, rel_path()}}</c></tag>
+ <item><p>Optional.</p></item>
+ </taglist>
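+
+    <p>For example, a hypothetical entry in the same configuration list,
+     overriding the VACM configuration with a file from the test suite
+     data directory:</p>
+
+  <pre>
+   {agent_vacm, {data_dir_file, "my_vacm.conf"}}</pre>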
+
+ <p>Parameter <c>MgrAgentConfName</c> in the functions is to be a name
+ you allocate in your test suite using a <c>require</c> statement.
+ Example (where <c>MgrAgentConfName = snmp_mgr_agent</c>):</p>
+
+ <pre>
+ suite() -&gt; [{require, snmp_mgr_agent, snmp}].</pre>
+
+ <p>or</p>
+
+ <pre>
+ ct:require(snmp_mgr_agent, snmp).</pre>
+
+ <p>Notice that USM users are needed for SNMPv3 configuration and are
+ not to be confused with users.</p>
+
+ <p>SNMP traps, inform, and report messages are handled by the user
+ callback module. For details, see the
+ <seealso marker="snmp:index"><c>SNMP</c></seealso> application.</p>
+
+ <p>It is recommended to use the <c>.hrl</c> files created by the
+ Erlang/OTP MIB compiler to define the Object Identifiers (OIDs).
+ For example, to get the Erlang node name from <c>erlNodeTable</c>
+ in the OTP-MIB:</p>
+
+ <pre>
+ Oid = ?erlNodeEntry ++ [?erlNodeName, 1]</pre>
+
+ <p>Furthermore, values can be set for <c>SNMP</c> application configuration
+ parameters, <c>config</c>, <c>server</c>, <c>net_if</c>, and so on (for
+ a list of valid parameters and types, see the <seealso marker="snmp:users_guide"><c>User's Guide for the SNMP application</c></seealso>). This is
+ done by defining a configuration data variable on the following form:</p>
+
+ <pre>
+ {snmp_app, [{manager, [snmp_app_manager_params()]},
+ {agent, [snmp_app_agent_params()]}]}.</pre>
+
+ <p>A name for the data must be allocated in the suite using
+ <c>require</c> (see the example above). Pass this name as argument
+ <c>SnmpAppConfName</c> to
+ <seealso marker="#start-3"><c>ct_snmp:start/3</c></seealso>.
+ <c>ct_snmp</c> specifies default values for some <c>SNMP</c> application
+ configuration parameters (such as <c>{verbosity,trace}</c> for parameter
+ <c>config</c>). This set of defaults is merged with the parameters
+ specified by the user. The user values override <c>ct_snmp</c>
+ defaults.</p>
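+
+  <p>For example, a hypothetical <c>snmp_app</c> configuration entry that
+   lowers the log verbosity of both manager and agent (valid parameter
+   values are described in the <c>SNMP</c> User's Guide):</p>
+
+  <pre>
+   {snmp_app, [{manager, [{config, [{verbosity, silence}]}]},
+               {agent, [{config, [{verbosity, silence}]}]}]}.</pre>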
+
+ </description>
+
+ <section>
+ <title>Data Types</title>
+ <marker id="types"/>
+ <taglist>
+ <tag><c>agent_config() = {Item, Value}</c></tag>
+ <item><marker id="type-agent_config"/> </item>
+ <tag><c>agent_ip() = ip()</c></tag>
+ <item><marker id="type-agent_ip"/> </item>
+ <tag><c>agent_name() = atom()</c></tag>
+ <item><marker id="type-agent_name"/> </item>
+ <tag><c>agent_port() = integer()</c></tag>
+ <item><marker id="type-agent_port"/> </item>
+ <tag><c>call_back_module() = atom()</c></tag>
+ <item><marker id="type-call_back_module"/> </item>
+ <tag><c>error_index() = integer()</c></tag>
+ <item><marker id="type-error_index"/> </item>
+ <tag><c>error_status() = noError | atom()</c></tag>
+ <item><marker id="type-error_status"/> </item>
+ <tag><c>ip() = string() | {integer(), integer(), integer(), integer()}</c></tag>
+ <item><marker id="type-ip"/> </item>
+ <tag><c>manager_ip() = ip()</c></tag>
+ <item><marker id="type-manager_ip"/> </item>
+ <tag><c>oid() = [byte()]</c></tag>
+ <item><marker id="type-oid"/> </item>
+ <tag><c>oids() = [oid()]</c></tag>
+ <item><marker id="type-oids"/> </item>
+ <tag><c>rel_path() = string()</c></tag>
+ <item><marker id="type-rel_path"/> </item>
+ <tag><c>sec_type() = none | minimum | semi</c></tag>
+ <item><marker id="type-sec_type"/> </item>
+ <tag><c>snmp_app_agent_params() = term()</c></tag>
+ <item><marker id="type-snmp_app_agent_params"/> </item>
+ <tag><c>snmp_app_manager_params() = term()</c></tag>
+ <item><marker id="type-snmp_app_manager_params"/> </item>
+ <tag><c>snmpreply() = {error_status(), error_index(), varbinds()}</c></tag>
+ <item><marker id="type-snmpreply"/> </item>
+ <tag><c>user_data() = term()</c></tag>
+ <item><marker id="type-user_data"/> </item>
+ <tag><c>user_name() = atom()</c></tag>
+ <item><marker id="type-user_name"/> </item>
+ <tag><c>usm_config() = {Item, Value}</c></tag>
+ <item><marker id="type-usm_config"/> </item>
+ <tag><c>usm_user_name() = string()</c></tag>
+ <item><marker id="type-usm_user_name"/> </item>
+ <tag><c>value_type() = o('OBJECT IDENTIFIER') | i('INTEGER') | u('Unsigned32') | g('Unsigned32') | s('OCTET STRING')</c></tag>
+ <item><marker id="type-value_type"/> </item>
+ <tag><c>var_and_val() = {oid(), value_type(), value()}</c></tag>
+ <item><marker id="type-var_and_val"/> </item>
+ <tag><c>varbind() = term()</c></tag>
+ <item><marker id="type-varbind"/> </item>
+ <tag><c>varbinds() = [varbind()]</c></tag>
+ <item><marker id="type-varbinds"/> </item>
+ <tag><c>varsandvals() = [var_and_val()]</c></tag>
+ <item><marker id="type-varsandvals"/> </item>
+ </taglist>
+ <p>These data types are described in the documentation for
+ the <seealso marker="snmp:index"><c>SNMP</c></seealso> application.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>get_next_values(Agent, Oids, MgrAgentConfName) -&gt; SnmpReply</name>
+ <fsummary>Issues a synchronous SNMP get next request.</fsummary>
+ <type>
+ <v>Agent = agent_name()</v>
+ <v>Oids = oids()</v>
+ <v>MgrAgentConfName = atom()</v>
+ <v>SnmpReply = snmpreply()</v>
+ </type>
+ <desc><marker id="get_next_values-3"/>
+ <p>Issues a synchronous SNMP <c>get next</c> request.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_values(Agent, Oids, MgrAgentConfName) -&gt; SnmpReply</name>
+ <fsummary>Issues a synchronous SNMP get request.</fsummary>
+ <type>
+ <v>Agent = agent_name()</v>
+ <v>Oids = oids()</v>
+ <v>MgrAgentConfName = atom()</v>
+ <v>SnmpReply = snmpreply()</v>
+ </type>
+ <desc><marker id="get_values-3"/>
+ <p>Issues a synchronous SNMP <c>get</c> request.</p>
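+
+       <p>Example, assuming that agent <c>agent1</c> and configuration name
+        <c>snmp_mgr_agent</c> are set up as described in the module
+        description, and that the OID macro comes from a compiled MIB:</p>
+
+       <pre>
+   SnmpReply = ct_snmp:get_values(agent1, [?sysDescr ++ [0]], snmp_mgr_agent).</pre>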
+ </desc>
+ </func>
+
+ <func>
+ <name>load_mibs(Mibs) -&gt; ok | {error, Reason}</name>
+ <fsummary>Loads the MIBs into agent snmp_master_agent.</fsummary>
+ <type>
+ <v>Mibs = [MibName]</v>
+ <v>MibName = string()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="load_mibs-1"/>
+ <p>Loads the MIBs into agent <c>snmp_master_agent</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>register_agents(MgrAgentConfName, ManagedAgents) -&gt; ok | {error, Reason}</name>
+ <fsummary>Explicitly instructs the manager to handle this
+ agent.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>ManagedAgents = [agent()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="register_agents-2"/>
+ <p>Explicitly instructs the manager to handle this agent. Corresponds
+ to making an entry in <c>agents.conf</c>.</p>
+
+ <p>This function tries to register the specified managed agents, without
+ checking if any of them exist. To change a registered managed agent,
+ the agent must first be unregistered.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>register_users(MgrAgentConfName, Users) -&gt; ok | {error, Reason}</name>
+ <fsummary>Registers the manager entity (=user) responsible for specific
+ agent(s).</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>Users = [user()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="register_users-2"/>
+ <p>Registers the manager entity (=user) responsible for specific
+ agent(s). Corresponds to making an entry in <c>users.conf</c>.</p>
+
+ <p>This function tries to register the specified users, without checking
+ if any of them exist. To change a registered user, the user must
+ first be unregistered.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>register_usm_users(MgrAgentConfName, UsmUsers) -&gt; ok | {error, Reason}</name>
+ <fsummary>Explicitly instructs the manager to handle this USM user.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>UsmUsers = [usm_user()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="register_usm_users-2"/>
+ <p>Explicitly instructs the manager to handle this USM user.
+ Corresponds to making an entry in <c>usm.conf</c>.</p>
+
+ <p>This function tries to register the specified users, without checking
+ if any of them exist. To change a registered user, the user must
+ first be unregistered.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>set_info(Config) -&gt; [{Agent, OldVarsAndVals, NewVarsAndVals}]</name>
+ <fsummary>Returns a list of all successful set requests performed in the
+ test case in reverse order.</fsummary>
+ <type>
+ <v>Config = [{Key, Value}]</v>
+ <v>Agent = agent_name()</v>
+ <v>OldVarsAndVals = varsandvals()</v>
+ <v>NewVarsAndVals = varsandvals()</v>
+ </type>
+ <desc><marker id="set_info-1"/>
+ <p>Returns a list of all successful <c>set</c> requests performed in
+ the test case in reverse order. The list contains the involved user
+ and agent, the value before <c>set</c>, and the new value. This is
+ intended to simplify the cleanup in function <c>end_per_testcase</c>,
+ that is, the undoing of the <c>set</c> requests and their possible
+ side-effects.</p>
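+
+       <p>A sketch of such a cleanup, assuming configuration name
+        <c>snmp_mgr_agent</c> from the module description:</p>
+
+       <pre>
+   end_per_testcase(_Case, Config) -&gt;
+       [ct_snmp:set_values(Agent, OldVarsAndVals, snmp_mgr_agent, Config) ||
+           {Agent, OldVarsAndVals, _New} &lt;- ct_snmp:set_info(Config)],
+       ok.</pre>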
+ </desc>
+ </func>
+
+ <func>
+ <name>set_values(Agent, VarsAndVals, MgrAgentConfName, Config) -&gt; SnmpReply</name>
+ <fsummary>Issues a synchronous SNMP set request.</fsummary>
+ <type>
+ <v>Agent = agent_name()</v>
+ <v>Oids = oids()</v>
+ <v>MgrAgentConfName = atom()</v>
+ <v>Config = [{Key, Value}]</v>
+ <v>SnmpReply = snmpreply()</v>
+ </type>
+ <desc><marker id="set_values-4"/>
+ <p>Issues a synchronous SNMP <c>set</c> request.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>start(Config, MgrAgentConfName) -&gt; ok</name>
+ <fsummary>Equivalent to start(Config, MgrAgentConfName,
+ undefined).</fsummary>
+ <desc><marker id="start-2"/>
+ <p>Equivalent to
+ <seealso marker="#start-3"><c>ct_snmp:start(Config, MgrAgentConfName,
+ undefined)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>start(Config, MgrAgentConfName, SnmpAppConfName) -&gt; ok</name>
+ <fsummary>Starts an SNMP manager and/or agent.</fsummary>
+ <type>
+ <v>Config = [{Key, Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ <v>MgrAgentConfName = atom()</v>
+      <v>SnmpAppConfName = atom()</v>
+ </type>
+ <desc><marker id="start-3"/>
+ <p>Starts an SNMP manager and/or agent. In the manager case,
+ registrations of users and agents, as specified by the configuration
+       <c>MgrAgentConfName</c>, are performed. When using SNMPv3, so-called
+       USM users are also registered. Users, <c>usm_users</c>, and
+ managed agents can also be registered later using
+ <seealso marker="#register_users-2"><c>ct_snmp:register_users/2</c></seealso>,
+ <seealso marker="#register_agents-2"><c>ct_snmp:register_agents/2</c></seealso>,
+ and
+ <seealso marker="#register_usm_users-2"><c>ct_snmp:register_usm_users/2</c></seealso>.</p>
+
+ <p>The agent started is called <c>snmp_master_agent</c>. Use
+ <seealso marker="#load_mibs-1"><c>ct_snmp:load_mibs/1</c></seealso>
+ to load MIBs into the agent.</p>
+
+       <p>With <c>SnmpAppConfName</c>, the <c>SNMP</c> application can be
+       configured with parameters <c>config</c>, <c>mibs</c>, <c>net_if</c>,
+       and so on.
+ The values are merged with (and possibly override) default values
+ set by <c>ct_snmp</c>.</p>
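+
+       <p>Example, assuming that both configuration names have been
+        required by the suite as described in the module description:</p>
+
+       <pre>
+   ok = ct_snmp:start(Config, snmp_mgr_agent, snmp_app).</pre>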
+ </desc>
+ </func>
+
+ <func>
+ <name>stop(Config) -&gt; ok</name>
+ <fsummary>Stops the SNMP manager and/or agent, and removes all files
+ created.</fsummary>
+ <type>
+ <v>Config = [{Key, Value}]</v>
+ <v>Key = atom()</v>
+ <v>Value = term()</v>
+ </type>
+ <desc><marker id="stop-1"/>
+ <p>Stops the SNMP manager and/or agent, and removes all files
+ created.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unload_mibs(Mibs) -&gt; ok | {error, Reason}</name>
+ <fsummary>Unloads the MIBs from agent snmp_master_agent.</fsummary>
+ <type>
+ <v>Mibs = [MibName]</v>
+ <v>MibName = string()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="unload_mibs-1"/>
+ <p>Unloads the MIBs from agent <c>snmp_master_agent</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unregister_agents(MgrAgentConfName) -&gt; ok</name>
+ <fsummary>Unregisters all managed agents.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="unregister_agents-1"/>
+ <p>Unregisters all managed agents.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unregister_agents(MgrAgentConfName, ManagedAgents) -&gt; ok</name>
+ <fsummary>Unregisters the specified managed agents.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>ManagedAgents = [agent_name()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="unregister_agents-2"/>
+ <p>Unregisters the specified managed agents.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unregister_users(MgrAgentConfName) -&gt; ok</name>
+ <fsummary>Unregisters all users.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="unregister_users-1"/>
+ <p>Unregisters all users.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unregister_users(MgrAgentConfName, Users) -&gt; ok</name>
+ <fsummary>Unregisters the specified users.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>Users = [user_name()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="unregister_users-2"/>
+ <p>Unregisters the specified users.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unregister_usm_users(MgrAgentConfName) -&gt; ok</name>
+ <fsummary>Unregisters all USM users.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="unregister_usm_users-1"/>
+ <p>Unregisters all USM users.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>unregister_usm_users(MgrAgentConfName, UsmUsers) -&gt; ok</name>
+ <fsummary>Unregisters the specified USM users.</fsummary>
+ <type>
+ <v>MgrAgentConfName = atom()</v>
+ <v>UsmUsers = [usm_user_name()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="unregister_usm_users-2"/>
+ <p>Unregisters the specified USM users.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_ssh.xml b/lib/common_test/doc/src/ct_ssh.xml
new file mode 100644
index 0000000000..92b1f60b8c
--- /dev/null
+++ b/lib/common_test/doc/src/ct_ssh.xml
@@ -0,0 +1,1150 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_ssh</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_ssh.xml</file>
+ </header>
+ <module>ct_ssh</module>
+ <modulesummary>SSH/SFTP client module.</modulesummary>
+
+  <description>
+
+ <p>SSH/SFTP client module.</p>
+
+ <p>This module uses application <c>SSH</c>, which provides detailed
+ information about, for example, functions, types, and options.</p>
+
+ <p>Argument <c>Server</c> in the SFTP functions is only to be used for
+ SFTP sessions that have been started on existing SSH connections
+ (that is, when the original connection type is <c>ssh</c>). Whenever
+ the connection type is <c>sftp</c>, use the SSH connection reference
+ only.</p>
+
+ <p>The following options are valid for specifying an SSH/SFTP
+ connection (that is, can be used as configuration elements):</p>
+
+ <pre>
+  [{ConnType, Addr},
+   {port, Port},
+   {user, UserName},
+   {password, Pwd},
+   {user_dir, String},
+   {public_key_alg, PubKeyAlg},
+   {connect_timeout, Timeout},
+   {key_cb, KeyCallbackMod}]</pre>
+
+ <p><c>ConnType = ssh | sftp</c>.</p>
+
+ <p>For other types, see
+ <seealso marker="ssh:ssh"><c>ssh:ssh(3)</c></seealso>.</p>
+
+ <p>All time-out parameters in <c>ct_ssh</c> functions are values in
+ milliseconds.</p>
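+
+  <p>A minimal usage sketch, assuming a configuration entry named
+   <c>ssh_host</c> that has been required by the test suite:</p>
+
+  <pre>
+   {ok, SSH} = ct_ssh:connect(ssh_host, ssh, []),
+   {ok, Data} = ct_ssh:exec(SSH, "ls /tmp", 5000),
+   ok = ct_ssh:disconnect(SSH).</pre>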
+
+ </description>
+
+ <section>
+ <title>Data Types</title>
+ <marker id="types"/>
+ <taglist>
+ <tag><c>connection() = handle() | target_name()</c></tag>
+ <item><marker id="type-connection"/>
+ <p>For <c>target_name</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+
+ <tag><c>handle() = handle()</c></tag>
+ <item><marker id="type-handle"/>
+ <p>Handle for a specific SSH/SFTP connection, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+
+ <tag><c>ssh_sftp_return() = term()</c></tag>
+ <item><marker id="type-ssh_sftp_return"/>
+ <p>Return value from an
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp</c></seealso>
+ function.</p></item>
+ </taglist>
+ </section>
+
+ <funcs>
+ <func>
+ <name>apread(SSH, Handle, Position, Length) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="apread-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>apread(SSH, Server, Handle, Position, Length) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="apread-5"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>apwrite(SSH, Handle, Position, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="apwrite-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>apwrite(SSH, Server, Handle, Position, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="apwrite-5"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>aread(SSH, Handle, Len) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="aread-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>aread(SSH, Server, Handle, Len) -&gt; Result</name>
+      <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="aread-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>awrite(SSH, Handle, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="awrite-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>awrite(SSH, Server, Handle, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="awrite-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>close(SSH, Handle) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="close-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>close(SSH, Server, Handle) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="close-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>connect(KeyOrName) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Equivalent to connect(KeyOrName, host, []).</fsummary>
+ <desc><marker id="connect-1"/>
+ <p>Equivalent to
+ <seealso marker="#connect-3"><c>ct_ssh:connect(KeyOrName, host,
+ [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>connect(KeyOrName, ConnType) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Equivalent to connect(KeyOrName, ConnType, []).</fsummary>
+ <desc><marker id="connect-2"/>
+ <p>Equivalent to
+ <seealso marker="#connect-3"><c>ct_ssh:connect(KeyOrName, ConnType,
+ [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>connect(KeyOrName, ConnType, ExtraOpts) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Opens an SSH or SFTP connection using the information
+ associated with KeyOrName.</fsummary>
+ <type>
+ <v>KeyOrName = Key | Name</v>
+ <v>Key = atom()</v>
+ <v>Name = target_name()</v>
+ <v>ConnType = ssh | sftp | host</v>
+ <v>ExtraOpts = ssh_connect_options()</v>
+ <v>Handle = handle()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="connect-3"/>
+ <p>Opens an SSH or SFTP connection using the information associated
+ with <c>KeyOrName</c>.</p>
+
+ <p>If <c>Name</c> (an alias name for <c>Key</c>) is used to identify
+ the connection, this name can be used as connection reference for
+ subsequent calls. Only one open connection at a time associated
+ with <c>Name</c> is possible. If <c>Key</c> is used, the returned
+ handle must be used for subsequent calls (multiple connections can
+ be opened using the configuration data specified by <c>Key</c>).</p>
+
+ <p>For information on how to create a new <c>Name</c>, see
+ <seealso marker="ct#require-2"><c>ct:require/2</c></seealso>.</p>
+
+ <p>For <c>target_name</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p>
+
+ <p><c>ConnType</c> always overrides the type specified in the
+ address tuple in the configuration data (and in <c>ExtraOpts</c>).
+ So it is possible to, for example, open an SFTP connection
+ directly using data originally specifying an SSH connection. Value
+ <c>host</c> means that the connection type specified by the host
+ option (either in the configuration data or in <c>ExtraOpts</c>)
+ is used.</p>
+
+ <p><c>ExtraOpts</c> (optional) are extra SSH options to be added to
+ the configuration data for <c>KeyOrName</c>. The extra options
+ override any existing options with the same key in the
+ configuration data. For details on valid SSH options, see
+ application <seealso marker="ssh:index"><c>SSH</c></seealso>.</p>
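+
+       <p>For example, opening an SFTP connection using configuration data
+        that originally specifies an SSH connection (the configuration name
+        <c>ssh_host</c> is an assumption):</p>
+
+       <pre>
+   {ok, SFTP} = ct_ssh:connect(ssh_host, sftp, [{connect_timeout, 10000}]).</pre>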
+ </desc>
+ </func>
+
+ <func>
+ <name>del_dir(SSH, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="del_dir-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>del_dir(SSH, Server, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="del_dir-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>delete(SSH, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="delete-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>delete(SSH, Server, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="delete-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>disconnect(SSH) -&gt; ok | {error, Reason}</name>
+ <fsummary>Closes an SSH/SFTP connection.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="disconnect-1"/>
+ <p>Closes an SSH/SFTP connection.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>exec(SSH, Command) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to exec(SSH, Command, DefaultTimeout).</fsummary>
+ <desc><marker id="exec-2"/>
+ <p>Equivalent to
+ <seealso marker="#exec-3"><c>ct_ssh:exec(SSH, Command,
+ DefaultTimeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>exec(SSH, Command, Timeout) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Requests server to perform Command.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Command = string()</v>
+ <v>Timeout = integer()</v>
+ <v>Data = list()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="exec-3"/>
+ <p>Requests server to perform <c>Command</c>. A session channel is
+ opened automatically for the request. <c>Data</c> is received from
+ the server as a result of the command.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>exec(SSH, ChannelId, Command, Timeout) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Requests server to perform Command.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>ChannelId = integer()</v>
+ <v>Command = string()</v>
+ <v>Timeout = integer()</v>
+ <v>Data = list()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="exec-4"/>
+ <p>Requests server to perform <c>Command</c>. A previously opened
+ session channel is used for the request. <c>Data</c> is received
+ from the server as a result of the command.</p>
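+
+       <p>A sketch using an explicitly opened session channel (time-out
+        values in milliseconds):</p>
+
+       <pre>
+   {ok, ChannelId} = ct_ssh:session_open(SSH, 5000),
+   {ok, Data} = ct_ssh:exec(SSH, ChannelId, "uname -a", 5000).</pre>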
+ </desc>
+ </func>
+
+ <func>
+ <name>get_file_info(SSH, Handle) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="get_file_info-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_file_info(SSH, Server, Handle) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="get_file_info-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>list_dir(SSH, Path) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="list_dir-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>list_dir(SSH, Server, Path) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="list_dir-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>make_dir(SSH, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="make_dir-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>make_dir(SSH, Server, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="make_dir-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>make_symlink(SSH, Name, Target) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="make_symlink-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>make_symlink(SSH, Server, Name, Target) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="make_symlink-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(SSH, File, Mode) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="open-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(SSH, Server, File, Mode) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="open-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>opendir(SSH, Path) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="opendir-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>opendir(SSH, Server, Path) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="opendir-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>position(SSH, Handle, Location) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="position-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>position(SSH, Server, Handle, Location) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="position-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pread(SSH, Handle, Position, Length) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="pread-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pread(SSH, Server, Handle, Position, Length) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="pread-5"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pwrite(SSH, Handle, Position, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="pwrite-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>pwrite(SSH, Server, Handle, Position, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="pwrite-5"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read(SSH, Handle, Len) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read(SSH, Server, Handle, Len) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_file(SSH, File) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_file-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_file(SSH, Server, File) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_file-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_file_info(SSH, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_file_info-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_file_info(SSH, Server, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_file_info-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_link(SSH, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_link-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_link(SSH, Server, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_link-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_link_info(SSH, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_link_info-2"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>read_link_info(SSH, Server, Name) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="read_link_info-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>receive_response(SSH, ChannelId) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to receive_response(SSH, ChannelId,
+ close).</fsummary>
+ <desc><marker id="receive_response-2"/>
+ <p>Equivalent to
+ <seealso marker="#receive_response-3"><c>ct_ssh:receive_response(SSH,
+ChannelId, close)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>receive_response(SSH, ChannelId, End) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to receive_response(SSH, ChannelId, End,
+ DefaultTimeout).</fsummary>
+ <desc><marker id="receive_response-3"/>
+ <p>Equivalent to
+ <seealso marker="#receive_response-4"><c>ct_ssh:receive_response(SSH,
+ChannelId, End, DefaultTimeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>receive_response(SSH, ChannelId, End, Timeout) -&gt; {ok, Data} | {timeout, Data} | {error, Reason}</name>
+ <fsummary>Receives expected data from server on the specified session
+ channel.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>ChannelId = integer()</v>
+ <v>End = Fun | close | timeout</v>
+ <v>Timeout = integer()</v>
+ <v>Data = list()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="receive_response-4"/>
+ <p>Receives expected data from server on the specified session
+ channel.</p>
+
+ <p>If <c>End == close</c>, data is returned to the caller when the
+ channel is closed by the server. If a time-out occurs before this
+ happens, the function returns <c>{timeout,Data}</c> (where
+ <c>Data</c> is the data received so far).</p>
+ <p>If <c>End == timeout</c>, a time-out is expected and
+ <c>{ok,Data}</c> is returned both in the case of a time-out and
+ when the channel is closed.</p>
+
+       <p>If <c>End</c> is a fun, this fun is called with one argument, the
+        data value in a received <c>ssh_cm</c> message (see
+        <seealso marker="ssh:ssh_connection"><c>ssh:ssh_connection(3)</c></seealso>).
+        The fun is to return either <c>true</c> to end the receiving
+        operation (and have the data collected so far returned) or
+        <c>false</c> to wait for more data from the server. Even if a fun
+        is supplied, the function returns immediately if the server closes
+        the channel.</p>
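+
+       <p>For example, a hypothetical end fun that stops the receive
+        operation when a shell prompt is seen (assuming that the data
+        values arrive as binaries):</p>
+
+       <pre>
+   EndFun = fun(Data) -&gt; string:str(binary_to_list(Data), "$ ") &gt; 0 end,
+   {ok, Output} = ct_ssh:receive_response(SSH, ChannelId, EndFun, 10000).</pre>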
+ </desc>
+ </func>
+
+ <func>
+ <name>rename(SSH, OldName, NewName) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="rename-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>rename(SSH, Server, OldName, NewName) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="rename-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(SSH, ChannelId, Data) -&gt; ok | {error, Reason}</name>
+ <fsummary>Equivalent to send(SSH, ChannelId, 0, Data,
+ DefaultTimeout).</fsummary>
+ <desc><marker id="send-3"/>
+ <p>Equivalent to <seealso marker="#send-5"><c>ct_ssh:send(SSH,
+ ChannelId, 0, Data, DefaultTimeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(SSH, ChannelId, Data, Timeout) -&gt; ok | {error, Reason}</name>
+ <fsummary>Equivalent to send(SSH, ChannelId, 0, Data, Timeout).</fsummary>
+ <desc><marker id="send-4"/>
+ <p>Equivalent to <seealso marker="#send-5"><c>ct_ssh:send(SSH,
+ ChannelId, 0, Data, Timeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(SSH, ChannelId, Type, Data, Timeout) -&gt; ok | {error, Reason}</name>
+ <fsummary>Sends data to server on specified session channel.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>ChannelId = integer()</v>
+ <v>Type = integer()</v>
+ <v>Data = list()</v>
+ <v>Timeout = integer()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="send-5"/>
+ <p>Sends data to server on specified session channel.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send_and_receive(SSH, ChannelId, Data) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to send_and_receive(SSH, ChannelId, Data,
+ close).</fsummary>
+ <desc><marker id="send_and_receive-3"/>
+ <p>Equivalent to
+ <seealso marker="#send_and_receive-4"><c>ct_ssh:send_and_receive(SSH,
+ ChannelId, Data, close)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send_and_receive(SSH, ChannelId, Data, End) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to send_and_receive(SSH, ChannelId, 0, Data, End,
+ DefaultTimeout).</fsummary>
+ <desc><marker id="send_and_receive-4"/>
+ <p>Equivalent to
+       <seealso marker="#send_and_receive-6"><c>ct_ssh:send_and_receive(SSH,
+ChannelId, 0, Data, End, DefaultTimeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send_and_receive(SSH, ChannelId, Data, End, Timeout) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to send_and_receive(SSH, ChannelId, 0, Data, End,
+ Timeout).</fsummary>
+ <desc><marker id="send_and_receive-5"/>
+ <p>Equivalent to
+ <seealso marker="#send_and_receive-6"><c>ct_ssh:send_and_receive(SSH,
+ChannelId, 0, Data, End, Timeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send_and_receive(SSH, ChannelId, Type, Data, End, Timeout) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Sends data to server on specified session channel and waits
+ to receive the server response.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>ChannelId = integer()</v>
+ <v>Type = integer()</v>
+ <v>Data = list()</v>
+ <v>End = Fun | close | timeout</v>
+ <v>Timeout = integer()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="send_and_receive-6"/>
+ <p>Sends data to server on specified session channel and waits to
+ receive the server response.</p>
+
+ <p>For details on argument <c>End</c>, see
+ <seealso marker="#receive_response-4"><c>ct_ssh:receive_response/4</c></seealso>.</p>
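+
+       <p>For example, a hypothetical call sending a command on an open
+        session channel and collecting output until the server closes the
+        channel:</p>
+
+       <pre>
+   {ok, Reply} = ct_ssh:send_and_receive(SSH, ChannelId, 0, "echo hello\n",
+                                         close, 5000).</pre>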
+ </desc>
+ </func>
+
+ <func>
+ <name>session_close(SSH, ChannelId) -&gt; ok | {error, Reason}</name>
+ <fsummary>Closes an SSH session channel.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>ChannelId = integer()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="session_close-2"/>
+ <p>Closes an SSH session channel.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>session_open(SSH) -&gt; {ok, ChannelId} | {error, Reason}</name>
+ <fsummary>Equivalent to session_open(SSH, DefaultTimeout).</fsummary>
+ <desc><marker id="session_open-1"/>
+ <p>Equivalent to
+ <seealso marker="#session_open-2"><c>ct_ssh:session_open(SSH,
+ DefaultTimeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>session_open(SSH, Timeout) -&gt; {ok, ChannelId} | {error, Reason}</name>
+ <fsummary>Opens a channel for an SSH session.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Timeout = integer()</v>
+ <v>ChannelId = integer()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="session_open-2"/>
+ <p>Opens a channel for an SSH session.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>sftp_connect(SSH) -&gt; {ok, Server} | {error, Reason}</name>
+ <fsummary>Starts an SFTP session on an already existing SSH
+ connection.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Server = pid()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="sftp_connect-1"/>
+ <p>Starts an SFTP session on an already existing SSH connection.
+ <c>Server</c> identifies the new session and must be specified
+ whenever SFTP requests are to be sent.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>subsystem(SSH, ChannelId, Subsystem) -&gt; Status | {error, Reason}</name>
+ <fsummary>Equivalent to subsystem(SSH, ChannelId, Subsystem,
+ DefaultTimeout).</fsummary>
+ <desc><marker id="subsystem-3"/>
+ <p>Equivalent to
+ <seealso marker="#subsystem-4"><c>ct_ssh:subsystem(SSH, ChannelId,
+ Subsystem, DefaultTimeout)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>subsystem(SSH, ChannelId, Subsystem, Timeout) -&gt; Status | {error, Reason}</name>
+ <fsummary>Sends a request to execute a predefined subsystem.</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>ChannelId = integer()</v>
+ <v>Subsystem = string()</v>
+ <v>Timeout = integer()</v>
+ <v>Status = success | failure</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="subsystem-4"/>
+ <p>Sends a request to execute a predefined subsystem.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>write(SSH, Handle, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="write-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>write(SSH, Server, Handle, Data) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="write-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>write_file(SSH, File, Iolist) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="write_file-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>write_file(SSH, Server, File, Iolist) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="write_file-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>write_file_info(SSH, Name, Info) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="write_file_info-3"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>write_file_info(SSH, Server, Name, Info) -&gt; Result</name>
+ <fsummary>For information and other types, see ssh_sftp(3).</fsummary>
+ <type>
+ <v>SSH = connection()</v>
+ <v>Result = ssh_sftp_return() | {error, Reason}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="write_file_info-4"/>
+ <p>For information and other types, see
+ <seealso marker="ssh:ssh_sftp"><c>ssh:ssh_sftp(3)</c></seealso>.</p>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/ct_telnet.xml b/lib/common_test/doc/src/ct_telnet.xml
new file mode 100644
index 0000000000..b7ba352104
--- /dev/null
+++ b/lib/common_test/doc/src/ct_telnet.xml
@@ -0,0 +1,601 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ct_telnet</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>ct_telnet.xml</file>
+ </header>
+ <module>ct_telnet</module>
+ <modulesummary>Common Test specific layer on top of Telnet client ct_telnet_client.erl</modulesummary>
+
+ <description>
+
+ <p><c>Common Test</c> specific layer on top of Telnet client
+ <c>ct_telnet_client.erl</c>.</p>
+
+ <p>Use this module to set up Telnet connections, send commands, and
+ perform string matching on the result. For information about how to use
+ <c>ct_telnet</c> and configure connections, specifically for UNIX hosts,
+ see the
+ <seealso marker="unix_telnet"><c>unix_telnet</c></seealso> manual page.
+ </p>
+
+ <p>Default values defined in <c>ct_telnet</c>:</p>
+ <marker id="Default_values"/>
+
+ <list type="bulleted">
+ <item><p>Connection timeout (time to wait for connection) = 10
+ seconds</p></item>
+ <item><p>Command timeout (time to wait for a command to return) =
+ 10 seconds</p></item>
+ <item><p>Max number of reconnection attempts = 3</p></item>
+ <item><p>Reconnection interval (time to wait in between
+ reconnection attempts) = 5 seconds</p></item>
+ <item><p>Keep alive (sends NOP to the server every 8 sec if
+ connection is idle) = <c>true</c></p></item>
+ <item><p>Polling limit (max number of times to poll to get a
+ remaining string terminated) = 0</p></item>
+ <item><p>Polling interval (sleep time between polls) = 1 second</p>
+ </item>
+ </list>
+
+ <p>These parameters can be modified by the user with the following
+ configuration term:</p>
+
+ <pre>
+ {telnet_settings, [{connect_timeout,Millisec},
+ {command_timeout,Millisec},
+ {reconnection_attempts,N},
+ {reconnection_interval,Millisec},
+ {keep_alive,Bool},
+ {poll_limit,N},
+ {poll_interval,Millisec}]}.</pre>
+
+ <p><c>Millisec = integer(), N = integer()</c></p>
+
+ <p>Enter the <c>telnet_settings</c> term in a configuration file included
+ in the test and <c>ct_telnet</c> retrieves the information
+ automatically.</p>
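+
+    <p>For example, a configuration file entry overriding two of the
+    defaults can look as follows (the values are illustrative only):</p>
+
+    <pre>
+    {telnet_settings, [{connect_timeout,20000},
+                       {command_timeout,30000}]}.</pre>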
+
+ <p><c>keep_alive</c> can be specified per connection, if necessary. For
+ details, see
+ <seealso marker="unix_telnet"><c>unix_telnet</c></seealso>.</p>
+
+ </description>
+
+ <section>
+ <title>Logging</title>
+ <marker id="Logging"/>
+
+ <p>The default logging behavior of <c>ct_telnet</c> is to print information
+ about performed operations, commands, and their corresponding results to
+ the test case HTML log. The following is not printed to the HTML
+ log: text strings sent from the Telnet server that are not explicitly
+ received by a <c>ct_telnet</c> function, such as <c>expect/3</c>.
+ However, <c>ct_telnet</c> can be configured to use a special purpose
+ event handler, implemented in <c>ct_conn_log_h</c>, for logging
+ <em>all</em> Telnet traffic. To use this handler, install a <c>Common
+ Test</c> hook named <c>cth_conn_log</c>. Example (using the test suite
+ information function):</p>
+
+ <pre>
+ suite() -&gt;
+ [{ct_hooks, [{cth_conn_log, [{conn_mod(),hook_options()}]}]}].</pre>
+
+ <p><c>conn_mod()</c> is the name of the <c>Common Test</c> module
+ implementing the connection protocol, that is, <c>ct_telnet</c>.</p>
+
+ <p>The <c>cth_conn_log</c> hook performs unformatted logging of Telnet
+ data to a separate text file. All Telnet communication is captured and
+ printed, including any data sent from the server. The link to
+ this text file is located at the top of the test case HTML log.</p>
+
+ <p>By default, data for all Telnet connections is logged in one common
+ file (named <c>default</c>), which can get messy, for example, if
+ multiple Telnet sessions are running in parallel. Therefore a separate
+ log file can be created for each connection. To configure this, use hook
+ option <c>hosts</c> and list the names of the servers/connections
+ to be used in the suite. The connections must be named for this to
+ work (see
+ <seealso marker="#open-1"><c>ct_telnet:open/1,2,3,4</c></seealso>).</p>
+
+ <p>Hook option <c>log_type</c> can be used to change the
+ <c>cth_conn_log</c> behavior. The default value of this option is
+ <c>raw</c>, which results in the behavior described above. If the value
+ is set to <c>html</c>, all Telnet communication is printed to the test
+ case HTML log instead.</p>
+
+ <p>All <c>cth_conn_log</c> hook options described can also be
+ specified in a configuration file with configuration variable
+ <c>ct_conn_log</c>.</p>
+
+ <p><em>Example:</em></p>
+
+ <pre>
+ {ct_conn_log, [{ct_telnet,[{log_type,raw},
+ {hosts,[key_or_name()]}]}]}</pre>
+
+ <note>
+ <p>Hook options specified in a configuration file overwrite any
+ hard-coded hook options in the test suite.</p>
+ </note>
+
+ <marker id="Logging_example"/>
+ <p><em>Logging Example:</em></p>
+
+ <p>The following <c>ct_hooks</c> statement causes printing of Telnet
+ traffic to separate logs for the connections <c>server1</c> and
+ <c>server2</c>. Traffic for any other connections is logged in the
+ default Telnet log.</p>
+
+ <pre>
+ suite() -&gt;
+ [{ct_hooks,
+ [{cth_conn_log, [{ct_telnet,[{hosts,[server1,server2]}]}]}]}].</pre>
+
+ <p>As previously explained, this specification can also be provided by an
+ entry like the following in a configuration file:</p>
+
+ <pre>
+ {ct_conn_log, [{ct_telnet,[{hosts,[server1,server2]}]}]}.</pre>
+
+ <p>In this case the <c>ct_hooks</c> statement in the test suite can look
+ as follows:</p>
+
+ <pre>
+ suite() -&gt;
+ [{ct_hooks, [{cth_conn_log, []}]}].</pre>
+ </section>
+
+ <section>
+ <title>Data Types</title>
+ <marker id="types"/>
+ <taglist>
+ <tag><c>connection() = handle() | {target_name(), connection_type()} | target_name()</c></tag>
+ <item><marker id="type-connection"/>
+ <p>For <c>target_name()</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+
+ <tag><c>connection_type() = telnet | ts1 | ts2</c></tag>
+ <item><marker id="type-connection_type"/> </item>
+
+ <tag><c>handle() = handle()</c></tag>
+ <item><marker id="type-handle"/>
+ <p>Handle for a specific Telnet connection, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p></item>
+
+ <tag><c>prompt_regexp() = string()</c></tag>
+ <item><marker id="type-prompt_regexp"/>
+ <p>Regular expression matching all possible prompts for a specific
+ target type. <c>regexp</c> must not have any groups, that is, when
+        matching, <c>re:run/3</c> (in <c>STDLIB</c>) must return a list with
+        a single element.</p></item>
+ </taglist>
+ </section>
+
+ <funcs>
+ <func>
+ <name>close(Connection) -&gt; ok | {error, Reason}</name>
+ <fsummary>Closes the Telnet connection and stops the process managing
+ it.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="close-1"/>
+ <p>Closes the Telnet connection and stops the process managing it.</p>
+
+ <p>A connection can be associated with a target name and/or a handle.
+ If <c>Connection</c> has no associated target name, it can only
+ be closed with the handle value (see
+ <seealso marker="#open-4"><c>ct_telnet:open/4</c></seealso>).</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>cmd(Connection, Cmd) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to cmd(Connection, Cmd, []).</fsummary>
+ <desc><marker id="cmd-2"/>
+ <p>Equivalent to
+ <seealso marker="#cmd-3"><c>ct_telnet:cmd(Connection, Cmd,
+ [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>cmd(Connection, Cmd, Opts) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Sends a command through Telnet and waits for prompt.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Cmd = string()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = {timeout, timeout()} | {newline, boolean()}</v>
+ <v>Data = [string()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="cmd-3"/>
+ <p>Sends a command through Telnet and waits for prompt.</p>
+
+          <p>By default, this function adds a newline to the end of the
+          specified command. If this is not desired, use option
+          <c>{newline,false}</c>. This is necessary, for example, when
+          sending Telnet command sequences prefixed with the character
+          Interpret As Command (IAC).</p>
+
+ <p>Option <c>timeout</c> specifies how long the client must wait
+ for prompt. If the time expires, the function returns
+ <c>{error,timeout}</c>. For information about the default value
+ for the command timeout, see the
+ <seealso marker="#Default_values">list of default values</seealso>
+          at the beginning of this module.</p>
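+
+          <p>For example, a call with an explicit command timeout can look
+          as follows (the command string and timeout value are illustrative
+          only):</p>
+
+          <pre>
+   {ok,Data} = ct_telnet:cmd(Conn, "echo hello", [{timeout,5000}])</pre>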
+ </desc>
+ </func>
+
+ <func>
+ <name>cmdf(Connection, CmdFormat, Args) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Equivalent to cmdf(Connection, CmdFormat, Args, []).</fsummary>
+ <desc><marker id="cmdf-3"/>
+ <p>Equivalent to
+ <seealso marker="#cmdf-4"><c>ct_telnet:cmdf(Connection, CmdFormat,
+ Args, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>cmdf(Connection, CmdFormat, Args, Opts) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Sends a Telnet command and waits for prompt (uses a format
+ string and a list of arguments to build the command).</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>CmdFormat = string()</v>
+ <v>Args = list()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = {timeout, timeout()} | {newline, boolean()}</v>
+ <v>Data = [string()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="cmdf-4"/>
+ <p>Sends a Telnet command and waits for prompt (uses a format string
+ and a list of arguments to build the command).</p>
+
+ <p>For details, see
+ <seealso marker="#cmd-3"><c>ct_telnet:cmd/3</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>expect(Connection, Patterns) -&gt; term()</name>
+ <fsummary>Equivalent to expect(Connections, Patterns, []).</fsummary>
+ <desc><marker id="expect-2"/>
+ <p>Equivalent to
+ <seealso marker="#expect-3"><c>ct_telnet:expect(Connections,
+ Patterns, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>expect(Connection, Patterns, Opts) -&gt; {ok, Match} | {ok, MatchList, HaltReason} | {error, Reason}</name>
+ <fsummary>Gets data from Telnet and waits for the expected
+ pattern.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Patterns = Pattern | [Pattern]</v>
+ <v>Pattern = string() | {Tag, string()} | prompt | {prompt, Prompt}</v>
+ <v>Prompt = string()</v>
+ <v>Tag = term()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = {idle_timeout, IdleTimeout} | {total_timeout, TotalTimeout} | repeat | {repeat, N} | sequence | {halt, HaltPatterns} | ignore_prompt | no_prompt_check | wait_for_prompt | {wait_for_prompt, Prompt}</v>
+ <v>IdleTimeout = infinity | integer()</v>
+ <v>TotalTimeout = infinity | integer()</v>
+ <v>N = integer()</v>
+ <v>HaltPatterns = Patterns</v>
+ <v>MatchList = [Match]</v>
+ <v>Match = RxMatch | {Tag, RxMatch} | {prompt, Prompt}</v>
+ <v>RxMatch = [string()]</v>
+ <v>HaltReason = done | Match</v>
+ <v>Reason = timeout | {prompt, Prompt}</v>
+ </type>
+ <desc><marker id="expect-3"/>
+ <p>Gets data from Telnet and waits for the expected pattern.</p>
+
+ <p><c>Pattern</c> can be a POSIX regular expression. The function
+ returns when a pattern is successfully matched (at least one, in
+ the case of multiple patterns).</p>
+
+          <p><c>RxMatch</c> is a list of matched strings. It looks as
+          follows: <c>[FullMatch, SubMatch1, SubMatch2, ...]</c>, where
+ <c>FullMatch</c> is the string matched by the whole regular
+ expression, and <c>SubMatchN</c> is the string that matched
+ subexpression number <c>N</c>. Subexpressions are denoted with
+ <c>(' ')</c> in the regular expression.</p>
+
+          <p>If a <c>Tag</c> is specified, the returned <c>Match</c> also
+ includes the matched <c>Tag</c>. Otherwise, only <c>RxMatch</c>
+ is returned.</p>
+
+ <p><em>Options:</em></p>
+
+ <taglist>
+ <tag><c>idle_timeout</c></tag>
+ <item><p>Indicates that the function must return if the Telnet
+ client is idle (that is, if no data is received) for more than
+ <c>IdleTimeout</c> milliseconds. Default time-out is 10
+ seconds.</p></item>
+ <tag><c>total_timeout</c></tag>
+ <item><p>Sets a time limit for the complete <c>expect</c> operation.
+ After <c>TotalTimeout</c> milliseconds, <c>{error,timeout}</c>
+ is returned. Default is <c>infinity</c> (that is, no time
+ limit).</p></item>
+ <tag><c>ignore_prompt | no_prompt_check</c></tag>
+          <item><p>The function returns when a prompt is received, even if
+ no pattern has yet been matched, and
+ <c>{error,{prompt,Prompt}}</c> is returned. However, this
+ behavior can be modified with option <c>ignore_prompt</c> or
+ option <c>no_prompt_check</c>, which tells <c>expect</c> to
+ return only when a match is found or after a time-out.</p></item>
+ <tag><c>ignore_prompt</c></tag>
+ <item><p><c>ct_telnet</c> ignores any prompt found. This option is
+ useful if data sent by the server can include a pattern
+ matching prompt <c>regexp</c> (as returned by
+          <c>TargetMod:get_prompt_regexp/0</c>), but is not to cause
+          the function to return.</p></item>
+ <tag><c>no_prompt_check</c></tag>
+ <item><p><c>ct_telnet</c> does not search for a prompt at all. This
+ is useful if, for example, <c>Pattern</c> itself matches the
+ prompt.</p></item>
+ <tag><c>wait_for_prompt</c></tag>
+ <item><p>Forces <c>ct_telnet</c> to wait until the prompt string
+ is received before returning (even if a pattern has already been
+ matched). This is equal to calling
+ <c>expect(Conn, Patterns++[{prompt,Prompt}], [sequence|Opts])</c>.
+          Notice that options <c>idle_timeout</c> and <c>total_timeout</c>
+ can abort the operation of waiting for prompt.</p></item>
+          <tag><c>repeat | {repeat, N}</c></tag>
+          <item><p>The pattern(s) must be matched multiple times. If <c>N</c>
+          is specified, the pattern(s) are matched <c>N</c> times, and
+ the function returns <c>HaltReason = done</c>. This option can be
+ interrupted by one or more <c>HaltPatterns</c>. <c>MatchList</c>
+ is always returned, that is, a list of <c>Match</c> instead of
+ only one <c>Match</c>. Also <c>HaltReason</c> is returned.</p>
+ </item>
+ <tag><c>sequence</c></tag>
+ <item><p>All patterns must be matched in a sequence. A match is not
+ concluded until all patterns are matched. This option can be
+ interrupted by one or more <c>HaltPatterns</c>. <c>MatchList</c>
+ is always returned, that is, a list of <c>Match</c> instead of
+ only one <c>Match</c>. Also <c>HaltReason</c> is returned.</p>
+ </item>
+ </taglist>
+
+ <p><em>Example 1:</em></p>
+
+ <pre>
+ expect(Connection,[{abc,"ABC"},{xyz,"XYZ"}],[sequence,{halt,[{nnn,"NNN"}]}])</pre>
+
+ <p>First this tries to match <c>"ABC"</c>, and then <c>"XYZ"</c>, but
+ if <c>"NNN"</c> appears, the function returns
+ <c>{error,{nnn,["NNN"]}}</c>. If both <c>"ABC"</c> and <c>"XYZ"</c>
+ are matched, the function returns <c>{ok,[AbcMatch,XyzMatch]}</c>.</p>
+
+ <p><em>Example 2:</em></p>
+
+ <pre>
+ expect(Connection,[{abc,"ABC"},{xyz,"XYZ"}],[{repeat,2},{halt,[{nnn,"NNN"}]}])</pre>
+
+ <p>This tries to match <c>"ABC"</c> or <c>"XYZ"</c> twice. If
+ <c>"NNN"</c> appears, the function returns
+ <c>HaltReason = {nnn,["NNN"]}</c>.</p>
+
+ <p>Options <c>repeat</c> and <c>sequence</c> can be combined to
+ match a sequence multiple times.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_data(Connection) -&gt; {ok, Data} | {error, Reason}</name>
+ <fsummary>Gets all data received by the Telnet client since the last
+ command was sent.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Data = [string()]</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="get_data-1"/>
+ <p>Gets all data received by the Telnet client since the last
+ command was sent. Only newline-terminated strings are returned.
+ If the last received string has not yet been terminated, the
+ connection can be polled automatically until the string is
+ complete.</p>
+
+ <p>The polling feature is controlled by the configuration values
+ <c>poll_limit</c> and <c>poll_interval</c> and is by default
+ disabled. This means that the function immediately returns all
+ complete strings received and saves a remaining non-terminated
+ string for a later <c>get_data</c> call.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(Name) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Equivalent to open(Name, telnet).</fsummary>
+ <desc><marker id="open-1"/>
+ <p>Equivalent to
+ <seealso marker="#open-2"><c>ct_telnet:open(Name,
+ telnet)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(Name, ConnType) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Opens a Telnet connection to the specified target
+ host.</fsummary>
+ <type>
+ <v>Name = target_name()</v>
+ <v>ConnType = connection_type()</v>
+ <v>Handle = handle()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="open-2"/>
+ <p>Opens a Telnet connection to the specified target host.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(KeyOrName, ConnType, TargetMod) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Equivalent to open(KeyOrName, ConnType, TargetMod, []).</fsummary>
+ <desc><marker id="open-3"/>
+ <p>Equivalent to
+        <seealso marker="#open-4"><c>ct_telnet:open(KeyOrName,
+ ConnType, TargetMod, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>open(KeyOrName, ConnType, TargetMod, Extra) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Opens a Telnet connection to the specified target
+ host.</fsummary>
+ <type>
+ <v>KeyOrName = Key | Name</v>
+ <v>Key = atom()</v>
+ <v>Name = target_name()</v>
+ <v>ConnType = connection_type()</v>
+ <v>TargetMod = atom()</v>
+ <v>Extra = term()</v>
+ <v>Handle = handle()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="open-4"/>
+ <p>Opens a Telnet connection to the specified target host.</p>
+
+ <p>The target data must exist in a configuration file. The connection
+ can be associated with <c>Name</c> and/or the returned <c>Handle</c>.
+ To allocate a name for the target, use one of the following
+ alternatives:</p>
+
+ <list type="bulleted">
+ <item><p><seealso marker="ct#require-2"><c>ct:require/2</c></seealso>
+ in a test case</p></item>
+ <item><p>A <c>require</c> statement in the suite information
+ function (<c>suite/0</c>)</p></item>
+ <item><p>A <c>require</c> statement in a test case information
+ function</p></item>
+ </list>
+
+ <p>If you want the connection to be associated with <c>Handle</c> only
+ (if you, for example, need to open multiple connections to a host),
+ use <c>Key</c>, the configuration variable name, to specify the
+ target. Notice that a connection without an associated target name
+ can only be closed with the <c>Handle</c> value.</p>
+
+ <p><c>TargetMod</c> is a module that exports the functions
+ <c>connect(Ip, Port, KeepAlive, Extra)</c> and
+          <c>get_prompt_regexp()</c> for the specified <c>ConnType</c>
+ (for example, <c>unix_telnet</c>).</p>
+
+ <p>For <c>target_name()</c>, see module
+ <seealso marker="ct"><c>ct</c></seealso>.</p>
+
+ <p>See also
+ <seealso marker="ct#require-2"><c>ct:require/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(Connection, Cmd) -&gt; ok | {error, Reason}</name>
+ <fsummary>Equivalent to send(Connection, Cmd, []).</fsummary>
+ <desc><marker id="send-2"/>
+ <p>Equivalent to
+ <seealso marker="#send-3"><c>ct_telnet:send(Connection, Cmd,
+ [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>send(Connection, Cmd, Opts) -&gt; ok | {error, Reason}</name>
+ <fsummary>Sends a Telnet command and returns immediately.</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>Cmd = string()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = {newline, boolean()}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="send-3"/>
+ <p>Sends a Telnet command and returns immediately.</p>
+
+ <p>By default, this function adds a newline to the end of the
+ specified command. If this is not desired, option
+ <c>{newline,false}</c> can be used. This is necessary, for example,
+          when sending Telnet command sequences prefixed with the character
+          Interpret As Command (IAC).</p>
+
+ <p>The resulting output from the command can be read with
+          <seealso marker="#get_data-1"><c>ct_telnet:get_data/1</c></seealso> or
+ <seealso marker="#expect-2"><c>ct_telnet:expect/2,3</c></seealso>.</p>
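+
+          <p>For example (the command and the expected pattern are
+          illustrative only):</p>
+
+          <pre>
+   ok = ct_telnet:send(Conn, "uname"),
+   {ok,Match} = ct_telnet:expect(Conn, ["Linux"], [{idle_timeout,5000}])</pre>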
+ </desc>
+ </func>
+
+ <func>
+ <name>sendf(Connection, CmdFormat, Args) -&gt; ok | {error, Reason}</name>
+ <fsummary>Equivalent to sendf(Connection, CmdFormat, Args, []).</fsummary>
+ <desc><marker id="sendf-3"/>
+ <p>Equivalent to
+ <seealso marker="#sendf-4"><c>ct_telnet:sendf(Connection, CmdFormat,
+ Args, [])</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>sendf(Connection, CmdFormat, Args, Opts) -&gt; ok | {error, Reason}</name>
+ <fsummary>Sends a Telnet command and returns immediately (uses a format
+ string and a list of arguments to build the command).</fsummary>
+ <type>
+ <v>Connection = connection()</v>
+ <v>CmdFormat = string()</v>
+ <v>Args = list()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = {newline, boolean()}</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="sendf-4"/>
+ <p>Sends a Telnet command and returns immediately (uses a format
+ string and a list of arguments to build the command).</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="unix_telnet"><c>unix_telnet</c></seealso></p>
+ </section>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/dependencies_chapter.xml b/lib/common_test/doc/src/dependencies_chapter.xml
index fb758d90df..29c54e819a 100644
--- a/lib/common_test/doc/src/dependencies_chapter.xml
+++ b/lib/common_test/doc/src/dependencies_chapter.xml
@@ -33,217 +33,220 @@
<section>
<title>General</title>
<p>When creating test suites, it is strongly recommended to not
- create dependencies between test cases, i.e. letting test cases
+ create dependencies between test cases, that is, letting test cases
depend on the result of previous test cases. There are various
- reasons for this, for example:</p>
+      reasons for this, such as the following:</p>
- <list>
+ <list type="bulleted">
<item>It makes it impossible to run test cases individually.</item>
- <item>It makes it impossible to run test cases in different order.</item>
- <item>It makes debugging very difficult (since a fault could be
+ <item>It makes it impossible to run test cases in a different order.</item>
+ <item>It makes debugging difficult (as a fault can be
the result of a problem in a different test case than the one failing).</item>
- <item>There exists no good and explicit ways to declare dependencies, so
- it may be very difficult to see and understand these in test suite
+ <item>There are no good and explicit ways to declare dependencies, so
+ it can be difficult to see and understand these in test suite
code and in test logs.</item>
- <item>Extending, restructuring and maintaining test suites with
+ <item>Extending, restructuring, and maintaining test suites with
test case dependencies is difficult.</item>
</list>
<p>There are often sufficient means to work around the need for test
case dependencies. Generally, the problem is related to the state of
- the system under test (SUT). The action of one test case may alter the state
- of the system and for some other test case to run properly, the new state
+ the System Under Test (SUT). The action of one test case can change the
+ system state. For some other test case to run properly, this new state
must be known.</p>
<p>Instead of passing data between test cases, it is recommended
that the test cases read the state from the SUT and perform assertions
- (i.e. let the test case run if the state is as expected, otherwise reset or fail)
- and/or use the state to set variables necessary for the test case to execute
- properly. Common actions can often be implemented as library functions for
- test cases to call to set the SUT in a required state. (Such common actions
- may of course also be separately tested if necessary, to ensure they are
- working as expected). It is sometimes also possible, but not always desirable,
- to group tests together in one test case, i.e. let a test case perform a
- "scenario" test (a test that consists of subtests).</p>
-
- <p>Consider for example a server application under test. The following
+ (that is, let the test case run if the state is as expected, otherwise reset or fail).
+ It is also recommended to use the state to set variables necessary for the
+ test case to execute properly. Common actions can often be implemented as
+ library functions for test cases to call to set the SUT in a required state.
+ (Such common actions can also be separately tested, if necessary,
+ to ensure that they work as expected). It is sometimes also possible,
+ but not always desirable, to group tests together in one test case, that is,
+ let a test case perform a "scenario" test (a test consisting of subtests).</p>
+
+ <p>Consider, for example, a server application under test. The following
functionality is to be tested:</p>
- <list>
- <item>Starting the server.</item>
- <item>Configuring the server.</item>
- <item>Connecting a client to the server.</item>
- <item>Disconnecting a client from the server.</item>
- <item>Stopping the server.</item>
+ <list type="bulleted">
+ <item>Starting the server</item>
+ <item>Configuring the server</item>
+ <item>Connecting a client to the server</item>
+ <item>Disconnecting a client from the server</item>
+ <item>Stopping the server</item>
</list>
- <p>There are obvious dependencies between the listed functions. We can't configure
- the server if it hasn't first been started, we can't connect a client until
- the server has been properly configured, etc. If we want to have one test
- case for each of the functions, we might be tempted to try to always run the
+ <p>There are obvious dependencies between the listed functions. The server cannot
+      be configured if it has not first been started, a client cannot be connected until
+ the server is properly configured, and so on. If we want to have one test
+ case for each function, we might be tempted to try to always run the
test cases in the stated order and carry possible data (identities, handles,
- etc) between the cases and therefore introduce dependencies between them.
- To avoid this we could consider starting and stopping the server for every test.
- We would implement the start and stop action as common functions that may be
- called from init_per_testcase and end_per_testcase. (We would of course test
- the start and stop functionality separately). The configuration could perhaps also
- be implemented as a common function, maybe grouped with the start function.
- Finally the testing of connecting and disconnecting a client may be grouped into
- one test case. The resulting suite would look something like this:</p>
-
+ and so on) between the cases and therefore introduce dependencies between them.</p>
+
+ <p>To avoid this, we can consider starting and stopping the server for every test.
+ We can thus implement the start and stop action as common functions to be
+ called from
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase</c></seealso> and
+ <seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso>.
+ (Remember to test the start and stop functionality separately.)
+ The configuration can also be implemented as a common function, maybe grouped
+ with the start function. Finally, the testing of connecting and disconnecting a
+ client can be grouped into one test case. The resulting suite can look as
+ follows:</p>
<pre>
- -module(my_server_SUITE).
- -compile(export_all).
- -include_lib("ct.hrl").
+ -module(my_server_SUITE).
+ -compile(export_all).
+ -include_lib("ct.hrl").
+
+ %%% init and end functions...
- %%% init and end functions...
+ suite() -> [{require,my_server_cfg}].
- suite() -> [{require,my_server_cfg}].
+ init_per_testcase(start_and_stop, Config) ->
+ Config;
- init_per_testcase(start_and_stop, Config) ->
- Config;
+ init_per_testcase(config, Config) ->
+ [{server_pid,start_server()} | Config];
- init_per_testcase(config, Config) ->
- [{server_pid,start_server()} | Config];
+ init_per_testcase(_, Config) ->
+ ServerPid = start_server(),
+ configure_server(),
+ [{server_pid,ServerPid} | Config].
- init_per_testcase(_, Config) ->
- ServerPid = start_server(),
- configure_server(),
- [{server_pid,ServerPid} | Config].
+ end_per_testcase(start_and_stop, _) ->
+ ok;
- end_per_testcase(start_and_stop, _) ->
- ok;
+      end_per_testcase(_, Config) ->
+          ServerPid = ?config(server_pid, Config),
+          stop_server(ServerPid).
- end_per_testcase(_, _) ->
- ServerPid = ?config(server_pid),
- stop_server(ServerPid).
+ %%% test cases...
- %%% test cases...
+ all() -> [start_and_stop, config, connect_and_disconnect].
- all() -> [start_and_stop, config, connect_and_disconnect].
+ %% test that starting and stopping works
+ start_and_stop(_) ->
+ ServerPid = start_server(),
+ stop_server(ServerPid).
- %% test that starting and stopping works
- start_and_stop(_) ->
- ServerPid = start_server(),
- stop_server(ServerPid).
+ %% configuration test
+ config(Config) ->
+ ServerPid = ?config(server_pid, Config),
+ configure_server(ServerPid).
- %% configuration test
- config(Config) ->
- ServerPid = ?config(server_pid, Config),
- configure_server(ServerPid).
+ %% test connecting and disconnecting client
+ connect_and_disconnect(Config) ->
+ ServerPid = ?config(server_pid, Config),
+ {ok,SessionId} = my_server:connect(ServerPid),
+ ok = my_server:disconnect(ServerPid, SessionId).
- %% test connecting and disconnecting client
- connect_and_disconnect(Config) ->
- ServerPid = ?config(server_pid, Config),
- {ok,SessionId} = my_server:connect(ServerPid),
- ok = my_server:disconnect(ServerPid, SessionId).
+ %%% common functions...
- %%% common functions...
+ start_server() ->
+ {ok,ServerPid} = my_server:start(),
+ ServerPid.
- start_server() ->
- {ok,ServerPid} = my_server:start(),
- ServerPid.
+      stop_server(ServerPid) ->
+          ok = my_server:stop(ServerPid),
+          ok.
- stop_server(ServerPid) ->
- ok = my_server:stop(),
- ok.
+ configure_server(ServerPid) ->
+ ServerCfgData = ct:get_config(my_server_cfg),
+ ok = my_server:configure(ServerPid, ServerCfgData),
+ ok.</pre>
- configure_server(ServerPid) ->
- ServerCfgData = ct:get_config(my_server_cfg),
- ok = my_server:configure(ServerPid, ServerCfgData),
- ok.
- </pre>
</section>
<section>
<marker id="save_config"></marker>
- <title>Saving configuration data</title>
+ <title>Saving Configuration Data</title>
- <p>There might be situations where it is impossible, or infeasible at least, to
- implement independent test cases. Maybe it is simply not possible to read the
- SUT state. Maybe resetting the SUT is impossible and it takes much too long
+ <p>Sometimes it is impossible, or infeasible, to
+ implement independent test cases. Maybe it is not possible to read the
+      SUT state. Maybe resetting the SUT is impossible, and it takes too long
to restart the system. In situations where test case dependency is necessary,
CT offers a structured way to carry data from one test case to the next. The
- same mechanism may also be used to carry data from one test suite to the next.</p>
+ same mechanism can also be used to carry data from one test suite to the next.</p>
<p>The mechanism for passing data is called <c>save_config</c>. The idea is that
- one test case (or suite) may save the current value of Config - or any list of
- key-value tuples - so that it can be read by the next executing test case
- (or test suite). The configuration data is not saved permanently but can only
- be passed from one case (or suite) to the next.</p>
+ one test case (or suite) can save the current value of <c>Config</c>, or any list of
+ key-value tuples, so that the next executing test case (or test suite) can read it.
+ The configuration data is not saved permanently but can only be passed from one
+ case (or suite) to the next.</p>
- <p>To save <c>Config</c> data, return the tuple:</p>
+ <p>To save <c>Config</c> data, return tuple <c>{save_config,ConfigList}</c>
+ from <c>end_per_testcase</c> or from the main test case function.</p>
- <p><c>{save_config,ConfigList}</c></p>
-
- <p>from <c>end_per_testcase</c> or from the main test case function. To read data
- saved by a previous test case, use the <c>config</c> macro with a
- <c>saved_config</c> key:</p>
+ <p>To read data saved by a previous test case, use macro <c>config</c> with a
+ <c>saved_config</c> key as follows:</p>
<p><c>{Saver,ConfigList} = ?config(saved_config, Config)</c></p>
<p><c>Saver</c> (<c>atom()</c>) is the name of the previous test case (where the
- data was saved). The <c>config</c> macro may be used to extract particular data
+ data was saved). The <c>config</c> macro can be used to extract particular data
also from the recalled <c>ConfigList</c>. It is strongly recommended that
<c>Saver</c> is always matched to the expected name of the saving test case.
- This way problems due to restructuring of the test suite may be avoided. Also it
- makes the dependency more explicit and the test suite easier to read and maintain.</p>
+ This way, problems because of restructuring of the test suite can be avoided.
+ Also, it makes the dependency more explicit and the test suite easier to read
+ and maintain.</p>
<p>To pass data from one test suite to another, the same mechanism is used. The data
- should be saved by the <c>end_per_suite</c> function and read by <c>init_per_suite</c>
+      is to be saved by function
+ <seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite</c></seealso>
+ and read by function
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite</c></seealso>
in the suite that follows. When passing data between suites, <c>Saver</c> carries the
name of the test suite.</p>
- <p>Example:</p>
+ <p><em>Example:</em></p>
<pre>
- -module(server_b_SUITE).
- -compile(export_all).
- -include_lib("ct.hrl").
-
- %%% init and end functions...
-
- init_per_suite(Config) ->
- %% read config saved by previous test suite
- {server_a_SUITE,OldConfig} = ?config(saved_config, Config),
- %% extract server identity (comes from server_a_SUITE)
- ServerId = ?config(server_id, OldConfig),
- SessionId = connect_to_server(ServerId),
- [{ids,{ServerId,SessionId}} | Config].
-
- end_per_suite(Config) ->
- %% save config for server_c_SUITE (session_id and server_id)
- {save_config,Config}
-
- %%% test cases...
-
- all() -> [allocate, deallocate].
-
- allocate(Config) ->
- {ServerId,SessionId} = ?config(ids, Config),
- {ok,Handle} = allocate_resource(ServerId, SessionId),
- %% save handle for deallocation test
- NewConfig = [{handle,Handle}],
- {save_config,NewConfig}.
-
- deallocate(Config) ->
- {ServerId,SessionId} = ?config(ids, Config),
- {allocate,OldConfig} = ?config(saved_config, Config),
- Handle = ?config(handle, OldConfig),
- ok = deallocate_resource(ServerId, SessionId, Handle).
- </pre>
-
- <p>It is also possible to save <c>Config</c> data from a test case that is to be
- skipped. To accomplish this, return the following tuple:</p>
-
- <p><c>{skip_and_save,Reason,ConfigList}</c></p>
-
- <p>The result will be that the test case is skipped with <c>Reason</c> printed to
- the log file (as described in previous chapters), and <c>ConfigList</c> is saved
- for the next test case. <c>ConfigList</c> may be read by means of
- <c>?config(saved_config, Config)</c>, as described above. <c>skip_and_save</c>
- may also be returned from <c>init_per_suite</c>, in which case the saved data can
+ -module(server_b_SUITE).
+ -compile(export_all).
+ -include_lib("ct.hrl").
+
+ %%% init and end functions...
+
+ init_per_suite(Config) ->
+ %% read config saved by previous test suite
+ {server_a_SUITE,OldConfig} = ?config(saved_config, Config),
+ %% extract server identity (comes from server_a_SUITE)
+ ServerId = ?config(server_id, OldConfig),
+ SessionId = connect_to_server(ServerId),
+ [{ids,{ServerId,SessionId}} | Config].
+
+ end_per_suite(Config) ->
+ %% save config for server_c_SUITE (session_id and server_id)
+          {save_config,Config}.
+
+ %%% test cases...
+
+ all() -> [allocate, deallocate].
+
+ allocate(Config) ->
+ {ServerId,SessionId} = ?config(ids, Config),
+ {ok,Handle} = allocate_resource(ServerId, SessionId),
+ %% save handle for deallocation test
+ NewConfig = [{handle,Handle}],
+ {save_config,NewConfig}.
+
+ deallocate(Config) ->
+ {ServerId,SessionId} = ?config(ids, Config),
+ {allocate,OldConfig} = ?config(saved_config, Config),
+ Handle = ?config(handle, OldConfig),
+ ok = deallocate_resource(ServerId, SessionId, Handle).</pre>
+
+ <p>To save <c>Config</c> data from a test case that is to be
+ skipped, return tuple
+ <c>{skip_and_save,Reason,ConfigList}</c>.</p>
+
+ <p>The result is that the test case is skipped with <c>Reason</c> printed to
+ the log file (as described earlier) and <c>ConfigList</c> is saved
+ for the next test case. <c>ConfigList</c> can be read using
+ <c>?config(saved_config, Config)</c>, as described earlier. <c>skip_and_save</c>
+ can also be returned from <c>init_per_suite</c>. In this case, the saved data can
be read by <c>init_per_suite</c> in the suite that follows.</p>
</section>
@@ -251,60 +254,63 @@
<marker id="sequences"></marker>
<title>Sequences</title>
- <p>It is possible that test cases depend on each other so that
- if one case fails, the following test(s) should not be executed.
+ <p>Sometimes test cases depend on each other so that
+ if one case fails, the following tests are not to be executed.
Typically, if the <c>save_config</c> facility is used and a test
case that is expected to save data crashes, the following
- case can not run. CT offers a way to declare such dependencies,
+ case cannot run. <c>Common Test</c> offers a way to declare such dependencies,
called sequences.</p>
<p>A sequence of test cases is defined as a test case group
- with a <c>sequence</c> property. Test case groups are defined by
- means of the <c>groups/0</c> function in the test suite (see the
- <seealso marker="write_test_chapter#test_case_groups">Test case groups</seealso>
- chapter for details).</p>
-
- <p>For example, if we would like to make sure that if <c>allocate</c>
- in <c>server_b_SUITE</c> (above) crashes, <c>deallocate</c> is skipped,
- we may define a sequence like this:</p>
+ with a <c>sequence</c> property. Test case groups are defined
+ through function <c>groups/0</c> in the test suite (for details, see section
+      <seealso marker="write_test_chapter#test_case_groups">Test Case Groups</seealso>).</p>
+
+ <p>For example, to ensure that if <c>allocate</c>
+ in <c>server_b_SUITE</c> crashes, <c>deallocate</c> is skipped,
+ the following sequence can be defined:</p>
<pre>
- groups() -> [{alloc_and_dealloc, [sequence], [alloc,dealloc]}].</pre>
+ groups() -> [{alloc_and_dealloc, [sequence], [alloc,dealloc]}].</pre>
- <p>Let's also assume the suite contains the test case <c>get_resource_status</c>,
- which is independent of the other two cases, then the <c>all</c> function could
- look like this:</p>
+      <p>Assume that the suite also contains the test case <c>get_resource_status</c>,
+      which is independent of the other two cases. Then function <c>all</c> can
+      look as follows:</p>
<pre>
- all() -> [{group,alloc_and_dealloc}, get_resource_status].</pre>
+ all() -> [{group,alloc_and_dealloc}, get_resource_status].</pre>
<p>If <c>alloc</c> succeeds, <c>dealloc</c> is also executed. If <c>alloc</c> fails
- however, <c>dealloc</c> is not executed but marked as SKIPPED in the html log.
- <c>get_resource_status</c> will run no matter what happens to the <c>alloc_and_dealloc</c>
+ however, <c>dealloc</c> is not executed but marked as <c>SKIPPED</c> in the HTML log.
+ <c>get_resource_status</c> runs no matter what happens to the <c>alloc_and_dealloc</c>
cases.</p>
- <p>Test cases in a sequence will be executed in order until they have all succeeded or
- until one case fails. If one fails, all following cases in the sequence are skipped.
- The cases in the sequence that have succeeded up to that point are reported as successful
- in the log. An arbitrary number of sequences may be specified. Example:</p>
+ <p>Test cases in a sequence are executed in order until all succeed or
+ one fails. If one fails, all following cases in the sequence are skipped.
+ The cases in the sequence that have succeeded up to that point are reported as
+ successful in the log. Any number of sequences can be specified.</p>
+ <p><em>Example:</em></p>
<pre>
- groups() -> [{scenarioA, [sequence], [testA1, testA2]},
- {scenarioB, [sequence], [testB1, testB2, testB3]}].
-
- all() -> [test1,
- test2,
- {group,scenarioA},
- test3,
- {group,scenarioB},
- test4].</pre>
-
- <p>It is possible to have sub-groups in a sequence group. Such sub-groups can have
- any property, i.e. they are not required to also be sequences. If you want the status
- of the sub-group to affect the sequence on the level above, return
- <c>{return_group_result,Status}</c> from <c>end_per_group/2</c>, as described in the
- <seealso marker="write_test_chapter#repeated_groups">Repeated groups</seealso>
- chapter. A failed sub-group (<c>Status == failed</c>) will cause the execution of a
+ groups() -> [{scenarioA, [sequence], [testA1, testA2]},
+ {scenarioB, [sequence], [testB1, testB2, testB3]}].
+
+ all() -> [test1,
+ test2,
+ {group,scenarioA},
+ test3,
+ {group,scenarioB},
+ test4].</pre>
+
+ <p>A sequence group can have subgroups. Such subgroups can have
+ any property, that is, they are not required to also be sequences. If you want the
+ status of the subgroup to affect the sequence on the level above, return
+ <c>{return_group_result,Status}</c> from
+ <seealso marker="common_test#Module:end_per_group-2"><c>end_per_group/2</c></seealso>,
+ as described in section
+ <seealso marker="write_test_chapter#repeated_groups">Repeated Groups</seealso>
+ in Writing Test Suites.
+ A failed subgroup (<c>Status == failed</c>) causes the execution of a
sequence to fail in the same way a test case does.</p>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/event_handler_chapter.xml b/lib/common_test/doc/src/event_handler_chapter.xml
index 78e5bb5e70..31128a7114 100644
--- a/lib/common_test/doc/src/event_handler_chapter.xml
+++ b/lib/common_test/doc/src/event_handler_chapter.xml
@@ -33,144 +33,171 @@
<section>
<marker id="event_handling"></marker>
<title>General</title>
- <p>It is possible for the operator of a Common Test system to receive
- event notifications continously during a test run. It is reported e.g.
- when a test case starts and stops, what the current count of successful,
- failed and skipped cases is, etc. This information can be used for
- different purposes such as logging progress and results on
- other format than HTML, saving statistics to a database for report
- generation and test system supervision.</p>
-
- <p>Common Test has a framework for event handling which is based on
- the OTP event manager concept and gen_event behaviour. When the Common Test
- server starts, it spawns an event manager. During test execution the
- manager gets a notification from the server every time something
- of potential interest happens. Any event handler plugged into the
- event manager can match on events of interest, take action, or maybe
- simply pass the information on. Event handlers are Erlang modules
- implemented by the Common Test user according to the gen_event
- behaviour (see the OTP User's Guide and Reference Manual for more
- information).</p>
-
- <p>As already described, a Common Test server always starts an event manager.
- The server also plugs in a default event handler which has as its only
- purpose to relay notifications to a globally registered CT Master
- event manager (if a CT Master server is running in the system).
- The CT Master also spawns an event manager at startup.
- Event handlers plugged into this manager will receive the events from
- all the test nodes as well as information from the CT Master server
- itself.</p>
-
- <p>User specific event handlers may be plugged into a Common Test event
- manager, either by telling Common Test to install them before the test
- run (see below), or by adding the handlers dynamically during the test
- run by means of
- <c>gen_event:add_handler/3</c> or <c>gen_event:add_sup_handler/3</c>.
- In the latter scenario, the reference of the Common Test event manager is
- required. To get it, call <c>ct:get_event_mgr_ref/0</c> or (on the CT
- Master node) <c>ct_master:get_event_mgr_ref/0</c>.</p>
+ <p>The operator of a <c>Common Test</c> system can receive
+ event notifications continuously during a test run. For example,
+ <c>Common Test</c> reports when a test case starts and stops,
+ the current count of successful, failed, and skipped cases, and so on.
+ This information can be used for different purposes such as logging progress
+    and results in a format other than HTML, saving statistics to a database
+ for report generation, and test system supervision.</p>
+
+ <p><c>Common Test</c> has a framework for event handling based on
+ the OTP event manager concept and <c>gen_event</c> behavior.
+ When the <c>Common Test</c> server starts, it spawns an event manager.
+ During test execution the manager gets a notification from the server
+ when something of potential interest happens. Any event handler plugged into
+ the event manager can match on events of interest, take action, or
+ pass the information on. The event handlers are Erlang modules
+ implemented by the <c>Common Test</c> user according to the <c>gen_event</c>
+ behavior (for details, see module
+ <seealso marker="stdlib:gen_event"><c>stdlib:gen_event</c></seealso> and
+ section
+ <seealso marker="doc/design_principles:events"><c>gen_event Behaviour</c></seealso>
+ in OTP Design Principles in the System Documentation).
+ </p>
+
+ <p>A <c>Common Test</c> server always starts an event manager.
+    The server also plugs in a default event handler, whose only
+    purpose is to relay notifications to a globally registered <c>Common Test</c>
+ Master event manager (if a <c>Common Test</c> Master server is running in the system).
+ The <c>Common Test</c> Master also spawns an event manager at startup.
+    Event handlers plugged into this manager receive the events from
+ all the test nodes, plus information from the <c>Common Test</c> Master server.
+ </p>
+
+ <p>User-specific event handlers can be plugged into a <c>Common Test</c> event
+ manager, either by telling <c>Common Test</c> to install them before the test
+ run (described later), or by adding the handlers dynamically during the test
+ run using
+ <seealso marker="stdlib:gen_event#add_handler-3"><c>stdlib:gen_event:add_handler/3</c></seealso> or
+ <seealso marker="stdlib:gen_event#add_sup_handler-3"><c>stdlib:gen_event:add_sup_handler/3</c></seealso>.
+ In the latter scenario, the reference of the <c>Common Test</c> event manager is
+ required. To get it, call
+ <seealso marker="ct#get_event_mgr_ref-0"><c>ct:get_event_mgr_ref/0</c></seealso>
+ or (on the <c>Common Test</c> Master node)
+ <seealso marker="ct_master#get_event_mgr_ref-0"><c>ct_master:get_event_mgr_ref/0</c></seealso>.</p>
</section>
<section>
<marker id="usage"></marker>
- <title>Usage</title>
- <p>Event handlers may be installed by means of an <c>event_handler</c>
- start flag (<c>ct_run</c>) or option (<seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>), where the
- argument specifies the names of one or more event handler modules.
- Example:</p>
+ <title>Use</title>
+    <p>Event handlers can be installed with the <c>event_handler</c> start flag
+    of <seealso marker="ct_run"><c>ct_run</c></seealso> or the
+    <c>event_handler</c> option of
+    <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>, where the
+    argument specifies the names of one or more event handler modules.</p>
+
+ <p><em>Example:</em></p>
<p><c>$ ct_run -suite test/my_SUITE -event_handler handlers/my_evh1
handlers/my_evh2 -pa $PWD/handlers</c></p>
- <p>Use the <c><![CDATA[ct_run -event_handler_init]]></c> option instead of
- <c><![CDATA[-event_handler]]></c> to pass start arguments to the event handler
- init function.</p>
- <p>All event handler modules must have gen_event behaviour. Note also that
- these modules must be precompiled, and that their locations must be
- added explicitly to the Erlang code server search path (like in the
- example).</p>
- <p>An event_handler tuple in the argument <c>Opts</c> has the following
- definition (see also <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso> in the reference manual):</p>
+ <p>To pass start arguments to the event handler init function, use option
+ <c><![CDATA[ct_run -event_handler_init]]></c> instead of
+ <c><![CDATA[-event_handler]]></c>.</p>
+
+ <note><p>All event handler modules must have <c>gen_event</c> behavior.
+ These modules must be precompiled and their locations must be
+ added explicitly to the Erlang code server search path (as in the previous
+ example).</p></note>
+
+    <p>An <c>event_handler</c> tuple in argument <c>Opts</c> has the following definition
+ (see <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>):</p>
<pre>
- {event_handler,EventHandlers}
+ {event_handler,EventHandlers}
- EventHandlers = EH | [EH]
- EH = atom() | {atom(),InitArgs} | {[atom()],InitArgs}
- InitArgs = [term()]</pre>
+ EventHandlers = EH | [EH]
+ EH = atom() | {atom(),InitArgs} | {[atom()],InitArgs}
+ InitArgs = [term()]</pre>
- <p>Example:</p>
+ <p>In the following example, two event handlers for the <c>my_SUITE</c> test are installed:</p>
<pre>
- 1> ct:run_test([{suite,"test/my_SUITE"},{event_handler,[my_evh1,{my_evh2,[node()]}]}]).</pre>
- <p>This will install two event handlers for the <c>my_SUITE</c> test. Event handler
- <c>my_evh1</c> is started with <c>[]</c> as argument to the init function. Event handler
- <c>my_evh2</c> is started with the name of the current node in the init argument list.</p>
+ 1> ct:run_test([{suite,"test/my_SUITE"},{event_handler,[my_evh1,{my_evh2,[node()]}]}]).</pre>
+ <p>Event handler <c>my_evh1</c> is started with <c>[]</c> as argument to the init function.
+ Event handler <c>my_evh2</c> is started with the name of the current node in the init argument list.</p>
- <p>Event handlers can also be plugged in by means of
+ <p>Event handlers can also be plugged in using one of the following
<seealso marker="run_test_chapter#test_specifications">test specification</seealso>
terms:</p>
-
- <p><c>{event_handler, EventHandlers}</c>, or</p>
- <p><c>{event_handler, EventHandlers, InitArgs}</c>, or</p>
- <p><c>{event_handler, NodeRefs, EventHandlers}</c>, or</p>
- <p><c>{event_handler, NodeRefs, EventHandlers, InitArgs}</c></p>
+ <list type="bulleted">
+ <item><c>{event_handler, EventHandlers}</c></item>
+ <item><c>{event_handler, EventHandlers, InitArgs}</c></item>
+ <item><c>{event_handler, NodeRefs, EventHandlers}</c></item>
+ <item><c>{event_handler, NodeRefs, EventHandlers, InitArgs}</c></item>
+ </list>
<p><c>EventHandlers</c> is a list of module names. Before a test
session starts, the init function of each plugged in event handler
- is called (with the InitArgs list as argument or [] if
- no start arguments are given).</p>
+ is called (with the <c>InitArgs</c> list as argument or <c>[]</c> if
+ no start arguments are specified).</p>
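+
+    <p>For example, the following test specification term installs two
+    handlers and passes the same start argument list to both (the module
+    names and the argument are illustrative only):</p>
+
+    <pre>
+    {event_handler, [my_evh1, my_evh2], [{debug,true}]}.</pre>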
- <p>To plug a handler into the CT Master event manager, specify
+    <p>To plug a handler into the <c>Common Test</c> Master event manager, specify
<c>master</c> as the node in <c>NodeRefs</c>.</p>
- <p>For an event handler to be able to match on events, the module must
+ <p>To be able to match on events, the event handler module must
include the header file <c>ct_event.hrl</c>. An event is a record with the
following definition:</p>
<p><c>#event{name, node, data}</c></p>
- <p><c>name</c> is the label (type) of the event. <c>node</c> is the name of the
- node the event has originated from (only relevant for CT Master event handlers).
- <c>data</c> is specific for the particular event.</p>
+ <taglist>
+ <tag><c>name</c></tag>
+ <item><p>Label (type) of the event.</p></item>
+ <tag><c>node</c></tag>
+ <item><p>Name of the node that the event originated from
+ (only relevant for <c>Common Test</c> Master event handlers).</p></item>
+ <tag><c>data</c></tag>
+ <item><p>Specific for the event.</p></item>
+ </taglist>
+
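+    <p>A minimal event handler sketch follows (the module name and the
+    printout are illustrative only):</p>
+
+    <pre>
+    -module(my_evh1).
+    -behaviour(gen_event).
+    -include_lib("common_test/include/ct_event.hrl").
+    -export([init/1, handle_event/2, handle_call/2, handle_info/2,
+             terminate/2]).
+
+    init(InitArgs) ->
+        {ok,InitArgs}.
+
+    %% print the result of each finished test case, ignore other events
+    handle_event(#event{name = tc_done, data = {Suite,Func,Result}}, State) ->
+        io:format("~w:~w finished: ~p~n", [Suite,Func,Result]),
+        {ok,State};
+    handle_event(_Event, State) ->
+        {ok,State}.
+
+    handle_call(_Req, State) -> {ok,undefined,State}.
+    handle_info(_Info, State) -> {ok,State}.
+    terminate(_Arg, _State) -> ok.</pre>
+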
<marker id="events"></marker>
- <p><em>General events:</em></p>
+ <section>
+ <title>General Events</title>
- <list>
- <item><c>#event{name = start_logging, data = LogDir}</c>
- <p><c>LogDir = string()</c>, top level log directory for the test run.</p>
- <p>Indicates that the logging process of Common Test
- has started successfully and is ready to receive IO
+ <p>The general events are as follows:</p>
+
+ <taglist>
+ <tag><c>#event{name = start_logging, data = LogDir}</c></tag>
+ <item>
+ <p><c>LogDir = string()</c>, top-level log directory for the test run.</p>
+ <p>This event indicates that the logging process of <c>Common Test</c>
+ has started successfully and is ready to receive I/O
messages.</p></item>
- <item><c>#event{name = stop_logging, data = []}</c>
- <p>Indicates that the logging process of Common Test
- has been shut down at the end of the test run.
+ <tag><c>#event{name = stop_logging, data = []}</c></tag>
+ <item>
+ <p>This event indicates that the logging process of <c>Common Test</c>
+ was shut down at the end of the test run.
</p></item>
- <item><c>#event{name = test_start, data = {StartTime,LogDir}}</c>
+ <tag><c>#event{name = test_start, data = {StartTime,LogDir}}</c></tag>
+ <item>
<p><c>StartTime = {date(),time()}</c>, test run start date and time.</p>
- <p><c>LogDir = string()</c>, top level log directory for the test run.</p>
- <p>This event indicates that Common Test has finished initial preparations
- and will begin executing test cases.
+ <p><c>LogDir = string()</c>, top-level log directory for the test run.</p>
+ <p>This event indicates that <c>Common Test</c> has finished initial preparations
+ and begins executing test cases.
</p></item>
- <item><c>#event{name = test_done, data = EndTime}</c>
+ <tag><c>#event{name = test_done, data = EndTime}</c></tag>
+ <item>
<p><c>EndTime = {date(),time()}</c>, date and time the test run finished.</p>
- <p>This indicates that the last test case has been executed and
- Common Test is shutting down.
+ <p>This event indicates that the last test case has been executed and
+ <c>Common Test</c> is shutting down.
</p></item>
- <item><c>#event{name = start_info, data = {Tests,Suites,Cases}}</c>
- <p><c>Tests = integer()</c>, the number of tests.</p>
- <p><c>Suites = integer()</c>, the total number of suites.</p>
- <p><c>Cases = integer() | unknown</c>, the total number of test cases.</p>
- <p>Initial test run information that can be interpreted as: "This test
- run will execute <c>Tests</c> separate tests, in total containing
+ <tag><c>#event{name = start_info, data = {Tests,Suites,Cases}}</c></tag>
+ <item>
+ <p><c>Tests = integer()</c>, number of tests.</p>
+ <p><c>Suites = integer()</c>, total number of suites.</p>
+ <p><c>Cases = integer() | unknown</c>, total number of test cases.</p>
+ <p>This event gives initial test run information that can be interpreted as:
+ "This test run will execute <c>Tests</c> separate tests, in total containing
<c>Cases</c> number of test cases, in <c>Suites</c> number of suites".
- Note that if a test case group with a repeat property exists in any test,
- the total number of test cases can not be calculated (unknown).
+ However, if a test case group with a repeat property exists in any test,
+ the total number of test cases cannot be calculated (unknown).
</p></item>
- <item><c>#event{name = tc_start, data = {Suite,FuncOrGroup}}</c>
+ <tag><c>#event{name = tc_start, data = {Suite,FuncOrGroup}}</c></tag>
+ <item>
<p><c>Suite = atom()</c>, name of the test suite.</p>
<p><c>FuncOrGroup = Func | {Conf,GroupName,GroupProperties}</c></p>
<p><c>Func = atom()</c>, name of test case or configuration function.</p>
@@ -180,23 +207,24 @@
<p>This event informs about the start of a test case, or a group configuration
function. The event is sent also for <c>init_per_suite</c> and <c>end_per_suite</c>,
but not for <c>init_per_testcase</c> and <c>end_per_testcase</c>. If a group
- configuration function is starting, the group name and execution properties
- are also given.
+ configuration function starts, the group name and execution properties
+ are also specified.
</p></item>
- <item><c>#event{name = tc_logfile, data = {{Suite,Func},LogFileName}}</c>
+ <tag><c>#event{name = tc_logfile, data = {{Suite,Func},LogFileName}}</c></tag>
+ <item>
<p><c>Suite = atom()</c>, name of the test suite.</p>
<p><c>Func = atom()</c>, name of test case or configuration function.</p>
- <p><c>LogFileName = string()</c>, full name of test case log file.</p>
+ <p><c>LogFileName = string()</c>, full name of the test case log file.</p>
<p>This event is sent at the start of each test case (and configuration function
except <c>init/end_per_testcase</c>) and carries information about the
- full name (i.e. the file name including the absolute directory path) of
+ full name (that is, the file name including the absolute directory path) of
the current test case log file.
</p></item>
+ <tag><c>#event{name = tc_done, data = {Suite,FuncOrGroup,Result}}</c></tag>
<item>
- <marker id="tc_done"/>
- <c>#event{name = tc_done, data = {Suite,FuncOrGroup,Result}}</c>
+ <marker id="tc_done"/>
<p><c>Suite = atom()</c>, name of the suite.</p>
<p><c>FuncOrGroup = Func | {Conf,GroupName,GroupProperties}</c></p>
<p><c>Func = atom()</c>, name of test case or configuration function.</p>
@@ -211,34 +239,37 @@
{require_failed_in_suite0,RequireInfo} |
{failed,{Suite,init_per_testcase,FailInfo}} |
UserTerm</c>,
- the reason why the case has been skipped.</p>
+ why the case was skipped.</p>
<marker id="failreason"/>
<p><c>FailReason = {error,FailInfo} |
{error,{RunTimeError,StackTrace}} |
{timetrap_timeout,integer()} |
{failed,{Suite,end_per_testcase,FailInfo}}</c>, reason for failure.</p>
- <p><c>RequireInfo = {not_available,atom() | tuple()}</c>, why require has failed.</p>
+ <p><c>RequireInfo = {not_available,atom() | tuple()}</c>, why require failed.</p>
<p><c>FailInfo = {timetrap_timeout,integer()} |
{RunTimeError,StackTrace} |
UserTerm</c>,
- detailed information about an error.</p>
- <p><c>RunTimeError = term()</c>, a run-time error, e.g. badmatch, undef, etc.</p>
- <p><c>StackTrace = list()</c>, list of function calls preceeding a run-time error.</p>
- <p><c>UserTerm = term()</c>, arbitrary data specified by user, or <c>exit/1</c> info.</p>
- <p>This event informs about the end of a test case or a configuration function (see the
- <c>tc_start</c> event for details on the FuncOrGroup element). With this event comes the
- final result of the function in question. It is possible to determine on the top level
- of <c>Result</c> if the function was successful, skipped (by the user), or if it failed.
- It is of course possible to dig deeper and also perform pattern matching on the various
- reasons for skipped or failed. Note that <c>{'EXIT',Reason}</c> tuples have been translated into
- <c>{error,Reason}</c>. Note also that if a <c>{failed,{Suite,end_per_testcase,FailInfo}</c>
- result is received, it actually means the test case was successful, but that
+ error details.</p>
+ <p><c>RunTimeError = term()</c>, a runtime error, for example,
+ <c>badmatch</c> or <c>undef</c>.</p>
+ <p><c>StackTrace = list()</c>, list of function calls preceding a runtime error.</p>
+      <p><c>UserTerm = term()</c>, any data specified by the user, or <c>exit/1</c> information.</p>
+ <p>This event informs about the end of a test case or a configuration function (see event
+ <c>tc_start</c> for details on element <c>FuncOrGroup</c>). With this event
+ comes the final result of the function in question. It is possible to determine on the
+ top level of <c>Result</c> if the function was successful, skipped (by the user),
+      or if it failed (see the classification sketch at the end of this list).</p>
+ <p>It is also possible to dig deeper and, for example, perform pattern matching
+ on the various reasons for skipped or failed. Notice that <c>{'EXIT',Reason}</c> tuples
+ are translated into <c>{error,Reason}</c>.
+      Notice also that if a <c>{failed,{Suite,end_per_testcase,FailInfo}}</c>
+ result is received, the test case was successful, but
<c>end_per_testcase</c> for the case failed.
</p></item>
+ <tag><c>#event{name = tc_auto_skip, data = {Suite,TestName,Reason}}</c></tag>
<item>
<marker id="tc_auto_skip"></marker>
- <c>#event{name = tc_auto_skip, data = {Suite,TestName,Reason}}</c>
<p><c>Suite = atom()</c>, the name of the suite.</p>
<p><c>TestName = init_per_suite | end_per_suite |
{init_per_group,GroupName} | {end_per_group,GroupName} |
@@ -247,101 +278,116 @@
<p><c>GroupName = atom()</c>, the name of the test case group.</p>
<p><c>Reason = {failed,FailReason} |
{require_failed_in_suite0,RequireInfo}</c>,
- reason for auto skipping <c>Func</c>.</p>
+ reason for auto-skipping <c>Func</c>.</p>
<p><c>FailReason = {Suite,ConfigFunc,FailInfo}} |
{Suite,FailedCaseInSequence}</c>, reason for failure.</p>
- <p><c>RequireInfo = {not_available,atom() | tuple()}</c>, why require has failed.</p>
+ <p><c>RequireInfo = {not_available,atom() | tuple()}</c>, why require failed.</p>
<p><c>ConfigFunc = init_per_suite | init_per_group</c></p>
<p><c>FailInfo = {timetrap_timeout,integer()} |
{RunTimeError,StackTrace} |
bad_return | UserTerm</c>,
- detailed information about an error.</p>
- <p><c>FailedCaseInSequence = atom()</c>, name of a case that has failed in a sequence.</p>
- <p><c>RunTimeError = term()</c>, a run-time error, e.g. badmatch, undef, etc.</p>
- <p><c>StackTrace = list()</c>, list of function calls preceeding a run-time error.</p>
- <p><c>UserTerm = term()</c>, arbitrary data specified by user, or <c>exit/1</c> info.</p>
- <p>This event gets sent for every test case or configuration function that Common Test
+ error details.</p>
+ <p><c>FailedCaseInSequence = atom()</c>, the name of a case that failed in a sequence.</p>
+      <p><c>RunTimeError = term()</c>, a runtime error, for example,
+      <c>badmatch</c> or <c>undef</c>.</p>
+      <p><c>StackTrace = list()</c>, list of function calls preceding a runtime error.</p>
+      <p><c>UserTerm = term()</c>, any data specified by the user, or <c>exit/1</c> information.</p>
+ <p>This event is sent for every test case or configuration function that <c>Common Test</c>
has skipped automatically because of either a failed <c>init_per_suite</c> or
<c>init_per_group</c>, a failed <c>require</c> in <c>suite/0</c>, or a failed test case
- in a sequence. Note that this event is never received as a result of a test case getting
- skipped because of <c>init_per_testcase</c> failing, since that information is carried with
- the <c>tc_done</c> event. If a failed test case belongs to a test case group, the second
- data element is a tuple <c>{FuncName,GroupName}</c>, otherwise simply the function name.
+ in a sequence. Notice that this event is never received as a result of a test case getting
+ skipped because of <c>init_per_testcase</c> failing, as that information is carried with
+ event <c>tc_done</c>. If a failed test case belongs to a test case group, the second
+ data element is a tuple <c>{FuncName,GroupName}</c>, otherwise only the function name.
</p></item>
+ <tag><c>#event{name = tc_user_skip, data = {Suite,TestName,Comment}}</c></tag>
<item>
- <marker id="tc_user_skip"></marker>
- <c>#event{name = tc_user_skip, data = {Suite,TestName,Comment}}</c>
+ <marker id="tc_user_skip"></marker>
<p><c>Suite = atom()</c>, the name of the suite.</p>
<p><c>TestName = init_per_suite | end_per_suite |
{init_per_group,GroupName} | {end_per_group,GroupName} |
{FuncName,GroupName} | FuncName</c></p>
<p><c>FuncName = atom()</c>, the name of the test case or configuration function.</p>
<p><c>GroupName = atom()</c>, the name of the test case group.</p>
- <p><c>Comment = string()</c>, reason for skipping the test case.</p>
- <p>This event specifies that a test case has been skipped by the user.
- It is only ever received if the skip was declared in a test specification.
+ <p><c>Comment = string()</c>, why the test case was skipped.</p>
+ <p>This event specifies that a test case was skipped by the user.
+ It is only received if the skip is declared in a test specification.
Otherwise, user skip information is received as a <c>{skipped,SkipReason}</c>
- result in the <c>tc_done</c> event for the test case. If a skipped test case belongs
+ result in event <c>tc_done</c> for the test case. If a skipped test case belongs
to a test case group, the second data element is a tuple <c>{FuncName,GroupName}</c>,
- otherwise simply the function name.
+ otherwise only the function name.
</p></item>
- <item><c>#event{name = test_stats, data = {Ok,Failed,Skipped}}</c>
- <p><c>Ok = integer()</c>, the current number of successful test cases.</p>
- <p><c>Failed = integer()</c>, the current number of failed test cases.</p>
+ <tag><c>#event{name = test_stats, data = {Ok,Failed,Skipped}}</c></tag>
+ <item>
+ <p><c>Ok = integer()</c>, current number of successful test cases.</p>
+ <p><c>Failed = integer()</c>, current number of failed test cases.</p>
<p><c>Skipped = {UserSkipped,AutoSkipped}</c></p>
- <p><c>UserSkipped = integer()</c>, the current number of user skipped test cases.</p>
- <p><c>AutoSkipped = integer()</c>, the current number of auto skipped test cases.</p>
- <p>This is a statistics event with the current count of successful, skipped
- and failed test cases so far. This event gets sent after the end of each test case,
- immediately following the <c>tc_done</c> event.
+ <p><c>UserSkipped = integer()</c>, current number of user-skipped test cases.</p>
+ <p><c>AutoSkipped = integer()</c>, current number of auto-skipped test cases.</p>
+ <p>This is a statistics event with current count of successful, skipped,
+ and failed test cases so far. This event is sent after the end of each test case,
+ immediately following event <c>tc_done</c>.
</p></item>
- </list>
+ </taglist>
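+
+      <p>As mentioned for event <c>tc_done</c>, the top level of element
+      <c>Result</c> can be matched directly. A minimal classification
+      sketch (the function <c>classify_result/1</c> is hypothetical and
+      assumes the result forms described above):</p>
+      <pre>
+  %% hypothetical helper, not part of Common Test
+  classify_result(ok) -> success;
+  classify_result({skipped,_SkipReason}) -> skipped;
+  classify_result({failed,_FailReason}) -> failed.</pre>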
+ </section>
+
+ <section>
+ <title>Internal Events</title>
- <p><em>Internal events:</em></p>
+ <p>The internal events are as follows:</p>
- <list>
- <item><c>#event{name = start_make, data = Dir}</c>
+ <taglist>
+ <tag><c>#event{name = start_make, data = Dir}</c></tag>
+ <item>
<p><c>Dir = string()</c>, running make in this directory.</p>
- <p>An internal event saying that Common Test will start compiling
+      <p>This internal event indicates that <c>Common Test</c> starts compiling
modules in directory <c>Dir</c>.
</p></item>
- <item><c>#event{name = finished_make, data = Dir}</c>
+ <tag><c>#event{name = finished_make, data = Dir}</c></tag>
+ <item>
<p><c>Dir = string()</c>, finished running make in this directory.</p>
- <p>An internal event saying that Common Test is finished compiling
+      <p>This internal event indicates that <c>Common Test</c> is finished compiling
modules in directory <c>Dir</c>.
</p></item>
- <item><c>#event{name = start_write_file, data = FullNameFile}</c>
+ <tag><c>#event{name = start_write_file, data = FullNameFile}</c></tag>
+ <item>
<p><c>FullNameFile = string(), full name of the file.</c></p>
- <p>An internal event used by the Common Test Master process to
+ <p>This internal event is used by the <c>Common Test</c> Master process to
synchronize particular file operations.
</p></item>
- <item><c>#event{name = finished_write_file, data = FullNameFile}</c>
+ <tag><c>#event{name = finished_write_file, data = FullNameFile}</c></tag>
+ <item>
<p><c>FullNameFile = string(), full name of the file.</c></p>
- <p>An internal event used by the Common Test Master process to
+ <p>This internal event is used by the <c>Common Test</c> Master process to
synchronize particular file operations.
</p></item>
- </list>
-
+ </taglist>
+ </section>
+ <section>
+ <title>Notes</title>
+
<p>The events are also documented in <c>ct_event.erl</c>. This module
- may serve as an example of what an event handler for the CT event
+ can serve as an example of what an event handler for the <c>Common Test</c> event
manager can look like.</p>
- <note><p>To ensure that printouts to standard out (or printouts made with
- <seealso marker="ct#log-2"><c>ct:log/2/3</c></seealso> or <seealso marker="ct:pal-2"><c>ct:pal/2/3</c></seealso>) get written to the test case log
- file, and not to the Common Test framework log, you can syncronize
- with the Common Test server by matching on the <c>tc_start</c> and <c>tc_done</c>
- events. In the period between these events, all IO gets directed to the
+ <note><p>To ensure that printouts to <c>stdout</c> (or printouts made with
+ <seealso marker="ct#log-2"><c>ct:log/2,3</c></seealso> or
+ <seealso marker="ct:pal-2"><c>ct:pal,2,3</c></seealso>) get written to the test case log
+ file, and not to the <c>Common Test</c> framework log, you can synchronize
+ with the <c>Common Test</c> server by matching on evvents <c>tc_start</c> and <c>tc_done</c>.
+ In the period between these events, all I/O is directed to the
test case log file. These events are sent synchronously to avoid potential
- timing problems (e.g. that the test case log file gets closed just before
- an IO message from an external process gets through). Knowing this, you
- need to be careful that your <c>handle_event/2</c> callback function doesn't
- stall the test execution, possibly causing unexpected behaviour as a result.</p></note>
+ timing problems (for example, that the test case log file is closed just before
+ an I/O message from an external process gets through). Knowing this, you
+ need to be careful that your <c>handle_event/2</c> callback function does not
+ stall the test execution, possibly causing unexpected behavior as a result.</p></note>
+ </section>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/example_chapter.xml b/lib/common_test/doc/src/example_chapter.xml
index 8201107c04..8523c9f485 100644
--- a/lib/common_test/doc/src/example_chapter.xml
+++ b/lib/common_test/doc/src/example_chapter.xml
@@ -33,476 +33,472 @@
<marker id="top"></marker>
<section>
- <title>Test suite example</title>
- <p>This example test suite shows some tests of a database server.
+ <title>Test Suite Example</title>
+ <p>The following example test suite shows some tests of a database server:
</p>
<code>
--module(db_data_type_SUITE).
-
--include_lib("common_test/include/ct.hrl").
-
-%% Test server callbacks
--export([suite/0, all/0,
- init_per_suite/1, end_per_suite/1,
- init_per_testcase/2, end_per_testcase/2]).
-
-%% Test cases
--export([string/1, integer/1]).
-
--define(CONNECT_STR, "DSN=sqlserver;UID=alladin;PWD=sesame").
-
-%%--------------------------------------------------------------------
-%% COMMON TEST CALLBACK FUNCTIONS
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% Function: suite() -> Info
-%%
-%% Info = [tuple()]
-%% List of key/value pairs.
-%%
-%% Description: Returns list of tuples to set default properties
-%% for the suite.
-%%--------------------------------------------------------------------
-suite() ->
- [{timetrap,{minutes,1}}].
-
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config0) -> Config1
-%%
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initialization before the suite.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {ok, Ref} = db:connect(?CONNECT_STR, []),
- TableName = db_lib:unique_table_name(),
- [{con_ref, Ref },{table_name, TableName}| Config].
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> term()
-%%
-%% Config = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Cleanup after the suite.
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- Ref = ?config(con_ref, Config),
- db:disconnect(Ref),
- ok.
+ -module(db_data_type_SUITE).
+
+ -include_lib("common_test/include/ct.hrl").
+
+ %% Test server callbacks
+ -export([suite/0, all/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_testcase/2, end_per_testcase/2]).
+
+ %% Test cases
+ -export([string/1, integer/1]).
+
+ -define(CONNECT_STR, "DSN=sqlserver;UID=alladin;PWD=sesame").
+
+ %%--------------------------------------------------------------------
+ %% COMMON TEST CALLBACK FUNCTIONS
+ %%--------------------------------------------------------------------
+
+ %%--------------------------------------------------------------------
+ %% Function: suite() -> Info
+ %%
+ %% Info = [tuple()]
+ %% List of key/value pairs.
+ %%
+ %% Description: Returns list of tuples to set default properties
+ %% for the suite.
+ %%--------------------------------------------------------------------
+ suite() ->
+ [{timetrap,{minutes,1}}].
+
+ %%--------------------------------------------------------------------
+ %% Function: init_per_suite(Config0) -> Config1
+ %%
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %%
+ %% Description: Initialization before the suite.
+ %%--------------------------------------------------------------------
+ init_per_suite(Config) ->
+ {ok, Ref} = db:connect(?CONNECT_STR, []),
+ TableName = db_lib:unique_table_name(),
+ [{con_ref, Ref },{table_name, TableName}| Config].
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_suite(Config) -> term()
+ %%
+ %% Config = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %%
+ %% Description: Cleanup after the suite.
+ %%--------------------------------------------------------------------
+ end_per_suite(Config) ->
+ Ref = ?config(con_ref, Config),
+ db:disconnect(Ref),
+ ok.
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config0) -> Config1
-%%
-%% TestCase = atom()
-%% Name of the test case that is about to run.
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initialization before each test case.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- Ref = ?config(con_ref, Config),
- TableName = ?config(table_name, Config),
- ok = db:create_table(Ref, TableName, table_type(Case)),
- Config.
-
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> term()
-%%
-%% TestCase = atom()
-%% Name of the test case that is finished.
-%% Config = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Cleanup after each test case.
-%%--------------------------------------------------------------------
-end_per_testcase(_Case, Config) ->
- Ref = ?config(con_ref, Config),
- TableName = ?config(table_name, Config),
- ok = db:delete_table(Ref, TableName),
- ok.
-
-%%--------------------------------------------------------------------
-%% Function: all() -> GroupsAndTestCases
-%%
-%% GroupsAndTestCases = [{group,GroupName} | TestCase]
-%% GroupName = atom()
-%% Name of a test case group.
-%% TestCase = atom()
-%% Name of a test case.
-%%
-%% Description: Returns the list of groups and test cases that
-%% are to be executed.
-%%--------------------------------------------------------------------
-all() ->
- [string, integer].
-
-
-%%--------------------------------------------------------------------
-%% TEST CASES
-%%--------------------------------------------------------------------
-
-string(Config) ->
- insert_and_lookup(dummy_key, "Dummy string", Config).
-
-integer(Config) ->
- insert_and_lookup(dummy_key, 42, Config).
-
-
-insert_and_lookup(Key, Value, Config) ->
- Ref = ?config(con_ref, Config),
- TableName = ?config(table_name, Config),
- ok = db:insert(Ref, TableName, Key, Value),
- [Value] = db:lookup(Ref, TableName, Key),
- ok = db:delete(Ref, TableName, Key),
- [] = db:lookup(Ref, TableName, Key),
- ok.
-
-</code>
+ %%--------------------------------------------------------------------
+ %% Function: init_per_testcase(TestCase, Config0) -> Config1
+ %%
+ %% TestCase = atom()
+ %% Name of the test case that is about to run.
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %%
+ %% Description: Initialization before each test case.
+ %%--------------------------------------------------------------------
+ init_per_testcase(Case, Config) ->
+ Ref = ?config(con_ref, Config),
+ TableName = ?config(table_name, Config),
+ ok = db:create_table(Ref, TableName, table_type(Case)),
+ Config.
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_testcase(TestCase, Config) -> term()
+ %%
+ %% TestCase = atom()
+ %% Name of the test case that is finished.
+ %% Config = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %%
+ %% Description: Cleanup after each test case.
+ %%--------------------------------------------------------------------
+ end_per_testcase(_Case, Config) ->
+ Ref = ?config(con_ref, Config),
+ TableName = ?config(table_name, Config),
+ ok = db:delete_table(Ref, TableName),
+ ok.
+
+ %%--------------------------------------------------------------------
+ %% Function: all() -> GroupsAndTestCases
+ %%
+ %% GroupsAndTestCases = [{group,GroupName} | TestCase]
+ %% GroupName = atom()
+ %% Name of a test case group.
+ %% TestCase = atom()
+ %% Name of a test case.
+ %%
+ %% Description: Returns the list of groups and test cases that
+ %% are to be executed.
+ %%--------------------------------------------------------------------
+ all() ->
+ [string, integer].
+
+
+ %%--------------------------------------------------------------------
+ %% TEST CASES
+ %%--------------------------------------------------------------------
+
+ string(Config) ->
+ insert_and_lookup(dummy_key, "Dummy string", Config).
+
+ integer(Config) ->
+ insert_and_lookup(dummy_key, 42, Config).
+
+
+ insert_and_lookup(Key, Value, Config) ->
+ Ref = ?config(con_ref, Config),
+ TableName = ?config(table_name, Config),
+ ok = db:insert(Ref, TableName, Key, Value),
+ [Value] = db:lookup(Ref, TableName, Key),
+ ok = db:delete(Ref, TableName, Key),
+ [] = db:lookup(Ref, TableName, Key),
+ ok.</code>
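+    <p>The helper function <c>table_type/1</c>, called from
+    <c>init_per_testcase/2</c>, is not shown in the example. A minimal
+    sketch, assuming it only maps the test case name to a database table
+    type, can be as follows:</p>
+    <code>
+  %% sketch: map test case name to table type
+  table_type(string) -> string;
+  table_type(integer) -> integer.</code>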
</section>
<section>
- <title>Test suite templates</title>
- <p>The Erlang mode for the Emacs editor includes two Common Test test suite
- templates, one with extensive information in the function headers, and
+ <title>Test Suite Templates</title>
+ <p>The Erlang mode for the Emacs editor includes two <c>Common Test</c> test
+ suite templates, one with extensive information in the function headers, and
one with minimal information. A test suite template provides a quick start
- for implementing a suite from scratch and gives you a good overview
- of the available callback functions. Here are the templates in question:
+ for implementing a suite from scratch and gives a good overview
+      of the available callback functions. The two templates follow:
</p>
- <p><em>Large Common Test suite</em></p>
+ <p><em>Large Common Test Suite</em></p>
<code>
-%%%-------------------------------------------------------------------
-%%% File : example_SUITE.erl
-%%% Author :
-%%% Description :
-%%%
-%%% Created :
-%%%-------------------------------------------------------------------
--module(example_SUITE).
-
-%% Note: This directive should only be used in test suites.
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
-%%--------------------------------------------------------------------
-%% COMMON TEST CALLBACK FUNCTIONS
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% Function: suite() -> Info
-%%
-%% Info = [tuple()]
-%% List of key/value pairs.
-%%
-%% Description: Returns list of tuples to set default properties
-%% for the suite.
-%%
-%% Note: The suite/0 function is only meant to be used to return
-%% default data values, not perform any other operations.
-%%--------------------------------------------------------------------
-suite() ->
- [{timetrap,{minutes,10}}].
-
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config0) ->
-%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
-%%
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Reason = term()
-%% The reason for skipping the suite.
-%%
-%% Description: Initialization before the suite.
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- Config.
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config0) -> term() | {save_config,Config1}
-%%
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Cleanup after the suite.
-%%--------------------------------------------------------------------
-end_per_suite(_Config) ->
- ok.
-
-%%--------------------------------------------------------------------
-%% Function: init_per_group(GroupName, Config0) ->
-%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
-%%
-%% GroupName = atom()
-%% Name of the test case group that is about to run.
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding configuration data for the group.
-%% Reason = term()
-%% The reason for skipping all test cases and subgroups in the group.
-%%
-%% Description: Initialization before each test case group.
-%%--------------------------------------------------------------------
-init_per_group(_GroupName, Config) ->
- Config.
-
-%%--------------------------------------------------------------------
-%% Function: end_per_group(GroupName, Config0) ->
-%% term() | {save_config,Config1}
-%%
-%% GroupName = atom()
-%% Name of the test case group that is finished.
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding configuration data for the group.
-%%
-%% Description: Cleanup after each test case group.
-%%--------------------------------------------------------------------
-end_per_group(_GroupName, _Config) ->
- ok.
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config0) ->
-%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
-%%
-%% TestCase = atom()
-%% Name of the test case that is about to run.
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Reason = term()
-%% The reason for skipping the test case.
-%%
-%% Description: Initialization before each test case.
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(_TestCase, Config) ->
- Config.
-
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config0) ->
-%% term() | {save_config,Config1} | {fail,Reason}
-%%
-%% TestCase = atom()
-%% Name of the test case that is finished.
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Reason = term()
-%% The reason for failing the test case.
-%%
-%% Description: Cleanup after each test case.
-%%--------------------------------------------------------------------
-end_per_testcase(_TestCase, _Config) ->
- ok.
-
-%%--------------------------------------------------------------------
-%% Function: groups() -> [Group]
-%%
-%% Group = {GroupName,Properties,GroupsAndTestCases}
-%% GroupName = atom()
-%% The name of the group.
-%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
-%% Group properties that may be combined.
-%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
-%% TestCase = atom()
-%% The name of a test case.
-%% Shuffle = shuffle | {shuffle,Seed}
-%% To get cases executed in random order.
-%% Seed = {integer(),integer(),integer()}
-%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
-%% repeat_until_any_ok | repeat_until_any_fail
-%% To get execution of cases repeated.
-%% N = integer() | forever
-%%
-%% Description: Returns a list of test case group definitions.
-%%--------------------------------------------------------------------
-groups() ->
- [].
-
-%%--------------------------------------------------------------------
-%% Function: all() -> GroupsAndTestCases | {skip,Reason}
-%%
-%% GroupsAndTestCases = [{group,GroupName} | TestCase]
-%% GroupName = atom()
-%% Name of a test case group.
-%% TestCase = atom()
-%% Name of a test case.
-%% Reason = term()
-%% The reason for skipping all groups and test cases.
-%%
-%% Description: Returns the list of groups and test cases that
-%% are to be executed.
-%%--------------------------------------------------------------------
-all() ->
- [my_test_case].
-
-
-%%--------------------------------------------------------------------
-%% TEST CASES
-%%--------------------------------------------------------------------
-
-%%--------------------------------------------------------------------
-%% Function: TestCase() -> Info
-%%
-%% Info = [tuple()]
-%% List of key/value pairs.
-%%
-%% Description: Test case info function - returns list of tuples to set
-%% properties for the test case.
-%%
-%% Note: This function is only meant to be used to return a list of
-%% values, not perform any other operations.
-%%--------------------------------------------------------------------
-my_test_case() ->
- [].
-
-%%--------------------------------------------------------------------
-%% Function: TestCase(Config0) ->
-%% ok | exit() | {skip,Reason} | {comment,Comment} |
-%% {save_config,Config1} | {skip_and_save,Reason,Config1}
-%%
-%% Config0 = Config1 = [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Reason = term()
-%% The reason for skipping the test case.
-%% Comment = term()
-%% A comment about the test case that will be printed in the html log.
-%%
-%% Description: Test case function. (The name of it must be specified in
-%% the all/0 list or in a test case group for the test case
-%% to be executed).
-%%--------------------------------------------------------------------
-my_test_case(_Config) ->
- ok.
-</code>
+ %%%-------------------------------------------------------------------
+ %%% File : example_SUITE.erl
+ %%% Author :
+ %%% Description :
+ %%%
+ %%% Created :
+ %%%-------------------------------------------------------------------
+ -module(example_SUITE).
+
+ %% Note: This directive should only be used in test suites.
+ -compile(export_all).
+
+ -include_lib("common_test/include/ct.hrl").
+
+ %%--------------------------------------------------------------------
+ %% COMMON TEST CALLBACK FUNCTIONS
+ %%--------------------------------------------------------------------
+
+ %%--------------------------------------------------------------------
+ %% Function: suite() -> Info
+ %%
+ %% Info = [tuple()]
+ %% List of key/value pairs.
+ %%
+ %% Description: Returns list of tuples to set default properties
+ %% for the suite.
+ %%
+ %% Note: The suite/0 function is only meant to be used to return
+ %% default data values, not perform any other operations.
+ %%--------------------------------------------------------------------
+ suite() ->
+ [{timetrap,{minutes,10}}].
+
+ %%--------------------------------------------------------------------
+ %% Function: init_per_suite(Config0) ->
+ %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+ %%
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %% Reason = term()
+ %% The reason for skipping the suite.
+ %%
+ %% Description: Initialization before the suite.
+ %%
+ %% Note: This function is free to add any key/value pairs to the Config
+ %% variable, but should NOT alter/remove any existing entries.
+ %%--------------------------------------------------------------------
+ init_per_suite(Config) ->
+ Config.
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_suite(Config0) -> term() | {save_config,Config1}
+ %%
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %%
+ %% Description: Cleanup after the suite.
+ %%--------------------------------------------------------------------
+ end_per_suite(_Config) ->
+ ok.
+
+ %%--------------------------------------------------------------------
+ %% Function: init_per_group(GroupName, Config0) ->
+ %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+ %%
+ %% GroupName = atom()
+ %% Name of the test case group that is about to run.
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding configuration data for the group.
+ %% Reason = term()
+ %% The reason for skipping all test cases and subgroups in the group.
+ %%
+ %% Description: Initialization before each test case group.
+ %%--------------------------------------------------------------------
+ init_per_group(_GroupName, Config) ->
+ Config.
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_group(GroupName, Config0) ->
+ %% term() | {save_config,Config1}
+ %%
+ %% GroupName = atom()
+ %% Name of the test case group that is finished.
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding configuration data for the group.
+ %%
+ %% Description: Cleanup after each test case group.
+ %%--------------------------------------------------------------------
+ end_per_group(_GroupName, _Config) ->
+ ok.
+
+ %%--------------------------------------------------------------------
+ %% Function: init_per_testcase(TestCase, Config0) ->
+ %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+ %%
+ %% TestCase = atom()
+ %% Name of the test case that is about to run.
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %% Reason = term()
+ %% The reason for skipping the test case.
+ %%
+ %% Description: Initialization before each test case.
+ %%
+ %% Note: This function is free to add any key/value pairs to the Config
+ %% variable, but should NOT alter/remove any existing entries.
+ %%--------------------------------------------------------------------
+ init_per_testcase(_TestCase, Config) ->
+ Config.
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_testcase(TestCase, Config0) ->
+ %% term() | {save_config,Config1} | {fail,Reason}
+ %%
+ %% TestCase = atom()
+ %% Name of the test case that is finished.
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %% Reason = term()
+ %% The reason for failing the test case.
+ %%
+ %% Description: Cleanup after each test case.
+ %%--------------------------------------------------------------------
+ end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+ %%--------------------------------------------------------------------
+ %% Function: groups() -> [Group]
+ %%
+ %% Group = {GroupName,Properties,GroupsAndTestCases}
+ %% GroupName = atom()
+ %% The name of the group.
+ %% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+ %% Group properties that may be combined.
+ %% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+ %% TestCase = atom()
+ %% The name of a test case.
+ %% Shuffle = shuffle | {shuffle,Seed}
+ %% To get cases executed in random order.
+ %% Seed = {integer(),integer(),integer()}
+ %% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+ %% repeat_until_any_ok | repeat_until_any_fail
+ %% To get execution of cases repeated.
+ %% N = integer() | forever
+ %%
+ %% Description: Returns a list of test case group definitions.
+ %%--------------------------------------------------------------------
+ groups() ->
+ [].
+
+ %%--------------------------------------------------------------------
+ %% Function: all() -> GroupsAndTestCases | {skip,Reason}
+ %%
+ %% GroupsAndTestCases = [{group,GroupName} | TestCase]
+ %% GroupName = atom()
+ %% Name of a test case group.
+ %% TestCase = atom()
+ %% Name of a test case.
+ %% Reason = term()
+ %% The reason for skipping all groups and test cases.
+ %%
+ %% Description: Returns the list of groups and test cases that
+ %% are to be executed.
+ %%--------------------------------------------------------------------
+ all() ->
+ [my_test_case].
+
+
+ %%--------------------------------------------------------------------
+ %% TEST CASES
+ %%--------------------------------------------------------------------
+
+ %%--------------------------------------------------------------------
+ %% Function: TestCase() -> Info
+ %%
+ %% Info = [tuple()]
+ %% List of key/value pairs.
+ %%
+ %% Description: Test case info function - returns list of tuples to set
+ %% properties for the test case.
+ %%
+ %% Note: This function is only meant to be used to return a list of
+ %% values, not perform any other operations.
+ %%--------------------------------------------------------------------
+ my_test_case() ->
+ [].
+
+ %%--------------------------------------------------------------------
+ %% Function: TestCase(Config0) ->
+ %% ok | exit() | {skip,Reason} | {comment,Comment} |
+ %% {save_config,Config1} | {skip_and_save,Reason,Config1}
+ %%
+ %% Config0 = Config1 = [tuple()]
+ %% A list of key/value pairs, holding the test case configuration.
+ %% Reason = term()
+ %% The reason for skipping the test case.
+ %% Comment = term()
+ %% A comment about the test case that will be printed in the html log.
+ %%
+ %% Description: Test case function. (The name of it must be specified in
+ %% the all/0 list or in a test case group for the test case
+ %% to be executed).
+ %%--------------------------------------------------------------------
+ my_test_case(_Config) ->
+ ok.</code>
<br></br>
- <p><em>Small Common Test suite</em></p>
+ <p><em>Small Common Test Suite</em></p>
<code>
-%%%-------------------------------------------------------------------
-%%% File : example_SUITE.erl
-%%% Author :
-%%% Description :
-%%%
-%%% Created :
-%%%-------------------------------------------------------------------
--module(example_SUITE).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
-%%--------------------------------------------------------------------
-%% Function: suite() -> Info
-%% Info = [tuple()]
-%%--------------------------------------------------------------------
-suite() ->
- [{timetrap,{seconds,30}}].
-
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config0) ->
-%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
-%% Config0 = Config1 = [tuple()]
-%% Reason = term()
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- Config.
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config0) -> term() | {save_config,Config1}
-%% Config0 = Config1 = [tuple()]
-%%--------------------------------------------------------------------
-end_per_suite(_Config) ->
- ok.
-
-%%--------------------------------------------------------------------
-%% Function: init_per_group(GroupName, Config0) ->
-%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
-%% GroupName = atom()
-%% Config0 = Config1 = [tuple()]
-%% Reason = term()
-%%--------------------------------------------------------------------
-init_per_group(_GroupName, Config) ->
- Config.
-
-%%--------------------------------------------------------------------
-%% Function: end_per_group(GroupName, Config0) ->
-%% term() | {save_config,Config1}
-%% GroupName = atom()
-%% Config0 = Config1 = [tuple()]
-%%--------------------------------------------------------------------
-end_per_group(_GroupName, _Config) ->
- ok.
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config0) ->
-%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
-%% TestCase = atom()
-%% Config0 = Config1 = [tuple()]
-%% Reason = term()
-%%--------------------------------------------------------------------
-init_per_testcase(_TestCase, Config) ->
- Config.
-
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config0) ->
-%% term() | {save_config,Config1} | {fail,Reason}
-%% TestCase = atom()
-%% Config0 = Config1 = [tuple()]
-%% Reason = term()
-%%--------------------------------------------------------------------
-end_per_testcase(_TestCase, _Config) ->
- ok.
-
-%%--------------------------------------------------------------------
-%% Function: groups() -> [Group]
-%% Group = {GroupName,Properties,GroupsAndTestCases}
-%% GroupName = atom()
-%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
-%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
-%% TestCase = atom()
-%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
-%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
-%% repeat_until_any_ok | repeat_until_any_fail
-%% N = integer() | forever
-%%--------------------------------------------------------------------
-groups() ->
- [].
-
-%%--------------------------------------------------------------------
-%% Function: all() -> GroupsAndTestCases | {skip,Reason}
-%% GroupsAndTestCases = [{group,GroupName} | TestCase]
-%% GroupName = atom()
-%% TestCase = atom()
-%% Reason = term()
-%%--------------------------------------------------------------------
-all() ->
- [my_test_case].
-
-%%--------------------------------------------------------------------
-%% Function: TestCase() -> Info
-%% Info = [tuple()]
-%%--------------------------------------------------------------------
-my_test_case() ->
- [].
-
-%%--------------------------------------------------------------------
-%% Function: TestCase(Config0) ->
-%% ok | exit() | {skip,Reason} | {comment,Comment} |
-%% {save_config,Config1} | {skip_and_save,Reason,Config1}
-%% Config0 = Config1 = [tuple()]
-%% Reason = term()
-%% Comment = term()
-%%--------------------------------------------------------------------
-my_test_case(_Config) ->
- ok.
-</code>
+ %%%-------------------------------------------------------------------
+ %%% File : example_SUITE.erl
+ %%% Author :
+ %%% Description :
+ %%%
+ %%% Created :
+ %%%-------------------------------------------------------------------
+ -module(example_SUITE).
+
+ -compile(export_all).
+
+ -include_lib("common_test/include/ct.hrl").
+
+ %%--------------------------------------------------------------------
+ %% Function: suite() -> Info
+ %% Info = [tuple()]
+ %%--------------------------------------------------------------------
+ suite() ->
+ [{timetrap,{seconds,30}}].
+
+ %%--------------------------------------------------------------------
+ %% Function: init_per_suite(Config0) ->
+ %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+ %% Config0 = Config1 = [tuple()]
+ %% Reason = term()
+ %%--------------------------------------------------------------------
+ init_per_suite(Config) ->
+ Config.
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_suite(Config0) -> term() | {save_config,Config1}
+ %% Config0 = Config1 = [tuple()]
+ %%--------------------------------------------------------------------
+ end_per_suite(_Config) ->
+ ok.
+
+ %%--------------------------------------------------------------------
+ %% Function: init_per_group(GroupName, Config0) ->
+ %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+ %% GroupName = atom()
+ %% Config0 = Config1 = [tuple()]
+ %% Reason = term()
+ %%--------------------------------------------------------------------
+ init_per_group(_GroupName, Config) ->
+ Config.
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_group(GroupName, Config0) ->
+ %% term() | {save_config,Config1}
+ %% GroupName = atom()
+ %% Config0 = Config1 = [tuple()]
+ %%--------------------------------------------------------------------
+ end_per_group(_GroupName, _Config) ->
+ ok.
+
+ %%--------------------------------------------------------------------
+ %% Function: init_per_testcase(TestCase, Config0) ->
+ %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+ %% TestCase = atom()
+ %% Config0 = Config1 = [tuple()]
+ %% Reason = term()
+ %%--------------------------------------------------------------------
+ init_per_testcase(_TestCase, Config) ->
+ Config.
+
+ %%--------------------------------------------------------------------
+ %% Function: end_per_testcase(TestCase, Config0) ->
+ %% term() | {save_config,Config1} | {fail,Reason}
+ %% TestCase = atom()
+ %% Config0 = Config1 = [tuple()]
+ %% Reason = term()
+ %%--------------------------------------------------------------------
+ end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+ %%--------------------------------------------------------------------
+ %% Function: groups() -> [Group]
+ %% Group = {GroupName,Properties,GroupsAndTestCases}
+ %% GroupName = atom()
+ %% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+ %% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+ %% TestCase = atom()
+ %% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+ %% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+ %% repeat_until_any_ok | repeat_until_any_fail
+ %% N = integer() | forever
+ %%--------------------------------------------------------------------
+ groups() ->
+ [].
+
+ %%--------------------------------------------------------------------
+ %% Function: all() -> GroupsAndTestCases | {skip,Reason}
+ %% GroupsAndTestCases = [{group,GroupName} | TestCase]
+ %% GroupName = atom()
+ %% TestCase = atom()
+ %% Reason = term()
+ %%--------------------------------------------------------------------
+ all() ->
+ [my_test_case].
+
+ %%--------------------------------------------------------------------
+ %% Function: TestCase() -> Info
+ %% Info = [tuple()]
+ %%--------------------------------------------------------------------
+ my_test_case() ->
+ [].
+
+ %%--------------------------------------------------------------------
+ %% Function: TestCase(Config0) ->
+ %% ok | exit() | {skip,Reason} | {comment,Comment} |
+ %% {save_config,Config1} | {skip_and_save,Reason,Config1}
+ %% Config0 = Config1 = [tuple()]
+ %% Reason = term()
+ %% Comment = term()
+ %%--------------------------------------------------------------------
+ my_test_case(_Config) ->
+ ok.</code>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/getting_started_chapter.xml b/lib/common_test/doc/src/getting_started_chapter.xml
index ef9c409bf1..802f9ba397 100644
--- a/lib/common_test/doc/src/getting_started_chapter.xml
+++ b/lib/common_test/doc/src/getting_started_chapter.xml
@@ -31,235 +31,247 @@
</header>
<section>
- <title>Are you new around here?</title>
+ <title>Introduction for Newcomers</title>
<p>
- The purpose of this short chapter is to, with a "learning by example"
- approach, give the newcomer a chance to get started quickly writing and
- executing some first simple tests. The chapter will introduce some of the
- basics, but leave most explanations and details for the later
- chapters in this User's Guide. Hopefully though, after this chapter, you
- will be inspired and unintimidated enough to go on and get into the
- nitty-gritty that follows in this rather heavy User's Guide! If you're
- not much into "learning by example" and prefer to get into more technical
- detail right away, go ahead and skip to the next chapter. Again, the basics
- presented here will be covered in detail in later chapters.
+      The purpose of this section is to let the newcomer quickly get
+      started in writing and executing some first simple tests, using a
+ "learning by example" approach. Most explanations are left for later sections.
+ If you are not much into "learning by example" and prefer more technical
+ details, go ahead and skip to the next section.
</p>
<p>
- This chapter also tries to demonstrate how dead simple it actually is
- to write a very basic (yet for many module testing purposes, often sufficiently
- complex) test suite, and execute its test cases. This is not necessarily
- obvious when you read the rest of the chapters in the User's Guide.
- </p>
- <p>
- A quick note before we start: In order to understand what's discussed and
- examplified here, it is recommended that you first read through the
- opening <seealso marker="basics_chapter#basics">Common Test Basics</seealso>
- chapter.
+ This section demonstrates how simple it is to write a basic
+ (yet for many module testing purposes, often sufficiently complex)
+ test suite and execute its test cases. This is not necessarily
+ obvious when you read the remaining sections in this User's Guide.
</p>
+ <note>
+ <p>
+        To understand what is discussed and exemplified here, we recommend
+        that you first read section
+ <seealso marker="basics_chapter#basics">Common Test Basics</seealso>.
+ </p>
+ </note>
</section>
<section>
- <title>Test case execution</title>
- <p>Execution of test cases is handled this way:</p>
+ <title>Test Case Execution</title>
+ <p>Execution of test cases is handled as follows:</p>
<image file="tc_execution.gif">
<icaption>
- Successful vs unsuccessful test case execution.
+ Successful and Unsuccessful Test Case Execution
</icaption>
</image>
- <p>For each test case that Common Test is told to execute, it spawns a
- dedicated process on which the test case function in question starts
+ <p>For each test case that <c>Common Test</c> is ordered to execute, it spawns a
+ dedicated process on which the test case function starts
running. (In parallel to the test case process, an idle waiting timer
- process is started which is linked to the test case process. If the timer
+ process is started, which is linked to the test case process. If the timer
process runs out of waiting time, it sends an exit signal to terminate
- the test case process and this is what's called a <em>timetrap</em>).
+ the test case process. This is called a <em>timetrap</em>).
</p>
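+    <p>The timetrap time can, for example, be set for all test cases in a
+    suite by the <c>suite/0</c> info function, as in the following sketch:</p>
+    <pre>
+  suite() ->
+      [{timetrap,{seconds,30}}].</pre>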
- <p>In scenario 1, the test case process terminates normally after case A has
- finished executing its test code without detecting any errors. The test
- case function simply returns a value and Common Test logs the test case as
- successful.
+ <p>In scenario 1, the test case process terminates normally after
+ <c>case A</c> has finished executing its test code without detecting
+ any errors. The test case function returns a value and <c>Common Test</c>
+ logs the test case as successful.
</p>
- <p>In scenario 2, an error is detected during test case execution
- which causes the test case B function to generate an exception.
- This causes the test case process to exit with reason
- other than normal, and as a result, Common Test will log this as an
- unsuccessful test case.
+ <p>In scenario 2, an error is detected during test <c>case B</c> execution.
+ This causes the test <c>case B</c> function to generate an exception
+ and, as a result, the test case process exits with reason other than normal.
+ <c>Common Test</c> logs this as an unsuccessful (Failed) test case.
</p>
- <p>As you can understand from the illustration above, Common Test requires
- that a test case generates a runtime error to indicate failure (e.g.
- by causing a bad match error or by calling <c>exit/1</c>, preferrably
- through the <seealso marker="ct#fail-1"><c>ct:fail/1,2</c></seealso> help function). A succesful execution is
- indicated by means of a normal return from the test case function.
+ <p>As you can understand from the illustration, <c>Common Test</c> requires
+ a test case to generate a runtime error to indicate failure (for example,
+ by causing a bad match error or by calling <c>exit/1</c>, preferably
+ through the help function
+ <seealso marker="ct#fail-1"><c>ct:fail/1,2</c></seealso>). A successful
+ execution is indicated by a normal return from the test case function.
</p>
</section>
<section>
- <title>A simple test suite</title>
- <p>As you've seen in the basics chapter, the test suite module implements
+ <title>A Simple Test Suite</title>
+ <p>As shown in section
+ <seealso marker="basics_chapter#External_Interfaces">Common Test Basics</seealso>,
+ the test suite module implements
<seealso marker="common_test">callback functions</seealso>
- (mandatory or optional) for various purposes, e.g:
+ (mandatory or optional) for various purposes, for example:
</p>
- <list>
+ <list type="bulleted">
<item>Init/end configuration function for the test suite</item>
<item>Init/end configuration function for a test case</item>
<item>Init/end configuration function for a test case group</item>
<item>Test cases</item>
</list>
<p>
- The configuration functions are optional and if you don't need them for
- your test, a test suite with one simple test case could look like this:
+ The configuration functions are optional. The following example is a test suite
+ without configuration functions, including one simple test case, to
+ check that module <c>mymod</c> exists (that is, can be successfully loaded by the
+ code server):
</p>
<pre>
- -module(my1st_SUITE).
- -compile(export_all).
+ -module(my1st_SUITE).
+ -compile(export_all).
- all() ->
- [mod_exists].
+ all() ->
+ [mod_exists].
- mod_exists(_) ->
- {module,mymod} = code:load_file(mymod).</pre>
+ mod_exists(_) ->
+ {module,mymod} = code:load_file(mymod).</pre>
<p>
- In this example we check that the <c>mymod</c> module exists (i.e. can be
- successfully loaded by the code server). If the operation fails, we will
- get a bad match error which terminates the test case.
+ If the operation fails, a bad match error occurs that terminates the test case.
</p>
</section>
<section>
- <title>A test suite with configuration functions</title>
+ <title>A Test Suite with Configuration Functions</title>
<p>
- If we need to perform configuration operations in order to run our test, we
- implement configuration functions in our suite. The result from a
- configuration function is configuration data, or simply <em><c>Config</c></em>.
- This is a list of key-value tuples which get passed from the configuration
+ If you need to perform configuration operations to run your test, you can
+ implement configuration functions in your suite. The result from a
+ configuration function is configuration data, or <c>Config</c>.
+ This is a list of key-value tuples that get passed from the configuration
function to the test cases (possibly through configuration functions on
- "lower level"). The data flow looks like this:
+ "lower level"). The data flow looks as follows:
</p>
<image file="config.gif">
<icaption>
- Config data flow in the suite.
+ Configuration Data Flow in a Suite
</icaption>
</image>
<p>
- Here's an example of a test suite which uses configuration functions
- to open and close a log file for the test cases (an operation that would
- be unnecessary and irrelevant to perform by each test case):
+ The following example shows a test suite that uses configuration functions
+ to open and close a log file for the test cases (an operation that is
+      unnecessary and irrelevant for each test case to perform):
</p>
<pre>
- -module(check_log_SUITE).
- -export([all/0, init_per_suite/1, end_per_suite/1]).
- -export([check_restart_result/1, check_no_errors/1]).
-
- -define(value(Key,Config), proplists:get_value(Key,Config)).
+ -module(check_log_SUITE).
+ -export([all/0, init_per_suite/1, end_per_suite/1]).
+ -export([check_restart_result/1, check_no_errors/1]).
- all() -> [check_restart_result, check_no_errors].
+ -define(value(Key,Config), proplists:get_value(Key,Config)).
- init_per_suite(InitConfigData) ->
- [{logref,open_log()} | InitConfigData].
+ all() -> [check_restart_result, check_no_errors].
- end_per_suite(ConfigData) ->
- close_log(?value(logref, ConfigData)).
+ init_per_suite(InitConfigData) ->
+ [{logref,open_log()} | InitConfigData].
- check_restart_result(ConfigData) ->
- TestData = read_log(restart, ?value(logref, ConfigData)),
- {match,_Line} = search_for("restart successful", TestData).
-
- check_no_errors(ConfigData) ->
- TestData = read_log(all, ?value(logref, ConfigData)),
- case search_for("error", TestData) of
- {match,Line} -> ct:fail({error_found_in_log,Line});
- nomatch -> ok
- end.</pre>
+ end_per_suite(ConfigData) ->
+ close_log(?value(logref, ConfigData)).
+
+ check_restart_result(ConfigData) ->
+ TestData = read_log(restart, ?value(logref, ConfigData)),
+ {match,_Line} = search_for("restart successful", TestData).
+
+ check_no_errors(ConfigData) ->
+ TestData = read_log(all, ?value(logref, ConfigData)),
+ case search_for("error", TestData) of
+ {match,Line} -> ct:fail({error_found_in_log,Line});
+ nomatch -> ok
+ end.</pre>
<p>
- In this example we have test cases that verify, by parsing a
- log file, that our SUT has performed a successful restart and
- that no unexpected errors have been printed.
+      The test cases verify, by parsing a log file, that the SUT has performed
+ a successful restart and that no unexpected errors are printed.
</p>
- <p>To execute the test cases in the test suite above, we could type this on
- the Unix/Linux command line (assuming for this example that the suite module
+    <p>To execute the test cases in the previous test suite, type the
+      following on the UNIX/Linux command line (assuming that the suite module
is in the current working directory):
</p>
<pre>
- $ ct_run -dir .</pre>
- <p>or</p>
+ $ ct_run -dir .</pre>
+ <p>or:</p>
<pre>
- $ ct_run -suite check_log_SUITE</pre>
+ $ ct_run -suite check_log_SUITE</pre>
- <p>If we want to use the Erlang shell to run our test, we could evaluate this call:
+    <p>To use the Erlang shell to run the test, you can evaluate the following call:
</p>
<pre>
- 1> ct:run_test([{dir, "."}]).</pre>
- <p>or</p>
+ 1> ct:run_test([{dir, "."}]).</pre>
+ <p>or:</p>
<pre>
- 1> ct:run_test([{suite, "check_log_SUITE"}]).</pre>
+ 1> ct:run_test([{suite, "check_log_SUITE"}]).</pre>
<p>
- The result from running our test is printed in log files in HTML format
- (stored in unique log directories on different level). This illustration
- shows the log file structure:
+ The result from running the test is printed in log files in HTML format
+      (stored in unique log directories on different levels). The following
+ illustration shows the log file structure:
</p>
<image file="html_logs.gif">
<icaption>
- HTML log file structure.
+ HTML Log File Structure
</icaption>
</image>
</section>
<section>
- <title>What happens next?</title>
+ <title>Questions and Answers</title>
- <p>Well, you might already be asking yourself questions such as:</p>
+    <p>Here follow some questions that you might have after reading this section,
+      with tips and links to the answers:
+ </p>
- <list>
- <item>"How and where can I specify variable data for my tests that mustn't
- be hard-coded in the test suites (such as host names, addresses,
- user login data, etc)?" The
- <seealso marker="config_file_chapter#top">External Configuration Data</seealso>
- chapter will give you that information.
+ <list type="bulleted">
+ <item><p><em>Question:</em>
+ "How and where can I specify variable data for my tests that must not
+ be hard-coded in the test suites (such as hostnames, addresses, and
+ user login data)?"</p>
+ <p><em>Answer:</em>
+      See section <seealso marker="config_file_chapter#top">External Configuration Data</seealso>
+      (a minimal configuration file sketch is shown after this list).</p>
</item>
- <item>"Is there a way to declare a number of different tests and run them
- in one session without having to write my own scripts? And can such
- declarations be used for regression testing?" The
+
+ <item><p><em>Question:</em> "Is there a way to declare different tests and run them
+ in one session without having to write my own scripts? Also, can such
+ declarations be used for regression testing?"</p>
+ <p><em>Answer:</em> See section
<seealso marker="run_test_chapter#test_specifications">Test Specifications</seealso>
- chapter answers these questions.
+ in section Running Tests and Analyzing Results.
+ </p>
</item>
- <item>"Can test cases and/or test runs be automatically repeated?" Learn more about
+
+ <item><p><em>Question:</em> "Can test cases and/or test runs be automatically repeated?"</p>
+ <p><em>Answer:</em> Learn more about
<seealso marker="write_test_chapter#test_case_groups">Test Case Groups</seealso>
- and also read about start flags/options in the
- <seealso marker="run_test_chapter#ct_run">Running Tests</seealso> chapter and
- the Reference Manual.
+ and read about start flags/options in section
+ <seealso marker="run_test_chapter#ct_run">Running Tests</seealso> and in
+ the Reference Manual.</p>
</item>
- <item>"Will Common Test execute my test cases in sequence or in parallel?" The
+
+ <item><p><em>Question:</em> "Does <c>Common Test</c> execute my test cases in sequence or in parallel?"</p>
+ <p><em>Answer:</em> See
<seealso marker="write_test_chapter#test_case_groups">Test Case Groups</seealso>
- section in the Running Tests chapter will give you the answer.
+ in section Writing Test Suites.</p>
</item>
- <item>"What's the syntax for timetraps (mentioned above), and how do I set them?"
- This is explained in the
- <seealso marker="write_test_chapter#timetraps">Timetrap Timeouts</seealso>
- part of the Writing Test Suites chapter.
+
+ <item><p><em>Question:</em> "What is the syntax for timetraps (mentioned earlier), and how do I set them?"</p>
+ <p><em>Answer:</em> This is explained in the
+ <seealso marker="write_test_chapter#timetraps">Timetrap Time-Outs</seealso>
+ part of section Writing Test Suites.</p>
</item>
- <item>"What functions are available for logging and printing?" Check the
+
+ <item><p><em>Question:</em> "What functions are available for logging and printing?"</p>
+ <p><em>Answer:</em> See
<seealso marker="write_test_chapter#logging">Logging</seealso>
- section in the Writing Test Suites chapter.
+ in section Writing Test Suites.</p>
</item>
- <item>"I need data files for my tests. Where do I store them preferrably?"
- You should read about
+
+ <item><p><em>Question:</em> "I need data files for my tests. Where do I store them preferably?"</p>
+ <p><em>Answer:</em> See
<seealso marker="write_test_chapter#data_priv_dir">Data and Private
- Directories</seealso> for information about this.
+ Directories</seealso>.</p>
</item>
- <item>"May I start with a test suite example, please?"
- <seealso marker="example_chapter#top">Sure!</seealso>
+
+ <item><p><em>Question:</em> "Can I start with a test suite example, please?"</p>
+      <p><em>Answer:</em> Yes, see the <seealso marker="example_chapter#top">examples</seealso> to get started.</p>
</item>
</list>
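+    <p>As a minimal sketch of such external configuration data (the file name
+      <c>sys.cfg</c> and its variables are assumed here for illustration),
+      variables are written as Erlang terms in a configuration file:</p>
+    <pre>
+ %% sys.cfg
+ {host, "sys1.example.com"}.
+ {login, [{username,"alice"}, {password,"secret"}]}.</pre>
+    <p>A test case can then read a value with, for example,
+      <c>ct:get_config(host)</c>, and the file is specified at startup, for
+      example, with <c>ct_run -config sys.cfg</c>.</p>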
- <p>You will probably want to get started on your own first test suites now, while
- at the same time digging deeper into the Common Test User's Guide and Reference Manual.
- You will find that there's lots more to learn about the things that have been introduced
- in this chapter. You will of course also be presented many more useful features, such as the
- ones listed above. Have fun!
+ <p>You probably want to get started on your own first test suites now, while
+ at the same time digging deeper into the <c>Common Test</c> User's Guide and Reference Manual.
+      There is much more to learn about the things that have been introduced
+ in this section. There are also many other useful features to learn,
+ so please continue to the other sections and have fun.
</p>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/install_chapter.xml b/lib/common_test/doc/src/install_chapter.xml
index 107b0e2eac..9dce1e31a4 100644
--- a/lib/common_test/doc/src/install_chapter.xml
+++ b/lib/common_test/doc/src/install_chapter.xml
@@ -32,22 +32,23 @@
<section>
<marker id="general"></marker>
- <title>General information</title>
-
- <p>The two main interfaces for running tests with Common Test
- are an executable program named <c>ct_run</c> and an
- erlang module named <c>ct</c>. The ct_run program
- is compiled for the underlying operating system (e.g. Unix/Linux
- or Windows) during the build of the Erlang/OTP system, and is
- installed automatically with other executable programs in
+ <title>General Information</title>
+
+ <p>The two main interfaces for running tests with <c>Common Test</c>
+ are an executable program named
+ <seealso marker="ct_run"><c>ct_run</c></seealso> and the
+ Erlang module <seealso marker="ct"><c>ct</c></seealso>.
+ <c>ct_run</c> is compiled for the underlying operating system (for example,
+    UNIX/Linux or Windows) during the build of the Erlang/OTP system,
+ and is installed automatically with other executable programs in
the top level <c>bin</c> directory of Erlang/OTP.
The <c>ct</c> interface functions can be called from the Erlang shell,
or from any Erlang function, on any supported platform.</p>
- <p>The Common Test application is installed with the Erlang/OTP
- system and no additional installation step is required to start using
- Common Test by means of the <c>ct_run</c> executable program, and/or
- the interface functions in the <c>ct</c> module.</p>
+ <p>The <c>Common Test</c> application is installed with the Erlang/OTP
+ system. No extra installation step is required to start using
+ <c>Common Test</c> through the <c>ct_run</c> executable program,
+ and/or the interface functions in the <c>ct</c> module.</p>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/introduction.xml b/lib/common_test/doc/src/introduction.xml
new file mode 100644
index 0000000000..e2a42bfd33
--- /dev/null
+++ b/lib/common_test/doc/src/introduction.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2003</year><year>2013</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Introduction</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date>2015-10-05</date>
+ <rev></rev>
+ <file>introduction.xml</file>
+ </header>
+ <section>
+ <title>Scope</title>
+ <p><c>Common Test</c> is a portable application for automated
+ testing. It is suitable for:</p>
+ <list type="bulleted">
+ <item><p>Black-box testing of target systems of any type (that
+ is, not necessarily implemented in Erlang). This is performed
+ through standard O&amp;M interfaces (such as SNMP, HTTP, CORBA,
+ and Telnet) and, if necessary, through user-specific interfaces
+ (often called test ports).</p></item>
+ <item><p>White-box testing of Erlang/OTP programs. This is easily
+ done by calling the target API functions directly from the test
+ case functions.</p></item>
+ </list>
+ <p><c>Common Test</c> also integrates use of the OTP
+ <seealso marker="tools:cover">cover</seealso> tool in application
+ <c>Tools</c> for code coverage analysis of Erlang/OTP programs.</p>
+
+ <p><c>Common Test</c> executes test suite programs automatically,
+ without operator interaction. Test progress and results are
+ printed to logs in HTML format, easily browsed with a standard
+ web browser. <c>Common Test</c> also sends notifications about progress
+ and results through an OTP event manager to event handlers plugged
+ in to the system. This way, users can integrate their own
+    programs for, for example, logging, database storage, or supervision with
+ <c>Common Test</c>.</p>
+
+ <p><c>Common Test</c> provides libraries with useful support
+ functions to fill various testing needs and requirements.
+ There is, for example, support for flexible test declarations
+ through test specifications. There is also support
+ for central configuration and control of multiple
+ independent test sessions (to different target systems)
+ running in parallel.</p>
+
+ </section>
+
+ <section>
+ <title>Prerequisites</title>
+ <p>It is assumed that the reader is familiar with the Erlang
+    programming language.</p>
+ </section>
+
+</chapter>
diff --git a/lib/common_test/doc/src/notes.xml b/lib/common_test/doc/src/notes.xml
index 57a8c6c9e2..ff51b101cc 100644
--- a/lib/common_test/doc/src/notes.xml
+++ b/lib/common_test/doc/src/notes.xml
@@ -33,6 +33,91 @@
<file>notes.xml</file>
</header>
+<section><title>Common_Test 1.12</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ This update fixes the problem with generic printouts in
+ the html log file not having special characters escaped.
+ Printouts made with <c>io:format/2</c> and
+ <c>ct:pal/2</c> will now get special characters escaped
+ automatically. Common Test will not attempt to escape
+ characters printed with <c>ct:log/2</c> since it is
+ assumed that the user may want to print html tagged data
+ using this function. A new function, <c>ct:log/5</c>, has
+ been added, which offers optional escaping of characters.
+ The latter function may also be used to print text to the
+	    log without headers and CSS class wrapping (analogous to
+ <c>io:format/2</c>).</p>
+ <p>
+ Own Id: OTP-13003 Aux Id: seq13005 </p>
+ </item>
+ <item>
+ <p>
+ Commit 4cf832f1ad163f5b25dd8a6f2d314c169c23c82f
+ erroneously removed logging of open and close of netconf
+ connections. This is now corrected.</p>
+ <p>
+ Own Id: OTP-13386</p>
+ </item>
+ <item>
+ <p>
+ The directory to which nodes started with
+	    <c>test_server:start_node/3</c> write their
+ erl_crash.dump is changed. The crashdumps were earlier
+ written to the directory of test_server.beam, but in
+ later versions of Microsoft Windows this is no longer
+ writable (even for administrators). The framework
+ (common_test) log directory is now used instead.</p>
+ <p>
+ Own Id: OTP-13388</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ This update makes it possible to specify multiple
+ instances of the same group or test case in one test
+ specification term in order to repeat execution. Example:
+ <c>{groups, "./", my_SUITE, [my_group, my_group], {cases,
+ all}}, or {cases, "./", my_SUITE, [my_tc, my_tc,
+ my_tc]}.</c></p>
+ <p>
+ Own Id: OTP-13241 Aux Id: seq12979 </p>
+ </item>
+ <item>
+ <p>
+ Two new CT hook functions have been added:
+ <c>post_init_per_testcase/4</c> and
+ <c>pre_end_per_testcase/3</c>. With these hook functions,
+ it is possible to perform arbitrary actions (including
+ modifications of test execution, test state and results)
+ immediately before and after the execution of the test
+ case.</p>
+ <p>
+ Own Id: OTP-13242 Aux Id: seq12991 </p>
+ </item>
+ <item>
+ <p>
+	    Module <c>ct_netconfc</c> was earlier very restrictive as to
+	    which SSH options the user could set. This is now
+	    changed, and any SSH option is allowed. The netconf
+	    client simply passes on any option it does not
+	    recognize to SSH.</p>
+ <p>
+ Own Id: OTP-13338 Aux Id: seq13053,seq13069 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Common_Test 1.11.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/common_test/doc/src/part.xml b/lib/common_test/doc/src/part.xml
index 0f4c448787..41e74e57c6 100644
--- a/lib/common_test/doc/src/part.xml
+++ b/lib/common_test/doc/src/part.xml
@@ -31,40 +31,9 @@
</header>
<description>
- <p><em>Common Test</em> is a portable application for automated
- testing. It is suitable for black-box testing of target
- systems of any type (i.e. not necessarily implemented in Erlang),
- as well as for white-box testing of Erlang/OTP programs.
- Black-box testing is performed via standard O&amp;M
- interfaces (such as SNMP, HTTP, Corba, Telnet, etc) and,
- if required, via user specific interfaces (often called test
- ports). White-box testing of Erlang/OTP programs is easily
- accomplished by calling the target API functions directly
- from the test case functions. Common Test also integrates
- usage of the OTP cover tool for code coverage analysis of
- Erlang/OTP programs.</p>
-
- <p>Common Test executes test suite programs automatically,
- without operator interaction. Test progress and results is
- printed to logs on HTML format, easily browsed with a standard
- web browser. Common Test also sends notifications about progress
- and results via an OTP event manager to event handlers plugged
- in to the system. This way users can integrate their own
- programs for e.g. logging, database storing or supervision with
- Common Test.</p>
-
- <p>Common Test provides libraries that contain useful support
- functions to fill various testing needs and requirements.
- There is for example support for flexible test declarations
- by means of so called test specifications. There is also support
- for central configuration and control of multiple
- independent test sessions (towards different target systems)
- running in parallel.</p>
-
- <p>Common Test is implemented as a framework based on the OTP Test
- Server application.</p>
</description>
+ <xi:include href="introduction.xml"/>
<xi:include href="basics_chapter.xml"/>
<xi:include href="getting_started_chapter.xml"/>
<xi:include href="install_chapter.xml"/>
diff --git a/lib/common_test/doc/src/ref_man.xml b/lib/common_test/doc/src/ref_man.xml
index f98e2475a9..19960bfea7 100644
--- a/lib/common_test/doc/src/ref_man.xml
+++ b/lib/common_test/doc/src/ref_man.xml
@@ -30,43 +30,10 @@
<file>ref_man.xml</file>
</header>
<description>
- <p><em>Common Test</em> is a portable application for automated
- testing. It is suitable for black-box testing of target
- systems of any type (i.e. not necessarily implemented in Erlang),
- as well as for white-box testing of Erlang/OTP programs.
- Black-box testing is performed via standard O&amp;M
- interfaces (such as SNMP, HTTP, Corba, Telnet, etc) and,
- if required, via user specific interfaces (often called test
- ports). White-box testing of Erlang/OTP programs is easily
- accomplished by calling the target API functions directly
- from the test case functions. Common Test also integrates
- usage of the OTP cover tool for code coverage analysis of
- Erlang/OTP programs.</p>
-
- <p>Common Test executes test suite programs automatically,
- without operator interaction. Test progress and results is
- printed to logs on HTML format, easily browsed with a standard
- web browser. Common Test also sends notifications about progress
- and results via an OTP event manager to event handlers plugged
- in to the system. This way users can integrate their own
- programs for e.g. logging, database storing or supervision with
- Common Test.</p>
-
- <p>Common Test provides libraries that contain useful support
- functions to fill various testing needs and requirements.
- There is for example support for flexible test declarations
- by means of so called test specifications. There is also support
- for central configuration and control of multiple
- independent test sessions (towards different target systems)
- running in parallel.</p>
-
- <p>Common Test is implemented as a framework based on the OTP Test
- Server application.</p>
</description>
+
<xi:include href="common_test_app.xml"/>
<xi:include href="ct_run.xml"/>
- <!-- If you make modifications in the module list below,
- you also need to update CT_MODULES in Makefile. -->
<xi:include href="ct.xml"/>
<xi:include href="ct_master.xml"/>
<xi:include href="ct_cover.xml"/>
@@ -83,6 +50,3 @@
</application>
-
-
-
diff --git a/lib/common_test/doc/src/run_test_chapter.xml b/lib/common_test/doc/src/run_test_chapter.xml
index 082a587c8d..e04bb3e208 100644
--- a/lib/common_test/doc/src/run_test_chapter.xml
+++ b/lib/common_test/doc/src/run_test_chapter.xml
@@ -33,95 +33,96 @@
<section>
<title>Using the Common Test Framework</title>
- <p>The Common Test Framework provides a high level
- operator interface for testing. It adds the following features to
- the Erlang/OTP Test Server:</p>
-
- <list>
- <item>Automatic compilation of test suites (and help modules).</item>
- <item>Creation of additional HTML pages for better overview.</item>
- <item>Single command interface for running all available tests.</item>
+ <p>The <c>Common Test</c> framework provides a high-level
+    operator interface for testing, with the following features:</p>
+
+ <list type="bulleted">
+ <item>Automatic compilation of test suites (and help modules)</item>
+      <item>Creation of extra HTML pages for improved overview</item>
+ <item>Single-command interface for running all available tests</item>
<item>Handling of configuration files specifying data related to
- the System Under Test (and any other variable data).</item>
+ the System Under Test (SUT) (and any other variable data)</item>
<item>Mode for running multiple independent test sessions in parallel with
- central control and configuration.</item>
+ central control and configuration</item>
</list>
</section>
<section>
- <title>Automatic compilation of test suites and help modules</title>
- <p>When Common Test starts, it will automatically attempt to compile any
+ <title>Automatic Compilation of Test Suites and Help Modules</title>
+ <p>When <c>Common Test</c> starts, it automatically attempts to compile any
suites included in the specified tests. If particular
- suites have been specified, only those suites will be compiled. If a
- particular test object directory has been specified (meaning all suites
- in this directory should be part of the test), Common Test runs
- make:all/1 in the directory to compile the suites.</p>
+ suites are specified, only those suites are compiled. If a
+ particular test object directory is specified (meaning all suites
+ in this directory are to be part of the test), <c>Common Test</c> runs
+ function <c>make:all/1</c> in the directory to compile the suites.</p>
- <p>If compilation should fail for one or more suites, the compilation errors
- are printed to tty and the operator is asked if the test run should proceed
+ <p>If compilation fails for one or more suites, the compilation errors
+ are printed to tty and the operator is asked if the test run is to proceed
without the missing suites, or be aborted. If the operator chooses to proceed,
- it is noted in the HTML log which tests have missing suites. Also, for each failed
- compilation, the failed tests counter in the return value of
- <c><![CDATA[ct:run_test/1]]></c> is incremented. If Common Test is unable to prompt
- the user after compilation failure (if Common Test doesn't control stdin), the test
- run will proceed automatically without the missing suites. In order to always
- abort the test run (without operator interaction) if one or more suites fail
- to compile, the <c><![CDATA[ct_run]]></c> flag <c><![CDATA[-abort_if_missing_suites]]></c>,
- or the <c><![CDATA[ct:run_test/1]]></c> option
- <c><![CDATA[{abort_if_missing_suites,true}]]></c> should be set.</p>
-
- <p>Any help module (i.e. regular Erlang module with name not ending with
- "_SUITE") that resides in the same test object directory as a suite
- which is part of the test, will also be automatically compiled. A help
- module will not be mistaken for a test suite (unless it has a "_SUITE"
- name of course). All help modules in a particular test object directory
- are compiled no matter if all or only particular suites in the directory
+ the tests having missing suites are noted in the HTML log. If <c>Common Test</c> is
+ unable to prompt the user after compilation failure (if <c>Common Test</c> does not
+ control <c>stdin</c>), the test run proceeds automatically without the missing
+    suites. This behavior can, however, be modified with the
+ <c><![CDATA[ct_run]]></c> flag <c><![CDATA[-abort_if_missing_suites]]></c>,
+ or the <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso> option
+ <c><![CDATA[{abort_if_missing_suites,TrueOrFalse}]]></c>. If
+ <c><![CDATA[abort_if_missing_suites]]></c> is set to <c>true</c>, the test run
+ stops immediately if some suites fail to compile.</p>
+
+ <p>Any help module (that is, regular Erlang module with name not ending with
+ "_SUITE") that resides in the same test object directory as a suite,
+ which is part of the test, is also automatically compiled. A help
+ module is not mistaken for a test suite (unless it has a "_SUITE" name).
+ All help modules in a particular test object directory
+ are compiled, no matter if all or only particular suites in the directory
are part of the test.</p>
<p>If test suites or help modules include header files stored in other
- locations than the test directory, you may specify these include directories
- by means of the <c><![CDATA[-include]]></c> flag with <c><![CDATA[ct_run]]></c>,
- or the <c><![CDATA[include]]></c> option with <c><![CDATA[ct:run_test/1]]></c>.
- In addition to this, an include path may be specified with an OS
- environment variable; <c><![CDATA[CT_INCLUDE_PATH]]></c>. Example (bash):</p>
+ locations than the test directory, these include directories can be specified
+ by using flag <c><![CDATA[-include]]></c> with
+ <seealso marker="ct_run"><c>ct_run</c></seealso>,
+ or option <c><![CDATA[include]]></c> with <c><![CDATA[ct:run_test/1]]></c>.
+ Also, an include path can be specified with an OS
+ environment variable, <c><![CDATA[CT_INCLUDE_PATH]]></c>.</p>
+ <p><em>Example (bash):</em></p>
<p><c>$ export CT_INCLUDE_PATH=~testuser/common_suite_files/include:~testuser/common_lib_files/include</c></p>
- <p>Common Test will pass all include directories (specified either with the
- <c><![CDATA[include]]></c> flag/option, or the <c><![CDATA[CT_INCLUDE_PATH]]></c>
- variable, or both) to the compiler.</p>
+ <p><c>Common Test</c> passes all include directories (specified either with flag/option
+    <c><![CDATA[include]]></c>, or variable <c><![CDATA[CT_INCLUDE_PATH]]></c>,
+    or both) to the compiler.</p>
- <p>It is also possible to specify include directories in test specifications
- (see below).</p>
+ <p>Include directories can also be specified in test specifications,
+ see <seealso marker="#test_specifications">Test Specifications</seealso>.</p>
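+    <p>For example, a test specification can contain an <c>include</c> term
+    (the path here is an assumption for illustration):</p>
+    <pre>
+ {include, "../my_app/include"}.</pre>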
- <p>If the user wants to run all test suites for a test object (or OTP application)
- by specifying only the top directory (e.g. with the <c>dir</c> start flag/option),
- Common Test will primarily look for test suite modules in a subdirectory named
- <c>test</c>. If this subdirectory doesn't exist, the specified top directory
- is assumed to be the actual test directory, and test suites will be read from
+ <p>If the user wants to run all test suites for a test object (or an OTP application)
+ by specifying only the top directory (for example, with start flag/option <c>dir</c>),
+ <c>Common Test</c> primarily looks for test suite modules in a subdirectory named
+ <c>test</c>. If this subdirectory does not exist, the specified top directory
+ is assumed to be the test directory, and test suites are read from
there instead.</p>
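+    <p>For example, assuming the conventional layout where the suites of
+    <c>my_app</c> (an assumed application name) are stored in <c>my_app/test</c>,
+    the following command finds and runs them:</p>
+    <pre>
+ $ ct_run -dir ./my_app</pre>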
- <p>It is possible to disable the automatic compilation feature by using the
- <c><![CDATA[-no_auto_compile]]></c> flag with <c><![CDATA[ct_run]]></c>, or
- the <c><![CDATA[{auto_compile,false}]]></c> option with
+ <p>To disable the automatic compilation feature, use flag
+ <c><![CDATA[-no_auto_compile]]></c> with <c><![CDATA[ct_run]]></c>, or
+ option <c><![CDATA[{auto_compile,false}]]></c> with
<c><![CDATA[ct:run_test/1]]></c>. With automatic compilation
disabled, the user is responsible for compiling the test suite modules
- (and any help modules) before the test run. If the modules can not be loaded
- from the local file system during startup of Common Test, the user needs to
- pre-load the modules before starting the test. Common Test will only verify
- that the specified test suites exist (i.e. that they are, or can be, loaded).
- This is useful e.g. if the test suites are transferred and loaded as binaries via
- RPC from a remote node.</p>
+ (and any help modules) before the test run. If the modules cannot be loaded
+ from the local file system during startup of <c>Common Test</c>, the user must
+ preload the modules before starting the test. <c>Common Test</c> only verifies
+ that the specified test suites exist (that is, that they are, or can be, loaded).
+ This is useful, for example, if the test suites are transferred and loaded as
+ binaries through RPC from a remote node.</p>
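+    <p>For example, with the suites compiled in advance, a test run without
+    automatic compilation can be started as follows (a minimal sketch):</p>
+    <pre>
+ $ erlc my_SUITE.erl
+ $ ct_run -dir . -no_auto_compile</pre>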
</section>
<section>
<marker id="ct_run"></marker>
- <title>Running tests from the OS command line</title>
+ <title>Running Tests from the OS Command Line</title>
- <p>The <c>ct_run</c> program can be used for running tests from
- the OS command line, e.g.
+ <p>The <seealso marker="ct_run"><c>ct_run</c></seealso> program can be used
+ for running tests from the OS command line, for example, as follows:
</p>
- <list>
+ <list type="bulleted">
<item><c><![CDATA[ct_run -config <configfilenames> -dir <dirs>]]></c></item>
<item><c><![CDATA[ct_run -config <configfilenames> -suite <suiteswithfullpath>]]></c>
</item>
@@ -130,824 +131,917 @@
<item><c><![CDATA[ct_run -config <configfilenames> -suite <suitewithfullpath>
-group <groups> -case <casenames>]]></c></item>
</list>
- <p>Examples:</p>
- <p><c>$ ct_run -config $CFGS/sys1.cfg $CFGS/sys2.cfg -dir $SYS1_TEST $SYS2_TEST</c></p>
- <p><c>$ ct_run -userconfig ct_config_xml $CFGS/sys1.xml $CFGS/sys2.xml -dir $SYS1_TEST $SYS2_TEST</c></p>
- <p><c>$ ct_run -suite $SYS1_TEST/setup_SUITE $SYS2_TEST/config_SUITE</c></p>
- <p><c>$ ct_run -suite $SYS1_TEST/setup_SUITE -case start stop</c></p>
- <p><c>$ ct_run -suite $SYS1_TEST/setup_SUITE -group installation -case start stop</c></p>
+ <p><em>Examples:</em></p>
+ <pre>
+ $ ct_run -config $CFGS/sys1.cfg $CFGS/sys2.cfg -dir $SYS1_TEST $SYS2_TEST
+ $ ct_run -userconfig ct_config_xml $CFGS/sys1.xml $CFGS/sys2.xml -dir $SYS1_TEST $SYS2_TEST
+ $ ct_run -suite $SYS1_TEST/setup_SUITE $SYS2_TEST/config_SUITE
+ $ ct_run -suite $SYS1_TEST/setup_SUITE -case start stop
+ $ ct_run -suite $SYS1_TEST/setup_SUITE -group installation -case start stop</pre>
- <p>It is also possible to combine the <c>dir</c>, <c>suite</c> and <c>group/case</c> flags. E.g, to run
- <c>x_SUITE</c> and <c>y_SUITE</c> in directory <c>testdir</c>:</p>
-
- <p><c>$ ct_run -dir ./testdir -suite x_SUITE y_SUITE</c></p>
-
- <p>This has the same effect as calling:</p>
-
- <p><c>$ ct_run -suite ./testdir/x_SUITE ./testdir/y_SUITE</c></p>
-
- <p>For more details on <seealso marker="run_test_chapter#group_execution">test case group execution</seealso>, please see below.</p>
-
- <p>Other flags that may be used with <c>ct_run</c>:</p>
- <list>
- <item><c><![CDATA[-help]]></c>, lists all available start flags.</item>
- <item><c><![CDATA[-logdir <dir>]]></c>, specifies where the HTML log files are to be written.</item>
- <item><c><![CDATA[-label <name_of_test_run>]]></c>, associates the test run with a name that gets printed
- in the overview HTML log files.</item>
- <item><c>-refresh_logs</c>, refreshes the top level HTML index files.</item>
- <item><c>-vts</c>, start web based GUI (see below).</item>
- <item><c>-shell</c>, start interactive shell mode (see below).</item>
- <item><c>-step [step_opts]</c>, step through test cases using the Erlang Debugger (see below).</item>
- <item><c><![CDATA[-spec <testspecs>]]></c>, use test specification as input (see below).</item>
- <item><c>-allow_user_terms</c>, allows user specific terms in a test specification (see below).</item>
- <item><c>-silent_connections [conn_types]</c>, tells Common Test to suppress printouts for
- specified connections (see below).</item>
- <item><c><![CDATA[-stylesheet <css_file>]]></c>, points out a user HTML style sheet (see below).</item>
- <item><c><![CDATA[-cover <cover_cfg_file>]]></c>, to perform code coverage test (see
- <seealso marker="cover_chapter#cover">Code Coverage Analysis</seealso>).</item>
- <item><c><![CDATA[-cover_stop <bool>]]></c>, to specify if the cover tool shall be stopped after the test is completed (see
- <seealso marker="cover_chapter#cover_stop">Code Coverage Analysis</seealso>).</item>
- <item><c><![CDATA[-event_handler <event_handlers>]]></c>, to install
- <seealso marker="event_handler_chapter#event_handling">event handlers</seealso>.</item>
- <item><c><![CDATA[-event_handler_init <event_handlers>]]></c>, to install
- <seealso marker="event_handler_chapter#event_handling">event handlers</seealso> including start arguments.</item>
- <item><c><![CDATA[-ct_hooks <ct_hooks>]]></c>, to install
- <seealso marker="ct_hooks_chapter#installing">Common Test Hooks</seealso> including start arguments.</item>
- <item><c><![CDATA[-enable_builtin_hooks <bool>]]></c>, to enable/disable
- <seealso marker="ct_hooks_chapter#builtin_cths">Built-in Common Test Hooks</seealso>. Default is <c>true</c>.</item>
- <item><c><![CDATA[-include]]></c>, specifies include directories (see above).</item>
- <item><c><![CDATA[-no_auto_compile]]></c>, disables the automatic test suite compilation feature (see above).</item>
- <item><c><![CDATA[-abort_if_missing_suites]]></c>, aborts the test run if one or more suites fail to compile (see above).</item>
- <item><c><![CDATA[-multiply_timetraps <n>]]></c>, extends <seealso marker="write_test_chapter#timetraps">timetrap
- timeout</seealso> values.</item>
- <item><c><![CDATA[-scale_timetraps <bool>]]></c>, enables automatic <seealso marker="write_test_chapter#timetraps">timetrap
- timeout</seealso> scaling.</item>
- <item><c><![CDATA[-repeat <n>]]></c>, tells Common Test to repeat the tests n times (see below).</item>
- <item><c><![CDATA[-duration <time>]]></c>, tells Common Test to repeat the tests for duration of time (see below).</item>
- <item><c><![CDATA[-until <stop_time>]]></c>, tells Common Test to repeat the tests until stop_time (see below).</item>
- <item><c>-force_stop [skip_rest]</c>, on timeout, the test run will be aborted when current test job is finished. If <c>skip_rest</c> is provided the rest of the test cases in the current test job will be skipped (see below).</item>
- <item><c><![CDATA[-decrypt_key <key>]]></c>, provides a decryption key for
- <seealso marker="config_file_chapter#encrypted_config_files">encrypted configuration files</seealso>.</item>
- <item><c><![CDATA[-decrypt_file <key_file>]]></c>, points out a file containing a decryption key for
- <seealso marker="config_file_chapter#encrypted_config_files">encrypted configuration files</seealso>.</item>
- <item><c><![CDATA[-basic_html]]></c>, switches off html enhancements that might not be compatible with older browsers.</item>
- <item><c><![CDATA[-logopts <opts>]]></c>, makes it possible to modify aspects of the logging behaviour, see
- <seealso marker="run_test_chapter#logopts">Log options</seealso> below.</item>
- <item><c><![CDATA[-verbosity <levels>]]></c>, sets <seealso marker="write_test_chapter#logging">verbosity levels
- for printouts</seealso>.</item>
- </list>
+ <p>The flags <c>dir</c>, <c>suite</c>, and <c>group/case</c> can be combined.
+    For example, <c>x_SUITE</c> and <c>y_SUITE</c>
+    in directory <c>testdir</c> can be run as follows:</p>
+ <pre>
+ $ ct_run -dir ./testdir -suite x_SUITE y_SUITE</pre>
+
+ <p>This has the same effect as the following:</p>
+ <pre>
+ $ ct_run -suite ./testdir/x_SUITE ./testdir/y_SUITE</pre>
+
+ <p>For details, see
+ <seealso marker="run_test_chapter#group_execution">Test Case Group Execution</seealso>.</p>
+
+ <p>The following flags can also be used with
+ <seealso marker="ct_run"><c>ct_run</c></seealso>:</p>
+ <taglist>
+ <tag><c><![CDATA[-help]]></c></tag>
+ <item><p>Lists all available start flags.</p></item>
+
+ <tag><c><![CDATA[-logdir <dir>]]></c></tag>
+ <item><p>Specifies where the HTML log files are to be written.</p></item>
+
+ <tag><c><![CDATA[-label <name_of_test_run>]]></c></tag>
+ <item><p>Associates the test run with a name that gets printed
+ in the overview HTML log files.</p></item>
+
+ <tag><c>-refresh_logs</c></tag>
+ <item><p>Refreshes the top-level HTML index files.</p></item>
+
+ <tag><c>-vts</c></tag>
+ <item><p>Starts web-based GUI (described later).</p></item>
+
+ <tag><c>-shell</c></tag>
+ <item><p>Starts interactive shell mode (described later).</p></item>
+
+ <tag><c>-step [step_opts]</c></tag>
+ <item><p>Steps through test cases using the Erlang Debugger (described later).</p></item>
+
+ <tag><c><![CDATA[-spec <testspecs>]]></c></tag>
+ <item><p>Uses test specification as input (described later).</p></item>
+
+ <tag><c>-allow_user_terms</c></tag>
+ <item><p>Allows user-specific terms in a test specification (described later).</p></item>
+
+ <tag><c>-silent_connections [conn_types]</c></tag>
+     <item><p>Tells <c>Common Test</c> to suppress printouts for
+ specified connections (described later).</p></item>
+
+ <tag><c><![CDATA[-stylesheet <css_file>]]></c></tag>
+ <item><p>Points out a user HTML style sheet (described later).</p></item>
+
+ <tag><c><![CDATA[-cover <cover_cfg_file>]]></c></tag>
+     <item><p>To perform a code coverage test (see
+ <seealso marker="cover_chapter#cover">Code Coverage Analysis</seealso>).</p></item>
+
+ <tag><c><![CDATA[-cover_stop <bool>]]></c></tag>
+ <item><p>To specify if the <c>cover</c> tool is to be stopped
+ after the test is completed (see
+ <seealso marker="cover_chapter#cover_stop">Code Coverage Analysis</seealso>).</p></item>
+
+ <tag><c><![CDATA[-event_handler <event_handlers>]]></c></tag>
+ <item><p>To install
+ <seealso marker="event_handler_chapter#event_handling">event handlers</seealso>.</p></item>
+
+ <tag><c><![CDATA[-event_handler_init <event_handlers>]]></c></tag>
+ <item><p>To install
+ <seealso marker="event_handler_chapter#event_handling">event handlers</seealso>
+ including start arguments.</p></item>
+
+ <tag><c><![CDATA[-ct_hooks <ct_hooks>]]></c></tag>
+ <item><p>To install
+ <seealso marker="ct_hooks_chapter#installing">Common Test Hooks</seealso>
+ including start arguments.</p></item>
- <note><p>Directories passed to Common Test may have either relative or absolute paths.</p></note>
+ <tag><c><![CDATA[-enable_builtin_hooks <bool>]]></c></tag>
+ <item><p>To enable or disable
+ <seealso marker="ct_hooks_chapter#builtin_cths">Built-in Common Test Hooks</seealso>.
+ Default is <c>true</c>.</p></item>
- <note><p>Arbitrary start flags to the Erlang Runtime System may also be passed as
+ <tag><c><![CDATA[-include]]></c></tag>
+ <item><p>Specifies include directories (described earlier).</p></item>
+
+ <tag><c><![CDATA[-no_auto_compile]]></c></tag>
+ <item><p>Disables the automatic test suite compilation feature (described earlier).</p></item>
+
+ <tag><c><![CDATA[-abort_if_missing_suites]]></c></tag>
+ <item><p>Aborts the test run if one or more suites fail to compile (described earlier).</p></item>
+
+ <tag><c><![CDATA[-multiply_timetraps <n>]]></c></tag>
+ <item><p>Extends <seealso marker="write_test_chapter#timetraps">timetrap
+ time-out</seealso> values.</p></item>
+
+ <tag><c><![CDATA[-scale_timetraps <bool>]]></c></tag>
+ <item><p>Enables automatic <seealso marker="write_test_chapter#timetraps">timetrap
+ time-out</seealso> scaling.</p></item>
+
+ <tag><c><![CDATA[-repeat <n>]]></c></tag>
+ <item><p>Tells <c>Common Test</c> to repeat the tests <c>n</c> times (described later).</p></item>
+
+ <tag><c><![CDATA[-duration <time>]]></c></tag>
+     <item><p>Tells <c>Common Test</c> to repeat the tests for a duration of time (described later).</p></item>
+
+ <tag><c><![CDATA[-until <stop_time>]]></c></tag>
+ <item><p>Tells <c>Common Test</c> to repeat the tests until <c>stop_time</c> (described later).</p></item>
+
+ <tag><c>-force_stop [skip_rest]</c></tag>
+ <item><p>On time-out, the test run is aborted when the current test job is finished. If <c>skip_rest</c>
+ is provided, the remaining test cases in the current test job are skipped (described later).</p></item>
+
+ <tag><c><![CDATA[-decrypt_key <key>]]></c></tag>
+ <item><p>Provides a decryption key for
+ <seealso marker="config_file_chapter#encrypted_config_files">encrypted configuration files</seealso>.</p></item>
+
+ <tag><c><![CDATA[-decrypt_file <key_file>]]></c></tag>
+ <item><p>Points out a file containing a decryption key for
+ <seealso marker="config_file_chapter#encrypted_config_files">encrypted configuration files</seealso>.</p></item>
+
+ <tag><c><![CDATA[-basic_html]]></c></tag>
+ <item><p>Switches off HTML enhancements that can be incompatible with older browsers.</p></item>
+
+ <tag><c><![CDATA[-logopts <opts>]]></c></tag>
+ <item><p>Enables modification of the logging behavior, see
+ <seealso marker="run_test_chapter#logopts">Log options</seealso>.</p></item>
+
+ <tag><c><![CDATA[-verbosity <levels>]]></c></tag>
+ <item><p>Sets <seealso marker="write_test_chapter#logging">verbosity levels
+ for printouts</seealso>.</p></item>
+ </taglist>
+
+ <note><p>Directories passed to <c>Common Test</c> can have either relative or absolute paths.</p></note>
+
+ <note><p>Any start flags to the Erlang runtime system (application <c>ERTS</c>) can also be passed as
parameters to <c>ct_run</c>. It is, for example, useful to be able to
- pass directories that should be added to the Erlang code server search path
- with the <c>-pa</c> or <c>-pz</c> flag. If you have common help- or library
+ pass directories to be added to the Erlang code server search path
+ with flag <c>-pa</c> or <c>-pz</c>. If you have common help- or library
modules for test suites (separately compiled), stored in other directories
- than the test suite directories, these help/lib directories are preferrably
- added to the code path this way. Example:</p>
+ than the test suite directories, these <c>help/lib</c> directories are preferably
+ added to the code path this way.</p>
+ <p><em>Example:</em></p>
<p><c>$ ct_run -dir ./chat_server -logdir ./chat_server/testlogs -pa $PWD/chat_server/ebin</c></p>
- <p>Note how in this example, the absolute path of the <c>chat_server/ebin</c>
- directory is passed to the code server. This is essential since relative
- paths are stored by the code server as relative, and Common Test changes
- the current working directory of the Erlang Runtime System during the test run!</p>
+ <p>The absolute path of directory <c>chat_server/ebin</c>
+ is here passed to the code server. This is essential because relative
+ paths are stored by the code server as relative, and <c>Common Test</c> changes
+ the current working directory of <c>ERTS</c> during the test run.</p>
</note>
<p>The <c>ct_run</c> program sets the exit status before shutting down. The following values
are defined:</p>
- <list>
- <item><c>0</c> indicates a successful testrun, i.e. one without failed or auto skipped test cases.</item>
- <item><c>1</c> indicates that one or more test cases have failed, or have been auto skipped.</item>
- <item><c>2</c> indicates that the test execution has failed because of e.g. compilation errors, an
- illegal return value from an info function, etc.</item>
+ <list type="bulleted">
+      <item><c>0</c> indicates a successful test run, that is, without failed or auto-skipped test cases.</item>
+ <item><c>1</c> indicates that one or more test cases have failed, or have been auto-skipped.</item>
+ <item><c>2</c> indicates that the test execution has failed because of, for example, compilation errors, or an
+ illegal return value from an information function.</item>
</list>
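+    <p>The exit status makes it possible to act on the outcome of a test run
+    from, for example, a shell script (a minimal sketch):</p>
+    <pre>
+ $ ct_run -dir ./testdir
+ $ if [ $? -gt 0 ]; then echo "some tests failed or crashed"; fi</pre>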
- <p>If auto skipped test cases should not affect the exit status, you may change the default
- behaviour using start flag:</p>
- <pre>-exit_status ignore_config</pre>
+    <p>If auto-skipped test cases are not to affect the exit status, the default
+    behavior can be changed using the following start flag:</p>
+ <pre>
+ -exit_status ignore_config</pre>
- <note><p>Executing <c>ct_run</c> without start flags, is equal to the command:
+ <note><p>Executing <c>ct_run</c> without start flags is equal to the command:
<c>ct_run -dir ./</c></p></note>
- <p>For more information about the <c>ct_run</c> program, see the
- <seealso marker="ct_run">Reference Manual</seealso> and the
- <seealso marker="install_chapter#general">Installation</seealso> chapter.
+    <p>For more information about the <c>ct_run</c> program, see the
+    <seealso marker="ct_run"><c>ct_run</c></seealso> manual page and section
+ <seealso marker="install_chapter#general">Installation</seealso>.
</p>
</section>
<section>
- <title>Running tests from the Erlang shell or from an Erlang program</title>
+ <marker id="erlang_shell_or_program"></marker>
+ <title>Running Tests from the Erlang Shell or from an Erlang Program</title>
- <p>Common Test provides an Erlang API for running tests. The main (and most
- flexible) function for specifying and executing tests is called
+ <p><c>Common Test</c> provides an Erlang API for running tests. The main
+ (and most flexible) function for specifying and executing tests is
<seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>.
- This function takes the same start parameters as
- the <seealso marker="run_test_chapter#ct_run"><c>ct_run</c></seealso>
- program described above, only the flags are instead
- given as options in a list of key-value tuples. E.g. a test specified
- with <c>ct_run</c> like:</p>
+ It takes the same start parameters as
+ <seealso marker="run_test_chapter#ct_run"><c>ct_run</c></seealso>,
+ but the flags are instead specified as options in a list of key-value tuples.
+ For example, a test specified with <c>ct_run</c> as follows:</p>
<p><c>$ ct_run -suite ./my_SUITE -logdir ./results</c></p>
<p>is with <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso> specified as:</p>
<p><c>1> ct:run_test([{suite,"./my_SUITE"},{logdir,"./results"}]).</c></p>
- <p>The function returns the test result, represented by the tuple:
+ <p>The function returns the test result, represented by the tuple
<c>{Ok,Failed,{UserSkipped,AutoSkipped}}</c>, where each element is an
- integer. If test execution fails, the function returns the tuple:
+ integer. If test execution fails, the function returns the tuple
<c>{error,Reason}</c>, where the term <c>Reason</c> explains the
failure.</p>
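+    <p>For example, the result tuple can be matched directly in the Erlang shell
+    (<c>my_SUITE</c> is an assumed suite name):</p>
+    <pre>
+ 1> {Ok,Failed,{UserSkipped,AutoSkipped}} = ct:run_test([{suite,"./my_SUITE"}]).</pre>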
- <p>The default start option <c>{dir,Cwd}</c> (run all suites in the current
+ <p>The default start option <c>{dir,Cwd}</c> (to run all suites in the current
working directory) is used if the function is called with an empty
list of options.</p>
<section>
- <title>Releasing the Erlang shell</title>
- <p>During execution of tests, started with
+ <title>Releasing the Erlang Shell</title>
+ <p>During execution of tests started with
<seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>,
- the Erlang shell process, controlling stdin, will remain the top
- level process of the Common Test system of processes. The result
- is that the Erlang shell is not available for interaction during
- the test run. If this is not desirable, maybe because the shell is needed
- for debugging purposes or for interaction with the SUT during test
- execution, you may set the <c>release_shell</c> start option to
+ the Erlang shell process, controlling <c>stdin</c>, remains the top-level
+ process of the <c>Common Test</c> system of processes. Consequently,
+ the Erlang shell is not available for interaction during
+ the test run. If this is not desirable, for example, because the shell
+ is needed for debugging purposes or for interaction with the SUT during test
+ execution, set start option <c>release_shell</c> to
<c>true</c> (in the call to <c>ct:run_test/1</c> or by
- using the corresponding test specification term, see below). This will
- make Common Test release the shell immediately after the test suite
+ using the corresponding test specification term, described later). This
+ makes <c>Common Test</c> release the shell immediately after the test suite
compilation stage. To accomplish this, a test runner process
- is spawned to take control of the test execution, and the effect is that
+ is spawned to take control of the test execution. The effect is that
<c>ct:run_test/1</c> returns the pid of this process rather than the
- test result - which instead is printed to tty at the end of the test run.</p>
- <note><p>Note that in order to use the
- <seealso marker="ct#break-1"><c>ct:break/1/2</c></seealso> and
- <seealso marker="ct#continue-0"><c>ct:continue/0/1</c></seealso> functions,
+ test result, which instead is printed to tty at the end of the test run.</p>
+ <note><p>To use the functions
+ <seealso marker="ct#break-1"><c>ct:break/1,2</c></seealso> and
+ <seealso marker="ct#continue-0"><c>ct:continue/0,1</c></seealso>,
<c>release_shell</c> <em>must</em> be set to <c>true</c>.</p></note>
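+      <p>As a sketch, assuming that suite <c>my_SUITE</c> calls <c>ct:break/1</c>
+      from a test case, a session can look as follows:</p>
+      <pre>
+ 1> ct:run_test([{suite,"./my_SUITE"}, {release_shell,true}]).
+ ...
+ 2> ct:continue().</pre>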
</section>
- <p>For detailed documentation about
- <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>,
- please see the
- <seealso marker="ct#run_test-1"><c>ct</c></seealso> manual page.</p>
+    <p>For details, see the
+    <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso> manual page.</p>
</section>
<section>
<marker id="group_execution"></marker>
- <title>Test case group execution</title>
+ <title>Test Case Group Execution</title>
<p>With the <c>ct_run</c> flag, or <c>ct:run_test/1</c> option <c>group</c>,
one or more test case groups can be specified, optionally in combination
- with specific test cases. The syntax for specifying groups is as follows
- (on the command line):</p>
+ with specific test cases. The syntax for specifying groups on the command line
+ is as follows:</p>
<pre>
- <![CDATA[$ ct_run -group <group_names_or_paths> [-case <cases>]]]></pre>
- <p>or (in the Erlang shell):</p>
+ <![CDATA[$ ct_run -group <group_names_or_paths> [-case <cases>]]]></pre>
+ <p>The syntax in the Erlang shell is as follows:</p>
<pre>
- <![CDATA[1> ct:run_test([{group,GroupsNamesOrPaths}, {case,Cases}]).]]></pre>
+ <![CDATA[1> ct:run_test([{group,GroupsNamesOrPaths}, {case,Cases}]).]]></pre>
- <p>The <c>group_names_or_paths</c> parameter specifies either one
- or more group names and/or one or more group paths. At start up,
- Common Test will search for matching groups in the group definitions
- tree (i.e. the list returned from <c>Suite:groups/0</c>, please see the
- <seealso marker="write_test_chapter#test_case_groups">Test case groups</seealso>
- chapter for details).
- Given a group name, say <c>g</c>, Common Test will search for all paths
- that lead to <c>g</c>. By path here we mean a sequence of nested groups,
- all of which have to be followed in order to get from the top level
- group to <c>g</c>. Actually, what Common Test needs to do in order to
- execute the test cases in group <c>g</c>, is to call the
- <c>init_per_group/2</c> function for each group in the path to
- <c>g</c>, as well as all corresponding <c>end_per_group/2</c>
- functions afterwards. The obvious reason for this is that the configuration
+ <p>Parameter <c>group_names_or_paths</c> specifies one
+ or more group names and/or one or more group paths. At startup,
+ <c>Common Test</c> searches for matching groups in the group definitions
+    tree (that is, the list returned from <c>Suite:groups/0</c>); for details, see section
+ <seealso marker="write_test_chapter#test_case_groups">Test Case Groups</seealso>.
+ </p>
+
+ <p>Given a group name, say <c>g</c>, <c>Common Test</c> searches for all paths
+ leading to <c>g</c>. By path is meant a sequence of nested groups,
+ which must be followed to get from the top-level
+ group to <c>g</c>. To execute the test cases in group <c>g</c>,
+ <c>Common Test</c> must call the <c>init_per_group/2</c> function for
+ each group in the path to <c>g</c>, and all corresponding <c>end_per_group/2</c>
+ functions afterwards. This is because the configuration
of a test case in <c>g</c> (and its <c>Config</c> input data) depends on
<c>init_per_testcase(TestCase, Config)</c> and its return value, which
in turn depends on <c>init_per_group(g, Config)</c> and its return value,
which in turn depends on <c>init_per_group/2</c> of the group above
- <c>g</c>, etc, all the way up to the top level group.</p>
+ <c>g</c>, and so on, all the way up to the top-level group.</p>
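+    <p>Conceptually, for a group path <c>g1 -> g3 -> g4</c>, the configuration
+    functions are thus invoked in the following order (a sketch, not literal code):</p>
+    <pre>
+ init_per_group(g1, ...),
+ init_per_group(g3, ...),
+ init_per_group(g4, ...),
+ %% all (or all matching) test cases in g4
+ end_per_group(g4, ...),
+ end_per_group(g3, ...),
+ end_per_group(g1, ...)</pre>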
- <p>As you may have already realized, this means that if there is more than
- one way to locate a group (and its test cases) in a path, the result of the
- group search operation is a number of tests, all of which will be performed.
- Common Test actually interprets a group specification that consists of a
- single name this way:</p>
+ <p>This means that if there is more than one way to locate a group
+ (and its test cases) in a path, the result of the group search operation
+ is a number of tests, all of which are to be performed.
+ <c>Common Test</c> interprets a group specification that consists of a
+ single name as follows:</p>
<p>"Search and find all paths in the group definitions tree that lead
- to the specified group and, for each path, create a test which (1) executes
- all configuration functions in the path to the specified group, then (2)
- executes all - or all matching - test cases in this group, as well as (3)
- all - or all matching - test cases in all sub groups of the group".
- </p>
+ to the specified group and, for each path, create a test that does the following,
+ in order:</p>
+ <list type="ordered">
+ <item>Executes all configuration functions in the path to the specified group.</item>
+ <item>Executes all, or all matching, test cases in this group.</item>
+ <item>Executes all, or all matching, test cases in all subgroups of the group."</item>
+ </list>
- <p>It is also possible for the user to specify a specific group path with
- the <c>group_names_or_paths</c> parameter. With this type of specification it's
- possible to avoid execution of unwanted groups (in otherwise matching paths),
- and/or the execution of sub groups. The syntax of the group path is a list of
- group names in the path, e.g. on the command line:
+ <p>The user can specify a specific group path with
+    parameter <c>group_names_or_paths</c>. With this type of specification,
+    the execution of unwanted groups (in otherwise matching paths)
+    and/or of subgroups can be avoided. The command-line syntax of the
+ group path is a list of group names in the path, for example:
</p>
<p><c>$ ct_run -suite "./x_SUITE" -group [g1,g3,g4] -case tc1 tc5</c></p>
- <p>or similarly in the Erlang shell (requires a list within the groups list):</p>
+ <p>The syntax in the Erlang shell is as follows (requires a list within the groups list):</p>
<p><c>1> ct:run_test([{suite,"./x_SUITE"}, {group,[[g1,g3,g4]]}, {testcase,[tc1,tc5]}]).</c></p>
- <p>The last group in the specified path will be the terminating group in
- the test, i.e. no sub groups following this group will be executed. In the
- example above, <c>g4</c> is the terminating group, hence Common Test will
- execute a test that calls all init configuration functions in the path to
- <c>g4</c>, i.e. <c>g1..g3..g4</c>. It will then call test cases <c>tc1</c>
- and <c>tc5</c> in <c>g4</c> and finally all end configuration functions in order
- <c>g4..g3..g1</c>.</p>
+ <p>The last group in the specified path is the terminating group in
+ the test, that is, no subgroups following this group are executed. In the
+ previous example, <c>g4</c> is the terminating group. Hence, <c>Common Test</c>
+ executes a test that calls all <c>init</c> configuration functions in the path to
+ <c>g4</c>, that is, <c>g1..g3..g4</c>. It then calls test cases <c>tc1</c>
+ and <c>tc5</c> in <c>g4</c>, and finally all <c>end</c> configuration functions
+ in order <c>g4..g3..g1</c>.</p>
- <p>Note that the group path specification doesn't necessarily
+ <note><p>The group path specification does not necessarily
have to include <em>all</em> groups in the path to the terminating group.
- Common Test will search for all matching paths if given an incomplete group
- path.</p>
+ <c>Common Test</c> searches for all matching paths if an incomplete
+ group path is specified.</p></note>
- <p>Note also that it's possible to combine group names and group paths with the
- <c>group_names_or_paths</c> parameter. Each element is treated as
- an individual specification in combination with the <c>cases</c> parameter.
- See examples below.</p>
+ <note><p>Group names and group paths can be combined with parameter
+ <c>group_names_or_paths</c>. Each element is treated as an individual specification
+ in combination with parameter <c>cases</c>.
+      The following examples illustrate this.</p></note>
+
+ <p><em>Examples:</em></p>
+ <pre>
+ -module(x_SUITE).
+ ...
+ %% The group definitions:
+ groups() ->
+ [{top1,[],[tc11,tc12,
+ {sub11,[],[tc12,tc13]},
+ {sub12,[],[tc14,tc15,
+ {sub121,[],[tc12,tc16]}]}]},
+
+ {top2,[],[{group,sub21},{group,sub22}]},
+ {sub21,[],[tc21,{group,sub2X2}]},
+ {sub22,[],[{group,sub221},tc21,tc22,{group,sub2X2}]},
+ {sub221,[],[tc21,tc23]},
+ {sub2X2,[],[tc21,tc24]}].</pre>
+
+ <p>The following executes two tests, one for all cases and all subgroups
+ under <c>top1</c>, and one for all under <c>top2</c>:</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group all
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,all}]).</pre>
+    <p>Using <c>-group top1 top2</c> or <c>{group,[top1,top2]}</c> gives the same result.</p>
- <p>Examples:</p>
+ <p>The following executes one test for all cases and subgroups under <c>top1</c>:</p>
<pre>
- -module(x_SUITE).
- ...
- %% The group definitions:
- groups() ->
- [{top1,[],[tc11,tc12,
- {sub11,[],[tc12,tc13]},
- {sub12,[],[tc14,tc15,
- {sub121,[],[tc12,tc16]}]}]},
-
- {top2,[],[{group,sub21},{group,sub22}]},
- {sub21,[],[tc21,{group,sub2X2}]},
- {sub22,[],[{group,sub221},tc21,tc22,{group,sub2X2}]},
- {sub221,[],[tc21,tc23]},
- {sub2X2,[],[tc21,tc24]}].
- </pre>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group all</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,all}]).</c></p>
- <p>Two tests will be executed, one for all cases and all sub groups under <c>top1</c>,
- and one for all under <c>top2</c>. (We would get the same result with
- <c>-group top1 top2</c>, or <c>{group,[top1,top2]}</c>.</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group top1</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[top1]}]).</c></p>
- <p>This will execute one test for all cases and sub groups under <c>top1</c>.</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group top1 -case tc12</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[top1]}, {testcase,[tc12]}]).</c></p>
- <p>This will run a test that executes <c>tc12</c> in <c>top1</c> and any sub group
- under <c>top1</c> where it can be found (<c>sub11</c> and <c>sub121</c>).</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group [top1] -case tc12</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[[top1]]}, {testcase,[tc12]}]).</c></p>
- <p>This will execute <c>tc12</c> <em>only</em> in group <c>top1</c>.</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group top1 -case tc16</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[top1]}, {testcase,[tc16]}]).</c></p>
- <p>This will search <c>top1</c> and all its sub groups for <c>tc16</c> and the result
- will be that this test case executes in group <c>sub121</c>. (The specific path:
- <c>-group [sub121]</c> or <c>{group,[[sub121]]}</c>, would have given
- us the same result in this example).</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group sub12 [sub12]</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[sub12,[sub12]]}]).</c></p>
- <p>This will execute two tests, one that includes all cases and sub groups under
- <c>sub12</c>, and one with <em>only</em> the test cases in <c>sub12</c>.</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group sub2X2</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[sub2X2]}]).</c></p>
- <p>In this example, Common Test will find and execute two tests, one for the path from
- <c>top2</c> to <c>sub2X2</c> via <c>sub21</c>, and one from <c>top2</c> to <c>sub2X2</c>
- via <c>sub22</c>.</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group [sub21,sub2X2]</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[[sub21,sub2X2]]}]).</c></p>
- <p>Here, by specifying the unique path: <c>top2 -> sub21 -> sub2X2</c>, only one test
- is executed. The second possible path from <c>top2</c> to <c>sub2X2</c> (above)
- will be discarded.</p>
- <br></br>
- <p><c>$ ct_run -suite "x_SUITE" -group [sub22] -case tc22 tc21</c></p>
- <p><c>1> ct:run_test([{suite,"x_SUITE"}, {group,[[sub22]]}, {testcase,[tc22,tc21]}]).</c></p>
- <p>In this example only the test cases for <c>sub22</c> will be executed, and in
- reverse order compared to the group definition.</p>
- <br></br>
-
- <p>If a test case that belongs to a group (according to the group definition), is executed
- without a group specification, i.e. simply by means of (command line):</p>
+ $ ct_run -suite "x_SUITE" -group top1
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[top1]}]).</pre>
+
+ <p>The following runs a test executing <c>tc12</c> in <c>top1</c> and any subgroup
+ under <c>top1</c> where it can be found (<c>sub11</c> and <c>sub121</c>):</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group top1 -case tc12
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[top1]}, {testcase,[tc12]}]).</pre>
+
+ <p>The following executes <c>tc12</c> <em>only</em> in group <c>top1</c>:</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group [top1] -case tc12
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[[top1]]}, {testcase,[tc12]}]).</pre>
+
+    <p>The following searches <c>top1</c> and all its subgroups for <c>tc16</c>,
+    resulting in this test case executing in group <c>sub121</c>:</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group top1 -case tc16
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[top1]}, {testcase,[tc16]}]).</pre>
+ <p>Using the specific path <c>-group [sub121]</c> or <c>{group,[[sub121]]}</c> gives
+ the same result in this example.</p>
+
+ <p>The following executes two tests, one including all cases and subgroups under
+ <c>sub12</c>, and one with <em>only</em> the test cases in <c>sub12</c>:</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group sub12 [sub12]
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[sub12,[sub12]]}]).</pre>
+
+ <p>In the following example, <c>Common Test</c> finds and executes two tests,
+ one for the path from <c>top2</c> to <c>sub2X2</c> through <c>sub21</c>,
+ and one from <c>top2</c> to <c>sub2X2</c> through <c>sub22</c>:</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group sub2X2
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[sub2X2]}]).</pre>
+
+ <p>In the following example, by specifying the unique path <c>top2 -> sub21 -> sub2X2</c>,
+    only one test is executed. The second possible path from <c>top2</c> to <c>sub2X2</c>
+    (from the previous example) is discarded:</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group [sub21,sub2X2]
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[[sub21,sub2X2]]}]).</pre>
+
+    <p>The following executes only the test cases in <c>sub22</c>, in reverse order
+    compared to the group definition:</p>
+ <pre>
+ $ ct_run -suite "x_SUITE" -group [sub22] -case tc22 tc21
+ 1> ct:run_test([{suite,"x_SUITE"}, {group,[[sub22]]}, {testcase,[tc22,tc21]}]).</pre>
+
+    <p>If a test case belonging to a group (according to the group definition) is executed
+    without a group specification, that is, simply as follows
+    (using the command line):</p>
<p><c>$ ct_run -suite "my_SUITE" -case my_tc</c></p>
- <p>or (Erlang shell):</p>
+ <p>or (using the Erlang shell):</p>
<p><c>1> ct:run_test([{suite,"my_SUITE"}, {testcase,my_tc}]).</c></p>
- <p>then Common Test ignores the group definition and executes the test case in the scope of the
- test suite only (no group configuration functions are called).</p>
+ <p>then <c>Common Test</c> ignores the group definition and executes the test case
+ in the scope of the test suite only (no group configuration functions are called).</p>
- <p>The group specification feature, exactly as it has been presented in this section, can also
+ <p>The group specification feature, as presented in this section, can also
be used in <seealso marker="run_test_chapter#test_specifications">Test
- Specifications</seealso> (with some extra features added). Please see below.</p>
+ Specifications</seealso> (with some extra features added).</p>
</section>
<section>
- <title>Running the interactive shell mode</title>
+ <title>Running the Interactive Shell Mode</title>
- <p>You can start Common Test in an interactive shell mode where no
- automatic testing is performed. Instead, in this mode, Common Test
+ <p>You can start <c>Common Test</c> in an interactive shell mode where no
+ automatic testing is performed. Instead, <c>Common Test</c>
starts its utility processes, installs configuration data (if any),
and waits for the user to call functions (typically test case support
functions) from the Erlang shell.</p>
- <p>The shell mode is useful e.g. for debugging test suites, for analysing
+ <p>The shell mode is useful, for example, for debugging test suites, analyzing
and debugging the SUT during "simulated" test case execution, and
- for trying out various operations during test suite development.</p>
-
- <p>To invoke the interactive shell mode, you can start an Erlang shell
- manually and call <seealso marker="ct#install-1"><c>ct:install/1</c></seealso> to install any configuration
- data you might need (use <c>[]</c> as argument otherwise), then
- call <seealso marker="ct#start_interactive-0"><c>ct:start_interactive/0</c></seealso> to start Common Test. If you use
- the <c>ct_run</c> program, you may start the Erlang shell and Common Test
- in the same go by using the <c>-shell</c> and, optionally, the <c>-config</c>
- and/or <c>-userconfig</c> flag. Examples:
- </p>
- <list>
+ trying out various operations during test suite development.</p>
+
+ <p>To start the interactive shell mode, start an Erlang shell
+ manually and call <seealso marker="ct#install-1"><c>ct:install/1</c></seealso>
+ to install any configuration data you might need (use <c>[]</c> as argument otherwise).
+ Then call <seealso marker="ct#start_interactive-0"><c>ct:start_interactive/0</c></seealso>
+ to start <c>Common Test</c>.</p>
+
+ <p>If you use the <c>ct_run</c> program, you can start
+ the Erlang shell and <c>Common Test</c> in one go by using the flag <c>-shell</c> and,
+ optionally, flag <c>-config</c> and/or <c>-userconfig</c>.</p>
+ <p><em>Examples:</em></p>
+ <list type="bulleted">
<item><c>ct_run -shell</c></item>
<item><c><![CDATA[ct_run -shell -config cfg/db.cfg]]></c></item>
<item><c><![CDATA[ct_run -shell -userconfig db_login testuser x523qZ]]></c></item>
</list>
- <p>If no config file is given with the <c>ct_run</c> command,
- a warning will be displayed. If Common Test has been run from the same
- directory earlier, the same config file(s) will be used
- again. If Common Test has not been run from this directory before, no
- config files will be available.</p>
+ <p>If no configuration file is specified with command <c>ct_run</c>,
+ a warning is displayed. If <c>Common Test</c> has been run from the same
+ directory earlier, the same configuration file(s) are used again. If <c>Common Test</c>
+ has not been run from this directory before, no configuration files are available.</p>
- <p>If any functions using "required config data" (e.g. ct_telnet or
- ct_ftp functions) are to be called from the erlang shell, config
- data must first be required with <seealso marker="ct#require-1"><c>
- ct:require/1/2</c></seealso>. This is
- equivalent to a <c>require</c> statement in the <seealso
- marker="write_test_chapter#suite">Test Suite Info
- Function</seealso> or in the <seealso
- marker="write_test_chapter#info_function">Test Case Info
- Function</seealso>.</p>
-
- <p>Example:</p>
+      <p>If any functions using "required configuration data" (for example,
+      <c>ct_telnet</c> or <c>ct_ftp</c> functions) are to be called from the Erlang shell, first require
+ configuration data with <seealso marker="ct#require-1"><c>
+ ct:require/1,2</c></seealso>. This is equivalent to a <c>require</c> statement
+ in the <seealso marker="write_test_chapter#suite">Test Suite Information Function</seealso>
+ or in the <seealso marker="write_test_chapter#info_function">Test Case Information Function</seealso>.</p>
+
+ <p><em>Example:</em></p>
<pre>
- 1> ct:require(unix_telnet, unix).
- ok
- 2> ct_telnet:open(unix_telnet).
- {ok,&lt;0.105.0&gt;}
- 4> ct_telnet:cmd(unix_telnet, "ls .").
- {ok,["ls .","file1 ...",...]}
- </pre>
+ 1> ct:require(unix_telnet, unix).
+ ok
+ 2> ct_telnet:open(unix_telnet).
+ {ok,&lt;0.105.0&gt;}
+ 4> ct_telnet:cmd(unix_telnet, "ls .").
+ {ok,["ls .","file1 ...",...]}</pre>
- <p>Everything that Common Test normally prints in the test case logs,
- will in the interactive mode be written to a log named
- <c>ctlog.html</c> in the <c><![CDATA[ct_run.<timestamp>]]></c>
- directory. A link to this file will be available in the file
- named <c>last_interactive.html</c> in the directory from which
- you executed <c>ct_run</c>. Currently, specifying a different
- root directory for the logs than the current working directory,
+      <p>Everything that <c>Common Test</c> normally prints in the test case logs
+      is, in the interactive mode, written to a log named <c>ctlog.html</c>
+ in directory <c><![CDATA[ct_run.<timestamp>]]></c>. A link to this
+ file is available in the file named <c>last_interactive.html</c> in the
+ directory from which you execute <c>ct_run</c>. Specifying a different
+ root directory for the logs than the current working directory
is not supported.</p>
- <p>If you wish to exit the interactive mode (e.g. to start an
- automated test run with <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>), call the function
- <seealso marker="ct#stop_interactive-0"><c>ct:stop_interactive/0</c></seealso>. This shuts down the
- running <c>ct</c> application. Associations between
+ <p>If you wish to exit the interactive mode (for example, to start an automated
+ test run with <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>),
+ call function
+ <seealso marker="ct#stop_interactive-0"><c>ct:stop_interactive/0</c></seealso>.
+ This shuts down the running <c>ct</c> application. Associations between
configuration names and data created with <c>require</c> are
- consequently deleted. <seealso marker="ct#start_interactive-0"><c>ct:start_interactive/0</c></seealso> will get you
- back into interactive mode, but the previous state is not restored.</p>
+ consequently deleted. Function
+ <seealso marker="ct#start_interactive-0"><c>ct:start_interactive/0</c></seealso>
+ takes you back into interactive mode, but the previous state is not restored.</p>
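+
+      <p>A minimal sketch of leaving and re-entering the interactive mode
+      (the <c>ok</c> return values are assumed here):</p>
+      <pre>
+      1> ct:stop_interactive().
+      ok
+      2> ct:start_interactive().
+      ok</pre>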
</section>
<section>
- <title>Step by step execution of test cases with the Erlang Debugger</title>
+ <title>Step-by-Step Execution of Test Cases with the Erlang Debugger</title>
- <p>By means of <c>ct_run -step [opts]</c>, or by passing the
- <c>{step,Opts}</c> option to <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>, it is possible
- to get the Erlang Debugger started automatically and use its
- graphical interface to investigate the state of the current test
- case and to execute it step by step and/or set execution breakpoints.</p>
- <p>If no extra options are given with the <c>step</c> flag/option,
- breakpoints will be set automatically on the test cases that
- are to be executed by Common Test, and those functions only. If
- the step option <c>config</c> is specified, breakpoints will
- also be initially set on the configuration functions in the suite, i.e.
+    <p>By using <c>ct_run -step [opts]</c>, or by passing option <c>{step,Opts}</c>
+ to <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>,
+ the following is possible:</p>
+ <list type="bulleted">
+ <item>Get the Erlang Debugger started automatically.</item>
+ <item>Use its graphical interface to investigate the state of the current test case.</item>
+ <item>Execute the test case step-by-step and/or set execution breakpoints.</item>
+ </list>
+ <p>If no extra options are specified with flag/option <c>step</c>,
+ breakpoints are set automatically on the test cases that
+ are to be executed by <c>Common Test</c>, and those functions only. If
+ step option <c>config</c> is specified, breakpoints are also initially
+ set on the configuration functions in the suite, that is,
<c>init_per_suite/1</c>, <c>end_per_suite/1</c>,
<c>init_per_group/2</c>, <c>end_per_group/2</c>,
<c>init_per_testcase/2</c> and <c>end_per_testcase/2</c>.</p>
- <p>Common Test enables the Debugger auto attach feature, which means
+ <p><c>Common Test</c> enables the Debugger auto-attach feature, which means
that for every new interpreted test case function that starts to execute,
- a new trace window will automatically pop up. (This is because each test
+ a new trace window automatically pops up (as each test
case executes on a dedicated Erlang process). Whenever a new test case starts,
- Common Test will attempt to close the inactive trace window of the previous
- test case. However, if you prefer that Common Test leaves inactive trace
- windows, use the <c>keep_inactive</c> option.</p>
- <p>The step functionality can be used together with the <c>suite</c> and
- the <c>suite</c> + <c>case/testcase</c> flag/option, but not together
- with <c>dir</c>.</p>
+ <c>Common Test</c> attempts to close the inactive trace window of the previous
+ test case. However, if you prefer <c>Common Test</c> to leave inactive trace
+ windows, use option <c>keep_inactive</c>.</p>
+ <p>The step functionality can be used together with flag/option <c>suite</c> and
+ <c>suite</c> + <c>case/testcase</c>, but not together with <c>dir</c>.</p>
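+
+    <p>For example, the following sketch (with hypothetical suite <c>my_SUITE</c>
+    and test case <c>my_tc</c>) starts the Debugger with breakpoints also on the
+    configuration functions and keeps inactive trace windows:</p>
+    <pre>
+    $ ct_run -step [config,keep_inactive] -suite "my_SUITE" -case my_tc
+    1> ct:run_test([{suite,"my_SUITE"}, {testcase,my_tc}, {step,[config,keep_inactive]}]).</pre>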
</section>
<section>
<marker id="test_specifications"></marker>
<title>Test Specifications</title>
<section>
- <title>General description</title>
- <p>The most flexible way to specify what to test, is to use a so
- called test specification. A test specification is a sequence of
+ <title>General Description</title>
+ <p>The most flexible way to specify what to test, is to use a
+ test specification, which is a sequence of
Erlang terms. The terms are normally declared in one or more text files
(see <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso>), but
- may also be passed to Common Test on the form of a list (see
+      can also be passed to <c>Common Test</c> in the form of a list (see
<seealso marker="ct#run_testspec-1"><c>ct:run_testspec/1</c></seealso>).
There are two general types of terms: configuration terms and test
specification terms.</p>
- <p>With configuration terms it is possible to e.g. label the test
- run (similar to <c>ct_run -label</c>), evaluate arbitrary expressions
- before starting the test, import configuration data (similar to
- <c>ct_run -config/-userconfig</c>), specify the top level HTML log
- directory (similar to <c>ct_run -logdir</c>), enable code coverage
- analysis (similar to <c>ct_run -cover</c>), install Common Test Hooks
- (similar to <c>ct_run -ch_hooks</c>), install event_handler plugins
- (similar to <c>ct_run -event_handler</c>), specify include directories
- that should be passed to the compiler for automatic compilation
- (similar to <c>ct_run -include</c>), disable the auto compilation
- feature (similar to <c>ct_run -no_auto_compile</c>), set verbosity
- levels (similar to <c>ct_run -verbosity</c>), and more.</p>
- <p>Configuration terms can be combined with <c>ct_run</c> start flags,
- or <c>ct:run_test/1</c> options. The result will for some flags/options
- and terms be that the values are merged (e.g. configuration files,
- include directories, verbosity levels, silent connections), and for
+
+ <p>With configuration terms it is, for example, possible to do the following:</p>
+ <list type="bulleted">
+ <item>Label the test run (similar to <c>ct_run -label</c>).</item>
+ <item>Evaluate any expressions before starting the test.</item>
+ <item>Import configuration data (similar to <c>ct_run -config/-userconfig</c>).</item>
+ <item>Specify the top-level HTML log directory (similar to <c>ct_run -logdir</c>).</item>
+ <item>Enable code coverage analysis (similar to <c>ct_run -cover</c>).</item>
+        <item>Install <c>Common Test Hooks</c> (similar to <c>ct_run -ct_hooks</c>).</item>
+ <item>Install <c>event_handler</c> plugins (similar to <c>ct_run -event_handler</c>).</item>
+ <item>Specify include directories to be passed to the compiler for
+ automatic compilation (similar to <c>ct_run -include</c>).</item>
+ <item>Disable the auto-compilation feature (similar to <c>ct_run -no_auto_compile</c>).</item>
+ <item>Set verbosity levels (similar to <c>ct_run -verbosity</c>).</item>
+ </list>
+
+ <p>Configuration terms can be combined with <c>ct_run</c> start flags
+ or <c>ct:run_test/1</c> options. The result is, for some flags/options
+ and terms, that the values are merged (for example, configuration files,
+ include directories, verbosity levels, and silent connections) and for
others that the start flags/options override the test specification
- terms (e.g. log directory, label, style sheet, auto compilation).</p>
- <p>With test specification terms it is possible to state exactly
- which tests should run and in which order. A test term specifies
+ terms (for example, log directory, label, style sheet, and auto-compilation).</p>
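+
+      <p>For example, a sketch (with hypothetical file names) of how start flags
+      interact with test specification terms:</p>
+      <pre>
+      $ ct_run -spec a.spec -config extra.cfg    (extra.cfg is merged with {config,...} terms)
+      $ ct_run -spec a.spec -logdir ./mylogs     (./mylogs overrides any {logdir,...} term)</pre>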
+
+ <p>With test specification terms, it is possible to state exactly
+ which tests to run and in which order. A test term specifies
either one or more suites, one or more test case groups (possibly nested),
or one or more test cases in a group (or in multiple groups) or in a suite.</p>
- <p>An arbitrary number of test terms may be declared in sequence.
- Common Test will by default compile the terms into one or more tests
- to be performed in one resulting test run. Note that a term that
- specifies a set of test cases will "swallow" one that only
- specifies a subset of these cases. E.g. the result of merging
- one term that specifies that all cases in suite S should be
+
+ <p>Any number of test terms can be declared in sequence.
+      By default, <c>Common Test</c> compiles the terms into one or more tests
+ to be performed in one resulting test run. A term that
+ specifies a set of test cases "swallows" one that only
+ specifies a subset of these cases. For example, the result of merging
+ one term specifying that all cases in suite S are to be
executed, with another term specifying only test case X and Y in
S, is a test of all cases in S. However, if a term specifying
test case X and Y in S is merged with a term specifying case Z
- in S, the result is a test of X, Y and Z in S. To disable this
- behaviour, i.e. to instead perform each test sequentially in a "script-like"
- manner, the term <c>merge_tests</c> can be set to <c>false</c> in
- the test specification.</p>
+ in S, the result is a test of X, Y, and Z in S. To disable this
+ behavior, that is, to instead perform each test sequentially in a
+ "script-like" manner, set term <c>merge_tests</c> to <c>false</c>
+ in the test specification.</p>
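+
+      <p>A minimal sketch of this merging behavior, assuming a suite
+      <c>s_SUITE</c> with test cases <c>x</c> and <c>y</c> (all names hypothetical):</p>
+      <pre>
+      {suites, "./tests", [s_SUITE]}.      %% all cases in s_SUITE
+      {cases, "./tests", s_SUITE, [x,y]}.  %% "swallowed" by the term above
+
+      %% Add this to run the two terms as separate tests instead:
+      {merge_tests, false}.</pre>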
+
<p>A test term can also specify one or more test suites, groups,
- or test cases to be skipped. Skipped suites, groups and cases
- are not executed and show up in the HTML log files as
- SKIPPED.</p>
+ or test cases to be skipped. Skipped suites, groups, and cases
+ are not executed and show up in the HTML log files as <c>SKIPPED</c>.</p>
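+
+      <p>For example, a sketch of skip terms (with hypothetical directory, suite,
+      group, and case names):</p>
+      <pre>
+      {skip_suites, "./tests", [old_SUITE], "Obsolete"}.
+      {skip_groups, "./tests", x_SUITE, [g1], "Not applicable"}.
+      {skip_cases, "./tests", x_SUITE, [tc1], "Known bug"}.</pre>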
</section>
<section>
- <title>Using multiple test specification files</title>
+ <title>Using Multiple Test Specification Files</title>
- <p>When multiple test specification files are given at startup (either
+ <p>When multiple test specification files are specified at startup (either
with <c>ct_run -spec file1 file2 ...</c> or
<c>ct:run_test([{spec, [File1,File2,...]}])</c>),
- Common Test will either execute one test run per specification file, or
- join the files and perform all tests within one single test run. The first
- behaviour is the default one. The latter requires that the start
- flag/option <c>join_specs</c> is provided, e.g.
+ <c>Common Test</c> either executes one test run per specification file,
+ or joins the files and performs all tests within one single test run.
+      The first behavior is the default. The second requires that start
+ flag/option <c>join_specs</c> is provided, for example,
<c>ct_run -spec ./my_tests1.ts ./my_tests2.ts -join_specs</c>.</p>
<p>Joining a number of specifications, or running them separately, can
- also be accomplished with (and may be combined with) test specification
- file inclusion, described next.</p>
+ also be accomplished with (and can be combined with) test specification
+ file inclusion.</p>
</section>
<section>
- <title>Test specification file inclusion</title>
- <p>With the <c>specs</c> term (see syntax below), it's possible to have
- a test specification include other specifications. An included
- specification may either be joined with the source specification,
- or used to produce a separate test run (like with the <c>join_specs</c>
- start flag/option above). Example:</p>
+ <title>Test Specification File Inclusion</title>
+ <p>With the term <c>specs</c>, a test specification can include
+ other specifications. An included specification can either be joined
+ with the source specification or used to produce a separate test run
+ (as with start flag/option <c>join_specs</c> above).</p>
+ <p><em>Example:</em></p>
+
<pre>
- %% In specification file "a.spec"
- {specs, join, ["b.spec", "c.spec"]}.
- {specs, separate, ["d.spec", "e.spec"]}.
- %% Config and test terms follow
- ...</pre>
+ %% In specification file "a.spec"
+ {specs, join, ["b.spec", "c.spec"]}.
+ {specs, separate, ["d.spec", "e.spec"]}.
+ %% Config and test terms follow
+ ...</pre>
+
<p>In this example, the test terms defined in files "b.spec" and "c.spec"
- will be joined with the terms in the source specification "a.spec"
+ are joined with the terms in source specification "a.spec"
(if any). The inclusion of specifications "d.spec" and
- "e.spec" will result in two separate, and independent, test runs (i.e.
- one for each included specification).</p>
- <p>Note that the <c>join</c> option does not imply that the test terms
- will be merged (see <c>merge_tests</c> above), only that all tests are
- executed in one single test run.</p>
+ "e.spec" results in two separate, and independent, test runs
+ (one for each included specification).</p>
+
+ <p>Option <c>join</c> does not imply that the test terms
+ are merged, only that all tests are executed in one single test run.</p>
+
<p>Joined specifications share common configuration settings, such as
the list of <c>config</c> files or <c>include</c> directories.
- For configuration that can not be combined, such as settings for <c>logdir</c>
+ For configurations that cannot be combined, such as settings for <c>logdir</c>
or <c>verbosity</c>, it is up to the user to ensure there are no clashes
when the test specifications are joined. Specifications included with
- the <c>separate</c> option, do not share configuration settings with the
- source specification. This is useful e.g. if there are clashing
- configuration settings in included specifications, making it impossible
- to join them.</p>
+ option <c>separate</c> do not share configuration settings with the
+ source specification. This is useful, for example, if there are clashing
+      configuration settings in included specifications, making them impossible
+      to join.</p>
+
<p>If <c>{merge_tests,true}</c> is set in the source specification
- (which is the default setting), terms in joined specifications will be
+ (which is the default setting), terms in joined specifications are
merged with terms in the source specification (according to the
- description of <c>merge_tests</c> above).</p>
- <p>Note that it is always the <c>merge_tests</c> setting in the source
+ description of <c>merge_tests</c> earlier).</p>
+
+ <p>Notice that it is always the <c>merge_tests</c> setting in the source
specification that is used when joined with other specifications.
- Say e.g. that a source specification A, with tests TA1 and TA2, has
- <c>{merge_tests,false}</c> set, and it includes another specification,
+ Say, for example, that a source specification A, with tests TA1 and TA2, has
+ <c>{merge_tests,false}</c> set, and that it includes another specification,
B, with tests TB1 and TB2, that has <c>{merge_tests,true}</c> set.
- The result will be that the test series: <c>TA1,TA2,merge(TB1,TB2)</c>,
- is executed. The opposite <c>merge_tests</c> settings would result in the
- following the test series: <c>merge(merge(TA1,TA2),TB1,TB2)</c>.</p>
- <p>The <c>specs</c> term may of course be used to nest specifications,
- i.e. have one specification include other specifications, which in turn
- include others, etc.</p>
+ The result is that the test series <c>TA1,TA2,merge(TB1,TB2)</c>
+ is executed. The opposite <c>merge_tests</c> settings would result in
+ the test series <c>merge(merge(TA1,TA2),TB1,TB2)</c>.</p>
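+
+      <p>A sketch of the first scenario (all file and suite names hypothetical):</p>
+      <pre>
+      %% a.spec (source specification)
+      {merge_tests, false}.
+      {specs, join, ["b.spec"]}.
+      {suites, "./t1", [ta1_SUITE]}.   %% test TA1
+      {suites, "./t1", [ta2_SUITE]}.   %% test TA2
+
+      %% b.spec (included specification)
+      {merge_tests, true}.
+      {suites, "./t2", [tb1_SUITE]}.   %% test TB1
+      {suites, "./t2", [tb2_SUITE]}.   %% test TB2
+
+      %% Resulting test series: TA1, TA2, merge(TB1,TB2)</pre>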
+
+ <p>The term <c>specs</c> can be used to nest specifications,
+ that is, have one specification include other specifications, which in turn
+      include others, and so on.</p>
</section>
<section>
- <title>Test case groups</title>
+ <title>Test Case Groups</title>
<p>When a test case group is specified, the resulting test
- executes the <c>init_per_group</c> function, followed by all test
- cases and sub groups (including their configuration functions), and
- finally the <c>end_per_group</c> function. Also if particular
+ executes function <c>init_per_group</c>, followed by all test
+ cases and subgroups (including their configuration functions), and
+ finally function <c>end_per_group</c>. Also, if particular
test cases in a group are specified, <c>init_per_group</c>
- and <c>end_per_group</c> for the group in question are
- called. If a group which is defined (in <c>Suite:group/0</c>) to
- be a sub group of another group, is specified (or if particular test
- cases of a sub group are), Common Test will call the configuration
- functions for the top level groups as well as for the sub group
+ and <c>end_per_group</c>, for the group in question, are
+      called. If a group that is defined (in <c>Suite:groups/0</c>) as
+      a subgroup of another group is specified (or if particular test
+ cases of a subgroup are), <c>Common Test</c> calls the configuration
+ functions for the top-level groups and for the subgroup
in question (making it possible to pass configuration data all
the way from <c>init_per_suite</c> down to the test cases in the
- sub group).</p>
- <p>The test specification utilizes the same mechanism for specifying
- test case groups by means of names and paths, as explained in the
- <seealso marker="run_test_chapter#group_execution">Group Execution</seealso>
- section above, with the addition of the <c>GroupSpec</c> element
- described next.</p>
- <p>The <c>GroupSpec</c> element makes it possible to specify
- group execution properties that will override those in the
- group definition (i.e. in <c>groups/0</c>). Execution properties for
- sub-groups may be overridden as well. This feature makes it possible to
+ subgroup).</p>
+
+ <p>The test specification uses the same mechanism for specifying
+ test case groups through names and paths, as explained in section
+ <seealso marker="run_test_chapter#group_execution">Test Case Group Execution</seealso>,
+ with the addition of element <c>GroupSpec</c>.</p>
+
+ <p>Element <c>GroupSpec</c> makes it possible to specify
+      group execution properties that override those in the
+ group definition (that is, in <c>groups/0</c>). Execution properties for
+ subgroups might be overridden as well. This feature makes it possible to
change properties of groups at the time of execution,
- without even having to edit the test suite. The very same
- feature is available for <c>group</c> elements in the <c>Suite:all/0</c>
- list. Therefore, more detailed documentation, and examples, can be
- found in the <seealso marker="write_test_chapter#test_case_groups">
- Test case groups</seealso> chapter.</p>
+ without having to edit the test suite. The same feature is available for
+ <c>group</c> elements in the <c>Suite:all/0</c> list. For details and examples,
+ see section <seealso marker="write_test_chapter#test_case_groups">
+ Test Case Groups</seealso>.</p>
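+
+      <p>For example, a sketch of group terms, reusing the <c>x_SUITE</c> group
+      definitions from the earlier example (the directory name and the
+      <c>parallel</c> property override are hypothetical):</p>
+      <pre>
+      %% all cases and subgroups under top1
+      {groups, "./tests", x_SUITE, [top1]}.
+
+      %% only tc12 in the group path top1 -> sub12 -> sub121
+      {groups, "./tests", x_SUITE, [[top1,sub12,sub121]], {cases,[tc12]}}.
+
+      %% override the execution properties of top1 at the time of execution
+      {groups, "./tests", x_SUITE, {top1,[parallel]}}.</pre>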
</section>
<section>
- <title>Test specification syntax</title>
-
- <p>Below is the test specification syntax. Test specifications can
- be used to run tests both in a single test host environment and
- in a distributed Common Test environment (Large Scale
- Testing). The node parameters in the <c>init</c> term are only
- relevant in the latter (see the
- <seealso marker="ct_master_chapter#test_specifications">Large
- Scale Testing</seealso> chapter for information). For more information
- about the various terms, please see the corresponding sections in the
- User's Guide, such as e.g. the
- <seealso marker="run_test_chapter#ct_run"><c>ct_run</c>
+ <title>Test Specification Syntax</title>
+
+ <p>Test specifications can be used to run tests both in a single
+ test host environment and in a distributed <c>Common Test</c> environment
+ (Large Scale Testing). The node parameters in term <c>init</c> are only
+ relevant in the latter (see section
+ <seealso marker="ct_master_chapter#test_specifications">Test Specifications</seealso>
+ in Large Scale Testing). For details about the various terms, see the
+ corresponding sections in the User's Guide, for example, the following:
+ </p>
+ <list type="bulleted">
+ <item>The <seealso marker="run_test_chapter#ct_run"><c>ct_run</c>
program</seealso> for an overview of available start flags
- (since most flags have a corresponding configuration term), and
- more detailed explanation of e.g.
- <seealso marker="write_test_chapter#logging">Logging</seealso>
- (for the <c>verbosity</c>, <c>stylesheet</c> and <c>basic_html</c> terms),
- <seealso marker="config_file_chapter#top">External Configuration Data</seealso>
- (for the <c>config</c> and <c>userconfig</c> terms),
- <seealso marker="event_handler_chapter#event_handling">Event
- Handling</seealso> (for the <c>event_handler</c> term),
- <seealso marker="ct_hooks_chapter#installing">Common Test Hooks</seealso>
- (for the <c>ct_hooks</c> term), etc.</p>
- </section>
-
- <p>Config terms:</p>
+ (as most flags have a corresponding configuration term)</item>
+ <item><seealso marker="write_test_chapter#logging">Logging</seealso>
+        (for terms <c>verbosity</c>, <c>stylesheet</c>, and <c>basic_html</c>)</item>
+ <item><seealso marker="config_file_chapter#top">External Configuration Data</seealso>
+ (for terms <c>config</c> and <c>userconfig</c>)</item>
+ <item><seealso marker="event_handler_chapter#event_handling">Event
+        Handling</seealso> (for term <c>event_handler</c>)</item>
+ <item><seealso marker="ct_hooks_chapter#installing">Common Test Hooks</seealso>
+ (for term <c>ct_hooks</c>)</item>
+ </list>
+
+ <p><em>Configuration terms:</em></p>
<pre>
- {merge_tests, Bool}.
-
- {define, Constant, Value}.
-
- {specs, InclSpecsOption, TestSpecs}.
-
- {node, NodeAlias, Node}.
-
- {init, InitOptions}.
- {init, [NodeAlias], InitOptions}.
-
- {label, Label}.
- {label, NodeRefs, Label}.
-
- {verbosity, VerbosityLevels}.
- {verbosity, NodeRefs, VerbosityLevels}.
-
- {stylesheet, CSSFile}.
- {stylesheet, NodeRefs, CSSFile}.
-
- {silent_connections, ConnTypes}.
- {silent_connections, NodeRefs, ConnTypes}.
-
- {multiply_timetraps, N}.
- {multiply_timetraps, NodeRefs, N}.
-
- {scale_timetraps, Bool}.
- {scale_timetraps, NodeRefs, Bool}.
-
- {cover, CoverSpecFile}.
- {cover, NodeRefs, CoverSpecFile}.
-
- {cover_stop, Bool}.
- {cover_stop, NodeRefs, Bool}.
-
- {include, IncludeDirs}.
- {include, NodeRefs, IncludeDirs}.
-
- {auto_compile, Bool},
- {auto_compile, NodeRefs, Bool},
-
- {abort_if_missing_suites, Bool},
- {abort_if_missing_suites, NodeRefs, Bool},
+ {merge_tests, Bool}.
- {config, ConfigFiles}.
- {config, ConfigDir, ConfigBaseNames}.
- {config, NodeRefs, ConfigFiles}.
- {config, NodeRefs, ConfigDir, ConfigBaseNames}.
-
- {userconfig, {CallbackModule, ConfigStrings}}.
- {userconfig, NodeRefs, {CallbackModule, ConfigStrings}}.
-
- {logdir, LogDir}.
- {logdir, NodeRefs, LogDir}.
-
- {logopts, LogOpts}.
- {logopts, NodeRefs, LogOpts}.
-
- {create_priv_dir, PrivDirOption}.
- {create_priv_dir, NodeRefs, PrivDirOption}.
-
- {event_handler, EventHandlers}.
- {event_handler, NodeRefs, EventHandlers}.
- {event_handler, EventHandlers, InitArgs}.
- {event_handler, NodeRefs, EventHandlers, InitArgs}.
-
- {ct_hooks, CTHModules}.
- {ct_hooks, NodeRefs, CTHModules}.
-
- {enable_builtin_hooks, Bool}.
-
- {basic_html, Bool}.
- {basic_html, NodeRefs, Bool}.
-
- {release_shell, Bool}.</pre>
+ {define, Constant, Value}.
+
+ {specs, InclSpecsOption, TestSpecs}.
+
+ {node, NodeAlias, Node}.
+
+ {init, InitOptions}.
+ {init, [NodeAlias], InitOptions}.
+
+ {label, Label}.
+ {label, NodeRefs, Label}.
+
+ {verbosity, VerbosityLevels}.
+ {verbosity, NodeRefs, VerbosityLevels}.
+
+ {stylesheet, CSSFile}.
+ {stylesheet, NodeRefs, CSSFile}.
+
+ {silent_connections, ConnTypes}.
+ {silent_connections, NodeRefs, ConnTypes}.
+
+ {multiply_timetraps, N}.
+ {multiply_timetraps, NodeRefs, N}.
+
+ {scale_timetraps, Bool}.
+ {scale_timetraps, NodeRefs, Bool}.
+
+ {cover, CoverSpecFile}.
+ {cover, NodeRefs, CoverSpecFile}.
+
+ {cover_stop, Bool}.
+ {cover_stop, NodeRefs, Bool}.
+
+ {include, IncludeDirs}.
+ {include, NodeRefs, IncludeDirs}.
+
+ {auto_compile, Bool},
+ {auto_compile, NodeRefs, Bool},
+
+ {abort_if_missing_suites, Bool},
+ {abort_if_missing_suites, NodeRefs, Bool},
+
+ {config, ConfigFiles}.
+ {config, ConfigDir, ConfigBaseNames}.
+ {config, NodeRefs, ConfigFiles}.
+ {config, NodeRefs, ConfigDir, ConfigBaseNames}.
+
+ {userconfig, {CallbackModule, ConfigStrings}}.
+ {userconfig, NodeRefs, {CallbackModule, ConfigStrings}}.
+
+ {logdir, LogDir}.
+ {logdir, NodeRefs, LogDir}.
+
+ {logopts, LogOpts}.
+ {logopts, NodeRefs, LogOpts}.
+
+ {create_priv_dir, PrivDirOption}.
+ {create_priv_dir, NodeRefs, PrivDirOption}.
+
+ {event_handler, EventHandlers}.
+ {event_handler, NodeRefs, EventHandlers}.
+ {event_handler, EventHandlers, InitArgs}.
+ {event_handler, NodeRefs, EventHandlers, InitArgs}.
+
+ {ct_hooks, CTHModules}.
+ {ct_hooks, NodeRefs, CTHModules}.
+
+ {enable_builtin_hooks, Bool}.
+
+ {basic_html, Bool}.
+ {basic_html, NodeRefs, Bool}.
+
+ {release_shell, Bool}.</pre>
- <p>Test terms:</p>
+ <p><em>Test terms:</em></p>
<pre>
- {suites, Dir, Suites}.
- {suites, NodeRefs, Dir, Suites}.
-
- {groups, Dir, Suite, Groups}.
- {groups, NodeRefs, Dir, Suite, Groups}.
-
- {groups, Dir, Suite, Groups, {cases,Cases}}.
- {groups, NodeRefs, Dir, Suite, Groups, {cases,Cases}}.
-
- {cases, Dir, Suite, Cases}.
- {cases, NodeRefs, Dir, Suite, Cases}.
-
- {skip_suites, Dir, Suites, Comment}.
- {skip_suites, NodeRefs, Dir, Suites, Comment}.
-
- {skip_groups, Dir, Suite, GroupNames, Comment}.
- {skip_groups, NodeRefs, Dir, Suite, GroupNames, Comment}.
-
- {skip_cases, Dir, Suite, Cases, Comment}.
- {skip_cases, NodeRefs, Dir, Suite, Cases, Comment}.</pre>
-
- <p>Types:</p>
+ {suites, Dir, Suites}.
+ {suites, NodeRefs, Dir, Suites}.
+
+ {groups, Dir, Suite, Groups}.
+ {groups, NodeRefs, Dir, Suite, Groups}.
+
+ {groups, Dir, Suite, Groups, {cases,Cases}}.
+ {groups, NodeRefs, Dir, Suite, Groups, {cases,Cases}}.
+
+ {cases, Dir, Suite, Cases}.
+ {cases, NodeRefs, Dir, Suite, Cases}.
+
+ {skip_suites, Dir, Suites, Comment}.
+ {skip_suites, NodeRefs, Dir, Suites, Comment}.
+
+ {skip_groups, Dir, Suite, GroupNames, Comment}.
+ {skip_groups, NodeRefs, Dir, Suite, GroupNames, Comment}.
+
+ {skip_cases, Dir, Suite, Cases, Comment}.
+ {skip_cases, NodeRefs, Dir, Suite, Cases, Comment}.</pre>
+
+ <marker id="types"></marker>
+ <p><em>Types:</em></p>
<pre>
- Bool = true | false
- Constant = atom()
- Value = term()
- InclSpecsOption = join | separate
- TestSpecs = string() | [string()]
- NodeAlias = atom()
- Node = node()
- NodeRef = NodeAlias | Node | master
- NodeRefs = all_nodes | [NodeRef] | NodeRef
- InitOptions = term()
- Label = atom() | string()
- VerbosityLevels = integer() | [{Category,integer()}]
- Category = atom()
- CSSFile = string()
- ConnTypes = all | [atom()]
- N = integer()
- CoverSpecFile = string()
- IncludeDirs = string() | [string()]
- ConfigFiles = string() | [string()]
- ConfigDir = string()
- ConfigBaseNames = string() | [string()]
- CallbackModule = atom()
- ConfigStrings = string() | [string()]
- LogDir = string()
- LogOpts = [term()]
- PrivDirOption = auto_per_run | auto_per_tc | manual_per_tc
- EventHandlers = atom() | [atom()]
- InitArgs = [term()]
- CTHModules = [CTHModule |
- {CTHModule, CTHInitArgs} |
- {CTHModule, CTHInitArgs, CTHPriority}]
- CTHModule = atom()
- CTHInitArgs = term()
- Dir = string()
- Suites = atom() | [atom()] | all
- Suite = atom()
- Groups = GroupPath | [GroupPath] | GroupSpec | [GroupSpec] | all
- GroupPath = [GroupName]
- GroupSpec = GroupName | {GroupName,Properties} | {GroupName,Properties,GroupSpec}
- GroupName = atom()
- GroupNames = GroupName | [GroupName]
- Cases = atom() | [atom()] | all
- Comment = string() | ""</pre>
-
- <section>
- <p>The difference between the <c>config</c> terms above, is that with
+ Bool = true | false
+ Constant = atom()
+ Value = term()
+ InclSpecsOption = join | separate
+ TestSpecs = string() | [string()]
+ NodeAlias = atom()
+ Node = node()
+ NodeRef = NodeAlias | Node | master
+ NodeRefs = all_nodes | [NodeRef] | NodeRef
+ InitOptions = term()
+ Label = atom() | string()
+ VerbosityLevels = integer() | [{Category,integer()}]
+ Category = atom()
+ CSSFile = string()
+ ConnTypes = all | [atom()]
+ N = integer()
+ CoverSpecFile = string()
+ IncludeDirs = string() | [string()]
+ ConfigFiles = string() | [string()]
+ ConfigDir = string()
+ ConfigBaseNames = string() | [string()]
+ CallbackModule = atom()
+ ConfigStrings = string() | [string()]
+ LogDir = string()
+ LogOpts = [term()]
+ PrivDirOption = auto_per_run | auto_per_tc | manual_per_tc
+ EventHandlers = atom() | [atom()]
+ InitArgs = [term()]
+ CTHModules = [CTHModule |
+ {CTHModule, CTHInitArgs} |
+ {CTHModule, CTHInitArgs, CTHPriority}]
+ CTHModule = atom()
+ CTHInitArgs = term()
+ Dir = string()
+ Suites = atom() | [atom()] | all
+ Suite = atom()
+ Groups = GroupPath | [GroupPath] | GroupSpec | [GroupSpec] | all
+ GroupPath = [GroupName]
+ GroupSpec = GroupName | {GroupName,Properties} | {GroupName,Properties,GroupSpec}
+ GroupName = atom()
+ GroupNames = GroupName | [GroupName]
+ Cases = atom() | [atom()] | all
+ Comment = string() | ""</pre>
+
+ <p>The difference between the <c>config</c> terms above is that with
<c>ConfigDir</c>, <c>ConfigBaseNames</c> is a list of base names,
- i.e. without directory paths. <c>ConfigFiles</c> must be full names,
- including paths. E.g, these two terms have the same meaning:</p>
+ that is, without directory paths. <c>ConfigFiles</c> must be full names,
+ including paths. For example, the following two terms have the same meaning:</p>
<pre>
- {config, ["/home/testuser/tests/config/nodeA.cfg",
- "/home/testuser/tests/config/nodeB.cfg"]}.
-
- {config, "/home/testuser/tests/config", ["nodeA.cfg","nodeB.cfg"]}.</pre>
+ {config, ["/home/testuser/tests/config/nodeA.cfg",
+ "/home/testuser/tests/config/nodeB.cfg"]}.
+
+ {config, "/home/testuser/tests/config", ["nodeA.cfg","nodeB.cfg"]}.</pre>
- <note><p>Any relative paths specified in the test specification, will be
- relative to the directory which contains the test specification file, if
+      <note><p>Any relative paths specified in the test specification are
+      relative to the directory containing the test specification file if
<c>ct_run -spec TestSpecFile ...</c> or
<c>ct:run_test([{spec,TestSpecFile},...])</c>
- executes the test. The path will be relative to the top level log directory, if
+ executes the test.</p>
+ <p>The path is relative to the top-level log directory if
<c>ct:run_testspec(TestSpec)</c> executes the test.</p></note>
</section>
<section>
<title>Constants</title>
- <p>The <c>define</c> term introduces a constant, which is used to
- replace the name <c>Constant</c> with <c>Value</c>, wherever it's found in
- the test specification. This replacement happens during an initial iteration
- through the test specification. Constants may be used anywhere in the test
- specification, e.g. in arbitrary lists and tuples, and even in strings
- and inside the value part of other constant definitions! A constant can
+ <p>The term <c>define</c> introduces a constant that is used to
+ replace the name <c>Constant</c> with <c>Value</c>, wherever it is found in
+ the test specification. This replacement occurs during an initial iteration
+ through the test specification. Constants can be used anywhere in the test
+ specification, for example, in any lists and tuples, and even in strings
+ and inside the value part of other constant definitions. A constant can
also be part of a node name, but that is the only place where a constant
can be part of an atom.</p>
<note><p>For the sake of readability, the name of the constant must always
- begin with an upper case letter, or a <c>$</c>, <c>?</c>, or <c>_</c>.
- This also means that it must always be single quoted (obviously, since
- the constant name is actually an atom, not text).</p></note>
+ begin with an uppercase letter, or a <c>$</c>, <c>?</c>, or <c>_</c>.
+ This means that it must always be single quoted (as the constant name is
+ an atom, not text).</p></note>
<p>The main benefit of constants is that they can be used to reduce the size
- (and avoid repetition) of long strings, such as file paths. Compare these
- terms:</p>
+ (and avoid repetition) of long strings, such as file paths.</p>
+ <p><em>Examples:</em></p>
<pre>
- %% 1a. no constant
- {config, "/home/testuser/tests/config", ["nodeA.cfg","nodeB.cfg"]}.
- {suites, "/home/testuser/tests/suites", all}.
-
- %% 1b. with constant
- {define, 'TESTDIR', "/home/testuser/tests"}.
- {config, "'TESTDIR'/config", ["nodeA.cfg","nodeB.cfg"]}.
- {suites, "'TESTDIR'/suites", all}.
-
- %% 2a. no constants
- {config, [testnode@host1, testnode@host2], "../config", ["nodeA.cfg","nodeB.cfg"]}.
- {suites, [testnode@host1, testnode@host2], "../suites", [x_SUITE, y_SUITE]}.
-
- %% 2b. with constants
- {define, 'NODE', testnode}.
- {define, 'NODES', ['NODE'@host1, 'NODE'@host2]}.
- {config, 'NODES', "../config", ["nodeA.cfg","nodeB.cfg"]}.
- {suites, 'NODES', "../suites", [x_SUITE, y_SUITE]}.</pre>
+ %% 1a. no constant
+ {config, "/home/testuser/tests/config", ["nodeA.cfg","nodeB.cfg"]}.
+ {suites, "/home/testuser/tests/suites", all}.
+
+ %% 1b. with constant
+ {define, 'TESTDIR', "/home/testuser/tests"}.
+ {config, "'TESTDIR'/config", ["nodeA.cfg","nodeB.cfg"]}.
+ {suites, "'TESTDIR'/suites", all}.
+
+ %% 2a. no constants
+ {config, [testnode@host1, testnode@host2], "../config", ["nodeA.cfg","nodeB.cfg"]}.
+ {suites, [testnode@host1, testnode@host2], "../suites", [x_SUITE, y_SUITE]}.
+
+ %% 2b. with constants
+ {define, 'NODE', testnode}.
+ {define, 'NODES', ['NODE'@host1, 'NODE'@host2]}.
+ {config, 'NODES', "../config", ["nodeA.cfg","nodeB.cfg"]}.
+ {suites, 'NODES', "../suites", [x_SUITE, y_SUITE]}.</pre>
<p>Constants make the test specification term <c>alias</c>, in previous
- versions of Common Test, redundant. This term has been deprecated but will
- remain supported in upcoming Common Test releases. Replacing <c>alias</c>
- terms with <c>define</c> is strongly recommended though! Here's an example
- of such a replacement:</p>
+ versions of <c>Common Test</c>, redundant. This term is deprecated but
+      remains supported in upcoming <c>Common Test</c> releases. However, replacing
+      <c>alias</c> terms with <c>define</c> is strongly recommended. An example
+      of such a replacement follows:</p>
<pre>
- %% using the old alias term
- {config, "/home/testuser/tests/config/nodeA.cfg"}.
- {alias, suite_dir, "/home/testuser/tests/suites"}.
- {groups, suite_dir, x_SUITE, group1}.
-
- %% replacing with constants
- {define, 'TestDir', "/home/testuser/tests"}.
- {define, 'CfgDir', "'TestDir'/config"}.
- {define, 'SuiteDir', "'TestDir'/suites"}.
- {config, 'CfgDir', "nodeA.cfg"}.
- {groups, 'SuiteDir', x_SUITE, group1}.</pre>
+ %% using the old alias term
+ {config, "/home/testuser/tests/config/nodeA.cfg"}.
+ {alias, suite_dir, "/home/testuser/tests/suites"}.
+ {groups, suite_dir, x_SUITE, group1}.
+
+ %% replacing with constants
+ {define, 'TestDir', "/home/testuser/tests"}.
+ {define, 'CfgDir', "'TestDir'/config"}.
+ {define, 'SuiteDir', "'TestDir'/suites"}.
+ {config, 'CfgDir', "nodeA.cfg"}.
+ {groups, 'SuiteDir', x_SUITE, group1}.</pre>
- <p>Actually, constants could well replace the <c>node</c> term too, but
- this still has declarative value, mainly when used in combination
- with <c>NodeRefs == all_nodes</c> (see types above).</p>
+      <p>Constants can also replace term <c>node</c>, but
+      this term still has declarative value, mainly when used in combination
+ with <c>NodeRefs == all_nodes</c>
+ (see <seealso marker="#types">Types</seealso>).</p>
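+
+      <p>For example, a sketch (with hypothetical node and file names) of how node
+      aliases declared with term <c>node</c> can be used by other terms:</p>
+      <pre>
+      {node, node1, testnode@host1}.
+      {node, node2, testnode@host2}.
+      {logdir, all_nodes, "../logs"}.
+      {config, [node1,node2], "../config", ["nodeA.cfg","nodeB.cfg"]}.</pre>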
</section>
<section>
@@ -955,104 +1049,104 @@
<p>Here follows a simple test specification example:</p>
<pre>
- {define, 'Top', "/home/test"}.
- {define, 'T1', "'Top'/t1"}.
- {define, 'T2', "'Top'/t2"}.
- {define, 'T3', "'Top'/t3"}.
- {define, 'CfgFile', "config.cfg"}.
-
- {logdir, "'Top'/logs"}.
-
- {config, ["'T1'/'CfgFile'", "'T2'/'CfgFile'", "'T3'/'CfgFile'"]}.
-
- {suites, 'T1', all}.
- {skip_suites, 'T1', [t1B_SUITE,t1D_SUITE], "Not implemented"}.
- {skip_cases, 'T1', t1A_SUITE, [test3,test4], "Irrelevant"}.
- {skip_cases, 'T1', t1C_SUITE, [test1], "Ignore"}.
-
- {suites, 'T2', [t2B_SUITE,t2C_SUITE]}.
- {cases, 'T2', t2A_SUITE, [test4,test1,test7]}.
-
- {skip_suites, 'T3', all, "Not implemented"}.</pre>
+ {define, 'Top', "/home/test"}.
+ {define, 'T1', "'Top'/t1"}.
+ {define, 'T2', "'Top'/t2"}.
+ {define, 'T3', "'Top'/t3"}.
+ {define, 'CfgFile', "config.cfg"}.
+
+ {logdir, "'Top'/logs"}.
+
+ {config, ["'T1'/'CfgFile'", "'T2'/'CfgFile'", "'T3'/'CfgFile'"]}.
+
+ {suites, 'T1', all}.
+ {skip_suites, 'T1', [t1B_SUITE,t1D_SUITE], "Not implemented"}.
+ {skip_cases, 'T1', t1A_SUITE, [test3,test4], "Irrelevant"}.
+ {skip_cases, 'T1', t1C_SUITE, [test1], "Ignore"}.
+
+ {suites, 'T2', [t2B_SUITE,t2C_SUITE]}.
+ {cases, 'T2', t2A_SUITE, [test4,test1,test7]}.
+
+ {skip_suites, 'T3', all, "Not implemented"}.</pre>
<p>The example specifies the following:</p>
- <list>
- <item>The specified logdir directory will be used for storing
+ <list type="bulleted">
+ <item>The specified <c>logdir</c> directory is used for storing
the HTML log files (in subdirectories tagged with node name,
- date and time).</item>
- <item>The variables in the specified test system config files will be
+ date, and time).</item>
+ <item>The variables in the specified test system configuration files are
imported for the test.</item>
- <item>The first test to run includes all suites for system t1. Excluded from
- the test are however the t1B and t1D suites. Also test cases test3 and
- test4 in t1A as well as the test1 case in t1C are excluded from
- the test.</item>
- <item>Secondly, the test for system t2 should run. The included suites are
- t2B and t2C. Included are also test cases test4, test1 and test7 in suite
- t2A. Note that the test cases will be executed in the specified order.</item>
- <item>Lastly, all suites for systems t3 are to be completely skipped and this
- should be explicitly noted in the log files.</item>
+ <item>The first test to run includes all suites for system <c>t1</c>.
+ Suites <c>t1B</c> and <c>t1D</c> are excluded from the test. Test cases
+          <c>test3</c> and <c>test4</c> in <c>t1A</c>, and test case <c>test1</c> in <c>t1C</c>,
+ are also excluded from the test.</item>
+ <item>The second test to run is for system <c>t2</c>. The included suites are
+ <c>t2B</c> and <c>t2C</c>. Test cases <c>test4</c>, <c>test1</c>, and <c>test7</c> in suite
+ <c>t2A</c> are also included. The test cases are executed in the specified order.</item>
+ <item>The last test to run is for system <c>t3</c>. Here, all suites are skipped and this
+ is explicitly noted in the log files.</item>
</list>
</section>
<section>
- <title>The init term</title>
- <p>With the <c>init</c> term it's possible to specify initialization options
- for nodes defined in the test specification. Currently, there are options
- to start the node and/or to evaluate any function on the node.
- See the <seealso marker="ct_master_chapter#ct_slave">Automatic startup of
- the test target nodes</seealso> chapter for details.</p>
+ <title>The init Term</title>
+ <p>With term <c>init</c> it is possible to specify initialization options
+ for nodes defined in the test specification. There are options
+ to start the node and to evaluate any function on the node.
+ For details, see section <seealso marker="ct_master_chapter#ct_slave">Automatic Startup of
+      Test Target Nodes</seealso> in Using Common Test for Large Scale Testing.</p>
</section>
<section>
- <title>User specific terms</title>
- <p>It is possible for the user to provide a test specification that
- includes (for Common Test) unrecognizable terms. If this is desired,
- the <c>-allow_user_terms</c> flag should be used when starting tests with
- <c>ct_run</c>. This forces Common Test to ignore unrecognizable terms.
- Note that in this mode, Common Test is not able to check the specification
- for errors as efficiently as if the scanner runs in default mode.
+ <title>User-Specific Terms</title>
+ <p>The user can provide a test specification including (for <c>Common Test</c>)
+ unrecognizable terms. If this is desired, use flag <c>-allow_user_terms</c>
+ when starting tests with <c>ct_run</c>. This forces <c>Common Test</c> to ignore
+ unrecognizable terms. In this mode, <c>Common Test</c> is not able to check the
+ specification for errors as efficiently as if the scanner runs in default mode.
If <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso> is used
- for starting the tests, the relaxed scanner
- mode is enabled by means of the tuple: <c>{allow_user_terms,true}</c></p>
+ for starting the tests, the relaxed scanner mode is enabled by tuple
+ <c>{allow_user_terms,true}</c>.</p>
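+
+      <p>For example (the specification file name is hypothetical):</p>
+      <pre>
+      $ ct_run -spec my_tests.spec -allow_user_terms
+      1> ct:run_test([{spec,"my_tests.spec"}, {allow_user_terms,true}]).</pre>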
</section>
<section>
- <title>Reading test specification terms</title>
- <p>It's possible to look up terms in the current test specification
- (i.e. the spec that's been used to configure and run the current test).
- The function <c>get_testspec_terms()</c> returns a list of all test spec
- terms (both config- and test terms) and <c>get_testspec_terms(Tags)</c>
- returns the term (or a list of terms) matching the tag (or tags) in
- <c>Tags</c>.</p>
+ <title>Reading Test Specification Terms</title>
+ <p>Terms in the current test specification
+ (that is, the specification that has been used to configure and run the current test)
+ can be looked up.
+ The function <seealso marker="ct#get_testspec_terms-0"><c>get_testspec_terms()</c></seealso>
+ returns a list of all test specification terms (both configuration terms and test terms),
+ and <c>get_testspec_terms(Tags)</c> returns the term (or a list of terms) matching the
+ tag (or tags) in <c>Tags</c>.</p>
<p>For example, in the test specification:</p>
<pre>
- ...
- {label, my_server_smoke_test}.
- {config, "../../my_server_setup.cfg"}.
- {config, "../../my_server_interface.cfg"}.
- ...</pre>
- <p>And in e.g. a test suite or a CT hook function:</p>
+ ...
+ {label, my_server_smoke_test}.
+ {config, "../../my_server_setup.cfg"}.
+ {config, "../../my_server_interface.cfg"}.
+ ...</pre>
+      <p>And, for example, in a test suite or a <c>Common Test Hook</c> function:</p>
<pre>
- ...
- [{label,[{_Node,TestType}]}, {config,CfgFiles}] =
- ct:get_testspec_terms([label,config]),
+ ...
+ [{label,[{_Node,TestType}]}, {config,CfgFiles}] =
+ ct:get_testspec_terms([label,config]),
- [verify_my_server_cfg(TestType, CfgFile) || {Node,CfgFile} &lt;- CfgFiles,
- Node == node()];
- ...</pre>
+ [verify_my_server_cfg(TestType, CfgFile) || {Node,CfgFile} &lt;- CfgFiles,
+ Node == node()];
+ ...</pre>
</section>
</section>
<section>
- <title>Running tests from the Web based GUI</title>
+ <title>Running Tests from the Web-Based GUI</title>
- <p>The web based GUI, VTS, is started with the
+ <p>The web-based GUI, Virtual Test Server (VTS), is started with the
<seealso marker="run_test_chapter#ct_run"><c>ct_run</c></seealso>
- program. From the GUI you can load config files, and select
- directories, suites and cases to run. You can also state the
- config files, directories, suites and cases on the command line
- when starting the web based GUI.
+ program. From the GUI, you can load configuration files and select
+ directories, suites, and cases to run. You can also state the
+ configuration files, directories, suites, and cases on the command line
+ when starting the web-based GUI.
</p>
-
- <list>
+ <p><em>Examples:</em></p>
+ <list type="bulleted">
<item><c>ct_run -vts</c></item>
<item><c><![CDATA[ct_run -vts -config <configfilename>]]></c></item>
<item><c><![CDATA[ct_run -vts -config <configfilename> -suite <suitewithfullpath>
@@ -1062,454 +1156,464 @@
<p>From the GUI you can run tests and view the result and the logs.
</p>
- <p>Note that <c>ct_run -vts</c> will try to open the Common Test start
- page in an existing web browser window or start the browser if it is
- not running. Which browser should be started may be specified with
+ <p><c>ct_run -vts</c> tries to open the <c>Common Test</c> start
+ page in an existing web browser window, or start the browser if it is
+ not running. Which browser to start can be specified with
the browser start command option:</p>
<p><c><![CDATA[ct_run -vts -browser <browser_start_cmd>]]></c></p>
- <p>Example:</p>
+ <p><em>Example:</em></p>
<p><c><![CDATA[$ ct_run -vts -browser 'firefox&']]></c></p>
- <p>Note that the browser must run as a separate OS process or VTS will hang!</p>
- <p>If no specific browser start command is specified, Firefox will
- be the default browser on Unix platforms and Internet Explorer on Windows.
- If Common Test fails to start a browser automatically, or <c>'none'</c> is
- specified as the value for -browser (i.e. <c>-browser none</c>), start your
- favourite browser manually and type in the URL that Common Test
+
+ <note><p>The browser must run as a separate OS process, otherwise VTS hangs.</p></note>
+
+ <p>If no specific browser start command is specified, Firefox is
+ the default browser on Unix platforms, and Internet Explorer on Windows.
+ If <c>Common Test</c> fails to start a browser automatically, or <c>none</c> is
+ specified as the value for <c>-browser</c> (that is, <c>-browser none</c>), start your
+ favorite browser manually and type the URL that <c>Common Test</c>
displays in the shell.</p>
</section>
<section>
<marker id="log_files"></marker>
- <title>Log files</title>
+ <title>Log Files</title>
<p>As the execution of the test suites proceed, events are logged in
- four different ways:</p>
+ the following ways:</p>
- <list>
- <item>Text to the operator's console.</item>
- <item>Suite related information is sent to the major log file.</item>
- <item>Case related information is sent to the minor log file.</item>
- <item>The HTML overview log file gets updated with test results.</item>
+ <list type="bulleted">
+ <item>Text to the operator console.</item>
+ <item>Suite-related information is sent to the major log file.</item>
+ <item>Case-related information is sent to the minor log file.</item>
+ <item>The HTML overview log file is updated with test results.</item>
<item>A link to all runs executed from a certain directory is written in
- the log named "all_runs.html" and direct links to all tests (the
- latest results) are written to the top level "index.html".</item>
+ the log named <c>all_runs.html</c> and direct links to all tests (the
+ latest results) are written to the top-level <c>index.html</c>.</item>
</list>
- <p>Typically the operator, who may run hundreds or thousands of
- test cases, doesn't want to fill the console with details
- about, or printouts from, the specific test cases. By default, the
- operator will only see:</p>
+ <p>Typically the operator, possibly running hundreds or thousands of
+ test cases, does not want to fill the console with details
+ about, or printouts from, specific test cases. By default, the
+ operator only sees the following:</p>
- <list>
+ <list type="bulleted">
<item>A confirmation that the test has started and information about how
- many test cases will be executed totally.</item>
+ many test cases are executed in total.</item>
<item>A small note about each failed test case.</item>
<item>A summary of all the run test cases.</item>
- <item>A confirmation that the test run is complete.</item>
- <item>Some special information like error reports and progress
- reports, printouts written with erlang:display/1, or io:format/3
+ <item>A confirmation when the test run is complete.</item>
+ <item>Some special information, such as error reports, progress
+ reports, and printouts written with <c>erlang:display/1</c>, or <c>io:format/3</c>
specifically addressed to a receiver other than <c>standard_io</c>
- (e.g. the default group leader process 'user').</item>
+ (for example, the default group leader process <c>user</c>).</item>
</list>
- <p>If/when the operator wants to dig deeper into the general results, or
- the result of a specific test case, he should do so by
- following the links in the HTML presentation and take a look in the
- major or minor log files. The "all_runs.html" page is a practical
- starting point usually. It's located in <c>logdir</c> and contains
- a link to each test run including a quick overview (date and time,
- node name, number of tests, test names and test result totals).</p>
+ <p>To dig deeper into the general results, or into
+ the result of a specific test case, the operator can follow
+ the links in the HTML presentation and read the
+ major or minor log files. The <c>all_runs.html</c> page is a good
+ starting point. It is located in <c>logdir</c> and contains
+ a link to each test run, including a quick overview (with date and time,
+ node name, number of tests, test names, and test result totals).</p>
- <p>An "index.html" page is written for each test run (i.e. stored in
- the "ct_run" directory tagged with node name, date and time). This
- file gives a short overview of all individual tests performed in the
- same test run. The test names follow this convention:</p>
- <list>
- <item><em>TopLevelDir.TestDir</em> (all suites in TestDir executed)</item>
- <item><em>TopLevelDir.TestDir:suites</em> (specific suites were executed)</item>
- <item><em>TopLevelDir.TestDir.Suite</em> (all cases in Suite executed)</item>
- <item><em>TopLevelDir.TestDir.Suite:cases</em> (specific test cases were executed)</item>
- <item><em>TopLevelDir.TestDir.Suite.Case</em> (only Case was executed)</item>
+ <p>An "index.html" page is written for each test run (that is, stored in
+ the <c>ct_run</c> directory tagged with node name, date, and time). This
+ file provides an overview of all individual tests performed in the
+ same test run. The test names follow the following convention:</p>
+ <list type="bulleted">
+ <item><c>TopLevelDir.TestDir</c> (all suites in <c>TestDir</c> executed)</item>
+ <item><c>TopLevelDir.TestDir:suites</c> (specific suites executed)</item>
+ <item><c>TopLevelDir.TestDir.Suite</c> (all cases in <c>Suite</c> executed)</item>
+ <item><c>TopLevelDir.TestDir.Suite:cases</c> (specific test cases executed)</item>
+ <item><c>TopLevelDir.TestDir.Suite.Case</c> (only <c>Case</c> executed)</item>
</list>
- <p>On the test run index page there is a link to the Common Test
+ <p>The "test run index" page includes a link to the <c>Common Test</c>
Framework Log file in which information about imported
configuration data and general test progress is written. This
log file is useful to get snapshot information about the test
- run during execution. It can also be very helpful when
+ run during execution. It can also be helpful when
analyzing test results or debugging test suites.</p>
- <p>On the test run index page it is noted if a test has missing
- suites (i.e. suites that Common Test has failed to
+ <p>The "test run index" page indicates if a test has missing
+ suites (that is, suites that <c>Common Test</c> failed to
compile). Names of the missing suites can be found in the
- Common Test Framework Log file.</p>
+ <c>Common Test</c> Framework Log file.</p>
<p>The major log file shows a detailed report of the test run. It
includes test suite and test case names, execution time, the
- exact reason for failures etc. The information is available in both
+ exact reason for failures, and so on. The information is available in both
a file with textual and with HTML representation. The HTML file shows a
- summary which gives a good overview of the test run. It also has links
+ summary that gives a good overview of the test run. It also has links
to each individual test case log file for quick viewing with an HTML
browser.</p>
<p>The minor log files contain full details of every single test
- case, each one in a separate file. This way, it should be
+ case, each in a separate file. This way, it is
straightforward to compare the latest results to that of previous
- test runs, even if the set of test cases changes. If SASL is running,
- its logs will also be printed to the current minor log file by the
+ test runs, even if the set of test cases changes. If application <c>SASL</c>
+ is running, its logs are also printed to the current minor log file by the
<seealso marker="common_test:ct_hooks_chapter#builtin_cths">
cth_log_redirect built-in hook</seealso>.
</p>
- <p>The full name of the minor log file (i.e. the name of the file
+ <p>The full name of the minor log file (that is, the name of the file
including the absolute directory path) can be read during execution
- of the test case. It comes as value in the tuple
+ of the test case. It comes as value in tuple
<c>{tc_logfile,LogFileName}</c> in the <c>Config</c> list (which means it
- can also be read by a pre- or post Common Test hook function). Also,
+ can also be read by a pre- or post <c>Common Test Hook</c> function). Also,
at the start of a test case, this data is sent with an event
- to any installed event handler. Please see the
- <seealso marker="event_handler_chapter#event_handling">Event Handling</seealso>
- chapter for details.
+ to any installed event handler. For details, see section
+ <seealso marker="event_handler_chapter#event_handling">Event Handling</seealso>.
</p>
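+
+ <p>A minimal sketch of reading this value from within a test case
+  (the log printout is only an illustration):</p>
+ <pre>
+  my_testcase(Config) ->
+      %% the minor (test case) log file for this test case
+      LogFile = proplists:get_value(tc_logfile, Config),
+      ct:log("Minor log file: ~p", [LogFile]),
+      ...</pre>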
-
- <p>Which information goes where is user configurable via the
- test server controller. Three threshold values determine what
- comes out on screen, and in the major or minor log files. See
- the OTP Test Server manual for information. The contents that
- goes to the HTML log file is fixed however and cannot be altered.</p>
-
- <p>The log files are written continously during a test run and links are
- always created initially when a test starts. This makes it possible
- to follow test progress simply by refreshing pages in the HTML browser.
+
+ <p>The log files are written continuously during a test run and links are
+ always created initially when a test starts. The test progress can therefore
+ be followed simply by refreshing pages in the HTML browser.
Statistics totals are not presented until a test is complete however.</p>
<section>
<marker id="logopts"></marker>
- <title>Log options</title>
- <p>With the <c>logopts</c> start flag, it's possible to specify
- options that modify some aspects of the logging behaviour.
- Currently, the following options are available:</p>
- <list>
- <item><c>no_src</c></item>
- <item><c>no_nl</c></item>
- </list>
- <p>With <c>no_src</c>, the html version of the test suite source
- code will not be generated during the test run (and consequently
- not be available in the log file system).</p>
- <p>With <c>no_nl</c>, Common Test will not add a newline character
- (\n) to the end of an output string that it receives from a call to e.g.
- <c>io:format/2</c>, and which it prints to the test case log.</p>
+ <title>Log Options</title>
+ <p>With start flag <c>logopts</c>, options that modify some aspects
+ of the logging behavior can be specified.
+ The following options are available:</p>
+ <taglist>
+ <tag><c>no_src</c></tag>
+ <item><p>The HTML version of the test suite source code is not
+ generated during the test run (and is consequently not available
+ in the log file system).</p></item>
+ <tag><c>no_nl</c></tag>
+ <item><p><c>Common Test</c> does not add a newline character (<c>\n</c>)
+ to the end of an output string that it receives from a call to, for example,
+ <c>io:format/2</c>, and which it prints to the test case log.</p></item>
+ </taglist>
+
<p>For example, if a test is started with:</p>
<p><c>$ ct_run -suite my_SUITE -logopts no_nl</c></p>
<p>then printouts during the test made by successive calls to <c>io:format("x")</c>,
- will appear in the test case log as:</p>
+ appear in the test case log as:</p>
<p><c>xxx</c></p>
- <p>instead of each <c>x</c> printed on a new line, which is the default behaviour.</p>
+ <p>instead of each <c>x</c> printed on a new line, which is the default behavior.</p>
</section>
<section>
<marker id="table_sorting"></marker>
- <title>Sorting HTML table columns</title>
- <p>By clicking the name in the column header of any table (e.g. "Ok", "Case", "Time", etc),
- the table rows are sorted in whatever order makes sense for the type of value (e.g.
- numerical for "Ok" or "Time", and alphabetical for "Case"). The sorting is performed
- by means of JavaScript code, automatically inserted into the HTML log files. Common Test
- uses the <url href="http://jquery.com">jQuery</url> library and the
- <url href="http://tablesorter.com">tablesorter</url> plugin, with customized sorting
- functions, for this implementation.</p>
+ <title>Sorting HTML Table Columns</title>
+ <p>By clicking the name in the column header of any table
+ (for example, "Ok", "Case", "Time", and so on), the table rows are sorted
+ in whatever order makes sense for the type of value (for example,
+ numerical for "Ok" or "Time", and alphabetical for "Case"). The sorting is
+ performed through JavaScript code, automatically inserted into the HTML
+ log files. <c>Common Test</c> uses the <url href="http://jquery.com">jQuery</url>
+ library and the
+ <url href="http://tablesorter.com">tablesorter</url> plugin,
+ with customized sorting functions, for this implementation.</p>
</section>
<section>
<title>The Unexpected I/O Log</title>
- <p>On the test suites overview page you find a link to the Unexpected I/O Log.
- In this log, Common Test saves printouts made with
- <c>ct:log/2</c> and <c>ct:pal/2</c>, as well as captured system error- and
- progress reports, that cannot be associated with particular test cases and
- therefore cannot be written to individual test case log files. This happens e.g.
- if a log printout is made from an external process (not a test case process),
- or if an error- or progress report comes in, during a short interval while Common
- Test is not executing a test case or configuration function, <em>or</em> while
- Common Test is currently executing a parallell test case group.</p>
+ <p>The test suites overview page includes a link to the Unexpected I/O Log.
+ In this log, <c>Common Test</c> saves printouts made with
+ <seealso marker="ct#log-2"><c>ct:log/2</c></seealso> and
+ <seealso marker="ct#pal-2"><c>ct:pal/2</c></seealso>, as well as captured system
+ error- and progress reports, which cannot be associated with particular test cases and
+ therefore cannot be written to individual test case log files. This occurs,
+ for example, if a log printout is made from an external process (not a test
+ case process), <em>or</em> if an error- or progress report comes in, during a short
+ interval while <c>Common Test</c> is not executing a test case or configuration
+ function, <em>or</em> while <c>Common Test</c> is currently executing a parallel
+ test case group.</p>
</section>
<section>
<marker id="pre_post_test_io_log"></marker>
<title>The Pre- and Post Test I/O Log</title>
- <p>On the Common Test Framework Log page you find links to the so called
- Pre- and Post Test I/O Log. In this log, Common Test saves printouts made with
- <c>ct:log/2</c> and <c>ct:pal/2</c>, as well as captured system error-
- and progress reports, that take place before - and after - the actual test run.
+ <p>The <c>Common Test</c> Framework Log page includes links to the
+ Pre- and Post Test I/O Log. In this log, <c>Common Test</c> saves printouts made
+ with <c>ct:log/2</c> and <c>ct:pal/2</c>, as well as captured system error-
+ and progress reports, which take place before, and after, the test run.
Examples of this are printouts from a CT hook init- or terminate function, or
progress reports generated when an OTP application is started from a CT hook
- init function. Another example is an error report generated due to
+ init function. Another example is an error report generated because of
a failure when an external application is stopped from a CT hook terminate function.
All information in these examples ends up in the Pre- and Post Test I/O Log.
For more information on how to synchronize test runs with external user
- applications, please see the
+ applications, see section
<seealso marker="ct_hooks_chapter#synchronizing">Synchronizing</seealso>
- section in the Common Test Hooks chapter.</p>
- <p>Note that logging to file with <c>ct:log/2</c> or <c>ct:pal/2</c>
- only works when Common Test is running. Printouts with <c>ct:pal/2</c>
- are however always displayed on screen.</p>
+ in section Common Test Hooks.</p>
+ <note><p>Logging to file with <c>ct:log/2</c> or <c>ct:pal/2</c>
+ only works when <c>Common Test</c> is running. Printouts with <c>ct:pal/2</c>
+ are however always displayed on screen.</p></note>
</section>
</section>
<section>
<marker id="html_stylesheet"></marker>
<title>HTML Style Sheets</title>
- <p>Common Test uses an HTML Style Sheet (CSS file) to control the look of
- the HTML log files generated during test runs. If, for some reason, the
- log files are not displayed correctly in the browser of your
- choice, or you prefer a more primitive ("pre Common Test v1.6") look
- of the logs, use the start flag/option:</p>
- <pre>basic_html</pre>
- <p>This disables the use of Style Sheets, as well as JavaScripts (see
- table sorting above).</p>
+ <p><c>Common Test</c> uses an HTML Style Sheet (CSS file) to control the look of
+ the HTML log files generated during test runs. If the log files are not
+ displayed correctly in the browser of your choice, or you prefer a more
+ primitive ("pre <c>Common Test</c> v1.6") look of the logs, use the start
+ flag/option:</p>
+ <pre>
+ basic_html</pre>
+ <p>This disables the use of style sheets and JavaScripts (see
+ <seealso marker="#table_sorting">Sorting HTML Table Columns</seealso>).</p>
- <p>Common Test includes an <em>optional</em> feature to allow
+ <p><c>Common Test</c> includes an <em>optional</em> feature to allow
user HTML style sheets for customizing printouts. The
functions in <c>ct</c> that print to a test case HTML log
file (<c>log/3</c> and <c>pal/3</c>) accept <c>Category</c>
- as first argument. With this argument it's possible to
- specify a category that can be mapped to a selector in a CSS
- definition. This is useful especially for coloring text
+ as first argument. With this argument, a category can be specified
+ that can be mapped to a selector in a CSS
+ definition. This is useful, especially for coloring text
differently depending on the type of (or reason for) the
printout. Say you want one color for test system
configuration information, a different one for test system
- state information and finally one for errors detected by the
- test case functions. The corresponding style sheet may
- look like this:</p>
+ state information, and finally one for errors detected by the
+ test case functions. The corresponding style sheet can
+ look as follows:</p>
<pre>
- div.sys_config { background:blue; color:white }
- div.sys_state { background:yellow; color:black }
- div.error { background:red; color:white }</pre>
+ div.sys_config { background:blue; color:white }
+ div.sys_state { background:yellow; color:black }
+ div.error { background:red; color:white }</pre>
- <p>To install the CSS file (Common Test inlines the definition in the
- HTML code), the name may be provided when executing <c>ct_run</c>.
- Example:</p>
+ <p>To install the CSS file (<c>Common Test</c> inlines the definition in the
+ HTML code), the name can be provided when executing <c>ct_run</c>.</p>
+ <p><em>Example:</em></p>
<pre>
- $ ct_run -dir $TEST/prog -stylesheet $TEST/styles/test_categories.css</pre>
+ $ ct_run -dir $TEST/prog -stylesheet $TEST/styles/test_categories.css</pre>
- <p>Categories in a CSS file installed with the <c>-stylesheet</c> flag
+ <p>Categories in a CSS file installed with flag <c>-stylesheet</c>
are on a global test level in the sense that they can be used in any
- suite which is part of the test run.</p>
+ suite that is part of the test run.</p>
- <p>It is also possible to install style sheets on a per suite and
- per test case basis. Example:</p>
+ <p>Style sheets can also be installed on a per-suite and
+ per-test-case basis.</p>
+ <p><em>Example:</em></p>
<pre>
- -module(my_SUITE).
- ...
- suite() -> [..., {stylesheet,"suite_categories.css"}, ...].
- ...
- my_testcase(_) ->
- ...
- ct:log(sys_config, "Test node version: ~p", [VersionInfo]),
- ...
- ct:log(sys_state, "Connections: ~p", [ConnectionInfo]),
- ...
- ct:pal(error, "Error ~p detected! Info: ~p", [SomeFault,ErrorInfo]),
- ct:fail(SomeFault).</pre>
+ -module(my_SUITE).
+ ...
+ suite() -> [..., {stylesheet,"suite_categories.css"}, ...].
+ ...
+ my_testcase(_) ->
+ ...
+ ct:log(sys_config, "Test node version: ~p", [VersionInfo]),
+ ...
+ ct:log(sys_state, "Connections: ~p", [ConnectionInfo]),
+ ...
+ ct:pal(error, "Error ~p detected! Info: ~p", [SomeFault,ErrorInfo]),
+ ct:fail(SomeFault).</pre>
<p>If the style sheet is installed as in this example, the categories are
private to the suite in question. They can be used by all test cases in the
- suite, but can not be used by other suites. A suite private style sheet,
- if specified, will be used in favour of a global style sheet (one specified
- with the <c>-stylesheet</c> flag). A stylesheet tuple (as returned by <c>suite/0</c>
- above) can also be returned from a test case info function. In this case the
+ suite, but cannot be used by other suites. A suite private style sheet,
+ if specified, is used in favor of a global style sheet (one specified
+ with flag <c>-stylesheet</c>). A stylesheet tuple (as returned by <c>suite/0</c>
+ above) can also be returned from a test case information function. In this case the
categories specified in the style sheet can only be used in that particular
- test case. A test case private style sheet is used in favour of a suite or
+ test case. A test case private style sheet is used in favor of a suite or
global level style sheet.
</p>
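+ <p>For example, a test case information function installing a private
+  style sheet can look as follows (the file name is illustrative):</p>
+ <pre>
+  my_testcase() ->
+      [{stylesheet,"case_categories.css"}].</pre>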
<p>In a tuple <c>{stylesheet,CSSFile}</c>, if <c>CSSFile</c> is specified
- with a path, e.g. <c>"$TEST/styles/categories.css"</c>, this full
- name will be used to locate the file. If only the file name is specified
- however, e.g. "categories.css", then the CSS file is assumed to be located
- in the data directory, <c>data_dir</c>, of the suite. The latter usage is
- recommended since it is portable compared to hard coding path names in the
- suite!</p>
-
- <p>The <c>Category</c> argument in the example above may have the
+ with a path, for example, <c>"$TEST/styles/categories.css"</c>, this full
+ name is used to locate the file. However, if only the file name is specified,
+ for example, <c>categories.css</c>, the CSS file is assumed to be located
+ in the data directory, <c>data_dir</c>, of the suite. The latter use is
+ recommended, as it is portable compared to hard coding path names in the
+ suite.</p>
+
+ <p>Argument <c>Category</c> in the previous example can have the
value (atom) <c>sys_config</c> (white on blue), <c>sys_state</c>
- (black on yellow) or <c>error</c> (white on red).</p>
+ (black on yellow), or <c>error</c> (white on red).</p>
</section>
<section>
<marker id="repeating_tests"></marker>
- <title>Repeating tests</title>
- <p>You can order Common Test to repeat the tests you specify. You can choose
- to repeat tests a certain number of times, repeat tests for a specific period of time,
+ <title>Repeating Tests</title>
+ <p>You can order <c>Common Test</c> to repeat the tests you specify. You can choose
+ to repeat tests a number of times, repeat tests for a specific period of time,
or repeat tests until a particular stop time is reached. If repetition is controlled by
- means of time, it is also possible to specify what action Common Test should
- take upon timeout. Either Common Test performs all tests in the current run before stopping,
- or it stops as soon as the current test job is finished. Repetition can be activated by
- means of <c>ct_run</c> start flags, or tuples in the <c>ct:run:test/1</c>
- option list argument. The flags (options in parenthesis) are:</p>
- <list>
- <item><c>-repeat N ({repeat,N})</c>, where <c>N</c> is a positive integer.</item>
- <item><c>-duration DurTime ({duration,DurTime})</c>, where <c>DurTime</c> is the duration, see below.</item>
- <item><c>-until StopTime ({until,StopTime})</c>, where <c>StopTime</c> is finish time, see below.</item>
+ time, an action for <c>Common Test</c> to take upon time-out can be specified.
+ Either <c>Common Test</c> performs all tests in the current run
+ before stopping, or it stops when the current test job is finished. Repetition
+ can be activated by <c>ct_run</c> start flags, or tuples in the <c>ct:run_test/1</c>
+ option list argument. The flags (options in parentheses) are the following:</p>
+ <list type="bulleted">
+ <item><c>-repeat N ({repeat,N})</c>, where <c>N</c> is a positive integer</item>
+ <item><c>-duration DurTime ({duration,DurTime})</c>, where <c>DurTime</c> is the duration</item>
+ <item><c>-until StopTime ({until,StopTime})</c>, where <c>StopTime</c> is finish time</item>
<item><c>-force_stop ({force_stop,true})</c></item>
<item><c>-force_stop skip_rest ({force_stop,skip_rest})</c></item>
</list>
- <p>The duration time, <c>DurTime</c>, is specified as <c>HHMMSS</c>. Example:
- <c>-duration 012030</c> or <c>{duration,"012030"}</c>, means the tests will
- be executed and (if time allows) repeated, until timeout occurs after 1 h, 20 min
- and 30 secs.
- <c>StopTime</c> can be specified as <c>HHMMSS</c> and is then interpreted as a time today
- (or possibly tomorrow). <c>StopTime</c> can also be specified as <c>YYMoMoDDHHMMSS</c>.
- Example: <c>-until 071001120000</c> or <c>{until,"071001120000"}</c>, which means the tests
- will be executed and (if time allows) repeated, until 12 o'clock on the 1st of Oct 2007.</p>
-
- <p>When timeout occurs, Common Test will never abort the ongoing test case, since
- this might leave the system under test in an undefined, and possibly bad, state.
- Instead Common Test will by default finish the current test
- run before stopping. If the <c>force_stop</c> flag is
- given, Common Test will stop as soon as the current test job
- is finished, and if the <c>force_stop</c> flag is given with
- <c>skip_rest</c> Common Test will only complete the current
- test case and skip the rest of the tests in the test job.
- Note that since Common Test always finishes off at least the
- current test case,
- the time specified with <c>duration</c> or <c>until</c> is never definitive!</p>
-
- <p>Log files from every single repeated test run is saved in normal Common Test fashion (see above).
- Common Test may later support an optional feature to only store the last (and possibly
- the first) set of logs of repeated test runs, but for now the user must be careful not
- to run out of disk space if tests are repeated during long periods of time.</p>
-
- <p>Note that for each test run that is part of a repeated session, information about the
- particular test run is printed in the Common Test Framework Log. There you can read
- the repetition number, remaining time, etc.</p>
-
- <p>Example 1:</p>
+ <taglist>
+ <tag><c>DurTime</c></tag>
+ <item><p>The duration time is specified as <c>HHMMSS</c>, for example, <c>-duration 012030</c>
+ or <c>{duration,"012030"}</c></p>, which means that the tests are executed and
+ (if time allows) repeated until time-out occurs after 1 hour, 20 minutes, and 30 seconds.
+ </item>
+ <tag><c>StopTime</c></tag>
+ <item><p>The finish time can be specified as <c>HHMMSS</c> and is then interpreted as a
+ time today (or possibly tomorrow), but can also be specified as <c>YYMoMoDDHHMMSS</c>,
+ for example, <c>-until 071001120000</c> or <c>{until,"071001120000"}</c>. This means
+ that the tests are executed and (if time allows) repeated, until 12 o'clock on the 1st
+ of October 2007.</p>
+ </item>
+ </taglist>
+
+ <p>When time-out occurs, <c>Common Test</c> never aborts the ongoing test case,
+ as this can leave the SUT in an undefined, and possibly bad, state.
+ Instead <c>Common Test</c>, by default, finishes the current test
+ run before stopping. If flag <c>force_stop</c> is
+ specified, <c>Common Test</c> stops when the current test job
+ is finished. If flag <c>force_stop</c> is specified with
+ <c>skip_rest</c>, <c>Common Test</c> only completes the current
+ test case and skips the remaining tests in the test job.</p>
+ <note><p>As <c>Common Test</c> always finishes at least the current test case,
+ the time specified with <c>duration</c> or <c>until</c> is never definitive.</p></note>
+
+ <p>Log files from every repeated test run are saved in normal <c>Common Test</c>
+ fashion (described earlier).</p>
+ <p><c>Common Test</c> might later support an optional feature to only store the last (and possibly
+ the first) set of logs of repeated test runs, but for now the user must be careful not
+ to run out of disk space if tests are repeated during long periods of time.</p>
+
+ <p>For each test run that is part of a repeated session, information about the
+ particular test run is printed in the <c>Common Test</c> Framework Log. The information
+ includes the repetition number, remaining time, and so on.</p>
+
+ <p><em>Example 1:</em></p>
<pre>
- $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -duration 001000 -force_stop</pre>
- <p>Here the suites in test directory to1, followed by the suites in to2, will be executed
- in one test run. A timeout event will occur after 10 minutes. As long as there is time
- left, Common Test will repeat the test run (i.e. starting over with the to1 test).
- When the timeout occurs, Common Test will stop as soon as the current job is finished
- (because of the <c>force_stop</c> flag). As a result, the specified test run might be
- aborted after the to1 test and before the to2 test.</p>
-
- <p>Example 2:</p>
+ $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -duration 001000 -force_stop</pre>
+
+ <p>Here, the suites in test directory <c>to1</c>, followed by the suites in <c>to2</c>, are
+ executed in one test run. A time-out event occurs after 10 minutes. As long as there is
+ time left, <c>Common Test</c> repeats the test run (that is, starting over with test <c>to1</c>).
+ After time-out, <c>Common Test</c> stops when the current job is finished
+ (because of flag <c>force_stop</c>). As a result, the specified test run can be
+ aborted after test <c>to1</c> and before test <c>to2</c>.</p>
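+
+ <p>A corresponding sketch using the <c>ct:run_test/1</c> option list
+  (directory paths shortened for illustration):</p>
+ <pre>
+  1&gt; ct:run_test([{dir,["to1","to2"]},
+                  {duration,"001000"},
+                  {force_stop,true}]).</pre>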
+
+ <p><em>Example 2:</em></p>
<pre>
- $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -duration 001000 -forces_stop skip_rest</pre>
- <p>Here the same test run as in Example 1, but with the
- <c>force_stop</c> flag set to <c>skip_rest</c>. If the timeout
- occurs while executing tests in directory to1, the rest of the
- test cases in to1 will be skipped and then the test will be
- aborted without running the tests in to2 another time. If the
- timeout occurs while executing tests in directory to2, then the
- rest of the test cases in to2 will be skipped and then the test
- will be aborted.</p>
-
- <p>Example 3:</p>
+ $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -duration 001000 -force_stop skip_rest</pre>
+
+ <p>Here, the same tests as in Example 1 are run, but with flag <c>force_stop</c> set to
+ <c>skip_rest</c>. If time-out occurs while executing tests in directory <c>to1</c>,
+ the remaining test cases in <c>to1</c> are skipped and the test is aborted without
+ running the tests in <c>to2</c> another time. If time-out occurs while executing
+ tests in directory <c>to2</c>, the remaining test cases in <c>to2</c> are skipped and
+ the test is aborted.</p>
+
+ <p><em>Example 3:</em></p>
<pre>
- $ date
- Fri Sep 28 15:00:00 MEST 2007
-
- $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -until 160000</pre>
- <p>Here the same test run as in the example above will be executed (and possibly repeated).
- In this example, however, the timeout will occur after 1 hour and when that happens,
- Common Test will finish the entire test run before stopping (i.e. the to1 and to2 test
- will always both be executed in the same test run).</p>
+ $ date
+ Fri Sep 28 15:00:00 MEST 2007
+
+ $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -until 160000</pre>
+
+ <p>Here, the same test run as in the previous examples is executed (and possibly repeated).
+ However, when the time-out occurs, after 1 hour, <c>Common Test</c> finishes the entire
+ test run before stopping (that is, both <c>to1</c> and <c>to2</c> are always executed in
+ the same test run).</p>
- <p>Example 4:</p>
+ <p><em>Example 4:</em></p>
<pre>
- $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -repeat 5</pre>
- <p>Here the test run, including both the to1 and the to2 test, will be repeated 5 times.</p>
+ $ ct_run -dir $TEST_ROOT/to1 $TEST_ROOT/to2 -repeat 5</pre>
+
+ <p>Here, the test run, including both the <c>to1</c> and the <c>to2</c> test, is repeated
+ five times.</p>
- <note><p>This feature should not be confused with the <c>repeat</c> property of a test
+ <note><p>Do not confuse this feature with the <c>repeat</c> property of a test
case group. The options described here are used to repeat execution of entire test runs,
while the <c>repeat</c> property of a test case group makes it possible to repeat
execution of sets of test cases within a suite. For more information about the latter,
- see the <seealso marker="write_test_chapter#test_case_groups">Writing Test Suites</seealso>
- chapter.</p></note>
+ see section <seealso marker="write_test_chapter#test_case_groups">Test Case Groups</seealso>
+ in section Writing Test Suites.</p></note>
</section>
<section>
<marker id="silent_connections"></marker>
<title>Silent Connections</title>
- <p>The protocol handling processes in Common Test, implemented by ct_telnet,
- ct_ssh, ct_ftp etc, do verbose printing to the test case logs. This can be switched off
- by means of the <c>-silent_connections</c> flag:</p>
+ <p>The protocol handling processes in <c>Common Test</c>, implemented by <c>ct_telnet</c>,
+ <c>ct_ssh</c>, <c>ct_ftp</c>, and so on, do verbose printing to the test case logs.
+ This can be switched off with flag <c>-silent_connections</c>:</p>
<pre>
- ct_run -silent_connections [conn_types]
- </pre>
+ ct_run -silent_connections [conn_types]</pre>
- <p>where <c>conn_types</c> specifies <c>ssh, telnet, ftp, rpc</c> and/or <c>snmp</c>.</p>
+ <p>Here, <c>conn_types</c> specifies SSH, Telnet, FTP, RPC, and/or SNMP.</p>
- <p>Example:</p>
+ <p><em>Example 1:</em></p>
<pre>
- ct_run ... -silent_connections ssh telnet</pre>
- <p>switches off logging for ssh and telnet connections.</p>
+ ct_run ... -silent_connections ssh telnet</pre>
+ <p>This switches off logging for SSH and Telnet connections.</p>
+
+ <p><em>Example 2:</em></p>
<pre>
- ct_run ... -silent_connections</pre>
- <p>switches off logging for all connection types.</p>
+ ct_run ... -silent_connections</pre>
+ <p>This switches off logging for all connection types.</p>
- <p>Fatal communication error and reconnection attempts will always be printed even
- if logging has been suppressed for the connection type in question. However, operations
- such as sending and receiving data will be performed silently.</p>
+ <p>Fatal communication errors and reconnection attempts are always printed, even if
+ logging has been suppressed for the connection type in question. However, operations
+ such as sending and receiving data are performed silently.</p>
- <p>It is possible to also specify <c>silent_connections</c> in a test suite. This is
+ <p><c>silent_connections</c> can also be specified in a test suite. This is
accomplished by returning a tuple, <c>{silent_connections,ConnTypes}</c>, in the
- <c>suite/0</c> or test case info list. If <c>ConnTypes</c> is a list of atoms
- (<c>ssh, telnet, ftp, rpc</c> and/or <c>snmp</c>), output for any corresponding connections
- will be suppressed. Full logging is per default enabled for any connection of type not
+ <c>suite/0</c> or test case information list. If <c>ConnTypes</c> is a list of atoms
+ (<c>ssh</c>, <c>telnet</c>, <c>ftp</c>, <c>rpc</c>, and/or <c>snmp</c>), output for any corresponding connections
+ is suppressed. Full logging is by default enabled for any connection of type not
specified in <c>ConnTypes</c>. Hence, if <c>ConnTypes</c> is the empty list, logging
is enabled for all connections.</p>
- <p>Example:</p>
+ <p><em>Example 3:</em></p>
<pre>
-
- -module(my_SUITE).
+ -module(my_SUITE).
- suite() -> [..., {silent_connections,[telnet,ssh]}, ...].
+ suite() -> [..., {silent_connections,[telnet,ssh]}, ...].
- ...
+ ...
- my_testcase1() ->
- [{silent_connections,[ssh]}].
+ my_testcase1() ->
+ [{silent_connections,[ssh]}].
- my_testcase1(_) ->
- ...
+ my_testcase1(_) ->
+ ...
- my_testcase2(_) ->
- ...
- </pre>
+ my_testcase2(_) ->
+ ...</pre>
- <p>In this example, <c>suite/0</c> tells Common Test to suppress
- printouts from telnet and ssh connections. This is valid for
+ <p>In this example, <c>suite/0</c> tells <c>Common Test</c> to suppress
+ printouts from Telnet and SSH connections. This is valid for
all test cases. However, <c>my_testcase1/0</c> specifies that
- for this test case, only ssh should be silent. The result is
- that <c>my_testcase1</c> will get telnet info (if any) printed
- in the log, but not ssh info. <c>my_testcase2</c> will get no
- info from either connection printed.</p>
+ for this test case, only SSH is to be silent. The result is
+ that <c>my_testcase1</c> gets Telnet information (if any) printed
+ in the log, but not SSH information. <c>my_testcase2</c> gets no
+ information from either connection printed.</p>
- <p><c>silent_connections</c> may also be specified with a term
+ <p><c>silent_connections</c> can also be specified with a term
in a test specification
- (see <seealso marker="run_test_chapter#test_specifications">Test
- Specifications</seealso>). Connections provided with the
- <c>silent_connections</c> start flag/option, will be merged with
- any connections listed in the test specification.</p>
+ (see section <seealso marker="run_test_chapter#test_specifications">Test
+ Specifications</seealso> in section Running Tests and Analyzing Results).
+ Connections provided with start flag/option <c>silent_connections</c>
+ are merged with any connections listed in the test specification.</p>
- <p>The <c>silent_connections</c> start flag/option and test
- specification term, overrides any settings made by the info functions
+ <p>Start flag/option <c>silent_connections</c> and the test
+ specification term override any settings made by the information functions
inside the test suite.</p>
- <note><p>Note that in the current Common Test version, the
- <c>silent_connections</c> feature only works for telnet
- and ssh connections! Support for other connection types will be added
- in future Common Test versions.</p></note>
+ <note><p>In the current <c>Common Test</c> version, the
+ <c>silent_connections</c> feature only works for Telnet
+ and SSH connections. Support for other connection types can be added
+ in future <c>Common Test</c> versions.</p></note>
</section>
</chapter>
diff --git a/lib/common_test/doc/src/test_structure_chapter.xml b/lib/common_test/doc/src/test_structure_chapter.xml
index d5b92b163f..8076244928 100644
--- a/lib/common_test/doc/src/test_structure_chapter.xml
+++ b/lib/common_test/doc/src/test_structure_chapter.xml
@@ -31,167 +31,167 @@
</header>
<section>
- <title>Test structure</title>
+ <title>General</title>
<p>A test is performed by running one or more test suites. A test suite
- consists of test cases (as well as configuration functions and info
- functions). Test cases may be grouped in so called test case groups.
+ consists of test cases, configuration functions, and information
+ functions. Test cases can be grouped in so-called test case groups.
A test suite is an Erlang module and test cases are implemented as
Erlang functions. Test suites are stored in test directories.</p>
</section>
<section>
- <title>Skipping test cases</title>
+ <marker id="skipping_test_cases"></marker>
+ <title>Skipping Test Cases</title>
- <p>It is possible to skip certain test cases, for example if you
- know beforehand that a specific test case fails. This might be
- functionality which isn't yet implemented, a bug that is known but
- not yet fixed or some functionality which doesn't work or isn't
+ <p>Certain test cases can be skipped, for example, if you
+ know beforehand that a specific test case fails. The reason can be
+ functionality that is not yet implemented, a bug that is known but
+ not yet fixed, or some functionality that does not work or is not
applicable on a specific platform.</p>
- <p>There are several different ways to state that one or more
- test cases should be skipped:</p>
- <list>
+ <p>Test cases can be skipped in the following ways:</p>
+ <list type="bulleted">
<item>Using <c>skip_suites</c> and <c>skip_cases</c>
terms in
<seealso marker="run_test_chapter#test_specifications">test specifications</seealso>.
</item>
- <item>Returning <c>{skip,Reason}</c> from the
- <c>init_per_testcase/2</c> or <c>init_per_suite/1</c> functions.</item>
+ <item>Returning <c>{skip,Reason}</c> from function
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso> or
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite/1</c></seealso>.</item>
<item>Returning <c>{skip,Reason}</c> from the execution clause
- of the test case.</item>
+ of the test case. The execution clause is called, so the author
+ must ensure that the test case does not run.</item>
</list>
- <p>The latter of course means that the execution clause is
- actually called, so the author must make sure that the test case
- does not run.</p>
-
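+ <p>For example, a minimal sketch of skipping a whole suite from
+  <c>init_per_suite/1</c> (the platform check is only an illustration):</p>
+ <pre>
+  init_per_suite(Config) ->
+      case os:type() of
+          {win32,_} -> {skip,"Tests not applicable on Windows"};
+          _ -> Config
+      end.</pre>
+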
- <p>When a test case is skipped, it will be noted as <c>SKIPPED</c>
+ <p>When a test case is skipped, it is noted as <c>SKIPPED</c>
in the HTML log.</p>
</section>
<section>
- <title>Definition of terms</title>
+ <title>Definition of Terms</title>
<taglist>
- <tag><em>Auto skipped test case</em></tag>
+ <tag><em>Auto-skipped test case</em></tag>
<item>
- When a configuration function fails (i.e. terminates unexpectedly),
- the test cases that depend on the configuration function will be
- skipped automatically by Common Test. The status of the test cases
- is then "auto skipped". Test cases are also auto skipped by
- Common Test if required configuration data is not available at
- runtime.
+ <p>When a configuration function fails (that is, terminates unexpectedly),
+ the test cases depending on the configuration function are
+ skipped automatically by <c>Common Test</c>. The status of the test cases
+ is then "auto-skipped". Test cases are also "auto-skipped" by
+ <c>Common Test</c> if the required configuration data is unavailable at
+ runtime.</p>
</item>
<tag><em>Configuration function</em></tag>
<item>
- A function in a test suite that is meant to be used for
+ <p>A function in a test suite that is meant to be used for
setting up, cleaning up, and/or verifying the state and
- environment on the SUT (System Under Test) and/or the Common Test
+ environment on the System Under Test (SUT) and/or the <c>Common Test</c>
host node, so that a test case (or a set of test cases) can
- execute correctly.
+ execute correctly.</p>
</item>
<tag><em>Configuration file</em></tag>
<item>
- A file that contains data related to a test and/or an SUT
- (System Under Test), e.g. protocol server addresses, client
- login details, hardware interface addresses, etc - any data
- that should be handled as variable in the suite and not
- be hardcoded.
+ <p>A file containing data related to a test and/or an SUT,
+ for example, protocol server addresses, client
+ login details, and hardware interface addresses. That is, any data
+ that is to be handled as variable in the suite and not
+ be hard-coded.</p>
</item>
<tag><em>Configuration variable</em></tag>
<item>
- A name (an Erlang atom) associated with a data value read from
- a configuration file.
+ <p>A name (an Erlang atom) associated with a data value read from
+ a configuration file.</p>
</item>
- <tag><em>data_dir</em></tag>
+ <tag><c>data_dir</c></tag>
<item>
- Data directory for a test suite. This directory contains
- any files used by the test suite, e.g. additional Erlang
- modules, binaries or data files.
+ <p>Data directory for a test suite. This directory contains
+ any files used by the test suite, for example, extra Erlang
+ modules, binaries, or data files.</p>
</item>
- <tag><em>Info function</em></tag>
+ <tag><em>Information function</em></tag>
<item>
- A function in a test suite that returns a list of properties
- (read by the Common Test server) that describes the conditions
- for executing the test cases in the suite.
+ <p>A function in a test suite that returns a list of properties
+ (read by the <c>Common Test</c> server) that describes the conditions
+ for executing the test cases in the suite.</p>
</item>
<tag><em>Major log file</em></tag>
<item>
- An overview and summary log file for one or more test suites.
+ <p>An overview and summary log file for one or more test suites.</p>
</item>
<tag><em>Minor log file</em></tag>
<item>
- A log file for one particular test case. Also called the
- test case log file.
+ <p>A log file for one particular test case. Also called the
+ test case log file.</p>
</item>
- <tag><em>priv_dir</em></tag>
+
+ <tag><c>priv_dir</c></tag>
<item>
- Private directory for a test suite. This directory should
- be used when the test suite needs to write to files.
+ <p>Private directory for a test suite. This directory is to
+ be used when the test suite needs to write to files.</p>
</item>
- <tag><em>ct_run</em></tag>
+ <tag><c>ct_run</c></tag>
<item>
- The name of an executable program that may be
+ <p>The name of an executable program that can be
used as an interface for specifying and running
- tests with Common Test.
+ tests with <c>Common Test</c>.</p>
</item>
<tag><em>Test case</em></tag>
<item>
- A single test included in a test suite. A test case is
- implemented as a function in a test suite module.
+ <p>A single test included in a test suite. A test case is
+ implemented as a function in a test suite module.</p>
</item>
<tag><em>Test case group</em></tag>
<item>
- A set of test cases that share configuration functions and
- execution properties. The execution properties specify whether
- the test cases in the group should be executed in random order,
- in parallel, in sequence, and if the execution of the group
- should be repeated. Test case groups may also be nested (i.e. a
- group may, besides test cases, contain sub-groups).
+ <p>A set of test cases sharing configuration functions and
+ execution properties. The execution properties specify if
+ the test cases in the group are to be executed in random order,
+ in parallel, or in sequence, and if the execution of the group
+ is to be repeated. Test case groups can also be nested. That is,
+ a group can, besides test cases, contain subgroups.</p>
</item>
<tag><em>Test suite</em></tag>
<item>
- An erlang module containing a collection of test cases for
- a specific functional area.
+ <p>An Erlang module containing a collection of test cases for
+ a specific functional area.</p>
</item>
<tag><em>Test directory</em></tag>
<item>
- A directory that contains one or more test suite modules, i.e.
- a group of test suites.
+ <p>A directory containing one or more test suite modules,
+ that is, a group of test suites.</p>
</item>
- <tag><em>The Config argument</em></tag>
+ <tag><em>Argument</em> <c>Config</c></tag>
<item>
- A list of key-value tuples (i.e. a property list) containing
+ <p>A list of key-value tuples (that is, a property list) containing
runtime configuration data passed from the configuration
- functions to the test cases.
+ functions to the test cases.</p>
</item>
- <tag><em>User skipped test case</em></tag>
+ <tag><em>User-skipped test case</em></tag>
<item>
- This is the status of a test case that has been explicitly
- skipped in any of the ways described in the "Skipping test cases"
- section above.
+ <p>The status of a test case explicitly skipped in any of
+ the ways described in section
+ <seealso marker="#skipping_test_cases">Skipping Test Cases</seealso>.
+ </p>
</item>
diff --git a/lib/common_test/doc/src/unix_telnet.xml b/lib/common_test/doc/src/unix_telnet.xml
new file mode 100644
index 0000000000..189379c39a
--- /dev/null
+++ b/lib/common_test/doc/src/unix_telnet.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2010</year><year>2012</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>unix_telnet</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>unix_telnet.xml</file>
+ </header>
+ <module>unix_telnet</module>
+ <modulesummary>Callback module for ct_telnet, for connecting to a Telnet
+ server on a UNIX host.</modulesummary>
+
+ <description>
+
+ <p>Callback module for
+ <seealso marker="ct_telnet"><c>ct_telnet</c></seealso>,
+ for connecting to a Telnet server on a UNIX host.</p>
+
+ <p>It requires the following entry in the configuration file:</p>
+
+ <pre>
+ {unix,[{telnet,HostNameOrIpAddress},
+ {port,PortNum}, % optional
+ {username,UserName},
+ {password,Password},
+ {keep_alive,Bool}]}. % optional</pre>
+
+ <p>To communicate through Telnet to the host specified by
+ <c>HostNameOrIpAddress</c>, use the interface functions in
+ <seealso marker="ct_telnet"><c>ct_telnet</c></seealso>, for example,
+ <c>open(Name)</c> and <c>cmd(Name,Cmd)</c>.</p>
+
+ <p><c>Name</c> is the name you allocated to the Unix host in your
+ <c>require</c> statement, for example:</p>
+
+ <pre>
+ suite() -&gt; [{require,Name,{unix,[telnet]}}].</pre>
+
+ <p>or</p>
+
+ <pre>
+ ct:require(Name,{unix,[telnet]}).</pre>
+
+ <p>The "keep alive" activity (that is, that <c>Common Test</c> sends NOP
+ to the server every 10 seconds if the connection is idle) can be
+ enabled or disabled for one particular connection as described here.
+ It can be disabled for all connections using <c>telnet_settings</c>
+ (see <seealso marker="ct_telnet"><c>ct_telnet</c></seealso>).</p>
+
+ <p>The <c>{port,PortNum}</c> tuple is optional and if omitted, default
+ Telnet port 23 is used. Also the <c>keep_alive</c> tuple is optional,
+ and the value defaults to <c>true</c> (enabled).</p>
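+
+ <p>A minimal usage sketch from a test case, assuming the host has been
+  required under the illustrative name <c>unix_host</c>:</p>
+
+ <pre>
+  my_testcase(_Config) ->
+      {ok,_} = ct_telnet:open(unix_host),
+      {ok,Data} = ct_telnet:cmd(unix_host, "ls"),
+      ct:log("ls output: ~p", [Data]),
+      ok = ct_telnet:close(unix_host).</pre>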
+ </description>
+
+ <funcs>
+ <func>
+ <name>connect(ConnName, Ip, Port, Timeout, KeepAlive, Extra) -&gt; {ok, Handle} | {error, Reason}</name>
+ <fsummary>Callback for ct_telnet.erl.</fsummary>
+ <type>
+ <v>ConnName = target_name()</v>
+ <v>Ip = string() | {integer(), integer(), integer(), integer()}</v>
+ <v>Port = integer()</v>
+ <v>Timeout = integer()</v>
+ <v>KeepAlive = bool()</v>
+ <v>Extra = target_name() | {Username, Password}</v>
+ <v>Username = string()</v>
+ <v>Password = string()</v>
+ <v>Handle = handle()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc><marker id="connect-6"/>
+ <p>Callback for <c>ct_telnet.erl</c>.</p>
+
+ <p>Sets up a Telnet connection to a UNIX host.</p>
+
+ <p>For <c>target_name()</c>, see
+ <seealso marker="ct"><c>ct</c></seealso>. For <c>handle()</c>, see
+ <seealso marker="ct_telnet"><c>ct_telnet</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>get_prompt_regexp() -&gt; PromptRegexp</name>
+ <fsummary>Callback for ct_telnet.erl.</fsummary>
+ <type>
+ <v>PromptRegexp = prompt_regexp()</v>
+ </type>
+ <desc><marker id="get_prompt_regexp-0"/>
+ <p>Callback for <c>ct_telnet.erl</c>.</p>
+
+ <p>Returns a suitable <c>regexp</c> string matching common prompts
+ for users on Unix hosts.</p>
+
+ <p>For <c>prompt_regexp()</c>, see
+ <seealso marker="ct_telnet"><c>ct_telnet</c></seealso>.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="ct"><c>ct</c></seealso>,
+ <seealso marker="ct_telnet"><c>ct_telnet</c></seealso></p>
+ </section>
+
+</erlref>
+
+
diff --git a/lib/common_test/doc/src/why_test_chapter.xml b/lib/common_test/doc/src/why_test_chapter.xml
index 15eb1aaed4..ff6000628b 100644
--- a/lib/common_test/doc/src/why_test_chapter.xml
+++ b/lib/common_test/doc/src/why_test_chapter.xml
@@ -22,7 +22,7 @@
</legalnotice>
- <title>Some thoughts about testing</title>
+ <title>Some Thoughts about Testing</title>
<prepared>Siri Hansen</prepared>
<docno></docno>
<date></date>
@@ -34,54 +34,53 @@
<section>
<title>Goals</title>
- <p>It's not possible to prove that a program is correct by
+ <p>It is not possible to prove that a program is correct by
testing. On the contrary, it has been formally proven that it is
impossible to prove programs in general by testing. Theoretical
- program proofs or plain examination of code may be viable options
- for those that wish to certify that a program is correct. The test
+ program proofs or plain examination of code can be viable options
+ for those wishing to certify that a program is correct. The test
server, as it is based on testing, cannot be used for
certification. Its intended use is instead to (cost effectively)
<em>find bugs</em>. A successful test suite is one that reveals a
- bug. If a test suite results in Ok, then we know very little that
- we didn't know before.</p>
+ bug. If a test suite results in OK, then we know very little that
+ we did not know before.</p>
</section>
<section>
- <title>What to test?</title>
+ <title>What to Test</title>
<p>
There are many kinds of test suites. Some concentrate on
calling every function or command (in the documented way) in
a certain interface.
- Some other do the same, but uses all kinds of illegal
- parameters, and verifies that the server stays alive and rejects
+ Some others do the same, but use all kinds of illegal
+ parameters, and verify that the server stays alive and rejects
the requests with reasonable error codes. Some test suites
simulate an application (typically consisting of a few modules of
- an application), some try to do tricky requests in general, some
+ an application), some try to do tricky requests in general, and some
test suites even test internal functions with help of special
- load-modules on target.</p>
+ load modules on target.</p>
- <p>Another interesting category of test suites are the ones that
- check that fixed bugs don't reoccur. When a bugfix is introduced,
- a test case that checks for that specific bug should be written
- and submitted to the affected test suite(s).</p>
+ <p>Another interesting category of test suites is the one
+ checking that fixed bugs do not reoccur. When a bugfix is introduced,
+ a test case that checks for that specific bug is written
+ and submitted to the affected test suites.</p>
<p>Aim for finding bugs. Write whatever test that has the highest
probability of finding a bug, now or in the future. Concentrate
- more on the critical parts. Bugs in critical subsystems are a lot
+ more on the critical parts. Bugs in critical subsystems are much
more expensive than others.</p>
<p>Aim for functionality testing rather than implementation
details. Implementation details change quite often, and the test
- suites should be long lived. Often implementation details differ
+ suites are to be long lived. Implementation details often differ
on different platforms and versions. If implementation details
- have to be tested, try to factor them out into separate test
- cases. Later on these test cases may be rewritten, or just
- skipped.</p>
+ must be tested, try to factor them out into separate test
+ cases. These test cases can later be rewritten or skipped.</p>
- <p>Also, aim for testing everything once, no less, no more. It's
- not effective having every test case fail just because one
+ <p>Also, aim for testing everything once, no less, no more. It is
+ not effective having every test case fail only because one
function in the interface changed.</p>
</section>
diff --git a/lib/common_test/doc/src/write_test_chapter.xml b/lib/common_test/doc/src/write_test_chapter.xml
index 1f5650651f..e3811ec4cf 100644
--- a/lib/common_test/doc/src/write_test_chapter.xml
+++ b/lib/common_test/doc/src/write_test_chapter.xml
@@ -32,84 +32,88 @@
<section>
<marker id="intro"></marker>
- <title>Support for test suite authors</title>
+ <title>Support for Test Suite Authors</title>
- <p>The <c>ct</c> module provides the main interface for writing
- test cases. This includes e.g:</p>
+ <p>The <seealso marker="ct"><c>ct</c></seealso> module provides the main
+      interface for writing test cases. This includes, for example, the following:</p>
- <list>
+ <list type="bulleted">
<item>Functions for printing and logging</item>
<item>Functions for reading configuration data</item>
<item>Function for terminating a test case with error reason</item>
<item>Function for adding comments to the HTML overview page</item>
</list>
- <p>Please see the reference manual for the <c>ct</c>
- module for details about these functions.</p>
+ <p>For details about these functions, see module <seealso marker="ct"><c>ct</c></seealso>.</p>
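+    <p>As an illustration, the following sketch uses some of these functions
+      (the configuration variable <c>ftp_host</c> and the test case itself
+      are made-up examples):</p>
+    <pre>
+ my_test_case(Config) ->
+     %% printing and logging
+     ct:log("Config: ~p", [Config]),
+     %% reading configuration data (ftp_host is an example variable)
+     case ct:get_config(ftp_host) of
+         undefined ->
+             %% terminating the test case with an error reason
+             ct:fail(ftp_host_not_configured);
+         _Host ->
+             %% adding a comment to the HTML overview page
+             ct:comment("FTP host found")
+     end.</pre>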
- <p>The CT application also includes other modules named
- <c><![CDATA[ct_<component>]]></c> that
+ <p>The <c>Common Test</c> application also includes other modules named
+ <c><![CDATA[ct_<component>]]></c>, which
provide various support, mainly simplified use of communication
- protocols such as rpc, snmp, ftp, telnet, etc.</p>
+ protocols such as RPC, SNMP, FTP, Telnet, and others.</p>
</section>
<section>
- <title>Test suites</title>
+ <title>Test Suites</title>
<p>A test suite is an ordinary Erlang module that contains test
cases. It is recommended that the module has a name on the form
<c>*_SUITE.erl</c>. Otherwise, the directory and auto compilation
- function in CT will not be able to locate it (at least not per default).
+ function in <c>Common Test</c> cannot locate it (at least not by default).
</p>
<p>It is also recommended that the <c>ct.hrl</c> header file is included
in all test suite modules.
</p>
- <p>Each test suite module must export the function <c>all/0</c>
+ <p>Each test suite module must export function
+ <seealso marker="common_test#Module:all-0"><c>all/0</c></seealso>,
which returns the list of all test case groups and test cases
to be executed in that module.
</p>
- <p>The callback functions that the test suite should implement, and
- which will be described in more detail below, are
- all listed in the <seealso marker="common_test">common_test
- reference manual page</seealso>.
+ <p>The callback functions to be implemented by the test suite are
+ all listed in module <seealso marker="common_test">common_test
+ </seealso>. They are also described in more detail later in this User's Guide.
</p>
</section>
<section>
- <title>Init and end per suite</title>
+ <title>Init and End per Suite</title>
- <p>Each test suite module may contain the optional configuration functions
- <c>init_per_suite/1</c> and <c>end_per_suite/1</c>. If the init function
- is defined, so must the end function be.
+ <p>Each test suite module can contain the optional configuration functions
+ <seealso marker="common_test#Module:init_per_suite-1"><c>init_per_suite/1</c></seealso>
+ and <seealso marker="common_test#Module:end_per_suite-1"><c>end_per_suite/1</c></seealso>.
+ If the init function is defined, so must the end function be.
</p>
- <p>If it exists, <c>init_per_suite</c> is called initially before the
- test cases are executed. It typically contains initializations that are
- common for all test cases in the suite, and that are only to be
- performed once. It is recommended to be used for setting up and
- verifying state and environment on the SUT (System Under Test) and/or
- the CT host node, so that the test cases in the suite will execute
- correctly. Examples of initial configuration operations: Opening a connection
- to the SUT, initializing a database, running an installation script, etc.
+ <p>If <c>init_per_suite</c> exists, it is called initially before the
+ test cases are executed. It typically contains initializations common
+ for all test cases in the suite, which are only to be performed once.
+ <c>init_per_suite</c> is recommended for setting up and verifying state
+ and environment on the System Under Test (SUT) or the <c>Common Test</c>
+      host node, or both, so that the test cases in the suite execute correctly.
+ The following are examples of initial configuration operations:
</p>
+ <list type="bulleted">
+ <item>Opening a connection to the SUT</item>
+ <item>Initializing a database</item>
+ <item>Running an installation script</item>
+ </list>
<p><c>end_per_suite</c> is called as the final stage of the test suite execution
(after the last test case has finished). The function is meant to be used
for cleaning up after <c>init_per_suite</c>.
</p>
- <p><c>init_per_suite</c> and <c>end_per_suite</c> will execute on dedicated
+ <p><c>init_per_suite</c> and <c>end_per_suite</c> execute on dedicated
Erlang processes, just like the test cases do. The result of these functions
- is however not included in the test run statistics of successful, failed and
+ is however not included in the test run statistics of successful, failed, and
skipped cases.
</p>
- <p>The argument to <c>init_per_suite</c> is <c>Config</c>, the
+ <p>The argument to <c>init_per_suite</c> is <c>Config</c>, that is, the
same key-value list of runtime configuration data that each test case takes
as input argument. <c>init_per_suite</c> can modify this parameter with
information that the test cases need. The possibly modified <c>Config</c>
@@ -117,671 +121,683 @@
</p>
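+    <p>As an illustration, a minimal pair of these functions can look as
+      follows (the module <c>db</c> and its functions are made-up
+      placeholders for whatever the SUT requires):</p>
+    <pre>
+ init_per_suite(Config) ->
+     %% db:connect/1 is a made-up call that opens a connection to the SUT
+     {ok,Handle} = db:connect("dbhost"),
+     %% make the handle available to all test cases in the suite
+     [{db_handle,Handle}|Config].
+
+ end_per_suite(Config) ->
+     %% clean up after init_per_suite (db:disconnect/1 is also made up)
+     Handle = ?config(db_handle, Config),
+     ok = db:disconnect(Handle).</pre>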
<p>If <c>init_per_suite</c> fails, all test cases in the test
- suite will be skipped automatically (so called <em>auto skipped</em>),
+      suite are skipped automatically (so-called <em>auto skipped</em>),
including <c>end_per_suite</c>.
</p>
- <p>Note that if <c>init_per_suite</c> and <c>end_per_suite</c> do not exist
- in the suite, Common Test calls dummy functions (with the same names)
- instead, so that output generated by hook functions may be saved to the log
- files for these dummies
- (see the <seealso marker="ct_hooks_chapter#manipulating">Common Test Hooks</seealso>
- chapter for more information).
+ <p>Notice that if <c>init_per_suite</c> and <c>end_per_suite</c> do not exist
+ in the suite, <c>Common Test</c> calls dummy functions (with the same names)
+ instead, so that output generated by hook functions can be saved to the log
+ files for these dummies. For details, see
+ <seealso marker="ct_hooks_chapter#manipulating">Common Test Hooks</seealso>.
</p>
</section>
<section>
<marker id="per_testcase"/>
- <title>Init and end per test case</title>
+ <title>Init and End per Test Case</title>
<p>Each test suite module can contain the optional configuration functions
- <c>init_per_testcase/2</c> and <c>end_per_testcase/2</c>. If the init function
- is defined, so must the end function be.</p>
+ <seealso marker="common_test#Module:init_per_testcase-2"><c>init_per_testcase/2</c></seealso>
+ and <seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase/2</c></seealso>.
+ If the init function is defined, so must the end function be.</p>
- <p>If it exists, <c>init_per_testcase</c> is called before each
- test case in the suite. It typically contains initialization which
- must be done for each test case (analogue to <c>init_per_suite</c> for the
+ <p>If <c>init_per_testcase</c> exists, it is called before each
+ test case in the suite. It typically contains initialization that
+      must be done for each test case (analogous to <c>init_per_suite</c> for the
suite).</p>
<p><c>end_per_testcase/2</c> is called after each test case has
- finished, giving the opportunity to perform clean-up after
- <c>init_per_testcase</c>.</p>
+ finished, enabling cleanup after <c>init_per_testcase</c>.</p>
<p>The first argument to these functions is the name of the test
case. This value can be used with pattern matching in function clauses
or conditional expressions to choose different initialization and cleanup
- routines for different test cases, or perform the same routine for a number of,
+ routines for different test cases, or perform the same routine for many,
or all, test cases.</p>
<p>The second argument is the <c>Config</c> key-value list of runtime
configuration data, which has the same value as the list returned by
- <c>init_per_suite</c>. <c>init_per_testcase/2</c> may modify this
- parameter or return it as is. The return value of <c>init_per_testcase/2</c>
- is passed as the <c>Config</c> parameter to the test case itself.</p>
+ <c>init_per_suite</c>. <c>init_per_testcase/2</c> can modify this
+ parameter or return it "as is". The return value of <c>init_per_testcase/2</c>
+ is passed as parameter <c>Config</c> to the test case itself.</p>
<p>The return value of <c>end_per_testcase/2</c> is ignored by the
test server, with exception of the
- <seealso marker="dependencies_chapter#save_config">save_config</seealso>
+ <seealso marker="dependencies_chapter#save_config"><c>save_config</c></seealso>
and <c>fail</c> tuple.</p>
- <p>It is possible in <c>end_per_testcase</c> to check if the
- test case was successful or not (which consequently may determine
- how cleanup should be performed). This is done by reading the value
- tagged with <c>tc_status</c> from <c>Config</c>. The value is either
- <c>ok</c>, <c>{failed,Reason}</c> (where <c>Reason</c> is <c>timetrap_timeout</c>,
- info from <c>exit/1</c>, or details of a run-time error), or
- <c>{skipped,Reason}</c> (where Reason is a user specific term).
+    <p><c>end_per_testcase</c> can check if the test case was successful
+      (which in turn can determine how cleanup is to be performed).
+ This is done by reading the value tagged with <c>tc_status</c> from
+ <c>Config</c>. The value is one of the following:
</p>
-
- <p>The <c>end_per_testcase/2</c> function is called even after a
- test case terminates due to a call to <seealso marker="ct#abort_current_testcase-1"><c>ct:abort_current_testcase/1</c></seealso>,
- or after a timetrap timeout. However, <c>end_per_testcase</c>
- will then execute on a different process than the test case
- function, and in this situation, <c>end_per_testcase</c> will
- not be able to change the reason for test case termination by
- returning <c>{fail,Reason}</c>, nor will it be able to save data with
- <c>{save_config,Data}</c>.</p>
-
- <p>If <c>init_per_testcase</c> crashes, the test case itself gets skipped
- automatically (so called <em>auto skipped</em>). If <c>init_per_testcase</c>
- returns a tuple <c>{skip,Reason}</c>, also then the test case gets skipped
- (so called <em>user skipped</em>). It is also possible, by returning a tuple
- <c>{fail,Reason}</c> from <c>init_per_testcase</c>, to mark the test case
- as failed without actually executing it.
+ <list type="bulleted">
+ <item>
+ <p><c>ok</c></p>
+ </item>
+ <item>
+ <p><c>{failed,Reason}</c></p>
+ <p>where <c>Reason</c> is <c>timetrap_timeout</c>, information from <c>exit/1</c>,
+ or details of a runtime error</p></item>
+ <item>
+ <p><c>{skipped,Reason}</c></p>
+ <p>where <c>Reason</c> is a user-specific term</p></item>
+ </list>
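+
+    <p>For example, a sketch of an <c>end_per_testcase/2</c> that selects
+      cleanup strategy from <c>tc_status</c> (the cleanup helpers are made-up
+      placeholders):</p>
+    <pre>
+ end_per_testcase(_TestCase, Config) ->
+     case ?config(tc_status, Config) of
+         ok ->
+             light_cleanup(Config);   %% made-up helper
+         {failed,_Reason} ->
+             full_cleanup(Config);    %% made-up helper
+         {skipped,_Reason} ->
+             ok
+     end.</pre>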
+
+    <p>Function <c>end_per_testcase/2</c> is called even if a
+ test case terminates because of a call to
+ <seealso marker="ct#abort_current_testcase-1"><c>ct:abort_current_testcase/1</c></seealso>,
+ or after a timetrap time-out. However, <c>end_per_testcase</c>
+ then executes on a different process than the test case
+ function. In this situation, <c>end_per_testcase</c> cannot
+ change the reason for test case termination by returning <c>{fail,Reason}</c>
+ or save data with <c>{save_config,Data}</c>.</p>
+
+ <p>The test case is skipped in the following two cases:
</p>
+ <list type="bulleted">
+ <item>If <c>init_per_testcase</c> crashes (called <em>auto skipped</em>).</item>
+ <item>If <c>init_per_testcase</c> returns a tuple <c>{skip,Reason}</c>
+ (called <em>user skipped</em>).</item>
+ </list>
+ <p>The test case can also be marked as failed without executing it
+ by returning a tuple <c>{fail,Reason}</c> from <c>init_per_testcase</c>.</p>
+
<note><p>If <c>init_per_testcase</c> crashes, or returns <c>{skip,Reason}</c>
- or <c>{fail,Reason}</c>, the <c>end_per_testcase</c> function is not called.
+ or <c>{fail,Reason}</c>, function <c>end_per_testcase</c> is not called.
</p></note>
<p>If it is determined during execution of <c>end_per_testcase</c> that
- the status of a successful test case should be changed to failed,
- <c>end_per_testcase</c> may return the tuple: <c>{fail,Reason}</c>
+ the status of a successful test case is to be changed to failed,
+ <c>end_per_testcase</c> can return the tuple <c>{fail,Reason}</c>
(where <c>Reason</c> describes why the test case fails).</p>
- <p><c>init_per_testcase</c> and <c>end_per_testcase</c> execute on the
- same Erlang process as the test case and printouts from these
- configuration functions can be found in the test case log file.</p>
+ <p>As <c>init_per_testcase</c> and <c>end_per_testcase</c> execute on the
+ same Erlang process as the test case, printouts from these
+ configuration functions are included in the test case log file.</p>
</section>
<section>
<marker id="test_cases"></marker>
- <title>Test cases</title>
+ <title>Test Cases</title>
<p>The smallest unit that the test server is concerned with is a
- test case. Each test case can actually test many things, for
- example make several calls to the same interface function with
+ test case. Each test case can test many things, for
+ example, make several calls to the same interface function with
different parameters.
</p>
- <p>It is possible to choose to put many or few tests into each test
- case. What exactly each test case does is of course up to the
- author, but here are some things to keep in mind:
+ <p>The author can choose to put many or few tests into each test
+      case. Some things to keep in mind are as follows:
</p>
-
- <p>Having many small test cases tend to result in extra, and possibly
+ <list type="bulleted">
+ <item><p>Many small test cases tend to result in extra, and possibly
duplicated code, as well as slow test execution because of
- large overhead for initializations and cleanups. Duplicated
- code should be avoided, e.g. by means of common help functions, or
- the resulting suite will be difficult to read and understand, and
+ large overhead for initializations and cleanups. Avoid duplicated
+ code, for example, by using common help functions. Otherwise,
+ the resulting suite becomes difficult to read and understand, and
expensive to maintain.
- </p>
-
- <p>Larger test cases make it harder to tell what went wrong if it
- fails, and large portions of test code will potentially be skipped
- when errors occur. Furthermore, readability and maintainability suffers
- when test cases become too large and extensive. Also, the resulting log
- files may not reflect very well the number of tests that have
- actually been performed.
- </p>
+ </p></item>
+      <item><p>Larger test cases make it harder to tell what went wrong if they
+      fail. Also, large portions of test code risk being skipped
+ when errors occur.</p>
+ </item>
+ <item><p>Readability and maintainability suffer
+ when test cases become too large and extensive. It is not certain
+ that the resulting log files reflect very well the number of tests
+ performed.
+ </p></item>
+ </list>
<p>The test case function takes one argument, <c>Config</c>, which
contains configuration information such as <c>data_dir</c> and
- <c>priv_dir</c>. (See <seealso marker="#data_priv_dir">Data and
- Private Directories</seealso> for more information about these).
- The value of <c>Config</c> at the time of the call, is the same
- as the return value from <c>init_per_testcase</c>, see above.
+      <c>priv_dir</c>. (For details about these, see section
+      <seealso marker="#data_priv_dir">Data and Private Directories</seealso>.)
+      The value of <c>Config</c> at the time of the call is the same
+ as the return value from <c>init_per_testcase</c>, mentioned earlier.
</p>
- <note><p>The test case function argument <c>Config</c> should not be
- confused with the information that can be retrieved from
+ <note><p>The test case function argument <c>Config</c> is not to be
+ confused with the information that can be retrieved from the
configuration files (using <seealso marker="ct#get_config-1"><c>
- ct:get_config/1/2</c></seealso>). The Config argument
- should be used for runtime configuration of the test suite and the
- test cases, while configuration files should typically contain data
+ ct:get_config/1/2</c></seealso>). The test case argument <c>Config</c>
+ is to be used for runtime configuration of the test suite and the
+ test cases, while configuration files are to contain data
related to the SUT. These two types of configuration data are handled
- differently!</p></note>
+ differently.</p></note>
- <p>Since the <c>Config</c> parameter is a list of key-value tuples, i.e.
- a data type generally called a property list, it can be handled by means of the
- <c>proplists</c> module in the OTP <c>stdlib</c>. A value can for example
- be searched for and returned with the <c>proplists:get_value/2</c> function.
- Also, or alternatively, you might want to look in the general <c>lists</c> module,
- also in <c>stdlib</c>, for useful functions. Normally, the only operations you
- ever perform on <c>Config</c> is insert (adding a tuple to the head of the list)
- and lookup. Common Test provides a simple macro named <c>?config</c>, which returns
- a value of an item in <c>Config</c> given the key (exactly like
+ <p>As parameter <c>Config</c> is a list of key-value tuples, that is,
+ a data type called a property list, it can be handled by the
+ <seealso marker="stdlib:proplists"><c>stdlib:proplists</c></seealso> module.
+ A value can, for example, be searched for and returned with function
+ <seealso marker="stdlib:proplists#get_value-2"><c>proplists:get_value/2</c></seealso>.
+ Also, or alternatively, the general <seealso marker="stdlib:lists"><c>stdlib:lists</c></seealso>
+ module contains useful functions. Normally, the only operations
+ performed on <c>Config</c> is insert (adding a tuple to the head of the list)
+ and lookup. <c>Common Test</c> provides a simple macro named <c>?config</c>,
+ which returns a value of an item in <c>Config</c> given the key (exactly like
<c>proplists:get_value</c>). Example: <c>PrivDir = ?config(priv_dir, Config)</c>.
</p>
<p>If the test case function crashes or exits purposely, it is considered
- <em>failed</em>. If it returns a value (no matter what actual value) it is
+ <em>failed</em>. If it returns a value (no matter what value), it is
considered successful. An exception to this rule is the return value
<c>{skip,Reason}</c>. If this tuple is returned, the test case is considered
- skipped and gets logged as such.</p>
+ skipped and is logged as such.</p>
<p>If the test case returns the tuple <c>{comment,Comment}</c>, the case
- is considered successful and <c>Comment</c> is printed out in the overview
- log file. This is by the way equal to calling <c>ct:comment(Comment)</c>.
+ is considered successful and <c>Comment</c> is printed in the overview
+ log file. This is equal to calling
+ <seealso marker="ct#comment-1"><c>ct:comment(Comment)</c></seealso>.
</p>
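+    <p>To summarize, a minimal sketch of a test case that writes a file to
+      the private directory, fails through a crash (<c>badmatch</c>) if the
+      write does not succeed, and otherwise returns a comment (the file name
+      is arbitrary):</p>
+    <pre>
+ write_file_case(Config) ->
+     PrivDir = ?config(priv_dir, Config),
+     File = filename:join(PrivDir, "result.txt"),
+     %% a failed match here crashes, and thereby fails, the test case
+     ok = file:write_file(File, "test output"),
+     {comment,"File written"}.</pre>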
</section>
<section>
<marker id="info_function"></marker>
- <title>Test case info function</title>
+ <title>Test Case Information Function</title>
- <p>For each test case function there can be an additional function
- with the same name but with no arguments. This is the test case
- info function. The test case info function is expected to return a
- list of tagged tuples that specifies various properties regarding the
- test case.
+ <p>For each test case function there can be an extra function
+ with the same name but without arguments. This is the test case
+ information function. It is expected to return a list of tagged
+ tuples that specifies various properties regarding the test case.
</p>
<p>The following tags have special meaning:</p>
<taglist>
- <tag><em><c>timetrap</c></em></tag>
+ <tag><c>timetrap</c></tag>
<item>
<p>
- Set the maximum time the test case is allowed to execute. If
- the timetrap time is exceeded, the test case fails with
- reason <c>timetrap_timeout</c>. Note that <c>init_per_testcase</c>
+ Sets the maximum time the test case is allowed to execute. If
+ this time is exceeded, the test case fails with
+ reason <c>timetrap_timeout</c>. Notice that <c>init_per_testcase</c>
and <c>end_per_testcase</c> are included in the timetrap time.
- Please see the <seealso marker="write_test_chapter#timetraps">Timetrap</seealso>
- section for more details.
+ For details, see section
+ <seealso marker="write_test_chapter#timetraps">Timetrap Time-Outs</seealso>.
</p>
</item>
- <tag><em><c>userdata</c></em></tag>
+ <tag><c>userdata</c></tag>
<item>
<p>
- Use this to specify arbitrary data related to the testcase. This
- data can be retrieved at any time using the <seealso marker="ct#userdata-3"><c>ct:userdata/3</c></seealso>
+ Specifies any data related to the test case. This
+ data can be retrieved at any time using the
+ <seealso marker="ct#userdata-3"><c>ct:userdata/3</c></seealso>
utility function.
</p>
</item>
- <tag><em><c>silent_connections</c></em></tag>
+ <tag><c>silent_connections</c></tag>
<item>
<p>
- Please see the
- <seealso marker="run_test_chapter#silent_connections">Silent Connections</seealso>
- chapter for details.
+ For details, see section
+ <seealso marker="run_test_chapter#silent_connections">Silent Connections</seealso>.
</p>
</item>
- <tag><em><c>require</c></em></tag>
+ <tag><c>require</c></tag>
<item>
<p>
- Use this to specify configuration variables that are required by the
+ Specifies configuration variables required by the
test case. If the required configuration variables are not
found in any of the test system configuration files, the test case is
skipped.</p>
<p>
- It is also possible to give a required variable a default value that will
+ A required variable can also be given a default value to
be used if the variable is not found in any configuration file. To specify
- a default value, add a tuple on the form:
- <c>{default_config,ConfigVariableName,Value}</c> to the test case info list
+ a default value, add a tuple on the form
+ <c>{default_config,ConfigVariableName,Value}</c> to the test case information list
(the position in the list is irrelevant).
- Examples:</p>
+ </p>
+ <p><em>Examples:</em></p>
<pre>
- testcase1() ->
- [{require, ftp},
- {default_config, ftp, [{ftp, "my_ftp_host"},
- {username, "aladdin"},
- {password, "sesame"}]}}].</pre>
+ testcase1() ->
+     [{require, ftp},
+      {default_config, ftp, [{ftp, "my_ftp_host"},
+                             {username, "aladdin"},
+                             {password, "sesame"}]}].</pre>
<pre>
- testcase2() ->
- [{require, unix_telnet, unix},
- {require, {unix, [telnet, username, password]}},
- {default_config, unix, [{telnet, "my_telnet_host"},
- {username, "aladdin"},
- {password, "sesame"}]}}].</pre>
+ testcase2() ->
+     [{require, unix_telnet, unix},
+      {require, {unix, [telnet, username, password]}},
+      {default_config, unix, [{telnet, "my_telnet_host"},
+                              {username, "aladdin"},
+                              {password, "sesame"}]}].</pre>
</item>
</taglist>
- <p>See the <seealso marker="config_file_chapter#require_config_data">Config files</seealso>
- chapter and the <seealso marker="ct#require-1"><c>
- ct:require/1/2</c></seealso> function in the
- <seealso marker="ct">ct</seealso> reference manual for more information about
- <c>require</c>.</p>
+ <p>For more information about <c>require</c>, see section
+ <seealso marker="config_file_chapter#require_config_data">
+ Requiring and Reading Configuration Data</seealso>
+ in section External Configuration Data and function
+ <seealso marker="ct#require-1"><c>ct:require/1/2</c></seealso>.</p>
<note><p>Specifying a default value for a required variable can result
- in a test case always getting executed. This might not be a desired behaviour!</p>
+ in a test case always getting executed. This might not be a desired behavior.</p>
</note>
- <p>If <c>timetrap</c> and/or <c>require</c> is not set specifically for
- a particular test case, default values specified by the <c>suite/0</c>
- function are used.
+ <p>If <c>timetrap</c> or <c>require</c>, or both, is not set specifically for
+ a particular test case, default values specified by function
+ <seealso marker="common_test#Module:suite-0"><c>suite/0</c></seealso>
+ are used.
</p>
- <p>Other tags than the ones mentioned above will simply be ignored by
- the test server.
+    <p>Tags other than those mentioned earlier are ignored by the test server.
</p>
<p>
- Example of a test case info function:
+ An example of a test case information function follows:
</p>
<pre>
- reboot_node() ->
- [
- {timetrap,{seconds,60}},
- {require,interfaces},
- {userdata,
- [{description,"System Upgrade: RpuAddition Normal RebootNode"},
- {fts,"http://someserver.ericsson.se/test_doc4711.pdf"}]}
- ].</pre>
+ reboot_node() ->
+ [
+ {timetrap,{seconds,60}},
+ {require,interfaces},
+ {userdata,
+ [{description,"System Upgrade: RpuAddition Normal RebootNode"},
+ {fts,"http://someserver.ericsson.se/test_doc4711.pdf"}]}
+ ].</pre>
</section>
<section>
<marker id="suite"></marker>
- <title>Test suite info function</title>
-
- <p>The <c>suite/0</c> function can be used in a test suite
- module to e.g. set a default <c>timetrap</c> value and to
- <c>require</c> external configuration data. If a test case-, or
- group info function also specifies any of the info tags, it
- overrides the default values set by <c>suite/0</c>. See the test
- case info function above, and group info function below, for more
- details.
+ <title>Test Suite Information Function</title>
+
+ <p>Function <seealso marker="common_test#Module:suite-0"><c>suite/0</c></seealso>
+ can, for example, be used in a test suite module to set a default
+ <c>timetrap</c> value and to <c>require</c> external configuration data.
+      If a test case or group information function also specifies any of the information tags, it
+ overrides the default values set by <c>suite/0</c>. For details,
+ see
+ <seealso marker="#info_function">Test Case Information Function</seealso> and
+ <seealso marker="#test_case_groups">Test Case Groups</seealso>.
</p>
- <p>Other options that may be specified with the suite info list are:</p>
- <list>
+ <p>The following options can also be specified with the suite information list:</p>
+ <list type="bulleted">
<item><c>stylesheet</c>,
- see <seealso marker="run_test_chapter#html_stylesheet">HTML Style Sheets</seealso>.</item>
+ see <seealso marker="run_test_chapter#html_stylesheet">HTML Style Sheets</seealso></item>
<item><c>userdata</c>,
- see <seealso marker="#info_function">Test case info function</seealso>.</item>
+ see <seealso marker="#info_function">Test Case Information Function</seealso></item>
<item><c>silent_connections</c>,
- see <seealso marker="run_test_chapter#silent_connections">Silent Connections</seealso>.</item>
+ see <seealso marker="run_test_chapter#silent_connections">Silent Connections</seealso></item>
</list>
<p>
- Example of the suite info function:
+ An example of the suite information function follows:
</p>
<pre>
- suite() ->
- [
- {timetrap,{minutes,10}},
- {require,global_names},
- {userdata,[{info,"This suite tests database transactions."}]},
- {silent_connections,[telnet]},
- {stylesheet,"db_testing.css"}
- ].</pre>
+ suite() ->
+ [
+ {timetrap,{minutes,10}},
+ {require,global_names},
+ {userdata,[{info,"This suite tests database transactions."}]},
+ {silent_connections,[telnet]},
+ {stylesheet,"db_testing.css"}
+ ].</pre>
</section>
<section>
<marker id="test_case_groups"></marker>
- <title>Test case groups</title>
- <p>A test case group is a set of test cases that share configuration
+ <title>Test Case Groups</title>
+ <p>A test case group is a set of test cases sharing configuration
functions and execution properties. Test case groups are defined by
- means of the <c>groups/0</c> function according to the following syntax:</p>
+ function
+ <seealso marker="common_test#Module:groups-0"><c>groups/0</c></seealso>
+ according to the following syntax:</p>
<pre>
- groups() -> GroupDefs
+ groups() -> GroupDefs
- Types:
+ Types:
- GroupDefs = [GroupDef]
- GroupDef = {GroupName,Properties,GroupsAndTestCases}
- GroupName = atom()
- GroupsAndTestCases = [GroupDef | {group,GroupName} | TestCase]
- TestCase = atom()</pre>
+ GroupDefs = [GroupDef]
+ GroupDef = {GroupName,Properties,GroupsAndTestCases}
+ GroupName = atom()
+ GroupsAndTestCases = [GroupDef | {group,GroupName} | TestCase]
+ TestCase = atom()</pre>
- <p><c>GroupName</c> is the name of the group and should be unique within
- the test suite module. Groups may be nested, and this is accomplished
- simply by including a group definition within the <c>GroupsAndTestCases</c>
- list of another group. <c>Properties</c> is the list of execution
- properties for the group. The possible values are:</p>
+ <p><c>GroupName</c> is the name of the group and must be unique within
+ the test suite module. Groups can be nested, by including a group definition
+ within the <c>GroupsAndTestCases</c> list of another group.
+ <c>Properties</c> is the list of execution
+ properties for the group. The possible values are as follows:</p>
<pre>
- Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
- Shuffle = shuffle | {shuffle,Seed}
- Seed = {integer(),integer(),integer()}
- RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
- repeat_until_any_ok | repeat_until_any_fail
- N = integer() | forever</pre>
-
- <p>If the <c>parallel</c> property is specified, Common Test will execute
- all test cases in the group in parallel. If <c>sequence</c> is specified,
- the cases will be executed in a sequence, as described in the chapter
- <seealso marker="dependencies_chapter#sequences">Dependencies between
- test cases and suites</seealso>. If <c>shuffle</c> is specified, the cases
- in the group will be executed in random order. The <c>repeat</c> property
- orders Common Test to repeat execution of the cases in the group a given
- number of times, or until any, or all, cases fail or succeed.</p>
-
- <p>Example:</p>
+ Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+ Shuffle = shuffle | {shuffle,Seed}
+ Seed = {integer(),integer(),integer()}
+ RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+ repeat_until_any_ok | repeat_until_any_fail
+ N = integer() | forever</pre>
+
+ <p><em>Explanations:</em></p>
+ <taglist>
+ <tag><c>parallel</c></tag>
+ <item><p><c>Common Test</c> executes all test cases in the group in parallel.</p></item>
+ <tag><c>sequence</c></tag>
+ <item><p>The cases are executed in a sequence as described in section
+ <seealso marker="dependencies_chapter#sequences">Sequences</seealso> in section
+ Dependencies Between Test Cases and Suites.</p></item>
+ <tag><c>shuffle</c></tag>
+ <item><p>The cases in the group are executed in random order.</p></item>
+ <tag><c>repeat</c></tag>
+ <item><p>Orders <c>Common Test</c> to repeat execution of the cases in the
+ group a given number of times, or until any, or all, cases fail or succeed.</p></item>
+ </taglist>
+
+ <p><em>Example:</em></p>
<pre>
- groups() -> [{group1, [parallel], [test1a,test1b]},
- {group2, [shuffle,sequence], [test2a,test2b,test2c]}].</pre>
+ groups() -> [{group1, [parallel], [test1a,test1b]},
+ {group2, [shuffle,sequence], [test2a,test2b,test2c]}].</pre>
- <p>To specify in which order groups should be executed (also with respect
- to test cases that are not part of any group), tuples on the form
- <c>{group,GroupName}</c> should be added to the <c>all/0</c> list. Example:</p>
+ <p>To specify in which order groups are to be executed (also with respect
+ to test cases that are not part of any group), add tuples on the form
+ <c>{group,GroupName}</c> to the <c>all/0</c> list.</p>
+ <p><em>Example:</em></p>
<pre>
- all() -> [testcase1, {group,group1}, testcase2, {group,group2}].</pre>
+ all() -> [testcase1, {group,group1}, testcase2, {group,group2}].</pre>
- <p>It is also possible to specify execution properties with a group
- tuple in <c>all/0</c>: <c>{group,GroupName,Properties}</c>. These
- properties will override those specified in the group definition (see
- <c>groups/0</c> above). This way, it's possible to run the same set of tests,
+    <p>Execution properties can also be specified with a group tuple in
+      <c>all/0</c>: <c>{group,GroupName,Properties}</c>.
+ These properties override those specified in the group definition (see
+ <c>groups/0</c> earlier). This way, the same set of tests can be run,
but with different properties, without having to make copies of the group
definition in question.</p>
- <p>If a group contains sub-groups, the execution properties for these may
+ <p>If a group contains subgroups, the execution properties for these can
also be specified in the group tuple:
- <c>{group,GroupName,Properties,SubGroups}</c>, where <c>SubGroups</c>
- is a list of tuples, <c>{GroupName,Properties}</c>, or
- <c>{GroupName,Properties,SubGroups}</c>, representing the sub-groups.
- Any sub-groups defined in <c>group/0</c> for a group, that are not specified
- in the <c>SubGroups</c> list, will simply execute with their pre-defined
+      <c>{group,GroupName,Properties,SubGroups}</c>,
+      where <c>SubGroups</c> is a list of tuples, <c>{GroupName,Properties}</c> or
+      <c>{GroupName,Properties,SubGroups}</c>, representing the subgroups.
+      Any subgroups defined in <c>groups/0</c> for a group, but not specified
+      in the <c>SubGroups</c> list, execute with their predefined
properties.</p>
- <p>Example:</p>
+ <p><em>Example:</em></p>
<pre>
- groups() -> {tests1, [], [{tests2, [], [t2a,t2b]},
- {tests3, [], [t31,t3b]}]}.</pre>
- <p>To execute group 'tests1' twice with different properties for 'tests2'
+ groups() -> {tests1, [], [{tests2, [], [t2a,t2b]},
+                           {tests3, [], [t3a,t3b]}]}.</pre>
+ <p>To execute group <c>tests1</c> twice with different properties for <c>tests2</c>
each time:</p>
<pre>
- all() ->
- [{group, tests1, default, [{tests2, [parallel]}]},
- {group, tests1, default, [{tests2, [shuffle,{repeat,10}]}]}].</pre>
- <p>Note that this is equivalent to this specification:</p>
+ all() ->
+ [{group, tests1, default, [{tests2, [parallel]}]},
+ {group, tests1, default, [{tests2, [shuffle,{repeat,10}]}]}].</pre>
+ <p>This is equivalent to the following specification:</p>
<pre>
- all() ->
- [{group, tests1, default, [{tests2, [parallel]},
- {tests3, default}]},
- {group, tests1, default, [{tests2, [shuffle,{repeat,10}]},
- {tests3, default}]}].</pre>
- <p>The value <c>default</c> states that the pre-defined properties
- should be used.</p>
- <p>Here's an example of how to override properties in a scenario
+ all() ->
+ [{group, tests1, default, [{tests2, [parallel]},
+ {tests3, default}]},
+ {group, tests1, default, [{tests2, [shuffle,{repeat,10}]},
+ {tests3, default}]}].</pre>
+ <p>Value <c>default</c> states that the predefined properties
+ are to be used.</p>
+ <p>The following example shows how to override properties in a scenario
with deeply nested groups:</p>
<pre>
- groups() ->
- [{tests1, [], [{group, tests2}]},
- {tests2, [], [{group, tests3}]},
- {tests3, [{repeat,2}], [t3a,t3b,t3c]}].
-
- all() ->
- [{group, tests1, default,
- [{tests2, default,
- [{tests3, [parallel,{repeat,100}]}]}]}].</pre>
-
- <p>The syntax described above may also be used in Test Specifications
- in order to change properties of groups at the time of execution,
- without even having to edit the test suite (please see the
- <seealso marker="run_test_chapter#test_specifications">Test
- Specifications</seealso> chapter for more info).</p>
-
- <p>As illustrated above, properties may be combined. If e.g.
- <c>shuffle</c>, <c>repeat_until_any_fail</c> and <c>sequence</c>
- are all specified, the test cases in the group will be executed
+ groups() ->
+ [{tests1, [], [{group, tests2}]},
+ {tests2, [], [{group, tests3}]},
+ {tests3, [{repeat,2}], [t3a,t3b,t3c]}].
+
+ all() ->
+ [{group, tests1, default,
+ [{tests2, default,
+ [{tests3, [parallel,{repeat,100}]}]}]}].</pre>
+
+ <p>The described syntax can also be used in test specifications
+ to change group properties at the time of execution,
+ without having to edit the test suite. For more information, see
+ section <seealso marker="run_test_chapter#test_specifications">Test
+ Specifications</seealso> in section Running Tests and Analyzing Results.</p>
+
+ <p>As illustrated, properties can be combined. If, for example,
+ <c>shuffle</c>, <c>repeat_until_any_fail</c>, and <c>sequence</c>
+ are all specified, the test cases in the group are executed
repeatedly, and in random order, until a test case fails. Then
- execution is immediately stopped and the rest of the cases skipped.</p>
+ execution is immediately stopped and the remaining cases are skipped.</p>
<p>Before execution of a group begins, the configuration function
- <c>init_per_group(GroupName, Config)</c> is called. The list of tuples
- returned from this function is passed to the test cases in the usual
- manner by means of the <c>Config</c> argument. <c>init_per_group/2</c>
- is meant to be used for initializations common for the test cases in the
- group. After execution of the group is finished, the
- <c>end_per_group(GroupName, Config</c> function is called. This function
- is meant to be used for cleaning up after <c>init_per_group/2</c>.</p>
+ <seealso marker="common_test#Module:init_per_group-2"><c>init_per_group(GroupName, Config)</c></seealso>
+ is called. The list of tuples returned from this function is passed to the
+ test cases in the usual manner by argument <c>Config</c>.
+ <c>init_per_group/2</c> is meant to be used for initializations common
+ for the test cases in the group. After execution of the group is finished, function
+ <seealso marker="common_test#Module:end_per_group-2"><c>end_per_group(GroupName, Config)</c></seealso>
+ is called. This function is meant to be used for cleaning up after
+ <c>init_per_group/2</c>.</p>
<p>Whenever a group is executed, if <c>init_per_group</c> and
- <c>end_per_group</c> do not exist in the suite, Common Test calls
+ <c>end_per_group</c> do not exist in the suite, <c>Common Test</c> calls
dummy functions (with the same names) instead. Output generated by
- hook functions will be saved to the log files for these dummies
- (see the <seealso marker="ct_hooks_chapter#manipulating">Common Test
- Hooks</seealso> chapter for more information).
+      hook functions is saved to the log files for these dummies.
+ For more information, see section
+ <seealso marker="ct_hooks_chapter#manipulating">Manipulating Tests</seealso>
+ in section Common Test Hooks.
</p>
<note><p><c>init_per_testcase/2</c> and <c>end_per_testcase/2</c>
are always called for each individual test case, no matter if the case
belongs to a group or not.</p></note>
- <p>The properties for a group is always printed on the top of the HTML log
- for <c>init_per_group/2</c>. Also, the total execution time for a group
- can be found at the bottom of the log for <c>end_per_group/2</c>.</p>
+    <p>The properties for a group are always printed at the top of the HTML log
+ for <c>init_per_group/2</c>. The total execution time for a group is
+ included at the bottom of the log for <c>end_per_group/2</c>.</p>
- <p>Test case groups may be nested so that sets of groups can be
+    <p>Test case groups can be nested so that sets of groups can be
configured with the same <c>init_per_group/2</c> and <c>end_per_group/2</c>
- functions. Nested groups may be defined by including a group definition,
- or a group name reference, in the test case list of another group. Example:</p>
+ functions. Nested groups can be defined by including a group definition,
+ or a group name reference, in the test case list of another group.</p>
+ <p><em>Example:</em></p>
<pre>
- groups() -> [{group1, [shuffle], [test1a,
- {group2, [], [test2a,test2b]},
- test1b]},
- {group3, [], [{group,group4},
- {group,group5}]},
- {group4, [parallel], [test4a,test4b]},
- {group5, [sequence], [test5a,test5b,test5c]}].</pre>
-
- <p>In the example above, if <c>all/0</c> would return group name references
- in this order: <c>[{group,group1},{group,group3}]</c>, the order of the
- configuration functions and test cases will be the following (note that
+ groups() -> [{group1, [shuffle], [test1a,
+ {group2, [], [test2a,test2b]},
+ test1b]},
+ {group3, [], [{group,group4},
+ {group,group5}]},
+ {group4, [parallel], [test4a,test4b]},
+ {group5, [sequence], [test5a,test5b,test5c]}].</pre>
+
+ <p>In the previous example, if <c>all/0</c> returns group name references
+ in the order <c>[{group,group1},{group,group3}]</c>, the order of the
+ configuration functions and test cases becomes the following (notice that
    <c>init_per_testcase/2</c> and <c>end_per_testcase/2</c> are also
    always called, but not included in this example for simplicity):</p>
<pre>
-- init_per_group(group1, Config) -> Config1 (*)
-
--- test1a(Config1)
-
--- init_per_group(group2, Config1) -> Config2
-
---- test2a(Config2), test2b(Config2)
-
--- end_per_group(group2, Config2)
-
--- test1b(Config1)
-
-- end_per_group(group1, Config1)
-
-- init_per_group(group3, Config) -> Config3
-
--- init_per_group(group4, Config3) -> Config4
-
---- test4a(Config4), test4b(Config4) (**)
-
--- end_per_group(group4, Config4)
-
--- init_per_group(group5, Config3) -> Config5
-
---- test5a(Config5), test5b(Config5), test5c(Config5)
-
--- end_per_group(group5, Config5)
-
-- end_per_group(group3, Config3)
-
-
- (*) The order of test case test1a, test1b and group2 is not actually
- defined since group1 has a shuffle property.
-
- (**) These cases are not executed in order, but in parallel.</pre>
-
- <p>Properties are not inherited from top level groups to nested
- sub-groups. E.g, in the example above, the test cases in <c>group2</c>
- will not be executed in random order (which is the property of
- <c>group1</c>).</p>
+ init_per_group(group1, Config) -> Config1 (*)
+ test1a(Config1)
+ init_per_group(group2, Config1) -> Config2
+ test2a(Config2), test2b(Config2)
+ end_per_group(group2, Config2)
+ test1b(Config1)
+ end_per_group(group1, Config1)
+ init_per_group(group3, Config) -> Config3
+ init_per_group(group4, Config3) -> Config4
+ test4a(Config4), test4b(Config4) (**)
+ end_per_group(group4, Config4)
+ init_per_group(group5, Config3) -> Config5
+ test5a(Config5), test5b(Config5), test5c(Config5)
+ end_per_group(group5, Config5)
+ end_per_group(group3, Config3)</pre>
+
+   <p>(*) The order of <c>test1a</c>, <c>test1b</c>, and <c>group2</c> is
+ undefined, as <c>group1</c> has a shuffle property.</p>
+ <p>(**) These cases are not executed in order, but in parallel.</p>
+ <p>Properties are not inherited from top-level groups to nested
+ subgroups. For instance, in the previous example, the test cases in <c>group2</c>
+ are not executed in random order (which is the property of <c>group1</c>).</p>
</section>
<section>
- <title>The parallel property and nested groups</title>
- <p>If a group has a parallel property, its test cases will be spawned
- simultaneously and get executed in parallel. A test case is not allowed
- to execute in parallel with <c>end_per_group/2</c> however, which means
- that the time it takes to execute a parallel group is equal to the
+ <title>Parallel Property and Nested Groups</title>
+ <p>If a group has a parallel property, its test cases are spawned
+ simultaneously and get executed in parallel. However, a test case is not
+ allowed to execute in parallel with <c>end_per_group/2</c>, which means
+ that the time to execute a parallel group is equal to the
execution time of the slowest test case in the group. A negative side
effect of running test cases in parallel is that the HTML summary pages
- are not updated with links to the individual test case logs until the
- <c>end_per_group/2</c> function for the group has finished.</p>
+ are not updated with links to the individual test case logs until function
+ <c>end_per_group/2</c> for the group has finished.</p>
- <p>A group nested under a parallel group will start executing in parallel
+ <p>A group nested under a parallel group starts executing in parallel
with previous (parallel) test cases (no matter what properties the nested
- group has). Since, however, test cases are never executed in parallel with
- <c>init_per_group/2</c> or <c>end_per_group/2</c> of the same group, it's
- only after a nested group has finished that any remaining parallel cases
- in the previous group get spawned.</p>
+ group has). However, as test cases are never executed in parallel with
+ <c>init_per_group/2</c> or <c>end_per_group/2</c> of the same group, it is
+ only after a nested group has finished that remaining parallel cases
+      in the previous group are spawned.</p>
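+    <p>As an illustration (with made-up group and test case names), in the
+      following definition <c>nested</c> starts executing in parallel with
+      <c>tc1</c>, while <c>tc2</c> is not spawned until <c>nested</c> has
+      finished:</p>
+    <pre>
+ groups() ->
+     [{par_group, [parallel], [tc1, {group,nested}, tc2]},
+      {nested, [sequence], [tc3,tc4]}].</pre>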
</section>
<section>
- <title>Parallel test cases and IO</title>
- <p>A parallel test case has a private IO server as its group leader.
- (Please see the Erlang Run-Time System Application documentation for
- a description of the group leader concept). The
- central IO server process that handles the output from regular test
- cases and configuration functions, does not respond to IO messages
+ <title>Parallel Test Cases and I/O</title>
+ <p>A parallel test case has a private I/O server as its group leader.
+ (For a description of the group leader concept, see
+ <seealso marker="erts:index"><c>ERTS</c></seealso>).
+ The central I/O server process, which handles the output from
+ regular test cases and configuration functions, does not respond to I/O messages
during execution of parallel groups. This is important to understand
- in order to avoid certain traps, like this one:</p>
- <p>If a process, <c>P</c>, is spawned during execution of e.g.
- <c>init_per_suite/1</c>, it will inherit the group leader of the
- <c>init_per_suite</c> process. This group leader is the central IO server
- process mentioned above. If, at a later time, <em>during parallel test case
+ to avoid certain traps, like the following:</p>
+ <p>If a process, <c>P</c>, is spawned during execution of, for example,
+ <c>init_per_suite/1</c>, it inherits the group leader of the
+ <c>init_per_suite</c> process. This group leader is the central I/O server
+ process mentioned earlier. If, at a later time, <em>during parallel test case
execution</em>, some event triggers process <c>P</c> to call
- <c>io:format/1/2</c>, that call will never return (since the group leader
- is in a non-responsive state) and cause <c>P</c> to hang.
+ <c>io:format/1/2</c>, that call never returns (as the group leader
+ is in a non-responsive state) and causes <c>P</c> to hang.
</p>
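+    <p>A sketch of the trap follows (the names are made up; do not do this in
+      a suite that runs parallel groups):</p>
+    <pre>
+ init_per_suite(Config) ->
+     %% The spawned process inherits the central I/O server as group leader.
+     Pid = spawn(fun() ->
+                         receive go ->
+                                 %% If this executes during a parallel group,
+                                 %% the call never returns and the process hangs:
+                                 io:format("message from helper~n")
+                         end
+                 end),
+     [{helper,Pid}|Config].</pre>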
</section>
<section>
- <title>Repeated groups</title>
+ <title>Repeated Groups</title>
<marker id="repeated_groups"></marker>
- <p>A test case group may be repeated a certain number of times
+ <p>A test case group can be repeated a certain number of times
(specified by an integer) or indefinitely (specified by <c>forever</c>).
- The repetition may also be stopped prematurely if any or all cases
- fail or succeed, i.e. if the property <c>repeat_until_any_fail</c>,
+      The repetition can also be stopped prematurely if any or all cases
+ fail or succeed, that is, if any of the properties <c>repeat_until_any_fail</c>,
<c>repeat_until_any_ok</c>, <c>repeat_until_all_fail</c>, or
<c>repeat_until_all_ok</c> is used. If the basic <c>repeat</c>
property is used, status of test cases is irrelevant for the repeat
operation.</p>
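+    <p>For example, a made-up group definition that repeats its cases until
+      a case fails:</p>
+    <pre>
+ groups() ->
+     [{stress_tests, [{repeat_until_any_fail,forever}], [tc1,tc2]}].</pre>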
- <p>It is possible to return the status of a sub-group (ok or
- failed), to affect the execution of the group on the level above.
+ <p>The status of a subgroup can be returned (<c>ok</c> or
+ <c>failed</c>), to affect the execution of the group on the level above.
This is accomplished by, in <c>end_per_group/2</c>, looking up the value
    of <c>tc_group_result</c> in the <c>Config</c> list and checking the
- result of the test cases in the group. If status <c>failed</c> should be
- returned from the group as a result, <c>end_per_group/2</c> should return
- the value <c>{return_group_result,failed}</c>. The status of a sub-group
- is taken into account by Common Test when evaluating if execution of a
- group should be repeated or not (unless the basic <c>repeat</c>
+ result of the test cases in the group. If status <c>failed</c> is to be
+ returned from the group as a result, <c>end_per_group/2</c> is to return
+ the value <c>{return_group_result,failed}</c>. The status of a subgroup
+ is taken into account by <c>Common Test</c> when evaluating if execution of a
+ group is to be repeated or not (unless the basic <c>repeat</c>
property is used).</p>
- <p>The <c>tc_group_properties</c> value is a list of status tuples,
- each with the key <c>ok</c>, <c>skipped</c> and <c>failed</c>. The
- value of a status tuple is a list containing names of test cases
+     <p>The value of <c>tc_group_result</c> is a list of status tuples
+     with the keys <c>ok</c>, <c>skipped</c>, and <c>failed</c>. The
+     value of a status tuple is a list with names of test cases
that have been executed with the corresponding status as result.</p>
- <p>Here's an example of how to return the status from a group:</p>
+ <p>The following is an example of how to return the status from a group:</p>
<pre>
- end_per_group(_Group, Config) ->
- Status = ?config(tc_group_result, Config),
- case proplists:get_value(failed, Status) of
- [] -> % no failed cases
- {return_group_result,ok};
- _Failed -> % one or more failed
- {return_group_result,failed}
- end.</pre>
-
- <p>It is also possible in <c>end_per_group/2</c> to check the status of
- a sub-group (maybe to determine what status the current group should also
- return). This is as simple as illustrated in the example above, only the
- name of the group is stored in a tuple <c>{group_result,GroupName}</c>,
- which can be searched for in the status lists. Example:</p>
+ end_per_group(_Group, Config) ->
+ Status = ?config(tc_group_result, Config),
+ case proplists:get_value(failed, Status) of
+ [] -> % no failed cases
+ {return_group_result,ok};
+ _Failed -> % one or more failed
+ {return_group_result,failed}
+ end.</pre>
+
+ <p>It is also possible, in <c>end_per_group/2</c>, to check the status of
+ a subgroup (maybe to determine what status the current group is to
+ return). This is as simple as illustrated in the previous example, only the
+ group name is stored in a tuple <c>{group_result,GroupName}</c>,
+ which can be searched for in the status lists.</p>
+ <p><em>Example:</em></p>
<pre>
- end_per_group(group1, Config) ->
- Status = ?config(tc_group_result, Config),
- Failed = proplists:get_value(failed, Status),
- case lists:member({group_result,group2}, Failed) of
- true ->
- {return_group_result,failed};
- false ->
- {return_group_result,ok}
- end;
- ...</pre>
+ end_per_group(group1, Config) ->
+ Status = ?config(tc_group_result, Config),
+ Failed = proplists:get_value(failed, Status),
+ case lists:member({group_result,group2}, Failed) of
+ true ->
+ {return_group_result,failed};
+ false ->
+ {return_group_result,ok}
+ end;
+ ...</pre>
<note><p>When a test case group is repeated, the configuration
- functions, <c>init_per_group/2</c> and <c>end_per_group/2</c>, are
+ functions <c>init_per_group/2</c> and <c>end_per_group/2</c> are
also always called with each repetition.</p></note>
</section>
<section>
- <title>Shuffled test case order</title>
- <p>The order that test cases in a group are executed, is under normal
+ <title>Shuffled Test Case Order</title>
+ <p>The order in which test cases in a group are executed is under normal
circumstances the same as the order specified in the test case list
- in the group definition. With the <c>shuffle</c> property set, however,
- Common Test will instead execute the test cases in random order.</p>
+ in the group definition. With property <c>shuffle</c> set, however,
+ <c>Common Test</c> instead executes the test cases in random order.</p>
- <p>The user may provide a seed value (a tuple of three integers) with
- the shuffle property: <c>{shuffle,Seed}</c>. This way, the same shuffling
+ <p>You can provide a seed value (a tuple of three integers) with
+ the shuffle property <c>{shuffle,Seed}</c>. This way, the same shuffling
order can be created every time the group is executed. If no seed value
- is given, Common Test creates a "random" seed for the shuffling operation
- (using the return value of <c>erlang:now()</c>). The seed value is always
+ is specified, <c>Common Test</c> creates a "random" seed for the shuffling operation
+ (using the return value of <c>erlang:timestamp/0</c>). The seed value is always
printed to the <c>init_per_group/2</c> log file so that it can be used to
recreate the same execution order in a subsequent test run.</p>
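+    <p>For example, to always shuffle with the same (made-up) seed value:</p>
+    <pre>
+ groups() ->
+     [{my_group, [{shuffle,{1,2,3}}], [tc1,tc2,tc3]}].</pre>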
- <note><p>If a shuffled test case group is repeated, the seed will not
- be reset in between turns.</p></note>
+ <note><p>If a shuffled test case group is repeated, the seed is not
+ reset between turns.</p></note>
- <p>If a sub-group is specified in a group with a <c>shuffle</c> property,
- the execution order of this sub-group in relation to the test cases
- (and other sub-groups) in the group, is also random. The order of the
- test cases in the sub-group is however not random (unless, of course, the
- sub-group also has a <c>shuffle</c> property).</p>
+ <p>If a subgroup is specified in a group with a <c>shuffle</c> property,
+ the execution order of this subgroup in relation to the test cases
+ (and other subgroups) in the group, is random. The order of the
+ test cases in the subgroup is however not random (unless the
+ subgroup has a <c>shuffle</c> property).</p>
</section>
<section>
<marker id="group_info"></marker>
- <title>Group info function</title>
+ <title>Group Information Function</title>
- <p>The test case group info function, <c>group(GroupName)</c>,
- serves the same purpose as the suite- and test case info
- functions previously described in this chapter. The scope for
- the group info, however, is all test cases and sub-groups in the
+ <p>The test case group information function, <c>group(GroupName)</c>,
+ serves the same purpose as the suite- and test case information
+ functions previously described. However, the scope for
+      the group information function is all test cases and subgroups in the
group in question (<c>GroupName</c>).</p>
- <p>Example:</p>
+ <p><em>Example:</em></p>
<pre>
- group(connection_tests) ->
- [{require,login_data},
- {timetrap,1000}].</pre>
+ group(connection_tests) ->
+ [{require,login_data},
+ {timetrap,1000}].</pre>
- <p>The group info properties override those set with the
- suite info function, and may in turn be overridden by test
- case info properties. Please see the test case info
- function above for a list of valid info properties and more
- general information.</p>
+ <p>The group information properties override those set with the
+ suite information function, and can in turn be overridden by test
+ case information properties. For a list of valid information properties
+ and more general information, see the
+ <seealso marker="#info_function">Test Case Information Function</seealso>.
+ </p>
</section>
<section>
- <title>Info functions for init- and end-configuration</title>
- <p>It is possible to use info functions also for the <c>init_per_suite</c>,
- <c>end_per_suite</c>, <c>init_per_group</c>, and <c>end_per_group</c>
- functions, and it works the same way as with info functions
- for test cases (see above). This is useful e.g. for setting
- timetraps and requiring external configuration data relevant
- only for the configuration function in question (without
- affecting properties set for groups and test cases in the suite).</p>
-
- <p>The info function <c>init/end_per_suite()</c> is called for
- <c>init/end_per_suite(Config)</c>, and info function
+ <title>Information Functions for Init- and End-Configuration</title>
+ <p>Information functions can also be used for functions <c>init_per_suite</c>,
+ <c>end_per_suite</c>, <c>init_per_group</c>, and <c>end_per_group</c>,
+ and they work the same way as with the
+ <seealso marker="#info_function">Test Case Information Function</seealso>.
+ This is useful, for example, for setting timetraps and requiring
+ external configuration data relevant only for the configuration
+ function in question (without affecting properties set for groups
+ and test cases in the suite).</p>
+
+ <p>The information function <c>init/end_per_suite()</c> is called for
+ <c>init/end_per_suite(Config)</c>, and information function
<c>init/end_per_group(GroupName)</c> is called for
- <c>init/end_per_group(GroupName,Config)</c>. Info functions
- can not be used with <c>init/end_per_testcase(TestCase, Config)</c>,
- however, since these configuration functions execute on the test case process
- and will use the same properties as the test case (i.e. the properties
- set by the test case info function, <c>TestCase()</c>). Please see the test case
- info function above for a list of valid info properties and more
- general information.
+ <c>init/end_per_group(GroupName,Config)</c>. However, information functions
+ cannot be used with <c>init/end_per_testcase(TestCase, Config)</c>,
+ as these configuration functions execute on the test case process
+ and use the same properties as the test case (that is, the properties
+ set by the test case information function, <c>TestCase()</c>). For a list
+ of valid information properties and more general information, see the
+ <seealso marker="#info_function">Test Case Information Function</seealso>.
</p>
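+    <p>A minimal sketch (the configuration variable name is hypothetical)
+    of an information function for <c>init_per_suite/1</c>:</p>
+    <pre>
+ init_per_suite() ->
+     [{timetrap,{minutes,10}},
+      {require,unix_telnet}].
+
+ init_per_suite(Config) ->
+     %% Executes with the timetrap and required variable set above
+     Config.</pre>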
</section>
@@ -789,77 +805,67 @@
<marker id="data_priv_dir"></marker>
<title>Data and Private Directories</title>
- <p>The data directory, <c>data_dir</c>, is the directory where the
- test module has its own files needed for the testing. The name
- of the <c>data_dir</c> is the the name of the test suite followed
- by <c>"_data"</c>. For example,
- <c>"some_path/foo_SUITE.beam"</c> has the data directory
+ <p>In the data directory, <c>data_dir</c>, the test module has
+ its own files needed for the testing. The name of <c>data_dir</c>
+      is the name of the test suite followed by <c>"_data"</c>.
+ For example, <c>"some_path/foo_SUITE.beam"</c> has the data directory
<c>"some_path/foo_SUITE_data/"</c>. Use this directory for portability,
- i.e. to avoid hardcoding directory names in your suite. Since the data
- directory is stored in the same directory as your test suite, you should
- be able to rely on its existence at runtime, even if the path to your
+ that is, to avoid hardcoding directory names in your suite. As the data
+ directory is stored in the same directory as your test suite, you can
+ rely on its existence at runtime, even if the path to your
test suite directory has changed between test suite implementation and
execution.
</p>
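+    <p>For example, a test case can read a file from <c>data_dir</c> as
+    follows (the file name is hypothetical):</p>
+    <pre>
+ read_input(Config) ->
+     DataDir = ?config(data_dir, Config),
+     {ok,Bin} = file:read_file(filename:join(DataDir, "input.txt")),
+     Bin.</pre>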
-
-<!--
- <p>
- When using the Common Test framework <c>ct</c>, automatic
- compilation of code in the data directory can be obtained by
- placing a makefile source called Makefile.src in the data
- directory. Makefile.src will be converted to a valid makefile by
- <c>ct</c> when the test suite is run. See the reference manual for
- the <c>ct</c> module for details about the syntax of Makefile.src.
- </p>
--->
<p>
<c>priv_dir</c> is the private directory for the test cases.
- This directory may be used whenever a test case (or configuration function)
+ This directory can be used whenever a test case (or configuration function)
needs to write something to file. The name of the private directory is
- generated by Common Test, which also creates the directory.
+ generated by <c>Common Test</c>, which also creates the directory.
</p>
- <p>By default, Common Test creates one central private directory
- per test run that all test cases share. This may not always be suitable,
- especially if the same test cases are executed multiple times during
- a test run (e.g. if they belong to a test case group with repeat
- property), and there's a risk that files in the private directory get
- overwritten. Under these circumstances, it's possible to configure
- Common Test to create one dedicated private directory per
- test case and execution instead. This is accomplished by means of
- the flag/option: <c>create_priv_dir</c> (to be used with the
- <c>ct_run</c> program, the <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso> function, or
+ <p>By default, <c>Common Test</c> creates one central private directory
+      per test run, shared by all test cases. This is not always suitable,
+      especially if the same test cases are executed multiple times during
+ a test run (that is, if they belong to a test case group with property
+ <c>repeat</c>) and there is a risk that files in the private directory get
+ overwritten. Under these circumstances, <c>Common Test</c> can be
+ configured to create one dedicated private directory per
+ test case and execution instead. This is accomplished with
+ the flag/option <c>create_priv_dir</c> (to be used with the
+ <seealso marker="ct_run"><c>ct_run</c></seealso> program, the
+ <seealso marker="ct#run_test-1"><c>ct:run_test/1</c></seealso> function, or
    as a test specification term). There are three possible values
- for this option:
+ for this option as follows:
</p>
- <list>
+ <list type="bulleted">
<item><c>auto_per_run</c></item>
<item><c>auto_per_tc</c></item>
<item><c>manual_per_tc</c></item>
</list>
<p>
- The first value indicates the default priv_dir behaviour, i.e.
+ The first value indicates the default <c>priv_dir</c> behavior, that is,
one private directory created per test run. The two latter
- values tell Common Test to generate a unique test directory name
+ values tell <c>Common Test</c> to generate a unique test directory name
per test case and execution. If the auto version is used, <em>all</em>
- private directories will be created automatically. This can obviously
- become very inefficient for test runs with many test cases and/or
- repetitions. Therefore, in case the manual version is instead used, the
- test case must tell Common Test to create priv_dir when it needs it.
- It does this by calling the function <seealso marker="ct#make_priv_dir-0"><c>ct:make_priv_dir/0</c></seealso>.
+ private directories are created automatically. This can become very
+ inefficient for test runs with many test cases or repetitions, or both.
+ Therefore, if the manual version is used instead, the test case must tell
+ <c>Common Test</c> to create <c>priv_dir</c> when it needs it.
+ It does this by calling the function
+ <seealso marker="ct#make_priv_dir-0"><c>ct:make_priv_dir/0</c></seealso>.
</p>
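+    <p>A sketch (test case and file names are hypothetical) of a test case
+    using the manual private directory creation:</p>
+    <pre>
+ write_result(Config) ->
+     ok = ct:make_priv_dir(),
+     PrivDir = ?config(priv_dir, Config),
+     ok = file:write_file(filename:join(PrivDir, "result.txt"), "ok").</pre>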
- <note><p>You should not depend on current working directory for
- reading and writing data files since this is not portable. All
+ <note><p>Do not depend on the current working directory for
+ reading and writing data files, as this is not portable. All
scratch files are to be written in the <c>priv_dir</c> and all
- data files should be located in <c>data_dir</c>. Note also that
- the Common Test server sets current working directory to the test case
- log directory at the start of every case.
+ data files are to be located in <c>data_dir</c>. Also,
+ the <c>Common Test</c> server sets the current working directory to
+ the test case log directory at the start of every case.
</p></note>
</section>
<section>
- <title>Execution environment</title>
+ <title>Execution Environment</title>
<p>Each test case is executed by a dedicated Erlang process. The
process is spawned when the test case starts, and terminated when
@@ -876,236 +882,263 @@
<section>
<marker id="timetraps"></marker>
- <title>Timetrap timeouts</title>
+ <title>Timetrap Time-Outs</title>
<p>The default time limit for a test case is 30 minutes, unless a
<c>timetrap</c> is specified either by the suite-, group-,
- or test case info function. The timetrap timeout value defined by
- <c>suite/0</c> is the value that will be used for each test case
- in the suite (as well as for the configuration functions
+ or test case information function. The timetrap time-out value defined by
+ <c>suite/0</c> is the value that is used for each test case
+ in the suite (and for the configuration functions
<c>init_per_suite/1</c>, <c>end_per_suite/1</c>, <c>init_per_group/2</c>,
and <c>end_per_group/2</c>). A timetrap value defined by
<c>group(GroupName)</c> overrides one defined by <c>suite()</c>
- and will be used for each test case in group <c>GroupName</c>, and any
- of its sub-groups. If a timetrap value is defined by <c>group/1</c>
- for a sub-group, it overrides that of its higher level groups. Timetrap
- values set by individual test cases (by means of the test case info
+ and is used for each test case in group <c>GroupName</c>, and any
+ of its subgroups. If a timetrap value is defined by <c>group/1</c>
+ for a subgroup, it overrides that of its higher level groups. Timetrap
+ values set by individual test cases (by the test case information
function) override both group- and suite- level timetraps.</p>
- <p>It is also possible to dynamically set/reset a timetrap during the
- excution of a test case, or configuration function. This is done by calling
- <seealso marker="ct#timetrap-1"><c>ct:timetrap/1</c></seealso>. This function cancels the current timetrap
- and starts a new one (that stays active until timeout, or end of the
- current function).</p>
+ <p>A timetrap can also be set or reset dynamically during the
+ execution of a test case, or configuration function.
+ This is done by calling
+ <seealso marker="ct#timetrap-1"><c>ct:timetrap/1</c></seealso>.
+ This function cancels the current timetrap and starts a new one
+ (that stays active until time-out, or end of the current function).</p>
<p>Timetrap values can be extended with a multiplier value specified at
- startup with the <c>multiply_timetraps</c> option. It is also possible
- to let the test server decide to scale up timetrap timeout values
- automatically, e.g. if tools such as cover or trace are running during
- the test. This feature is disabled by default and can be enabled with
- the <c>scale_timetraps</c> start option.</p>
+ startup with option <c>multiply_timetraps</c>. It is also possible
+ to let the test server decide to scale up timetrap time-out values
+    automatically, for example, if tools such as <c>cover</c> or <c>trace</c>
+    are running during the test. This feature is disabled by default and
+ can be enabled with start option <c>scale_timetraps</c>.</p>
<p>If a test case needs to suspend itself for a time that also gets
    multiplied by <c>multiply_timetraps</c> (and possibly also scaled up if
- <c>scale_timetraps</c> is enabled), the function <seealso marker="ct#sleep-1"><c>ct:sleep/1</c></seealso>
- may be used (instead of e.g. <c>timer:sleep/1</c>).</p>
+ <c>scale_timetraps</c> is enabled), the function
+ <seealso marker="ct#sleep-1"><c>ct:sleep/1</c></seealso>
+ can be used (instead of, for example, <c>timer:sleep/1</c>).</p>
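+    <p>For example, a one second pause that gets multiplied and scaled
+    like the timetrap values:</p>
+    <pre>
+ ct:sleep({seconds,1}),</pre>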
- <p>A function (<c>fun/0</c> or <c>MFA</c>) may be specified as
- timetrap value in the suite-, group- and test case info function, as
- well as argument to the <seealso marker="ct#timetrap-1"><c>ct:timetrap/1</c></seealso> function. Examples:</p>
+ <p>A function (<c>fun/0</c> or <c>{Mod,Func,Args}</c> (MFA) tuple) can be
+ specified as timetrap value in the suite-, group- and test case information
+ function, and as argument to function
+ <seealso marker="ct#timetrap-1"><c>ct:timetrap/1</c></seealso>.</p>
+ <p><em>Examples:</em></p>
<p><c>{timetrap,{my_test_utils,timetrap,[?MODULE,system_start]}}</c></p>
<p><c>ct:timetrap(fun() -> my_timetrap(TestCaseName, Config) end)</c></p>
- <p>The user timetrap function may be used for two things:</p>
- <list>
- <item>To act as a timetrap - the timeout is triggered when the
+ <p>The user timetrap function can be used for two things as follows:</p>
+ <list type="bulleted">
+ <item>To act as a timetrap. The time-out is triggered when the
function returns.</item>
<item>To return a timetrap time value (other than a function).</item>
</list>
<p>Before execution of the timetrap function (which is performed
- on a parallel, dedicated timetrap process), Common Test cancels
+ on a parallel, dedicated timetrap process), <c>Common Test</c> cancels
any previously set timer for the test case or configuration function.
- When the timetrap function returns, the timeout is triggered, <em>unless</em>
+ When the timetrap function returns, the time-out is triggered, <em>unless</em>
the return value is a valid timetrap time, such as an integer,
- or a <c>{SecMinOrHourTag,Time}</c> tuple (see the
- <seealso marker="common_test">common_test reference manual</seealso> for
- details). If a time value is returned, a new timetrap is started
- to generate a timeout after the specified time.</p>
+ or a <c>{SecMinOrHourTag,Time}</c> tuple (for details, see module
+ <seealso marker="common_test">common_test</seealso>). If a time value
+ is returned, a new timetrap is started to generate a time-out after
+ the specified time.</p>
- <p>The user timetrap function may of course return a time value after a delay,
- and if so, the effective timetrap time is the delay time <em>plus</em> the
+ <p>The user timetrap function can return a time value after a delay.
+ The effective timetrap time is then the delay time <em>plus</em> the
returned time.</p>
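+    <p>A minimal sketch (function name and environment variable are
+    hypothetical) of a user timetrap function that returns a timetrap
+    time value:</p>
+    <pre>
+ my_timetrap(_TestCaseName, _Config) ->
+     %% Return a valid timetrap time; a new timetrap is then started
+     case os:getenv("SLOW_TARGET") of
+         false -> {seconds,30};
+         _     -> {minutes,5}
+     end.</pre>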
</section>
<section>
<marker id="logging"></marker>
- <title>Logging - categories and verbosity levels</title>
- <p>Common Test provides three main functions for printing strings:</p>
- <list>
- <item><c>ct:log(Category, Importance, Format, Args)</c></item>
- <item><c>ct:print(Category, Importance, Format, Args)</c></item>
- <item><c>ct:pal(Category, Importance, Format, Args)</c></item>
+ <title>Logging - Categories and Verbosity Levels</title>
+ <p><c>Common Test</c> provides the following three main functions for
+ printing strings:</p>
+ <list type="bulleted">
+ <item><c>ct:log(Category, Importance, Format, FormatArgs, Opts)</c></item>
+ <item><c>ct:print(Category, Importance, Format, FormatArgs)</c></item>
+ <item><c>ct:pal(Category, Importance, Format, FormatArgs)</c></item>
</list>
- <p>The <c>log/1/2/3/4</c> function will print a string to the test case
- log file. The <c>print/1/2/3/4</c> function will print the string to screen,
- and the <c>pal/1/2/3/4</c> function will print the same string both to file and
- screen. (The functions are documented in the <c>ct</c> reference manual).</p>
-
- <p>The optional <c>Category</c> argument may be used to categorize the
- log printout, and categories can be used for two things:</p>
- <list>
+ <p>The <seealso marker="ct#log-1"><c>log/1,2,3,4,5</c></seealso> function
+ prints a string to the test case log file.
+ The <seealso marker="ct#print-1"><c>print/1,2,3,4</c></seealso> function
+ prints the string to screen.
+ The <seealso marker="ct#pal-1"><c>pal/1,2,3,4</c></seealso> function
+ prints the same string both to file and screen. The functions are described
+ in module <seealso marker="ct">ct</seealso>.
+ </p>
+
+ <p>The optional <c>Category</c> argument can be used to categorize the
+ log printout. Categories can be used for two things as follows:</p>
+ <list type="bulleted">
<item>To compare the importance of the printout to a specific
- verbosity level, and</item>
- <item>to format the printout according to a user specific HTML
+ verbosity level.</item>
+ <item>To format the printout according to a user-specific HTML
Style Sheet (CSS).</item>
</list>
- <p>The <c>Importance</c> argument specifies a level of importance
- which, compared to a verbosity level (general and/or set per category),
- determines if the printout should be visible or not. <c>Importance</c>
- is an arbitrary integer in the range 0..99. Pre-defined constants
+ <p>Argument <c>Importance</c> specifies a level of importance
+ that, compared to a verbosity level (general and/or set per category),
+ determines if the printout is to be visible. <c>Importance</c>
+ is any integer in the range 0..99. Predefined constants
exist in the <c>ct.hrl</c> header file. The default importance level,
- <c>?STD_IMPORTANCE</c> (used if the <c>Importance</c> argument is not
- provided), is 50. This is also the importance used for standard IO, e.g.
- from printouts made with <c>io:format/2</c>, <c>io:put_chars/1</c>, etc.</p>
+ <c>?STD_IMPORTANCE</c> (used if argument <c>Importance</c> is not
+ provided), is 50. This is also the importance used for standard I/O,
+ for example, from printouts made with <c>io:format/2</c>,
+ <c>io:put_chars/1</c>, and so on.</p>
- <p><c>Importance</c> is compared to a verbosity level set by means of the
+ <p><c>Importance</c> is compared to a verbosity level set by the
<c>verbosity</c> start flag/option. The verbosity level can be set per
- category and/or generally. The default verbosity level, <c>?STD_VERBOSITY</c>,
- is 50, i.e. all standard IO gets printed. If a lower verbosity level is set,
- standard IO printouts will be ignored. Common Test performs the following test:</p>
- <pre>Importance >= (100-VerbosityLevel)</pre>
+ category or generally, or both. The default verbosity level,
+ <c>?STD_VERBOSITY</c>, is 50, that is, all standard I/O gets printed.
+ If a lower verbosity level is set, standard I/O printouts are ignored.
+ <c>Common Test</c> performs the following test:</p>
+ <pre>
+ Importance >= (100-VerbosityLevel)</pre>
<p>This also means that verbosity level 0 effectively turns all logging off
- (with the exception of printouts made by Common Test itself).</p>
+ (except from printouts made by <c>Common Test</c> itself).</p>
<p>The general verbosity level is not associated with any particular
- category. This level sets the threshold for the standard IO printouts,
- uncategorized <c>ct:log/print/pal</c> printouts, as well as
+ category. This level sets the threshold for the standard I/O printouts,
+ uncategorized <c>ct:log/print/pal</c> printouts, and
printouts for categories with undefined verbosity level.</p>
- <p>Example:</p>
- <pre>
- Some printouts during test case execution:
-
- io:format("1. Standard IO, importance = ~w~n", [?STD_IMPORTANCE]),
- ct:log("2. Uncategorized, importance = ~w", [?STD_IMPORTANCE]),
- ct:log(info, "3. Categorized info, importance = ~w", [?STD_IMPORTANCE]]),
- ct:log(info, ?LOW_IMPORTANCE, "4. Categorized info, importance = ~w", [?LOW_IMPORTANCE]),
- ct:log(error, "5. Categorized error, importance = ~w", [?HI_IMPORTANCE]),
- ct:log(error, ?HI_IMPORTANCE, "6. Categorized error, importance = ~w", [?MAX_IMPORTANCE]),
-
- If starting the test without specifying any verbosity levels:
-
- $ ct_run ...
-
- the following gets printed:
-
- 1. Standard IO, importance = 50
- 2. Uncategorized, importance = 50
- 3. Categorized info, importance = 50
- 5. Categorized error, importance = 75
- 6. Categorized error, importance = 99
-
- If starting the test with:
-
- $ ct_run -verbosity 1 and info 75
-
- the following gets printed:
-
- 3. Categorized info, importance = 50
- 4. Categorized info, importance = 25
- 6. Categorized error, importance = 99</pre>
-
- <p>How categories can be mapped to CSS tags is documented in the
- <seealso marker="run_test_chapter#html_stylesheet">Running Tests</seealso>
- chapter.</p>
-
- <p>The <c>Format</c> and <c>Args</c> arguments in <c>ct:log/print/pal</c> are
- always passed on to the <c>io:format/3</c> function in <c>stdlib</c>
- (please see the <c>io</c> manual page for details).</p>
+ <p><em>Examples:</em></p>
+ <p>Some printouts during test case execution:</p>
+ <pre>
+ io:format("1. Standard IO, importance = ~w~n", [?STD_IMPORTANCE]),
+ ct:log("2. Uncategorized, importance = ~w", [?STD_IMPORTANCE]),
+ ct:log(info, "3. Categorized info, importance = ~w", [?STD_IMPORTANCE]]),
+ ct:log(info, ?LOW_IMPORTANCE, "4. Categorized info, importance = ~w", [?LOW_IMPORTANCE]),
+ ct:log(error, "5. Categorized error, importance = ~w", [?HI_IMPORTANCE]),
+ ct:log(error, ?HI_IMPORTANCE, "6. Categorized error, importance = ~w", [?MAX_IMPORTANCE]),</pre>
+
+ <p>If starting the test without specifying any verbosity levels as follows:</p>
+ <pre>
+ $ ct_run ...</pre>
+ <p>the following is printed:</p>
+ <pre>
+ 1. Standard IO, importance = 50
+ 2. Uncategorized, importance = 50
+ 3. Categorized info, importance = 50
+ 5. Categorized error, importance = 75
+ 6. Categorized error, importance = 99</pre>
+
+ <p>If starting the test with:</p>
+ <pre>
+ $ ct_run -verbosity 1 and info 75</pre>
+ <p>the following is printed:</p>
+ <pre>
+ 3. Categorized info, importance = 50
+ 4. Categorized info, importance = 25
+ 6. Categorized error, importance = 99</pre>
+
+ <p>The arguments <c>Format</c> and <c>FormatArgs</c> in <c>ct:log/print/pal</c> are
+    always passed on to the <c>stdlib</c> function <c>io:format/3</c> (for details,
+    see the <seealso marker="stdlib:io"><c>stdlib:io</c></seealso> manual page).</p>
+
+ <p><c>ct:pal/4</c> and <c>ct:log/5</c> add headers to strings being printed to the
+    log file. The strings are also wrapped in <c>div</c> tags with a CSS class
+    attribute, so that stylesheet formatting can be applied. To disable this feature for
+    a printout (that is, to get a result similar to using <c>io:format/2</c>),
+ call <c>ct:log/5</c> with the <c>no_css</c> option.</p>
+
+ <p>How categories can be mapped to CSS tags is documented in section
+ <seealso marker="run_test_chapter#html_stylesheet">HTML Style Sheets</seealso>
+ in section Running Tests and Analyzing Results.</p>
- <p>For more information about log files, please see the
- <seealso marker="run_test_chapter#log_files">Running Tests</seealso> chapter.</p>
+    <p><c>Common Test</c> escapes special HTML characters (&lt;, &gt;, and &amp;) in printouts
+    to the log file made with <c>ct:pal/4</c> and <c>io:format/2</c>. To print
+    strings with HTML tags to the log, use the <c>ct:log/5</c> function. The character escaping
+    feature is disabled by default for <c>ct:log/5</c>, but can be enabled with the
+    <c>esc_chars</c> option.</p>
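+    <p>A minimal sketch of the two options:</p>
+    <pre>
+ %% Markup is kept intact (ct:log/5 does not escape by default):
+ ct:log(default, ?STD_IMPORTANCE, "&lt;b&gt;bold in the log&lt;/b&gt;", [], []),
+ %% Escape the special characters so that they show literally:
+ ct:log(default, ?STD_IMPORTANCE, "1 &lt; 2 &amp; 2 &gt; 1", [], [esc_chars]),
+ %% Skip header and CSS wrapping, similar to io:format/2:
+ ct:log(default, ?STD_IMPORTANCE, "plain printout", [], [no_css]),</pre>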
+
+ <p>For more information about log files, see section
+ <seealso marker="run_test_chapter#log_files">Log Files</seealso>
+ in section Running Tests and Analyzing Results.</p>
</section>
<section>
- <title>Illegal dependencies</title>
+ <title>Illegal Dependencies</title>
<p>Even though it is highly efficient to write test suites with
- the Common Test framework, there will surely be mistakes made,
- mainly due to illegal dependencies. Noted below are some of the
+ the <c>Common Test</c> framework, mistakes can be made,
+ mainly because of illegal dependencies. Some of the
more frequent mistakes from our own experience with running the
- Erlang/OTP test suites.</p>
+    Erlang/OTP test suites follow:</p>
- <list>
- <item>Depending on current directory, and writing there:<br></br>
+ <list type="bulleted">
+ <item><p>Depending on current directory, and writing there:</p>
<p>This is a common error in test suites. It is assumed that
- the current directory is the same as what the author used as
+ the current directory is the same as the author used as
current directory when the test case was developed. Many test
cases even try to write scratch files to this directory. Instead
- <c>data_dir</c> and <c>priv_dir</c> should be used to locate
+ <c>data_dir</c> and <c>priv_dir</c> are to be used to locate
data and for writing scratch files.
</p>
</item>
- <item>Depending on execution order:<br></br>
+ <item><p>Depending on execution order:</p>
- <p>During development of test suites, no assumption should preferrably
- be made about the execution order of the test cases or suites.
- E.g. a test case should not assume that a server it depends on,
- has already been started by a previous test case. There are
- several reasons for this:
- </p>
- <p>Firstly, the user/operator may specify the order at will, and maybe
- a different execution order is more relevant or efficient on
- some particular occasion. Secondly, if the user specifies a whole
- directory of test suites for his/her test, the order the suites are
- executed will depend on how the files are listed by the operating
- system, which varies between systems. Thirdly, if a user
- wishes to run only a subset of a test suite, there is no way
- one test case could successfully depend on another.
+          <p>During development of test suites, make no assumptions about the
+          execution order of the test cases or suites. For example, a test
+          case must not assume that a server it depends on is already
+          started by a previous test case. Reasons for this are as follows:
</p>
+ <list type="bulleted">
+ <item>The user/operator can specify the order at will, and maybe
+ a different execution order is sometimes more relevant or
+ efficient.</item>
+ <item>If the user specifies a whole directory of test suites
+ for the test, the execution order of the suites depends on
+ how the files are listed by the operating system, which varies
+ between systems.</item>
+ <item>If a user wants to run only a subset of a test suite,
+ there is no way one test case could successfully depend on
+ another.</item>
+ </list>
</item>
- <item>Depending on Unix:<br></br>
+ <item><p>Depending on Unix:</p>
- <p>Running unix commands through <c>os:cmd</c> are likely
- not to work on non-unix platforms.
+          <p>Running Unix commands through <c>os:cmd</c> is likely
+          not to work on non-Unix platforms.
</p>
</item>
- <item>Nested test cases:<br></br>
+ <item><p>Nested test cases:</p>
- <p>Invoking a test case from another not only tests the same
- thing twice, but also makes it harder to follow what exactly
- is being tested. Also, if the called test case fails for some
- reason, so will the caller. This way one error gives cause to
- several error reports, which is less than ideal.
+ <p>Starting a test case from another not only tests the same
+ thing twice, but also makes it harder to follow what is being
+ tested. Also, if the called test case fails for some
+          reason, so does the caller. This way, one error gives cause to
+ several error reports, which is to be avoided.
</p>
- <p>Functionality common for many test case functions may be implemented
- in common help functions. If these functions are useful for test cases
- across suites, put the help functions into common help modules.
+ <p>Functionality common for many test case functions can be
+ implemented in common help functions. If these functions are
+ useful for test cases across suites, put the help functions
+ into common help modules.
</p>
</item>
- <item>Failure to crash or exit when things go wrong:<br></br>
+ <item><p>Failure to crash or exit when things go wrong:</p>
<p>Making requests without checking that the return value
- indicates success may be ok if the test case will fail at a
- later stage, but it is never acceptable just to print an error
- message (into the log file) and return successfully. Such test cases
- do harm since they create a false sense of security when overviewing
- the test results.
+ indicates success can be OK if the test case fails
+ later, but it is never acceptable just to print an error
+ message (into the log file) and return successfully. Such test
+ cases do harm, as they create a false sense of security when
+ overviewing the test results.
</p>
</item>
- <item>Messing up for subsequent test cases:<br></br>
+ <item><p>Messing up for subsequent test cases:</p>
- <p>Test cases should restore as much of the execution
- environment as possible, so that the subsequent test cases will
- not crash because of execution order of the test cases.
- The function <c>end_per_testcase</c> is suitable for this.
+ <p>Test cases are to restore as much of the execution
+ environment as possible, so that subsequent test cases
+ do not crash because of their execution order.
+ The function
+ <seealso marker="common_test#Module:end_per_testcase-2"><c>end_per_testcase</c></seealso>
+          is suitable for this (see the sketch following this list).
</p>
</item>
</list>
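+    <p>A minimal sketch (the server name is hypothetical) of restoring the
+    execution environment after each test case:</p>
+    <pre>
+ end_per_testcase(_TestCase, _Config) ->
+     %% my_server is a hypothetical server that a test case may have started
+     case whereis(my_server) of
+         undefined -> ok;
+         _Pid -> my_server:stop()
+     end,
+     ok.</pre>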
diff --git a/lib/common_test/include/ct.hrl b/lib/common_test/include/ct.hrl
index 53bc43fc24..5806f75806 100644
--- a/lib/common_test/include/ct.hrl
+++ b/lib/common_test/include/ct.hrl
@@ -18,8 +18,6 @@
%% %CopyrightEnd%
%%
--include_lib("test_server/include/test_server.hrl").
-
%% the log level is used as argument to any CT logging function
-define(MIN_IMPORTANCE, 0 ).
-define(LOW_IMPORTANCE, 25).
@@ -37,3 +35,9 @@
%% name of process executing the CT Hook init and terminate function
-define(CT_HOOK_INIT_PROCESS, ct_util_server).
-define(CT_HOOK_TERMINATE_PROCESS, ct_util_server).
+
+%% Backward compatibility for test_server test suites.
+%% DO NOT USE IN NEW TEST SUITES.
+-define(line,).
+-define(t,test_server).
+-define(config,test_server:lookup_config).
diff --git a/lib/common_test/src/Makefile b/lib/common_test/src/Makefile
index 987345c679..91c1e8ede8 100644
--- a/lib/common_test/src/Makefile
+++ b/lib/common_test/src/Makefile
@@ -79,7 +79,15 @@ MODULES= \
cth_conn_log \
ct_groups \
ct_property_test \
- ct_release_test
+ ct_release_test \
+ erl2html2 \
+ test_server_ctrl \
+ test_server_gl \
+ test_server_io \
+ test_server_node \
+ test_server \
+ test_server_sup
+
TARGET_MODULES= $(MODULES:%=$(EBIN)/%)
BEAM_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
@@ -101,8 +109,7 @@ DTD_FILES = \
# FLAGS
# ----------------------------------------------------
ERL_COMPILE_FLAGS += -pa ../ebin -I../include -I $(ERL_TOP)/lib/snmp/include/ \
- -I../../test_server/include -I../../xmerl/inc/ \
- -I $(ERL_TOP)/lib/kernel/include -Werror
+ -I../../xmerl/inc/ -I $(ERL_TOP)/lib/kernel/include -Werror
# ----------------------------------------------------
# Targets
diff --git a/lib/common_test/src/common_test.app.src b/lib/common_test/src/common_test.app.src
index d847907d75..26bcf00824 100644
--- a/lib/common_test/src/common_test.app.src
+++ b/lib/common_test/src/common_test.app.src
@@ -53,7 +53,13 @@
ct_slave,
cth_log_redirect,
cth_conn_log,
- cth_surefire
+ cth_surefire,
+ erl2html2,
+ test_server_ctrl,
+ test_server,
+ test_server_io,
+ test_server_node,
+ test_server_sup
]},
{registered, [ct_logs,
ct_util_server,
@@ -61,13 +67,27 @@
ct_make_ref,
vts,
ct_master,
- ct_master_logs]},
+ ct_master_logs,
+ test_server_ctrl,
+ test_server,
+ test_server_break_process]},
{applications, [kernel,stdlib]},
{env, []},
- {runtime_dependencies,["xmerl-1.3.8","tools-2.8",
- "test_server-3.9","stdlib-2.5","ssh-4.0",
- "snmp-5.1.2","sasl-2.4.2","runtime_tools-1.8.16",
- "kernel-4.0","inets-6.0","erts-7.0",
- "debugger-4.1","crypto-3.6","compiler-6.0",
- "observer-2.1"]}]}.
+ {runtime_dependencies,
+ ["compiler-6.0",
+ "crypto-3.6",
+ "debugger-4.1",
+ "erts-7.0",
+ "inets-6.0",
+ "kernel-4.0",
+ "observer-2.1",
+ "runtime_tools-1.8.16",
+ "sasl-2.4.2",
+ "snmp-5.1.2",
+ "ssh-4.0",
+ "stdlib-2.5",
+ "syntax_tools-1.7",
+ "tools-2.8",
+ "xmerl-1.3.8"
+ ]}]}.
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index 7958a349b4..538be514d6 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -64,7 +64,8 @@
-export([require/1, require/2,
get_config/1, get_config/2, get_config/3,
reload_config/1,
- log/1, log/2, log/3, log/4,
+ escape_chars/1, escape_chars/2,
+ log/1, log/2, log/3, log/4, log/5,
print/1, print/2, print/3, print/4,
pal/1, pal/2, pal/3, pal/4,
capture_start/0, capture_stop/0, capture_get/0, capture_get/1,
@@ -509,44 +510,88 @@ get_testspec_terms(Tags) ->
%%%-----------------------------------------------------------------
+%%% @spec escape_chars(IoList1) -> IoList2 | {error,Reason}
+%%% IoList1 = iolist()
+%%% IoList2 = iolist()
+%%%
+%%% @doc Escape special characters to be printed in HTML log
+%%%
+escape_chars(IoList) ->
+ ct_logs:escape_chars(IoList).
+
+%%%-----------------------------------------------------------------
+%%% @spec escape_chars(Format, Args) -> IoList | {error,Reason}
+%%% Format = string()
+%%% Args = list()
+%%%
+%%% @doc Escape special characters to be printed in HTML log
+%%%
+escape_chars(Format, Args) ->
+ try io_lib:format(Format, Args) of
+ IoList ->
+ ct_logs:escape_chars(IoList)
+ catch
+ _:Reason ->
+ {error,Reason}
+ end.
+
+%%%-----------------------------------------------------------------
%%% @spec log(Format) -> ok
-%%% @equiv log(default,50,Format,[])
+%%% @equiv log(default,50,Format,[],[])
log(Format) ->
- log(default,?STD_IMPORTANCE,Format,[]).
+ log(default,?STD_IMPORTANCE,Format,[],[]).
%%%-----------------------------------------------------------------
%%% @spec log(X1,X2) -> ok
%%% X1 = Category | Importance | Format
%%% X2 = Format | Args
-%%% @equiv log(Category,Importance,Format,Args)
+%%% @equiv log(Category,Importance,Format,Args,[])
log(X1,X2) ->
{Category,Importance,Format,Args} =
if is_atom(X1) -> {X1,?STD_IMPORTANCE,X2,[]};
is_integer(X1) -> {default,X1,X2,[]};
is_list(X1) -> {default,?STD_IMPORTANCE,X1,X2}
end,
- log(Category,Importance,Format,Args).
+ log(Category,Importance,Format,Args,[]).
%%%-----------------------------------------------------------------
%%% @spec log(X1,X2,X3) -> ok
+%%% X1 = Category | Importance | Format
+%%% X2 = Importance | Format | Args
+%%% X3 = Format | Args | Opts
+%%% @equiv log(Category,Importance,Format,Args,Opts)
+log(X1,X2,X3) ->
+ {Category,Importance,Format,Args,Opts} =
+ if is_atom(X1), is_integer(X2) -> {X1,X2,X3,[],[]};
+ is_atom(X1), is_list(X2) -> {X1,?STD_IMPORTANCE,X2,X3,[]};
+ is_integer(X1) -> {default,X1,X2,X3,[]};
+ is_list(X1), is_list(X2) -> {default,?STD_IMPORTANCE,X1,X2,X3}
+ end,
+ log(Category,Importance,Format,Args,Opts).
+
+%%%-----------------------------------------------------------------
+%%% @spec log(X1,X2,X3,X4) -> ok
%%% X1 = Category | Importance
%%% X2 = Importance | Format
%%% X3 = Format | Args
-%%% @equiv log(Category,Importance,Format,Args)
-log(X1,X2,X3) ->
- {Category,Importance,Format,Args} =
- if is_atom(X1), is_integer(X2) -> {X1,X2,X3,[]};
- is_atom(X1), is_list(X2) -> {X1,?STD_IMPORTANCE,X2,X3};
- is_integer(X1) -> {default,X1,X2,X3}
+%%% X4 = Args | Opts
+%%% @equiv log(Category,Importance,Format,Args,Opts)
+log(X1,X2,X3,X4) ->
+ {Category,Importance,Format,Args,Opts} =
+ if is_atom(X1), is_integer(X2) -> {X1,X2,X3,X4,[]};
+ is_atom(X1), is_list(X2) -> {X1,?STD_IMPORTANCE,X2,X3,X4};
+ is_integer(X1) -> {default,X1,X2,X3,X4}
end,
- log(Category,Importance,Format,Args).
+ log(Category,Importance,Format,Args,Opts).
%%%-----------------------------------------------------------------
-%%% @spec log(Category,Importance,Format,Args) -> ok
+%%% @spec log(Category,Importance,Format,Args,Opts) -> ok
%%% Category = atom()
%%% Importance = integer()
%%% Format = string()
%%% Args = list()
+%%% Opts = [Opt]
+%%% Opt = esc_chars | no_css
%%%
%%% @doc Printout from a test case to the log file.
%%%
@@ -558,8 +603,8 @@ log(X1,X2,X3) ->
%%% and default value for <c>Args</c> is <c>[]</c>.</p>
%%% <p>Please see the User's Guide for details on <c>Category</c>
%%% and <c>Importance</c>.</p>
-log(Category,Importance,Format,Args) ->
- ct_logs:tc_log(Category,Importance,Format,Args).
+log(Category,Importance,Format,Args,Opts) ->
+ ct_logs:tc_log(Category,Importance,Format,Args,Opts).
%%%-----------------------------------------------------------------
diff --git a/lib/common_test/src/ct_config.erl b/lib/common_test/src/ct_config.erl
index 251204aa75..33efe7a14a 100644
--- a/lib/common_test/src/ct_config.erl
+++ b/lib/common_test/src/ct_config.erl
@@ -694,11 +694,10 @@ make_crypto_key(String) ->
{[K1,K2,K3],IVec}.
random_bytes(N) ->
- random:seed(os:timestamp()),
random_bytes_1(N, []).
random_bytes_1(0, Acc) -> Acc;
-random_bytes_1(N, Acc) -> random_bytes_1(N-1, [random:uniform(255)|Acc]).
+random_bytes_1(N, Acc) -> random_bytes_1(N-1, [rand:uniform(255)|Acc]).
check_callback_load(Callback) ->
case code:is_loaded(Callback) of
diff --git a/lib/common_test/src/ct_conn_log_h.erl b/lib/common_test/src/ct_conn_log_h.erl
index 93f358462d..034906a3ba 100644
--- a/lib/common_test/src/ct_conn_log_h.erl
+++ b/lib/common_test/src/ct_conn_log_h.erl
@@ -105,52 +105,62 @@ terminate(_,#state{logs=Logs}) ->
%%% Writing reports
write_report(_Time,#conn_log{header=false,module=ConnMod}=Info,Data,GL,State) ->
case get_log(Info,GL,State) of
- {silent,_} ->
+ {silent,_,_} ->
ok;
- {LogType,Fd} ->
- io:format(Fd,"~n~ts",[format_data(ConnMod,LogType,Data)])
+ {LogType,Dest,Fd} ->
+ Str = if LogType == html, Dest == gl -> ["$tc_html","~n~ts"];
+ true -> "~n~ts"
+ end,
+ io:format(Fd,Str,[format_data(ConnMod,LogType,Data)])
end;
write_report(Time,#conn_log{module=ConnMod}=Info,Data,GL,State) ->
case get_log(Info,GL,State) of
- {silent,_} ->
+ {silent,_,_} ->
ok;
- {LogType,Fd} ->
+ {LogType,Dest,Fd} ->
case format_data(ConnMod,LogType,Data) of
[] when Info#conn_log.action==send; Info#conn_log.action==recv ->
ok;
FormattedData ->
- io:format(Fd,"~n~ts~ts~ts",[format_head(ConnMod,LogType,Time),
- format_title(LogType,Info),
- FormattedData])
+ Str = if LogType == html, Dest == gl ->
+ ["$tc_html","~n~ts~ts~ts"];
+ true ->
+ "~n~ts~ts~ts"
+ end,
+ io:format(Fd,Str,[format_head(ConnMod,LogType,Time),
+ format_title(LogType,Info),
+ FormattedData])
end
end.
write_error(Time,#conn_log{module=ConnMod}=Info,Report,GL,State) ->
case get_log(Info,GL,State) of
- {LogType,_} when LogType==html; LogType==silent ->
+ {LogType,_,_} when LogType==html; LogType==silent ->
%% The error will anyway be written in the html log by the
%% sasl error handler, so don't write it again.
ok;
- {LogType,Fd} ->
- io:format(Fd,"~n~ts~ts~ts",
- [format_head(ConnMod,LogType,Time," ERROR"),
- format_title(LogType,Info),
- format_error(LogType,Report)])
+ {LogType,Dest,Fd} ->
+ Str = if LogType == html, Dest == gl -> ["$tc_html","~n~ts~ts~ts"];
+ true -> "~n~ts~ts~ts"
+ end,
+ io:format(Fd,Str,[format_head(ConnMod,LogType,Time," ERROR"),
+ format_title(LogType,Info),
+ format_error(LogType,Report)])
end.
get_log(Info,GL,State) ->
case proplists:get_value(GL,State#state.logs) of
undefined ->
- {html,State#state.default_gl};
+ {html,gl,State#state.default_gl};
ConnLogs ->
case proplists:get_value(Info#conn_log.module,ConnLogs) of
{html,_} ->
- {html,GL};
+ {html,gl,GL};
{LogType,Fds} ->
- {LogType,get_fd(Info,Fds)};
+ {LogType,file,get_fd(Info,Fds)};
undefined ->
- {html,GL}
+ {html,gl,GL}
end
end.
diff --git a/lib/common_test/src/ct_framework.erl b/lib/common_test/src/ct_framework.erl
index 8e25bfece0..19a3a51b88 100644
--- a/lib/common_test/src/ct_framework.erl
+++ b/lib/common_test/src/ct_framework.erl
@@ -866,13 +866,13 @@ tag(_Other) ->
%%% <code>Func</code> in suite <code>Mod</code> crashing.
%%% <code>Error</code> specifies the reason for failing.
error_notification(Mod,Func,_Args,{Error,Loc}) ->
- ErrSpec = case Error of
+ ErrorSpec = case Error of
{What={_E,_R},Trace} when is_list(Trace) ->
What;
What ->
What
end,
- ErrStr = case ErrSpec of
+ ErrorStr = case ErrorSpec of
{badmatch,Descr} ->
Descr1 = lists:flatten(io_lib:format("~P",[Descr,10])),
if length(Descr1) > 50 ->
@@ -894,7 +894,8 @@ error_notification(Mod,Func,_Args,{Error,Loc}) ->
Other ->
io_lib:format("~P", [Other,5])
end,
- ErrorHtml = "<font color=\"brown\">" ++ ErrStr ++ "</font>",
+ ErrorHtml =
+ "<font color=\"brown\">" ++ ct_logs:escape_chars(ErrorStr) ++ "</font>",
case {Mod,Error} of
%% some notifications come from the main test_server process
%% and for these cases the existing comment may not be modified
@@ -922,41 +923,43 @@ error_notification(Mod,Func,_Args,{Error,Loc}) ->
end
end,
- PrintErr = fun(ErrFormat, ErrArgs) ->
+ PrintError = fun(ErrorFormat, ErrorArgs) ->
Div = "~n- - - - - - - - - - - - - - - - - - - "
"- - - - - - - - - - - - - - - - - - - - -~n",
- io:format(user, lists:concat([Div,ErrFormat,Div,"~n"]),
- ErrArgs),
+ ErrorStr2 = io_lib:format(ErrorFormat, ErrorArgs),
+ io:format(user, lists:concat([Div,ErrorStr2,Div,"~n"]),
+ []),
Link =
"\n\n<a href=\"#end\">"
"Full error description and stacktrace"
"</a>",
+ ErrorHtml2 = ct_logs:escape_chars(ErrorStr2),
ct_logs:tc_log(ct_error_notify,
?MAX_IMPORTANCE,
"CT Error Notification",
- ErrFormat++Link, ErrArgs)
+ ErrorHtml2++Link, [], [])
end,
case Loc of
[{?MODULE,error_in_suite}] ->
- PrintErr("Error in suite detected: ~ts", [ErrStr]);
+ PrintError("Error in suite detected: ~ts", [ErrorStr]);
R when R == unknown; R == undefined ->
- PrintErr("Error detected: ~ts", [ErrStr]);
+ PrintError("Error detected: ~ts", [ErrorStr]);
%% if a function specified by all/0 does not exist, we
%% pick up undef here
- [{LastMod,LastFunc}|_] when ErrStr == "undef" ->
- PrintErr("~w:~w could not be executed~nReason: ~ts",
- [LastMod,LastFunc,ErrStr]);
+ [{LastMod,LastFunc}|_] when ErrorStr == "undef" ->
+ PrintError("~w:~w could not be executed~nReason: ~ts",
+ [LastMod,LastFunc,ErrorStr]);
[{LastMod,LastFunc}|_] ->
- PrintErr("~w:~w failed~nReason: ~ts", [LastMod,LastFunc,ErrStr]);
+ PrintError("~w:~w failed~nReason: ~ts", [LastMod,LastFunc,ErrorStr]);
[{LastMod,LastFunc,LastLine}|_] ->
%% print error to console, we are only
%% interested in the last executed expression
- PrintErr("~w:~w failed on line ~w~nReason: ~ts",
- [LastMod,LastFunc,LastLine,ErrStr]),
+ PrintError("~w:~w failed on line ~w~nReason: ~ts",
+ [LastMod,LastFunc,LastLine,ErrorStr]),
case ct_util:read_suite_data({seq,Mod,Func}) of
undefined ->
diff --git a/lib/common_test/src/ct_logs.erl b/lib/common_test/src/ct_logs.erl
index 78081380e7..4920383f39 100644
--- a/lib/common_test/src/ct_logs.erl
+++ b/lib/common_test/src/ct_logs.erl
@@ -37,15 +37,17 @@
-export([add_external_logs/1, add_link/3]).
-export([make_last_run_index/0]).
-export([make_all_suites_index/1,make_all_runs_index/1]).
--export([get_ts_html_wrapper/5]).
+-export([get_ts_html_wrapper/5, escape_chars/1]).
-export([xhtml/2, locate_priv_file/1, make_relative/1]).
-export([insert_javascript/1]).
-export([uri/1]).
%% Logging stuff directly from testcase
--export([tc_log/3, tc_log/4, tc_log/5, tc_log_async/3, tc_log_async/5,
+-export([tc_log/3, tc_log/4, tc_log/5, tc_log/6,
+ tc_log_async/3, tc_log_async/5,
tc_print/3, tc_print/4,
- tc_pal/3, tc_pal/4, ct_log/3, basic_html/0]).
+ tc_pal/3, tc_pal/4, ct_log/3,
+ basic_html/0]).
%% Simulate logger process for use without ct environment running
-export([simulate/0]).
@@ -267,7 +269,7 @@ cast(Msg) ->
%%% <p>This function is called by ct_framework:init_tc/3</p>
init_tc(RefreshLog) ->
call({init_tc,self(),group_leader(),RefreshLog}),
- io:format(xhtml("", "<br />")),
+ io:format(["$tc_html",xhtml("", "<br />")]),
ok.
%%%-----------------------------------------------------------------
@@ -314,9 +316,10 @@ unregister_groupleader(Pid) ->
%%% data to log (as in <code>io:format(Format,Args)</code>).</p>
log(Heading,Format,Args) ->
cast({log,sync,self(),group_leader(),ct_internal,?MAX_IMPORTANCE,
- [{int_header(),[log_timestamp(?now),Heading]},
+ [{hd,int_header(),[log_timestamp(?now),Heading]},
{Format,Args},
- {int_footer(),[]}]}),
+ {ft,int_footer(),[]}],
+ true}),
ok.
%%%-----------------------------------------------------------------
@@ -336,7 +339,7 @@ log(Heading,Format,Args) ->
%%% @see end_log/0
start_log(Heading) ->
cast({log,sync,self(),group_leader(),ct_internal,?MAX_IMPORTANCE,
- [{int_header(),[log_timestamp(?now),Heading]}]}),
+ [{hd,int_header(),[log_timestamp(?now),Heading]}],false}),
ok.
%%%-----------------------------------------------------------------
@@ -351,7 +354,7 @@ cont_log([],[]) ->
cont_log(Format,Args) ->
maybe_log_timestamp(),
cast({log,sync,self(),group_leader(),ct_internal,?MAX_IMPORTANCE,
- [{Format,Args}]}),
+ [{Format,Args}],true}),
ok.
%%%-----------------------------------------------------------------
@@ -363,7 +366,7 @@ cont_log(Format,Args) ->
%%% @see cont_log/2
end_log() ->
cast({log,sync,self(),group_leader(),ct_internal,?MAX_IMPORTANCE,
- [{int_footer(), []}]}),
+ [{ft,int_footer(), []}],false}),
ok.
@@ -400,32 +403,46 @@ add_link(Heading,File,Type) ->
%%% @spec tc_log(Category,Format,Args) -> ok
%%% @equiv tc_log(Category,?STD_IMPORTANCE,Format,Args)
tc_log(Category,Format,Args) ->
- tc_log(Category,?STD_IMPORTANCE,Format,Args).
+ tc_log(Category,?STD_IMPORTANCE,"User",Format,Args,[]).
%%%-----------------------------------------------------------------
%%% @spec tc_log(Category,Importance,Format,Args) -> ok
%%% @equiv tc_log(Category,Importance,"User",Format,Args)
tc_log(Category,Importance,Format,Args) ->
- tc_log(Category,Importance,"User",Format,Args).
+ tc_log(Category,Importance,"User",Format,Args,[]).
%%%-----------------------------------------------------------------
-%%% @spec tc_log(Category,Importance,Printer,Format,Args) -> ok
+%%% @spec tc_log(Category,Importance,Format,Args,Opts) -> ok
+%%% @equiv tc_log(Category,Importance,"User",Format,Args,Opts)
+tc_log(Category,Importance,Format,Args,Opts) ->
+ tc_log(Category,Importance,"User",Format,Args,Opts).
+
+%%%-----------------------------------------------------------------
+%%% @spec tc_log(Category,Importance,Printer,Format,Args,Opts) -> ok
%%% Category = atom()
%%% Importance = integer()
%%% Printer = string()
%%% Format = string()
%%% Args = list()
+%%% Opts = list()
%%%
%%% @doc Printout from a testcase.
%%%
%%% <p>This function is called by <code>ct</code> when logging
%%% stuff directly from a testcase (i.e. not from within the CT
%%% framework).</p>
-tc_log(Category,Importance,Printer,Format,Args) ->
- cast({log,sync,self(),group_leader(),Category,Importance,
- [{div_header(Category,Printer),[]},
- {Format,Args},
- {div_footer(),[]}]}),
+tc_log(Category,Importance,Printer,Format,Args,Opts) ->
+ Data =
+ case lists:member(no_css, Opts) of
+ true ->
+ [{Format,Args}];
+ false ->
+ [{hd,div_header(Category,Printer),[]},
+ {Format,Args},
+ {ft,div_footer(),[]}]
+ end,
+ cast({log,sync,self(),group_leader(),Category,Importance,Data,
+ lists:member(esc_chars, Opts)}),
ok.
%%%-----------------------------------------------------------------
@@ -451,9 +468,10 @@ tc_log_async(Category,Format,Args) ->
%%% asks ct_logs for an html wrapper.</p>
tc_log_async(Category,Importance,Printer,Format,Args) ->
cast({log,async,self(),group_leader(),Category,Importance,
- [{div_header(Category,Printer),[]},
+ [{hd,div_header(Category,Printer),[]},
{Format,Args},
- {div_footer(),[]}]}),
+ {ft,div_footer(),[]}],
+ true}),
ok.
%%%-----------------------------------------------------------------
%%% @spec tc_print(Category,Format,Args)
@@ -522,43 +540,45 @@ tc_pal(Category,Format,Args) ->
tc_pal(Category,Importance,Format,Args) ->
tc_print(Category,Importance,Format,Args),
cast({log,sync,self(),group_leader(),Category,Importance,
- [{div_header(Category),[]},
+ [{hd,div_header(Category),[]},
{Format,Args},
- {div_footer(),[]}]}),
+ {ft,div_footer(),[]}],
+ true}),
ok.
%%%-----------------------------------------------------------------
-%%% @spec ct_pal(Category,Format,Args) -> ok
+%%% @spec ct_log(Category,Format,Args) -> ok
%%% Category = atom()
%%% Format = string()
%%% Args = list()
%%%
-%%% @doc Print and log to the ct framework log
+%%% @doc Print to the ct framework log
%%%
%%% <p>This function is called by internal ct functions to
%%% force logging to the ct framework log</p>
ct_log(Category,Format,Args) ->
- cast({ct_log,[{div_header(Category),[]},
+ cast({ct_log,[{hd,div_header(Category),[]},
{Format,Args},
- {div_footer(),[]}]}),
+ {ft,div_footer(),[]}],
+ true}),
ok.
%%%=================================================================
%%% Internal functions
int_header() ->
- "<div class=\"ct_internal\"><b>*** CT ~s *** ~ts</b>".
+ "</pre>\n<div class=\"ct_internal\"><pre><b>*** CT ~s *** ~ts</b>".
int_footer() ->
- "</div>".
+ "</pre></div>\n<pre>".
div_header(Class) ->
div_header(Class,"User").
div_header(Class,Printer) ->
- "\n<div class=\"" ++ atom_to_list(Class) ++ "\"><b>*** " ++ Printer ++
- " " ++ log_timestamp(?now) ++ " ***</b>".
+ "\n</pre>\n<div class=\"" ++ atom_to_list(Class) ++ "\"><pre><b>*** "
+ ++ Printer ++ " " ++ log_timestamp(?now) ++ " ***</b>".
div_footer() ->
- "</div>".
+ "</pre></div>\n<pre>".
maybe_log_timestamp() ->
@@ -568,7 +588,7 @@ maybe_log_timestamp() ->
ok;
_ ->
cast({log,sync,self(),group_leader(),ct_internal,?MAX_IMPORTANCE,
- [{"<i>~s</i>",[log_timestamp({MS,S,US})]}]})
+ [{hd,"<i>~s</i>",[log_timestamp({MS,S,US})]}],false})
end.
log_timestamp({MS,S,US}) ->
@@ -729,7 +749,7 @@ copy_priv_files([], []) ->
logger_loop(State) ->
receive
- {log,SyncOrAsync,Pid,GL,Category,Importance,List} ->
+ {log,SyncOrAsync,Pid,GL,Category,Importance,Content,EscChars} ->
VLvl = case Category of
ct_internal ->
?MAX_VERBOSITY;
@@ -746,15 +766,15 @@ logger_loop(State) ->
case erlang:is_process_alive(TCGL) of
true ->
State1 = print_to_log(SyncOrAsync, Pid,
- Category,
- TCGL, List, State),
+ Category, TCGL, Content,
+ EscChars, State),
logger_loop(State1#logger_state{
tc_groupleaders = TCGLs});
false ->
%% Group leader is dead, so write to the
%% CtLog or unexpected_io log instead
- unexpected_io(Pid,Category,Importance,
- List,CtLogFd),
+ unexpected_io(Pid, Category, Importance,
+ Content, CtLogFd, EscChars),
logger_loop(State)
end;
@@ -762,7 +782,8 @@ logger_loop(State) ->
%% If category is ct_internal then write
%% to ct_log, else write to unexpected_io
%% log
- unexpected_io(Pid,Category,Importance,List,CtLogFd),
+ unexpected_io(Pid, Category, Importance, Content,
+ CtLogFd, EscChars),
logger_loop(State#logger_state{
tc_groupleaders = TCGLs})
end;
@@ -818,10 +839,17 @@ logger_loop(State) ->
logger_loop(State);
{clear_stylesheet,_} ->
logger_loop(State#logger_state{stylesheet = undefined});
- {ct_log, List} ->
+ {ct_log,Content,EscChars} ->
+ Str = lists:map(fun({_HdOrFt,Str,Args}) ->
+ [io_lib:format(Str,Args),io_lib:nl()];
+ ({Str,Args}) when EscChars ->
+ Io = io_lib:format(Str,Args),
+ [escape_chars(Io),io_lib:nl()];
+ ({Str,Args}) ->
+ [io_lib:format(Str,Args),io_lib:nl()]
+ end, Content),
Fd = State#logger_state.ct_log_fd,
- [begin io:format(Fd,Str,Args),io:nl(Fd) end ||
- {Str,Args} <- List],
+ io:format(Fd, "~ts", [Str]),
logger_loop(State);
{'DOWN',Ref,_,_Pid,_} ->
%% there might be print jobs executing in parallel with ct_logs
@@ -843,45 +871,74 @@ logger_loop(State) ->
ok
end.
-create_io_fun(FromPid, CtLogFd) ->
+create_io_fun(FromPid, CtLogFd, EscChars) ->
%% we have to build one io-list of all strings
%% before printing, or other io printouts (made in
%% parallel) may get printed between this header
%% and footer
- fun({Str,Args}, IoList) ->
- case catch io_lib:format(Str,Args) of
- {'EXIT',_Reason} ->
+ fun(FormatData, IoList) ->
+ {Escapable,Str,Args} =
+ case FormatData of
+ {_HdOrFt,S,A} -> {false,S,A};
+ {S,A} -> {true,S,A}
+ end,
+ try io_lib:format(Str,Args) of
+ IoStr when Escapable, EscChars, IoList == [] ->
+ escape_chars(IoStr);
+ IoStr when Escapable, EscChars ->
+ [IoList,"\n",escape_chars(IoStr)];
+ IoStr when IoList == [] ->
+ IoStr;
+ IoStr ->
+ [IoList,"\n",IoStr]
+ catch
+ _:_Reason ->
io:format(CtLogFd, "Logging fails! Str: ~p, Args: ~p~n",
[Str,Args]),
%% stop the testcase, we need to see the fault
exit(FromPid, {log_printout_error,Str,Args}),
- [];
- IoStr when IoList == [] ->
- [IoStr];
- IoStr ->
- [IoList,"\n",IoStr]
+ []
end
end.
-print_to_log(sync, FromPid, Category, TCGL, List, State) ->
+escape_chars([Bin | Io]) when is_binary(Bin) ->
+ [Bin | escape_chars(Io)];
+escape_chars([List | Io]) when is_list(List) ->
+ [escape_chars(List) | escape_chars(Io)];
+escape_chars([$< | Io]) ->
+ ["&lt;" | escape_chars(Io)];
+escape_chars([$> | Io]) ->
+ ["&gt;" | escape_chars(Io)];
+escape_chars([$& | Io]) ->
+ ["&amp;" | escape_chars(Io)];
+escape_chars([Char | Io]) when is_integer(Char) ->
+ [Char | escape_chars(Io)];
+escape_chars([]) ->
+ [];
+escape_chars(Bin) ->
+ Bin.
+
+print_to_log(sync, FromPid, Category, TCGL, Content, EscChars, State) ->
%% in some situations (exceptions), the printout is made from the
%% test server IO process and there's no valid group leader to send to
CtLogFd = State#logger_state.ct_log_fd,
if FromPid /= TCGL ->
- IoFun = create_io_fun(FromPid, CtLogFd),
- io:format(TCGL,"~ts", [lists:foldl(IoFun, [], List)]);
+ IoFun = create_io_fun(FromPid, CtLogFd, EscChars),
+ IoList = lists:foldl(IoFun, [], Content),
+ io:format(TCGL,["$tc_html","~ts"], [IoList]);
true ->
- unexpected_io(FromPid,Category,?MAX_IMPORTANCE,List,CtLogFd)
+ unexpected_io(FromPid, Category, ?MAX_IMPORTANCE, Content,
+ CtLogFd, EscChars)
end,
State;
-print_to_log(async, FromPid, Category, TCGL, List, State) ->
+print_to_log(async, FromPid, Category, TCGL, Content, EscChars, State) ->
%% in some situations (exceptions), the printout is made from the
%% test server IO process and there's no valid group leader to send to
CtLogFd = State#logger_state.ct_log_fd,
Printer =
if FromPid /= TCGL ->
- IoFun = create_io_fun(FromPid, CtLogFd),
+ IoFun = create_io_fun(FromPid, CtLogFd, EscChars),
fun() ->
test_server:permit_io(TCGL, self()),
@@ -894,25 +951,25 @@ print_to_log(async, FromPid, Category, TCGL, List, State) ->
case erlang:is_process_alive(TCGL) of
true ->
- try io:format(TCGL, "~ts",
- [lists:foldl(IoFun,[],List)]) of
+ try io:format(TCGL, ["$tc_html","~ts"],
+ [lists:foldl(IoFun,[],Content)]) of
_ -> ok
catch
_:terminated ->
unexpected_io(FromPid, Category,
?MAX_IMPORTANCE,
- List, CtLogFd)
+ Content, CtLogFd, EscChars)
end;
false ->
unexpected_io(FromPid, Category,
?MAX_IMPORTANCE,
- List, CtLogFd)
+ Content, CtLogFd, EscChars)
end
end;
true ->
fun() ->
unexpected_io(FromPid, Category, ?MAX_IMPORTANCE,
- List, CtLogFd)
+ Content, CtLogFd, EscChars)
end
end,
case State#logger_state.async_print_jobs of
@@ -1087,12 +1144,6 @@ print_style(Fd,StyleSheet) ->
print_style_error(Fd,StyleSheet,Reason)
end.
-%% Simple link version, doesn't work with all browsers unfortunately. :-(
-%% print_style(Fd, StyleSheet) ->
-%% io:format(Fd,
-%% "<link href=~p rel=\"stylesheet\" type=\"text/css\">",
-%% [StyleSheet]).
-
print_style_error(Fd,StyleSheet,Reason) ->
io:format(Fd,"\n<!-- Failed to load stylesheet ~ts: ~p -->\n",
[StyleSheet,Reason]),
@@ -1364,11 +1415,11 @@ total_row(Success, Fail, UserSkip, AutoSkip, NotBuilt, All) ->
[xhtml("<tr valign=top>\n",
["</tbody>\n<tfoot>\n<tr class=\"",odd_or_even(),"\">\n"]),
"<td><b>Total</b></td>\n", Label, TimestampCell,
- "<td align=right><b>",integer_to_list(Success),"<b></td>\n",
- "<td align=right><b>",integer_to_list(Fail),"<b></td>\n",
+ "<td align=right><b>",integer_to_list(Success),"</b></td>\n",
+ "<td align=right><b>",integer_to_list(Fail),"</b></td>\n",
"<td align=right>",integer_to_list(AllSkip),
" (",UserSkipStr,"/",AutoSkipStr,")</td>\n",
- "<td align=right><b>",integer_to_list(NotBuilt),"<b></td>\n",
+ "<td align=right><b>",integer_to_list(NotBuilt),"</b></td>\n",
AllInfo, "</tr>\n",
xhtml("","</tfoot>\n")].
@@ -1560,10 +1611,12 @@ header1(Title, SubTitle, TableCols) ->
"<!-- autogenerated by '"++atom_to_list(?MODULE)++"' -->\n",
"<head>\n",
"<title>" ++ Title ++ " " ++ SubTitle ++ "</title>\n",
- "<meta http-equiv=\"cache-control\" content=\"no-cache\">\n",
- "<meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n",
+ "<meta http-equiv=\"cache-control\" content=\"no-cache\"></meta>\n",
+ "<meta http-equiv=\"content-type\" content=\"text/html; "
+ "charset=utf-8\"></meta>\n",
xhtml("",
- ["<link rel=\"stylesheet\" href=\"",uri(CSSFile),"\" type=\"text/css\">\n"]),
+ ["<link rel=\"stylesheet\" href=\"",uri(CSSFile),
+ "\" type=\"text/css\"></link>\n"]),
xhtml("",
["<script type=\"text/javascript\" src=\"",JQueryFile,
"\"></script>\n"]),
@@ -1610,7 +1663,7 @@ footer() ->
"Copyright &copy; ", year(),
" <a href=\"http://www.erlang.org\">Open Telecom Platform</a>",
xhtml("<br>\n", "<br />\n"),
- "Updated: <!date>", current_time(), "<!/date>",
+ "Updated: <!--date-->", current_time(), "<!--/date-->",
xhtml("<br>\n", "<br />\n"),
xhtml("</font></p>\n", "</div>\n"),
"</center>\n"
@@ -1985,9 +2038,9 @@ interactive_link() ->
"<!-- autogenerated by '"++atom_to_list(?MODULE)++"' -->\n",
"<head>\n",
"<title>Last interactive run</title>\n",
- "<meta http-equiv=\"cache-control\" content=\"no-cache\">\n",
+ "<meta http-equiv=\"cache-control\" content=\"no-cache\"></meta>\n",
"<meta http-equiv=\"content-type\" content=\"text/html; "
- "charset=utf-8\">\n",
+ "charset=utf-8\"></meta>\n",
"</head>\n",
"<body>\n",
"Log from last interactive run: <a href=\"",uri(CtLog),"\">",
@@ -2846,8 +2899,12 @@ simulate() ->
simulate_logger_loop() ->
receive
- {log,_,_,_,_,_,List} ->
- S = [[io_lib:format(Str,Args),io_lib:nl()] || {Str,Args} <- List],
+ {log,_,_,_,_,_,Content,_} ->
+ S = lists:map(fun({_,Str,Args}) ->
+ [io_lib:format(Str,Args),io_lib:nl()];
+ ({Str,Args}) ->
+ [io_lib:format(Str,Args),io_lib:nl()]
+ end, Content),
io:format("~ts",[S]),
simulate_logger_loop();
stop ->
@@ -3053,15 +3110,15 @@ get_ts_html_wrapper(TestName, Logdir, PrintLabel, Cwd, TableCols, Encoding) ->
"Copyright &copy; ", year(),
" <a href=\"http://www.erlang.org\">",
"Open Telecom Platform</a><br>\n",
- "Updated: <!date>", current_time(), "<!/date>",
+ "Updated: <!--date-->", current_time(), "<!--/date-->",
"<br>\n</font></p>\n"],
{basic_html,
["<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n",
"<html>\n",
"<head><title>", TestName1, "</title>\n",
- "<meta http-equiv=\"cache-control\" content=\"no-cache\">\n",
+ "<meta http-equiv=\"cache-control\" content=\"no-cache\"></meta>\n",
"<meta http-equiv=\"content-type\" content=\"text/html; charset=",
- html_encoding(Encoding),"\">\n",
+ html_encoding(Encoding),"\"></meta>\n",
"</head>\n",
"<body", Bgr, " bgcolor=\"white\" text=\"black\" ",
"link=\"blue\" vlink=\"purple\" alink=\"red\">\n",
@@ -3078,7 +3135,7 @@ get_ts_html_wrapper(TestName, Logdir, PrintLabel, Cwd, TableCols, Encoding) ->
"Copyright &copy; ", year(),
" <a href=\"http://www.erlang.org\">",
"Open Telecom Platform</a><br />\n",
- "Updated: <!date>", current_time(), "<!/date>",
+ "Updated: <!--date-->", current_time(), "<!--/date-->",
"<br />\n</div>\n"],
CSSFile =
xhtml(fun() -> "" end,
@@ -3105,9 +3162,11 @@ get_ts_html_wrapper(TestName, Logdir, PrintLabel, Cwd, TableCols, Encoding) ->
"\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n",
"<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n",
"<head>\n<title>", TestName1, "</title>\n",
- "<meta http-equiv=\"cache-control\" content=\"no-cache\">\n",
- "<meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n",
- "<link rel=\"stylesheet\" href=\"", uri(CSSFile), "\" type=\"text/css\">\n",
+ "<meta http-equiv=\"cache-control\" content=\"no-cache\"></meta>\n",
+ "<meta http-equiv=\"content-type\" content=\"text/html; ",
+ "charset=utf-8\"></meta>\n",
+ "<link rel=\"stylesheet\" href=\"", uri(CSSFile),
+ "\" type=\"text/css\"></link>\n",
"<script type=\"text/javascript\" src=\"", JQueryFile, "\"></script>\n",
"<script type=\"text/javascript\" src=\"", TableSorterFile, "\"></script>\n"] ++
TableSorterScript ++ ["</head>\n","<body>\n", LabelStr, "\n"],
@@ -3233,11 +3292,11 @@ html_encoding(latin1) ->
html_encoding(utf8) ->
"utf-8".
-unexpected_io(Pid,ct_internal,_Importance,List,CtLogFd) ->
- IoFun = create_io_fun(Pid,CtLogFd),
- io:format(CtLogFd, "~ts", [lists:foldl(IoFun, [], List)]);
-unexpected_io(Pid,_Category,_Importance,List,CtLogFd) ->
- IoFun = create_io_fun(Pid,CtLogFd),
- Data = io_lib:format("~ts", [lists:foldl(IoFun, [], List)]),
+unexpected_io(Pid, ct_internal, _Importance, Content, CtLogFd, EscChars) ->
+ IoFun = create_io_fun(Pid, CtLogFd, EscChars),
+ io:format(CtLogFd, "~ts", [lists:foldl(IoFun, [], Content)]);
+unexpected_io(Pid, _Category, _Importance, Content, CtLogFd, EscChars) ->
+ IoFun = create_io_fun(Pid, CtLogFd, EscChars),
+ Data = io_lib:format("~ts", [lists:foldl(IoFun, [], Content)]),
test_server_io:print_unexpected(Data),
ok.
diff --git a/lib/common_test/src/ct_make.erl b/lib/common_test/src/ct_make.erl
index f4b81a0ef6..e7a9cfa843 100644
--- a/lib/common_test/src/ct_make.erl
+++ b/lib/common_test/src/ct_make.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -324,10 +324,11 @@ check_includes(File, IncludePath, ObjMTime) ->
end.
check_includes2(Epp, File, ObjMTime) ->
+ A1 = erl_anno:new(1),
case epp:parse_erl_form(Epp) of
- {ok, {attribute, 1, file, {File, 1}}} ->
+ {ok, {attribute, A1, file, {File, A1}}} ->
check_includes2(Epp, File, ObjMTime);
- {ok, {attribute, 1, file, {IncFile, 1}}} ->
+ {ok, {attribute, A1, file, {IncFile, A1}}} ->
case file:read_file_info(IncFile) of
{ok, #file_info{mtime=MTime}} when MTime>ObjMTime ->
epp:close(Epp),
diff --git a/lib/common_test/src/ct_master_logs.erl b/lib/common_test/src/ct_master_logs.erl
index 2adced42ae..54190a8254 100644
--- a/lib/common_test/src/ct_master_logs.erl
+++ b/lib/common_test/src/ct_master_logs.erl
@@ -238,9 +238,9 @@ config_table1([]) ->
["</tbody>\n</table>\n"].
int_header() ->
- "<div class=\"ct_internal\"><b>*** CT MASTER ~s *** ~ts</b>".
+ "</pre>\n<div class=\"ct_internal\"><pre><b>*** CT MASTER ~s *** ~ts</b>".
int_footer() ->
- "</div>".
+ "</pre></div>\n<pre>".
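
These strings are emitted into a log that is already inside an enclosing <pre>
block, so the new versions close that block before the div and reopen it
afterwards. Roughly, the generated markup around a CT MASTER line now looks
like this (content shown is illustrative):

    </pre>
    <div class="ct_internal"><pre><b>*** CT MASTER node1@host *** 2016-05-12</b>
    </pre></div>
    <pre>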
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% NodeDir Index functions %%%
@@ -387,11 +387,12 @@ header(Title, TableCols) ->
"<!-- autogenerated by '"++atom_to_list(?MODULE)++"' -->\n",
"<head>\n",
"<title>" ++ Title ++ "</title>\n",
- "<meta http-equiv=\"cache-control\" content=\"no-cache\">\n",
- "<meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n",
+ "<meta http-equiv=\"cache-control\" content=\"no-cache\"></meta>\n",
+ "<meta http-equiv=\"content-type\" content=\"text/html; ",
+ "charset=utf-8\"></meta>\n",
xhtml("",
["<link rel=\"stylesheet\" href=\"",ct_logs:uri(CSSFile),
- "\" type=\"text/css\">"]),
+ "\" type=\"text/css\"></link>\n"]),
xhtml("",
["<script type=\"text/javascript\" src=\"",JQueryFile,
"\"></script>\n"]),
@@ -419,7 +420,7 @@ footer() ->
"Copyright &copy; ", year(),
" <a href=\"http://www.erlang.org\">Open Telecom Platform</a>",
xhtml("<br>\n", "<br />\n"),
- "Updated: <!date>", current_time(), "<!/date>",
+    "Updated: <!--date-->", current_time(), "<!--/date-->",
xhtml("<br>\n", "<br />\n"),
xhtml("</font></p>\n", "</div>\n"),
"</center>\n"
diff --git a/lib/common_test/src/ct_release_test.erl b/lib/common_test/src/ct_release_test.erl
index 6438ea01c1..6c38f51363 100644
--- a/lib/common_test/src/ct_release_test.erl
+++ b/lib/common_test/src/ct_release_test.erl
@@ -131,7 +131,7 @@
-include_lib("kernel/include/file.hrl").
%%-----------------------------------------------------------------
--define(testnode, otp_upgrade).
+-define(testnode, 'ct_release_test-upgrade').
-define(exclude_apps, [hipe, typer, dialyzer]). % never include these apps
%%-----------------------------------------------------------------
@@ -304,7 +304,13 @@ upgrade(Apps,Level,Callback,Config) ->
%% Note, we will not reach this if the test fails with a
%% timetrap timeout in the test suite! Thus we can have
%% hanging nodes...
- Nodes = nodes(),
+ Nodes = lists:filter(fun(Node) ->
+ case atom_to_list(Node) of
+ "ct_release_test-" ++_ -> true;
+ _ -> false
+ end
+ end,
+ nodes()),
[rpc:call(Node,erlang,halt,[]) || Node <- Nodes]
end.
@@ -328,7 +334,14 @@ upgrade(Apps,Level,Callback,Config) ->
%% ct_release_test:cleanup(Config).'''
%%
cleanup(Config) ->
- Nodes = [node_name(?testnode)|nodes()],
+ AllNodes = [node_name(?testnode)|nodes()],
+ Nodes = lists:filter(fun(Node) ->
+ case atom_to_list(Node) of
+ "ct_release_test-" ++_ -> true;
+ _ -> false
+ end
+ end,
+ AllNodes),
[rpc:call(Node,erlang,halt,[]) || Node <- Nodes],
Config.
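
Both upgrade/4 and cleanup/1 now apply the same name filter, so only nodes
started by this module (recognizable by the new 'ct_release_test-' prefix) are
halted, and unrelated nodes are left alone. A hypothetical refactoring, not
part of this patch, could share the predicate:

    %% Sketch: keep only nodes whose name carries the module's prefix.
    is_test_node(Node) ->
        lists:prefix("ct_release_test-", atom_to_list(Node)).

    halt_test_nodes(Nodes) ->
        [rpc:call(Node, erlang, halt, []) ||
            Node <- Nodes, is_test_node(Node)].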
@@ -455,9 +468,9 @@ get_rels(minor) ->
{CurrentMajor,Current}.
init_upgrade_test(FromVsn,ToVsn,OldRel) ->
- OtpRel = list_to_atom("otp-"++FromVsn),
+ Name = list_to_atom("ct_release_test-otp-"++FromVsn),
ct:log("Starting node to fetch application versions to upgrade from"),
- {ok,Node} = test_server:start_node(OtpRel,peer,[{erl,[OldRel]}]),
+ {ok,Node} = test_server:start_node(Name,peer,[{erl,[OldRel]}]),
{Apps,Path} = fetch_all_apps(Node),
test_server:stop_node(Node),
{FromVsn,ToVsn,Apps,Path}.
@@ -723,7 +736,7 @@ do_callback(Node,Mod,Func,Args) ->
ct:log("~p:~p/~w returned: ~p",[Mod,Func,length(Args),R]),
case R of
{badrpc,Error} ->
- test_server:fail({test_upgrade_callback,Mod,Func,Args,Error});
+ throw({fail,{test_upgrade_callback,Mod,Func,Args,Error}});
NewState ->
NewState
end.
@@ -753,7 +766,7 @@ create_relfile(AppsVsns,CreateDir,RelName0,RelVsn) ->
%% Should test tools really be included? Some library functions
%% here could be used by callback, but not everything since
%% processes of these applications will not be running.
- TestToolAppsVsns0 = get_vsns([test_server,common_test]),
+ TestToolAppsVsns0 = get_vsns([common_test]),
TestToolAppsVsns =
[{A,V,none} || {A,V} <- TestToolAppsVsns0,
false == lists:keymember(A,1,AllAppsVsns0)],
diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl
index ceb94ceee5..e156c9b773 100644
--- a/lib/common_test/src/ct_run.erl
+++ b/lib/common_test/src/ct_run.erl
@@ -2634,11 +2634,9 @@ run_make(Targets, TestDir0, Mod, UserInclude) ->
data=TestDir}),
{ok,Cwd} = file:get_cwd(),
ok = file:set_cwd(TestDir),
- TestServerInclude = get_dir(test_server, "include"),
CtInclude = get_dir(common_test, "include"),
XmerlInclude = get_dir(xmerl, "include"),
- ErlFlags = UserInclude ++ [{i,TestServerInclude},
- {i,CtInclude},
+ ErlFlags = UserInclude ++ [{i,CtInclude},
{i,XmerlInclude},
debug_info],
Result =
diff --git a/lib/common_test/src/ct_util.erl b/lib/common_test/src/ct_util.erl
index 445fce1db8..b7fa7947e2 100644
--- a/lib/common_test/src/ct_util.erl
+++ b/lib/common_test/src/ct_util.erl
@@ -485,6 +485,8 @@ loop(Mode,TestData,StartDir) ->
{'EXIT',Pid,Reason} ->
case ets:lookup(?conn_table,Pid) of
[#conn{address=A,callback=CB}] ->
+ ErrorStr = io_lib:format("~tp", [Reason]),
+ ErrorHtml = ct_logs:escape_chars(ErrorStr),
%% A connection crashed - remove the connection but don't die
ct_logs:tc_log_async(ct_error_notify,
?MAX_IMPORTANCE,
@@ -492,8 +494,8 @@ loop(Mode,TestData,StartDir) ->
"Connection process died: "
"Pid: ~w, Address: ~p, "
"Callback: ~w\n"
- "Reason: ~p\n\n",
- [Pid,A,CB,Reason]),
+ "Reason: ~ts\n\n",
+ [Pid,A,CB,ErrorHtml]),
catch CB:close(Pid),
%% in case CB:close failed to do this:
unregister_connection(Pid),
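
The connection crash reason is now rendered with ~tp and passed through
ct_logs:escape_chars/1 before being printed with ~ts, so terms containing
markup-significant characters (for example pids such as <0.42.0>) cannot
corrupt the HTML log. A minimal sketch of the idea:

    %% Sketch: "<0.42.0>" must reach the HTML log as "&lt;0.42.0&gt;".
    ErrorStr  = io_lib:format("~tp", [Reason]),
    ErrorHtml = ct_logs:escape_chars(ErrorStr),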
diff --git a/lib/common_test/src/cth_conn_log.erl b/lib/common_test/src/cth_conn_log.erl
index 9b3dc0b5f1..954b4239af 100644
--- a/lib/common_test/src/cth_conn_log.erl
+++ b/lib/common_test/src/cth_conn_log.erl
@@ -132,7 +132,7 @@ pre_init_per_testcase(TestCase,Config,CthState) ->
[S,ct_logs:uri(L),filename:basename(L)])
|| {S,L} <- Ls] ++
"</table>",
- io:format(Str,[]),
+ ct:log(Str,[],[no_css]),
{ConnMod,{LogType,Ls}};
_ ->
{ConnMod,{LogType,[]}}
diff --git a/lib/common_test/src/erl2html2.erl b/lib/common_test/src/erl2html2.erl
new file mode 100644
index 0000000000..d440d0940d
--- /dev/null
+++ b/lib/common_test/src/erl2html2.erl
@@ -0,0 +1,311 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%------------------------------------------------------------------
+%%% Purpose: Convert Erlang files to HTML.
+%%%------------------------------------------------------------------
+
+-module(erl2html2).
+-export([convert/3, convert/4]).
+
+convert([], _Dest, _InclPath) -> % Fake clause.
+ ok;
+convert(File, Dest, InclPath) ->
+ %% The generated code uses the BGCOLOR attribute in the
+ %% BODY tag, which wasn't valid until HTML 3.2. Also,
+ %% good HTML should either override all colour attributes
+ %% or none of them -- *never* just a few.
+ %%
+ %% FIXME: The colours should *really* be set with
+ %% stylesheets...
+ %%
+ %% The html file is written with the same encoding as the input file.
+ Encoding = encoding(File),
+ Header = ["<!DOCTYPE HTML PUBLIC "
+ "\"-//W3C//DTD HTML 3.2 Final//EN\">\n"
+ "<!-- autogenerated by '",atom_to_list(?MODULE),"'. -->\n"
+ "<html>\n"
+ "<head>\n"
+ "<meta http-equiv=\"Content-Type\" content=\"text/html;"
+	      "charset=",html_encoding(Encoding),"\"></meta>\n"
+ "<title>", to_raw_list(File,Encoding), "</title>\n"
+ "</head>\n\n"
+ "<body bgcolor=\"white\" text=\"black\""
+ " link=\"blue\" vlink=\"purple\" alink=\"red\">\n"],
+ convert(File, Dest, InclPath, Header).
+
+
+convert(File, Dest, InclPath, Header) ->
+ %% statistics(runtime),
+ case parse_file(File, InclPath) of
+ {ok,Functions} ->
+ %% {_, Time1} = statistics(runtime),
+ %% io:format("Parsed file in ~.2f Seconds.~n",[Time1/1000]),
+ case file:open(File,[raw,{read_ahead,10000}]) of
+ {ok,SFd} ->
+ case file:open(Dest,[write,raw]) of
+ {ok,DFd} ->
+ file:write(DFd,[Header,"<pre>\n"]),
+ _Lines = build_html(SFd,DFd,encoding(File),Functions),
+ file:write(DFd,["</pre>\n",footer(),
+ "</body>\n</html>\n"]),
+ %% {_, Time2} = statistics(runtime),
+ %% io:format("Converted ~p lines in ~.2f Seconds.~n",
+ %% [_Lines, Time2/1000]),
+ file:close(SFd),
+ file:close(DFd),
+ ok;
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Parse the input file to get the line numbers for all function
+%%% definitions. This will be used when creating link targets for each
+%%% function in build_html/5.
+%%%
+%%% All function clauses are also marked in order to allow
+%%% possibly_enhance/2 to write these in bold.
+%%%
+%%% Use expanded preprocessor directives if possible (epp). Only if
+%%% this fails, fall back on using non-expanded code (epp_dodger).
+
+parse_file(File, InclPath) ->
+ case epp:open(File, InclPath, []) of
+ {ok,Epp} ->
+ try parse_preprocessed_file(Epp,File,false) of
+ Forms ->
+ epp:close(Epp),
+ {ok,Forms}
+ catch
+ _:{error,_Reason,true} ->
+ parse_non_preprocessed_file(File);
+ _:{error,_Reason,false} ->
+ {ok,[]}
+ end;
+ Error = {error,_} ->
+ Error
+ end.
+
+parse_preprocessed_file(Epp, File, InCorrectFile) ->
+ case epp:parse_erl_form(Epp) of
+ {ok,Form} ->
+ case Form of
+ {attribute,_,file,{File,_}} ->
+ parse_preprocessed_file(Epp, File, true);
+ {attribute,_,file,{_OtherFile,_}} ->
+ parse_preprocessed_file(Epp, File, false);
+ {function,L,F,A,Cs} when InCorrectFile ->
+ {CLs,LastCL} = find_clause_lines(Cs, []),
+		    %% tl(CLs) because we already know the start line
+ [{atom_to_list(F),A,get_line(L),LastCL} | tl(CLs)] ++
+ parse_preprocessed_file(Epp, File, true);
+ _ ->
+ parse_preprocessed_file(Epp, File, InCorrectFile)
+ end;
+ {error,Reason={_L,epp,{undefined,_Macro,none}}} ->
+ throw({error,Reason,InCorrectFile});
+ {error,_Reason} ->
+ parse_preprocessed_file(Epp, File, InCorrectFile);
+ {eof,_Location} ->
+ []
+ end.
+
+parse_non_preprocessed_file(File) ->
+ case file:open(File, []) of
+ {ok,Epp} ->
+ Forms = parse_non_preprocessed_file(Epp, File, 1),
+ file:close(Epp),
+ {ok,Forms};
+ Error = {error,_E} ->
+ Error
+ end.
+
+parse_non_preprocessed_file(Epp, File, Location) ->
+ case epp_dodger:parse_form(Epp, Location) of
+ {ok,Tree,Location1} ->
+ try erl_syntax:revert(Tree) of
+ {function,L,F,A,Cs} ->
+ {CLs,LastCL} = find_clause_lines(Cs, []),
+		%% tl(CLs) because we already know the start line
+ [{atom_to_list(F),A,get_line(L),LastCL} | tl(CLs)] ++
+ parse_non_preprocessed_file(Epp, File, Location1);
+ _ ->
+ parse_non_preprocessed_file(Epp, File, Location1)
+ catch
+ _:_ -> parse_non_preprocessed_file(Epp, File, Location1)
+ end;
+ {error,_E,Location1} ->
+ parse_non_preprocessed_file(Epp, File, Location1);
+ {eof,_Location} ->
+ []
+ end.
+
+get_line(Anno) ->
+ erl_anno:line(Anno).
+
+%%%-----------------------------------------------------------------
+%%% Find the line number of the last expression in the function
+find_clause_lines([{clause,CL,_Params,_Op,Exprs}], CLs) -> % last clause
+ case classify_exprs(Exprs) of
+ {anno, Anno} ->
+ {lists:reverse([{clause,get_line(CL)}|CLs]), get_line(Anno)};
+ {tree, Exprs1} ->
+ find_clause_lines([{clause,CL,undefined,undefined,Exprs1}], CLs);
+ unknown ->
+ {lists:reverse([{clause,get_line(CL)}|CLs]), get_line(CL)}
+ end;
+find_clause_lines([{clause,CL,_Params,_Op,_Exprs} | Cs], CLs) ->
+ find_clause_lines(Cs, [{clause,get_line(CL)}|CLs]).
+
+classify_exprs(Exprs) ->
+ case tuple_to_list(lists:last(Exprs)) of
+ [macro,{_var,Anno,_MACRO} | _] ->
+ {anno, Anno};
+ [T,ExprAnno | Exprs1] ->
+ case erl_anno:is_anno(ExprAnno) of
+ true ->
+ {anno, ExprAnno};
+ false when T =:= tree ->
+ {tree, Exprs1};
+ false ->
+ unknown
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Add a link target for each line and one for each function definition.
+build_html(SFd,DFd,Encoding,FuncsAndCs) ->
+ build_html(SFd,DFd,Encoding,file:read_line(SFd),1,FuncsAndCs,
+ false,undefined).
+
+%% line of last expression in function found
+build_html(SFd,DFd,Enc,{ok,Str},LastL,FuncsAndCs,_IsFuncDef,{F,LastL}) ->
+ LastLineLink = test_server_ctrl:uri_encode(F++"-last_expr",utf8),
+ file:write(DFd,["<a name=\"",
+ to_raw_list(LastLineLink,Enc),"\"/>"]),
+ build_html(SFd,DFd,Enc,{ok,Str},LastL,FuncsAndCs,true,undefined);
+%% function start line found
+build_html(SFd,DFd,Enc,{ok,Str},L0,[{F,A,L0,LastL}|FuncsAndCs],
+ _IsFuncDef,_FAndLastL) ->
+ FALink = test_server_ctrl:uri_encode(F++"-"++integer_to_list(A),utf8),
+ file:write(DFd,["<a name=\"",to_raw_list(FALink,Enc),"\"/>"]),
+ build_html(SFd,DFd,Enc,{ok,Str},L0,FuncsAndCs,true,{F,LastL});
+build_html(SFd,DFd,Enc,{ok,Str},L,[{clause,L}|FuncsAndCs],
+ _IsFuncDef,FAndLastL) ->
+ build_html(SFd,DFd,Enc,{ok,Str},L,FuncsAndCs,true,FAndLastL);
+build_html(SFd,DFd,Enc,{ok,Str},L,FuncsAndCs,IsFuncDef,FAndLastL) ->
+ LStr = line_number(L),
+ Str1 = line(Str,IsFuncDef),
+ file:write(DFd,[LStr,Str1]),
+ build_html(SFd,DFd,Enc,file:read_line(SFd),L+1,FuncsAndCs,false,FAndLastL);
+build_html(_SFd,_DFd,_Enc,eof,L,_FuncsAndCs,_IsFuncDef,_FAndLastL) ->
+ L.
+
+line_number(L) ->
+ LStr = integer_to_list(L),
+ Pred =
+ case length(LStr) of
+ Length when Length < 5 ->
+ lists:duplicate(5-Length,$\s);
+ _ ->
+ []
+ end,
+ ["<a name=\"",LStr,"\"/>",Pred,LStr,": "].
+
+line(Str,IsFuncDef) ->
+ Str1 = htmlize(Str),
+ possibly_enhance(Str1,IsFuncDef).
+
+%%%-----------------------------------------------------------------
+%%% Substitute special characters that should not appear in HTML
+htmlize([$<|Str]) ->
+ [$&,$l,$t,$;|htmlize(Str)];
+htmlize([$>|Str]) ->
+ [$&,$g,$t,$;|htmlize(Str)];
+htmlize([$&|Str]) ->
+ [$&,$a,$m,$p,$;|htmlize(Str)];
+htmlize([$"|Str]) ->
+ [$&,$q,$u,$o,$t,$;|htmlize(Str)];
+htmlize([Ch|Str]) ->
+ [Ch|htmlize(Str)];
+htmlize([]) ->
+ [].
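
A minimal illustration of the substitution performed above:

    %% htmlize("if X < Y -> ok") =:= "if X &lt; Y -&gt; ok"
    %% htmlize("\"A & B\"")      =:= "&quot;A &amp; B&quot;"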
+
+%%%-----------------------------------------------------------------
+%%% Write comments in italic and function definitions in bold.
+possibly_enhance(Str,true) ->
+ case lists:splitwith(fun($() -> false; (_) -> true end, Str) of
+ {_,[]} -> Str;
+ {F,A} -> ["<b>",F,"</b>",A]
+ end;
+possibly_enhance([$%|_]=Str,_) ->
+ ["<i>",Str--"\n","</i>","\n"];
+possibly_enhance([$-|_]=Str,_) ->
+ possibly_enhance(Str,true);
+possibly_enhance(Str,false) ->
+ Str.
+
+%%%-----------------------------------------------------------------
+%%% End of the file
+footer() ->
+ "".
+
+%%%-----------------------------------------------------------------
+%%% Read encoding from source file
+encoding(File) ->
+ case epp:read_encoding(File) of
+ none ->
+ epp:default_encoding();
+ E ->
+ E
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Convert encoding atom to string for use in HTML header
+html_encoding(latin1) ->
+ "iso-8859-1";
+html_encoding(utf8) ->
+ "utf-8".
+
+%%%-----------------------------------------------------------------
+%%% Convert a string to a list of raw printable characters in the
+%%% given encoding. This is necessary since the files (source and
+%%% destination) are both opened in raw mode (default encoding). Byte
+%%% by byte is read from source and written to the destination. This
+%%% conversion is needed when printing data that is not first read
+%%% from the source.
+%%%
+%%% Example: if the encoding of the file is utf8, and we have a string
+%%% containing "å" = [229], then we need to convert this to [195,165]
+%%% before writing. Note that this conversion is only necessary
+%%% because the destination file is not (necessarily) opened with utf8
+%%% encoding - it is opened with default encoding in order to allow
+%%% raw file mode and byte by byte copying from source.
+to_raw_list(X,latin1) when is_list(X) ->
+ X;
+to_raw_list(X,utf8) when is_list(X) ->
+ binary_to_list(unicode:characters_to_binary(X)).
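
A hedged usage sketch of the new module (paths and include directories are
illustrative):

    %% Convert a source file into a line-anchored HTML page, searching
    %% "include" for header files when preprocessing with epp:
    ok = erl2html2:convert("src/my_SUITE.erl",
                           "logs/my_SUITE.erl.html",
                           ["include"]).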
diff --git a/lib/common_test/src/test_server.erl b/lib/common_test/src/test_server.erl
new file mode 100644
index 0000000000..34acad6fd1
--- /dev/null
+++ b/lib/common_test/src/test_server.erl
@@ -0,0 +1,2783 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+-module(test_server).
+
+-define(DEFAULT_TIMETRAP_SECS, 60).
+
+%%% TEST_SERVER_CTRL INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([run_test_case_apply/1,init_target_info/0,init_purify/0]).
+-export([cover_compile/1,cover_analyse/2]).
+
+%%% TEST_SERVER_SUP INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([get_loc/1,set_tc_state/1]).
+
+%%% TEST SUITE INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([lookup_config/2]).
+-export([fail/0,fail/1,format/1,format/2,format/3]).
+-export([capture_start/0,capture_stop/0,capture_get/0]).
+-export([messages_get/0]).
+-export([permit_io/2]).
+-export([hours/1,minutes/1,seconds/1,sleep/1,adjusted_sleep/1,timecall/3]).
+-export([timetrap_scale_factor/0,timetrap/1,get_timetrap_info/0,
+ timetrap_cancel/1,timetrap_cancel/0]).
+-export([m_out_of_n/3,do_times/4,do_times/2]).
+-export([call_crash/3,call_crash/4,call_crash/5]).
+-export([temp_name/1]).
+-export([start_node/3, stop_node/1, wait_for_node/1, is_release_available/1]).
+-export([app_test/1, app_test/2, appup_test/1]).
+-export([is_native/1]).
+-export([comment/1, make_priv_dir/0]).
+-export([os_type/0]).
+-export([run_on_shielded_node/2]).
+-export([is_cover/0,is_debug/0,is_commercial/0]).
+
+-export([break/1,break/2,break/3,continue/0,continue/1]).
+
+%%% DEBUGGER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([purify_new_leaks/0, purify_format/2, purify_new_fds_inuse/0,
+ purify_is_running/0]).
+
+%%% PRIVATE EXPORTED %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-include("test_server_internal.hrl").
+-include_lib("kernel/include/file.hrl").
+
+init_target_info() ->
+ [$.|Emu] = code:objfile_extension(),
+ {_, OTPRel} = init:script_id(),
+ #target_info{os_family=test_server_sup:get_os_family(),
+ os_type=os:type(),
+ version=erlang:system_info(version),
+ system_version=erlang:system_info(system_version),
+ root_dir=code:root_dir(),
+ emulator=Emu,
+ otp_release=OTPRel,
+ username=test_server_sup:get_username(),
+ cookie=atom_to_list(erlang:get_cookie())}.
+
+init_purify() ->
+ purify_new_leaks().
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% cover_compile(#cover{app=App,incl=Include,excl=Exclude,cross=Cross}) ->
+%% {ok,#cover{mods=AnalyseModules}} | {error,Reason}
+%%
+%% App = atom() , name of application to be compiled
+%% Exclude = [atom()], list of modules to exclude
+%% Include = [atom()], list of modules outside of App that should be included
+%% in the cover compilation
+%% Cross = [atom()], list of modules outside of App that should be included
+%% in the cover compilation, but that shall not be part of
+%% the cover analysis for this application.
+%% AnalyseModules = [atom()], list of successfully compiled modules
+%%
+%% Cover compile the given application. Return {ok,CoverInfo} if
+%% compilation succeeds, else (if application is not found and there
+%% are no modules to compile) {error,application_not_found}.
+
+cover_compile(CoverInfo=#cover{app=none,incl=Include,cross=Cross}) ->
+ CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross),
+ CompileMods = Include++CrossMods,
+ case length(CompileMods) of
+ 0 ->
+ io:fwrite("WARNING: No modules to cover compile!\n\n",[]),
+ cover:start(), % start cover server anyway
+ {ok,CoverInfo#cover{mods=[]}};
+ N ->
+ io:fwrite("Cover compiling ~w modules - "
+ "this may take some time... ",[N]),
+ do_cover_compile(CompileMods),
+ io:fwrite("done\n\n",[]),
+ {ok,CoverInfo#cover{mods=Include}}
+ end;
+cover_compile(CoverInfo=#cover{app=App,excl=all,incl=Include,cross=Cross}) ->
+ CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross),
+ CompileMods = Include++CrossMods,
+ case length(CompileMods) of
+ 0 ->
+ io:fwrite("WARNING: No modules to cover compile!\n\n",[]),
+ cover:start(), % start cover server anyway
+ {ok,CoverInfo#cover{mods=[]}};
+ N ->
+ io:fwrite("Cover compiling '~w' (~w files) - "
+ "this may take some time... ",[App,N]),
+ io:format("\nWARNING: All modules in \'~w\' are excluded\n"
+ "Only cover compiling modules in include list "
+ "and the modules\nin the cross cover file:\n"
+ "~tp\n", [App,CompileMods]),
+ do_cover_compile(CompileMods),
+ io:fwrite("done\n\n",[]),
+ {ok,CoverInfo#cover{mods=Include}}
+ end;
+cover_compile(CoverInfo=#cover{app=App,excl=Exclude,
+ incl=Include,cross=Cross}) ->
+ CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross),
+ case code:lib_dir(App) of
+ {error,bad_name} ->
+ case Include++CrossMods of
+ [] ->
+ io:format("\nWARNING: Can't find lib_dir for \'~w\'\n"
+ "Not cover compiling!\n\n",[App]),
+ {error,application_not_found};
+ CompileMods ->
+ io:fwrite("Cover compiling '~w' (~w files) - "
+ "this may take some time... ",
+ [App,length(CompileMods)]),
+ io:format("\nWARNING: Can't find lib_dir for \'~w\'\n"
+ "Only cover compiling modules in include list: "
+ "~tp\n", [App,Include]),
+ do_cover_compile(CompileMods),
+ io:fwrite("done\n\n",[]),
+ {ok,CoverInfo#cover{mods=Include}}
+ end;
+ LibDir ->
+ EbinDir = filename:join([LibDir,"ebin"]),
+ WC = filename:join(EbinDir,"*.beam"),
+ AllMods = module_names(filelib:wildcard(WC)),
+ AnalyseMods = (AllMods ++ Include) -- Exclude,
+ CompileMods = AnalyseMods ++ CrossMods,
+ case length(CompileMods) of
+ 0 ->
+ io:fwrite("WARNING: No modules to cover compile!\n\n",[]),
+ cover:start(), % start cover server anyway
+ {ok,CoverInfo#cover{mods=[]}};
+ N ->
+ io:fwrite("Cover compiling '~w' (~w files) - "
+ "this may take some time... ",[App,N]),
+ do_cover_compile(CompileMods),
+ io:fwrite("done\n\n",[]),
+ {ok,CoverInfo#cover{mods=AnalyseMods}}
+ end
+ end.
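
A hedged example of driving cover_compile/1 (the #cover record is internal,
defined in test_server_internal.hrl; application and module names here are
illustrative):

    %% Sketch: cover compile all of 'myapp' except mod_x, additionally
    %% including helper_mod from outside the application.
    CoverInfo = #cover{app   = myapp,
                       incl  = [helper_mod],
                       excl  = [mod_x],
                       cross = []},
    {ok,#cover{mods=AnalyseMods}} = test_server:cover_compile(CoverInfo).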
+
+
+module_names(Beams) ->
+ [list_to_atom(filename:basename(filename:rootname(Beam))) || Beam <- Beams].
+
+
+do_cover_compile(Modules) ->
+ cover:start(),
+ Sticky = prepare_cover_compile(Modules,[]),
+ R = cover:compile_beam(Modules),
+ [warn_compile(Error) || Error <- R,element(1,Error)=/=ok],
+ [code:stick_mod(M) || M <- Sticky],
+ ok.
+
+warn_compile({error,{Reason,Module}}) ->
+ io:fwrite("\nWARNING: Could not cover compile ~ts: ~p\n",
+ [Module,{error,Reason}]).
+
+%% Make sure all modules are loaded and unstick if sticky
+prepare_cover_compile([M|Ms],Sticky) ->
+ case {code:is_sticky(M),code:is_loaded(M)} of
+ {true,_} ->
+ code:unstick_mod(M),
+ prepare_cover_compile(Ms,[M|Sticky]);
+ {false,false} ->
+ case code:load_file(M) of
+ {module,_} ->
+ prepare_cover_compile([M|Ms],Sticky);
+ Error ->
+ io:fwrite("\nWARNING: Could not load ~w: ~p\n",[M,Error]),
+ prepare_cover_compile(Ms,Sticky)
+ end;
+ {false,_} ->
+ prepare_cover_compile(Ms,Sticky)
+ end;
+prepare_cover_compile([],Sticky) ->
+ Sticky.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% cover_analyse(Dir,#cover{level=Analyse,mods=Modules,stop=Stop) ->
+%% [{M,{Cov,NotCov,Details}}]
+%%
+%% Dir = string()
+%% Analyse = details | overview
+%% Modules = [atom()], the modules to analyse
+%%
+%% Cover analysis. If Analyse==details analyse_to_file is used.
+%%
+%% If Analyse==overview analyse_to_file is not used, only an overview
+%% containing the number of covered/not covered lines in each module.
+%%
+%% Also, cover data will be exported to a file called all.coverdata in
+%% the given directory.
+%%
+%% Finally, if Stop==true, then cover will be stopped after the
+%% analysis is completed. Stopping cover causes the original (non
+%% cover compiled) modules to be loaded back in. If a process at this
+%% point is still running old code of any of the cover compiled
+%% modules, meaning that it has not made any fully qualified function
+%% call after the cover compilation, the process will now be
+%% killed. To avoid this scenario, it is possible to set Stop=false,
+%% which means that the modules will stay cover compiled. Note that
+%% this is only recommended if the erlang node is being terminated
+%% after the test is completed.
+cover_analyse(Dir,#cover{level=Analyse,mods=Modules,stop=Stop}) ->
+ io:fwrite(user, "Cover analysing... ", []),
+ {ATFOk,ATFFail} =
+ case Analyse of
+ details ->
+ case cover:export(filename:join(Dir,"all.coverdata")) of
+ ok ->
+ {result,Ok1,Fail1} =
+ cover:analyse_to_file(Modules,[{outdir,Dir},html]),
+ {lists:map(fun(OutFile) ->
+ M = list_to_atom(
+ filename:basename(
+ filename:rootname(OutFile,
+ ".COVER.html")
+ )
+ ),
+ {M,{file,OutFile}}
+ end, Ok1),
+ lists:map(fun({Reason,M}) ->
+ {M,{error,Reason}}
+ end, Fail1)};
+ Error ->
+ {[],lists:map(fun(M) -> {M,Error} end, Modules)}
+ end;
+ overview ->
+ case cover:export(filename:join(Dir,"all.coverdata")) of
+ ok ->
+ {[],lists:map(fun(M) -> {M,undefined} end, Modules)};
+ Error ->
+ {[],lists:map(fun(M) -> {M,Error} end, Modules)}
+ end
+ end,
+ {result,AOk,AFail} = cover:analyse(Modules,module),
+ R0 = merge_analysis_results(AOk,ATFOk++ATFFail,[]) ++
+ [{M,{error,Reason}} || {Reason,M} <- AFail],
+ R = lists:sort(R0),
+ io:fwrite(user, "done\n\n", []),
+
+ case Stop of
+ true ->
+ Sticky = unstick_all_sticky(node()),
+ cover:stop(),
+ stick_all_sticky(node(),Sticky);
+ false ->
+ ok
+ end,
+ R.
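
Continuing the sketch above, a corresponding analysis pass (the directory name
is illustrative):

    %% Sketch: export all.coverdata and per-module HTML details to
    %% "cover_logs", then stop cover afterwards.
    Results = test_server:cover_analyse("cover_logs",
                                        #cover{level = details,
                                               mods  = AnalyseMods,
                                               stop  = true}),
    %% Results has the form [{Mod,{Cov,NotCov,Details}}], with
    %% {Mod,{error,Reason}} entries for modules that failed analysis.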
+
+merge_analysis_results([{M,{Cov,NotCov}}|T],ATF,Acc) ->
+ case lists:keytake(M,1,ATF) of
+ {value,{_,R},ATF1} ->
+ merge_analysis_results(T,ATF1,[{M,{Cov,NotCov,R}}|Acc]);
+ false ->
+ merge_analysis_results(T,ATF,Acc)
+ end;
+merge_analysis_results([],_,Acc) ->
+ Acc.
+
+do_cover_for_node(Node,CoverFunc) ->
+ do_cover_for_node(Node,CoverFunc,true).
+do_cover_for_node(Node,CoverFunc,StickUnstick) ->
+ %% In case a slave node is starting another slave node! I.e. this
+ %% function is executed on a slave node - then the cover function
+ %% must be executed on the master node. This is for instance the
+ %% case in test_server's own tests.
+ MainCoverNode = cover:get_main_node(),
+ Sticky =
+ if StickUnstick -> unstick_all_sticky(MainCoverNode,Node);
+ true -> ok
+ end,
+ rpc:call(MainCoverNode,cover,CoverFunc,[Node]),
+ if StickUnstick -> stick_all_sticky(Node,Sticky);
+ true -> ok
+ end.
+
+unstick_all_sticky(Node) ->
+ unstick_all_sticky(node(),Node).
+unstick_all_sticky(MainCoverNode,Node) ->
+ lists:filter(
+ fun(M) ->
+ case code:is_sticky(M) of
+ true ->
+ rpc:call(Node,code,unstick_mod,[M]),
+ true;
+ false ->
+ false
+ end
+ end,
+ rpc:call(MainCoverNode,cover,modules,[])).
+
+stick_all_sticky(Node,Sticky) ->
+ lists:foreach(
+ fun(M) ->
+ rpc:call(Node,code,stick_mod,[M])
+ end,
+ Sticky).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% run_test_case_apply(Mod,Func,Args,Name,RunInit,TimetrapData) ->
+%% {Time,Value,Loc,Opts,Comment} | {died,Reason,unknown,Comment}
+%%
+%% Time = float() (seconds)
+%% Value = term()
+%% Loc = term()
+%% Comment = string()
+%% Reason = term()
+%%
+%% Spawns off a process (case process) that actually runs the test suite.
+%% The case process will have the job process as group leader, which makes
+%% it possible to capture all its output from io:format/2, etc.
+%%
+%% The job process then sits down and waits for news from the case process.
+%%
+%% Returns a tuple with the time spent (in seconds) in the test case,
+%% the return value from the test case or an {'EXIT',Reason} if the case
+%% failed, Loc points out where the test case crashed (if it did). Loc
+%% is either the name of the function, or {<Module>,<Line>} of the last
+%% line executed that had a ?line macro. If the test case did execute
+%% erase/0 or similar, it may be empty. Comment is the last comment added
+%% by test_server:comment/1, the reason given if test_server:fail has been
+%% called, or the comment given by the return value {comment,Comment} from
+%% a test case.
+%%
+%% {died,Reason,unknown,Comment} is returned if the test case was killed
+%% by some other process. Reason is the kill reason provided.
+%%
+%% TimetrapData = {MultiplyTimetrap,ScaleTimetrap}, which indicates a
+%% possible extension of all timetraps. Timetraps will be multiplied by
+%% MultiplyTimetrap. If it is infinity, no timetraps will be started at all.
+%% ScaleTimetrap indicates whether test_server should attempt to automatically
+%% compensate timetraps for runtime delays introduced by e.g. tools like
+%% cover.
+
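
For orientation, a hedged sketch of the tuple this function receives from the
controller (field values are illustrative):

    %% {CaseNum,Mod,Func,Args,Name,RunInit,TimetrapData}
    %% e.g. {1, my_SUITE, my_case, [[{priv_dir,"/tmp/priv"}]], undefined,
    %%       run_init, {2,true}}   %% timetraps doubled, scaling enabled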
+run_test_case_apply({CaseNum,Mod,Func,Args,Name,
+ RunInit,TimetrapData}) ->
+ purify_format("Test case #~w ~w:~w/1", [CaseNum, Mod, Func]),
+ case os:getenv("TS_RUN_VALGRIND") of
+ false ->
+ ok;
+ _ ->
+ os:putenv("VALGRIND_LOGFILE_INFIX",atom_to_list(Mod)++"."++
+ atom_to_list(Func)++"-")
+ end,
+ ProcBef = erlang:system_info(process_count),
+ Result = run_test_case_apply(Mod, Func, Args, Name, RunInit,
+ TimetrapData),
+ ProcAft = erlang:system_info(process_count),
+ purify_new_leaks(),
+ DetFail = get(test_server_detected_fail),
+ {Result,DetFail,ProcBef,ProcAft}.
+
+-type tc_status() :: 'starting' | 'running' | 'init_per_testcase' |
+ 'end_per_testcase' | {'framework',atom(),atom()} |
+ 'tc'.
+-record(st,
+ {
+ ref :: reference(),
+ pid :: pid(),
+ mf :: {atom(),atom()},
+ last_known_loc :: term(),
+ status :: tc_status() | 'undefined',
+ ret_val :: term(),
+ comment :: list(char()),
+ timeout :: non_neg_integer() | 'infinity',
+ config :: list() | 'undefined',
+ end_conf_pid :: pid() | 'undefined'
+ }).
+
+run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
+ print_timestamp(minor,"Started at "),
+ print(minor, "", [], internal_raw),
+ TCCallback = get(test_server_testcase_callback),
+ LogOpts = get(test_server_logopts),
+ Ref = make_ref(),
+ Pid =
+ spawn_link(
+ run_test_case_eval_fun(Mod, Func, Args, Name, Ref,
+ RunInit, TimetrapData,
+ LogOpts, TCCallback)),
+ put(test_server_detected_fail, []),
+ St = #st{ref=Ref,pid=Pid,mf={Mod,Func},last_known_loc=unknown,
+ status=starting,ret_val=[],comment="",timeout=infinity,
+ config=hd(Args)},
+ run_test_case_msgloop(St).
+
+%% Ugly bug (pre R5A):
+%% If this process (group leader of the test case) terminates before
+%% all messages have been replied back to the io server, the io server
+%% hangs. Fixed by the 20 ms timeout check here, and by using monitor in
+%% io.erl.
+%%
+%% A test case is known to have failed if it returns an {'EXIT', _} tuple,
+%% or sends a message {failed, File, Line} to its group_leader.
+%%
+run_test_case_msgloop(#st{ref=Ref,pid=Pid,end_conf_pid=EndConfPid0}=St0) ->
+ receive
+ {set_tc_state=Tag,From,{Status,Config0}} ->
+ Config = case Config0 of
+ unknown -> St0#st.config;
+ _ -> Config0
+ end,
+ St = St0#st{status=Status,config=Config},
+ From ! {self(),Tag,ok},
+ run_test_case_msgloop(St);
+ {abort_current_testcase,_,_}=Abort when St0#st.status =:= starting ->
+	    %% we're in the init phase and must postpone this operation
+ %% until test case execution is in progress (or FW:init_tc
+ %% gets killed)
+ self() ! Abort,
+ erlang:yield(),
+ run_test_case_msgloop(St0);
+ {abort_current_testcase,Reason,From} ->
+ Line = case is_process_alive(Pid) of
+ true -> get_loc(Pid);
+ false -> unknown
+ end,
+ Mon = erlang:monitor(process, Pid),
+ exit(Pid,{testcase_aborted,Reason,Line}),
+ erlang:yield(),
+ From ! {self(),abort_current_testcase,ok},
+ St = receive
+ {'DOWN', Mon, process, Pid, _} ->
+ St0
+ after 10000 ->
+ %% Pid is probably trapping exits, hit it harder...
+ exit(Pid, kill),
+ %% here's the only place we know Reason, so we save
+ %% it as a comment, potentially replacing user data
+ Error = lists:flatten(io_lib:format("Aborted: ~p",
+ [Reason])),
+ Error1 = lists:flatten([string:strip(S,left) ||
+ S <- string:tokens(Error,
+ [$\n])]),
+ Comment = if length(Error1) > 63 ->
+ string:substr(Error1,1,60) ++ "...";
+ true ->
+ Error1
+ end,
+ St0#st{comment=Comment}
+ end,
+ run_test_case_msgloop(St);
+ {sync_apply,From,MFA} ->
+ do_sync_apply(false,From,MFA),
+ run_test_case_msgloop(St0);
+ {sync_apply_proxy,Proxy,From,MFA} ->
+ do_sync_apply(Proxy,From,MFA),
+ run_test_case_msgloop(St0);
+ {comment,NewComment0} ->
+ NewComment1 = test_server_ctrl:to_string(NewComment0),
+ NewComment = test_server_sup:framework_call(format_comment,
+ [NewComment1],
+ NewComment1),
+ run_test_case_msgloop(St0#st{comment=NewComment});
+ {read_comment,From} ->
+ From ! {self(),read_comment,St0#st.comment},
+ run_test_case_msgloop(St0);
+ {make_priv_dir,From} ->
+ Config = case St0#st.config of
+ undefined -> [];
+ Config0 -> Config0
+ end,
+ Result =
+ case proplists:get_value(priv_dir, Config) of
+ undefined ->
+ {error,no_priv_dir_in_config};
+ PrivDir ->
+ case file:make_dir(PrivDir) of
+ ok ->
+ ok;
+ {error, eexist} ->
+ ok;
+ MkDirError ->
+ {error,{MkDirError,PrivDir}}
+ end
+ end,
+ From ! {self(),make_priv_dir,Result},
+ run_test_case_msgloop(St0);
+ {'EXIT',Pid,{Ref,Time,Value,Loc,Opts}} ->
+ RetVal = {Time/1000000,Value,Loc,Opts},
+ St = setup_termination(RetVal, St0#st{config=undefined}),
+ run_test_case_msgloop(St);
+ {'EXIT',Pid,Reason} ->
+ %% This exit typically happens when an unknown external process
+ %% has caused a test case process to terminate (e.g. if a linked
+ %% process has crashed).
+ St =
+ case Reason of
+ {What,[Loc0={_M,_F,A,[{file,_}|_]}|_]} when
+ is_integer(A) ->
+ Loc = rewrite_loc_item(Loc0),
+ handle_tc_exit(What, St0#st{last_known_loc=[Loc]});
+ {What,[Details,Loc0={_M,_F,A,[{file,_}|_]}|_]} when
+ is_integer(A) ->
+ Loc = rewrite_loc_item(Loc0),
+ handle_tc_exit({What,Details}, St0#st{last_known_loc=[Loc]});
+ _ ->
+ handle_tc_exit(Reason, St0)
+ end,
+ run_test_case_msgloop(St);
+ {EndConfPid0,{call_end_conf,Data,_Result}} ->
+ #st{mf={Mod,Func},config=CurrConf} = St0,
+ case CurrConf of
+ _ when is_list(CurrConf) ->
+ {_Mod,_Func,TCPid,TCExitReason,Loc} = Data,
+ spawn_fw_call(Mod,Func,CurrConf,TCPid,
+ TCExitReason,Loc,self()),
+ St = St0#st{config=undefined,end_conf_pid=undefined},
+ run_test_case_msgloop(St);
+ _ ->
+ run_test_case_msgloop(St0)
+ end;
+ {_FwCallPid,fw_notify_done,{T,Value,Loc,Opts,AddToComment}} ->
+ %% the framework has been notified, we're finished
+ RetVal = {T,Value,Loc,Opts},
+ Comment0 = St0#st.comment,
+ Comment = case AddToComment of
+ undefined ->
+ Comment0;
+ _ ->
+ if Comment0 =:= "" ->
+ AddToComment;
+ true ->
+ Comment0 ++
+ test_server_ctrl:xhtml("<br>",
+ "<br />") ++
+ AddToComment
+ end
+ end,
+ St = setup_termination(RetVal, St0#st{comment=Comment,
+ config=undefined}),
+ run_test_case_msgloop(St);
+ {'EXIT',_FwCallPid,{fw_notify_done,Func,Error}} ->
+ %% a framework function failed
+ CB = os:getenv("TEST_SERVER_FRAMEWORK"),
+ Loc = case CB of
+ FW when FW =:= false; FW =:= "undefined" ->
+ [{test_server,Func}];
+ _ ->
+ [{list_to_atom(CB),Func}]
+ end,
+ RetVal = {died,{framework_error,Loc,Error},Loc},
+ St = setup_termination(RetVal, St0#st{comment="Framework error",
+ config=undefined}),
+ run_test_case_msgloop(St);
+ {failed,File,Line} ->
+ put(test_server_detected_fail,
+ [{File, Line}| get(test_server_detected_fail)]),
+ run_test_case_msgloop(St0);
+
+ {user_timetrap,Pid,_TrapTime,StartTime,E={user_timetrap_error,_},_} ->
+ case update_user_timetraps(Pid, StartTime) of
+ proceed ->
+ self() ! {abort_current_testcase,E,Pid};
+ ignore ->
+ ok
+ end,
+ run_test_case_msgloop(St0);
+ {user_timetrap,Pid,TrapTime,StartTime,ElapsedTime,Scale} ->
+	    %% a user timetrap has been triggered; ignore it if a new
+	    %% timetrap has been started since
+ case update_user_timetraps(Pid, StartTime) of
+ proceed ->
+ TotalTime = if is_integer(TrapTime) ->
+ TrapTime + ElapsedTime;
+ true ->
+ TrapTime
+ end,
+ timetrap(TrapTime, TotalTime, Pid, Scale);
+ ignore ->
+ ok
+ end,
+ run_test_case_msgloop(St0);
+ {timetrap_cancel_one,Handle,_From} ->
+ timetrap_cancel_one(Handle, false),
+ run_test_case_msgloop(St0);
+ {timetrap_cancel_all,TCPid,_From} ->
+ timetrap_cancel_all(TCPid, false),
+ run_test_case_msgloop(St0);
+ {get_timetrap_info,From,TCPid} ->
+ Info = get_timetrap_info(TCPid, false),
+ From ! {self(),get_timetrap_info,Info},
+ run_test_case_msgloop(St0);
+ _Other when not is_tuple(_Other) ->
+ %% ignore anything not generated by test server
+ run_test_case_msgloop(St0);
+ _Other when element(1, _Other) /= 'EXIT',
+ element(1, _Other) /= started,
+ element(1, _Other) /= finished,
+ element(1, _Other) /= print ->
+ %% ignore anything not generated by test server
+ run_test_case_msgloop(St0)
+ after St0#st.timeout ->
+ #st{ret_val=RetVal,comment=Comment} = St0,
+ erlang:append_element(RetVal, Comment)
+ end.
+
+setup_termination(RetVal, #st{pid=Pid}=St) ->
+ timetrap_cancel_all(Pid, false),
+ St#st{ret_val=RetVal,timeout=20}.
+
+set_tc_state(State) ->
+ set_tc_state(State,unknown).
+set_tc_state(State, Config) ->
+ tc_supervisor_req(set_tc_state, {State,Config}).
+
+handle_tc_exit(killed, St) ->
+ %% probably the result of an exit(TestCase,kill) call, which is the
+ %% only way to abort a testcase process that traps exits
+ %% (see abort_current_testcase).
+ #st{config=Config,mf={Mod,Func},pid=Pid} = St,
+ Msg = testcase_aborted_or_killed,
+ spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()),
+ St;
+handle_tc_exit({testcase_aborted,{user_timetrap_error,_}=Msg,_}, St) ->
+ #st{config=Config,mf={Mod,Func},pid=Pid} = St,
+ spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()),
+ St;
+handle_tc_exit(Reason, #st{status={framework,FwMod,FwFunc},
+ config=Config,pid=Pid}=St) ->
+ R = case Reason of
+ {timetrap_timeout,TVal,_} ->
+ {timetrap,TVal};
+ {testcase_aborted=E,AbortReason,_} ->
+ {E,AbortReason};
+ {fw_error,{FwMod,FwFunc,FwError}} ->
+ FwError;
+ Other ->
+ Other
+ end,
+ Error = {framework_error,R},
+ spawn_fw_call(FwMod, FwFunc, Config, Pid, Error, unknown, self()),
+ St;
+handle_tc_exit(Reason, #st{status=tc,config=Config0,mf={Mod,Func},pid=Pid}=St)
+ when is_list(Config0) ->
+ {R,Loc1,F} = case Reason of
+ {timetrap_timeout=E,TVal,Loc0} ->
+ {{E,TVal},Loc0,E};
+ {testcase_aborted=E,AbortReason,Loc0} ->
+ Msg = {E,AbortReason},
+ {Msg,Loc0,Msg};
+ Other ->
+ {{'EXIT',Other},unknown,Other}
+ end,
+ Timeout = end_conf_timeout(Reason, St),
+ Config = [{tc_status,{failed,F}}|Config0],
+ EndConfPid = call_end_conf(Mod, Func, Pid, R, Loc1, Config, Timeout),
+ St#st{end_conf_pid=EndConfPid};
+handle_tc_exit(Reason, #st{config=Config,mf={Mod,Func0},pid=Pid,
+ status=Status}=St) ->
+ {R,Loc1} = case Reason of
+ {timetrap_timeout=E,TVal,Loc0} ->
+ {{E,TVal},Loc0};
+ {testcase_aborted=E,AbortReason,Loc0} ->
+ {{E,AbortReason},Loc0};
+ Other ->
+ {{'EXIT',Other},St#st.last_known_loc}
+ end,
+ Func = case Status of
+ init_per_testcase=F -> {F,Func0};
+ end_per_testcase=F -> {F,Func0};
+ _ -> Func0
+ end,
+ spawn_fw_call(Mod, Func, Config, Pid, R, Loc1, self()),
+ St.
+
+end_conf_timeout({timetrap_timeout,Timeout,_}, _) ->
+ Timeout;
+end_conf_timeout(_, #st{config=Config}) when is_list(Config) ->
+ proplists:get_value(default_timeout, Config, ?DEFAULT_TIMETRAP_SECS*1000);
+end_conf_timeout(_, _) ->
+ ?DEFAULT_TIMETRAP_SECS*1000.
+
+call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) ->
+ Starter = self(),
+ Data = {Mod,Func,TCPid,TCExitReason,Loc},
+ case erlang:function_exported(Mod,end_per_testcase,2) of
+ false ->
+ spawn_link(fun() ->
+ Starter ! {self(),{call_end_conf,Data,ok}}
+ end);
+ true ->
+ do_call_end_conf(Starter,Mod,Func,Data,TCExitReason,Conf,TVal)
+ end.
+
+do_call_end_conf(Starter,Mod,Func,Data,TCExitReason,Conf,TVal) ->
+ EndConfProc =
+ fun() ->
+ process_flag(trap_exit,true), % to catch timetraps
+ Supervisor = self(),
+ EndConfApply =
+ fun() ->
+ timetrap(TVal),
+ %% We can't handle fails or skips here
+ %% (neither input nor output). The error can
+ %% be read from Conf though (tc_status).
+ EndConf =
+ case do_init_tc_call(Mod,{end_per_testcase,Func},
+ [Conf],
+ {TCExitReason,[Conf]}) of
+ {_,[EPTCInit]} when is_list(EPTCInit) ->
+ EPTCInit;
+ _ ->
+ Conf
+ end,
+ try apply(Mod,end_per_testcase,[Func,EndConf]) of
+ _ -> ok
+ catch
+ _:Error ->
+ timer:sleep(1),
+ print_end_conf_result(Mod,Func,Conf,
+ "crashed",Error)
+ end,
+ Supervisor ! {self(),end_conf}
+ end,
+ Pid = spawn_link(EndConfApply),
+ receive
+ {Pid,end_conf} ->
+ Starter ! {self(),{call_end_conf,Data,ok}};
+ {'EXIT',Pid,Reason} ->
+ print_end_conf_result(Mod,Func,Conf,"failed",Reason),
+ Starter ! {self(),{call_end_conf,Data,{error,Reason}}};
+ {'EXIT',_OtherPid,Reason} ->
+ %% Probably the parent - not much to do about that
+ exit(Reason)
+ end
+ end,
+ spawn_link(EndConfProc).
+
+print_end_conf_result(Mod,Func,Conf,Cause,Error) ->
+ Str2Print =
+ fun(NoHTML) when NoHTML == stdout; NoHTML == major ->
+ io_lib:format("WARNING! "
+ "~w:end_per_testcase(~w, ~tp)"
+ " ~s!\n\tReason: ~tp\n",
+ [Mod,Func,Conf,Cause,Error]);
+ (minor) ->
+ ErrorStr = test_server_ctrl:escape_chars(Error),
+ io_lib:format("WARNING! "
+ "~w:end_per_testcase(~w, ~tp)"
+ " ~s!\n\tReason: ~ts\n",
+ [Mod,Func,Conf,Cause,ErrorStr])
+ end,
+ group_leader() ! {printout,12,Str2Print}.
+
+
+spawn_fw_call(Mod,IPTC={init_per_testcase,Func},CurrConf,Pid,
+ Why,Loc,SendTo) ->
+ FwCall =
+ fun() ->
+ Skip = {skip,{failed,{Mod,init_per_testcase,Why}}},
+ %% if init_per_testcase fails, the test case
+ %% should be skipped
+ try begin do_end_tc_call(Mod,IPTC, {Pid,Skip,[CurrConf]}, Why),
+ do_init_tc_call(Mod,{end_per_testcase,Func},
+ [CurrConf],{ok,[CurrConf]}),
+ do_end_tc_call(Mod,{end_per_testcase,Func},
+ {Pid,Skip,[CurrConf]}, Why) end of
+ _ -> ok
+ catch
+ _:FwEndTCErr ->
+ exit({fw_notify_done,end_tc,FwEndTCErr})
+ end,
+ Time = case Why of
+ {timetrap_timeout,TVal} -> TVal/1000;
+ _ -> died
+ end,
+ group_leader() ! {printout,12,
+ "ERROR! ~w:init_per_testcase(~w, ~p)"
+ " failed!\n\tReason: ~tp\n",
+ [Mod,Func,CurrConf,Why]},
+ %% finished, report back
+ SendTo ! {self(),fw_notify_done,{Time,Skip,Loc,[],undefined}}
+ end,
+ spawn_link(FwCall);
+
+spawn_fw_call(Mod,EPTC={end_per_testcase,Func},EndConf,Pid,
+ Why,_Loc,SendTo) ->
+ FwCall =
+ fun() ->
+ {RetVal,Report} =
+ case proplists:get_value(tc_status, EndConf) of
+ undefined ->
+ E = {failed,{Mod,end_per_testcase,Why}},
+ {E,E};
+ E = {failed,Reason} ->
+ {E,{error,Reason}};
+ Result ->
+ E = {failed,{Mod,end_per_testcase,Why}},
+ {Result,E}
+ end,
+ {Time,Warn} =
+ case Why of
+ {timetrap_timeout,TVal} ->
+ group_leader() !
+ {printout,12,
+ "WARNING! ~w:end_per_testcase(~w, ~p)"
+ " failed!\n\tReason: timetrap timeout"
+ " after ~w ms!\n", [Mod,Func,EndConf,TVal]},
+ W = "<font color=\"red\">"
+ "WARNING: end_per_testcase timed out!</font>",
+ {TVal/1000,W};
+ _ ->
+ group_leader() !
+ {printout,12,
+ "WARNING! ~w:end_per_testcase(~w, ~p)"
+ " failed!\n\tReason: ~tp\n",
+ [Mod,Func,EndConf,Why]},
+ W = "<font color=\"red\">"
+ "WARNING: end_per_testcase failed!</font>",
+ {died,W}
+ end,
+ try do_end_tc_call(Mod,EPTC,{Pid,Report,[EndConf]}, Why) of
+ _ -> ok
+ catch
+ _:FwEndTCErr ->
+ exit({fw_notify_done,end_tc,FwEndTCErr})
+ end,
+ FailLoc = proplists:get_value(tc_fail_loc, EndConf),
+ %% finished, report back (if end_per_testcase fails, a warning
+ %% should be printed as part of the comment)
+ SendTo ! {self(),fw_notify_done,
+ {Time,RetVal,FailLoc,[],Warn}}
+ end,
+ spawn_link(FwCall);
+
+spawn_fw_call(FwMod,FwFunc,_,_Pid,{framework_error,FwError},_,SendTo) ->
+ FwCall =
+ fun() ->
+ test_server_sup:framework_call(report, [framework_error,
+ {{FwMod,FwFunc},
+ FwError}]),
+ Comment =
+ lists:flatten(
+ io_lib:format("<font color=\"red\">"
+ "WARNING! ~w:~w failed!</font>",
+ [FwMod,FwFunc])),
+ %% finished, report back
+ SendTo ! {self(),fw_notify_done,
+ {died,{error,{FwMod,FwFunc,FwError}},
+ {FwMod,FwFunc},[],Comment}}
+ end,
+ spawn_link(FwCall);
+
+spawn_fw_call(Mod,Func,CurrConf,Pid,Error,Loc,SendTo) ->
+ {Func1,EndTCFunc} = case Func of
+ CF when CF == init_per_suite; CF == end_per_suite;
+ CF == init_per_group; CF == end_per_group ->
+ {CF,CF};
+ TC -> {TC,{end_per_testcase,TC}}
+ end,
+ FwCall =
+ fun() ->
+ try fw_error_notify(Mod,Func1,[],
+ Error,Loc) of
+ _ -> ok
+ catch
+ _:FwErrorNotifyErr ->
+ exit({fw_notify_done,error_notification,
+ FwErrorNotifyErr})
+ end,
+ Conf = [{tc_status,{failed,Error}}|CurrConf],
+ try do_end_tc_call(Mod,EndTCFunc,{Pid,Error,[Conf]},Error) of
+ _ -> ok
+ catch
+ _:FwEndTCErr ->
+ exit({fw_notify_done,end_tc,FwEndTCErr})
+ end,
+ %% finished, report back
+ SendTo ! {self(),fw_notify_done,{died,Error,Loc,[],undefined}}
+ end,
+ spawn_link(FwCall).
+
+%% The job proxy process forwards messages between the test case
+%% process on a shielded node (and its descendants) and the job process.
+%%
+%% The job proxy process has to be started by the test case process
+%% on the shielded node!
+start_job_proxy() ->
+ group_leader(spawn(fun () -> job_proxy_msgloop() end), self()), ok.
+
+%% The io_reply_proxy is not the most satisfying solution but it works...
+io_reply_proxy(ReplyTo) ->
+ receive
+ IoReply when is_tuple(IoReply),
+ element(1, IoReply) == io_reply ->
+ ReplyTo ! IoReply;
+ _ ->
+ io_reply_proxy(ReplyTo)
+ end.
+
+job_proxy_msgloop() ->
+ receive
+
+ %%
+ %% Messages that need intervention by proxy...
+ %%
+
+ %% io stuff ...
+ IoReq when tuple_size(IoReq) >= 2,
+ element(1, IoReq) == io_request ->
+
+ ReplyProxy = spawn(fun () -> io_reply_proxy(element(2, IoReq)) end),
+ group_leader() ! setelement(2, IoReq, ReplyProxy);
+
+ %% test_server stuff...
+ {sync_apply, From, MFA} ->
+ group_leader() ! {sync_apply_proxy, self(), From, MFA};
+ {sync_result_proxy, To, Result} ->
+ To ! {sync_result, Result};
+
+ %%
+ %% Messages that need no intervention by proxy...
+ %%
+ Msg ->
+ group_leader() ! Msg
+ end,
+ job_proxy_msgloop().
+
+-spec run_test_case_eval_fun(_, _, _, _, _, _, _, _, _) ->
+ fun(() -> no_return()).
+run_test_case_eval_fun(Mod, Func, Args, Name, Ref, RunInit,
+ TimetrapData, LogOpts, TCCallback) ->
+ fun () ->
+ run_test_case_eval(Mod, Func, Args, Name, Ref,
+ RunInit, TimetrapData,
+ LogOpts, TCCallback)
+ end.
+
+%% A test case is known to have failed if it returns an {'EXIT', _} tuple,
+%% or sends a message {failed, File, Line} to its group_leader.
+
+run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit,
+ TimetrapData, LogOpts, TCCallback) ->
+ put(test_server_multiply_timetraps, TimetrapData),
+ put(test_server_logopts, LogOpts),
+ Where = [{Mod,Func}],
+ put(test_server_loc, Where),
+
+ FWInitFunc = case RunInit of
+ run_init -> {init_per_testcase,Func};
+ _ -> Func
+ end,
+
+ FWInitResult0 = do_init_tc_call(Mod,FWInitFunc,Args0,{ok,Args0}),
+
+ set_tc_state(running),
+ {{Time,Value},Loc,Opts} =
+ case FWInitResult0 of
+ {ok,Args} ->
+ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback);
+ Error = {error,_Reason} ->
+ NewResult = do_end_tc_call(Mod,FWInitFunc, {Error,Args0},
+ {auto_skip,{failed,Error}}),
+ {{0,NewResult},Where,[]};
+ {fail,Reason} ->
+ Conf = [{tc_status,{failed,Reason}} | hd(Args0)],
+ fw_error_notify(Mod, Func, Conf, Reason),
+ NewResult = do_end_tc_call(Mod,FWInitFunc,
+ {{error,Reason},[Conf]},
+ {fail,Reason}),
+ {{0,NewResult},Where,[]};
+ Skip = {SkipType,_Reason} when SkipType == skip;
+ SkipType == skipped ->
+ NewResult = do_end_tc_call(Mod,FWInitFunc,
+ {Skip,Args0}, Skip),
+ {{0,NewResult},Where,[]};
+ AutoSkip = {auto_skip,_Reason} ->
+ %% special case where a conf case "pretends" to be skipped
+ NewResult =
+ do_end_tc_call(Mod,FWInitFunc, {AutoSkip,Args0}, AutoSkip),
+ {{0,NewResult},Where,[]}
+ end,
+ exit({Ref,Time,Value,Loc,Opts}).
+
+run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
+ case RunInit of
+ run_init ->
+ set_tc_state(init_per_testcase, hd(Args)),
+ ensure_timetrap(Args),
+ case init_per_testcase(Mod, Func, Args) of
+ Skip = {SkipType,Reason} when SkipType == skip;
+ SkipType == skipped ->
+ Line = get_loc(),
+ Conf = [{tc_status,{skipped,Reason}}|hd(Args)],
+ NewRes = do_end_tc_call(Mod,{init_per_testcase,Func},
+ {Skip,[Conf]}, Skip),
+ {{0,NewRes},Line,[]};
+ {skip_and_save,Reason,SaveCfg} ->
+ Line = get_loc(),
+ Conf = [{tc_status,{skipped,Reason}},
+ {save_config,SaveCfg}|hd(Args)],
+ NewRes = do_end_tc_call(Mod,{init_per_testcase,Func},
+ {{skip,Reason},[Conf]},
+ {skip,Reason}),
+ {{0,NewRes},Line,[]};
+ FailTC = {fail,Reason} -> % user fails the testcase
+ EndConf = [{tc_status,{failed,Reason}} | hd(Args)],
+ fw_error_notify(Mod, Func, EndConf, Reason),
+ NewRes = do_end_tc_call(Mod,{init_per_testcase,Func},
+ {{error,Reason},[EndConf]},
+ FailTC),
+ {{0,NewRes},[{Mod,Func}],[]};
+ {ok,NewConf} ->
+ IPTCEndRes = do_end_tc_call(Mod,{init_per_testcase,Func},
+ {ok,[NewConf]}, NewConf),
+ {{T,Return},Loc,NewConf1} =
+ if not is_list(IPTCEndRes) ->
+ %% received skip or fail, not config
+ {{0,IPTCEndRes},undefined,NewConf};
+ true ->
+ %% call user callback function if defined
+ NewConfUC =
+ user_callback(TCCallback, Mod, Func,
+ init, IPTCEndRes),
+ %% save current state in controller loop
+ set_tc_state(tc, NewConfUC),
+ %% execute the test case
+ {ts_tc(Mod,Func,[NewConfUC]),get_loc(),NewConfUC}
+ end,
+ {EndConf,TSReturn,FWReturn} =
+ case Return of
+ {E,TCError} when E=='EXIT' ; E==failed ->
+ fw_error_notify(Mod, Func, NewConf1,
+ TCError, Loc),
+ {[{tc_status,{failed,TCError}},
+ {tc_fail_loc,Loc}|NewConf1],
+ Return,{error,TCError}};
+ SaveCfg={save_config,_} ->
+ {[{tc_status,ok},SaveCfg|NewConf1],Return,ok};
+ {skip_and_save,Why,SaveCfg} ->
+ Skip = {skip,Why},
+ {[{tc_status,{skipped,Why}},
+ {save_config,SaveCfg}|NewConf1],
+ Skip,Skip};
+ {SkipType,Why} when SkipType == skip;
+ SkipType == skipped ->
+ {[{tc_status,{skipped,Why}}|NewConf1],Return,
+ Return};
+ _ ->
+ {[{tc_status,ok}|NewConf1],Return,ok}
+ end,
+ %% call user callback function if defined
+ EndConf1 =
+ user_callback(TCCallback, Mod, Func, 'end', EndConf),
+
+ %% We can't handle fails or skips here
+ EndConf2 =
+ case do_init_tc_call(Mod,{end_per_testcase,Func},
+ [EndConf1],{ok,[EndConf1]}) of
+ {ok,[EPTCInitRes]} when is_list(EPTCInitRes) ->
+ EPTCInitRes;
+ _ ->
+ EndConf1
+ end,
+
+ %% update current state in controller loop
+ {FWReturn1,TSReturn1,EndConf3} =
+ case end_per_testcase(Mod, Func, EndConf2) of
+ SaveCfg1={save_config,_} ->
+ {FWReturn,TSReturn,
+ [SaveCfg1|lists:keydelete(save_config,1,
+ EndConf2)]};
+ {fail,ReasonToFail} ->
+ %% user has failed the testcase
+ fw_error_notify(Mod, Func, EndConf2,
+ ReasonToFail),
+ {{error,ReasonToFail},
+ {failed,ReasonToFail},
+ EndConf2};
+ {failed,{_,end_per_testcase,_}} = Failure when
+ FWReturn == ok ->
+ %% unexpected termination in end_per_testcase
+ %% report this as the result to the framework
+ {Failure,TSReturn,EndConf2};
+ _ ->
+ %% test case result should be reported to
+ %% framework no matter the status of
+ %% end_per_testcase
+ {FWReturn,TSReturn,EndConf2}
+ end,
+ %% clear current state in controller loop
+ case do_end_tc_call(Mod,{end_per_testcase,Func},
+ {FWReturn1,[EndConf3]}, TSReturn1) of
+ {failed,Reason} = NewReturn ->
+ fw_error_notify(Mod,Func,EndConf3, Reason),
+ {{T,NewReturn},[{Mod,Func}],[]};
+ NewReturn ->
+ {{T,NewReturn},Loc,[]}
+ end
+ end;
+ skip_init ->
+ set_tc_state(running, hd(Args)),
+ %% call user callback function if defined
+ Args1 = user_callback(TCCallback, Mod, Func, init, Args),
+ ensure_timetrap(Args1),
+ %% ts_tc does a catch
+ %% if this is a named conf group, the test case (init or end conf)
+ %% should be called with the name as the first argument
+ Args2 = if Name == undefined -> Args1;
+ true -> [Name | Args1]
+ end,
+ %% execute the conf test case
+ {{T,Return},Loc} = {ts_tc(Mod, Func, Args2),get_loc()},
+ %% call user callback function if defined
+ Return1 = user_callback(TCCallback, Mod, Func, 'end', Return),
+ {Return2,Opts} = process_return_val([Return1], Mod, Func,
+ Args1, [{Mod,Func}], Return1),
+ {{T,Return2},Loc,Opts}
+ end.
+
+do_init_tc_call(Mod, Func, Res, Return) ->
+ test_server_sup:framework_call(init_tc,[Mod,Func,Res],Return).
+
+do_end_tc_call(Mod, IPTC={init_per_testcase,Func}, Res, Return) ->
+ case Return of
+ {NOk,_} when NOk == auto_skip; NOk == fail;
+ NOk == skip ; NOk == skipped ->
+ {_,Args} = Res,
+ IPTCEndRes =
+ case do_end_tc_call1(Mod, IPTC, Res, Return) of
+ IPTCEndConfig when is_list(IPTCEndConfig) ->
+ IPTCEndConfig;
+ _ ->
+ Args
+ end,
+ EPTCInitRes =
+ case do_init_tc_call(Mod,{end_per_testcase,Func},
+ IPTCEndRes,Return) of
+ {ok,EPTCInitConfig} when is_list(EPTCInitConfig) ->
+ {Return,EPTCInitConfig};
+ _ ->
+ Return
+ end,
+ do_end_tc_call1(Mod, {end_per_testcase,Func},
+ EPTCInitRes, Return);
+ _Ok ->
+ do_end_tc_call1(Mod, IPTC, Res, Return)
+ end;
+do_end_tc_call(Mod, Func, Res, Return) ->
+ do_end_tc_call1(Mod, Func, Res, Return).
+
+do_end_tc_call1(Mod, Func, Res, Return) ->
+ FwMod = os:getenv("TEST_SERVER_FRAMEWORK"),
+ Ref = make_ref(),
+ if FwMod == "ct_framework" ; FwMod == "undefined"; FwMod == false ->
+ case test_server_sup:framework_call(
+ end_tc, [Mod,Func,Res, Return], ok) of
+ {fail,FWReason} ->
+ {failed,FWReason};
+ ok ->
+ case Return of
+ {fail,Reason} ->
+ {failed,Reason};
+ Return ->
+ Return
+ end;
+ NewReturn ->
+ NewReturn
+ end;
+ true ->
+ case test_server_sup:framework_call(FwMod, end_tc,
+ [Mod,Func,Res], Ref) of
+ {fail,FWReason} ->
+ {failed,FWReason};
+ _Else ->
+ Return
+ end
+ end.
+
+%% the return value is a list and we have to check if it contains
+%% the result of an end conf case or if it's a Config list
+process_return_val([Return], M,F,A, Loc, Final) when is_list(Return) ->
+ ReturnTags = [skip,skip_and_save,save_config,comment,return_group_result],
+ %% check if all elements in the list are valid end conf return value tuples
+ case lists:all(fun(Val) when is_tuple(Val) ->
+ lists:any(fun(T) -> T == element(1, Val) end,
+ ReturnTags);
+ (ok) ->
+ true;
+ (_) ->
+ false
+ end, Return) of
+ true -> % must be return value from end conf case
+ process_return_val1(Return, M,F,A, Loc, Final, []);
+ false -> % must be Config value from init conf case
+ case do_end_tc_call(M, F, {ok,A}, Return) of
+ {failed, FWReason} = Failed ->
+ fw_error_notify(M,F,A, FWReason),
+ {Failed, []};
+ NewReturn ->
+ {NewReturn, []}
+ end
+ end;
+%% the return value is not a list, so it's the return value from an
+%% end conf case or it's a dummy value that can be ignored
+process_return_val(Return, M,F,A, Loc, Final) ->
+ process_return_val1(Return, M,F,A, Loc, Final, []).
+
+process_return_val1([Failed={E,TCError}|_], M,F,A=[Args], Loc, _, SaveOpts)
+ when E=='EXIT';
+ E==failed ->
+ fw_error_notify(M,F,A, TCError, Loc),
+ case do_end_tc_call(M,F, {{error,TCError},
+ [[{tc_status,{failed,TCError}}|Args]]},
+ Failed) of
+ {failed,FWReason} ->
+ {{failed,FWReason},SaveOpts};
+ NewReturn ->
+ {NewReturn,SaveOpts}
+ end;
+process_return_val1([SaveCfg={save_config,_}|Opts], M,F,[Args],
+ Loc, Final, SaveOpts) ->
+ process_return_val1(Opts, M,F,[[SaveCfg|Args]], Loc, Final, SaveOpts);
+process_return_val1([{skip_and_save,Why,SaveCfg}|Opts], M,F,[Args],
+ Loc, _, SaveOpts) ->
+ process_return_val1(Opts, M,F,[[{save_config,SaveCfg}|Args]],
+ Loc, {skip,Why}, SaveOpts);
+process_return_val1([GR={return_group_result,_}|Opts], M,F,A,
+ Loc, Final, SaveOpts) ->
+ process_return_val1(Opts, M,F,A, Loc, Final, [GR|SaveOpts]);
+process_return_val1([RetVal={Tag,_}|Opts], M,F,A,
+ Loc, _, SaveOpts) when Tag==skip;
+ Tag==comment ->
+ process_return_val1(Opts, M,F,A, Loc, RetVal, SaveOpts);
+process_return_val1([_|Opts], M,F,A, Loc, Final, SaveOpts) ->
+ process_return_val1(Opts, M,F,A, Loc, Final, SaveOpts);
+process_return_val1([], M,F,A, _Loc, Final, SaveOpts) ->
+ case do_end_tc_call(M,F, {Final,A}, Final) of
+ {failed,FWReason} ->
+ {{failed,FWReason},SaveOpts};
+ NewReturn ->
+ {NewReturn,lists:reverse(SaveOpts)}
+ end.
+
+user_callback(undefined, _, _, _, Args) ->
+ Args;
+user_callback({CBMod,CBFunc}, Mod, Func, InitOrEnd,
+ [Args]) when is_list(Args) ->
+ case catch apply(CBMod, CBFunc, [InitOrEnd,Mod,Func,Args]) of
+ Args1 when is_list(Args1) ->
+ [Args1];
+ _ ->
+ [Args]
+ end;
+user_callback({CBMod,CBFunc}, Mod, Func, InitOrEnd, Args) ->
+ case catch apply(CBMod, CBFunc, [InitOrEnd,Mod,Func,Args]) of
+ Args1 when is_list(Args1) ->
+ Args1;
+ _ ->
+ Args
+ end.
+
+init_per_testcase(Mod, Func, Args) ->
+ case code:is_loaded(Mod) of
+ false -> code:load_file(Mod);
+ _ -> ok
+ end,
+ case erlang:function_exported(Mod, init_per_testcase, 2) of
+ true ->
+ do_init_per_testcase(Mod, [Func|Args]);
+ false ->
+ %% Optional init_per_testcase is not defined -- keep quiet.
+ [Config] = Args,
+ {ok, Config}
+ end.
+
+do_init_per_testcase(Mod, Args) ->
+ try apply(Mod, init_per_testcase, Args) of
+ {Skip,Reason} when Skip =:= skip; Skip =:= skipped ->
+ {skip,Reason};
+ {skip_and_save,_,_}=Res ->
+ Res;
+ NewConf when is_list(NewConf) ->
+ case lists:filter(fun(T) when is_tuple(T) -> false;
+ (_) -> true end, NewConf) of
+ [] ->
+ {ok,NewConf};
+ Bad ->
+ group_leader() ! {printout,12,
+ "ERROR! init_per_testcase has returned "
+ "bad elements in Config: ~tp\n",[Bad]},
+ {skip,{failed,{Mod,init_per_testcase,bad_return}}}
+ end;
+ {fail,_Reason}=Res ->
+ Res;
+ _Other ->
+ group_leader() ! {printout,12,
+ "ERROR! init_per_testcase did not return "
+ "a Config list.\n",[]},
+ {skip,{failed,{Mod,init_per_testcase,bad_return}}}
+ catch
+ throw:{Skip,Reason} when Skip =:= skip; Skip =:= skipped ->
+ {skip,Reason};
+ exit:{Skip,Reason} when Skip =:= skip; Skip =:= skipped ->
+ {skip,Reason};
+ throw:Other ->
+ set_loc(erlang:get_stacktrace()),
+ Line = get_loc(),
+ print_init_conf_result(Line,"thrown",Other),
+ {skip,{failed,{Mod,init_per_testcase,Other}}};
+ _:Reason0 ->
+ Stk = erlang:get_stacktrace(),
+ Reason = {Reason0,Stk},
+ set_loc(Stk),
+ Line = get_loc(),
+ print_init_conf_result(Line,"crashed",Reason),
+ {skip,{failed,{Mod,init_per_testcase,Reason}}}
+ end.
+
+print_init_conf_result(Line,Cause,Reason) ->
+ FormattedLoc = test_server_sup:format_loc(Line),
+ Str2Print =
+ fun(NoHTML) when NoHTML == stdout; NoHTML == major ->
+ io_lib:format("ERROR! init_per_testcase ~s!\n"
+ "\tLocation: ~p\n\tReason: ~tp\n",
+ [Cause,Line,Reason]);
+ (minor) ->
+ ReasonStr = test_server_ctrl:escape_chars(Reason),
+ io_lib:format("ERROR! init_per_testcase ~s!\n"
+ "\tLocation: ~ts\n\tReason: ~ts\n",
+ [Cause,FormattedLoc,ReasonStr])
+ end,
+ group_leader() ! {printout,12,Str2Print}.
+
+
+end_per_testcase(Mod, Func, Conf) ->
+ case erlang:function_exported(Mod,end_per_testcase,2) of
+ true ->
+ do_end_per_testcase(Mod,end_per_testcase,Func,Conf);
+ false ->
+ %% Backwards compatibility!
+ case erlang:function_exported(Mod,fin_per_testcase,2) of
+ true ->
+ do_end_per_testcase(Mod,fin_per_testcase,Func,Conf);
+ false ->
+ ok
+ end
+ end.
+
+do_end_per_testcase(Mod,EndFunc,Func,Conf) ->
+ set_tc_state(end_per_testcase, Conf),
+ try Mod:EndFunc(Func, Conf) of
+ {save_config,_}=SaveCfg ->
+ SaveCfg;
+ {fail,_}=Fail ->
+ Fail;
+ _ ->
+ ok
+ catch
+ throw:Other ->
+ Comment0 = case read_comment() of
+ "" -> "";
+ Cmt -> Cmt ++ test_server_ctrl:xhtml("<br>",
+ "<br />")
+ end,
+ set_loc(erlang:get_stacktrace()),
+ comment(io_lib:format("~ts<font color=\"red\">"
+ "WARNING: ~w thrown!"
+ "</font>\n",[Comment0,EndFunc])),
+ print_end_tc_warning(EndFunc,Other,"thrown",get_loc()),
+ {failed,{Mod,end_per_testcase,Other}};
+ Class:Reason ->
+ Stk = erlang:get_stacktrace(),
+ set_loc(Stk),
+ Why = case Class of
+ exit -> {'EXIT',Reason};
+ error -> {'EXIT',{Reason,Stk}}
+ end,
+ Comment0 = case read_comment() of
+ "" -> "";
+ Cmt -> Cmt ++ test_server_ctrl:xhtml("<br>",
+ "<br />")
+ end,
+ comment(io_lib:format("~ts<font color=\"red\">"
+ "WARNING: ~w crashed!"
+ "</font>\n",[Comment0,EndFunc])),
+ print_end_tc_warning(EndFunc,Reason,"crashed",get_loc()),
+ {failed,{Mod,end_per_testcase,Why}}
+ end.
+
+print_end_tc_warning(EndFunc,Reason,Cause,Loc) ->
+ FormattedLoc = test_server_sup:format_loc(Loc),
+ Str2Print =
+ fun(NoHTML) when NoHTML == stdout; NoHTML == major ->
+ io_lib:format("WARNING: ~w ~s!\n"
+ "Reason: ~tp\nLine: ~p\n",
+ [EndFunc,Cause,Reason,Loc]);
+ (minor) ->
+ ReasonStr = test_server_ctrl:escape_chars(Reason),
+ io_lib:format("WARNING: ~w ~s!\n"
+ "Reason: ~ts\nLine: ~ts\n",
+ [EndFunc,Cause,ReasonStr,FormattedLoc])
+ end,
+ group_leader() ! {printout,12,Str2Print}.
+
+get_loc() ->
+ get(test_server_loc).
+
+get_loc(Pid) ->
+ [{current_stacktrace,Stk0},{dictionary,Dict}] =
+ process_info(Pid, [current_stacktrace,dictionary]),
+ lists:foreach(fun({Key,Val}) -> put(Key, Val) end, Dict),
+ Stk = [rewrite_loc_item(Loc) || Loc <- Stk0],
+ case get(test_server_loc) of
+ [{Suite,Case}] ->
+ %% Location info unknown, check if {Suite,Case,Line}
+ %% is available in stacktrace and if so, use stacktrace
+ %% instead of current test_server_loc.
+ %% If location is the last expression in a test case
+ %% function, the info is not available due to tail call
+ %% elimination. We need to check if the test case has been
+ %% called by ts_tc/3 and, if so, insert the test case info
+ %% at that position.
+ case [match || {S,C,_L} <- Stk, S == Suite, C == Case] of
+ [match|_] ->
+ put(test_server_loc, Stk);
+ _ ->
+ {PreTC,PostTC} =
+ lists:splitwith(fun({test_server,ts_tc,_}) ->
+ false;
+ (_) ->
+ true
+ end, Stk),
+ if PostTC == [] ->
+ ok;
+ true ->
+ put(test_server_loc,
+ PreTC++[{Suite,Case,last_expr} | PostTC])
+ end
+ end;
+ _ ->
+ put(test_server_loc, Stk)
+ end,
+ get_loc().
+
+fw_error_notify(Mod, Func, Args, Error) ->
+ test_server_sup:framework_call(error_notification,
+ [Mod,Func,[Args],
+ {Error,unknown}]).
+fw_error_notify(Mod, Func, Args, Error, Loc) ->
+ test_server_sup:framework_call(error_notification,
+ [Mod,Func,[Args],
+ {Error,Loc}]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% print(Detail,Format,Args,Printer) -> ok
+%% Detail = integer()
+%% Format = string()
+%% Args = [term()]
+%%
+%% Just like io:format, except that depending on the Detail value, the output
+%% is directed to console, major and/or minor log files.
+
+%% print(Detail,Format,Args) ->
+%% test_server_ctrl:print(Detail, Format, Args).
+
+print(Detail,Format,Args,Printer) ->
+ test_server_ctrl:print(Detail, Format, Args, Printer).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% print_timestamp(Detail,Leader) -> ok
+%%
+%% Prints Leader followed by a time stamp (date and time). Depending on
+%% the Detail value, the output is directed to console, major and/or minor
+%% log files.
+
+print_timestamp(Detail,Leader) ->
+ test_server_ctrl:print_timestamp(Detail, Leader).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% lookup_config(Key,Config) -> Value | undefined
+%% Key = term()
+%% Value = term()
+%% Config = [{Key,Value},...]
+%%
+%% Looks up a specific key in the config list and returns the value
+%% associated with it, or undefined if the key doesn't exist.
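+%%
+%% A minimal usage sketch (hypothetical suite code; priv_dir is just
+%% an example of a key commonly found in Config):
+%%
+%%     PrivDir = lookup_config(priv_dir, Config),
+%%     OutFile = filename:join(PrivDir, "out.txt")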
+
+lookup_config(Key,Config) ->
+ case lists:keysearch(Key,1,Config) of
+ {value,{Key,Val}} ->
+ Val;
+ _ ->
+ io:format("Could not find element ~p in Config.~n",[Key]),
+ undefined
+ end.
+
+%%
+%% IMPORTANT: get_loc/1 uses the name of this function when analysing
+%% stack traces. If the name changes, get_loc/1 must be updated!
+%%
+ts_tc(M, F, A) ->
+ Before = erlang:monotonic_time(),
+ Result = try
+ apply(M, F, A)
+ catch
+ throw:{skip, Reason} -> {skip, Reason};
+ throw:{skipped, Reason} -> {skip, Reason};
+ exit:{skip, Reason} -> {skip, Reason};
+ exit:{skipped, Reason} -> {skip, Reason};
+ Type:Reason ->
+ Stk = erlang:get_stacktrace(),
+ set_loc(Stk),
+ case Type of
+ throw ->
+ {failed,{thrown,Reason}};
+ error ->
+ {'EXIT',{Reason,Stk}};
+ exit ->
+ {'EXIT',Reason}
+ end
+ end,
+ After = erlang:monotonic_time(),
+ Elapsed = erlang:convert_time_unit(After-Before, native, micro_seconds),
+ {Elapsed, Result}.
+
+set_loc(Stk) ->
+ Loc = case [rewrite_loc_item(I) || {_,_,_,_}=I <- Stk] of
+ [{M,F,0}|Stack] ->
+ [{M,F}|Stack];
+ Other ->
+ Other
+ end,
+ put(test_server_loc, Loc).
+
+rewrite_loc_item({M,F,_,Loc}) ->
+ {M,F,proplists:get_value(line, Loc, 0)}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% TEST SUITE SUPPORT FUNCTIONS %%
+%% %%
+%% Note: Some of these functions have been moved to test_server_sup %%
+%% in an attempt to keep this module small (yeah, right!)  %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% format(Format) -> IoLibReturn
+%% format(Detail,Format) -> IoLibReturn
+%% format(Format,Args) -> IoLibReturn
+%% format(Detail,Format,Args) -> IoLibReturn
+%% Detail = integer()
+%% Format = string()
+%% Args = [term(),...]
+%% IoLibReturn = term()
+%%
+%% Logs the Format string and Args, similar to io:format/1/2 etc. If
+%% Detail is not specified, the default detail level (which is 50) is used.
+%% Which log files the string will be logged in depends on the thresholds
+%% set with set_levels/3. Typically with default detail level, only the
+%% minor log file is used.
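+%%
+%% Example (hypothetical test case code):
+%%
+%%     format("validating ~p entries", [N]),        % minor log
+%%     format(major, "=== phase two started ===")   % major log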
+format(Format) ->
+ format(minor, Format, []).
+
+format(major, Format) ->
+ format(major, Format, []);
+format(minor, Format) ->
+ format(minor, Format, []);
+format(Detail, Format) when is_integer(Detail) ->
+ format(Detail, Format, []);
+format(Format, Args) ->
+ format(minor, Format, Args).
+
+format(Detail, Format, Args) ->
+ Str =
+ case catch io_lib:format(Format,Args) of
+ {'EXIT',_} ->
+ io_lib:format("illegal format; ~p with args ~p.\n",
+ [Format,Args]);
+ Valid -> Valid
+ end,
+ log({Detail, Str}).
+
+log(Msg) ->
+ group_leader() ! {structured_io, self(), Msg},
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% capture_start() -> ok
+%% capture_stop() -> ok
+%%
+%% Starts/stops capturing all output from io:format, and similar. Capturing
+%% output doesn't stop output from happening. It just makes it possible
+%% to retrieve the output using capture_get/0.
+%% Starting and stopping capture doesn't affect already captured output.
+%% All output is stored as messages in the message queue until retrieved.
+
+capture_start() ->
+ group_leader() ! {capture,self()},
+ ok.
+
+capture_stop() ->
+ group_leader() ! {capture,false},
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% capture_get() -> Output
+%% Output = [string(),...]
+%%
+%% Retrieves all the captured output since last call to capture_get/0.
+%% Note that since output arrives as messages to the process, it takes
+%% a short while from the call to io:format until all output is available
+%% via capture_get/0. It is not necessary to call capture_stop/0 before
+%% retrieving the output.
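+%%
+%% A typical usage sketch (hypothetical; the sleep merely gives the
+%% captured output time to arrive as messages):
+%%
+%%     capture_start(),
+%%     io:format("hello~n"),
+%%     sleep(100),
+%%     Captured = capture_get(),    % e.g. ["hello\n"]
+%%     capture_stop()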
+capture_get() ->
+ test_server_sup:capture_get([]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% messages_get() -> Messages
+%% Messages = [term(),...]
+%%
+%% Returns all messages in the message queue.
+messages_get() ->
+ test_server_sup:messages_get([]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% permit_io(GroupLeader, FromPid) -> ok
+%%
+%% Make sure subsequent IO from FromPid won't get rejected
+permit_io(GroupLeader, FromPid) ->
+ GroupLeader ! {permit_io,FromPid}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% sleep(Time) -> ok
+%% Time = integer() | float() | infinity
+%%
+%% Sleeps the specified number of milliseconds. This sleep also accepts
+%% floating point numbers (which are truncated) and the atom 'infinity'.
+sleep(infinity) ->
+ receive
+ after infinity ->
+ ok
+ end;
+sleep(MSecs) ->
+ receive
+ after trunc(MSecs) ->
+ ok
+ end,
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% adjusted_sleep(Time) -> ok
+%% Time = integer() | float() | infinity
+%%
+%% Sleeps the specified number of milliseconds, multiplied by the
+%% 'multiply_timetraps' value (if set) and possibly also automatically scaled
+%% up if 'scale_timetraps' is set to true (which is the default).
+%% This function also accepts floating point numbers (which are truncated) and
+%% the atom 'infinity'.
+adjusted_sleep(infinity) ->
+ receive
+ after infinity ->
+ ok
+ end;
+adjusted_sleep(MSecs) ->
+ {Multiplier,ScaleFactor} =
+ case test_server_ctrl:get_timetrap_parameters() of
+ {undefined,undefined} ->
+ {1,1};
+ {undefined,false} ->
+ {1,1};
+ {undefined,true} ->
+ {1,timetrap_scale_factor()};
+ {infinity,_} ->
+ {infinity,1};
+ {Mult,undefined} ->
+ {Mult,1};
+ {Mult,false} ->
+ {Mult,1};
+ {Mult,true} ->
+ {Mult,timetrap_scale_factor()}
+ end,
+ receive
+ after trunc(MSecs*Multiplier*ScaleFactor) ->
+ ok
+ end,
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% fail(Reason) -> exit({suite_failed,Reason})
+%%
+%% Immediately calls exit. Included because test suites are easier
+%% to read when using this function, rather than exit directly.
+fail(Reason) ->
+ comment(cast_to_list(Reason)),
+ try
+ exit({suite_failed,Reason})
+ catch
+ Class:R ->
+ case erlang:get_stacktrace() of
+ [{?MODULE,fail,1,_}|Stk] -> ok;
+ Stk -> ok
+ end,
+ erlang:raise(Class, R, Stk)
+ end.
+
+cast_to_list(X) when is_list(X) -> X;
+cast_to_list(X) when is_atom(X) -> atom_to_list(X);
+cast_to_list(X) -> lists:flatten(io_lib:format("~p", [X])).
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% fail() -> exit(suite_failed)
+%%
+%% Immediately calls exit. Included because test suites are easier
+%% to read when using this function, rather than exit directly.
+fail() ->
+ try
+ exit(suite_failed)
+ catch
+ Class:R ->
+ case erlang:get_stacktrace() of
+ [{?MODULE,fail,0,_}|Stk] -> ok;
+ Stk -> ok
+ end,
+ erlang:raise(Class, R, Stk)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% break(Comment) -> ok
+%%
+%% Break a test case so part of the test can be done manually.
+%% Use continue/0 to continue.
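+%%
+%% Example (hypothetical):
+%%     break("Unplug the cable, then run test_server:continue().")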
+break(Comment) ->
+ break(?MODULE, Comment).
+
+break(CBM, Comment) ->
+ break(CBM, '', Comment).
+
+break(CBM, TestCase, Comment) ->
+ timetrap_cancel(),
+ {TCName,CntArg,PName} =
+ if TestCase == '' ->
+ {"", "", test_server_break_process};
+ true ->
+ Str = atom_to_list(TestCase),
+ {[32 | Str], Str,
+ list_to_atom("test_server_break_process_" ++ Str)}
+ end,
+ io:format(user,
+ "\n\n\n--- SEMIAUTOMATIC TESTING ---"
+ "\nThe test case~ts executes on process ~w"
+ "\n\n\n~ts"
+ "\n\n\n-----------------------------\n\n"
+ "Continue with --> ~w:continue(~ts).\n",
+ [TCName,self(),Comment,CBM,CntArg]),
+ case whereis(PName) of
+ undefined ->
+ spawn_break_process(self(), PName);
+ OldBreakProcess ->
+ OldBreakProcess ! cancel,
+ spawn_break_process(self(), PName)
+ end,
+ receive continue -> ok end.
+
+spawn_break_process(Pid, PName) ->
+ spawn(fun() ->
+ register(PName, self()),
+ receive
+ continue -> continue(Pid);
+ cancel -> ok
+ end
+ end).
+
+continue() ->
+ case whereis(test_server_break_process) of
+ undefined -> ok;
+ BreakProcess -> BreakProcess ! continue
+ end.
+
+continue(TestCase) when is_atom(TestCase) ->
+ PName = list_to_atom("test_server_break_process_" ++
+ atom_to_list(TestCase)),
+ case whereis(PName) of
+ undefined -> ok;
+ BreakProcess -> BreakProcess ! continue
+ end;
+
+continue(Pid) when is_pid(Pid) ->
+ Pid ! continue.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timetrap_scale_factor() -> Factor
+%%
+%% Returns the amount to scale timetraps with.
+
+%% {X, fun() -> check() end} <- multiply scale with X if Fun() is true
+timetrap_scale_factor() ->
+ timetrap_scale_factor([
+ { 2, fun() -> has_lock_checking() end},
+ { 3, fun() -> has_superfluous_schedulers() end},
+ { 5, fun() -> purify_is_running() end},
+ { 6, fun() -> is_debug() end},
+ {10, fun() -> is_cover() end}
+ ]).
+
+timetrap_scale_factor(Scales) ->
+    %% The fun in each {S,Fun} pair acts as a filter in the list comprehension
+ lists:foldl(fun(S,O) -> O*S end, 1, [ S || {S,F} <- Scales, F()]).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timetrap(Timeout) -> Handle
+%% Handle = term()
+%%
+%% Creates a time trap, that will kill the calling process if the
+%% trap is not cancelled with timetrap_cancel/1, within Timeout milliseconds.
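+%%
+%% Example (hypothetical; slow_operation/0 is illustrative):
+%%
+%%     Handle = timetrap({minutes,2}),
+%%     slow_operation(),
+%%     timetrap_cancel(Handle)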
+timetrap(Timeout) ->
+ MultAndScale =
+ case get(test_server_multiply_timetraps) of
+ undefined -> {fun(T) -> T end, true};
+ {undefined,false} -> {fun(T) -> T end, false};
+ {undefined,_} -> {fun(T) -> T end, true};
+ {infinity,_} -> {fun(_) -> infinity end, false};
+ {Int,Scale} -> {fun(infinity) -> infinity;
+ (T) -> T*Int end, Scale}
+ end,
+ timetrap(Timeout, Timeout, self(), MultAndScale).
+
+%% when the function is called from a different process than
+%% the test case, the test_server_multiply_timetraps data
+%% is unknown and must be passed as an argument
+timetrap(Timeout, TCPid, MultAndScale) ->
+ timetrap(Timeout, Timeout, TCPid, MultAndScale).
+
+timetrap(Timeout0, TimeToReport0, TCPid, MultAndScale = {Multiplier,Scale}) ->
+ %% the time_ms call will either convert Timeout to ms or spawn a
+ %% user timetrap which sends the result to the IO server process
+ Timeout = time_ms(Timeout0, TCPid, MultAndScale),
+ Timeout1 = Multiplier(Timeout),
+ TimeToReport = if Timeout0 == TimeToReport0 ->
+ Timeout1;
+ true ->
+ %% only convert to ms, don't start a
+ %% user timetrap
+ time_ms_check(TimeToReport0)
+ end,
+ cancel_default_timetrap(self() == TCPid),
+ Handle = case Timeout1 of
+ infinity ->
+ infinity;
+ _ ->
+ spawn_link(test_server_sup,timetrap,[Timeout1,TimeToReport,
+ Scale,TCPid])
+ end,
+
+ %% ERROR! This sets dict on IO process instead of testcase process
+ %% if Timeout is return value from previous user timetrap!!
+
+ case get(test_server_timetraps) of
+ undefined ->
+ put(test_server_timetraps,[{Handle,TCPid,{TimeToReport,Scale}}]);
+ List ->
+ List1 = lists:delete({infinity,TCPid,{infinity,false}}, List),
+ put(test_server_timetraps,[{Handle,TCPid,
+ {TimeToReport,Scale}}|List1])
+ end,
+ Handle.
+
+ensure_timetrap(Config) ->
+ case get(test_server_timetraps) of
+ [_|_] ->
+ ok;
+ _ ->
+ case get(test_server_default_timetrap) of
+ undefined -> ok;
+ Garbage ->
+ erase(test_server_default_timetrap),
+ format("=== WARNING: garbage in "
+ "test_server_default_timetrap: ~p~n",
+ [Garbage])
+ end,
+ DTmo = case lists:keysearch(default_timeout,1,Config) of
+ {value,{default_timeout,Tmo}} -> Tmo;
+ _ -> ?DEFAULT_TIMETRAP_SECS
+ end,
+ format("=== test_server setting default "
+ "timetrap of ~p seconds~n",
+ [DTmo]),
+ put(test_server_default_timetrap, timetrap(seconds(DTmo)))
+ end.
+
+%% executing on IO process, no default timetrap ever set here
+cancel_default_timetrap(false) ->
+ ok;
+cancel_default_timetrap(true) ->
+ case get(test_server_default_timetrap) of
+ undefined ->
+ ok;
+ TimeTrap when is_pid(TimeTrap) ->
+ timetrap_cancel(TimeTrap),
+ erase(test_server_default_timetrap),
+ format("=== test_server canceled default timetrap "
+ "since another timetrap was set~n"),
+ ok;
+ Garbage ->
+ erase(test_server_default_timetrap),
+ format("=== WARNING: garbage in "
+ "test_server_default_timetrap: ~p~n",
+ [Garbage]),
+ error
+ end.
+
+time_ms({hours,N}, _, _) -> hours(N);
+time_ms({minutes,N}, _, _) -> minutes(N);
+time_ms({seconds,N}, _, _) -> seconds(N);
+time_ms({Other,_N}, _, _) ->
+ format("=== ERROR: Invalid time specification: ~p. "
+ "Should be seconds, minutes, or hours.~n", [Other]),
+ exit({invalid_time_format,Other});
+time_ms(Ms, _, _) when is_integer(Ms) -> Ms;
+time_ms(infinity, _, _) -> infinity;
+time_ms(Fun, TCPid, MultAndScale) when is_function(Fun) ->
+ time_ms_apply(Fun, TCPid, MultAndScale);
+time_ms({M,F,A}=MFA, TCPid, MultAndScale) when is_atom(M),
+ is_atom(F),
+ is_list(A) ->
+ time_ms_apply(MFA, TCPid, MultAndScale);
+time_ms(Other, _, _) -> exit({invalid_time_format,Other}).
+
+time_ms_check(MFA = {M,F,A}) when is_atom(M), is_atom(F), is_list(A) ->
+ MFA;
+time_ms_check(Fun) when is_function(Fun) ->
+ Fun;
+time_ms_check(Other) ->
+ time_ms(Other, undefined, undefined).
+
+time_ms_apply(Func, TCPid, MultAndScale) ->
+ {_,GL} = process_info(TCPid, group_leader),
+ WhoAmI = self(), % either TC or IO server
+ T0 = erlang:monotonic_time(),
+ UserTTSup =
+ spawn(fun() ->
+ user_timetrap_supervisor(Func, WhoAmI, TCPid,
+ GL, T0, MultAndScale)
+ end),
+ receive
+ {UserTTSup,infinity} ->
+ %% remember the user timetrap so that it can be cancelled
+ save_user_timetrap(TCPid, UserTTSup, T0),
+ %% we need to make sure the user timetrap function
+ %% gets time to execute and return
+ timetrap(infinity, TCPid, MultAndScale)
+ after 5000 ->
+ exit(UserTTSup, kill),
+ if WhoAmI /= GL ->
+ exit({user_timetrap_error,time_ms_apply});
+ true ->
+ format("=== ERROR: User timetrap execution failed!", []),
+ ignore
+ end
+ end.
+
+user_timetrap_supervisor(Func, Spawner, TCPid, GL, T0, MultAndScale) ->
+ process_flag(trap_exit, true),
+ Spawner ! {self(),infinity},
+ MonRef = monitor(process, TCPid),
+ UserTTSup = self(),
+ group_leader(GL, UserTTSup),
+ UserTT = spawn_link(fun() -> call_user_timetrap(Func, UserTTSup) end),
+ receive
+ {UserTT,Result} ->
+ demonitor(MonRef, [flush]),
+ T1 = erlang:monotonic_time(),
+ Elapsed = erlang:convert_time_unit(T1-T0, native, milli_seconds),
+ try time_ms_check(Result) of
+ TimeVal ->
+ %% this is the new timetrap value to set (return value
+ %% from a fun or an MFA)
+ GL ! {user_timetrap,TCPid,TimeVal,T0,Elapsed,MultAndScale}
+ catch _:_ ->
+ %% when other than a legal timetrap value is returned
+ %% which will be the normal case for user timetraps
+ GL ! {user_timetrap,TCPid,0,T0,Elapsed,MultAndScale}
+ end;
+ {'EXIT',UserTT,Error} when Error /= normal ->
+ demonitor(MonRef, [flush]),
+ GL ! {user_timetrap,TCPid,0,T0,{user_timetrap_error,Error},
+ MultAndScale};
+ {'DOWN',MonRef,_,_,_} ->
+ demonitor(MonRef, [flush]),
+ exit(UserTT, kill)
+ end.
+
+call_user_timetrap(Func, Sup) when is_function(Func) ->
+ try Func() of
+ Result ->
+ Sup ! {self(),Result}
+ catch _:Error ->
+ exit({Error,erlang:get_stacktrace()})
+ end;
+call_user_timetrap({M,F,A}, Sup) ->
+ try apply(M,F,A) of
+ Result ->
+ Sup ! {self(),Result}
+ catch _:Error ->
+ exit({Error,erlang:get_stacktrace()})
+ end.
+
+save_user_timetrap(TCPid, UserTTSup, StartTime) ->
+ %% save pid of user timetrap supervisor process so that
+ %% it may be stopped even before the timetrap func has returned
+ NewUserTT = {TCPid,{UserTTSup,StartTime}},
+ case get(test_server_user_timetrap) of
+ undefined ->
+ put(test_server_user_timetrap, [NewUserTT]);
+ UserTTSups ->
+ case proplists:get_value(TCPid, UserTTSups) of
+ undefined ->
+ put(test_server_user_timetrap,
+ [NewUserTT | UserTTSups]);
+ PrevTTSup ->
+ %% remove prev user timetrap
+ remove_user_timetrap(PrevTTSup),
+ put(test_server_user_timetrap,
+ [NewUserTT | proplists:delete(TCPid,
+ UserTTSups)])
+ end
+ end.
+
+update_user_timetraps(TCPid, StartTime) ->
+ %% called when a user timetrap is triggered
+ case get(test_server_user_timetrap) of
+ undefined ->
+ proceed;
+ UserTTs ->
+ case proplists:get_value(TCPid, UserTTs) of
+ {_UserTTSup,StartTime} -> % same timetrap
+ put(test_server_user_timetrap,
+ proplists:delete(TCPid, UserTTs)),
+ proceed;
+ {OtherUserTTSup,OtherStartTime} ->
+ case OtherStartTime - StartTime of
+ Diff when Diff >= 0 ->
+ ignore;
+ _ ->
+ exit(OtherUserTTSup, kill),
+ put(test_server_user_timetrap,
+ proplists:delete(TCPid, UserTTs)),
+ proceed
+ end;
+ undefined ->
+ proceed
+ end
+ end.
+
+remove_user_timetrap(TTSup) ->
+ exit(TTSup, kill).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timetrap_cancel(Handle) -> ok
+%% Handle = term()
+%%
+%% Cancels a time trap.
+timetrap_cancel(Handle) ->
+ timetrap_cancel_one(Handle, true).
+
+timetrap_cancel_one(infinity, _SendToServer) ->
+ ok;
+timetrap_cancel_one(Handle, SendToServer) ->
+ case get(test_server_timetraps) of
+ undefined ->
+ ok;
+ [{Handle,_,_}] ->
+ erase(test_server_timetraps);
+ Timers ->
+ case lists:keysearch(Handle, 1, Timers) of
+ {value,_} ->
+ put(test_server_timetraps,
+ lists:keydelete(Handle, 1, Timers));
+ false when SendToServer == true ->
+ group_leader() ! {timetrap_cancel_one,Handle,self()};
+ false ->
+ ok
+ end
+ end,
+ test_server_sup:timetrap_cancel(Handle).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timetrap_cancel() -> ok
+%%
+%% Cancels timetrap for current test case.
+timetrap_cancel() ->
+ timetrap_cancel_all(self(), true).
+
+timetrap_cancel_all(TCPid, SendToServer) ->
+ case get(test_server_timetraps) of
+ undefined ->
+ ok;
+ Timers ->
+ [timetrap_cancel_one(Handle, false) ||
+ {Handle,Pid,_} <- Timers, Pid == TCPid]
+ end,
+ case get(test_server_user_timetrap) of
+ undefined ->
+ ok;
+ UserTTs ->
+ case proplists:get_value(TCPid, UserTTs) of
+ {UserTTSup,_StartTime} ->
+ remove_user_timetrap(UserTTSup),
+ put(test_server_user_timetrap,
+ proplists:delete(TCPid, UserTTs));
+ undefined ->
+ ok
+ end
+ end,
+ if SendToServer == true ->
+ group_leader() ! {timetrap_cancel_all,TCPid,self()};
+ true ->
+ ok
+ end,
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% get_timetrap_info() -> {Timeout,Scale} | undefined
+%%
+%% Read timetrap info for current test case
+get_timetrap_info() ->
+ get_timetrap_info(self(), true).
+
+get_timetrap_info(TCPid, SendToServer) ->
+ case get(test_server_timetraps) of
+ undefined ->
+ undefined;
+ Timers ->
+ case [Info || {Handle,Pid,Info} <- Timers,
+ Pid == TCPid, Handle /= infinity] of
+ [I|_] ->
+ I;
+ [] when SendToServer == true ->
+ tc_supervisor_req({get_timetrap_info,TCPid});
+ [] ->
+ undefined
+ end
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% hours(N) -> Milliseconds
+%% minutes(N) -> Milliseconds
+%% seconds(N) -> Milliseconds
+%% N = integer() | float()
+%% Milliseconds = integer()
+%%
+%% Transforms the named units to milliseconds. Fractions in the input
+%% are accepted. The output is an integer.
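+%%
+%% Examples: seconds(0.5) -> 500, minutes(2) -> 120000,
+%% hours(1) -> 3600000.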
+hours(N) -> trunc(N * 1000 * 60 * 60).
+minutes(N) -> trunc(N * 1000 * 60).
+seconds(N) -> trunc(N * 1000).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% tc_supervisor_req(Tag) -> Result
+%% tc_supervisor_req(Tag, Msg) -> Result
+%%
+
+tc_supervisor_req(Tag) ->
+ Pid = test_server_gl:get_tc_supervisor(group_leader()),
+ Pid ! {Tag,self()},
+ receive
+ {Pid,Tag,Result} ->
+ Result
+ after 5000 ->
+ error(no_answer_from_tc_supervisor)
+ end.
+
+tc_supervisor_req(Tag, Msg) ->
+ Pid = test_server_gl:get_tc_supervisor(group_leader()),
+ Pid ! {Tag,self(),Msg},
+ receive
+ {Pid,Tag,Result} ->
+ Result
+ after 5000 ->
+ error(no_answer_from_tc_supervisor)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timecall(M,F,A) -> {Time,Val}
+%% Time = float()
+%%
+%% Measures the time spent evaluating MFA. The measurement is done with
+%% erlang:now/0, and should have pretty good accuracy on most platforms.
+%% The function is not evaluated in a catch context.
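+%%
+%% Example (hypothetical):
+%%     {Time,Val} = timecall(lists, seq, [1,1000])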
+timecall(M, F, A) ->
+ test_server_sup:timecall(M,F,A).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% do_times(N,M,F,A) -> ok
+%% do_times(N,Fun) -> ok
+%% N = integer()
+%% Fun = fun() -> void()
+%%
+%% Evaluates MFA or Fun N times, and returns ok.
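+%%
+%% Example: do_times(3, fun() -> io:format("ping~n") end) prints
+%% "ping" three times and returns ok.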
+do_times(N,M,F,A) when N>0 ->
+ apply(M,F,A),
+ do_times(N-1,M,F,A);
+do_times(0,_,_,_) ->
+ ok.
+
+do_times(N,Fun) when N>0 ->
+ Fun(),
+ do_times(N-1,Fun);
+do_times(0,_) ->
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% m_out_of_n(M,N,Fun) -> ok | exit({m_out_of_n_failed,{R,left_to_do}})
+%% M = integer()
+%% N = integer()
+%% Fun = fun() -> void()
+%% R = integer()
+%%
+%% Repeats evaluating the given function until it has succeeded
+%% (i.e. didn't crash) M times. If, after N times, M successful attempts
+%% have not been accomplished, the process crashes with reason
+%% {m_out_of_n_failed,{R,left_to_do}}, where R indicates how many cases
+%% remained to be successfully completed.
+%%
+%% For example:
+%% m_out_of_n(1,4,fun() -> tricky_test_case() end)
+%% Tries to run tricky_test_case() up to 4 times,
+%% and is happy if it succeeds once.
+%%
+%% m_out_of_n(7,8,fun() -> clock_sanity_check() end)
+%% Tries running clock_sanity_check() up to 8
+%% times and allows the function to fail once.
+%% This might be useful if clock_sanity_check/0
+%% is known to fail if the clock crosses an hour
+%% boundary during the test (and the up to 8
+%% test runs could never cross 2 boundaries)
+m_out_of_n(0,_,_) ->
+ ok;
+m_out_of_n(M,0,_) ->
+ exit({m_out_of_n_failed,{M,left_to_do}});
+m_out_of_n(M,N,Fun) ->
+ case catch Fun() of
+ {'EXIT',_} ->
+ m_out_of_n(M,N-1,Fun);
+ _Other ->
+ m_out_of_n(M-1,N-1,Fun)
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%call_crash(M,F,A)
+%%call_crash(Time,M,F,A)
+%%call_crash(Time,Crash,M,F,A)
+%% M - atom()
+%% F - atom()
+%% A - [term()]
+%% Time - integer() in milliseconds.
+%% Crash - term()
+%%
+%% Spawns a new process that calls MFA. The call is considered
+%% successful if the call crashes with the given reason (Crash),
+%% or with any reason if Crash is not specified.
+%% ** The call must terminate within the given Time (defaults
+%% to infinity), or it is considered a failure (an exit with reason
+%% 'call_crash_timeout' is generated).
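+%%
+%% Example (hypothetical): require the call to crash, for any reason,
+%% within 5 seconds:
+%%     call_crash(5000, erlang, exit, [deliberate])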
+
+call_crash(M,F,A) ->
+ call_crash(infinity,M,F,A).
+call_crash(Time,M,F,A) ->
+ call_crash(Time,any,M,F,A).
+call_crash(Time,Crash,M,F,A) ->
+ test_server_sup:call_crash(Time,Crash,M,F,A).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% start_node(SlaveName, Type, Options) ->
+%% {ok, Slave} | {error, Reason}
+%%
+%% SlaveName = string() | atom()
+%% Type = slave | peer
+%% Options = [{tuple(), term()}]
+%%
+%% Options is a tuple list which may contain one
+%% or more of these members:
+%%
+%% Slave and Peer:
+%% {remote, true} - Start the node on a remote host. If not specified,
+%% the node will be started on the local host (with
+%% some exceptions, for instance VxWorks,
+%% where all nodes are started on a remote host).
+%% {args, Arguments} - Arguments passed directly to the node.
+%% {cleanup, false} - Nodes started with this option will not be killed
+%% by the test server after completion of the test case.
+%% Therefore it is IMPORTANT that the USER terminates
+%% the node!!
+%% {erl, ReleaseList} - Use an Erlang emulator determined by ReleaseList
+%% when starting nodes, instead of the same emulator
+%% as the test server is running. ReleaseList is a list
+%% of specifiers, where a specifier is either
+%% {release, Rel}, {prog, Prog}, or 'this'. Rel is
+%% either the name of a release, e.g., "r7a" or
+%% 'latest'. 'this' means using the same emulator as
+%% the test server. Prog is the name of an emulator
+%% executable. If the list has more than one element,
+%% one of them is picked randomly. (Only
+%% works on Solaris and Linux, and the test
+%% server gives warnings when it notices that
+%% nodes are not of the same version as
+%% itself.)
+%%
+%% Peer only:
+%% {wait, false} - Don't wait for the node to be started.
+%% {fail_on_error, false} - Returns {error, Reason} rather than failing
+%% the test case. This option can only be used with
+%% peer nodes.
+%% Note that slave nodes always act as if they had
+%% fail_on_error==false.
+%%
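+%% Example (hypothetical; the -pa path is illustrative):
+%%
+%%     {ok,Node} = start_node(helper, slave,
+%%                            [{args,"-pa /path/to/test/ebin"}])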
+
+start_node(Name, Type, Options) ->
+ lists:foreach(
+ fun(N) ->
+ case firstname(N) of
+ Name ->
+ format("=== WARNING: Trying to start node \'~w\' when node"
+ " with same first name exists: ~w", [Name, N]);
+ _other -> ok
+ end
+ end,
+ nodes()),
+
+ group_leader() ! {sync_apply,
+ self(),
+ {test_server_ctrl,start_node,[Name,Type,Options]}},
+ Result = receive {sync_result,R} -> R end,
+
+ case Result of
+ {ok,Node} ->
+
+ %% Cannot run cover on shielded node or on a node started
+ %% by a shielded node.
+ Cover = case is_cover(Node) of
+ true ->
+ proplists:get_value(start_cover,Options,true);
+ false ->
+ false
+ end,
+
+ net_adm:ping(Node),
+ case Cover of
+ true ->
+ do_cover_for_node(Node,start);
+ _ ->
+ ok
+ end,
+ {ok,Node};
+ {fail,Reason} -> fail(Reason);
+ Error -> Error
+ end.
+
+firstname(N) ->
+ list_to_atom(upto($@,atom_to_list(N))).
+
+%% This should!!! crash if H is not a member of the list.
+upto(H, [H | _T]) -> [];
+upto(H, [X | T]) -> [X | upto(H,T)].
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% wait_for_node(Name) -> ok | {error,timeout}
+%%
+%% If a node is started with the options {wait,false}, this function
+%% can be used to wait for the node to come up from the
+%% test server point of view (i.e. wait until it has contacted
+%% the test server controller after startup)
+wait_for_node(Slave) ->
+ group_leader() ! {sync_apply,
+ self(),
+ {test_server_ctrl,wait_for_node,[Slave]}},
+ Result = receive {sync_result,R} -> R end,
+ case Result of
+ ok ->
+ net_adm:ping(Slave),
+ case is_cover(Slave) of
+ true ->
+ do_cover_for_node(Slave,start);
+ _ ->
+ ok
+ end;
+ _ ->
+ ok
+ end,
+ Result.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% stop_node(Name) -> true|false
+%%
+%% Kills a (remote) node.
+%% Also inform test_server_ctrl so it can clean up!
+stop_node(Slave) ->
+ Cover = is_cover(Slave),
+ if Cover -> do_cover_for_node(Slave,flush,false);
+ true -> ok
+ end,
+ group_leader() ! {sync_apply,self(),{test_server_ctrl,stop_node,[Slave]}},
+ Result = receive {sync_result,R} -> R end,
+ case Result of
+ ok ->
+ erlang:monitor_node(Slave, true),
+ slave:stop(Slave),
+ receive
+ {nodedown, Slave} ->
+ format(minor, "Stopped slave node: ~w", [Slave]),
+ format(major, "=node_stop ~w", [Slave]),
+ if Cover -> do_cover_for_node(Slave,stop,false);
+ true -> ok
+ end,
+ true
+ after 30000 ->
+ format("=== WARNING: Node ~w does not seem to terminate.",
+ [Slave]),
+ erlang:monitor_node(Slave, false),
+ receive {nodedown, Slave} -> ok after 0 -> ok end,
+ false
+ end;
+ {error, _Reason} ->
+	    %% Either the node is already dead, or it was started
+	    %% with the {cleanup,false} option, or it was started
+	    %% in some other way than with test_server:start_node/3
+	    format("=== WARNING: Attempt to stop a nonexistent slave node (~w)~n"
+ "=== Trying to kill it anyway!!!",
+ [Slave]),
+ case net_adm:ping(Slave)of
+ pong ->
+ erlang:monitor_node(Slave, true),
+ slave:stop(Slave),
+ receive
+ {nodedown, Slave} ->
+ format(minor, "Stopped slave node: ~w", [Slave]),
+ format(major, "=node_stop ~w", [Slave]),
+ if Cover -> do_cover_for_node(Slave,stop,false);
+ true -> ok
+ end,
+ true
+ after 30000 ->
+ format("=== WARNING: Node ~w does not seem to terminate.",
+ [Slave]),
+ erlang:monitor_node(Slave, false),
+ receive {nodedown, Slave} -> ok after 0 -> ok end,
+ false
+ end;
+ pang ->
+ if Cover -> do_cover_for_node(Slave,stop,false);
+ true -> ok
+ end,
+ false
+ end
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_release_available(Release) -> true | false
+%% Release -> string()
+%%
+%% Test if a release (such as "r10b") is available to be
+%% started using start_node/3.
+
+is_release_available(Release) ->
+ group_leader() ! {sync_apply,
+ self(),
+ {test_server_ctrl,is_release_available,[Release]}},
+ receive {sync_result,R} -> R end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% run_on_shielded_node(Fun, CArgs) -> term()
+%% Fun -> function()
+%% CArgs -> list()
+%%
+%%
+%% Fun is executed in a process on a temporarily created
+%% hidden node. Communication with the job process goes
+%% via a job proxy process on the hidden node, i.e. the
+%% group leader of the test case process is the job proxy
+%% process. This makes it possible to start nodes from the
+%% hidden node that are unaware of the test server node.
+%% Without the job proxy process all processes would have
+%% a process residing on the test_server node as group_leader.
+%%
+%% Fun - Function to execute
+%% CArgs - Extra command line arguments to use when starting
+%% the shielded node.
+%%
+%% If Fun is successfully executed, the result is returned.
+%%
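+%% Example (hypothetical):
+%%     HiddenNode = run_on_shielded_node(fun() -> node() end, "")
+%%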
+
+run_on_shielded_node(Fun, CArgs) when is_function(Fun), is_list(CArgs) ->
+ Nr = erlang:unique_integer([positive]),
+ Name = "shielded_node-" ++ integer_to_list(Nr),
+ Node = case start_node(Name, slave, [{args, "-hidden " ++ CArgs}]) of
+ {ok, N} -> N;
+ Err -> fail({failed_to_start_shielded_node, Err})
+ end,
+ Master = self(),
+ Ref = make_ref(),
+ Slave = spawn(Node, start_job_proxy_fun(Master, Fun)),
+ MRef = erlang:monitor(process, Slave),
+ Slave ! Ref,
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ stop_node(Node),
+ fail(Info);
+ {Ref, Res} ->
+ stop_node(Node),
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ Res
+ end
+ end.
+
+-spec start_job_proxy_fun(_, _) -> fun(() -> no_return()).
+start_job_proxy_fun(Master, Fun) ->
+ fun () ->
+ start_job_proxy(),
+ receive
+ Ref ->
+ Master ! {Ref, Fun()}
+ end,
+ receive after infinity -> infinity end
+ end.
+
+%% Return true if Name or node() is a shielded node
+is_shielded(Name) ->
+ case {cast_to_list(Name),atom_to_list(node())} of
+ {"shielded_node"++_,_} -> true;
+ {_,"shielded_node"++_} -> true;
+ _ -> false
+ end.
+
+same_version(Name) ->
+ ThisVersion = erlang:system_info(version),
+ OtherVersion = rpc:call(Name, erlang, system_info, [version]),
+ ThisVersion =:= OtherVersion.
+
+is_cover(Name) ->
+ case is_cover() of
+ true ->
+ not is_shielded(Name) andalso same_version(Name);
+ false ->
+ false
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% temp_name(Stem) -> string()
+%% Stem = string()
+%%
+%% Create a unique file name, based on (starting with) Stem.
+%% A filename of the form <Stem><Number> is generated, and the
+%% function checks that the file doesn't already exist.
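+%%
+%% Example (hypothetical): temp_name("/tmp/data_") might return
+%% "/tmp/data_123456".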
+temp_name(Stem) ->
+ Num = erlang:unique_integer([positive]),
+ RandomName = Stem ++ integer_to_list(Num),
+ {ok,Files} = file:list_dir(filename:dirname(Stem)),
+ case lists:member(RandomName,Files) of
+ true ->
+ %% oh, already exists - bad luck. Try again.
+ temp_name(Stem); %% recursively try again
+ false ->
+ RandomName
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% app_test/1
+%%
+app_test(App) ->
+ app_test(App, pedantic).
+app_test(App, Mode) ->
+ test_server_sup:app_test(App, Mode).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% appup_test/1
+%%
+appup_test(App) ->
+ test_server_sup:appup_test(App).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_native(Mod) -> true | false
+%%
+%% Checks whether the module is natively compiled or not.
+
+is_native(Mod) ->
+ (catch Mod:module_info(native)) =:= true.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% comment(String) -> ok
+%%
+%% The given String will occur in the comment field
+%% of the table on the test suite result page. If
+%% called several times, only the last comment is
+%% printed.
+%% The comment set with comment/1 is also overwritten
+%% by the return value {comment,Comment} or by fail/1
+%% (which prints Reason as the comment).
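+%%
+%% Example: comment("needed 3 retries")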
+comment(String) ->
+ group_leader() ! {comment,String},
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% read_comment() -> string()
+%%
+%% Read the current comment string stored in
+%% state during test case execution.
+read_comment() ->
+ tc_supervisor_req(read_comment).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% make_priv_dir() -> ok
+%%
+%% Order test server to create the private directory
+%% for the current test case.
+make_priv_dir() ->
+ tc_supervisor_req(make_priv_dir).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% os_type() -> OsType
+%%
+%% Returns the OsType of the target node. OsType is
+%% the same as returned by os:type().
+os_type() ->
+ os:type().
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_cover() -> boolean()
+%%
+%% Returns true if cover is running, else false
+is_cover() ->
+ case whereis(cover_server) of
+ undefined -> false;
+ _ -> true
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_debug() -> boolean()
+%%
+%% Returns true if the emulator is debug-compiled, false otherwise.
+is_debug() ->
+ case catch erlang:system_info(debug_compiled) of
+ {'EXIT', _} ->
+ case string:str(erlang:system_info(system_version), "debug") of
+ Int when is_integer(Int), Int > 0 -> true;
+ _ -> false
+ end;
+ Res ->
+ Res
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% has_lock_checking() -> boolean()
+%%
+%% Returns true if the emulator has lock checking enabled, false otherwise.
+has_lock_checking() ->
+ case catch erlang:system_info(lock_checking) of
+ {'EXIT', _} -> false;
+ Res -> Res
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% has_superfluous_schedulers() -> boolean()
+%%
+%% Returns true if the emulator has more scheduler threads than logical
+%% processors, false otherwise.
+has_superfluous_schedulers() ->
+ case catch {erlang:system_info(schedulers),
+ erlang:system_info(logical_processors)} of
+ {S, P} when is_integer(S), is_integer(P), S > P -> true;
+ _ -> false
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_commercial() -> boolean()
+%%
+%% Returns true if the current emulator is commercially supported.
+%% (The emulator will not have "[source]" in its start-up message.)
+%% We might want to do more tests on a commercial platform, for
+%% instance ensuring that all applications have documentation.
+is_commercial() ->
+ case string:str(erlang:system_info(system_version), "source") of
+ Int when is_integer(Int), Int > 0 -> false;
+ _ -> true
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% DEBUGGER INTERFACE %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% purify_is_running() -> false|true
+%%
+%% Tests if Purify is currently running.
+
+purify_is_running() ->
+ case catch erlang:system_info({error_checker, running}) of
+ {'EXIT', _} -> false;
+ Res -> Res
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% purify_new_leaks() -> false|BytesLeaked
+%% BytesLeaked = integer()
+%%
+%% Checks for new memory leaks if Purify is active.
+%% Returns the number of bytes leaked, or false if Purify
+%% is not running.
+purify_new_leaks() ->
+ case catch erlang:system_info({error_checker, memory}) of
+ {'EXIT', _} -> false;
+ Leaked when is_integer(Leaked) -> Leaked
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% purify_new_fds_inuse() -> false|FdsInuse
+%% FdsInuse = integer()
+%%
+%% Checks for new file descriptors in use.
+%% Returns the number of new file descriptors in use, or false
+%% if Purify is not running.
+purify_new_fds_inuse() ->
+ case catch erlang:system_info({error_checker, fd}) of
+ {'EXIT', _} -> false;
+ Inuse when is_integer(Inuse) -> Inuse
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% purify_format(Format, Args) -> ok
+%% Format = string()
+%% Args = lists()
+%%
+%% Outputs the formatted string to Purify's logfile, if Purify is active.
+purify_format(Format, Args) ->
+ (catch erlang:system_info({error_checker, io_lib:format(Format, Args)})),
+ ok.
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% Apply given function and reply to caller or proxy.
+%%
+do_sync_apply(Proxy, From, {M,F,A}) ->
+ Result = apply(M, F, A),
+ if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result};
+ true -> From ! {sync_result,Result}
+ end.
diff --git a/lib/common_test/src/test_server_ctrl.erl b/lib/common_test/src/test_server_ctrl.erl
new file mode 100644
index 0000000000..833d99907e
--- /dev/null
+++ b/lib/common_test/src/test_server_ctrl.erl
@@ -0,0 +1,5692 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2002-2014. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_ctrl).
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% %%
+%% The Erlang Test Server %%
+%% %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% MODULE DEPENDENCIES:
+%% HARD TO REMOVE: erlang, lists, io_lib, gen_server, file, io, string,
+%% code, ets, rpc, gen_tcp, inet, erl_tar, sets,
+%% test_server, test_server_sup, test_server_node
+%% EASIER TO REMOVE: filename, filelib, lib, re
+%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%% SUPERVISOR INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([start/0, start/1, start_link/1, stop/0]).
+
+%%% OPERATOR INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([add_spec/1, add_dir/2, add_dir/3]).
+-export([add_module/1, add_module/2,
+ add_conf/3,
+ add_case/2, add_case/3, add_cases/2, add_cases/3]).
+-export([add_dir_with_skip/3, add_dir_with_skip/4, add_tests_with_skip/3]).
+-export([add_module_with_skip/2, add_module_with_skip/3,
+ add_conf_with_skip/4,
+ add_case_with_skip/3, add_case_with_skip/4,
+ add_cases_with_skip/3, add_cases_with_skip/4]).
+-export([jobs/0, run_test/1, wait_finish/0, idle_notify/1,
+ abort_current_testcase/1, abort/0]).
+-export([start_get_totals/1, stop_get_totals/0]).
+-export([reject_io_reqs/1, get_levels/0, set_levels/3]).
+-export([multiply_timetraps/1, scale_timetraps/1, get_timetrap_parameters/0]).
+-export([create_priv_dir/1]).
+-export([cover/1, cover/2, cover/3,
+ cover_compile/7, cover_analyse/2, cross_cover_analyse/2,
+ trc/1, stop_trace/0]).
+-export([testcase_callback/1]).
+-export([set_random_seed/1]).
+-export([kill_slavenodes/0]).
+
+%%% TEST_SERVER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([print/2, print/3, print/4, print_timestamp/2]).
+-export([start_node/3, stop_node/1, wait_for_node/1, is_release_available/1]).
+-export([format/1, format/2, format/3, to_string/1]).
+-export([get_target_info/0]).
+-export([get_hosts/0]).
+-export([node_started/1]).
+-export([uri_encode/1,uri_encode/2]).
+
+%%% DEBUGGER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([i/0, p/1, p/3, pi/2, pi/4, t/0, t/1]).
+
+%%% PRIVATE EXPORTED %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-export([init/1, terminate/2]).
+-export([handle_call/3, handle_cast/2, handle_info/2]).
+-export([do_test_cases/4]).
+-export([do_spec/2, do_spec_list/2]).
+-export([xhtml/2, escape_chars/1]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-include("test_server_internal.hrl").
+-include_lib("kernel/include/file.hrl").
+-define(suite_ext, "_SUITE").
+-define(log_ext, ".log.html").
+-define(src_listing_ext, ".src.html").
+-define(logdir_ext, ".logs").
+-define(data_dir_suffix, "_data/").
+-define(suitelog_name, "suite.log").
+-define(coverlog_name, "cover.html").
+-define(raw_coverlog_name, "cover.log").
+-define(cross_coverlog_name, "cross_cover.html").
+-define(raw_cross_coverlog_name, "cross_cover.log").
+-define(cross_cover_info, "cross_cover.info").
+-define(cover_total, "total_cover.log").
+-define(unexpected_io_log, "unexpected_io.log.html").
+-define(last_file, "last_name").
+-define(last_link, "last_link").
+-define(last_test, "last_test").
+-define(html_ext, ".html").
+-define(now, os:timestamp()).
+
+-define(void_fun, fun() -> ok end).
+-define(mod_result(X), if X == skip -> skipped;
+ X == auto_skip -> skipped;
+ true -> X end).
+
+-define(auto_skip_color, "#FFA64D").
+-define(user_skip_color, "#FF8000").
+-define(sortable_table_name, "SortableTable").
+
+-record(state,{jobs=[], levels={1,19,10}, reject_io_reqs=false,
+ multiply_timetraps=1, scale_timetraps=true,
+ create_priv_dir=auto_per_run, finish=false,
+ target_info, trc=false, cover=false, wait_for_node=[],
+ testcase_callback=undefined, idle_notify=[],
+ get_totals=false, random_seed=undefined}).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% OPERATOR INTERFACE
+
+add_dir(Name, Job=[Dir|_Dirs]) when is_list(Dir) ->
+ add_job(cast_to_list(Name),
+ lists:map(fun(D)-> {dir,cast_to_list(D)} end, Job));
+add_dir(Name, Dir) ->
+ add_job(cast_to_list(Name), {dir,cast_to_list(Dir)}).
+
+add_dir(Name, Job=[Dir|_Dirs], Pattern) when is_list(Dir) ->
+ add_job(cast_to_list(Name),
+ lists:map(fun(D)-> {dir,cast_to_list(D),
+ cast_to_list(Pattern)} end, Job));
+add_dir(Name, Dir, Pattern) ->
+ add_job(cast_to_list(Name), {dir,cast_to_list(Dir),cast_to_list(Pattern)}).
+
+add_module(Mod) when is_atom(Mod) ->
+ add_job(atom_to_list(Mod), {Mod,all}).
+
+add_module(Name, Mods) when is_list(Mods) ->
+ add_job(cast_to_list(Name), lists:map(fun(Mod) -> {Mod,all} end, Mods)).
+
+add_conf(Name, Mod, Conf) when is_tuple(Conf) ->
+ add_job(cast_to_list(Name), {Mod,[Conf]});
+
+add_conf(Name, Mod, Confs) when is_list(Confs) ->
+ add_job(cast_to_list(Name), {Mod,Confs}).
+
+add_case(Mod, Case) when is_atom(Mod), is_atom(Case) ->
+ add_job(atom_to_list(Mod), {Mod,Case}).
+
+add_case(Name, Mod, Case) when is_atom(Mod), is_atom(Case) ->
+ add_job(Name, {Mod,Case}).
+
+add_cases(Mod, Cases) when is_atom(Mod), is_list(Cases) ->
+ add_job(atom_to_list(Mod), {Mod,Cases}).
+
+add_cases(Name, Mod, Cases) when is_atom(Mod), is_list(Cases) ->
+ add_job(Name, {Mod,Cases}).
+
+add_spec(Spec) ->
+ Name = filename:rootname(Spec, ".spec"),
+ case filelib:is_file(Spec) of
+ true -> add_job(Name, {spec,Spec});
+ false -> {error,nofile}
+ end.
+
+%% This version of the interface is to be used if there are
+%% suites or cases that should be skipped.
+
+add_dir_with_skip(Name, Job=[Dir|_Dirs], Skip) when is_list(Dir) ->
+ add_job(cast_to_list(Name),
+ lists:map(fun(D)-> {dir,cast_to_list(D)} end, Job),
+ Skip);
+add_dir_with_skip(Name, Dir, Skip) ->
+ add_job(cast_to_list(Name), {dir,cast_to_list(Dir)}, Skip).
+
+add_dir_with_skip(Name, Job=[Dir|_Dirs], Pattern, Skip) when is_list(Dir) ->
+ add_job(cast_to_list(Name),
+ lists:map(fun(D)-> {dir,cast_to_list(D),
+ cast_to_list(Pattern)} end, Job),
+ Skip);
+add_dir_with_skip(Name, Dir, Pattern, Skip) ->
+ add_job(cast_to_list(Name),
+ {dir,cast_to_list(Dir),cast_to_list(Pattern)}, Skip).
+
+add_module_with_skip(Mod, Skip) when is_atom(Mod) ->
+ add_job(atom_to_list(Mod), {Mod,all}, Skip).
+
+add_module_with_skip(Name, Mods, Skip) when is_list(Mods) ->
+ add_job(cast_to_list(Name), lists:map(fun(Mod) -> {Mod,all} end, Mods), Skip).
+
+add_conf_with_skip(Name, Mod, Conf, Skip) when is_tuple(Conf) ->
+ add_job(cast_to_list(Name), {Mod,[Conf]}, Skip);
+
+add_conf_with_skip(Name, Mod, Confs, Skip) when is_list(Confs) ->
+ add_job(cast_to_list(Name), {Mod,Confs}, Skip).
+
+add_case_with_skip(Mod, Case, Skip) when is_atom(Mod), is_atom(Case) ->
+ add_job(atom_to_list(Mod), {Mod,Case}, Skip).
+
+add_case_with_skip(Name, Mod, Case, Skip) when is_atom(Mod), is_atom(Case) ->
+ add_job(Name, {Mod,Case}, Skip).
+
+add_cases_with_skip(Mod, Cases, Skip) when is_atom(Mod), is_list(Cases) ->
+ add_job(atom_to_list(Mod), {Mod,Cases}, Skip).
+
+add_cases_with_skip(Name, Mod, Cases, Skip) when is_atom(Mod), is_list(Cases) ->
+ add_job(Name, {Mod,Cases}, Skip).
+
+add_tests_with_skip(LogDir, Tests, Skip) ->
+ add_job(LogDir,
+ lists:map(fun({Dir,all,all}) ->
+ {Dir,{dir,Dir}};
+ ({Dir,Mods,all}) ->
+ {Dir,lists:map(fun(M) -> {M,all} end, Mods)};
+ ({Dir,Mod,Cases}) ->
+ {Dir,{Mod,Cases}}
+ end, Tests),
+ Skip).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% COMMAND LINE INTERFACE
+
+parse_cmd_line(Cmds) ->
+ parse_cmd_line(Cmds, [], [], local, false, false, undefined).
+
+parse_cmd_line(['SPEC',Spec|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ case file:consult(Spec) of
+ {ok, TermList} ->
+ Name = filename:rootname(Spec),
+ parse_cmd_line(Cmds, TermList++SpecList, [Name|Names], Param,
+ Trc, Cov, TCCB);
+ {error,Reason} ->
+ io:format("Can't open ~w: ~p\n",[Spec, file:format_error(Reason)]),
+ parse_cmd_line(Cmds, SpecList, Names, Param, Trc, Cov, TCCB)
+ end;
+parse_cmd_line(['NAME',Name|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ parse_cmd_line(Cmds, SpecList, [{name,atom_to_list(Name)}|Names],
+ Param, Trc, Cov, TCCB);
+parse_cmd_line(['SKIPMOD',Mod|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ parse_cmd_line(Cmds, [{skip,{Mod,"by command line"}}|SpecList], Names,
+ Param, Trc, Cov, TCCB);
+parse_cmd_line(['SKIPCASE',Mod,Case|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ parse_cmd_line(Cmds, [{skip,{Mod,Case,"by command line"}}|SpecList], Names,
+ Param, Trc, Cov, TCCB);
+parse_cmd_line(['DIR',Dir|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ Name = filename:basename(Dir),
+ parse_cmd_line(Cmds, [{topcase,{dir,Name}}|SpecList], [Name|Names],
+ Param, Trc, Cov, TCCB);
+parse_cmd_line(['MODULE',Mod|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ parse_cmd_line(Cmds,[{topcase,{Mod,all}}|SpecList],[atom_to_list(Mod)|Names],
+ Param, Trc, Cov, TCCB);
+parse_cmd_line(['CASE',Mod,Case|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ parse_cmd_line(Cmds,[{topcase,{Mod,Case}}|SpecList],[atom_to_list(Mod)|Names],
+ Param, Trc, Cov, TCCB);
+parse_cmd_line(['TRACE',Trc|Cmds], SpecList, Names, Param, _Trc, Cov, TCCB) ->
+ parse_cmd_line(Cmds, SpecList, Names, Param, Trc, Cov, TCCB);
+parse_cmd_line(['COVER',App,CF,Analyse|Cmds], SpecList, Names, Param, Trc, _Cov, TCCB) ->
+ parse_cmd_line(Cmds, SpecList, Names, Param, Trc, {{App,CF}, Analyse}, TCCB);
+parse_cmd_line(['TESTCASE_CALLBACK',Mod,Func|Cmds], SpecList, Names, Param, Trc, Cov, _) ->
+ parse_cmd_line(Cmds, SpecList, Names, Param, Trc, Cov, {Mod,Func});
+parse_cmd_line([Obj|_Cmds], _SpecList, _Names, _Param, _Trc, _Cov, _TCCB) ->
+ io:format("~w: Bad argument: ~w\n", [?MODULE,Obj]),
+ io:format(" Use the `ts' module to start tests.\n", []),
+ io:format(" (If you ARE using `ts', there is a bug in `ts'.)\n", []),
+ halt(1);
+parse_cmd_line([], SpecList, Names, Param, Trc, Cov, TCCB) ->
+ NameList = lists:reverse(Names, ["suite"]),
+ Name = case lists:keysearch(name, 1, NameList) of
+ {value,{name,N}} -> N;
+ false -> hd(NameList)
+ end,
+ {lists:reverse(SpecList), Name, Param, Trc, Cov, TCCB}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% cast_to_list(X) -> string()
+%% X = list() | atom() | void()
+%% Returns a string representation of whatever was input
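+%%
+%% Examples:
+%%   cast_to_list(foo)   -> "foo"
+%%   cast_to_list("foo") -> "foo"
+%%   cast_to_list({a,1}) -> "{a,1}"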
+
+cast_to_list(X) when is_list(X) -> X;
+cast_to_list(X) when is_atom(X) -> atom_to_list(X);
+cast_to_list(X) -> lists:flatten(io_lib:format("~w", [X])).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% START INTERFACE
+
+%% Kept for backwards compatibility
+start(_) ->
+ start().
+start_link(_) ->
+ start_link().
+
+
+start() ->
+ case gen_server:start({local,?MODULE}, ?MODULE, [], []) of
+ {ok, Pid} ->
+ {ok, Pid};
+ Other ->
+ Other
+ end.
+
+start_link() ->
+ case gen_server:start_link({local,?MODULE}, ?MODULE, [], []) of
+ {ok, Pid} ->
+ {ok, Pid};
+ Other ->
+ Other
+ end.
+
+run_test(CommandLine) ->
+ process_flag(trap_exit,true),
+ {SpecList,Name,Param,Trc,Cov,TCCB} = parse_cmd_line(CommandLine),
+ {ok,_TSPid} = start_link(Param),
+ case Trc of
+ false -> ok;
+ File -> trc(File)
+ end,
+ case Cov of
+ false -> ok;
+ {{App,CoverFile},Analyse} -> cover(App, maybe_file(CoverFile), Analyse)
+ end,
+ testcase_callback(TCCB),
+ add_job(Name, {command_line,SpecList}),
+
+ wait_finish().
+
+%% Convert CoverFile to a string unless it is 'none'
+maybe_file(none) ->
+ none;
+maybe_file(CoverFile) ->
+ atom_to_list(CoverFile).
+
+idle_notify(Fun) ->
+ {ok, Pid} = controller_call({idle_notify,Fun}),
+ Pid.
+
+start_get_totals(Fun) ->
+ {ok, Pid} = controller_call({start_get_totals,Fun}),
+ Pid.
+
+stop_get_totals() ->
+ ok = controller_call(stop_get_totals),
+ ok.
+
+wait_finish() ->
+ OldTrap = process_flag(trap_exit, true),
+ {ok, Pid} = finish(true),
+ link(Pid),
+ receive
+ {'EXIT',Pid,_} ->
+ ok
+ end,
+ process_flag(trap_exit, OldTrap),
+ ok.
+
+abort_current_testcase(Reason) ->
+ controller_call({abort_current_testcase,Reason}).
+
+abort() ->
+ OldTrap = process_flag(trap_exit, true),
+ {ok, Pid} = finish(abort),
+ link(Pid),
+ receive
+ {'EXIT',Pid,_} ->
+ ok
+ end,
+ process_flag(trap_exit, OldTrap),
+ ok.
+
+finish(Abort) ->
+ controller_call({finish,Abort}).
+
+stop() ->
+ controller_call(stop).
+
+jobs() ->
+ controller_call(jobs).
+
+get_levels() ->
+ controller_call(get_levels).
+
+set_levels(Show, Major, Minor) ->
+ controller_call({set_levels,Show,Major,Minor}).
+
+reject_io_reqs(Bool) ->
+ controller_call({reject_io_reqs,Bool}).
+
+multiply_timetraps(N) ->
+ controller_call({multiply_timetraps,N}).
+
+scale_timetraps(Bool) ->
+ controller_call({scale_timetraps,Bool}).
+
+get_timetrap_parameters() ->
+ controller_call(get_timetrap_parameters).
+
+create_priv_dir(Value) ->
+ controller_call({create_priv_dir,Value}).
+
+trc(TraceFile) ->
+ controller_call({trace,TraceFile}, 2*?ACCEPT_TIMEOUT).
+
+stop_trace() ->
+ controller_call(stop_trace).
+
+node_started(Node) ->
+ gen_server:cast(?MODULE, {node_started,Node}).
+
+cover(App, Analyse) when is_atom(App) ->
+ cover(App, none, Analyse);
+cover(CoverFile, Analyse) ->
+ cover(none, CoverFile, Analyse).
+cover(App, CoverFile, Analyse) ->
+ {Excl,Incl,Cross} = read_cover_file(CoverFile),
+ CoverInfo = #cover{app=App,
+ file=CoverFile,
+ excl=Excl,
+ incl=Incl,
+ cross=Cross,
+ level=Analyse},
+ controller_call({cover,CoverInfo}).
+
+cover(CoverInfo) ->
+ controller_call({cover,CoverInfo}).
+
+cover_compile(App,File,Excl,Incl,Cross,Analyse,Stop) ->
+ cover_compile(#cover{app=App,
+ file=File,
+ excl=Excl,
+ incl=Incl,
+ cross=Cross,
+ level=Analyse,
+ stop=Stop}).
+
+testcase_callback(ModFunc) ->
+ controller_call({testcase_callback,ModFunc}).
+
+set_random_seed(Seed) ->
+ controller_call({set_random_seed,Seed}).
+
+kill_slavenodes() ->
+ controller_call(kill_slavenodes).
+
+get_hosts() ->
+ get(test_server_hosts).
+
+%%--------------------------------------------------------------------
+
+add_job(Name, TopCase) ->
+ add_job(Name, TopCase, []).
+
+add_job(Name, TopCase, Skip) ->
+ SuiteName =
+ case Name of
+ "." -> "current_dir";
+ ".." -> "parent_dir";
+ Other -> Other
+ end,
+ Dir = filename:absname(SuiteName),
+ controller_call({add_job,Dir,SuiteName,TopCase,Skip}).
+
+controller_call(Arg) ->
+ case catch gen_server:call(?MODULE, Arg, infinity) of
+ {'EXIT',{{badarg,_},{gen_server,call,_}}} ->
+ exit(test_server_ctrl_not_running);
+ {'EXIT',Reason} ->
+ exit(Reason);
+ Other ->
+ Other
+ end.
+controller_call(Arg, Timeout) ->
+ case catch gen_server:call(?MODULE, Arg, Timeout) of
+ {'EXIT',{{badarg,_},{gen_server,call,_}}} ->
+ exit(test_server_ctrl_not_running);
+ {'EXIT',Reason} ->
+ exit(Reason);
+ Other ->
+ Other
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% init([])
+%%
+%% init() is the init function of the test_server's gen_server.
+%%
+init([]) ->
+ case os:getenv("TEST_SERVER_CALL_TRACE") of
+ false ->
+ ok;
+ "" ->
+ ok;
+ TraceSpec ->
+ test_server_sup:call_trace(TraceSpec)
+ end,
+ process_flag(trap_exit, true),
+ %% copy format_exception setting from init arg to application environment
+ case init:get_argument(test_server_format_exception) of
+ {ok,[[TSFE]]} ->
+ application:set_env(test_server, format_exception, list_to_atom(TSFE));
+ _ ->
+ ok
+ end,
+ test_server_sup:cleanup_crash_dumps(),
+ test_server_sup:util_start(),
+ State = #state{jobs=[],finish=false},
+ TI0 = test_server:init_target_info(),
+ TargetHost = test_server_sup:hoststr(),
+ TI = TI0#target_info{host=TargetHost,
+ naming=naming(),
+ master=TargetHost},
+ ets:new(slave_tab, [named_table,set,public,{keypos,2}]),
+ set_hosts([TI#target_info.host]),
+ {ok,State#state{target_info=TI}}.
+
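+%% Pick the distribution flag for nodes started by the test server:
+%% "-name" if the local hostname is fully qualified (contains a dot),
+%% otherwise "-sname".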
+naming() ->
+ case lists:member($., test_server_sup:hoststr()) of
+ true -> "-name";
+ false -> "-sname"
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(kill_slavenodes, From, State) -> Nodes
+%%
+%% Kill all slave nodes that remain after a test case
+%% is completed.
+%%
+handle_call(kill_slavenodes, _From, State) ->
+ Nodes = test_server_node:kill_nodes(),
+ {reply, Nodes, State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({set_hosts, HostList}, From, State) -> ok
+%%
+%% Set the global hostlist.
+%%
+handle_call({set_hosts, Hosts}, _From, State) ->
+ set_hosts(Hosts),
+ {reply, ok, State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(get_hosts, From, State) -> [Hosts]
+%%
+%% Returns the lists of hosts that the test server
+%% can use for slave nodes. This is primarily used
+%% for nodename generation.
+%%
+handle_call(get_hosts, _From, State) ->
+ Hosts = get_hosts(),
+ {reply, Hosts, State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({add_job,Dir,Name,TopCase,Skip}, _, State) ->
+%% ok | {error,Reason}
+%%
+%% Dir = string()
+%% Name = string()
+%% TopCase = term()
+%% Skip = [SkipItem]
+%% SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment}
+%% Mod = Case = atom()
+%% Comment = string()
+%% Cases = [Case]
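+%%
+%% Example Skip list (hypothetical suite names):
+%%   [{my_SUITE,"whole suite skipped"},
+%%    {my_SUITE,tc1,"not applicable"},
+%%    {other_SUITE,[tc2,tc3],"requires special hardware"}]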
+%%
+%% Adds a job to the job queue. The name of the job is Name. A log directory
+%% will be created in Dir/Name.logs. TopCase may be anything that
+%% collect_cases/3 accepts, plus the following:
+%%
+%% {spec,SpecName} executes the named test suite specification file. Commands
+%% in the file should be in the format accepted by do_spec_list/1.
+%%
+%% {command_line,SpecList} executes the list of specification instructions
+%% supplied, which should be in the format accepted by do_spec_list/1.
+
+handle_call({add_job,Dir,Name,TopCase,Skip}, _From, State) ->
+ LogDir = Dir ++ ?logdir_ext,
+ ExtraTools =
+ case State#state.cover of
+ false -> [];
+ CoverInfo -> [{cover,CoverInfo}]
+ end,
+ ExtraTools1 =
+ case State#state.random_seed of
+ undefined -> ExtraTools;
+ Seed -> [{random_seed,Seed}|ExtraTools]
+ end,
+ case lists:keysearch(Name, 1, State#state.jobs) of
+ false ->
+ case TopCase of
+ {spec,SpecName} ->
+ Pid = spawn_tester(
+ ?MODULE, do_spec,
+ [SpecName,{State#state.multiply_timetraps,
+ State#state.scale_timetraps}],
+ LogDir, Name, State#state.levels,
+ State#state.reject_io_reqs,
+ State#state.create_priv_dir,
+ State#state.testcase_callback, ExtraTools1),
+ NewJobs = [{Name,Pid}|State#state.jobs],
+ {reply, ok, State#state{jobs=NewJobs}};
+ {command_line,SpecList} ->
+ Pid = spawn_tester(
+ ?MODULE, do_spec_list,
+ [SpecList,{State#state.multiply_timetraps,
+ State#state.scale_timetraps}],
+ LogDir, Name, State#state.levels,
+ State#state.reject_io_reqs,
+ State#state.create_priv_dir,
+ State#state.testcase_callback, ExtraTools1),
+ NewJobs = [{Name,Pid}|State#state.jobs],
+ {reply, ok, State#state{jobs=NewJobs}};
+ TopCase ->
+ case State#state.get_totals of
+ {CliPid,Fun} ->
+ Result = count_test_cases(TopCase, Skip),
+ Fun(CliPid, Result),
+ {reply, ok, State};
+ _ ->
+ Cfg = make_config([]),
+ Pid = spawn_tester(
+ ?MODULE, do_test_cases,
+ [TopCase,Skip,Cfg,
+ {State#state.multiply_timetraps,
+ State#state.scale_timetraps}],
+ LogDir, Name, State#state.levels,
+ State#state.reject_io_reqs,
+ State#state.create_priv_dir,
+ State#state.testcase_callback, ExtraTools1),
+ NewJobs = [{Name,Pid}|State#state.jobs],
+ {reply, ok, State#state{jobs=NewJobs}}
+ end
+ end;
+ _ ->
+ {reply,{error,name_already_in_use},State}
+ end;
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(jobs, _, State) -> JobList
+%% JobList = [{Name,Pid}, ...]
+%% Name = string()
+%% Pid = pid()
+%%
+%% Return the list of current jobs.
+
+handle_call(jobs, _From, State) ->
+ {reply,State#state.jobs,State};
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({abort_current_testcase,Reason}, _, State) -> Result
+%% Reason = term()
+%% Result = ok | {error,no_testcase_running}
+%%
+%% Attempts to abort the test case that's currently running.
+
+handle_call({abort_current_testcase,Reason}, _From, State) ->
+ case State#state.jobs of
+ [{_,Pid}|_] ->
+ Pid ! {abort_current_testcase,Reason,self()},
+ receive
+ {Pid,abort_current_testcase,Result} ->
+ {reply, Result, State}
+ after 10000 ->
+ {reply, {error,no_testcase_running}, State}
+ end;
+ _ ->
+ {reply, {error,no_testcase_running}, State}
+ end;
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({finish,Fini}, _, State) -> {ok,Pid}
+%% Fini = true | abort
+%%
+%% Tells the test_server to stop as soon as there are no test suites
+%% running (immediately if none are running). An abort is handled as
+%% soon as the current test case finishes.
+
+handle_call({finish,Fini}, _From, State) ->
+ case State#state.jobs of
+ [] ->
+ lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Fini) end,
+ State#state.idle_notify),
+ State2 = State#state{finish=false},
+ {stop,shutdown,{ok,self()}, State2};
+ _SomeJobs ->
+ State2 = State#state{finish=Fini},
+ {reply, {ok,self()}, State2}
+ end;
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({idle_notify,Fun}, From, State) -> {ok,Pid}
+%%
+%% Lets a test client subscribe to receive a notification when the
+%% test server becomes idle (can be used to synchronize jobs).
+%% test_server calls Fun(From) when idle.
+
+handle_call({idle_notify,Fun}, {Cli,_Ref}, State) ->
+ case State#state.jobs of
+ [] -> self() ! report_idle;
+ _ -> ok
+ end,
+ Subscribed = State#state.idle_notify,
+ {reply, {ok,self()}, State#state{idle_notify=[{Cli,Fun}|Subscribed]}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(start_get_totals, From, State) -> {ok,Pid}
+%%
+%% Switch on the mode where the test server will only
+%% report back the number of tests it would execute
+%% given some subsequent jobs.
+
+handle_call({start_get_totals,Fun}, {Cli,_Ref}, State) ->
+ {reply, {ok,self()}, State#state{get_totals={Cli,Fun}}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(stop_get_totals, From, State) -> ok
+%%
+%% Switches off the mode where the test server only reports back
+%% the number of test cases (see start_get_totals above).
+
+handle_call(stop_get_totals, {_Cli,_Ref}, State) ->
+ {reply, ok, State#state{get_totals=false}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(get_levels, _, State) -> {Show,Major,Minor}
+%% Show = integer()
+%% Major = integer()
+%% Minor = integer()
+%%
+%% Returns a 3-tuple with the logging thresholds.
+%% All output and information from a test suite is tagged with a detail
+%% level. Lower values are more "important". Text that is output using
+%% io:format or similar is automatically tagged with detail level 50.
+%%
+%% All output with detail level:
+%% less than or equal to Show is displayed on the screen (default 1)
+%% less than or equal to Major is logged in the major log file (default 19)
+%% greater than or equal to Minor is logged in the minor log files (default 10)
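+%%
+%% Example: with the default levels {1,19,10}, a printout at detail
+%% level 50 (the io:format default) is not shown on screen (50 > 1),
+%% is not written to the major log file (50 > 19), but is written to
+%% the minor log files (50 >= 10).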
+
+handle_call(get_levels, _From, State) ->
+ {reply,State#state.levels,State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({set_levels,Show,Major,Minor}, _, State) -> ok
+%% Show = integer()
+%% Major = integer()
+%% Minor = integer()
+%%
+%% Sets the logging thresholds, see handle_call(get_levels,...) above.
+
+handle_call({set_levels,Show,Major,Minor}, _From, State) ->
+ {reply,ok,State#state{levels={Show,Major,Minor}}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({reject_io_reqs,Bool}, _, State) -> ok
+%% Bool = bool()
+%%
+%% May be used to switch off stdout printouts to the minor log file
+
+handle_call({reject_io_reqs,Bool}, _From, State) ->
+ {reply,ok,State#state{reject_io_reqs=Bool}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({multiply_timetraps,N}, _, State) -> ok
+%% N = integer() | infinity
+%%
+%% Multiplies all timetraps set by test cases with N
+
+handle_call({multiply_timetraps,N}, _From, State) ->
+ {reply,ok,State#state{multiply_timetraps=N}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({scale_timetraps,Bool}, _, State) -> ok
+%% Bool = true | false
+%%
+%% Specifies whether test_server should scale the timetrap value
+%% automatically, e.g. when cover is running.
+
+handle_call({scale_timetraps,Bool}, _From, State) ->
+ {reply,ok,State#state{scale_timetraps=Bool}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(get_timetrap_parameters, _, State) -> {Multiplier,Scale}
+%% Multiplier = integer() | infinity
+%% Scale = true | false
+%%
+%% Returns the parameter values that affect timetraps.
+
+handle_call(get_timetrap_parameters, _From, State) ->
+ {reply,{State#state.multiply_timetraps,State#state.scale_timetraps},State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({trace,TraceFile}, _, State) -> ok | {error,Reason}
+%%
+%% Starts a separate node (trace control node) which
+%% starts tracing on target and all slave nodes
+%%
+%% TraceFile is a text file with elements of type
+%% {Trace,Mod,TracePattern}.
+%% {Trace,Mod,Func,TracePattern}.
+%% {Trace,Mod,Func,Arity,TracePattern}.
+%%
+%% Trace = tp | tpl; local or global call trace
+%% Mod,Func = atom(), Arity=integer(); defines what to trace
+%% TracePattern = [] | match_spec()
+%%
+%% The 'call' trace flag is set on all processes, and then
+%% the given trace patterns are set.
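+%%
+%% Example TraceFile contents (hypothetical modules):
+%%   {tp, my_mod, []}.
+%%   {tpl, my_mod, my_func, [{'_',[],[{return_trace}]}]}.
+%%   {tp, other_mod, other_func, 2, []}.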
+
+handle_call({trace,TraceFile}, _From, State=#state{trc=false}) ->
+ TI = State#state.target_info,
+ case test_server_node:start_tracer_node(TraceFile, TI) of
+ {ok,Tracer} -> {reply,ok,State#state{trc=Tracer}};
+ Error -> {reply,Error,State}
+ end;
+handle_call({trace,_TraceFile}, _From, State) ->
+ {reply,{error,already_tracing},State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(stop_trace, _, State) -> ok | {error,Reason}
+%%
+%% Stops tracing on target and all slave nodes and
+%% terminates trace control node
+
+handle_call(stop_trace, _From, State=#state{trc=false}) ->
+ {reply,{error,not_tracing},State};
+handle_call(stop_trace, _From, State) ->
+ R = test_server_node:stop_tracer_node(State#state.trc),
+ {reply,R,State#state{trc=false}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({cover,CoverInfo}, _, State) -> ok | {error,Reason}
+%%
+%% Set specification of cover analysis to be used when running tests
+%% (see start_extra_tools/1 and stop_extra_tools/1)
+
+handle_call({cover,CoverInfo}, _From, State) ->
+ {reply,ok,State#state{cover=CoverInfo}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({create_priv_dir,Value}, _, State) -> ok | {error,Reason}
+%%
+%% Set create_priv_dir to either auto_per_run (create common priv dir once
+%% per test run), manual_per_tc (the priv dir name will be unique for each
+%% test case, but the user has to call test_server:make_priv_dir/0 to create
+%% it), or auto_per_tc (unique priv dir created automatically for each test
+%% case).
+
+handle_call({create_priv_dir,Value}, _From, State) ->
+ {reply,ok,State#state{create_priv_dir=Value}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({testcase_callback,{Mod,Func}}, _, State) -> ok | {error,Reason}
+%%
+%% Add a callback function that will be called before and after every
+%% test case (on the test case process):
+%%
+%% Mod:Func(Suite,TestCase,InitOrEnd,Config)
+%%
+%% InitOrEnd = init | 'end'.
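+%%
+%% A minimal example callback module (hypothetical):
+%%
+%%   -module(my_tc_cb).
+%%   -export([cb/4]).
+%%
+%%   cb(Suite, Case, init, _Config) ->
+%%       io:format("~w:~w starting~n", [Suite,Case]);
+%%   cb(Suite, Case, 'end', _Config) ->
+%%       io:format("~w:~w done~n", [Suite,Case]).
+%%
+%% installed with testcase_callback({my_tc_cb,cb}).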
+
+handle_call({testcase_callback,ModFunc}, _From, State) ->
+ case ModFunc of
+ {Mod,Func} ->
+ case code:is_loaded(Mod) of
+ {file,_} ->
+ ok;
+ false ->
+ code:load_file(Mod)
+ end,
+ case erlang:function_exported(Mod,Func,4) of
+ true ->
+ ok;
+ false ->
+ io:format(user,
+ "WARNING! Callback function ~w:~w/4 undefined.~n~n",
+ [Mod,Func])
+ end;
+ _ ->
+ ok
+ end,
+ {reply,ok,State#state{testcase_callback=ModFunc}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({set_random_seed,Seed}, _, State) -> ok | {error,Reason}
+%%
+%% Let operator set a random seed value to be used e.g. for shuffling
+%% test cases.
+
+handle_call({set_random_seed,Seed}, _From, State) ->
+ {reply,ok,State#state{random_seed=Seed}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(stop, _, State) -> ok
+%%
+%% Stops the test server immediately.
+%% Some cleanup is done by terminate/2
+
+handle_call(stop, _From, State) ->
+ {stop, shutdown, ok, State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call(get_target_info, _, State) -> TI
+%%
+%% TI = #target_info{}
+%%
+%% Returns information about target
+
+handle_call(get_target_info, _From, State) ->
+ {reply, State#state.target_info, State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({start_node,Name,Type,Options}, _, State) ->
+%% ok | {error,Reason}
+%%
+%% Starts a new node (slave or peer)
+
+handle_call({start_node, Name, Type, Options}, From, State) ->
+ %% test_server_ctrl does gen_server:reply/2 explicitly
+ test_server_node:start_node(Name, Type, Options, From,
+ State#state.target_info),
+ {noreply,State};
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({wait_for_node,Node}, _, State) -> ok
+%%
+%% Waits for a new node to establish contact. Used if the
+%% node is started with option {wait,false}
+
+handle_call({wait_for_node, Node}, From, State) ->
+ NewWaitList =
+ case ets:lookup(slave_tab,Node) of
+ [] ->
+ [{Node,From}|State#state.wait_for_node];
+ _ ->
+ gen_server:reply(From,ok),
+ State#state.wait_for_node
+ end,
+ {noreply,State#state{wait_for_node=NewWaitList}};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({stop_node,Name}, _, State) -> ok | {error,Reason}
+%%
+%% Stops a slave or peer node. This is actually only some cleanup
+%% - the node is really stopped by test_server when this returns.
+
+handle_call({stop_node, Name}, _From, State) ->
+ R = test_server_node:stop_node(Name),
+ {reply, R, State};
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_call({is_release_available,Name}, _, State) -> ok | {error,Reason}
+%%
+%% Tests if the release is available.
+
+handle_call({is_release_available, Release}, _From, State) ->
+ R = test_server_node:is_release_available(Release),
+ {reply, R, State}.
+
+%%--------------------------------------------------------------------
+set_hosts(Hosts) ->
+ put(test_server_hosts, Hosts).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_cast({node_started,Node}, State)
+%%
+%% Called by test_server_node when a slave/peer node is fully started.
+
+handle_cast({node_started,Node}, State) ->
+ case State#state.trc of
+ false -> ok;
+ Trc -> test_server_node:trace_nodes(Trc, [Node])
+ end,
+ NewWaitList =
+ case lists:keysearch(Node,1,State#state.wait_for_node) of
+ {value,{Node,From}} ->
+ gen_server:reply(From, ok),
+ lists:keydelete(Node, 1, State#state.wait_for_node);
+ false ->
+ State#state.wait_for_node
+ end,
+ {noreply, State#state{wait_for_node=NewWaitList}}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_info({'EXIT',Pid,Reason}, State)
+%% Pid = pid()
+%% Reason = term()
+%%
+%% Handles exit messages from linked processes. Only test suites are
+%% expected to be linked. When a test suite terminates, it is removed
+%% from the job queue.
+
+handle_info(report_idle, State) ->
+ Finish = State#state.finish,
+ lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Finish) end,
+ State#state.idle_notify),
+ {noreply,State#state{idle_notify=[]}};
+
+
+handle_info({'EXIT',Pid,Reason}, State) ->
+ case lists:keysearch(Pid,2,State#state.jobs) of
+ false ->
+ %% not our problem
+ {noreply,State};
+ {value,{Name,_}} ->
+ NewJobs = lists:keydelete(Pid, 2, State#state.jobs),
+ case Reason of
+ normal ->
+ fine;
+ killed ->
+ io:format("Suite ~ts was killed\n", [Name]);
+ _Other ->
+ io:format("Suite ~ts was killed with reason ~p\n",
+ [Name,Reason])
+ end,
+ State2 = State#state{jobs=NewJobs},
+ Finish = State2#state.finish,
+ case NewJobs of
+ [] ->
+ lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Finish) end,
+ State2#state.idle_notify),
+ case Finish of
+ false ->
+ {noreply,State2#state{idle_notify=[]}};
+ _ -> % true | abort
+ %% test_server:finish() has been called and
+ %% there are no jobs in the job queue =>
+ %% stop the test_server_ctrl
+ {stop,shutdown,State2#state{finish=false}}
+ end;
+ _ -> % pending jobs
+ case Finish of
+ abort -> % abort test now!
+ lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Finish) end,
+ State2#state.idle_notify),
+ {stop,shutdown,State2#state{finish=false}};
+ _ -> % true | false
+ {noreply, State2}
+ end
+ end
+ end;
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_info({tcp_closed,Sock}, State)
+%%
+%% A socket was closed. This indicates that a node died.
+%% The node can be
+%% * a slave or peer node started by a test suite
+%% * the trace control node
+
+handle_info({tcp_closed,Sock}, State=#state{trc=Sock}) ->
+ %% Tracer node died - can't really do anything
+ %%! Maybe print something???
+ {noreply,State#state{trc=false}};
+handle_info({tcp_closed,Sock}, State) ->
+ test_server_node:nodedown(Sock),
+ {noreply,State};
+handle_info(_, State) ->
+ %% dummy; accept all, do nothing.
+ {noreply, State}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% terminate(Reason, State) -> ok
+%% Reason = term()
+%%
+%% Cleans up when the test_server is terminating. Kills the running
+%% test suites (if any) and any remaining slave nodes
+
+terminate(_Reason, State) ->
+ test_server_sup:util_stop(),
+ case State#state.trc of
+ false -> ok;
+ Sock -> test_server_node:stop_tracer_node(Sock)
+ end,
+ kill_all_jobs(State#state.jobs),
+ test_server_node:kill_nodes(),
+ ok.
+
+kill_all_jobs([{_Name,JobPid}|Jobs]) ->
+ exit(JobPid, kill),
+ kill_all_jobs(Jobs);
+kill_all_jobs([]) ->
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%----------------------- INTERNAL FUNCTIONS -----------------------%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% spawn_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
+%% CreatePrivDir, TestCaseCallback, ExtraTools) -> Pid
+%% Mod = atom()
+%% Func = atom()
+%% Args = [term(),...]
+%% Dir = string()
+%% Name = string()
+%% Levels = {integer(),integer(),integer()}
+%% RejectIoReqs = bool()
+%% CreatePrivDir = auto_per_run | manual_per_tc | auto_per_tc
+%% TestCaseCallback = {CBMod,CBFunc} | undefined
+%% ExtraTools = [ExtraTool,...]
+%% ExtraTool = CoverInfo | TraceInfo | RandomSeed
+%%
+%% Spawns a test suite execution process; just an ordinary spawn, except
+%% that it will set a lot of dictionary information before starting the
+%% named function. Also, the execution is timed and protected by a catch.
+%% When the named function is done executing, a summary of the results
+%% is printed to the log files.
+
+spawn_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
+ CreatePrivDir, TCCallback, ExtraTools) ->
+ spawn_link(fun() ->
+ init_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
+ CreatePrivDir, TCCallback, ExtraTools)
+ end).
+
+init_tester(Mod, Func, Args, Dir, Name, {_,_,MinLev}=Levels,
+ RejectIoReqs, CreatePrivDir, TCCallback, ExtraTools) ->
+ process_flag(trap_exit, true),
+ test_server_io:start_link(),
+ put(test_server_name, Name),
+ put(test_server_dir, Dir),
+ put(test_server_total_time, 0),
+ put(test_server_ok, 0),
+ put(test_server_failed, 0),
+ put(test_server_skipped, {0,0}),
+ put(test_server_minor_level, MinLev),
+ put(test_server_create_priv_dir, CreatePrivDir),
+ put(test_server_random_seed, proplists:get_value(random_seed, ExtraTools)),
+ put(test_server_testcase_callback, TCCallback),
+ case os:getenv("TEST_SERVER_FRAMEWORK") of
+ FW when FW =:= false; FW =:= "undefined" ->
+ put(test_server_framework, '$none');
+ FW ->
+ put(test_server_framework, list_to_atom(FW)),
+ case os:getenv("TEST_SERVER_FRAMEWORK_NAME") of
+ FWName when FWName =:= false; FWName =:= "undefined" ->
+ put(test_server_framework_name, '$none');
+ FWName ->
+ put(test_server_framework_name, list_to_atom(FWName))
+ end
+ end,
+
+ %% before first print, read and set logging options
+ LogOpts = test_server_sup:framework_call(get_logopts, [], []),
+ put(test_server_logopts, LogOpts),
+
+ StartedExtraTools = start_extra_tools(ExtraTools),
+
+ test_server_io:set_job_name(Name),
+ test_server_io:set_gl_props([{levels,Levels},
+ {auto_nl,not lists:member(no_nl, LogOpts)},
+ {reject_io_reqs,RejectIoReqs}]),
+ group_leader(test_server_io:get_gl(true), self()),
+ {TimeMy,Result} = ts_tc(Mod, Func, Args),
+ set_io_buffering(undefined),
+ test_server_io:set_job_name(undefined),
+ catch stop_extra_tools(StartedExtraTools),
+ case Result of
+ {'EXIT',test_suites_done} ->
+ ok;
+ {'EXIT',_Pid,Reason} ->
+ print(1, "EXIT, reason ~p", [Reason]);
+ {'EXIT',Reason} ->
+ report_severe_error(Reason),
+ print(1, "EXIT, reason ~p", [Reason])
+ end,
+ Time = TimeMy/1000000,
+ SuccessStr =
+ case get(test_server_failed) of
+ 0 -> "Ok";
+ _ -> "FAILED"
+ end,
+ {SkippedN,SkipStr} =
+ case get(test_server_skipped) of
+ {0,0} ->
+ {0,""};
+ {USkipped,ASkipped} ->
+ Skipped = USkipped+ASkipped,
+ {Skipped,io_lib:format(", ~w Skipped", [Skipped])}
+ end,
+ OkN = get(test_server_ok),
+ FailedN = get(test_server_failed),
+ print(html,"\n</tbody>\n<tfoot>\n"
+ "<tr><td></td><td><b>TOTAL</b></td><td></td><td></td><td></td>"
+ "<td>~.3fs</td><td><b>~ts</b></td><td>~w Ok, ~w Failed~ts of ~w</td></tr>\n"
+ "</tfoot>\n",
+ [Time,SuccessStr,OkN,FailedN,SkipStr,OkN+FailedN+SkippedN]),
+
+ test_server_io:stop([major,html,unexpected_io]),
+ {UnexpectedIoName,UnexpectedIoFooter} = get(test_server_unexpected_footer),
+ {ok,UnexpectedIoFd} = open_html_file(UnexpectedIoName, [append]),
+ io:put_chars(UnexpectedIoFd, "\n</pre>\n"++UnexpectedIoFooter),
+ file:close(UnexpectedIoFd),
+ ok.
+
+report_severe_error(Reason) ->
+ test_server_sup:framework_call(report, [severe_error,Reason]).
+
+ts_tc(M,F,A) ->
+ Before = erlang:monotonic_time(),
+ Result = (catch apply(M, F, A)),
+ After = erlang:monotonic_time(),
+ Elapsed = erlang:convert_time_unit(After-Before,
+ native,
+ micro_seconds),
+ {Elapsed, Result}.
+
+start_extra_tools(ExtraTools) ->
+ start_extra_tools(ExtraTools, []).
+start_extra_tools([{cover,CoverInfo} | ExtraTools], Started) ->
+ case start_cover(CoverInfo) of
+ {ok,NewCoverInfo} ->
+ start_extra_tools(ExtraTools,[{cover,NewCoverInfo}|Started]);
+ {error,_} ->
+ start_extra_tools(ExtraTools, Started)
+ end;
+start_extra_tools([_ | ExtraTools], Started) ->
+ start_extra_tools(ExtraTools, Started);
+start_extra_tools([], Started) ->
+ Started.
+
+stop_extra_tools(ExtraTools) ->
+ TestDir = get(test_server_log_dir_base),
+ case lists:keymember(cover, 1, ExtraTools) of
+ false ->
+ write_default_coverlog(TestDir);
+ true ->
+ ok
+ end,
+ stop_extra_tools(ExtraTools, TestDir).
+
+stop_extra_tools([{cover,CoverInfo}|ExtraTools], TestDir) ->
+ stop_cover(CoverInfo,TestDir),
+ stop_extra_tools(ExtraTools, TestDir);
+%%stop_extra_tools([_ | ExtraTools], TestDir) ->
+%% stop_extra_tools(ExtraTools, TestDir);
+stop_extra_tools([], _) ->
+ ok.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% do_spec(SpecName, TimetrapSpec) -> {error,Reason} | exit(Result)
+%% SpecName = string()
+%% TimetrapSpec = MultiplyTimetrap | {MultiplyTimetrap,ScaleTimetrap}
+%% MultiplyTimetrap = integer() | infinity
+%% ScaleTimetrap = bool()
+%%
+%% Reads the named test suite specification file, and executes it.
+%%
+%% This function is meant to be called by a process created by
+%% spawn_tester/10, which sets up some necessary dictionary values.
+
+do_spec(SpecName, TimetrapSpec) when is_list(SpecName) ->
+ case file:consult(SpecName) of
+ {ok,TermList} ->
+ do_spec_list(TermList,TimetrapSpec);
+ {error,Reason} ->
+ io:format("Can't open ~ts: ~p\n", [SpecName,Reason]),
+ {error,{cant_open_spec,Reason}}
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% do_spec_list(TermList, TimetrapSpec) -> exit(Result)
+%% TermList = [term()|...]
+%% TimetrapSpec = MultiplyTimetrap | {MultiplyTimetrap,ScaleTimetrap}
+%% MultiplyTimetrap = integer() | infinity
+%% ScaleTimetrap = bool()
+%%
+%% Executes a list of test suite specification commands. The following
+%% commands are available, and may occur zero or more times (if a command
+%% occurs several times, the contents are appended):
+%%
+%% {topcase,TopCase} Specifies top level test goals. TopCase has the syntax
+%% specified by collect_cases/3.
+%%
+%% {skip,Skip} Specifies test cases to skip, and lists requirements that
+%% cannot be granted during the test run. Skip has the syntax specified
+%% by collect_cases/3.
+%%
+%% {nodes,Nodes} Lists node names available to the test suites. Nodes have
+%% the syntax specified by collect_cases/3.
+%%
+%% {require_nodenames, Num} Specifies how many nodenames the test suite will
+%% need. These are automatically generated and inserted into the Config by the
+%% test_server. The caller may specify other hosts to run these nodes on by
+%% using the {hosts, Hosts} option. If there are no hosts specified, all
+%% nodenames will be generated from the local host.
+%%
+%% {hosts, Hosts} Specifies a list of available hosts on which to start
+%% slave nodes. It is used when the {remote, true} option is given to the
+%% test_server:start_node/3 function. Also, if {require_nodenames, Num} is
+%% contained in the TermList, the generated nodenames will be spread over
+%% all hosts given in this Hosts list. The hostnames are given as atoms or
+%% strings.
+%%
+%% {diskless, true} is kept for backwards compatibility and
+%% should not be used. Use a configuration test case instead.
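+%%
+%% Example spec list (hypothetical):
+%%   [{topcase, {dir, "../my_test"}},
+%%    {skip, {my_SUITE, flaky_tc, "unstable"}},
+%%    {require_nodenames, 2},
+%%    {hosts, ["fast_host", "slow_host"]}]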
+%%
+%% This function is meant to be called by a process created by
+%% spawn_tester/10, which sets up some necessary dictionary values.
+
+do_spec_list(TermList0, TimetrapSpec) ->
+ Nodes = [],
+ TermList =
+ case lists:keysearch(hosts, 1, TermList0) of
+ {value, {hosts, Hosts0}} ->
+ Hosts = lists:map(fun(H) -> cast_to_list(H) end, Hosts0),
+ controller_call({set_hosts, Hosts}),
+ lists:keydelete(hosts, 1, TermList0);
+ _ ->
+ TermList0
+ end,
+ DefaultConfig = make_config([{nodes,Nodes}]),
+ {TopCases,SkipList,Config} = do_spec_terms(TermList, [], [], DefaultConfig),
+ do_test_cases(TopCases, SkipList, Config, TimetrapSpec).
+
+do_spec_terms([], TopCases, SkipList, Config) ->
+ {TopCases,SkipList,Config};
+do_spec_terms([{topcase,TopCase}|Terms], TopCases, SkipList, Config) ->
+ do_spec_terms(Terms,[TopCase|TopCases], SkipList, Config);
+do_spec_terms([{skip,Skip}|Terms], TopCases, SkipList, Config) ->
+ do_spec_terms(Terms, TopCases, [Skip|SkipList], Config);
+do_spec_terms([{nodes,Nodes}|Terms], TopCases, SkipList, Config) ->
+ do_spec_terms(Terms, TopCases, SkipList,
+ update_config(Config, {nodes,Nodes}));
+do_spec_terms([{diskless,How}|Terms], TopCases, SkipList, Config) ->
+ do_spec_terms(Terms, TopCases, SkipList,
+ update_config(Config, {diskless,How}));
+do_spec_terms([{config,MoreConfig}|Terms], TopCases, SkipList, Config) ->
+ do_spec_terms(Terms, TopCases, SkipList, Config++MoreConfig);
+do_spec_terms([{default_timeout,Tmo}|Terms], TopCases, SkipList, Config) ->
+ do_spec_terms(Terms, TopCases, SkipList,
+ update_config(Config, {default_timeout,Tmo}));
+
+do_spec_terms([{require_nodenames,NumNames}|Terms], TopCases, SkipList, Config) ->
+ NodeNames0=generate_nodenames(NumNames),
+ NodeNames=lists:delete([], NodeNames0),
+ do_spec_terms(Terms, TopCases, SkipList,
+ update_config(Config, {nodenames,NodeNames}));
+do_spec_terms([Other|Terms], TopCases, SkipList, Config) ->
+ io:format("** WARNING: Spec file contains unknown directive ~p\n",
+ [Other]),
+ do_spec_terms(Terms, TopCases, SkipList, Config).
+
+
+
+generate_nodenames(Num) ->
+ Hosts = case controller_call(get_hosts) of
+ [] ->
+ TI = controller_call(get_target_info),
+ [TI#target_info.host];
+ List ->
+ List
+ end,
+ generate_nodenames2(Num, Hosts, []).
+
+generate_nodenames2(0, _Hosts, Acc) ->
+ Acc;
+generate_nodenames2(N, Hosts, Acc) ->
+ Host=lists:nth((N rem (length(Hosts)))+1, Hosts),
+ Name=list_to_atom(temp_nodename("nod", []) ++ "@" ++ Host),
+ generate_nodenames2(N-1, Hosts, [Name|Acc]).
+
+temp_nodename([], Acc) ->
+ lists:flatten(Acc);
+temp_nodename([Chr|Base], Acc) ->
+ {A,B,C} = ?now,
+ New = [Chr | integer_to_list(Chr bxor A bxor B+A bxor C+B)],
+ temp_nodename(Base, [New|Acc]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% count_test_cases(TopCases, SkipCases) -> {Suites,NoOfCases} | error
+%% TopCases = term() (See collect_cases/3)
+%% SkipCases = term() (See collect_cases/3)
+%% Suites = list()
+%% NoOfCases = integer() | unknown
+%%
+%% Counts the test cases that are about to run and returns that number.
+%% If there's a conf group in TestSpec with a repeat property, the total
+%% number of cases cannot be calculated and NoOfCases = unknown.
+count_test_cases(TopCases, SkipCases) when is_list(TopCases) ->
+ case collect_all_cases(TopCases, SkipCases) of
+ {error,_Why} = Error ->
+ Error;
+ TestSpec ->
+ {get_suites(TestSpec, []),
+ case remove_conf(TestSpec) of
+ {repeats,_} ->
+ unknown;
+ TestSpec1 ->
+ length(TestSpec1)
+ end}
+ end;
+
+count_test_cases(TopCase, SkipCases) ->
+ count_test_cases([TopCase], SkipCases).
+
+
+remove_conf(Cases) ->
+ remove_conf(Cases, [], false).
+
+remove_conf([{conf, _Ref, Props, _MF}|Cases], NoConf, Repeats) ->
+ case get_repeat(Props) of
+ undefined ->
+ remove_conf(Cases, NoConf, Repeats);
+ {_RepType,1} ->
+ remove_conf(Cases, NoConf, Repeats);
+ _ ->
+ remove_conf(Cases, NoConf, true)
+ end;
+remove_conf([{make,_Ref,_MF}|Cases], NoConf, Repeats) ->
+ remove_conf(Cases, NoConf, Repeats);
+remove_conf([{skip_case,{{_M,all},_Cmt},_Mode}|Cases], NoConf, Repeats) ->
+ remove_conf(Cases, NoConf, Repeats);
+remove_conf([{skip_case,{Type,_Ref,_MF,_Cmt}}|Cases],
+ NoConf, Repeats) when Type==conf;
+ Type==make ->
+ remove_conf(Cases, NoConf, Repeats);
+remove_conf([{skip_case,{Type,_Ref,_MF,_Cmt},_Mode}|Cases],
+ NoConf, Repeats) when Type==conf;
+ Type==make ->
+ remove_conf(Cases, NoConf, Repeats);
+remove_conf([C={Mod,error_in_suite,_}|Cases], NoConf, Repeats) ->
+ FwMod = get_fw_mod(?MODULE),
+ if Mod == FwMod ->
+ remove_conf(Cases, NoConf, Repeats);
+ true ->
+ remove_conf(Cases, [C|NoConf], Repeats)
+ end;
+remove_conf([C|Cases], NoConf, Repeats) ->
+ remove_conf(Cases, [C|NoConf], Repeats);
+remove_conf([], NoConf, true) ->
+ {repeats,lists:reverse(NoConf)};
+remove_conf([], NoConf, false) ->
+ lists:reverse(NoConf).
+
+get_suites([{skip_case,{{Mod,_F},_Cmt},_Mode}|Tests], Mods) when is_atom(Mod) ->
+ case add_mod(Mod, Mods) of
+ true -> get_suites(Tests, [Mod|Mods]);
+ false -> get_suites(Tests, Mods)
+ end;
+get_suites([{Mod,_Case}|Tests], Mods) when is_atom(Mod) ->
+ case add_mod(Mod, Mods) of
+ true -> get_suites(Tests, [Mod|Mods]);
+ false -> get_suites(Tests, Mods)
+ end;
+get_suites([{Mod,_Func,_Args}|Tests], Mods) when is_atom(Mod) ->
+ case add_mod(Mod, Mods) of
+ true -> get_suites(Tests, [Mod|Mods]);
+ false -> get_suites(Tests, Mods)
+ end;
+get_suites([_|Tests], Mods) ->
+ get_suites(Tests, Mods);
+
+get_suites([], Mods) ->
+ lists:reverse(Mods).
+
+add_mod(Mod, Mods) ->
+ case string:rstr(atom_to_list(Mod), "_SUITE") of
+ 0 -> false;
+ _ -> % test suite
+ case lists:member(Mod, Mods) of
+ true -> false;
+ false -> true
+ end
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% do_test_cases(TopCases, SkipCases, Config, TimetrapSpec) ->
+%% exit(Result)
+%%
+%% TopCases = term() (See collect_cases/3)
+%% SkipCases = term() (See collect_cases/3)
+%% Config = term() (See collect_cases/3)
+%% TimetrapSpec = MultiplyTimetrap | {MultiplyTimetrap,ScaleTimetrap}
+%% MultiplyTimetrap = integer() | infinity
+%% ScaleTimetrap = bool()
+%%
+%% Initializes and starts the test run, for "ordinary" test suites.
+%% Creates log directories and log files, inserts initial timestamps and
+%% configuration information into the log files.
+%%
+%% This function is meant to be called by a process created by
+%% spawn_tester/10, which sets up some necessary dictionary values.
+do_test_cases(TopCases, SkipCases,
+ Config, MultiplyTimetrap) when is_integer(MultiplyTimetrap);
+ MultiplyTimetrap == infinity ->
+ do_test_cases(TopCases, SkipCases, Config, {MultiplyTimetrap,true});
+
+do_test_cases(TopCases, SkipCases,
+ Config, TimetrapData) when is_list(TopCases),
+ is_tuple(TimetrapData) ->
+ {ok,TestDir} = start_log_file(),
+ FwMod = get_fw_mod(?MODULE),
+ case collect_all_cases(TopCases, SkipCases) of
+ {error,Why} ->
+ print(1, "Error starting: ~p", [Why]),
+ exit(test_suites_done);
+ TestSpec0 ->
+ N = case remove_conf(TestSpec0) of
+ {repeats,_} -> unknown;
+ TS -> length(TS)
+ end,
+ put(test_server_cases, N),
+ put(test_server_case_num, 0),
+
+ TestSpec =
+ add_init_and_end_per_suite(TestSpec0, undefined, undefined, FwMod),
+
+ TI = get_target_info(),
+ print(1, "Starting test~ts",
+ [print_if_known(N, {", ~w test cases",[N]},
+ {" (with repeated test cases)",[]})]),
+ Test = get(test_server_name),
+ TestName = if is_list(Test) ->
+ lists:flatten(io_lib:format("~ts", [Test]));
+ true ->
+ lists:flatten(io_lib:format("~tp", [Test]))
+ end,
+ TestDescr = "Test " ++ TestName ++ " results",
+
+ test_server_sup:framework_call(report, [tests_start,{Test,N}]),
+
+ {Header,Footer} =
+ case test_server_sup:framework_call(get_html_wrapper,
+ [TestDescr,true,TestDir,
+ {[],[2,3,4,7,8],[1,6]}], "") of
+ Empty when (Empty == "") ; (element(2,Empty) == "") ->
+ put(basic_html, true),
+ {[html_header(TestDescr),
+ "<h2>Results for test ", TestName, "</h2>\n"],
+ "\n</body>\n</html>\n"};
+ {basic_html,Html0,Html1} ->
+ put(basic_html, true),
+ {Html0++["<h1>Results for <i>",TestName,"</i></h1>\n"],
+ Html1};
+ {xhtml,Html0,Html1} ->
+ put(basic_html, false),
+ {Html0++["<h1>Results for <i>",TestName,"</i></h1>\n"],
+ Html1}
+ end,
+
+ print(html, Header),
+
+ print(html, xhtml("<p>", "<h4>")),
+ print_timestamp(html, "Test started at "),
+ print(html, xhtml("</p>", "</h4>")),
+
+ print(html, xhtml("\n<p><b>Host info:</b><br>\n",
+ "\n<p><b>Host info:</b><br />\n")),
+ print_who(test_server_sup:hoststr(), test_server_sup:get_username()),
+ print(html, xhtml("<br>Used Erlang v~ts in <tt>~ts</tt></p>\n",
+ "<br />Used Erlang v~ts in \"~ts\"</p>\n"),
+ [erlang:system_info(version), code:root_dir()]),
+
+ if FwMod == ?MODULE ->
+ print(html, xhtml("\n<p><b>Target Info:</b><br>\n",
+ "\n<p><b>Target Info:</b><br />\n")),
+ print_who(TI#target_info.host, TI#target_info.username),
+ print(html,xhtml("<br>Used Erlang v~ts in <tt>~ts</tt></p>\n",
+ "<br />Used Erlang v~ts in \"~ts\"</p>\n"),
+ [TI#target_info.version, TI#target_info.root_dir]);
+ true ->
+ case test_server_sup:framework_call(target_info, []) of
+ TargetInfo when is_list(TargetInfo),
+ length(TargetInfo) > 0 ->
+ print(html, xhtml("\n<p><b>Target info:</b><br>\n",
+ "\n<p><b>Target info:</b><br />\n")),
+ print(html, "~ts</p>\n", [TargetInfo]);
+ _ ->
+ ok
+ end
+ end,
+ CoverLog =
+ case get(test_server_cover_log_dir) of
+ undefined ->
+ ?coverlog_name;
+ AbsLogDir ->
+ AbsLog = filename:join(AbsLogDir,?coverlog_name),
+ make_relative(AbsLog, TestDir)
+ end,
+ print(html,
+ "<p><ul>\n"
+ "<li><a href=\"~ts\">Full textual log</a></li>\n"
+ "<li><a href=\"~ts\">Coverage log</a></li>\n"
+ "<li><a href=\"~ts\">Unexpected I/O log</a></li>\n</ul></p>\n",
+ [?suitelog_name,CoverLog,?unexpected_io_log]),
+ print(html,
+ "<p>~ts</p>\n" ++
+ xhtml(["<table bgcolor=\"white\" border=\"3\" cellpadding=\"5\">\n",
+ "<thead>\n"],
+ ["<table id=\"",?sortable_table_name,"\">\n",
+ "<thead>\n"]) ++
+ "<tr><th>Num</th><th>Module</th><th>Group</th>" ++
+ "<th>Case</th><th>Log</th><th>Time</th><th>Result</th>" ++
+ "<th>Comment</th></tr>\n</thead>\n<tbody>\n",
+ [print_if_known(N, {"<i>Executing <b>~w</b> test cases...</i>"
+ ++ xhtml("\n<br>\n", "\n<br />\n"),[N]},
+ {"",[]})]),
+
+ print(major, "=cases ~w", [get(test_server_cases)]),
+ print(major, "=user ~ts", [TI#target_info.username]),
+ print(major, "=host ~ts", [TI#target_info.host]),
+
+ %% If there are no hosts specified, use only the local host
+ case controller_call(get_hosts) of
+ [] ->
+ print(major, "=hosts ~ts", [TI#target_info.host]),
+ controller_call({set_hosts, [TI#target_info.host]});
+ Hosts ->
+ Str = lists:flatten(lists:map(fun(X) -> [X," "] end, Hosts)),
+ print(major, "=hosts ~ts", [Str])
+ end,
+ print(major, "=emulator_vsn ~ts", [TI#target_info.version]),
+ print(major, "=emulator ~ts", [TI#target_info.emulator]),
+ print(major, "=otp_release ~ts", [TI#target_info.otp_release]),
+ print(major, "=started ~s",
+ [lists:flatten(timestamp_get(""))]),
+
+ test_server_io:set_footer(Footer),
+
+ run_test_cases(TestSpec, Config, TimetrapData)
+ end;
+
+do_test_cases(TopCase, SkipCases, Config, TimetrapSpec) ->
+ %% when not list(TopCase)
+ do_test_cases([TopCase], SkipCases, Config, TimetrapSpec).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% start_log_file() -> {ok,TestDirName} | exit({Error,Reason})
+%% Stem = string()
+%%
+%% Creates the log directories, the major log file and the html log file.
+%% The log files are initialized with some header information.
+%%
+%% The name of the log directory will be <Name>.logs/run.<Date>/ where
+%% Name is the test suite name and Date is the current date and time.
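+%%
+%% Example (hypothetical run): a suite named "my_test" started at noon
+%% on 1 June 2015 would get a directory like
+%% "my_test.logs/run.2015-06-01_12.00.00/".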
+
+start_log_file() ->
+ Dir = get(test_server_dir),
+ case file:make_dir(Dir) of
+ ok ->
+ ok;
+ {error, eexist} ->
+ ok;
+ MkDirError ->
+ log_file_error(MkDirError, Dir)
+ end,
+ TestDir = timestamp_filename_get(filename:join(Dir, "run.")),
+ TestDir1 =
+ case file:make_dir(TestDir) of
+ ok ->
+ TestDir;
+ {error,eexist} ->
+ timer:sleep(1000),
+ %% we need at least 1 second between timestamps, unfortunately
+ TestDirX = timestamp_filename_get(filename:join(Dir, "run.")),
+ case file:make_dir(TestDirX) of
+ ok ->
+ TestDirX;
+ MkDirError2 ->
+ log_file_error(MkDirError2, TestDirX)
+ end;
+ MkDirError2 ->
+ log_file_error(MkDirError2, TestDir)
+ end,
+ FilenameMode = file:native_name_encoding(),
+ ok = write_file(filename:join(Dir, ?last_file),
+ TestDir1 ++ "\n",
+ FilenameMode),
+ ok = write_file(?last_file, TestDir1 ++ "\n", FilenameMode),
+ put(test_server_log_dir_base,TestDir1),
+
+ MajorName = filename:join(TestDir1, ?suitelog_name),
+ HtmlName = MajorName ++ ?html_ext,
+ UnexpectedName = filename:join(TestDir1, ?unexpected_io_log),
+
+ {ok,Major} = open_utf8_file(MajorName),
+ {ok,Html} = open_html_file(HtmlName),
+
+ {UnexpHeader,UnexpFooter} =
+ case test_server_sup:framework_call(get_html_wrapper,
+ ["Unexpected I/O log",false,
+ TestDir, undefined],"") of
+ UEmpty when (UEmpty == "") ; (element(2,UEmpty) == "") ->
+ {html_header("Unexpected I/O log"),"\n</body>\n</html>\n"};
+ {basic_html,UH,UF} ->
+ {UH,UF};
+ {xhtml,UH,UF} ->
+ {UH,UF}
+ end,
+
+ {ok,Unexpected} = open_html_file(UnexpectedName),
+ io:put_chars(Unexpected, [UnexpHeader,
+ xhtml("<br>\n<h2>Unexpected I/O</h2>",
+ "<br />\n<h3>Unexpected I/O</h3>"),
+ "\n<pre>\n"]),
+ put(test_server_unexpected_footer,{UnexpectedName,UnexpFooter}),
+
+ test_server_io:set_fd(major, Major),
+ test_server_io:set_fd(html, Html),
+ test_server_io:set_fd(unexpected_io, Unexpected),
+
+ make_html_link(filename:absname(?last_test ++ ?html_ext),
+ HtmlName, filename:basename(Dir)),
+ LinkName = filename:join(Dir, ?last_link),
+ make_html_link(LinkName ++ ?html_ext, HtmlName,
+ filename:basename(Dir)),
+
+ PrivDir = filename:join(TestDir1, ?priv_dir),
+ ok = file:make_dir(PrivDir),
+ put(test_server_priv_dir,PrivDir++"/"),
+ print_timestamp(major, "Suite started at "),
+
+ LogInfo = [{topdir,Dir},{rundir,lists:flatten(TestDir1)}],
+ test_server_sup:framework_call(report, [loginfo,LogInfo]),
+ {ok,TestDir1}.
+
+log_file_error(Error, Dir) ->
+ exit({cannot_create_log_dir,{Error,lists:flatten(Dir)}}).
+
+make_html_link(LinkName, Target, Explanation) ->
+ %% if possible use a relative reference to Target.
+ TargetL = filename:split(Target),
+ PwdL = filename:split(filename:dirname(LinkName)),
+ Href = case lists:prefix(PwdL, TargetL) of
+ true ->
+ uri_encode(filename:join(lists:nthtail(length(PwdL),TargetL)));
+ false ->
+ "file:" ++ uri_encode(Target)
+ end,
+ H = [html_header(Explanation),
+ "<h1>Last test</h1>\n"
+ "<a href=\"",Href,"\">",Explanation,"</a>\n"
+ "</body>\n</html>\n"],
+ ok = write_html_file(LinkName, H).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% start_minor_log_file(Mod, Func, ParallelTC) -> AbsName
+%% Mod = atom()
+%% Func = atom()
+%% ParallelTC = bool()
+%% AbsName = string()
+%%
+%% Create a minor log file for the test case Mod,Func,Args. The log file
+%% will be stored in the log directory under the name <Mod>.<Func>.html.
+%% Some header info will also be inserted into the log file. If the test
+%% case runs in a parallel group, then to avoid clashing file names if the
+%% case is executed more than once, the name <Mod>.<Func>.<Timestamp>.html
+%% is used.
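+%%
+%% Example (hypothetical): my_SUITE:tc1 is normally logged in
+%% "my_suite.tc1.html"; in a parallel group a unique tag is appended,
+%% e.g. "my_suite.tc1.<Tag>.html".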
+
+start_minor_log_file(Mod, Func, ParallelTC) ->
+ MFA = {Mod,Func,1},
+ LogDir = get(test_server_log_dir_base),
+ Name0 = lists:flatten(io_lib:format("~w.~w~ts", [Mod,Func,?html_ext])),
+ Name = downcase(Name0),
+ AbsName = filename:join(LogDir, Name),
+ case (ParallelTC orelse (element(1,file:read_file_info(AbsName))==ok)) of
+ false -> %% normal case, unique name
+ start_minor_log_file1(Mod, Func, LogDir, AbsName, MFA);
+ true -> %% special case, duplicate names
+ Tag = test_server_sup:unique_name(),
+ Name1_0 =
+ lists:flatten(io_lib:format("~w.~w.~ts~ts", [Mod,Func,Tag,
+ ?html_ext])),
+ Name1 = downcase(Name1_0),
+ AbsName1 = filename:join(LogDir, Name1),
+ start_minor_log_file1(Mod, Func, LogDir, AbsName1, MFA)
+ end.
+
+start_minor_log_file1(Mod, Func, LogDir, AbsName, MFA) ->
+ {ok,Fd} = open_html_file(AbsName),
+ Lev = get(test_server_minor_level)+1000, %% far down in the minor levels
+ put(test_server_minor_fd, Fd),
+ test_server_gl:set_minor_fd(group_leader(), Fd, MFA),
+
+ TestDescr = io_lib:format("Test ~w:~w result", [Mod,Func]),
+ {Header,Footer} =
+ case test_server_sup:framework_call(get_html_wrapper,
+ [TestDescr,false,
+ filename:dirname(AbsName),
+ undefined], "") of
+ Empty when (Empty == "") ; (element(2,Empty) == "") ->
+ put(basic_html, true),
+ {html_header(TestDescr), "\n</body>\n</html>\n"};
+ {basic_html,Html0,Html1} ->
+ put(basic_html, true),
+ {Html0,Html1};
+ {xhtml,Html0,Html1} ->
+ put(basic_html, false),
+ {Html0,Html1}
+ end,
+ put(test_server_minor_footer, Footer),
+ io:put_chars(Fd, Header),
+
+ io:put_chars(Fd, "<a name=\"top\"></a>"),
+ io:put_chars(Fd, "<pre>\n"),
+
+ SrcListing = downcase(atom_to_list(Mod)) ++ ?src_listing_ext,
+
+ case get_fw_mod(?MODULE) of
+ Mod when Func == error_in_suite ->
+ ok;
+ _ ->
+ {Info,Arity} =
+ if Func == init_per_suite; Func == end_per_suite ->
+ {"Config function: ", 1};
+ Func == init_per_group; Func == end_per_group ->
+ {"Config function: ", 2};
+ true ->
+ {"Test case: ", 1}
+ end,
+
+ case {filelib:is_file(filename:join(LogDir, SrcListing)),
+ lists:member(no_src, get(test_server_logopts))} of
+ {true,false} ->
+ print(Lev, ["$tc_html",
+ Info ++ "<a href=\"~ts#~ts\">~w:~w/~w</a> "
+ "(click for source code)\n"],
+ [uri_encode(SrcListing),
+ uri_encode(atom_to_list(Func)++"-1",utf8),
+ Mod,Func,Arity]);
+ _ ->
+ print(Lev, ["$tc_html",Info ++ "~w:~w/~w\n"], [Mod,Func,Arity])
+ end
+ end,
+
+ AbsName.
+
+stop_minor_log_file() ->
+ test_server_gl:unset_minor_fd(group_leader()),
+ Fd = get(test_server_minor_fd),
+ Footer = get(test_server_minor_footer),
+ io:put_chars(Fd, "</pre>\n" ++ Footer),
+ ok = file:close(Fd),
+ put(test_server_minor_fd, undefined).
+
+downcase(S) -> downcase(S, []).
+downcase([Uc|Rest], Result) when $A =< Uc, Uc =< $Z ->
+ downcase(Rest, [Uc-$A+$a|Result]);
+downcase([C|Rest], Result) ->
+ downcase(Rest, [C|Result]);
+downcase([], Result) ->
+ lists:reverse(Result).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% html_convert_modules(TestSpec, Config) -> ok
+%% Isolate the modules affected by TestSpec and
+%% make sure they are converted to html.
+%%
+%% Errors are silently ignored.
+
+html_convert_modules(TestSpec, _Config, FwMod) ->
+ Mods = html_isolate_modules(TestSpec, FwMod),
+ html_convert_modules(Mods),
+ copy_html_files(get(test_server_dir), get(test_server_log_dir_base)).
+
+%% Retrieve a list of modules out of the test spec.
+html_isolate_modules(List, FwMod) ->
+ html_isolate_modules(List, sets:new(), FwMod).
+
+html_isolate_modules([], Set, _) -> sets:to_list(Set);
+html_isolate_modules([{skip_case,{_Case,_Cmt},_Mode}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, Set, FwMod);
+html_isolate_modules([{conf,_Ref,Props,{FwMod,_Func}}|Cases], Set, FwMod) ->
+ Set1 = case proplists:get_value(suite, Props) of
+ undefined -> Set;
+ Mod -> sets:add_element(Mod, Set)
+ end,
+ html_isolate_modules(Cases, Set1, FwMod);
+html_isolate_modules([{conf,_Ref,_Props,{Mod,_Func}}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod);
+html_isolate_modules([{skip_case,{conf,_Ref,{FwMod,_Func},_Cmt},Mode}|Cases],
+ Set, FwMod) ->
+ Set1 = case proplists:get_value(suite, get_props(Mode)) of
+ undefined -> Set;
+ Mod -> sets:add_element(Mod, Set)
+ end,
+ html_isolate_modules(Cases, Set1, FwMod);
+html_isolate_modules([{skip_case,{conf,_Ref,{Mod,_Func},_Cmt},_Props}|Cases],
+ Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod);
+html_isolate_modules([{Mod,_Case}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod);
+html_isolate_modules([{Mod,_Case,_Args}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod).
+
+%% Given a list of modules, convert each module's source code to HTML.
+html_convert_modules([Mod|Mods]) ->
+ case code:which(Mod) of
+ Path when is_list(Path) ->
+ SrcFile = filename:rootname(Path) ++ ".erl",
+ FoundSrcFile =
+ case file:read_file_info(SrcFile) of
+ {ok,SInfo} ->
+ {SrcFile,SInfo};
+ {error,_} ->
+ ModInfo = Mod:module_info(compile),
+ case proplists:get_value(source, ModInfo) of
+ undefined ->
+ undefined;
+ OtherSrcFile ->
+ case file:read_file_info(OtherSrcFile) of
+ {ok,SInfo} ->
+ {OtherSrcFile,SInfo};
+ {error,_} ->
+ undefined
+ end
+ end
+ end,
+ case FoundSrcFile of
+ undefined ->
+ html_convert_modules(Mods);
+ {SrcFile1,SrcFileInfo} ->
+ DestDir = get(test_server_dir),
+ Name = atom_to_list(Mod),
+ DestFile = filename:join(DestDir,
+ downcase(Name)++?src_listing_ext),
+ html_possibly_convert(SrcFile1, SrcFileInfo, DestFile),
+ html_convert_modules(Mods)
+ end;
+ _Other ->
+ html_convert_modules(Mods)
+ end;
+html_convert_modules([]) -> ok.
+
+%% Convert source code to HTML if possible and needed.
+html_possibly_convert(Src, SrcInfo, Dest) ->
+ case file:read_file_info(Dest) of
+ {ok,DestInfo} when DestInfo#file_info.mtime >= SrcInfo#file_info.mtime ->
+ ok; % dest file up to date
+ _ ->
+ InclPath = case application:get_env(test_server, include) of
+ {ok,Incls} -> Incls;
+ _ -> []
+ end,
+
+ OutDir = get(test_server_log_dir_base),
+ case test_server_sup:framework_call(get_html_wrapper,
+ ["Module "++Src,false,
+ OutDir,undefined,
+ encoding(Src)], "") of
+ Empty when (Empty == "") ; (element(2,Empty) == "") ->
+ erl2html2:convert(Src, Dest, InclPath);
+ {_,Header,_} ->
+ erl2html2:convert(Src, Dest, InclPath, Header)
+ end
+ end.
+
+%% Copy all HTML files in InDir to OutDir.
+copy_html_files(InDir, OutDir) ->
+ Files = filelib:wildcard(filename:join(InDir, "*" ++ ?src_listing_ext)),
+ lists:foreach(fun (Src) -> copy_html_file(Src, OutDir) end, Files).
+
+copy_html_file(Src, DestDir) ->
+ Dest = filename:join(DestDir, filename:basename(Src)),
+ case file:read_file(Src) of
+ {ok,Bin} ->
+ ok = write_binary_file(Dest, Bin);
+ {error,_Reason} ->
+ io:format("File ~ts: read failed\n", [Src])
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% add_init_and_end_per_suite(TestSpec, Mod, Ref, FwMod) -> NewTestSpec
+%%
+%% Expands TestSpec with an initial init_per_suite, and a final
+%% end_per_suite element, per each discovered suite in the list.
+
+add_init_and_end_per_suite([{make,_,_}=Case|Cases], LastMod, LastRef, FwMod) ->
+ [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)];
+add_init_and_end_per_suite([{skip_case,{{Mod,all},_},_}=Case|Cases], LastMod,
+ LastRef, FwMod) when Mod =/= LastMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+add_init_and_end_per_suite([{skip_case,{{Mod,_},_Cmt},_Mode}=Case|Cases],
+ LastMod, LastRef, FwMod) when Mod =/= LastMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+add_init_and_end_per_suite([{skip_case,{conf,_,{Mod,_},_},_}=Case|Cases],
+ LastMod, LastRef, FwMod) when Mod =/= LastMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+add_init_and_end_per_suite([{skip_case,{conf,_,{Mod,_},_}}=Case|Cases], LastMod,
+ LastRef, FwMod) when Mod =/= LastMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+add_init_and_end_per_suite([{conf,Ref,Props,{FwMod,Func}}=Case|Cases], LastMod,
+ LastRef, FwMod) ->
+ %% if Mod == FwMod, this conf test is (probably) a test case group where
+ %% the init- and end-functions are missing in the suite, and if so,
+ %% the suite name should be stored as {suite,Suite} in Props
+ case proplists:get_value(suite, Props) of
+ Suite when Suite =/= undefined, Suite =/= LastMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Suite, FwMod),
+ Case1 = {conf,Ref,[{suite,NextMod}|proplists:delete(suite,Props)],
+ {FwMod,Func}},
+ PreCases ++ [Case1|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+ _ ->
+ [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]
+ end;
+add_init_and_end_per_suite([{conf,_,_,{Mod,_}}=Case|Cases], LastMod,
+ LastRef, FwMod) when Mod =/= LastMod, Mod =/= FwMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+add_init_and_end_per_suite([SkipCase|Cases], LastMod, LastRef, FwMod)
+ when element(1,SkipCase) == skip_case; element(1,SkipCase) == auto_skip_case->
+ [SkipCase|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)];
+add_init_and_end_per_suite([{conf,_,_,_}=Case|Cases], LastMod, LastRef, FwMod) ->
+ [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)];
+add_init_and_end_per_suite([{Mod,_}=Case|Cases], LastMod, LastRef, FwMod)
+ when Mod =/= LastMod, Mod =/= FwMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+add_init_and_end_per_suite([{Mod,_,_}=Case|Cases], LastMod, LastRef, FwMod)
+ when Mod =/= LastMod, Mod =/= FwMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod,
+ NextRef, FwMod)];
+add_init_and_end_per_suite([Case|Cases], LastMod, LastRef, FwMod)->
+ [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)];
+add_init_and_end_per_suite([], _LastMod, undefined, _FwMod) ->
+ [];
+add_init_and_end_per_suite([], _LastMod, skipped_suite, _FwMod) ->
+ [];
+add_init_and_end_per_suite([], LastMod, LastRef, FwMod) ->
+ %% we'll add end_per_suite here even if it's not exported
+ %% (and simply let the call fail if it's missing)
+ case erlang:function_exported(LastMod, end_per_suite, 1) of
+ true ->
+ [{conf,LastRef,[],{LastMod,end_per_suite}}];
+ false ->
+ %% let's call a "fake" end_per_suite if it exists
+ case erlang:function_exported(FwMod, end_per_suite, 1) of
+ true ->
+ [{conf,LastRef,[{suite,LastMod}],{FwMod,end_per_suite}}];
+ false ->
+ [{conf,LastRef,[],{LastMod,end_per_suite}}]
+ end
+ end.
+
+do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod) ->
+ case code:is_loaded(Mod) of
+ false -> code:load_file(Mod);
+ _ -> ok
+ end,
+ {Init,NextMod,NextRef} =
+ case erlang:function_exported(Mod, init_per_suite, 1) of
+ true ->
+ Ref = make_ref(),
+ {[{conf,Ref,[],{Mod,init_per_suite}}],Mod,Ref};
+ false ->
+ %% let's call a "fake" init_per_suite if it exists
+ case erlang:function_exported(FwMod, init_per_suite, 1) of
+ true ->
+ Ref = make_ref(),
+ {[{conf,Ref,[{suite,Mod}],
+ {FwMod,init_per_suite}}],Mod,Ref};
+ false ->
+ {[],Mod,undefined}
+ end
+
+ end,
+ Cases =
+ if LastRef==undefined ->
+ Init;
+ LastRef==skipped_suite ->
+ Init;
+ true ->
+ %% we'll add end_per_suite here even if it's not exported
+ %% (and simply let the call fail if it's missing)
+ case erlang:function_exported(LastMod, end_per_suite, 1) of
+ true ->
+ [{conf,LastRef,[],{LastMod,end_per_suite}}|Init];
+ false ->
+ %% let's call a "fake" end_per_suite if it exists
+ case erlang:function_exported(FwMod, end_per_suite, 1) of
+ true ->
+ [{conf,LastRef,[{suite,Mod}],
+ {FwMod,end_per_suite}}|Init];
+ false ->
+ [{conf,LastRef,[],{LastMod,end_per_suite}}|Init]
+ end
+ end
+ end,
+ {Cases,NextMod,NextRef}.
+
+do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod) ->
+ case LastRef of
+ No when No==undefined ; No==skipped_suite ->
+ {[],Mod,skipped_suite};
+ _Ref ->
+ case erlang:function_exported(LastMod, end_per_suite, 1) of
+ true ->
+ {[{conf,LastRef,[],{LastMod,end_per_suite}}],
+ Mod,skipped_suite};
+ false ->
+ case erlang:function_exported(FwMod, end_per_suite, 1) of
+ true ->
+ %% let's call "fake" end_per_suite if it exists
+ {[{conf,LastRef,[],{FwMod,end_per_suite}}],
+ Mod,skipped_suite};
+ false ->
+ {[{conf,LastRef,[],{LastMod,end_per_suite}}],
+ Mod,skipped_suite}
+ end
+ end
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% run_test_cases(TestSpec, Config, TimetrapData) -> exit(Result)
+%%
+%% Runs the specified tests, then displays/logs the summary.
+
+run_test_cases(TestSpec, Config, TimetrapData) ->
+ test_server:init_purify(),
+ case lists:member(no_src, get(test_server_logopts)) of
+ true ->
+ ok;
+ false ->
+ FwMod = get_fw_mod(?MODULE),
+ html_convert_modules(TestSpec, Config, FwMod)
+ end,
+
+ run_test_cases_loop(TestSpec, [Config], TimetrapData, [], []),
+
+ {AllSkippedN,UserSkipN,AutoSkipN,SkipStr} =
+ case get(test_server_skipped) of
+ {0,0} -> {0,0,0,""};
+ {US,AS} -> {US+AS,US,AS,io_lib:format(", ~w skipped", [US+AS])}
+ end,
+ OkN = get(test_server_ok),
+ FailedN = get(test_server_failed),
+ print(1, "TEST COMPLETE, ~w ok, ~w failed~ts of ~w test cases\n",
+ [OkN,FailedN,SkipStr,OkN+FailedN+AllSkippedN]),
+ test_server_sup:framework_call(report, [tests_done,
+ {OkN,FailedN,{UserSkipN,AutoSkipN}}]),
+ print(major, "=finished ~s", [lists:flatten(timestamp_get(""))]),
+ print(major, "=failed ~w", [FailedN]),
+ print(major, "=successful ~w", [OkN]),
+ print(major, "=user_skipped ~w", [UserSkipN]),
+ print(major, "=auto_skipped ~w", [AutoSkipN]),
+ exit(test_suites_done).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% run_test_cases_loop(TestCases, Config, TimetrapData, Mode, Status) -> ok
+%% TestCases = [Test,...]
+%% Config = [[{Key,Val},...],...]
+%% TimetrapData = {MultiplyTimetrap,ScaleTimetrap}
+%% MultiplyTimetrap = integer() | infinity
+%% ScaleTimetrap = bool()
+%% Mode = [{Ref,[Prop,..],StartTime}]
+%% Ref = reference()
+%% Prop = {name,Name} | sequence | parallel |
+%% shuffle | {shuffle,Seed} |
+%% repeat | {repeat,N} |
+%% repeat_until_all_ok | {repeat_until_all_ok,N} |
+%% repeat_until_any_ok | {repeat_until_any_ok,N} |
+%% repeat_until_any_fail | {repeat_until_any_fail,N} |
+%% repeat_until_all_fail | {repeat_until_all_fail,N}
+%% Status = [{Ref,{{Ok,Skipped,Failed},CopiedCases}}]
+%% Ok = Skipped = Failed = [Case,...]
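+%%
+%% Illustrative Mode value while executing a parallel group g2 nested
+%% under a sequence group g1:
+%%
+%%   [{Ref2,[parallel,{name,g2}],T2}, {Ref1,[sequence,{name,g1}],T1}]
+%%
+%% where T1 and T2 are the group start times.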
+%%
+%% Execute the TestCases under configuration Config. Config is a list
+%% of lists, where hd(Config) holds the config tuples for the current
+%% conf case and tl(Config) is the data for the higher level conf cases.
+%% Config data is "inherited" from top to nested conf cases, but
+%% never the other way around. If length(Config) == 1, Config contains
+%% only the initial config data for the suite.
+%%
+%% Test may be one of the following:
+%%
+%% {conf,Ref,Props,{Mod,Func}} Mod:Func is a configuration modification
+%% function, call it with the current configuration as argument. It will
+%% return a new configuration.
+%%
+%% {make,Ref,{Mod,Func,Args}} Mod:Func is a make function, and it is called
+%% with the given arguments.
+%%
+%% {Mod,Case} This is a normal test case. Determine the correct
+%% configuration, and insert {Mod,Case,Config} as head of the list,
+%% then reiterate.
+%%
+%% {Mod,Case,Args} A test case with predefined arguments (usually a normal
+%% test case that has just received a fresh configuration; see above).
+%%
+%% {skip_case,{conf,Ref,Case,Comment}} An init conf case gets skipped
+%% by the user. This will also cause the end conf case to be skipped.
+%% Note that it is not possible to skip an end conf case directly (it
+%% can only be skipped indirectly by a skipped init conf case). The
+%% comment (which gets printed in the log files) describes why the case
+%% was skipped.
+%%
+%% {skip_case,{Case,Comment},Mode} A normal test case skipped by the user.
+%% The comment (which gets printed in the log files) describes why the
+%% case was skipped.
+%%
+%% {auto_skip_case,{conf,Ref,Case,Comment},Mode} This is the result of
+%% an end conf case being automatically skipped due to a failing init
+%% conf case. It could also be a nested conf case that gets skipped
+%% because of a failed or skipped top level conf.
+%%
+%% {auto_skip_case,{Case,Comment},Mode} This is a normal test case which
+%% gets automatically skipped because of a failing init conf case or
+%% because of a failing previous test case in a sequence.
+%%
+%% -------------------------------------------------------------------
+%% Description of IO handling during execution of parallel test cases:
+%% -------------------------------------------------------------------
+%%
+%% A conf group can have an associated list of properties. If the
+%% parallel property is specified for a group, it means the test cases
+%% should be spawned and run in parallel rather than called sequentially
+%% (which is always the default mode). Test cases that execute in parallel
+%% also write to their respective minor log files in parallel. Printouts
+%% to common log files, such as the summary html file and the major log
+%% file in text format, still have to be processed sequentially. For this
+%% reason, the Mode argument specifies if a parallel group is currently
+%% being executed.
+%%
+%% The low-level mechanism for buffering IO for the common log files
+%% is handled by the test_server_io module. Buffering is turned on by
+%% test_server_io:start_transaction/0 and off by calling
+%% test_server_io:end_transaction/0. The buffered data for the transaction
+%% can be printed by calling test_server_io:print_buffered/1.
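+%%
+%% Sketch of the transaction pattern as used in this module:
+%%
+%%   test_server_io:start_transaction(),  %% begin buffering printouts
+%%   ... execute and print ...
+%%   test_server_io:end_transaction(),    %% stop buffering
+%%   ...
+%%   test_server_io:print_buffered(Pid).  %% later: flush Pid's output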
+%%
+%% This module is responsible for turning on IO buffering and for later
+%% calling test_server_io:print_buffered/1 to print the data. To help
+%% with this, two variables in the process dictionary are used:
+%% 'test_server_common_io_handler' and 'test_server_queued_io'. The values
+%% are set as follows:
+%%
+%% Value Meaning
+%% ----- -------
+%% undefined No parallel test cases running
+%% {tc,Pid} Running test cases in a top-level parallel group
+%% {Ref,Pid} Running sequential test case inside a parallel group
+%%
+%% FIXME: The Pid is no longer used.
+%%
+%% When a conf group nested under a parallel group in the test
+%% specification is started, the 'test_server_common_io_handler'
+%% value is also set on the main process.
+%%
+%% During execution of a parallel group (or of a group nested under a
+%% parallel group), *any* new test case being started gets registered
+%% in a list saved in the dictionary with 'test_server_queued_io' as key.
+%% When the top level parallel group is finished (only then can we be
+%% sure all parallel test cases have finished and "reported in"), the
+%% list of test cases is traversed in order, and test_server_io:print_buffered/1
+%% is called for each test case. See handle_test_case_io_and_status/0
+%% for details.
+%%
+%% To be able to handle nested conf groups with different properties,
+%% the Mode argument specifies a list of {Ref,Properties} tuples.
+%% The head of the Mode list at any given time identifies the group
+%% currently being processed. The tail of the list identifies groups
+%% on higher level.
+%%
+%% -------------------------------------------------------------------
+%% Notes on parallel execution of test cases
+%% -------------------------------------------------------------------
+%%
+%% A group nested under a parallel group will start executing in
+%% parallel with previous (parallel) test cases (no matter what
+%% properties the nested group has). Test cases are however never
+%% executed in parallel with the start or end conf case of the same
+%% group! Because of this, the test_server_ctrl loop waits at
+%% the end conf of a group for all parallel cases to finish
+%% before the end conf case actually executes. This has the effect
+%% that it's only after a nested group has finished that any
+%% remaining parallel cases in the previous group get spawned (*).
+%% Example (all parallel cases):
+%%
+%% group1_init |---->
+%% group1_case1 | --------->
+%% group1_case2 | --------------------------------->
+%% group2_init | ---->
+%% group2_case1 | ------>
+%% group2_case2 | ---------->
+%% group2_end | --->
+%% group1_case3 (*)| ---->
+%% group1_case4 (*)| -->
+%% group1_end | --->
+%%
+
+run_test_cases_loop([{SkipTag,CaseData={Type,_Ref,_Case,_Comment}}|Cases],
+ Config, TimetrapData, Mode, Status) when
+ ((SkipTag==auto_skip_case) or (SkipTag==skip_case)) and
+ ((Type==conf) or (Type==make)) ->
+ run_test_cases_loop([{SkipTag,CaseData,Mode}|Cases],
+ Config, TimetrapData, Mode, Status);
+
+run_test_cases_loop([{SkipTag,{Type,Ref,Case,Comment},SkipMode}|Cases],
+ Config, TimetrapData, Mode, Status) when
+ ((SkipTag==auto_skip_case) or (SkipTag==skip_case)) and
+ ((Type==conf) or (Type==make)) ->
+ file:set_cwd(filename:dirname(get(test_server_dir))),
+ CurrIOHandler = get(test_server_common_io_handler),
+ ParentMode = tl(Mode),
+
+ {AutoOrUser,ReportTag} =
+ if SkipTag == auto_skip_case -> {auto,tc_auto_skip};
+ SkipTag == skip_case -> {user,tc_user_skip}
+ end,
+
+ %% check and update the mode for test case execution and io msg handling
+ case {curr_ref(Mode),check_props(parallel, Mode)} of
+ {Ref,Ref} ->
+ case check_props(parallel, ParentMode) of
+ false ->
+ %% this is a skipped end conf for a top level parallel
+ %% group, buffered io can be flushed
+ handle_test_case_io_and_status(),
+ set_io_buffering(undefined),
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ false, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report,
+ [ReportTag,ConfData]),
+ run_test_cases_loop(Cases, Config, TimetrapData, ParentMode,
+ delete_status(Ref, Status));
+ _ ->
+ %% this is a skipped end conf for a parallel group nested
+ %% under a parallel group (io buffering is active)
+ wait_for_cases(Ref),
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ true, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+ case CurrIOHandler of
+ {Ref,_} ->
+ %% current_io_handler was set by start conf of this
+ %% group, so we can unset it now (no more io from main
+ %% process needs to be buffered)
+ set_io_buffering(undefined);
+ _ ->
+ ok
+ end,
+ run_test_cases_loop(Cases, Config,
+ TimetrapData, ParentMode,
+ delete_status(Ref, Status))
+ end;
+ {Ref,false} ->
+ %% this is a skipped end conf for a non-parallel group that's not
+ %% nested under a parallel group
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ false, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+
+	    %% Check if this group is auto skipped because of an error in
+	    %% the init conf. If so, check if the parent group is a sequence,
+	    %% and if it is, skip all subsequent tests in that group.
+ GrName = get_name(Mode),
+ Cases1 =
+ case get_tc_results(Status) of
+ {_,_,Fails} when length(Fails) > 0 ->
+ case lists:member({group_result,GrName}, Fails) of
+ true ->
+ case check_prop(sequence, ParentMode) of
+ false ->
+ Cases;
+ ParentRef ->
+ Reason = {group_result,GrName,failed},
+ skip_cases_upto(ParentRef, Cases,
+ Reason, tc, ParentMode,
+ SkipTag)
+ end;
+ false ->
+ Cases
+ end;
+ _ ->
+ Cases
+ end,
+ run_test_cases_loop(Cases1, Config, TimetrapData, ParentMode,
+ delete_status(Ref, Status));
+ {Ref,_} ->
+ %% this is a skipped end conf for a non-parallel group nested under
+ %% a parallel group (io buffering is active)
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ true, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+ case CurrIOHandler of
+ {Ref,_} ->
+ %% current_io_handler was set by start conf of this
+ %% group, so we can unset it now (no more io from main
+ %% process needs to be buffered)
+ set_io_buffering(undefined);
+ _ ->
+ ok
+ end,
+ run_test_cases_loop(Cases, Config, TimetrapData, tl(Mode),
+ delete_status(Ref, Status));
+ {_,false} ->
+ %% this is a skipped start conf for a group which is not nested
+ %% under a parallel group
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ false, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+ run_test_cases_loop(Cases, Config, TimetrapData,
+ [conf(Ref,[])|Mode], Status);
+ {_,Ref0} when is_reference(Ref0) ->
+ %% this is a skipped start conf for a group nested under a parallel
+ %% group and if this is the first nested group, io buffering must
+ %% be activated
+ if CurrIOHandler == undefined ->
+ set_io_buffering({Ref,self()});
+ true ->
+ ok
+ end,
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ true, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+ run_test_cases_loop(Cases, Config, TimetrapData,
+ [conf(Ref,[])|Mode], Status)
+ end;
+
+run_test_cases_loop([{auto_skip_case,{Case,Comment},SkipMode}|Cases],
+ Config, TimetrapData, Mode, Status) ->
+ {Mod,Func} = skip_case(auto, undefined, get(test_server_case_num)+1,
+ Case, Comment, is_io_buffered(), SkipMode),
+ test_server_sup:framework_call(report, [tc_auto_skip,
+ {Mod,{Func,get_name(SkipMode)},
+ Comment}]),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode,
+ update_status(skipped, Mod, Func, Status));
+
+run_test_cases_loop([{skip_case,{{Mod,all}=Case,Comment},SkipMode}|Cases],
+ Config, TimetrapData, Mode, Status) ->
+ skip_case(user, undefined, 0, Case, Comment, false, SkipMode),
+ test_server_sup:framework_call(report, [tc_user_skip,
+ {Mod,{all,get_name(SkipMode)},
+ Comment}]),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status);
+
+run_test_cases_loop([{skip_case,{Case,Comment},SkipMode}|Cases],
+ Config, TimetrapData, Mode, Status) ->
+ {Mod,Func} = skip_case(user, undefined, get(test_server_case_num)+1,
+ Case, Comment, is_io_buffered(), SkipMode),
+ test_server_sup:framework_call(report, [tc_user_skip,
+ {Mod,{Func,get_name(SkipMode)},
+ Comment}]),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode,
+ update_status(skipped, Mod, Func, Status));
+
+%% a start *or* end conf case, wrapping test cases or other conf cases
+run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
+ Config, TimetrapData, Mode0, Status) ->
+ CurrIOHandler = get(test_server_common_io_handler),
+ %% check and update the mode for test case execution and io msg handling
+ {StartConf,Mode,IOHandler,ConfTime,Status1} =
+ case {curr_ref(Mode0),check_props(parallel, Mode0)} of
+ {Ref,Ref} ->
+ case check_props(parallel, tl(Mode0)) of
+ false ->
+ %% this is an end conf for a top level parallel group,
+ %% collect results from the test case processes
+ %% and calc total time
+ OkSkipFail = handle_test_case_io_and_status(),
+ file:set_cwd(filename:dirname(get(test_server_dir))),
+ After = ?now,
+ Before = get(test_server_parallel_start_time),
+ Elapsed = timer:now_diff(After, Before)/1000000,
+ put(test_server_total_time, Elapsed),
+ {false,tl(Mode0),undefined,Elapsed,
+ update_status(Ref, OkSkipFail, Status)};
+ _ ->
+ %% this is an end conf for a parallel group nested under a
+ %% parallel group (io buffering is active)
+ OkSkipFail = wait_for_cases(Ref),
+ queue_test_case_io(Ref, self(), 0, Mod, Func),
+ Elapsed = timer:now_diff(?now, conf_start(Ref, Mode0))/1000000,
+ case CurrIOHandler of
+ {Ref,_} ->
+ %% current_io_handler was set by start conf of this
+ %% group, so we can unset it after this case (no
+ %% more io from main process needs to be buffered)
+ {false,tl(Mode0),undefined,Elapsed,
+ update_status(Ref, OkSkipFail, Status)};
+ _ ->
+ {false,tl(Mode0),CurrIOHandler,Elapsed,
+ update_status(Ref, OkSkipFail, Status)}
+ end
+ end;
+ {Ref,false} ->
+ %% this is an end conf for a non-parallel group that's not
+ %% nested under a parallel group, so no need to buffer io
+ {false,tl(Mode0),undefined,
+ timer:now_diff(?now, conf_start(Ref, Mode0))/1000000, Status};
+ {Ref,_} ->
+ %% this is an end conf for a non-parallel group nested under
+ %% a parallel group (io buffering is active)
+ queue_test_case_io(Ref, self(), 0, Mod, Func),
+ Elapsed = timer:now_diff(?now, conf_start(Ref, Mode0))/1000000,
+ case CurrIOHandler of
+ {Ref,_} ->
+ %% current_io_handler was set by start conf of this
+ %% group, so we can unset it after this case (no
+ %% more io from main process needs to be buffered)
+ {false,tl(Mode0),undefined,Elapsed,Status};
+ _ ->
+ {false,tl(Mode0),CurrIOHandler,Elapsed,Status}
+ end;
+ {_,false} ->
+ %% this is a start conf for a group which is not nested under a
+ %% parallel group, check if this case starts a new parallel group
+ case lists:member(parallel, Props) of
+ true ->
+ %% prepare for execution of parallel group
+ put(test_server_parallel_start_time, ?now),
+ put(test_server_queued_io, []);
+ false ->
+ ok
+ end,
+ {true,[conf(Ref,Props)|Mode0],undefined,0,Status};
+ {_,_Ref0} ->
+ %% this is a start conf for a group nested under a parallel group, the
+ %% parallel_start_time and parallel_test_cases values have already been set
+ queue_test_case_io(Ref, self(), 0, Mod, Func),
+ %% if this is the first nested group under a parallel group, io
+ %% buffering must be activated
+ IOHandler1 = if CurrIOHandler == undefined ->
+ IOH = {Ref,self()},
+ set_io_buffering(IOH),
+ IOH;
+ true ->
+ CurrIOHandler
+ end,
+ {true,[conf(Ref,Props)|Mode0],IOHandler1,0,Status}
+ end,
+
+ %% if this is a start conf we check if cases should be shuffled
+ {[_Conf|Cases1]=Cs1,Shuffle} =
+ if StartConf ->
+ case get_shuffle(Props) of
+ undefined ->
+ {Cs0,undefined};
+ {_,repeated} ->
+ %% if group is repeated, a new seed should not be set every
+ %% turn - last one is saved in dictionary
+ CurrSeed = get(test_server_curr_random_seed),
+ {shuffle_cases(Ref, Cs0, CurrSeed),{shuffle,CurrSeed}};
+ {_,Seed} ->
+ UseSeed=
+ %% Determine which seed to use by:
+ %% 1. check the TS_RANDOM_SEED env variable
+ %% 2. check random_seed in process state
+ %% 3. use value provided with shuffle option
+ %% 4. use timestamp() values for seed
+ case os:getenv("TS_RANDOM_SEED") of
+ Undef when Undef == false ; Undef == "undefined" ->
+ case get(test_server_random_seed) of
+ undefined -> Seed;
+ TSRS -> TSRS
+ end;
+ NumStr ->
+ %% Ex: "123 456 789" or "123,456,789" -> {123,456,789}
+ list_to_tuple([list_to_integer(NS) ||
+ NS <- string:tokens(NumStr, [$ ,$:,$,])])
+ end,
+ {shuffle_cases(Ref, Cs0, UseSeed),{shuffle,UseSeed}}
+ end;
+ not StartConf ->
+ {Cs0,undefined}
+ end,
+
+ %% if this is a start conf we check if Props specifies repeat and if so
+ %% we copy the group and carry the copy until the end conf where we
+ %% decide to perform the repetition or not
+ {Repeating,Status2,Cases,ReportRepeatStop} =
+ if StartConf ->
+ case get_repeat(Props) of
+ undefined ->
+ %% we *must* have a status entry for every conf since we
+		    %% will continuously update status with test case results
+ %% without knowing the Ref (but update hd(Status))
+ {false,new_status(Ref, Status1),Cases1,?void_fun};
+ {_RepType,N} when N =< 1 ->
+ {false,new_status(Ref, Status1),Cases1,?void_fun};
+ _ ->
+ {Copied,_} = copy_cases(Ref, make_ref(), Cs1),
+ {true,new_status(Ref, Copied, Status1),Cases1,?void_fun}
+ end;
+ not StartConf ->
+ RepVal = get_repeat(get_props(Mode0)),
+ ReportStop =
+ fun() ->
+ print(minor, "~n*** Stopping repeat operation ~w", [RepVal]),
+ print(1, "Stopping repeat operation ~w", [RepVal])
+ end,
+ CopiedCases = get_copied_cases(Status1),
+ EndStatus = delete_status(Ref, Status1),
+ %% check in Mode0 if this is a repeat conf
+ case RepVal of
+ undefined ->
+ {false,EndStatus,Cases1,?void_fun};
+ {_RepType,N} when N =< 1 ->
+ {false,EndStatus,Cases1,?void_fun};
+ {repeat,_} ->
+ {true,EndStatus,CopiedCases++Cases1,?void_fun};
+ {repeat_until_all_ok,_} ->
+ {RestCs,Fun} = case get_tc_results(Status1) of
+ {_,_,[]} ->
+ {Cases1,ReportStop};
+ _ ->
+ {CopiedCases++Cases1,?void_fun}
+ end,
+ {true,EndStatus,RestCs,Fun};
+ {repeat_until_any_ok,_} ->
+ {RestCs,Fun} = case get_tc_results(Status1) of
+ {Ok,_,_Fails} when length(Ok) > 0 ->
+ {Cases1,ReportStop};
+ _ ->
+ {CopiedCases++Cases1,?void_fun}
+ end,
+ {true,EndStatus,RestCs,Fun};
+ {repeat_until_any_fail,_} ->
+ {RestCs,Fun} = case get_tc_results(Status1) of
+ {_,_,Fails} when length(Fails) > 0 ->
+ {Cases1,ReportStop};
+ _ ->
+ {CopiedCases++Cases1,?void_fun}
+ end,
+ {true,EndStatus,RestCs,Fun};
+ {repeat_until_all_fail,_} ->
+ {RestCs,Fun} = case get_tc_results(Status1) of
+ {[],_,_} ->
+ {Cases1,ReportStop};
+ _ ->
+ {CopiedCases++Cases1,?void_fun}
+ end,
+ {true,EndStatus,RestCs,Fun}
+ end
+ end,
+
+ ReportAbortRepeat = fun(What) when Repeating ->
+ print(minor, "~n*** Aborting repeat operation "
+ "(configuration case ~w)", [What]),
+ print(1, "Aborting repeat operation "
+ "(configuration case ~w)", [What]);
+ (_) -> ok
+ end,
+ CfgProps = if StartConf ->
+ if Shuffle == undefined ->
+ [{tc_group_properties,Props}];
+ true ->
+ [{tc_group_properties,
+ [Shuffle|delete_shuffle(Props)]}]
+ end;
+ not StartConf ->
+ {TcOk,TcSkip,TcFail} = get_tc_results(Status1),
+ [{tc_group_properties,get_props(Mode0)},
+ {tc_group_result,[{ok,TcOk},
+ {skipped,TcSkip},
+ {failed,TcFail}]}]
+ end,
+
+ SuiteName = proplists:get_value(suite, Props),
+ case get(test_server_create_priv_dir) of
+ auto_per_run -> % use common priv_dir
+ TSDirs = [{priv_dir,get(test_server_priv_dir)},
+ {data_dir,get_data_dir(Mod, SuiteName)}];
+ _ ->
+ TSDirs = [{data_dir,get_data_dir(Mod, SuiteName)}]
+ end,
+
+ ActualCfg =
+ if not StartConf ->
+ update_config(hd(Config), TSDirs ++ CfgProps);
+ true ->
+ GroupPath = lists:flatmap(fun({_Ref,[],_T}) -> [];
+ ({_Ref,GrProps,_T}) -> [GrProps]
+ end, Mode0),
+ update_config(hd(Config),
+ TSDirs ++ [{tc_group_path,GroupPath} | CfgProps])
+ end,
+
+ CurrMode = curr_mode(Ref, Mode0, Mode),
+ ConfCaseResult = run_test_case(Ref, 0, Mod, Func, [ActualCfg], skip_init,
+ TimetrapData, CurrMode),
+
+ case ConfCaseResult of
+ {_,NewCfg,_} when Func == init_per_suite, is_list(NewCfg) ->
+	    %% check that init_per_suite returned data in the correct format
+ case lists:filter(fun({_,_}) -> false;
+ (_) -> true end, NewCfg) of
+ [] ->
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases, [NewCfg|Config],
+ TimetrapData, Mode, Status2);
+ Bad ->
+ print(minor,
+ "~n*** ~w returned bad elements in Config: ~p.~n",
+ [Func,Bad]),
+ Reason = {failed,{Mod,init_per_suite,bad_return}},
+ Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode,
+ auto_skip_case),
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases2, Config, TimetrapData, Mode,
+ delete_status(Ref, Status2))
+ end;
+ {_,NewCfg,_} when StartConf, is_list(NewCfg) ->
+ print_conf_time(ConfTime),
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases, [NewCfg|Config], TimetrapData, Mode, Status2);
+ {_,{framework_error,{FwMod,FwFunc},Reason},_} ->
+ print(minor, "~n*** ~w failed in ~w. Reason: ~p~n",
+ [FwMod,FwFunc,Reason]),
+ print(1, "~w failed in ~w. Reason: ~p~n", [FwMod,FwFunc,Reason]),
+ exit(framework_error);
+ {_,Fail,_} when element(1,Fail) == 'EXIT';
+ element(1,Fail) == timetrap_timeout;
+ element(1,Fail) == user_timetrap_error;
+ element(1,Fail) == failed ->
+ {Cases2,Config1,Status3} =
+ if StartConf ->
+ ReportAbortRepeat(failed),
+ print(minor, "~n*** ~w failed.~n"
+ " Skipping all cases.", [Func]),
+ Reason = {failed,{Mod,Func,Fail}},
+ {skip_cases_upto(Ref, Cases, Reason, conf, CurrMode,
+ auto_skip_case),
+ Config,
+ update_status(failed, group_result, get_name(Mode),
+ delete_status(Ref, Status2))};
+ not StartConf ->
+ ReportRepeatStop(),
+ print_conf_time(ConfTime),
+ {Cases,tl(Config),delete_status(Ref, Status2)}
+ end,
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3);
+
+ {_,{auto_skip,SkipReason},_} ->
+ %% this case can only happen if the framework (not the user)
+ %% decides to skip execution of a conf function
+ {Cases2,Config1,Status3} =
+ if StartConf ->
+ ReportAbortRepeat(auto_skipped),
+ print(minor, "~n*** ~w auto skipped.~n"
+ " Skipping all cases.", [Func]),
+ {skip_cases_upto(Ref, Cases, SkipReason, conf, CurrMode,
+ auto_skip_case),
+ Config,
+ delete_status(Ref, Status2)};
+ not StartConf ->
+ ReportRepeatStop(),
+ print_conf_time(ConfTime),
+ {Cases,tl(Config),delete_status(Ref, Status2)}
+ end,
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3);
+
+ {_,{Skip,Reason},_} when StartConf and ((Skip==skip) or (Skip==skipped)) ->
+ ReportAbortRepeat(skipped),
+ print(minor, "~n*** ~w skipped.~n"
+ " Skipping all cases.", [Func]),
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf,
+ CurrMode, skip_case),
+ [hd(Config)|Config], TimetrapData, Mode,
+ delete_status(Ref, Status2));
+ {_,{skip_and_save,Reason,_SavedConfig},_} when StartConf ->
+ ReportAbortRepeat(skipped),
+ print(minor, "~n*** ~w skipped.~n"
+ " Skipping all cases.", [Func]),
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf,
+ CurrMode, skip_case),
+ [hd(Config)|Config], TimetrapData, Mode,
+ delete_status(Ref, Status2));
+ {_,_Other,_} when Func == init_per_suite ->
+ print(minor, "~n*** init_per_suite failed to return a Config list.~n", []),
+ Reason = {failed,{Mod,init_per_suite,bad_return}},
+ Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode,
+ auto_skip_case),
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases2, Config, TimetrapData, Mode,
+ delete_status(Ref, Status2));
+ {_,_Other,_} when StartConf ->
+ print_conf_time(ConfTime),
+ set_io_buffering(IOHandler),
+ ReportRepeatStop(),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases, [hd(Config)|Config], TimetrapData,
+ Mode, Status2);
+ {_,_EndConfRetVal,Opts} ->
+ %% Check if return_group_result is set (ok, skipped or failed) and
+ %% if so:
+	    %% 1) *If* the parent group is a sequence, skip all subsequent
+	    %%	  tests in that group.
+	    %% 2) Return the value to the group "above" so that the result
+	    %%	  may be used for evaluating a 'repeat_until_*' property.
+ GrName = get_name(Mode0, Func),
+ {Cases2,Status3} =
+ case lists:keysearch(return_group_result, 1, Opts) of
+ {value,{_,failed}} ->
+ case {curr_ref(Mode),check_prop(sequence, Mode)} of
+ {ParentRef,ParentRef} ->
+ Reason = {group_result,GrName,failed},
+ {skip_cases_upto(ParentRef, Cases, Reason, tc,
+ Mode, auto_skip_case),
+ update_status(failed, group_result, GrName,
+ delete_status(Ref, Status2))};
+ _ ->
+ {Cases,update_status(failed, group_result, GrName,
+ delete_status(Ref, Status2))}
+ end;
+ {value,{_,GroupResult}} ->
+ {Cases,update_status(GroupResult, group_result, GrName,
+ delete_status(Ref, Status2))};
+ false ->
+ {Cases,update_status(ok, group_result, GrName,
+ delete_status(Ref, Status2))}
+ end,
+ print_conf_time(ConfTime),
+ ReportRepeatStop(),
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases2, tl(Config), TimetrapData,
+ Mode, Status3)
+ end;
+
+run_test_cases_loop([{make,Ref,{Mod,Func,Args}}|Cases0], Config, TimetrapData,
+ Mode, Status) ->
+ case run_test_case(Ref, 0, Mod, Func, Args, skip_init, TimetrapData) of
+ {_,Why={'EXIT',_},_} ->
+ print(minor, "~n*** ~w failed.~n"
+ " Skipping all cases.", [Func]),
+ Reason = {failed,{Mod,Func,Why}},
+ Cases = skip_cases_upto(Ref, Cases0, Reason, conf, Mode,
+ auto_skip_case),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status);
+ {_,_Whatever,_} ->
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases0, Config, TimetrapData, Mode, Status)
+ end;
+
+run_test_cases_loop([{conf,_Ref,_Props,_X}=Conf|_Cases0],
+ Config, _TimetrapData, _Mode, _Status) ->
+ erlang:error(badarg, [Conf,Config]);
+
+run_test_cases_loop([{Mod,Case}|Cases], Config, TimetrapData, Mode, Status) ->
+ ActualCfg =
+ case get(test_server_create_priv_dir) of
+ auto_per_run ->
+ update_config(hd(Config), [{priv_dir,get(test_server_priv_dir)},
+ {data_dir,get_data_dir(Mod)}]);
+ _ ->
+ update_config(hd(Config), [{data_dir,get_data_dir(Mod)}])
+ end,
+ run_test_cases_loop([{Mod,Case,[ActualCfg]}|Cases], Config,
+ TimetrapData, Mode, Status);
+
+run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status) ->
+ {Num,RunInit} =
+ case FwMod = get_fw_mod(?MODULE) of
+ Mod when Func == error_in_suite ->
+ {-1,skip_init};
+ _ ->
+ {put(test_server_case_num, get(test_server_case_num)+1),
+ run_init}
+ end,
+
+    %% check the current execution mode and save info about the case if
+    %% printouts to common log files are to be handled later
+
+ case check_prop(parallel, Mode) =:= false andalso is_io_buffered() of
+ true ->
+ %% sequential test case nested in a parallel group;
+ %% io is buffered, so we must queue this test case
+ queue_test_case_io(undefined, self(), Num+1, Mod, Func);
+ false ->
+ ok
+ end,
+
+ case run_test_case(undefined, Num+1, Mod, Func, Args,
+ RunInit, TimetrapData, Mode) of
+ %% callback to framework module failed, exit immediately
+ {_,{framework_error,{FwMod,FwFunc},Reason},_} ->
+ print(minor, "~n*** ~w failed in ~w. Reason: ~p~n",
+ [FwMod,FwFunc,Reason]),
+ print(1, "~w failed in ~w. Reason: ~p~n", [FwMod,FwFunc,Reason]),
+ stop_minor_log_file(),
+ exit(framework_error);
+ %% sequential execution of test case finished
+ {Time,RetVal,_} ->
+ {Failed,Status1} =
+ case Time of
+ died ->
+ {true,update_status(failed, Mod, Func, Status)};
+ _ when is_tuple(RetVal) ->
+ case element(1, RetVal) of
+ R when R=='EXIT'; R==failed ->
+ {true,update_status(failed, Mod, Func, Status)};
+ R when R==skip; R==skipped ->
+ {false,update_status(skipped, Mod, Func, Status)};
+ _ ->
+ {false,update_status(ok, Mod, Func, Status)}
+ end;
+ _ ->
+ {false,update_status(ok, Mod, Func, Status)}
+ end,
+ case check_prop(sequence, Mode) of
+ false ->
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status1);
+ Ref ->
+ %% the case is in a sequence; we must check the result and
+ %% determine if the following cases should run or be skipped
+ if not Failed -> % proceed with next case
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status1);
+ true -> % skip rest of cases in sequence
+ print(minor, "~n*** ~w failed.~n"
+ " Skipping all other cases in sequence.",
+ [Func]),
+ Reason = {failed,{Mod,Func}},
+ Cases2 = skip_cases_upto(Ref, Cases, Reason, tc,
+ Mode, auto_skip_case),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases2, Config, TimetrapData, Mode, Status1)
+ end
+ end;
+ %% the test case is being executed in parallel with the main process (and
+ %% other test cases) and Pid is the dedicated process executing the case
+ Pid ->
+ %% io from Pid will be buffered by the test_server_io process and
+ %% handled later, so we have to save info about the case
+ queue_test_case_io(undefined, Pid, Num+1, Mod, Func),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status)
+ end;
+
+%% TestSpec processing finished
+run_test_cases_loop([], _Config, _TimetrapData, _, _) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% various help functions
+
+new_status(Ref, Status) ->
+ [{Ref,{{[],[],[]},[]}} | Status].
+
+new_status(Ref, CopiedCases, Status) ->
+ [{Ref,{{[],[],[]},CopiedCases}} | Status].
+
+delete_status(Ref, Status) ->
+ lists:keydelete(Ref, 1, Status).
+
+update_status(ok, Mod, Func, [{Ref,{{Ok,Skip,Fail},Cs}} | Status]) ->
+ [{Ref,{{Ok++[{Mod,Func}],Skip,Fail},Cs}} | Status];
+
+update_status(skipped, Mod, Func, [{Ref,{{Ok,Skip,Fail},Cs}} | Status]) ->
+ [{Ref,{{Ok,Skip++[{Mod,Func}],Fail},Cs}} | Status];
+
+update_status(failed, Mod, Func, [{Ref,{{Ok,Skip,Fail},Cs}} | Status]) ->
+ [{Ref,{{Ok,Skip,Fail++[{Mod,Func}]},Cs}} | Status];
+
+update_status(_, _, _, []) ->
+ [].
+
+update_status(Ref, {Ok,Skip,Fail}, [{Ref,{{Ok0,Skip0,Fail0},Cs}} | Status]) ->
+ [{Ref,{{Ok0++Ok,Skip0++Skip,Fail0++Fail},Cs}} | Status].
+
+get_copied_cases([{_,{_,Cases}} | _Status]) ->
+ Cases.
+
+get_tc_results([{_,{OkSkipFail,_}} | _Status]) ->
+ OkSkipFail;
+get_tc_results([]) -> % in case init_per_suite crashed
+ {[],[],[]}.
+
+conf(Ref, Props) ->
+ {Ref,Props,?now}.
+
+curr_ref([{Ref,_Props,_}|_]) ->
+ Ref;
+curr_ref([]) ->
+ undefined.
+
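+%% Return the mode list in which Ref is the current group: the updated
+%% mode (Mode1) when a start conf is executing, otherwise the original
+%% mode (Mode0), which still has Ref at its head while the end conf of
+%% the group is executing.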
+curr_mode(Ref, Mode0, Mode1) ->
+ case curr_ref(Mode1) of
+ Ref -> Mode1;
+ _ -> Mode0
+ end.
+
+get_props([{_,Props,_} | _]) ->
+ Props;
+get_props([]) ->
+ [].
+
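+%% check_prop/2 returns the Ref of the current (innermost) group if its
+%% property list contains Attrib, otherwise false. check_props/2 searches
+%% all groups in Mode and returns the Ref of the innermost group that
+%% has Attrib set, or false if none has.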
+check_prop(_Attrib, []) ->
+ false;
+check_prop(Attrib, [{Ref,Props,_}|_]) ->
+ case lists:member(Attrib, Props) of
+ true -> Ref;
+ false -> false
+ end.
+
+check_props(Attrib, Mode) ->
+ case [R || {R,Ps,_} <- Mode, lists:member(Attrib, Ps)] of
+ [] -> false;
+ [Ref|_] -> Ref
+ end.
+
+get_name(Mode, Def) ->
+ case get_name(Mode) of
+ undefined -> Def;
+ Name -> Name
+ end.
+
+get_name([{_Ref,Props,_}|_]) ->
+ proplists:get_value(name, Props);
+get_name([]) ->
+ undefined.
+
+conf_start(Ref, Mode) ->
+ case lists:keysearch(Ref, 1, Mode) of
+ {value,{_,_,T}} -> T;
+ false -> 0
+ end.
+
+
+get_data_dir(Mod) ->
+ get_data_dir(Mod, undefined).
+
+get_data_dir(Mod, Suite) ->
+ UseMod = if Suite == undefined -> Mod;
+ true -> Suite
+ end,
+ case code:which(UseMod) of
+ non_existing ->
+ print(12, "The module ~w is not loaded", [Mod]),
+ [];
+ cover_compiled ->
+ MainCoverNode = cover:get_main_node(),
+ {file,File} = rpc:call(MainCoverNode,cover,is_compiled,[UseMod]),
+ do_get_data_dir(UseMod,File);
+ FullPath ->
+ do_get_data_dir(UseMod,FullPath)
+ end.
+
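+%% E.g. (assuming the ?data_dir_suffix macro expands to "_data/"):
+%% do_get_data_dir(x_SUITE, "/tests/x_SUITE.beam") -> "/tests/x_SUITE_data/"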
+do_get_data_dir(Mod,File) ->
+ filename:dirname(File) ++ "/" ++ atom_to_list(Mod) ++ ?data_dir_suffix.
+
+print_conf_time(0) ->
+ ok;
+print_conf_time(ConfTime) ->
+ print(major, "=group_time ~.3fs", [ConfTime]),
+ print(minor, "~n=== Total execution time of group: ~.3fs~n", [ConfTime]).
+
+print_props([]) ->
+ ok;
+print_props(Props) ->
+ print(major, "=group_props ~p", [Props]),
+ print(minor, "Group properties: ~p~n", [Props]).
+
+%% repeat N times: {repeat,N}
+%% repeat N times or until all successful: {repeat_until_all_ok,N}
+%% repeat N times or until at least one successful: {repeat_until_any_ok,N}
+%% repeat N times or until at least one case fails: {repeat_until_any_fail,N}
+%% repeat N times or until all fails: {repeat_until_all_fail,N}
+%% N = integer() | forever
+get_repeat(Props) ->
+ get_prop([repeat,repeat_until_all_ok,repeat_until_any_ok,
+ repeat_until_any_fail,repeat_until_all_fail], forever, Props).
+
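+%% Update the repeat property in Props for the copy of a repeated group:
+%% e.g. [{repeat,3}] becomes [{repeat,2}] in the copy; [{repeat,2}]
+%% leaves no repetitions beyond the copy itself, so the property is
+%% dropped; {_,forever} is kept unchanged.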
+update_repeat(Props) ->
+ case get_repeat(Props) of
+ undefined ->
+ Props;
+ {RepType,N} ->
+ Props1 =
+ if N == forever ->
+ [{RepType,N}|lists:keydelete(RepType, 1, Props)];
+ N < 3 ->
+ lists:keydelete(RepType, 1, Props);
+ N >= 3 ->
+ [{RepType,N-1}|lists:keydelete(RepType, 1, Props)]
+ end,
+ %% if shuffle is used in combination with repeat, a new
+ %% seed shouldn't be set every new turn
+ case get_shuffle(Props1) of
+ undefined ->
+ Props1;
+ _ ->
+ [{shuffle,repeated}|delete_shuffle(Props1)]
+ end
+ end.
+
+get_shuffle(Props) ->
+ get_prop([shuffle], ?now, Props).
+
+delete_shuffle(Props) ->
+ delete_prop([shuffle], Props).
+
+%% Return {Item,Value} if {Item,Value} is found in Props. If Item alone
+%% is found, return {Item,Default}. If neither is found, return undefined.
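+%% E.g. get_prop([repeat], forever, [{repeat,3},parallel]) -> {repeat,3}
+%%      get_prop([repeat], forever, [repeat,parallel])     -> {repeat,forever}
+%%      get_prop([repeat], forever, [parallel])            -> undefined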
+get_prop([Item|Items], Default, Props) ->
+ case lists:keysearch(Item, 1, Props) of
+ {value,R} ->
+ R;
+ false ->
+ case lists:member(Item, Props) of
+ true ->
+ {Item,Default};
+ false ->
+ get_prop(Items, Default, Props)
+ end
+ end;
+get_prop([], _Def, _Props) ->
+ undefined.
+
+delete_prop([Item|Items], Props) ->
+ Props1 = lists:delete(Item, lists:keydelete(Item, 1, Props)),
+ delete_prop(Items, Props1);
+delete_prop([], Props) ->
+ Props.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% shuffle_cases(Ref, Cases, Seed) -> Cases1
+%%
+%% Shuffles the order of Cases.
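+%% An undefined seed is replaced by a fresh exsplus state, and a legacy
+%% three-integer tuple seed (e.g. one parsed from TS_RANDOM_SEED) is
+%% converted with rand:seed/2 before use.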
+
+shuffle_cases(Ref, Cases, undefined) ->
+ shuffle_cases(Ref, Cases, rand:seed_s(exsplus));
+
+shuffle_cases(Ref, [{conf,Ref,_,_}=Start | Cases], Seed0) ->
+ {N,CasesToShuffle,Rest} = cases_to_shuffle(Ref, Cases),
+ Seed = case Seed0 of
+ {X,Y,Z} when is_integer(X+Y+Z) ->
+ rand:seed(exsplus, Seed0);
+ _ ->
+ Seed0
+ end,
+ ShuffledCases = random_order(N, rand:uniform_s(N, Seed), CasesToShuffle, []),
+ [Start|ShuffledCases] ++ Rest.
+
+cases_to_shuffle(Ref, Cases) ->
+ cases_to_shuffle(Ref, Cases, 1, []).
+
+cases_to_shuffle(Ref, [{conf,Ref,_,_} | _]=Cs, N, Ix) -> % end
+ {N-1,Ix,Cs};
+cases_to_shuffle(Ref, [{skip_case,{_,Ref,_,_},_} | _]=Cs, N, Ix) -> % end
+ {N-1,Ix,Cs};
+
+cases_to_shuffle(Ref, [{conf,Ref1,_,_}=C | Cs], N, Ix) -> % nested group
+ {Cs1,Rest} = get_subcases(Ref1, Cs, []),
+ cases_to_shuffle(Ref, Rest, N+1, [{N,[C|Cs1]} | Ix]);
+cases_to_shuffle(Ref, [{skip_case,{_,Ref1,_,_},_}=C | Cs], N, Ix) -> % nested group
+ {Cs1,Rest} = get_subcases(Ref1, Cs, []),
+ cases_to_shuffle(Ref, Rest, N+1, [{N,[C|Cs1]} | Ix]);
+
+cases_to_shuffle(Ref, [C | Cs], N, Ix) ->
+ cases_to_shuffle(Ref, Cs, N+1, [{N,[C]} | Ix]).
+
+get_subcases(SubRef, [{conf,SubRef,_,_}=C | Cs], SubCs) ->
+ {lists:reverse([C|SubCs]),Cs};
+get_subcases(SubRef, [{skip_case,{_,SubRef,_,_},_}=C | Cs], SubCs) ->
+ {lists:reverse([C|SubCs]),Cs};
+get_subcases(SubRef, [C|Cs], SubCs) ->
+ get_subcases(SubRef, Cs, [C|SubCs]).
+
+random_order(1, {_Pos,Seed}, [{_Ix,CaseOrGroup}], Shuffled) ->
+ %% save current seed to be used if test cases are repeated
+ put(test_server_curr_random_seed, Seed),
+ Shuffled++CaseOrGroup;
+random_order(N, {Pos,NewSeed}, IxCases, Shuffled) ->
+ {First,[{_Ix,CaseOrGroup}|Rest]} = lists:split(Pos-1, IxCases),
+ random_order(N-1, rand:uniform_s(N-1, NewSeed),
+ First++Rest, Shuffled++CaseOrGroup).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% skip_case(Type, Ref, CaseNum, Case, Comment, SendSync, Mode) -> {Mod,Func}
+%%
+%% Prints info about a skipped case in the major and html log files.
+%% SendSync determines if start and finished messages must be sent so
+%% that the printouts can be buffered and handled in order with io from
+%% parallel processes.
+skip_case(Type, Ref, CaseNum, Case, Comment, SendSync, Mode) ->
+ MF = {Mod,Func} = case Case of
+ {M,F,_A} -> {M,F};
+ {M,F} -> {M,F}
+ end,
+ if SendSync ->
+ queue_test_case_io(Ref, self(), CaseNum, Mod, Func),
+ self() ! {started,Ref,self(),CaseNum,Mod,Func},
+ test_server_io:start_transaction(),
+ skip_case1(Type, CaseNum, Mod, Func, Comment, Mode),
+ test_server_io:end_transaction(),
+ self() ! {finished,Ref,self(),CaseNum,Mod,Func,skipped,{0,skipped,[]}};
+ not SendSync ->
+ skip_case1(Type, CaseNum, Mod, Func, Comment, Mode)
+ end,
+ MF.
+
+skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) ->
+ {{Col0,Col1},_} = get_font_style((CaseNum > 0), Mode),
+ ResultCol = if Type == auto -> ?auto_skip_color;
+ Type == user -> ?user_skip_color
+ end,
+ print(major, "~n=case ~w:~w", [Mod,Func]),
+ GroupName = case get_name(Mode) of
+ undefined ->
+ "";
+ GrName ->
+ GrName1 = cast_to_list(GrName),
+ print(major, "=group_props ~p", [[{name,GrName1}]]),
+ GrName1
+ end,
+ print(major, "=started ~s", [lists:flatten(timestamp_get(""))]),
+ Comment1 = reason_to_string(Comment),
+ if Type == auto ->
+ print(major, "=result auto_skipped: ~ts", [Comment1]);
+ Type == user ->
+ print(major, "=result skipped: ~ts", [Comment1])
+ end,
+ if CaseNum == 0 ->
+ print(2,"*** Skipping ~w ***", [{Mod,Func}]);
+ true ->
+ print(2,"*** Skipping test case #~w ~w ***", [CaseNum,{Mod,Func}])
+ end,
+ TR = xhtml("<tr valign=\"top\">", ["<tr class=\"",odd_or_even(),"\">"]),
+ print(html,
+ TR ++ "<td>" ++ Col0 ++ "~ts" ++ Col1 ++ "</td>"
+ "<td>" ++ Col0 ++ "~w" ++ Col1 ++ "</td>"
+ "<td>" ++ Col0 ++ "~ts" ++ Col1 ++ "</td>"
+ "<td>" ++ Col0 ++ "~w" ++ Col1 ++ "</td>"
+ "<td>" ++ Col0 ++ "< >" ++ Col1 ++ "</td>"
+ "<td>" ++ Col0 ++ "0.000s" ++ Col1 ++ "</td>"
+ "<td><font color=\"~ts\">SKIPPED</font></td>"
+ "<td>~ts</td></tr>\n",
+ [num2str(CaseNum),fw_name(Mod),GroupName,Func,ResultCol,Comment1]),
+
+ if CaseNum > 0 ->
+ {US,AS} = get(test_server_skipped),
+ case Type of
+ user -> put(test_server_skipped, {US+1,AS});
+ auto -> put(test_server_skipped, {US,AS+1})
+ end,
+ put(test_server_case_num, CaseNum);
+ true -> % conf
+ ok
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% skip_cases_upto(Ref, Cases, Reason, Origin, Mode, SkipType) -> Cases1
+%%
+%% SkipType = skip_case | auto_skip_case
+%% Mark all cases tagged with Ref as skipped.
+
+skip_cases_upto(Ref, Cases, Reason, Origin, Mode, SkipType) ->
+ {_,Modified,Rest} =
+ modify_cases_upto(Ref, {skip,Reason,Origin,Mode,SkipType}, Cases),
+ Modified++Rest.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% copy_cases(OrigRef, NewRef, Cases) -> Cases1
+%%
+%% Copy the test cases marked with OrigRef and tag the copies with NewRef.
+%% The start conf case copy will also get its repeat property updated.
+
+copy_cases(OrigRef, NewRef, Cases) ->
+ {Original,Altered,Rest} = modify_cases_upto(OrigRef, {copy,NewRef}, Cases),
+ {Altered,Original++Altered++Rest}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% modify_cases_upto(Ref, ModOp, Cases) -> {Original,Altered,Remaining}
+%%
+%% ModOp = {skip,Reason,Origin,Mode,SkipType} | {copy,NewRef}
+%% Origin = conf | tc
+%% SkipType = skip_case | auto_skip_case
+%%
+%% Modifies Cases according to ModOp and returns the original elements,
+%% the modified versions of these elements and the remaining (untouched)
+%% cases.
+
+modify_cases_upto(Ref, ModOp, Cases) ->
+ {Original,Altered,Rest} = modify_cases_upto(Ref, ModOp, Cases, [], []),
+ {lists:reverse(Original),lists:reverse(Altered),Rest}.
+
+%% first case of a copy operation is the start conf
+modify_cases_upto(Ref, {copy,NewRef}=Op, [{conf,Ref,Props,MF}=C|T], Orig, Alt) ->
+ modify_cases_upto(Ref, Op, T, [C|Orig], [{conf,NewRef,update_repeat(Props),MF}|Alt]);
+
+modify_cases_upto(Ref, ModOp, Cases, Orig, Alt) ->
+ %% we need to check if there's an end conf case with the
+ %% same ref in the list, if not, this *is* an end conf case
+ case lists:any(fun({_,R,_,_}) when R == Ref -> true;
+ ({_,R,_}) when R == Ref -> true;
+ ({skip_case,{_,R,_,_},_}) when R == Ref -> true;
+ ({skip_case,{_,R,_,_}}) when R == Ref -> true;
+ (_) -> false
+ end, Cases) of
+ true ->
+ modify_cases_upto1(Ref, ModOp, Cases, Orig, Alt);
+ false ->
+ {[],[],Cases}
+ end.
+
+%% next case is a conf with same ref, must be end conf = we're done
+modify_cases_upto1(Ref, {skip,Reason,conf,Mode,skip_case},
+ [{conf,Ref,_Props,MF}|T], Orig, Alt) ->
+ {Orig,[{skip_case,{conf,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {skip,Reason,conf,Mode,auto_skip_case},
+ [{conf,Ref,_Props,MF}|T], Orig, Alt) ->
+ {Orig,[{auto_skip_case,{conf,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {copy,NewRef}, [{conf,Ref,Props,MF}=C|T], Orig, Alt) ->
+ {[C|Orig],[{conf,NewRef,update_repeat(Props),MF}|Alt],T};
+
+%% we've skipped all remaining cases in a sequence
+modify_cases_upto1(Ref, {skip,_,tc,_,_},
+ [{conf,Ref,_Props,_MF}|_]=Cs, Orig, Alt) ->
+ {Orig,Alt,Cs};
+
+%% next is a make case
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType},
+ [{make,Ref,MF}|T], Orig, Alt) ->
+ {Orig,[{SkipType,{make,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {copy,NewRef}, [{make,Ref,MF}=M|T], Orig, Alt) ->
+ {[M|Orig],[{make,NewRef,MF}|Alt],T};
+
+%% next case is a user skipped end conf with the same ref = we're done
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType},
+ [{skip_case,{Type,Ref,MF,_Cmt},_}|T], Orig, Alt) ->
+ {Orig,[{SkipType,{Type,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType},
+ [{skip_case,{Type,Ref,MF,_Cmt}}|T], Orig, Alt) ->
+ {Orig,[{SkipType,{Type,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {copy,NewRef},
+ [{skip_case,{Type,Ref,MF,Cmt},Mode}=C|T], Orig, Alt) ->
+ {[C|Orig],[{skip_case,{Type,NewRef,MF,Cmt},Mode}|Alt],T};
+modify_cases_upto1(Ref, {copy,NewRef},
+ [{skip_case,{Type,Ref,MF,Cmt}}=C|T], Orig, Alt) ->
+ {[C|Orig],[{skip_case,{Type,NewRef,MF,Cmt}}|Alt],T};
+
+%% next is a skip_case, could be one test case or 'all' in suite, we must proceed
+modify_cases_upto1(Ref, ModOp, [{skip_case,{_F,_Cmt},_Mode}=MF|T], Orig, Alt) ->
+ modify_cases_upto1(Ref, ModOp, T, [MF|Orig], [MF|Alt]);
+
+%% next is a normal case (possibly in a sequence), mark as skipped, or copy, and proceed
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,skip_case}=Op,
+ [{_M,_F}=MF|T], Orig, Alt) ->
+ modify_cases_upto1(Ref, Op, T, Orig, [{skip_case,{MF,Reason},Mode}|Alt]);
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,auto_skip_case}=Op,
+ [{_M,_F}=MF|T], Orig, Alt) ->
+ modify_cases_upto1(Ref, Op, T, Orig, [{auto_skip_case,{MF,Reason},Mode}|Alt]);
+modify_cases_upto1(Ref, CopyOp, [{_M,_F}=MF|T], Orig, Alt) ->
+ modify_cases_upto1(Ref, CopyOp, T, [MF|Orig], [MF|Alt]);
+
+%% next is a conf case, modify the Mode arg to keep track of sub groups
+modify_cases_upto1(Ref, {skip,Reason,FType,Mode,SkipType},
+ [{conf,OtherRef,Props,_MF}|T], Orig, Alt) ->
+ case hd(Mode) of
+ {OtherRef,_,_} -> % end conf
+ modify_cases_upto1(Ref, {skip,Reason,FType,tl(Mode),SkipType},
+ T, Orig, Alt);
+ _ -> % start conf
+ Mode1 = [conf(OtherRef,Props)|Mode],
+ modify_cases_upto1(Ref, {skip,Reason,FType,Mode1,SkipType},
+ T, Orig, Alt)
+ end;
+
+%% next is some other case, ignore or copy
+modify_cases_upto1(Ref, {skip,_,_,_,_}=Op, [_Other|T], Orig, Alt) ->
+ modify_cases_upto1(Ref, Op, T, Orig, Alt);
+modify_cases_upto1(Ref, CopyOp, [C|T], Orig, Alt) ->
+ modify_cases_upto1(Ref, CopyOp, T, [C|Orig], [C|Alt]).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% set_io_buffering(IOHandler) -> PrevIOHandler
+%%
+%% Save info about current process (always the main process) buffering
+%% io printout messages from parallel test case processes (*and* possibly
+%% also the main process).
+
+set_io_buffering(IOHandler) ->
+ put(test_server_common_io_handler, IOHandler).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_io_buffered() -> true|false
+%%
+%% Test whether io is being buffered.
+
+is_io_buffered() ->
+ get(test_server_common_io_handler) =/= undefined.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% queue_test_case_io(Ref, Pid, Num, Mod, Func) -> ok
+%%
+%% Save info about a test case that gets its io buffered. This can
+%% be a parallel test case, or a test case (conf or normal) that
+%% belongs to a group nested under a parallel group. The queue
+%% is processed after io buffering is disabled. See run_test_cases_loop/5
+%% and handle_test_case_io_and_status/0 for more info.
+
+queue_test_case_io(Ref, Pid, Num, Mod, Func) ->
+ Entry = {Ref,Pid,Num,Mod,Func},
+ %% the order of the test cases is very important!
+ put(test_server_queued_io,
+ get(test_server_queued_io)++[Entry]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% wait_for_cases(Ref) -> {Ok,Skipped,Failed}
+%%
+%% At the end of a nested parallel group, we have to wait for the test
+%% cases to terminate before we can go on (since test cases never execute
+%% in parallel with the end conf case of the group). When a top level
+%% parallel group is finished, buffered io messages must be handled and
+%% this is taken care of by handle_test_case_io_and_status/0.
+
+wait_for_cases(Ref) ->
+ case get(test_server_queued_io) of
+ [] ->
+ {[],[],[]};
+ Cases ->
+ [_Start|TCs] =
+ lists:dropwhile(fun({R,_,_,_,_}) when R == Ref -> false;
+ (_) -> true
+ end, Cases),
+ wait_and_resend(Ref, TCs, [],[],[])
+ end.
+
+wait_and_resend(Ref, [{OtherRef,_,0,_,_}|Ps],
+ Ok,Skip,Fail) when is_reference(OtherRef),
+ OtherRef /= Ref ->
+ %% ignore cases that belong to nested group
+ Ps1 = rm_cases_upto(OtherRef, Ps),
+ wait_and_resend(Ref, Ps1, Ok,Skip,Fail);
+
+wait_and_resend(Ref, [{_,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) ->
+ receive
+ {finished,_Ref,CurrPid,CaseNum,Mod,Func,Result,_RetVal} = Msg ->
+	    %% resend the message to the main process so that it can be
+	    %% used when test_server_io:print_buffered/1 is called later
+ self() ! Msg,
+ MF = {Mod,Func},
+ {Ok1,Skip1,Fail1} =
+ case Result of
+ ok -> {[MF|Ok],Skip,Fail};
+ skipped -> {Ok,[MF|Skip],Fail};
+ failed -> {Ok,Skip,[MF|Fail]}
+ end,
+ wait_and_resend(Ref, Ps, Ok1,Skip1,Fail1);
+ {'EXIT',CurrPid,Reason} when Reason /= normal ->
+ %% unexpected termination of test case process
+ {value,{_,_,CaseNum,Mod,Func}} = lists:keysearch(CurrPid, 2, Cases),
+ print(1, "Error! Process for test case #~w (~w:~w) died! Reason: ~p",
+ [CaseNum, Mod, Func, Reason]),
+ exit({unexpected_termination,{CaseNum,Mod,Func},{CurrPid,Reason}})
+ end;
+
+wait_and_resend(_, [], Ok,Skip,Fail) ->
+ {lists:reverse(Ok),lists:reverse(Skip),lists:reverse(Fail)}.
+
+rm_cases_upto(Ref, [{Ref,_,0,_,_}|Ps]) ->
+ Ps;
+rm_cases_upto(Ref, [_|Ps]) ->
+ rm_cases_upto(Ref, Ps).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% handle_test_case_io_and_status() -> {Ok,Skipped,Failed}
+%%
+%% Each parallel test case process prints to its own minor log file during
+%% execution. The common log files (major, html etc) must however be
+%% written to sequentially. This is handled by calling
+%% test_server_io:start_transaction/0 to tell the test_server_io process
+%% to buffer all print requests.
+%%
+%% An io session is always started with a
+%% {started,Ref,Pid,Num,Mod,Func} message (and
+%% test_server_io:start_transaction/0 will be called) and terminated
+%% with {finished,Ref,Pid,Num,Mod,Func,Result,RetVal} (and
+%% test_server_io:end_transaction/0 will be called). The result
+%% shipped with the finished message from a parallel process is used
+%% to update status data of the current test run. An 'EXIT' message
+%% from each parallel test case process (after finishing and
+%% terminating) is also received and handled here.
+%%
+%% During execution of a parallel group, any cases (conf or normal)
+%% belonging to a nested group will also get their io printouts buffered.
+%% This is necessary to get the major and html log files written in
+%% the correct sequence. This function also handles the print messages
+%% generated by nested group cases that have been executed sequentially
+%% by the main process (note that these cases do not generate 'EXIT'
+%% messages, only 'started' and 'finished' messages).
+%%
+%% See the header comment for run_test_cases_loop/5 for more
+%% info about IO handling.
+%%
+%% Note: It is important that the type of messages handled here
+%% do not get consumed by test_server:run_test_case_msgloop/5
+%% during the test case execution (e.g. in the catch clause of
+%% the receive)!
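+%%
+%% Sketch of one buffered io session for a parallel test case process P:
+%%
+%%   {started,Ref,P,Num,Mod,Func}                %% transaction starts
+%%     ... print requests buffered by test_server_io ...
+%%   {finished,Ref,P,Num,Mod,Func,Result,RetVal} %% transaction ends
+%%
+%% after which test_server_io:print_buffered(P) flushes the buffered
+%% output in the correct order.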
+
+handle_test_case_io_and_status() ->
+ case get(test_server_queued_io) of
+ [] ->
+ {[],[],[]};
+ Cases ->
+ %% Cases = [{Ref,Pid,CaseNum,Mod,Func} | ...]
+ Result = handle_io_and_exit_loop([], Cases, [],[],[]),
+ Main = self(),
+ %% flush normal exit messages
+ lists:foreach(fun({_,Pid,_,_,_}) when Pid /= Main ->
+ receive
+ {'EXIT',Pid,normal} -> ok
+ after
+ 1000 -> ok
+ end;
+ (_) ->
+ ok
+ end, Cases),
+ Result
+ end.
+
+%% Handle cases (without Ref) that belong to the top parallel group (i.e. when Refs = [])
+handle_io_and_exit_loop([], [{undefined,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) ->
+ %% retrieve the start message for the current io session (= testcase)
+ receive
+ {started,_,CurrPid,CaseNum,Mod,Func} ->
+ {Ok1,Skip1,Fail1} =
+ case handle_io_and_exits(self(), CurrPid, CaseNum, Mod, Func, Cases) of
+ {ok,MF} -> {[MF|Ok],Skip,Fail};
+ {skipped,MF} -> {Ok,[MF|Skip],Fail};
+ {failed,MF} -> {Ok,Skip,[MF|Fail]}
+ end,
+ handle_io_and_exit_loop([], Ps, Ok1,Skip1,Fail1)
+ after
+ 1000 ->
+ exit({testcase_failed_to_start,Mod,Func})
+ end;
+
+%% Handle cases that belong to groups nested under top parallel group
+handle_io_and_exit_loop(Refs, [{Ref,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) ->
+ receive
+ {started,_,CurrPid,CaseNum,Mod,Func} ->
+ handle_io_and_exits(self(), CurrPid, CaseNum, Mod, Func, Cases),
+ Refs1 =
+ case Refs of
+ [Ref|Rs] -> % must be end conf case for subgroup
+ Rs;
+ _ when is_reference(Ref) -> % must be start of new subgroup
+ [Ref|Refs];
+ _ -> % must be normal subgroup testcase
+ Refs
+ end,
+ handle_io_and_exit_loop(Refs1, Ps, Ok,Skip,Fail)
+ after
+ 1000 ->
+ exit({testcase_failed_to_start,Mod,Func})
+ end;
+
+handle_io_and_exit_loop(_, [], Ok,Skip,Fail) ->
+ {lists:reverse(Ok),lists:reverse(Skip),lists:reverse(Fail)}.
+
+handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases) ->
+ receive
+ {abort_current_testcase=Tag,_Reason,From} ->
+ %% If a parallel group is executing, there is no unique
+ %% current test case, so we must generate an error.
+ From ! {self(),Tag,{error,parallel_group}},
+ handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases);
+ %% end of io session from test case executed by main process
+ {finished,_,Main,CaseNum,Mod,Func,Result,_RetVal} ->
+ test_server_io:print_buffered(CurrPid),
+ {Result,{Mod,Func}};
+ %% end of io session from test case executed by parallel process
+ {finished,_,CurrPid,CaseNum,Mod,Func,Result,RetVal} ->
+ test_server_io:print_buffered(CurrPid),
+ case Result of
+ ok ->
+ put(test_server_ok, get(test_server_ok)+1);
+ failed ->
+ put(test_server_failed, get(test_server_failed)+1);
+ skipped ->
+ SkipCounters =
+ update_skip_counters(RetVal, get(test_server_skipped)),
+ put(test_server_skipped, SkipCounters)
+ end,
+ {Result,{Mod,Func}};
+
+ %% unexpected termination of test case process
+ {'EXIT',TCPid,Reason} when Reason /= normal ->
+ test_server_io:print_buffered(CurrPid),
+ {value,{_,_,Num,M,F}} = lists:keysearch(TCPid, 2, Cases),
+ print(1, "Error! Process for test case #~w (~w:~w) died! Reason: ~p",
+ [Num, M, F, Reason]),
+ exit({unexpected_termination,{Num,M,F},{TCPid,Reason}})
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% run_test_case(Ref, Num, Mod, Func, Args, RunInit,
+%% TimetrapData, Mode) -> RetVal
+%%
+%% Creates the minor log file and inserts some test case specific headers
+%% and footers into the log files. Then the test case is executed and the
+%% result is printed to the log files (also info about lingering processes
+%% & slave nodes in the system is presented).
+%%
+%% RunInit decides if the per test case init is to be run (true for all
+%% but conf cases).
+%%
+%% Mode specifies if the test case should be executed by a dedicated,
+%% parallel, process rather than sequentially by the main process. If
+%% the former, the new process is spawned and the dictionary of the main
+%% process is copied to the test case process.
+%%
+%% RetVal is the result of executing the test case. It contains info
+%% about the execution time and the return value of the test case function.
+
+run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData) ->
+ file:set_cwd(filename:dirname(get(test_server_dir))),
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, [], self()).
+
+run_test_case(Ref, Num, Mod, Func, Args, skip_init, TimetrapData, Mode) ->
+ %% a conf case is always executed by the main process
+ run_test_case1(Ref, Num, Mod, Func, Args, skip_init,
+ TimetrapData, Mode, self());
+
+run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData, Mode) ->
+ file:set_cwd(filename:dirname(get(test_server_dir))),
+ Main = self(),
+ case check_prop(parallel, Mode) of
+ false ->
+ %% this is a sequential test case
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, Mode, Main);
+ _Ref ->
+ %% this a parallel test case, spawn the new process
+ Dictionary = get(),
+ {dictionary,Dictionary} = process_info(self(), dictionary),
+ spawn_link(
+ fun() ->
+ process_flag(trap_exit, true),
+ [put(Key, Val) || {Key,Val} <- Dictionary],
+ set_io_buffering({tc,Main}),
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, Mode, Main)
+ end)
+ end.
+
+run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, Mode, Main) ->
+ group_leader(test_server_io:get_gl(Main == self()), self()),
+
+ %% if io is being buffered, send start io session message
+ %% (no matter if case runs on parallel or main process)
+ case is_io_buffered() of
+ false -> ok;
+ true ->
+ test_server_io:start_transaction(),
+ Main ! {started,Ref,self(),Num,Mod,Func}
+ end,
+ TSDir = get(test_server_dir),
+
+ print(major, "=case ~w:~w", [Mod, Func]),
+ MinorName = start_minor_log_file(Mod, Func, self() /= Main),
+ MinorBase = filename:basename(MinorName),
+ print(major, "=logfile ~ts", [filename:basename(MinorName)]),
+
+ UpdatedArgs =
+ %% maybe create unique private directory for test case or config func
+ case get(test_server_create_priv_dir) of
+ auto_per_run ->
+ update_config(hd(Args), [{tc_logfile,MinorName}]);
+ PrivDirMode ->
+ %% create unique private directory for test case
+ RunDir = filename:dirname(MinorName),
+ Ext =
+ if Num == 0 ->
+ Int = erlang:unique_integer([positive,monotonic]),
+ lists:flatten(io_lib:format(".cfg.~w", [Int]));
+ true ->
+ lists:flatten(io_lib:format(".~w", [Num]))
+ end,
+ PrivDir = filename:join(RunDir, ?priv_dir) ++ Ext,
+ if PrivDirMode == auto_per_tc ->
+ ok = file:make_dir(PrivDir);
+ PrivDirMode == manual_per_tc ->
+ ok
+ end,
+ update_config(hd(Args), [{priv_dir,PrivDir++"/"},
+ {tc_logfile,MinorName}])
+ end,
+ GrName = get_name(Mode),
+ test_server_sup:framework_call(report,
+ [tc_start,{{Mod,{Func,GrName}},
+ MinorName}]),
+
+ {ok,Cwd} = file:get_cwd(),
+ Args2Print = if is_list(UpdatedArgs) ->
+ lists:keydelete(tc_group_result, 1, UpdatedArgs);
+ true ->
+ UpdatedArgs
+ end,
+ if RunInit == skip_init ->
+ print_props(get_props(Mode));
+ true ->
+ ok
+ end,
+
+ print(minor,
+ escape_chars(io_lib:format("Config value:\n\n ~tp\n", [Args2Print])),
+ []),
+ print(minor, "Current directory is ~tp\n", [Cwd]),
+
+ GrNameStr = case GrName of
+ undefined -> "";
+ Name -> cast_to_list(Name)
+ end,
+ print(major, "=started ~s", [lists:flatten(timestamp_get(""))]),
+ {{Col0,Col1},Style} = get_font_style((RunInit==run_init), Mode),
+ TR = xhtml("<tr valign=\"top\">", ["<tr class=\"",odd_or_even(),"\">"]),
+ EncMinorBase = uri_encode(MinorBase),
+ print(html, TR ++ "<td>" ++ Col0 ++ "~ts" ++ Col1 ++ "</td>"
+ "<td>" ++ Col0 ++ "~w" ++ Col1 ++ "</td>"
+ "<td>" ++ Col0 ++ "~ts" ++ Col1 ++ "</td>"
+ "<td><a href=\"~ts\">~w</a></td>"
+ "<td><a href=\"~ts#top\">&lt;</a> <a href=\"~ts#end\">&gt;</a></td>",
+ [num2str(Num),fw_name(Mod),GrNameStr,EncMinorBase,Func,
+ EncMinorBase,EncMinorBase]),
+
+ do_unless_parallel(Main, fun erlang:yield/0),
+
+ %% run the test case
+ {Result,DetectedFail,ProcsBefore,ProcsAfter} =
+ run_test_case_apply(Num, Mod, Func, [UpdatedArgs], GrName,
+ RunInit, TimetrapData),
+ {Time,RetVal,Loc,Opts,Comment} =
+ case Result of
+ Normal={_Time,_RetVal,_Loc,_Opts,_Comment} -> Normal;
+ {died,DReason,DLoc,DCmt} -> {died,DReason,DLoc,[],DCmt}
+ end,
+
+ print(minor, "<a name=\"end\"></a>", [], internal_raw),
+ print(minor, "\n", [], internal_raw),
+ print_timestamp(minor, "Ended at "),
+ print(major, "=ended ~s", [lists:flatten(timestamp_get(""))]),
+
+ do_unless_parallel(Main, fun() -> file:set_cwd(filename:dirname(TSDir)) end),
+
+ %% call the appropriate progress function clause to print the results to log
+ Status =
+ case {Time,RetVal} of
+ {died,{timetrap_timeout,TimetrapTimeout}} ->
+ progress(failed, Num, Mod, Func, GrName, Loc,
+ timetrap_timeout, TimetrapTimeout, Comment, Style);
+ {died,Reason} ->
+ progress(failed, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {_,{'EXIT',{Skip,Reason}}} when Skip==skip; Skip==skipped;
+ Skip==auto_skip ->
+ progress(skip, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {_,{'EXIT',_Pid,{Skip,Reason}}} when Skip==skip; Skip==skipped ->
+ progress(skip, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {_,{'EXIT',_Pid,Reason}} ->
+ progress(failed, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {_,{'EXIT',Reason}} ->
+ progress(failed, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {_,{Fail,Reason}} when Fail =:= fail; Fail =:= failed ->
+ progress(failed, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {_,Reason={auto_skip,_Why}} ->
+ progress(skip, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {_,{Skip,Reason}} when Skip==skip; Skip==skipped ->
+ progress(skip, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style);
+ {Time,RetVal} ->
+ case DetectedFail of
+ [] ->
+ progress(ok, Num, Mod, Func, GrName, Loc, RetVal,
+ Time, Comment, Style);
+
+ Reason ->
+ progress(failed, Num, Mod, Func, GrName, Loc, Reason,
+ Time, Comment, Style)
+ end
+ end,
+ %% if the test case was executed sequentially, this updates the
+ %% status count on the main process (status of parallel test cases
+ %% is updated later by the handle_test_case_io_and_status/0 function)
+ case {RunInit,Status} of
+ {skip_init,_} -> % conf doesn't count
+ ok;
+ {_,ok} ->
+ put(test_server_ok, get(test_server_ok)+1);
+ {_,failed} ->
+ put(test_server_failed, get(test_server_failed)+1);
+ {_,skip} ->
+ {US,AS} = get(test_server_skipped),
+ put(test_server_skipped, {US+1,AS});
+ {_,auto_skip} ->
+ {US,AS} = get(test_server_skipped),
+ put(test_server_skipped, {US,AS+1})
+ end,
+ %% only if test case execution is sequential do we care about the
+ %% remaining processes and slave nodes count
+ case self() of
+ Main ->
+ case test_server_sup:framework_call(warn, [processes], true) of
+ true ->
+ if ProcsBefore < ProcsAfter ->
+ print(minor,
+ "WARNING: ~w more processes in system after test case",
+ [ProcsAfter-ProcsBefore]);
+ ProcsBefore > ProcsAfter ->
+ print(minor,
+				  "WARNING: ~w fewer processes in system after test case",
+ [ProcsBefore-ProcsAfter]);
+ true -> ok
+ end;
+ false ->
+ ok
+ end,
+ case test_server_sup:framework_call(warn, [nodes], true) of
+ true ->
+ case catch controller_call(kill_slavenodes) of
+ {'EXIT',_} = Exit ->
+ print(minor,
+			      "WARNING: There might be slave nodes left in the"
+ " system. I tried to kill them, but I failed: ~p\n",
+ [Exit]);
+ [] -> ok;
+ List ->
+			print(minor, "WARNING: ~w slave nodes in system after test "++
+			      "case. Tried to kill them.~n"++
+ " Names:~p",
+ [length(List),List])
+ end;
+ false ->
+ ok
+ end;
+ _ ->
+ ok
+ end,
+ %% if the test case was executed sequentially, this updates the execution
+ %% time count on the main process (adding execution time of parallel test
+ %% case groups is done in run_test_cases_loop/4)
+ if is_number(Time) ->
+ put(test_server_total_time, get(test_server_total_time)+Time);
+ true ->
+ ok
+ end,
+ test_server_sup:check_new_crash_dumps(),
+
+ %% if io is being buffered, send finished message
+ %% (no matter if case runs on parallel or main process)
+ case is_io_buffered() of
+ false ->
+ ok;
+ true ->
+ test_server_io:end_transaction(),
+ Main ! {finished,Ref,self(),Num,Mod,Func,
+ ?mod_result(Status),{Time,RetVal,Opts}}
+ end,
+ {Time,RetVal,Opts}.
+
+
+%%--------------------------------------------------------------------
+%% various help functions
+
+%% Call Action if we are running on the main process (not parallel).
+do_unless_parallel(Main, Action) when is_function(Action, 0) ->
+ case self() of
+ Main -> Action();
+ _ -> ok
+ end.
+
+num2str(0) -> "";
+num2str(N) -> integer_to_list(N).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% progress(Result, CaseNum, Mod, Func, GrName, Loc, Reason, Time,
+%%          Comment, FontStyle) -> Result
+%%
+%% Prints the result of the test case to log file.
+%% Note: Strings that are to be written to the minor log must
+%% be prefixed with "=== " here, or the indentation will be wrong.
+
+progress(skip, CaseNum, Mod, Func, GrName, Loc, Reason, Time,
+ Comment, {St0,St1}) ->
+ {Reason1,{Color,Ret,ReportTag}} =
+ if_auto_skip(Reason,
+ fun() -> {?auto_skip_color,auto_skip,auto_skipped} end,
+ fun() -> {?user_skip_color,skip,skipped} end),
+ print(major, "=result ~w: ~p", [ReportTag,Reason1]),
+ print(1, "*** SKIPPED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
+ test_server_sup:framework_call(report, [tc_done,{Mod,{Func,GrName},
+ {ReportTag,Reason1}}]),
+ ReasonStr = escape_chars(reason_to_string(Reason1)),
+ ReasonStr1 = lists:flatten([string:strip(S,left) ||
+ S <- string:tokens(ReasonStr,[$\n])]),
+ ReasonStr2 =
+ if length(ReasonStr1) > 80 ->
+ string:substr(ReasonStr1, 1, 77) ++ "...";
+ true ->
+ ReasonStr1
+ end,
+ Comment1 = case Comment of
+ "" -> "";
+ _ -> xhtml("<br>(","<br />(") ++ to_string(Comment) ++ ")"
+ end,
+ print(html,
+ "<td>" ++ St0 ++ "~.3fs" ++ St1 ++ "</td>"
+ "<td><font color=\"~ts\">SKIPPED</font></td>"
+ "<td>~ts~ts</td></tr>\n",
+ [Time,Color,ReasonStr2,Comment1]),
+ FormatLoc = test_server_sup:format_loc(Loc),
+ print(minor, "=== Location: ~ts", [FormatLoc]),
+ print(minor, "=== Reason: ~ts", [ReasonStr1]),
+ Ret;
+
+progress(failed, CaseNum, Mod, Func, GrName, Loc, timetrap_timeout, T,
+ Comment0, {St0,St1}) ->
+ print(major, "=result failed: timeout, ~p", [Loc]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
+ test_server_sup:framework_call(report,
+ [tc_done,{Mod,{Func,GrName},
+ {failed,timetrap_timeout}}]),
+ FormatLastLoc = test_server_sup:format_loc(get_last_loc(Loc)),
+ ErrorReason = io_lib:format("{timetrap_timeout,~ts}", [FormatLastLoc]),
+ Comment =
+ case Comment0 of
+ "" -> "<font color=\"red\">" ++ ErrorReason ++ "</font>";
+ _ -> "<font color=\"red\">" ++ ErrorReason ++
+ xhtml("</font><br>","</font><br />") ++ to_string(Comment0)
+ end,
+ print(html,
+ "<td>" ++ St0 ++ "~.3fs" ++ St1 ++ "</td>"
+ "<td><font color=\"red\">FAILED</font></td>"
+ "<td>~ts</td></tr>\n",
+ [T/1000,Comment]),
+ FormatLoc = test_server_sup:format_loc(Loc),
+ print(minor, "=== Location: ~ts", [FormatLoc]),
+ print(minor, "=== Reason: timetrap timeout", []),
+ failed;
+
+progress(failed, CaseNum, Mod, Func, GrName, Loc, {testcase_aborted,Reason}, _T,
+ Comment0, {St0,St1}) ->
+ print(major, "=result failed: testcase_aborted, ~p", [Loc]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
+ test_server_sup:framework_call(report,
+ [tc_done,{Mod,{Func,GrName},
+ {failed,testcase_aborted}}]),
+ FormatLastLoc = test_server_sup:format_loc(get_last_loc(Loc)),
+ ErrorReason = io_lib:format("{testcase_aborted,~ts}", [FormatLastLoc]),
+ Comment =
+ case Comment0 of
+ "" -> "<font color=\"red\">" ++ ErrorReason ++ "</font>";
+ _ -> "<font color=\"red\">" ++ ErrorReason ++
+ xhtml("</font><br>","</font><br />") ++ to_string(Comment0)
+ end,
+ print(html,
+ "<td>" ++ St0 ++ "died" ++ St1 ++ "</td>"
+ "<td><font color=\"red\">FAILED</font></td>"
+ "<td>~ts</td></tr>\n",
+ [Comment]),
+ FormatLoc = test_server_sup:format_loc(Loc),
+ print(minor, "=== Location: ~ts", [FormatLoc]),
+ print(minor,
+ escape_chars(io_lib:format("=== Reason: {testcase_aborted,~p}",
+ [Reason])),
+ []),
+ failed;
+
+progress(failed, CaseNum, Mod, Func, GrName, unknown, Reason, Time,
+ Comment0, {St0,St1}) ->
+ print(major, "=result failed: ~p, ~w", [Reason,unknown_location]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
+ test_server_sup:framework_call(report, [tc_done,{Mod,{Func,GrName},
+ {failed,Reason}}]),
+ TimeStr = io_lib:format(if is_float(Time) -> "~.3fs";
+ true -> "~w"
+ end, [Time]),
+ ErrorReason = escape_chars(lists:flatten(io_lib:format("~p", [Reason]))),
+ ErrorReason1 = lists:flatten([string:strip(S,left) ||
+ S <- string:tokens(ErrorReason,[$\n])]),
+ ErrorReason2 =
+ if length(ErrorReason1) > 63 ->
+ string:substr(ErrorReason1, 1, 60) ++ "...";
+ true ->
+ ErrorReason1
+ end,
+ Comment =
+ case Comment0 of
+ "" -> "<font color=\"red\">" ++ ErrorReason2 ++ "</font>";
+ _ -> "<font color=\"red\">" ++ ErrorReason2 ++
+ xhtml("</font><br>","</font><br />") ++
+ to_string(Comment0)
+ end,
+ print(html,
+ "<td>" ++ St0 ++ "~ts" ++ St1 ++ "</td>"
+ "<td><font color=\"red\">FAILED</font></td>"
+ "<td>~ts</td></tr>\n",
+ [TimeStr,Comment]),
+ print(minor, "=== Location: ~w", [unknown]),
+ {FStr,FormattedReason} = format_exception(Reason),
+ print(minor,
+ escape_chars(io_lib:format("=== Reason: " ++ FStr, [FormattedReason])),
+ []),
+ failed;
+
+progress(failed, CaseNum, Mod, Func, GrName, Loc, Reason, Time,
+ Comment0, {St0,St1}) ->
+ {LocMaj,LocMin} = if Func == error_in_suite ->
+ case get_fw_mod(undefined) of
+ Mod -> {unknown_location,unknown};
+ _ -> {Loc,Loc}
+ end;
+ true -> {Loc,Loc}
+ end,
+ print(major, "=result failed: ~p, ~p", [Reason,LocMaj]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
+ test_server_sup:framework_call(report, [tc_done,{Mod,{Func,GrName},
+ {failed,Reason}}]),
+ TimeStr = io_lib:format(if is_float(Time) -> "~.3fs";
+ true -> "~w"
+ end, [Time]),
+ Comment =
+ case Comment0 of
+ "" -> "";
+ _ -> xhtml("<br>","<br />") ++ to_string(Comment0)
+ end,
+ FormatLastLoc = test_server_sup:format_loc(get_last_loc(LocMaj)),
+ print(html,
+ "<td>" ++ St0 ++ "~ts" ++ St1 ++ "</td>"
+ "<td><font color=\"red\">FAILED</font></td>"
+ "<td><font color=\"red\">~ts</font>~ts</td></tr>\n",
+ [TimeStr,FormatLastLoc,Comment]),
+ FormatLoc = test_server_sup:format_loc(LocMin),
+ print(minor, "=== Location: ~ts", [FormatLoc]),
+ {FStr,FormattedReason} = format_exception(Reason),
+ print(minor, "=== Reason: " ++
+ escape_chars(io_lib:format(FStr, [FormattedReason])), []),
+ failed;
+
+progress(ok, _CaseNum, Mod, Func, GrName, _Loc, RetVal, Time,
+ Comment0, {St0,St1}) ->
+ print(minor, "successfully completed test case", []),
+ test_server_sup:framework_call(report, [tc_done,{Mod,{Func,GrName},ok}]),
+ Comment =
+ case RetVal of
+ {comment,RetComment} ->
+ String = to_string(RetComment),
+ HtmlCmt = test_server_sup:framework_call(format_comment,
+ [String],
+ String),
+ print(major, "=result ok: ~ts", [String]),
+ "<td>" ++ HtmlCmt ++ "</td>";
+ _ ->
+ print(major, "=result ok", []),
+ case Comment0 of
+ "" -> "<td></td>";
+ _ -> "<td>" ++ to_string(Comment0) ++ "</td>"
+ end
+ end,
+ print(major, "=elapsed ~p", [Time]),
+ print(html,
+ "<td>" ++ St0 ++ "~.3fs" ++ St1 ++ "</td>"
+ "<td><font color=\"green\">Ok</font></td>"
+ "~ts</tr>\n",
+ [Time,Comment]),
+ print(minor,
+ escape_chars(io_lib:format("=== Returned value: ~tp", [RetVal])),
+ []),
+ ok.
+
+%%--------------------------------------------------------------------
+%% various help functions
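+%% escape_chars(Term) replaces $<, $> and $& in the resulting iodata
+%% with their HTML entities; non-list terms are first formatted with
+%% ~tp. E.g. (a sketch): escape_chars("a<b & c") flattens to
+%% "a&lt;b &amp; c".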
+escape_chars(Term) when not is_list(Term), not is_binary(Term) ->
+ esc_chars_in_list(io_lib:format("~tp", [Term]));
+escape_chars(List = [Term | _]) when not is_list(Term), not is_integer(Term) ->
+ esc_chars_in_list(io_lib:format("~tp", [List]));
+escape_chars(List) ->
+ esc_chars_in_list(List).
+
+esc_chars_in_list([Bin | Io]) when is_binary(Bin) ->
+ [Bin | esc_chars_in_list(Io)];
+esc_chars_in_list([List | Io]) when is_list(List) ->
+ [esc_chars_in_list(List) | esc_chars_in_list(Io)];
+esc_chars_in_list([$< | Io]) ->
+ ["&lt;" | esc_chars_in_list(Io)];
+esc_chars_in_list([$> | Io]) ->
+ ["&gt;" | esc_chars_in_list(Io)];
+esc_chars_in_list([$& | Io]) ->
+ ["&amp;" | esc_chars_in_list(Io)];
+esc_chars_in_list([Char | Io]) when is_integer(Char) ->
+ [Char | esc_chars_in_list(Io)];
+esc_chars_in_list([]) ->
+ [];
+esc_chars_in_list(Bin) ->
+ Bin.
+
+get_fw_mod(Mod) ->
+ case get(test_server_framework) of
+ undefined ->
+ case os:getenv("TEST_SERVER_FRAMEWORK") of
+ FW when FW =:= false; FW =:= "undefined" ->
+ Mod;
+ FW ->
+ list_to_atom(FW)
+ end;
+ '$none' -> Mod;
+ FW -> FW
+ end.
+
+fw_name(?MODULE) ->
+ test_server;
+fw_name(Mod) ->
+ case get(test_server_framework_name) of
+ undefined ->
+ case get_fw_mod(undefined) of
+ undefined ->
+ Mod;
+ Mod ->
+ case os:getenv("TEST_SERVER_FRAMEWORK_NAME") of
+ FWName when FWName =:= false; FWName =:= "undefined" ->
+ Mod;
+ FWName ->
+ list_to_atom(FWName)
+ end;
+ _ ->
+ Mod
+ end;
+ '$none' ->
+ Mod;
+ FWName ->
+ case get_fw_mod(Mod) of
+ Mod -> FWName;
+ _ -> Mod
+ end
+ end.
+
+if_auto_skip(Reason={failed,{_,init_per_testcase,_}}, True, _False) ->
+ {Reason,True()};
+if_auto_skip({skip,Reason={failed,{_,init_per_testcase,_}}}, True, _False) ->
+ {Reason,True()};
+if_auto_skip({auto_skip,Reason}, True, _False) ->
+ {Reason,True()};
+if_auto_skip(Reason, _True, False) ->
+ {Reason,False()}.
+
+update_skip_counters({_T,Pat,_Opts}, {US,AS}) ->
+ {_,Result} = if_auto_skip(Pat, fun() -> {US,AS+1} end, fun() -> {US+1,AS} end),
+ Result;
+update_skip_counters(Pat, {US,AS}) ->
+ {_,Result} = if_auto_skip(Pat, fun() -> {US,AS+1} end, fun() -> {US+1,AS} end),
+ Result.
+
+get_info_str(Mod,Func, 0, _Cases) ->
+ io_lib:format("~w", [{Mod,Func}]);
+get_info_str(_Mod,_Func, CaseNum, unknown) ->
+ "test case " ++ integer_to_list(CaseNum);
+get_info_str(_Mod,_Func, CaseNum, Cases) ->
+ "test case " ++ integer_to_list(CaseNum) ++
+ " of " ++ integer_to_list(Cases).
+
+print_if_known(Known, {SK,AK}, {SU,AU}) ->
+ {S,A} = if Known == unknown -> {SU,AU};
+ true -> {SK,AK}
+ end,
+ io_lib:format(S, A).
+
+to_string(Term) when is_list(Term) ->
+ case (catch io_lib:format("~ts", [Term])) of
+ {'EXIT',_} -> lists:flatten(io_lib:format("~p", [Term]));
+ String -> lists:flatten(String)
+ end;
+to_string(Term) ->
+ lists:flatten(io_lib:format("~p", [Term])).
+
+get_last_loc(Loc) when is_tuple(Loc) ->
+ Loc;
+get_last_loc([Loc|_]) when is_tuple(Loc) ->
+ [Loc];
+get_last_loc(Loc) ->
+ Loc.
+
+reason_to_string({failed,{_,FailFunc,bad_return}}) ->
+ atom_to_list(FailFunc) ++ " bad return value";
+reason_to_string({failed,{_,FailFunc,{timetrap_timeout,_}}}) ->
+ atom_to_list(FailFunc) ++ " timed out";
+reason_to_string(FWInitFail = {failed,{_CB,init_tc,_Reason}}) ->
+ to_string(FWInitFail);
+reason_to_string({failed,{_,FailFunc,_}}) ->
+ atom_to_list(FailFunc) ++ " failed";
+reason_to_string(Other) ->
+ to_string(Other).
+
+%get_font_style(Prop) ->
+% {Col,St0,St1} = get_font_style1(Prop),
+% {{"<font color="++Col++">","</font>"},
+% {"<font color="++Col++">"++St0,St1++"</font>"}}.
+
+get_font_style(NormalCase, Mode) ->
+ Prop = if not NormalCase ->
+ default;
+ true ->
+ case check_prop(parallel, Mode) of
+ false ->
+ case check_prop(sequence, Mode) of
+ false ->
+ default;
+ _ ->
+ sequence
+ end;
+ _ ->
+ parallel
+ end
+ end,
+ {Col,St0,St1} = get_font_style1(Prop),
+ {{"<font color="++Col++">","</font>"},
+ {"<font color="++Col++">"++St0,St1++"</font>"}}.
+
+get_font_style1(parallel) ->
+ {"\"darkslategray\"","<i>","</i>"};
+get_font_style1(sequence) ->
+% {"\"darkolivegreen\"","",""};
+ {"\"saddlebrown\"","",""};
+get_font_style1(default) ->
+ {"\"black\"","",""}.
+%%get_font_style1(skipped) ->
+%% {"\"lightgray\"","",""}.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% format_exception({Error,Stack}) -> {CtrlSeq,Term}
+%%
+%% The default behaviour is that error information gets formatted
+%% (like in the erlang shell) before printed to the minor log file.
+%% The framework application can switch this feature off by setting
+%% *its* application environment variable 'format_exception' to false.
+%% It is also possible to switch formatting off by starting the
+%% test_server node with init argument 'test_server_format_exception'
+%% set to false.
+
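+%% E.g. (a sketch): if the framework application is common_test,
+%%   application:set_env(common_test, format_exception, false)
+%% makes this function skip formatting and return {"~p",Reason}.
+%%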
+format_exception(Reason={_Error,Stack}) when is_list(Stack) ->
+ case get_fw_mod(undefined) of
+ undefined ->
+ case application:get_env(test_server, format_exception) of
+ {ok,false} ->
+ {"~p",Reason};
+ _ ->
+ do_format_exception(Reason)
+ end;
+ FW ->
+ case application:get_env(FW, format_exception) of
+ {ok,false} ->
+ {"~p",Reason};
+ _ ->
+ do_format_exception(Reason)
+ end
+ end;
+format_exception(Error) ->
+ format_exception({Error,[]}).
+
+do_format_exception(Reason={Error,Stack}) ->
+ StackFun = fun(_, _, _) -> false end,
+ PF = fun(Term, I) ->
+ io_lib:format("~." ++ integer_to_list(I) ++ "p", [Term])
+ end,
+ case catch lib:format_exception(1, error, Error, Stack, StackFun, PF) of
+ {'EXIT',_} ->
+ {"~p",Reason};
+ Formatted ->
+ Formatted1 = re:replace(Formatted, "exception error: ", "", [{return,list}]),
+ {"~ts",lists:flatten(Formatted1)}
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit,
+%% TimetrapData) ->
+%% {{Time,RetVal,Loc,Opts,Comment},DetectedFail,ProcessesBefore,ProcessesAfter} |
+%% {{died,Reason,unknown,Comment},DetectedFail,ProcessesBefore,ProcessesAfter}
+%% Name = atom()
+%% Time = float() (seconds)
+%% RetVal = term()
+%% Loc = term()
+%% Comment = string()
+%% Reason = term()
+%% DetectedFail = [{File,Line}]
+%% ProcessesBefore = ProcessesAfter = integer()
+%%
+
+run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit,
+ TimetrapData) ->
+ test_server:run_test_case_apply({CaseNum,Mod,Func,Args,Name,RunInit,
+ TimetrapData}).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% print(Detail, Format, Args) -> ok
+%% Detail = integer()
+%% Format = string()
+%% Args = [term()]
+%%
+%% Just like io:format, except that depending on the Detail value, the output
+%% is directed to console, major and/or minor log files.
+
+print(Detail, Format) ->
+ print(Detail, Format, []).
+
+print(Detail, Format, Args) ->
+ print(Detail, Format, Args, internal).
+
+print(Detail, ["$tc_html",Format], Args, Printer) ->
+ Msg = io_lib:format(Format, Args),
+ print_or_buffer(Detail, ["$tc_html",Msg], Printer);
+
+print(Detail, Format, Args, Printer) ->
+ Msg = io_lib:format(Format, Args),
+ print_or_buffer(Detail, Msg, Printer).
+
+print_or_buffer(Detail, Msg, Printer) ->
+ test_server_gl:print(group_leader(), Detail, Msg, Printer).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% print_timestamp(Detail, Leader) -> ok
+%%
+%% Prints Leader followed by a time stamp (date and time). Depending on
+%% the Detail value, the output is directed to console, major and/or minor
+%% log files.
+
+print_timestamp(Detail, Leader) ->
+ print(Detail, timestamp_get(Leader), []).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% print_who(Host, User) -> ok
+%%
+%% Logs who runs the suite.
+
+print_who(Host, User) ->
+ UserStr = case User of
+ "" -> "";
+ _ -> " by " ++ User
+ end,
+ print(html, "Run~ts on ~ts", [UserStr,Host]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% format(Format) -> IoLibReturn
+%% format(Detail, Format) -> IoLibReturn
+%% format(Format, Args) -> IoLibReturn
+%% format(Detail, Format, Args) -> IoLibReturn
+%%
+%% Detail = integer()
+%% Format = string()
+%% Args = [term(),...]
+%% IoLibReturn = term()
+%%
+%% Logs the Format string and Args, similar to io:format/1/2 etc. If
+%% Detail is not specified, the default detail level (which is 50) is used.
+%% Which log files the string will be logged in depends on the thresholds
+%% set with set_levels/3. Typically with default detail level, only the
+%% minor log file is used.
+
+format(Format) ->
+ format(minor, Format, []).
+
+format(major, Format) ->
+ format(major, Format, []);
+format(minor, Format) ->
+ format(minor, Format, []);
+format(Detail, Format) when is_integer(Detail) ->
+ format(Detail, Format, []);
+format(Format, Args) ->
+ format(minor, Format, Args).
+
+format(Detail, Format, Args) ->
+ Str =
+ case catch io_lib:format(Format, Args) of
+ {'EXIT',_} ->
+ io_lib:format("illegal format; ~p with args ~p.\n",
+ [Format,Args]);
+ Valid -> Valid
+ end,
+ print_or_buffer(Detail, Str, self()).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% xhtml(BasicHtml, XHtml) -> BasicHtml | XHtml
+%%
+xhtml(HTML, XHTML) ->
+ case get(basic_html) of
+ true -> HTML;
+ _ -> XHTML
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% odd_or_even() -> "odd" | "even"
+%%
+odd_or_even() ->
+ case get(odd_or_even) of
+ even ->
+ put(odd_or_even, odd),
+ "even";
+ _ ->
+ put(odd_or_even, even),
+ "odd"
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timestamp_filename_get(Leader) -> string()
+%% Leader = string()
+%%
+%% Returns a string consisting of Leader concatenated with the current
+%% date and time. The resulting string is suitable as a filename.
+timestamp_filename_get(Leader) ->
+ timestamp_get_internal(Leader,
+ "~ts~w-~2.2.0w-~2.2.0w_~2.2.0w.~2.2.0w.~2.2.0w").
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timestamp_get(Leader) -> string()
+%% Leader = string()
+%%
+%% Returns a string consisting of Leader concatenated with the current
+%% date and time. The resulting string is suitable for display.
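+%% E.g. (a sketch; actual values vary):
+%%   timestamp_get("Ended at ") -> "Ended at 2015-06-12 14:03:59"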
+timestamp_get(Leader) ->
+ timestamp_get_internal(Leader,
+ "~ts~w-~2.2.0w-~2.2.0w ~2.2.0w:~2.2.0w:~2.2.0w").
+
+timestamp_get_internal(Leader, Format) ->
+ {YY,MM,DD,H,M,S} = time_get(),
+ io_lib:format(Format, [Leader,YY,MM,DD,H,M,S]).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% time_get() -> {YY,MM,DD,H,M,S}
+%% YY = integer()
+%% MM = integer()
+%% DD = integer()
+%% H = integer()
+%% M = integer()
+%% S = integer()
+%%
+%% Returns the current Year,Month,Day,Hours,Minutes,Seconds.
+%% The function checks that the date doesn't wrap while
+%% getting the time.
+time_get() ->
+ {YY,MM,DD} = date(),
+ {H,M,S} = time(),
+ case date() of
+ {YY,MM,DD} ->
+ {YY,MM,DD,H,M,S};
+ _NewDay ->
+ %% date changed between call to date() and time(), try again
+ time_get()
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% make_config(Config) -> NewConfig
+%% Config = [{Key,Value},...]
+%% NewConfig = [{Key,Value},...]
+%%
+%% Creates a configuration list (currently returns its input)
+
+make_config(Initial) ->
+ Initial.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% update_config(Config, Update) -> NewConfig
+%% Config = [{Key,Value},...]
+%% Update = [{Key,Value},...] | {Key,Value}
+%% NewConfig = [{Key,Value},...]
+%%
+%% Adds or replaces the key-value pairs in config with those in update.
+%% Returns the updated list.
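+%%
+%% E.g. (a sketch):
+%%   update_config([{a,1},{b,2}], {a,3})   -> [{a,3},{b,2}]
+%%   update_config([{a,1}], [{b,2},{c,3}]) -> [{c,3},{b,2},{a,1}]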
+
+update_config(Config, {Key,Val}) ->
+ case lists:keymember(Key, 1, Config) of
+ true ->
+ lists:keyreplace(Key, 1, Config, {Key,Val});
+ false ->
+ [{Key,Val}|Config]
+ end;
+update_config(Config, [Assoc|Assocs]) ->
+ NewConfig = update_config(Config, Assoc),
+ update_config(NewConfig, Assocs);
+update_config(Config, []) ->
+ Config.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% collect_cases(CurMod, TopCase, SkipList) ->
+%% BasicCaseList | {error,Reason}
+%%
+%% CurMod = atom()
+%% TopCase = term()
+%% SkipList = [term(),...]
+%% BasicCaseList = [term(),...]
+%%
+%% Parses the given test goal(s) in TopCase, and transforms them to a
+%% simple list of test cases to call, when executing the test suite.
+%%
+%% CurMod is the "current" module, that is, the module the last instruction
+%% was read from. May be set to 'none' initially.
+%%
+%% SkipList is the list of test cases to skip and requirements to deny.
+%%
+%% The BasicCaseList is built out of TopCase, which may be any of the
+%% following terms:
+%%
+%% [] Nothing is added
+%% List list() The list is decomposed, and each element is
+%% treated according to this table
+%% Case atom() CurMod:Case(suite) is called
+%% {module,Case} CurMod:Case(suite) is called
+%% {Module,Case} Module:Case(suite) is called
+%% {module,Module,Case} Module:Case(suite) is called
+%% {module,Module,Case,Args} Module:Case is called with Args as arguments
+%% {dir,Dir} All modules *_SUITE in the named directory
+%% are listed, and each Module:all(suite) is called
+%% {dir,Dir,Pattern} All modules <Pattern>_SUITE in the named dir
+%% are listed, and each Module:all(suite) is called
+%% {conf,InitMF,Cases,FinMF}
+%% {conf,Props,InitMF,Cases,FinMF}
+%% InitMF is placed in the BasicCaseList, then
+%% Cases is treated according to this table, then
+%% FinMF is placed in the BasicCaseList. InitMF
+%% and FinMF are configuration manipulation
+%% functions. See below.
+%% {make,InitMFA,Cases,FinMFA}
+%% InitMFA is placed in the BasicCaseList, then
+%% Cases is treated according to this table, then
+%% FinMFA is placed in the BasicCaseList. InitMFA
+%% and FinMFA are make/unmake functions. If InitMFA
+%% fails, Cases are not run.
+%%
+%% When a function is called, above, it means that the function is invoked
+%% and the return is expected to be:
+%%
+%% [] Leaf case
+%% {req,ReqList} Kept for backwards compatibility - same as []
+%% {req,ReqList,Cases} Kept for backwards compatibility -
+%% Cases parsed recursively with collect_cases/3
+%% Cases (list) Recursively parsed with collect_cases/3
+%%
+%% Leaf cases are added to the BasicCaseList as Module:Case(Config). Each
+%% case is checked against the SkipList. If present, a skip instruction
+%% is inserted instead, which only prints the case name and the reason
+%% why the case was skipped in the log files.
+%%
+%% Configuration manipulation functions are called with the current
+%% configuration list as only argument, and are expected to return a new
+%% configuration list. Such a pair of functions may, for example, start a
+%% server and stop it after a series of test cases.
+%%
+%% SkipList is expected to be in the format:
+%%
+%% Other Recursively parsed with collect_cases/3
+%% {Mod,Comment} Skip Mod, with Comment
+%% {Mod,Funcs,Comment} Skip listed functions in Mod with Comment
+%% {Mod,Func,Comment} Skip named function in Mod with Comment
+%%
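+%% Example (hypothetical suite, a sketch): given TopCase =
+%% {dir,"../my_test"} and a module my_SUITE in that directory whose
+%% all(suite) returns [tc1,tc2], the resulting BasicCaseList is
+%% [{my_SUITE,tc1},{my_SUITE,tc2}].
+%%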
+-record(cc, {mod, % current module
+ skip}). % skip list
+
+collect_all_cases(Top, Skip) when is_list(Skip) ->
+ Result =
+ case collect_cases(Top, #cc{mod=[],skip=Skip}, []) of
+ {ok,Cases,_St} -> Cases;
+ Other -> Other
+ end,
+ Result.
+
+
+collect_cases([], St, _) -> {ok,[],St};
+collect_cases([Case|Cs0], St0, Mode) ->
+ case collect_cases(Case, St0, Mode) of
+ {ok,FlatCases1,St1} ->
+ case collect_cases(Cs0, St1, Mode) of
+ {ok,FlatCases2,St} ->
+ {ok,FlatCases1 ++ FlatCases2,St};
+ {error,_Reason} = Error -> Error
+ end;
+ {error,_Reason} = Error -> Error
+ end;
+
+
+collect_cases({module,Case}, St, Mode) when is_atom(Case), is_atom(St#cc.mod) ->
+ collect_case({St#cc.mod,Case}, St, Mode);
+collect_cases({module,Mod,Case}, St, Mode) ->
+ collect_case({Mod,Case}, St, Mode);
+collect_cases({module,Mod,Case,Args}, St, Mode) ->
+ collect_case({Mod,Case,Args}, St, Mode);
+
+collect_cases({dir,SubDir}, St, Mode) ->
+ collect_files(SubDir, "*_SUITE", St, Mode);
+collect_cases({dir,SubDir,Pattern}, St, Mode) ->
+ collect_files(SubDir, Pattern++"*", St, Mode);
+
+collect_cases({conf,InitF,CaseList,FinMF}, St, Mode) when is_atom(InitF) ->
+ collect_cases({conf,[],{St#cc.mod,InitF},CaseList,FinMF}, St, Mode);
+collect_cases({conf,InitMF,CaseList,FinF}, St, Mode) when is_atom(FinF) ->
+ collect_cases({conf,[],InitMF,CaseList,{St#cc.mod,FinF}}, St, Mode);
+collect_cases({conf,InitMF,CaseList,FinMF}, St0, Mode) ->
+ collect_cases({conf,[],InitMF,CaseList,FinMF}, St0, Mode);
+collect_cases({conf,Props,InitF,CaseList,FinMF}, St, Mode) when is_atom(InitF) ->
+ case init_props(Props) of
+ {error,_} ->
+ {ok,[],St};
+ Props1 ->
+ collect_cases({conf,Props1,{St#cc.mod,InitF},CaseList,FinMF},
+ St, Mode)
+ end;
+collect_cases({conf,Props,InitMF,CaseList,FinF}, St, Mode) when is_atom(FinF) ->
+ case init_props(Props) of
+ {error,_} ->
+ {ok,[],St};
+ Props1 ->
+ collect_cases({conf,Props1,InitMF,CaseList,{St#cc.mod,FinF}},
+ St, Mode)
+ end;
+collect_cases({conf,Props,InitMF,CaseList,FinMF} = Conf, St, Mode) ->
+ case init_props(Props) of
+ {error,_} ->
+ {ok,[],St};
+ Props1 ->
+ Ref = make_ref(),
+ Skips = St#cc.skip,
+ Props2 = [{suite,St#cc.mod} | lists:delete(suite,Props1)],
+ Mode1 = [{Ref,Props2,undefined} | Mode],
+ case in_skip_list({St#cc.mod,Conf}, Skips) of
+ {true,Comment} -> % conf init skipped
+		    {ok,[{skip_case,{conf,Ref,InitMF,Comment},Mode1},
+			 {conf,Ref,[],FinMF}],St};
+ {true,Name,Comment} when is_atom(Name) -> % all cases skipped
+ case collect_cases(CaseList, St, Mode1) of
+ {ok,[],_St} = Empty ->
+ Empty;
+ {ok,FlatCases,St1} ->
+ Cases2Skip = FlatCases ++ [{conf,Ref,
+ keep_name(Props1),
+ FinMF}],
+ Skipped = skip_cases_upto(Ref, Cases2Skip, Comment,
+ conf, Mode1, skip_case),
+ {ok,[{skip_case,{conf,Ref,InitMF,Comment},Mode1} |
+ Skipped],St1};
+ {error,_Reason} = Error ->
+ Error
+ end;
+ {true,ToSkip,_} when is_list(ToSkip) -> % some cases skipped
+ case collect_cases(CaseList,
+ St#cc{skip=ToSkip++Skips}, Mode1) of
+ {ok,[],_St} = Empty ->
+ Empty;
+ {ok,FlatCases,St1} ->
+ {ok,[{conf,Ref,Props1,InitMF} |
+ FlatCases ++ [{conf,Ref,
+ keep_name(Props1),
+ FinMF}]],St1#cc{skip=Skips}};
+ {error,_Reason} = Error ->
+ Error
+ end;
+ false ->
+ case collect_cases(CaseList, St, Mode1) of
+ {ok,[],_St} = Empty ->
+ Empty;
+ {ok,FlatCases,St1} ->
+ {ok,[{conf,Ref,Props1,InitMF} |
+ FlatCases ++ [{conf,Ref,
+ keep_name(Props1),
+ FinMF}]],St1};
+ {error,_Reason} = Error ->
+ Error
+ end
+ end
+ end;
+
+collect_cases({make,InitMFA,CaseList,FinMFA}, St0, Mode) ->
+ case collect_cases(CaseList, St0, Mode) of
+ {ok,[],_St} = Empty -> Empty;
+ {ok,FlatCases,St} ->
+ Ref = make_ref(),
+ {ok,[{make,Ref,InitMFA}|FlatCases ++
+ [{make,Ref,FinMFA}]],St};
+ {error,_Reason} = Error -> Error
+ end;
+
+collect_cases({Module, Cases}, St, Mode) when is_list(Cases) ->
+ case (catch collect_case(Cases, St#cc{mod=Module}, [], Mode)) of
+ Result = {ok,_,_} ->
+ Result;
+ Other ->
+ {error,Other}
+ end;
+
+collect_cases({_Mod,_Case}=Spec, St, Mode) ->
+ collect_case(Spec, St, Mode);
+
+collect_cases({_Mod,_Case,_Args}=Spec, St, Mode) ->
+ collect_case(Spec, St, Mode);
+collect_cases(Case, St, Mode) when is_atom(Case), is_atom(St#cc.mod) ->
+ collect_case({St#cc.mod,Case}, St, Mode);
+collect_cases(Other, St, _Mode) ->
+ {error,{bad_subtest_spec,St#cc.mod,Other}}.
+
+collect_case({Mod,{conf,_,_,_,_}=Conf}, St, Mode) ->
+ collect_case_invoke(Mod, Conf, [], St, Mode);
+
+collect_case(MFA, St, Mode) ->
+ case in_skip_list(MFA, St#cc.skip) of
+ {true,Comment} when Comment /= make_failed ->
+ {ok,[{skip_case,{MFA,Comment},Mode}],St};
+ _ ->
+ case MFA of
+ {Mod,Case} -> collect_case_invoke(Mod, Case, MFA, St, Mode);
+ {_Mod,_Case,_Args} -> {ok,[MFA],St}
+ end
+ end.
+
+collect_case([], St, Acc, _Mode) ->
+ {ok, Acc, St};
+
+collect_case([Case | Cases], St, Acc, Mode) ->
+ {ok, FlatCases, NewSt} = collect_case({St#cc.mod, Case}, St, Mode),
+ collect_case(Cases, NewSt, Acc ++ FlatCases, Mode).
+
+collect_case_invoke(Mod, Case, MFA, St, Mode) ->
+ case get_fw_mod(undefined) of
+ undefined ->
+ case catch apply(Mod, Case, [suite]) of
+ {'EXIT',_} ->
+ {ok,[MFA],St};
+ Suite ->
+ collect_subcases(Mod, Case, MFA, St, Suite, Mode)
+ end;
+ _ ->
+ Suite = test_server_sup:framework_call(get_suite,
+ [Mod,Case],
+ []),
+ collect_subcases(Mod, Case, MFA, St, Suite, Mode)
+ end.
+
+collect_subcases(Mod, Case, MFA, St, Suite, Mode) ->
+ case Suite of
+ [] when Case == all -> {ok,[],St};
+ [] when element(1, Case) == conf -> {ok,[],St};
+ [] -> {ok,[MFA],St};
+%%%! --- START Kept for backwards compatibility ---
+%%%! Requirements are not used
+ {req,ReqList} ->
+ collect_case_deny(Mod, Case, MFA, ReqList, [], St, Mode);
+ {req,ReqList,SubCases} ->
+ collect_case_deny(Mod, Case, MFA, ReqList, SubCases, St, Mode);
+%%%! --- END Kept for backwards compatibility ---
+ {Skip,Reason} when Skip==skip; Skip==skipped ->
+ {ok,[{skip_case,{MFA,Reason},Mode}],St};
+ {error,Reason} ->
+ throw(Reason);
+ SubCases ->
+ collect_case_subcases(Mod, Case, SubCases, St, Mode)
+ end.
+
+collect_case_subcases(Mod, Case, SubCases, St0, Mode) ->
+ OldMod = St0#cc.mod,
+ case collect_cases(SubCases, St0#cc{mod=Mod}, Mode) of
+ {ok,FlatCases,St} ->
+ {ok,FlatCases,St#cc{mod=OldMod}};
+ {error,Reason} ->
+ {error,{{Mod,Case},Reason}}
+ end.
+
+collect_files(Dir, Pattern, St, Mode) ->
+ {ok,Cwd} = file:get_cwd(),
+ Dir1 = filename:join(Cwd, Dir),
+ Wc = filename:join([Dir1,Pattern++"{.erl,"++code:objfile_extension()++"}"]),
+ case catch filelib:wildcard(Wc) of
+ {'EXIT', Reason} ->
+ io:format("Could not collect files: ~p~n", [Reason]),
+ {error,{collect_fail,Dir,Pattern}};
+ Files ->
+ %% convert to module names and remove duplicates
+ Mods = lists:foldl(fun(File, Acc) ->
+ Mod = fullname_to_mod(File),
+ case lists:member(Mod, Acc) of
+ true -> Acc;
+ false -> [Mod | Acc]
+ end
+ end, [], Files),
+ Tests = [{Mod,all} || Mod <- lists:sort(Mods)],
+ collect_cases(Tests, St, Mode)
+ end.
+
+fullname_to_mod(Path) when is_list(Path) ->
+    %% If this is called with a binary, then we are probably in +fnu
+    %% mode and have found a beam file with a name encoded as latin1. We
+    %% will let this crash since such a module cannot be loaded
+    %% anyway. It should be removed or renamed!
+ list_to_atom(filename:rootname(filename:basename(Path))).
+
+collect_case_deny(Mod, Case, MFA, ReqList, SubCases, St, Mode) ->
+ case {check_deny(ReqList, St#cc.skip),SubCases} of
+ {{denied,Comment},_SubCases} ->
+ {ok,[{skip_case,{MFA,Comment},Mode}],St};
+ {granted,[]} ->
+ {ok,[MFA],St};
+ {granted,SubCases} ->
+ collect_case_subcases(Mod, Case, SubCases, St, Mode)
+ end.
+
+check_deny([Req|Reqs], DenyList) ->
+ case check_deny_req(Req, DenyList) of
+ {denied,_Comment}=Denied -> Denied;
+ granted -> check_deny(Reqs, DenyList)
+ end;
+check_deny([], _DenyList) -> granted;
+check_deny(Req, DenyList) -> check_deny([Req], DenyList).
+
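+%% A requirement {Req,Val} is denied if DenyList contains {Req,DenyVal}
+%% with Val >= DenyVal; a bare Req in the list denies unconditionally.
+%% E.g. (a sketch):
+%%   check_deny_req({mem,512}, [{mem,256}]) -> {denied,"Requirement mem=512"}
+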
+check_deny_req({Req,Val}, DenyList) ->
+ %%io:format("ValCheck ~p=~p in ~p\n", [Req,Val,DenyList]),
+ case lists:keysearch(Req, 1, DenyList) of
+ {value,{_Req,DenyVal}} when Val >= DenyVal ->
+ {denied,io_lib:format("Requirement ~p=~p", [Req,Val])};
+ _ ->
+ check_deny_req(Req, DenyList)
+ end;
+check_deny_req(Req, DenyList) ->
+ case lists:member(Req, DenyList) of
+ true -> {denied,io_lib:format("Requirement ~p", [Req])};
+ false -> granted
+ end.
+
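+%% Skip list matching, e.g. (a sketch):
+%%   in_skip_list({my_SUITE,tc1}, [{my_SUITE,"not supported"}])
+%%       -> {true,"not supported"}
+%%   in_skip_list({my_SUITE,tc1}, [{my_SUITE,[tc2,tc3],"skip"}]) -> false
+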
+in_skip_list({Mod,{conf,Props,InitMF,_CaseList,_FinMF}}, SkipList) ->
+ case in_skip_list(InitMF, SkipList) of
+ {true,_} = Yes ->
+ Yes;
+ _ ->
+ case proplists:get_value(name, Props) of
+ undefined ->
+ false;
+ Name ->
+ ToSkip =
+ lists:flatmap(
+ fun({M,{conf,SProps,_,SCaseList,_},Cmt}) when
+ M == Mod ->
+ case proplists:get_value(name, SProps) of
+ all ->
+ [{M,all,Cmt}];
+ Name ->
+ case SCaseList of
+ all ->
+ [{M,all,Cmt}];
+ _ ->
+ [{M,F,Cmt} || F <- SCaseList]
+ end;
+ _ ->
+ []
+ end;
+ (_) ->
+ []
+ end, SkipList),
+ case ToSkip of
+ [] ->
+ false;
+ _ ->
+ case lists:keysearch(all, 2, ToSkip) of
+ {value,{_,_,Cmt}} -> {true,Name,Cmt};
+ _ -> {true,ToSkip,""}
+ end
+ end
+ end
+ end;
+
+in_skip_list({Mod,Func,_Args}, SkipList) ->
+ in_skip_list({Mod,Func}, SkipList);
+in_skip_list({Mod,Func}, [{Mod,Funcs,Comment}|SkipList]) when is_list(Funcs) ->
+ case lists:member(Func, Funcs) of
+ true ->
+ {true,Comment};
+ _ ->
+ in_skip_list({Mod,Func}, SkipList)
+ end;
+in_skip_list({Mod,Func}, [{Mod,Func,Comment}|_SkipList]) ->
+ {true,Comment};
+in_skip_list({Mod,_Func}, [{Mod,Comment}|_SkipList]) ->
+ {true,Comment};
+in_skip_list({Mod,Func}, [_|SkipList]) ->
+ in_skip_list({Mod,Func}, SkipList);
+in_skip_list(_, []) ->
+ false.
+
+%% remove unnecessary properties
+init_props(Props) ->
+ case get_repeat(Props) of
+ Repeat = {_RepType,N} when N < 2 ->
+ if N == 0 ->
+ {error,{invalid_property,Repeat}};
+ true ->
+ lists:delete(Repeat, Props)
+ end;
+ _ ->
+ Props
+ end.
+
+keep_name(Props) ->
+ lists:filter(fun({name,_}) -> true;
+ ({suite,_}) -> true;
+ (_) -> false end, Props).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Node handling functions %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% get_target_info() -> #target_info
+%%
+%% Returns a record containing system information for target
+
+get_target_info() ->
+ controller_call(get_target_info).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% start_node(SlaveName, Type, Options) ->
+%% {ok, Slave} | {error, Reason}
+%%
+%% Called by test_server. See test_server:start_node/3 for details
+
+start_node(Name, Type, Options) ->
+ T = 10 * ?ACCEPT_TIMEOUT * test_server:timetrap_scale_factor(),
+ format(minor, "Attempt to start ~w node ~p with options ~p",
+ [Type, Name, Options]),
+ case controller_call({start_node,Name,Type,Options}, T) of
+ {{ok,Nodename}, Host, Cmd, Info, Warning} ->
+ format(minor,
+ "Successfully started node ~w on ~tp with command: ~ts",
+ [Nodename, Host, Cmd]),
+ format(major, "=node_start ~w", [Nodename]),
+ case Info of
+ [] -> ok;
+ _ -> format(minor, Info)
+ end,
+ case Warning of
+ [] -> ok;
+ _ ->
+ format(1, Warning),
+ format(minor, Warning)
+ end,
+ {ok, Nodename};
+ {fail,{Ret, Host, Cmd}} ->
+ format(minor,
+ "Failed to start node ~tp on ~tp with command: ~ts~n"
+ "Reason: ~p",
+ [Name, Host, Cmd, Ret]),
+ {fail,Ret};
+ {Ret, undefined, undefined} ->
+ format(minor, "Failed to start node ~tp: ~p", [Name,Ret]),
+ Ret;
+ {Ret, Host, Cmd} ->
+ format(minor,
+ "Failed to start node ~tp on ~tp with command: ~ts~n"
+ "Reason: ~p",
+ [Name, Host, Cmd, Ret]),
+ Ret
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% wait_for_node(Node) -> ok | {error,timeout}
+%%
+%% Wait for a slave/peer node which has been started with
+%% the option {wait,false}. This function returns when
+%% the new node has contacted test_server_ctrl again.
+
+wait_for_node(Slave) ->
+ T = 10000 * test_server:timetrap_scale_factor(),
+ case catch controller_call({wait_for_node,Slave},T) of
+ {'EXIT',{timeout,_}} -> {error,timeout};
+ ok -> ok
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_release_available(Release) -> true | false
+%% Release -> string()
+%%
+%% Test if a release (such as "r10b") is available to be
+%% started using start_node/3.
+
+is_release_available(Release) ->
+ controller_call({is_release_available,Release}).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% stop_node(Name) -> ok | {error,Reason}
+%%
+%% Clean up - test_server will stop this node
+
+stop_node(Slave) ->
+ controller_call({stop_node,Slave}).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% DEBUGGER INTERFACE %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+i() ->
+ hformat("Pid", "Initial Call", "Current Function", "Reducts", "Msgs"),
+ Line=lists:duplicate(27, "-"),
+ hformat(Line, Line, Line, Line, Line),
+ display_info(processes(), 0, 0).
+
+p(A,B,C) ->
+ pinfo(ts_pid(A,B,C)).
+p(X) when is_atom(X) ->
+ pinfo(whereis(X));
+p({A,B,C}) ->
+ pinfo(ts_pid(A,B,C));
+p(X) ->
+ pinfo(X).
+
+t() ->
+ t(wall_clock).
+t(X) ->
+ element(1, statistics(X)).
+
+pi(Item,X) ->
+ lists:keysearch(Item,1,p(X)).
+pi(Item,A,B,C) ->
+ lists:keysearch(Item,1,p(A,B,C)).
+
+%% c:pid/3
+ts_pid(X,Y,Z) when is_integer(X), is_integer(Y), is_integer(Z) ->
+ list_to_pid("<" ++ integer_to_list(X) ++ "." ++
+ integer_to_list(Y) ++ "." ++
+ integer_to_list(Z) ++ ">").
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% display_info(Pids, Reductions, Messages) -> void
+%% Pids = [pid(),...]
+%% Reductions = integer()
+%% Messages = integer()
+%%
+%% Displays info, similar to c:i(), about the processes in the list Pids.
+%% Also counts the total number of reductions and msgs for the listed
+%% processes, if called with Reductions = Messages = 0.
+
+display_info([Pid|T], R, M) ->
+ case pinfo(Pid) of
+ undefined ->
+ display_info(T, R, M);
+ Info ->
+ Call = fetch(initial_call, Info),
+ Curr = case fetch(current_function, Info) of
+ {Mod,F,Args} when is_list(Args) ->
+ {Mod,F,length(Args)};
+ Other ->
+ Other
+ end,
+ Reds = fetch(reductions, Info),
+ LM = length(fetch(messages, Info)),
+ pformat(io_lib:format("~w", [Pid]),
+ io_lib:format("~w", [Call]),
+ io_lib:format("~w", [Curr]), Reds, LM),
+ display_info(T, R+Reds, M + LM)
+ end;
+display_info([], R, M) ->
+ Line=lists:duplicate(27, "-"),
+ hformat(Line, Line, Line, Line, Line),
+ pformat("Total", "", "", R, M).
+
+hformat(A1, A2, A3, A4, A5) ->
+ io:format("~-10s ~-27s ~-27s ~8s ~4s~n", [A1,A2,A3,A4,A5]).
+
+pformat(A1, A2, A3, A4, A5) ->
+ io:format("~-10s ~-27s ~-27s ~8w ~4w~n", [A1,A2,A3,A4,A5]).
+
+fetch(Key, Info) ->
+ case lists:keysearch(Key, 1, Info) of
+ {value, {_, Val}} ->
+ Val;
+ _ ->
+ 0
+ end.
+
+pinfo(P) ->
+ Node = node(),
+ case node(P) of
+ Node ->
+ process_info(P);
+ _ ->
+ rpc:call(node(P),erlang,process_info,[P])
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Support functions for COVER %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%
+%% A module is included in the cover analysis if
+%% - it belongs to the tested application and is not listed in the
+%% {exclude,List} part of the App.cover file
+%% - it does not belong to the application, but is listed in the
+%% {include,List} part of the App.cover file
+%% - it does not belong to the application, but is listed in the
+%% {cross,[{Tag,List}]} part of the App.cover file
+%%
+%% The modules listed in the 'cross' part of the cover file are
+%% modules that are heavily used by other tests than the one where
+%% they are explicitly tested. They should then be listed as 'cross'
+%% in the cover file for the test where they are used but do not
+%% belong.
+%%
+%% After all tests are completed, these modules can be analysed
+%% with coverage data from all tests where they are compiled - see
+%% cross_cover_analyse/2. The result is stored in a file called
+%% cross_cover.html in the run.<timestamp> directory of the
+%% test the modules belong to.
+%%
+%% Example:
+%% If the module m1 belongs to system s1 but is heavily used also in
+%% the tests for another system s2, then the cover files for the two
+%% systems could be like this:
+%%
+%% s1.cover:
+%% {include,[m1]}.
+%%
+%% s2.cover:
+%% {include,[....]}. % modules belonging to system s2
+%% {cross,[{s1,[m1]}]}.
+%%
+%% When the tests for both s1 and s2 are completed, run
+%% cross_cover_analyse(Level,[{s1,S1LogDir},{s2,S2LogDir}]), and
+%% the accumulated cover data for m1 will be written to
+%% S1LogDir/[run.<timestamp>/]cross_cover.html
+%%
+%% S1LogDir and S2LogDir are either the run.<timestamp> directories
+%% for the two tests, or the parent directory of these, in which case
+%% the latest run.<timestamp> directory will be chosen.
+%%
+%% Note that the m1 module will also be presented in the normal
+%% coverage log for s1 (due to the include statement in s1.cover), but
+%% that only includes the coverage achieved by the s1 test itself.
+%%
+%% The Tag in the 'cross' statement in the cover file has no other
+%% purpose than mapping the list of modules ([m1] in the example
+%% above) to the correct log directory where it should be included in
+%% the cross_cover.html file (S1LogDir in the example above).
+%% I.e. the value of the Tag has no meaning, it could be foo as well
+%% as s1 above, as long as the same Tag is used in the cover file and
+%% in the call to cross_cover_analyse/2.
+
+
+%% Cover compilation
+%% The compilation is executed on the target node
+start_cover(#cover{}=CoverInfo) ->
+ cover_compile(CoverInfo);
+start_cover({log,CoverLogDir}=CoverInfo) ->
+ %% Cover is controlled by the framework - here's the log
+ put(test_server_cover_log_dir,CoverLogDir),
+ {ok,CoverInfo}.
+
+cover_compile(CoverInfo) ->
+ test_server:cover_compile(CoverInfo).
+
+%% Read the coverfile for an application and return a list of modules
+%% that are members of the application but shall not be compiled
+%% (Exclude), a list of modules that are not members of the
+%% application but shall be compiled (Include), and the list of
+%% cross cover specifications (Cross).
+read_cover_file(none) ->
+ {[],[],[]};
+read_cover_file(CoverFile) ->
+ case file:consult(CoverFile) of
+ {ok,List} ->
+ case check_cover_file(List, [], [], []) of
+ {ok,Exclude,Include,Cross} -> {Exclude,Include,Cross};
+ error ->
+ io:fwrite("Faulty format of CoverFile ~p\n", [CoverFile]),
+ {[],[],[]}
+ end;
+ {error,Reason} ->
+ io:fwrite("Can't read CoverFile ~ts\nReason: ~p\n",
+ [CoverFile,Reason]),
+ {[],[],[]}
+ end.
+
+check_cover_file([{exclude,all}|Rest], _, Include, Cross) ->
+ check_cover_file(Rest, all, Include, Cross);
+check_cover_file([{exclude,Exclude}|Rest], _, Include, Cross) ->
+ case lists:all(fun(M) -> is_atom(M) end, Exclude) of
+ true ->
+ check_cover_file(Rest, Exclude, Include, Cross);
+ false ->
+ error
+ end;
+check_cover_file([{include,Include}|Rest], Exclude, _, Cross) ->
+ case lists:all(fun(M) -> is_atom(M) end, Include) of
+ true ->
+ check_cover_file(Rest, Exclude, Include, Cross);
+ false ->
+ error
+ end;
+check_cover_file([{cross,Cross}|Rest], Exclude, Include, _) ->
+ case check_cross(Cross) of
+ true ->
+ check_cover_file(Rest, Exclude, Include, Cross);
+ false ->
+ error
+ end;
+check_cover_file([], Exclude, Include, Cross) ->
+ {ok,Exclude,Include,Cross}.
+
+check_cross([{Tag,Modules}|Rest]) ->
+ case lists:all(fun(M) -> is_atom(M) end, [Tag|Modules]) of
+ true ->
+ check_cross(Rest);
+ false ->
+ false
+ end;
+check_cross([]) ->
+ true.
+
+
+%% Cover analysis, per application
+%% This analysis is executed on the target node once the test is
+%% completed for an application. This is not the same as the cross
+%% cover analysis, which can be executed on any node after the tests
+%% are finished.
+%%
+%% This per application analysis writes the file cover.html in the
+%% application's run.<timestamp> directory.
+stop_cover(#cover{}=CoverInfo, TestDir) ->
+ cover_analyse(CoverInfo, TestDir);
+stop_cover(_CoverInfo, _TestDir) ->
+ %% Cover is probably controlled by the framework
+ ok.
+
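+%% make_relative(AbsDir, VsDir) -> AbsDir expressed relative to VsDir.
+%% E.g. (a sketch):
+%%   make_relative("/ldisk/logs/run.1/cover", "/ldisk/logs/run.1")
+%%       -> "cover"
+%%   make_relative("/ldisk/logs/run.1/cover", "/ldisk/logs/run.2")
+%%       -> "../run.1/cover"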
+make_relative(AbsDir, VsDir) ->
+ DirTokens = filename:split(AbsDir),
+ VsTokens = filename:split(VsDir),
+ filename:join(make_relative1(DirTokens, VsTokens)).
+
+make_relative1([T | DirTs], [T | VsTs]) ->
+ make_relative1(DirTs, VsTs);
+make_relative1(Last = [_File], []) ->
+ Last;
+make_relative1(Last = [_File], VsTs) ->
+ Ups = ["../" || _ <- VsTs],
+ Ups ++ Last;
+make_relative1(DirTs, []) ->
+ DirTs;
+make_relative1(DirTs, VsTs) ->
+ Ups = ["../" || _ <- VsTs],
+ Ups ++ DirTs.
+
+
+cover_analyse(CoverInfo, TestDir) ->
+ write_default_cross_coverlog(TestDir),
+
+ {ok,CoverLog} = open_html_file(filename:join(TestDir, ?coverlog_name)),
+ write_coverlog_header(CoverLog),
+ #cover{app=App,
+ file=CoverFile,
+ excl=Excluded,
+ cross=Cross} = CoverInfo,
+ io:fwrite(CoverLog, "<h1>Coverage for application '~w'</h1>\n", [App]),
+ io:fwrite(CoverLog,
+ "<p><a href=\"~ts\">Coverdata collected over all tests</a></p>",
+ [?cross_coverlog_name]),
+
+ io:fwrite(CoverLog, "<p>CoverFile: <code>~tp</code>\n", [CoverFile]),
+ write_cross_cover_info(TestDir,Cross),
+
+ case length(cover:imported_modules()) of
+ Imps when Imps > 0 ->
+ io:fwrite(CoverLog,
+ "<p>Analysis includes data from ~w imported module(s).\n",
+ [Imps]);
+ _ ->
+ ok
+ end,
+
+ io:fwrite(CoverLog, "<p>Excluded module(s): <code>~tp</code>\n", [Excluded]),
+
+ Coverage = test_server:cover_analyse(TestDir, CoverInfo),
+ write_binary_file(filename:join(TestDir,?raw_coverlog_name),
+ term_to_binary(Coverage)),
+
+ case lists:filter(fun({_M,{_,_,_}}) -> false;
+ (_) -> true
+ end, Coverage) of
+ [] ->
+ ok;
+ Bad ->
+ io:fwrite(CoverLog, "<p>Analysis failed for ~w module(s): "
+ "<code>~w</code>\n",
+ [length(Bad),[BadM || {BadM,{_,_Why}} <- Bad]])
+ end,
+
+ TotPercent = write_cover_result_table(CoverLog, Coverage),
+ write_binary_file(filename:join(TestDir, ?cover_total),
+ term_to_binary(TotPercent)).
+
+%% Cover analysis - accumulated over multiple tests
+%% This can be executed on any node after all tests are finished.
+%% Analyse = overview | details
+%% TagDirs = [{Tag,Dir}]
+%% Tag = atom(), identifier
+%% Dir = string(), the log directory for Tag, it can be a
+%% run.<timestamp> directory or the parent directory of
+%% such (in which case the latest run.<timestamp> directory
+%% is used)
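+%%
+%% E.g. (a sketch, reusing the s1/s2 example in the cover support
+%% section above):
+%%   cross_cover_analyse(details, [{s1,"logs/s1_test"},
+%%                                 {s2,"logs/s2_test"}])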
+cross_cover_analyse(Analyse, TagDirs0) ->
+ TagDirs = get_latest_run_dirs(TagDirs0),
+ TagMods = get_all_cross_info(TagDirs,[]),
+ TagDirMods = add_cross_modules(TagMods,TagDirs),
+ CoverdataFiles = get_coverdata_files(TagDirMods),
+ lists:foreach(fun(CDF) -> cover:import(CDF) end, CoverdataFiles),
+ io:fwrite("Cover analysing...\n", []),
+ DetailsFun =
+ case Analyse of
+ details ->
+ fun(Dir,M) ->
+ OutFile = filename:join(Dir,
+ atom_to_list(M) ++
+ ".CROSS_COVER.html"),
+ case cover:analyse_to_file(M, OutFile, [html]) of
+ {ok,_} ->
+ {file,OutFile};
+ Error ->
+ Error
+ end
+ end;
+ _ ->
+ fun(_,_) -> undefined end
+ end,
+ Coverage = analyse_tests(TagDirMods, DetailsFun, []),
+ cover:stop(),
+ write_cross_cover_logs(Coverage,TagDirMods).
+
+write_cross_cover_info(_Dir,[]) ->
+ ok;
+write_cross_cover_info(Dir,Cross) ->
+ write_binary_file(filename:join(Dir,?cross_cover_info),
+ term_to_binary(Cross)).
+
+%% For each test from which there are cross cover analysed
+%% modules, write a cross cover log (cross_cover.html).
+write_cross_cover_logs([{Tag,Coverage}|T],TagDirMods) ->
+ case lists:keyfind(Tag,1,TagDirMods) of
+ {_,Dir,Mods} when Mods=/=[] ->
+ write_binary_file(filename:join(Dir,?raw_cross_coverlog_name),
+ term_to_binary(Coverage)),
+ CoverLogName = filename:join(Dir,?cross_coverlog_name),
+ {ok,CoverLog} = open_html_file(CoverLogName),
+ write_coverlog_header(CoverLog),
+ io:fwrite(CoverLog,
+ "<h1>Coverage results for \'~w\' from all tests</h1>\n",
+ [Tag]),
+ write_cover_result_table(CoverLog, Coverage),
+ io:fwrite("Written file ~tp\n", [CoverLogName]);
+ _ ->
+ ok
+ end,
+ write_cross_cover_logs(T,TagDirMods);
+write_cross_cover_logs([],_) ->
+ io:fwrite("done\n", []).
+
+%% Get the latest run.<timestamp> directories
+get_latest_run_dirs([{Tag,Dir}|Rest]) ->
+ [{Tag,get_latest_run_dir(Dir)} | get_latest_run_dirs(Rest)];
+get_latest_run_dirs([]) ->
+ [].
+
+get_latest_run_dir(Dir) ->
+ case filelib:wildcard(filename:join(Dir,"run.[1-2]*")) of
+ [] ->
+ Dir;
+ [H|T] ->
+ get_latest_dir(T,H)
+ end.
+
+get_latest_dir([H|T],Latest) when H>Latest ->
+ get_latest_dir(T,H);
+get_latest_dir([_|T],Latest) ->
+ get_latest_dir(T,Latest);
+get_latest_dir([],Latest) ->
+ Latest.
+
+get_all_cross_info([{_Tag,Dir}|Rest],Acc) ->
+ case file:read_file(filename:join(Dir,?cross_cover_info)) of
+ {ok,Bin} ->
+ TagMods = binary_to_term(Bin),
+ get_all_cross_info(Rest,TagMods++Acc);
+ _ ->
+ get_all_cross_info(Rest,Acc)
+ end;
+get_all_cross_info([],Acc) ->
+ Acc.
+
+%% Associate the cross cover modules with their log directories
+add_cross_modules(TagMods,TagDirs)->
+ do_add_cross_modules(TagMods,[{Tag,Dir,[]} || {Tag,Dir} <- TagDirs]).
+do_add_cross_modules([{Tag,Mods1}|TagMods],TagDirMods)->
+ NewTagDirMods =
+ case lists:keytake(Tag,1,TagDirMods) of
+ {value,{Tag,Dir,Mods},Rest} ->
+ [{Tag,Dir,lists:umerge(lists:sort(Mods1),Mods)}|Rest];
+ false ->
+ TagDirMods
+ end,
+ do_add_cross_modules(TagMods,NewTagDirMods);
+do_add_cross_modules([],TagDirMods) ->
+ %% Just to get the modules in the same order as in the normal cover log
+ [{Tag,Dir,lists:reverse(Mods)} || {Tag,Dir,Mods} <- TagDirMods].
+
+%% Find all exported coverdata files.
+get_coverdata_files(TagDirMods) ->
+ lists:flatmap(
+ fun({_,LatestDir,_}) ->
+ filelib:wildcard(filename:join(LatestDir,"all.coverdata"))
+ end,
+ TagDirMods).
+
+
+%% For each test, analyse all modules
+%% Used for cross cover analysis.
+analyse_tests([{Tag,LastTest,Modules}|T], DetailsFun, Acc) ->
+ Cov = analyse_modules(LastTest, Modules, DetailsFun, []),
+ analyse_tests(T, DetailsFun, [{Tag,Cov}|Acc]);
+analyse_tests([], _DetailsFun, Acc) ->
+ Acc.
+
+%% Analyse each module
+%% Used for cross cover analysis.
+analyse_modules(Dir, [M|Modules], DetailsFun, Acc) ->
+ {ok,{M,{Cov,NotCov}}} = cover:analyse(M, module),
+ Acc1 = [{M,{Cov,NotCov,DetailsFun(Dir,M)}}|Acc],
+ analyse_modules(Dir, Modules, DetailsFun, Acc1);
+analyse_modules(_Dir, [], _DetailsFun, Acc) ->
+ Acc.
+
+
+%% Support functions for writing the cover logs (both cross and normal)
+write_coverlog_header(CoverLog) ->
+ case catch io:put_chars(CoverLog,html_header("Coverage results")) of
+ {'EXIT',Reason} ->
+ io:format("\n\nERROR: Could not write normal heading in coverlog.\n"
+ "CoverLog: ~w\n"
+ "Reason: ~p\n",
+ [CoverLog,Reason]),
+ io:format(CoverLog,"<html><body>\n", []);
+ _ ->
+ ok
+ end.
+
+
+format_analyse(M,Cov,NotCov,undefined) ->
+ io_lib:fwrite("<tr><td>~w</td>"
+ "<td align=right>~w %</td>"
+ "<td align=right>~w</td>"
+ "<td align=right>~w</td></tr>\n",
+ [M,pc(Cov,NotCov),Cov,NotCov]);
+format_analyse(M,Cov,NotCov,{file,File}) ->
+ io_lib:fwrite("<tr><td><a href=\"~ts\">~w</a></td>"
+ "<td align=right>~w %</td>"
+ "<td align=right>~w</td>"
+ "<td align=right>~w</td></tr>\n",
+ [uri_encode(filename:basename(File)),
+ M,pc(Cov,NotCov),Cov,NotCov]);
+format_analyse(M,Cov,NotCov,{lines,Lines}) ->
+ CoverOutName = atom_to_list(M)++".COVER.html",
+ {ok,CoverOut} = open_html_file(CoverOutName),
+ write_not_covered(CoverOut,M,Lines),
+ ok = file:close(CoverOut),
+ io_lib:fwrite("<tr><td><a href=\"~ts\">~w</a></td>"
+ "<td align=right>~w %</td>"
+ "<td align=right>~w</td>"
+ "<td align=right>~w</td></tr>\n",
+ [uri_encode(CoverOutName),M,pc(Cov,NotCov),Cov,NotCov]);
+format_analyse(M,Cov,NotCov,{error,_}) ->
+ io_lib:fwrite("<tr><td>~w</td>"
+ "<td align=right>~w %</td>"
+ "<td align=right>~w</td>"
+ "<td align=right>~w</td></tr>\n",
+ [M,pc(Cov,NotCov),Cov,NotCov]).
+
+
+pc(0,0) ->
+ 0;
+pc(Cov,NotCov) ->
+ round(Cov/(Cov+NotCov)*100).
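+
+%% Examples: pc(3,1) -> 75, pc(1,2) -> 33, pc(0,0) -> 0.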
+
+
+write_not_covered(CoverOut,M,Lines) ->
+ io:put_chars(CoverOut,html_header("Coverage results for "++atom_to_list(M))),
+ io:fwrite(CoverOut,
+ "The following lines in module ~w are not covered:\n"
+ "<table border=3 cellpadding=5>\n"
+ "<th>Line Number</th>\n",
+ [M]),
+ lists:foreach(fun({{_M,Line},{0,1}}) ->
+ io:fwrite(CoverOut,"<tr><td>~w</td></tr>\n", [Line]);
+ (_) ->
+ ok
+ end,
+ Lines),
+ io:put_chars(CoverOut,"</table>\n</body>\n</html>\n").
+
+
+write_default_coverlog(TestDir) ->
+ {ok,CoverLog} = open_html_file(filename:join(TestDir,?coverlog_name)),
+ write_coverlog_header(CoverLog),
+ io:put_chars(CoverLog,"Cover tool is not used\n</body></html>\n"),
+ ok = file:close(CoverLog).
+
+write_default_cross_coverlog(TestDir) ->
+ {ok,CrossCoverLog} =
+ open_html_file(filename:join(TestDir,?cross_coverlog_name)),
+ write_coverlog_header(CrossCoverLog),
+ io:put_chars(CrossCoverLog,
+ ["No cross cover modules exist for this application,",
+ xhtml("<br>","<br />"),
+ "or cross cover analysis is not completed.\n"
+ "</body></html>\n"]),
+ ok = file:close(CrossCoverLog).
+
+write_cover_result_table(CoverLog,Coverage) ->
+ io:fwrite(CoverLog,
+ "<p><table border=3 cellpadding=5>\n"
+ "<tr><th>Module</th><th>Covered (%)</th><th>Covered (Lines)</th>"
+ "<th>Not covered (Lines)</th>\n",
+ []),
+ {TotCov,TotNotCov} =
+ lists:foldl(fun({M,{Cov,NotCov,Details}},{AccCov,AccNotCov}) ->
+ Str = format_analyse(M,Cov,NotCov,Details),
+ io:fwrite(CoverLog,"~ts", [Str]),
+ {AccCov+Cov,AccNotCov+NotCov};
+ ({_M,{error,_Reason}},{AccCov,AccNotCov}) ->
+ {AccCov,AccNotCov}
+ end,
+ {0,0},
+ Coverage),
+ TotPercent = pc(TotCov,TotNotCov),
+ io:fwrite(CoverLog,
+ "<tr><th align=left>Total</th><th align=right>~w %</th>"
+ "<th align=right>~w</th><th align=right>~w</th></tr>\n"
+ "</table>\n"
+ "</body>\n"
+ "</html>\n",
+ [TotPercent,TotCov,TotNotCov]),
+ ok = file:close(CoverLog),
+ TotPercent.
+
+
+%%%-----------------------------------------------------------------
+%%% Support functions for writing files
+
+%% HTML files are always written with utf8 encoding
+html_header(Title) ->
+ ["<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n"
+ "<!-- autogenerated by '", atom_to_list(?MODULE), "'. -->\n"
+ "<html>\n"
+ "<head>\n"
+ "<title>", Title, "</title>\n"
+ "<meta http-equiv=\"cache-control\" content=\"no-cache\"></meta>\n"
+ "<meta http-equiv=\"content-type\" content=\"text/html; "
+ "charset=utf-8\"></meta>\n"
+ "</head>\n"
+ "<body bgcolor=\"white\" text=\"black\" "
+ "link=\"blue\" vlink=\"purple\" alink=\"red\">\n"].
+
+open_html_file(File) ->
+ open_utf8_file(File).
+
+open_html_file(File,Opts) ->
+ open_utf8_file(File,Opts).
+
+write_html_file(File,Content) ->
+ write_file(File,Content,utf8).
+
+%% The 'major' log file, which is a pure text file, is also written
+%% with utf8 encoding
+open_utf8_file(File) ->
+ case file:open(File,AllOpts=[write,{encoding,utf8}]) of
+ {error,Reason} -> {error,{Reason,{File,AllOpts}}};
+ Result -> Result
+ end.
+
+open_utf8_file(File,Opts) ->
+ case file:open(File,AllOpts=[{encoding,utf8}|Opts]) of
+ {error,Reason} -> {error,{Reason,{File,AllOpts}}};
+ Result -> Result
+ end.
+
+%% Write a file with specified encoding
+write_file(File,Content,latin1) ->
+ file:write_file(File,Content);
+write_file(File,Content,utf8) ->
+ write_binary_file(File,unicode:characters_to_binary(Content)).
+
+%% Write a file with only binary data
+write_binary_file(File,Content) ->
+ file:write_file(File,Content).
+
+%% Encoding of hyperlinks in HTML files
+uri_encode(File) ->
+ Encoding = file:native_name_encoding(),
+ uri_encode(File,Encoding).
+
+uri_encode(File,Encoding) ->
+ Components = filename:split(File),
+ filename:join([uri_encode_comp(C,Encoding) || C <- Components]).
+
+%% Encode the reference to a "filename of the given encoding" so it
+%% can be inserted in a utf8 encoded HTML file.
+%% This does almost the same as http_uri:encode/1, except
+%% 1. it does not convert @, : and / (in order to preserve nodename and c:/)
+%% 2. if the file name is in latin1, it also encodes all
+%% characters >127 - i.e. latin1 but not ASCII.
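+%% For example (illustrative inputs):
+%%   uri_encode_comp("my file.html", utf8) -> "my%20file.html"
+%%   uri_encode_comp("a" ++ [246] ++ ".html", latin1) -> "a%F6.html"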
+uri_encode_comp([Char|Chars],Encoding) ->
+ Reserved = sets:is_element(Char, reserved()),
+ case (Char>127 andalso Encoding==latin1) orelse Reserved of
+ true ->
+ [ $% | http_util:integer_to_hexlist(Char)] ++
+ uri_encode_comp(Chars,Encoding);
+ false ->
+ [Char | uri_encode_comp(Chars,Encoding)]
+ end;
+uri_encode_comp([],_) ->
+ [].
+
+%% Copied from http_uri.erl, but slightly modified
+%% (not converting @, : and /)
+reserved() ->
+ sets:from_list([$;, $&, $=, $+, $,, $?,
+ $#, $[, $], $<, $>, $\", ${, $}, $|,
+ $\\, $', $^, $%, $ ]).
+
+encoding(File) ->
+ case epp:read_encoding(File) of
+ none ->
+ epp:default_encoding();
+ E ->
+ E
+ end.
diff --git a/lib/common_test/src/test_server_gl.erl b/lib/common_test/src/test_server_gl.erl
new file mode 100644
index 0000000000..233e59172b
--- /dev/null
+++ b/lib/common_test/src/test_server_gl.erl
@@ -0,0 +1,350 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012-2015. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% This module implements group leader processes for test cases.
+%% Each group leader process handles output to the minor log file for
+%% a test case, and calls test_server_io to handle output to the common
+%% log files. The group leader processes are created and destroyed
+%% through the test_server_io module/process.
+
+-module(test_server_gl).
+-export([start_link/0,stop/1,set_minor_fd/3,unset_minor_fd/1,
+ get_tc_supervisor/1,print/4,set_props/2]).
+
+-export([init/1,handle_call/3,handle_cast/2,handle_info/2,terminate/2]).
+
+-record(st, {tc_supervisor :: 'none'|pid(), %Test case supervisor
+ tc :: mfa() | 'undefined', %Current test case MFA
+ minor :: 'none'|pid(), %Minor fd
+ minor_monitor, %Monitor ref for minor fd
+ capture :: 'none'|pid(), %Capture output
+ reject_io :: boolean(), %Reject I/O requests...
+ permit_io, %... and exceptions
+ auto_nl=true :: boolean(), %Automatically add NL
+ levels, %{Stdout,Major,Minor}
+ escape_chars=true %Switch escaping HTML on/off
+ }).
+
+%% start_link()
+%% Start a new group leader process. Only to be called by
+%% the test_server_io process.
+
+start_link() ->
+ case gen_server:start_link(?MODULE, [], []) of
+ {ok,Pid} ->
+ {ok,Pid};
+ Other ->
+ Other
+ end.
+
+
+%% stop(Pid)
+%% Stop a group leader process. Only to be called by
+%% the test_server_io process.
+
+stop(GL) ->
+ gen_server:cast(GL, stop).
+
+
+%% set_minor_fd(GL, Fd, MFA)
+%% GL = Pid for the group leader process
+%% Fd = file descriptor for the minor log file
+%% MFA = {M,F,A} for the test case owning the minor log file
+%%
+%% Register the file descriptor for the minor log file. Subsequent
+%% IO directed to the minor log file will be written to this file.
+%% Also register the currently executing process as the test case
+%% supervisor corresponding to this group leader process.
+
+set_minor_fd(GL, Fd, MFA) ->
+ req(GL, {set_minor_fd,Fd,MFA,self()}).
+
+
+%% unset_minor_fd(GL)
+%% GL = Pid for the group leader process
+%%
+%% Unregister the file descriptor for the minor log file (typically
+%% because the test case has ended and the minor log file is about
+%% to be closed). Subsequent IO (for example, by a process spawned
+%% by the testcase process) will go to the unexpected_io log file.
+
+unset_minor_fd(GL) ->
+ req(GL, unset_minor_fd).
+
+
+%% get_tc_supervisor(GL)
+%% GL = Pid for the group leader process
+%%
+%% Return the Pid for the process that supervises the test case
+%% that has this group leader.
+
+get_tc_supervisor(GL) ->
+ req(GL, get_tc_supervisor).
+
+
+%% print(GL, Detail, Msg, Printer) -> ok
+%% GL = Pid for the group leader process
+%% Detail = integer() | minor | major | html | stdout
+%% Msg = iodata()
+%% Printer = internal | pid()
+%%
+%% Print a message to one of the log files. If Detail is an integer,
+%% it will be compared to the levels (set by set_props/2) to
+%% determine which log file(s) are to receive the output. If
+%% Detail is an atom, the value of the atom will directly determine
+%% which log file to use. IO to the minor log file will be handled
+%% directly by this group leader process (printing to the file set by
+%% set_minor_fd/3), and all other IO will be handled by calling
+%% test_server_io:print/3.
+
+print(GL, Detail, Msg, Printer) ->
+ req(GL, {print,Detail,Msg,Printer}).
+
+
+%% set_props(GL, [PropertyTuple])
+%% GL = Pid for the group leader process
+%% PropertyTuple = {levels,{Show,Major,Minor}} |
+%% {auto_nl,boolean()} |
+%% {reject_io_reqs,boolean()}
+%%
+%% Set properties for this group leader process.
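+%%
+%% Example (the values shown are the defaults from init/1):
+%%   set_props(GL, [{levels,{1,19,10}},{auto_nl,true},{reject_io_reqs,false}])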
+
+set_props(GL, PropList) ->
+ req(GL, {set_props,PropList}).
+
+%%% Internal functions.
+
+init([]) ->
+ {ok,#st{tc_supervisor=none,
+ minor=none,
+ minor_monitor=none,
+ capture=none,
+ reject_io=false,
+ permit_io=gb_sets:empty(),
+ auto_nl=true,
+ levels={1,19,10},
+ escape_chars=true
+ }}.
+
+req(GL, Req) ->
+ gen_server:call(GL, Req, infinity).
+
+handle_call(get_tc_supervisor, _From, #st{tc_supervisor=Pid}=St) ->
+ {reply,Pid,St};
+handle_call({set_minor_fd,Fd,MFA,Supervisor}, _From, St) ->
+ Ref = erlang:monitor(process, Fd),
+ {reply,ok,St#st{tc=MFA,minor=Fd,minor_monitor=Ref,
+ tc_supervisor=Supervisor}};
+handle_call(unset_minor_fd, _From, St) ->
+ {reply,ok,St#st{minor=none,tc_supervisor=none}};
+handle_call({set_props,PropList}, _From, St) ->
+ {reply,ok,do_set_props(PropList, St)};
+handle_call({print,Detail,Msg,Printer}, {From,_}, St) ->
+ output(Detail, Msg, Printer, From, St),
+ {reply,ok,St}.
+
+handle_cast(stop, St) ->
+ {stop,normal,St}.
+
+handle_info({'DOWN',Ref,process,_,Reason}=D, #st{minor_monitor=Ref}=St) ->
+ case Reason of
+ normal -> ok;
+ _ ->
+ Data = io_lib:format("=== WARNING === TC: ~w\n"
+ "Got down from minor Fd ~w: ~w\n\n",
+ [St#st.tc,St#st.minor,D]),
+ test_server_io:print_unexpected(Data)
+ end,
+ {noreply,St#st{minor=none,minor_monitor=none}};
+handle_info({permit_io,Pid}, #st{permit_io=P}=St) ->
+ {noreply,St#st{permit_io=gb_sets:add(Pid, P)}};
+handle_info({capture,Cap0}, St) ->
+ Cap = case Cap0 of
+ false -> none;
+ Pid when is_pid(Cap0) -> Pid
+ end,
+ {noreply,St#st{capture=Cap}};
+handle_info({io_request,From,ReplyAs,Req}=IoReq, St) ->
+ try io_req(Req, From, St) of
+ passthrough ->
+ group_leader() ! IoReq;
+ {EscapeHtml,Data} ->
+ case is_io_permitted(From, St) of
+ false ->
+ ok;
+ true ->
+ case St of
+ #st{capture=none} ->
+ ok;
+ #st{capture=CapturePid} ->
+ CapturePid ! {captured,Data}
+ end,
+ case EscapeHtml andalso St#st.escape_chars of
+ true ->
+ output(minor, test_server_ctrl:escape_chars(Data),
+ From, From, St);
+ false ->
+ output(minor, Data, From, From, St)
+ end
+ end,
+ From ! {io_reply,ReplyAs,ok}
+ catch
+ _:_ ->
+ From ! {io_reply,ReplyAs,{error,arguments}}
+ end,
+ {noreply,St};
+handle_info({structured_io,ClientPid,{Detail,Str}}, St) ->
+ output(Detail, Str, ClientPid, ClientPid, St),
+ {noreply,St};
+handle_info({printout,Detail,["$tc_html",Format],Args}, St) ->
+ Str = io_lib:format(Format, Args),
+ output(Detail, ["$tc_html",Str], internal, none, St),
+ {noreply,St};
+handle_info({printout,Detail,Fun}, St) when is_function(Fun)->
+ output(Detail, Fun, internal, none, St),
+ {noreply,St};
+handle_info({printout,Detail,Format,Args}, St) ->
+ Str = io_lib:format(Format, Args),
+ if not St#st.escape_chars ->
+ output(Detail, ["$tc_html",Str], internal, none, St);
+ true ->
+ output(Detail, Str, internal, none, St)
+ end,
+ {noreply,St};
+handle_info(Msg, #st{tc_supervisor=Pid}=St) when is_pid(Pid) ->
+ %% The process overseeing the testcase process also used to be
+ %% the group leader; thus, it is widely expected that it can be
+ %% reached by sending a message to the group leader. Therefore
+ %% we'll need to forward any unrecognized messages to the test
+ %% case supervisor.
+ Pid ! Msg,
+ {noreply,St};
+handle_info(_Msg, #st{}=St) ->
+ %% There is no known supervisor process. Ignore this message.
+ {noreply,St}.
+
+terminate(_, _) ->
+ ok.
+
+do_set_props([{levels,Levels}|Ps], St) ->
+ do_set_props(Ps, St#st{levels=Levels});
+do_set_props([{auto_nl,AutoNL}|Ps], St) ->
+ do_set_props(Ps, St#st{auto_nl=AutoNL});
+do_set_props([{reject_io_reqs,Bool}|Ps], St) ->
+ do_set_props(Ps, St#st{reject_io=Bool});
+do_set_props([], St) -> St.
+
+io_req({put_chars,Enc,Str}, _, _) when Enc =:= latin1; Enc =:= unicode ->
+ case Str of
+ ["$tc_html",Str0] ->
+ {false,unicode:characters_to_list(Str0, Enc)};
+ _ ->
+ {true,unicode:characters_to_list(Str, Enc)}
+ end;
+io_req({put_chars,Encoding,Mod,Func,[Format,Args]}, _, _) ->
+ case Format of
+ ["$tc_html",Format0] ->
+ Str = Mod:Func(Format0, Args),
+ {false,unicode:characters_to_list(Str, Encoding)};
+ _ ->
+ Str = Mod:Func(Format, Args),
+ {true,unicode:characters_to_list(Str, Encoding)}
+ end;
+io_req(_, _, _) -> passthrough.
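+
+%% For illustration: a plain io:format(GL, "~w~n", [x]) arrives here as
+%% {put_chars,unicode,io_lib,format,["~w~n",[x]]} and is tagged
+%% {true,...} (may be HTML escaped), whereas a format list wrapped as
+%% ["$tc_html",Format] is tagged {false,...} and passed on unescaped.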
+
+output(Level, StrOrFun, Sender, From, St) when is_integer(Level) ->
+ case selected_by_level(Level, stdout, St) of
+ true when hd(StrOrFun) == "$tc_html" ->
+ output(stdout, tl(StrOrFun), Sender, From, St);
+ true when is_function(StrOrFun) ->
+ output(stdout, StrOrFun(stdout), Sender, From, St);
+ true ->
+ output(stdout, StrOrFun, Sender, From, St);
+ false ->
+ ok
+ end,
+ case selected_by_level(Level, major, St) of
+ true when hd(StrOrFun) == "$tc_html" ->
+ output(major, tl(StrOrFun), Sender, From, St);
+ true when is_function(StrOrFun) ->
+ output(major, StrOrFun(major), Sender, From, St);
+ true ->
+ output(major, StrOrFun, Sender, From, St);
+ false ->
+ ok
+ end,
+ case selected_by_level(Level, minor, St) of
+ true when hd(StrOrFun) == "$tc_html" ->
+ output(minor, tl(StrOrFun), Sender, From, St);
+ true when is_function(StrOrFun) ->
+ output(minor, StrOrFun(minor), Sender, From, St);
+ true ->
+ output(minor, test_server_ctrl:escape_chars(StrOrFun),
+ Sender, From, St);
+ false ->
+ ok
+ end;
+output(stdout, Str, _Sender, From, St) ->
+ output_to_file(stdout, Str, From, St);
+output(html, Str, _Sender, From, St) ->
+ output_to_file(html, Str, From, St);
+output(Level, Str, Sender, From, St) when is_atom(Level) ->
+ output_to_file(Level, dress_output(Str, Sender, St), From, St).
+
+output_to_file(minor, Data0, From, #st{tc={M,F,A},minor=none}) ->
+ Data = [io_lib:format("=== ~w:~w/~w\n", [M,F,A]),Data0],
+ test_server_io:print(From, unexpected_io, Data),
+ ok;
+output_to_file(minor, Data, From, #st{tc=TC,minor=Fd}) ->
+ try
+ io:put_chars(Fd, Data)
+ catch
+ Type:Reason ->
+ Data1 =
+ [io_lib:format("=== ERROR === TC: ~w\n"
+ "Failed to write to minor Fd: ~w\n"
+ "Type: ~w\n"
+ "Reason: ~w\n",
+ [TC,Fd,Type,Reason]),
+ Data,"\n"],
+ test_server_io:print(From, unexpected_io, Data1)
+ end;
+output_to_file(Detail, Data, From, _) ->
+ test_server_io:print(From, Detail, Data).
+
+is_io_permitted(From, #st{reject_io=true,permit_io=P}) ->
+ gb_sets:is_member(From, P);
+is_io_permitted(_, #st{reject_io=false}) -> true.
+
+selected_by_level(Level, stdout, #st{levels={Stdout,_,_}}) ->
+ Level =< Stdout;
+selected_by_level(Level, major, #st{levels={_,Major,_}}) ->
+ Level =< Major;
+selected_by_level(Level, minor, #st{levels={_,_,Minor}}) ->
+ Level >= Minor.
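+
+%% Example: with the default levels {1,19,10}, a message printed at
+%% detail level 15 is selected for major (15 =< 19) and for minor
+%% (15 >= 10), but not for stdout (15 > 1).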
+
+dress_output([$=|_]=Str, internal, _) ->
+ [Str,$\n];
+dress_output(Str, internal, _) ->
+ ["=== ",Str,$\n];
+dress_output(Str, _, #st{auto_nl=AutoNL}) ->
+ case AutoNL of
+ true -> [Str,$\n];
+ false -> Str
+ end.
diff --git a/lib/common_test/src/test_server_internal.hrl b/lib/common_test/src/test_server_internal.hrl
new file mode 100644
index 0000000000..1ec2d83417
--- /dev/null
+++ b/lib/common_test/src/test_server_internal.hrl
@@ -0,0 +1,60 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2002-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-define(priv_dir,"log_private").
+-define(MAIN_PORT,3289).
+-define(ACCEPT_TIMEOUT,20000).
+
+%% Target information generated by test_server:init_target_info/0
+%% Once initiated, this information will never change!!
+-record(target_info, {os_family, % atom(); win32 | unix
+ os_type, % result of os:type()
+ host, % string(); the name of the target machine
+ version, % string()
+ system_version, % string()
+ root_dir, % string()
+ emulator, % string()
+ otp_release, % string()
+ username, % string()
+ cookie, % string(); Cookie for target node
+ naming, % string(); "-name" | "-sname"
+ master}). % string(); Was used for OSE's master
+ % node for main target and slave nodes.
+ % For other platforms the target node
+ % itself is master for slave nodes
+
+%% Temporary information generated by test_server_ctrl:read_parameters/X
+%% This information is used when starting the main target, and for
+%% initiating the #target_info record.
+-record(par, {type,
+ target,
+ naming,
+ master,
+ cookie}).
+
+
+-record(cover, {app, % application; Name | none
+ file, % cover spec file
+ incl, % explicitly include modules
+ excl, % explicitly exclude modules
+ level, % analyse level; details | overview
+ mods, % actually cover compiled modules
+ stop=true, % stop cover after analyse; boolean()
+ cross}).% cross cover analyse info
diff --git a/lib/common_test/src/test_server_io.erl b/lib/common_test/src/test_server_io.erl
new file mode 100644
index 0000000000..0d881d0ada
--- /dev/null
+++ b/lib/common_test/src/test_server_io.erl
@@ -0,0 +1,452 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% This module implements a process with the registered name 'test_server_io',
+%% which has two main responsibilities:
+%%
+%% * Manage group leader processes (see the test_server_gl module)
+%% for test cases. A group_leader process is obtained by calling
+%% get_gl/1. Group leader processes will be kept alive as long as
+%% the 'test_server_io' process is alive.
+%%
+%% * Handle output to the common log files (stdout, major, html,
+%% unexpected_io).
+%%
+
+-module(test_server_io).
+-export([start_link/0,stop/1,get_gl/1,set_fd/2,
+ start_transaction/0,end_transaction/0,
+ print_buffered/1,print/3,print_unexpected/1,
+ set_footer/1,set_job_name/1,set_gl_props/1,
+ reset_state/0,finish/0]).
+
+-export([init/1,handle_call/3,handle_info/2,terminate/2]).
+
+-record(st, {fds, % Singleton fds (gb_tree)
+ tags=[], % Known tag types
+ shared_gl :: pid(), % Shared group leader
+ gls, % Group leaders (gb_set)
+ io_buffering=false, % I/O buffering
+ buffered, % Buffered I/O requests
+ html_footer, % HTML footer
+ job_name, % Name of current job.
+ gl_props, % Properties for GL
+ phase, % Indicates current mode
+ offline_buffer, % Buffer I/O during startup
+ stopping, % Reply to when process stopped
+ pending_ops % Perform when process idle
+ }).
+
+start_link() ->
+ case whereis(?MODULE) of
+ undefined ->
+ case gen_server:start_link({local,?MODULE}, ?MODULE, [], []) of
+ {ok,Pid} ->
+ {ok,Pid};
+ Other ->
+ Other
+ end;
+ Pid ->
+ %% already running, reset the state
+ reset_state(),
+ {ok,Pid}
+ end.
+
+stop(FilesToClose) ->
+ OldGL = group_leader(),
+ group_leader(self(), self()),
+ req({stop,FilesToClose}),
+ group_leader(OldGL, self()),
+ ok.
+
+finish() ->
+ req(finish).
+
+%% get_gl(Shared) -> Pid
+%% Shared = boolean()
+%% Pid = pid()
+%%
+%% Return a group leader (a process using the test_server_gl module).
+%% If Shared is true, the shared group leader is returned (suitable for
+%% running sequential test cases), otherwise a new group leader process
+%% is spawned. Group leader processes will live until the
+%% 'test_server_io' process is stopped.
+
+get_gl(Shared) when is_boolean(Shared) ->
+ req({get_gl,Shared}).
+
+%% set_fd(Tag, Fd) -> ok.
+%% Tag = major | html | unexpected_io
+%% Fd = a file descriptor (as returned by file:open/2)
+%%
+%% Associate a file descriptor with the given Tag. This
+%% Tag can later be used when calling print/3.
+
+set_fd(Tag, Fd) ->
+ req({set_fd,Tag,Fd}).
+
+%% start_transaction()
+%%
+%% Subsequent calls to print/3 from the process executing start_transaction/0
+%% will cause the messages to be buffered instead of printed directly.
+
+start_transaction() ->
+ req({start_transaction,self()}).
+
+%% end_transaction()
+%%
+%% End the transaction started by start_transaction/0. Subsequent calls to
+%% print/3 will cause the message to be printed directly.
+
+end_transaction() ->
+ req({end_transaction,self()}).
+
+%% print(From, Tag, Msg)
+%% From = pid()
+%% Tag = stdout, or any tag that has been registered using set_fd/2
+%% Msg = string or iolist
+%%
+%% Either print Msg to the file identified by Tag, or buffer the message
+%% if start_transaction/0 has been called from the process From.
+%%
+%% NOTE: The tags have various special meanings. For example, 'html'
+%% is assumed to be an HTML file.
+
+print(From, Tag, Msg) ->
+ req({print,From,Tag,Msg}).
+
+%% print_buffered(Pid)
+%% Pid = pid()
+%%
+%% Print all messages buffered in the *first* transaction buffered for Pid.
+%% (If start_transaction/0 and end_transaction/0 have been called N times,
+%% print_buffered/1 must be called N times to print all transactions.)
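+%%
+%% Example (hypothetical): if Pid has completed two
+%% start_transaction/0 ... end_transaction/0 rounds, the first call to
+%% print_buffered(Pid) flushes only the first round's output; a second
+%% call is needed for the second round.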
+
+print_buffered(Pid) ->
+ req({print_buffered,Pid}).
+
+%% print_unexpected(Msg)
+%% Msg = string or iolist
+%%
+%% Print the given string in the unexpected_io log.
+
+print_unexpected(Msg) ->
+ print(xxxFrom,unexpected_io,Msg).
+
+%% set_footer(IoData)
+%%
+%% Set a footer for the file associated with the 'html' tag.
+%% It will be used by print/3 to print a footer for the HTML file.
+
+set_footer(Footer) ->
+ req({set_footer,Footer}).
+
+%% set_job_name(Name)
+%%
+%% Set a name for the currently running job. The name will be used
+%% when printing to 'stdout'.
+%%
+
+set_job_name(Name) ->
+ req({set_job_name,Name}).
+
+%% set_gl_props(PropList)
+%%
+%% Set properties for group leader processes. When a group_leader process
+%% is created, test_server_gl:set_props(PropList) will be called.
+
+set_gl_props(PropList) ->
+ req({set_gl_props,PropList}).
+
+%% reset_state
+%%
+%% Reset the initial state
+reset_state() ->
+ req(reset_state).
+
+%%% Internal functions.
+
+init([]) ->
+ process_flag(trap_exit, true),
+ Empty = gb_trees:empty(),
+ {ok,Shared} = test_server_gl:start_link(),
+ {ok,#st{fds=Empty,shared_gl=Shared,gls=gb_sets:empty(),
+ io_buffering=gb_sets:empty(),
+ buffered=Empty,
+ html_footer="</body>\n</html>\n",
+ job_name="<name not set>",
+ gl_props=[],
+ phase=starting,
+ offline_buffer=[],
+ pending_ops=[]}}.
+
+req(Req) ->
+ gen_server:call(?MODULE, Req, infinity).
+
+handle_call({get_gl,false}, _From, #st{gls=Gls,gl_props=Props}=St) ->
+ {ok,Pid} = test_server_gl:start_link(),
+ test_server_gl:set_props(Pid, Props),
+ {reply,Pid,St#st{gls=gb_sets:insert(Pid, Gls)}};
+handle_call({get_gl,true}, _From, #st{shared_gl=Shared}=St) ->
+ {reply,Shared,St};
+handle_call({set_fd,Tag,Fd}, _From, #st{fds=Fds0,tags=Tags0,
+ offline_buffer=OfflineBuff}=St) ->
+ Fds = gb_trees:enter(Tag, Fd, Fds0),
+ St1 = St#st{fds=Fds,tags=[Tag|lists:delete(Tag, Tags0)]},
+ OfflineBuff1 =
+ if OfflineBuff == [] ->
+ [];
+ true ->
+ %% Fd ready, print anything buffered for associated Tag
+ lists:filtermap(fun({T,From,Str}) when T == Tag ->
+ output(From, Tag, Str, St1),
+ false;
+ (_) ->
+ true
+ end, lists:reverse(OfflineBuff))
+ end,
+ {reply,ok,St1#st{phase=started,
+ offline_buffer=lists:reverse(OfflineBuff1)}};
+handle_call({start_transaction,Pid}, _From, #st{io_buffering=Buffer0,
+ buffered=Buf0}=St) ->
+ Buf = case gb_trees:is_defined(Pid, Buf0) of
+ false -> gb_trees:insert(Pid, queue:new(), Buf0);
+ true -> Buf0
+ end,
+ Buffer = gb_sets:add(Pid, Buffer0),
+ {reply,ok,St#st{io_buffering=Buffer,buffered=Buf}};
+handle_call({print,From,Tag,Str}, _From, St0) ->
+ St = output(From, Tag, Str, St0),
+ {reply,ok,St};
+handle_call({end_transaction,Pid}, _From, #st{io_buffering=Buffer0,
+ buffered=Buffered0}=St0) ->
+ Q0 = gb_trees:get(Pid, Buffered0),
+ Q = queue:in(eot, Q0),
+ Buffered = gb_trees:update(Pid, Q, Buffered0),
+ Buffer = gb_sets:delete_any(Pid, Buffer0),
+ St = St0#st{io_buffering=Buffer,buffered=Buffered},
+ {reply,ok,St};
+handle_call({print_buffered,Pid}, _From, #st{buffered=Buffered0}=St0) ->
+ Q0 = gb_trees:get(Pid, Buffered0),
+ Q = do_print_buffered(Q0, St0),
+ Buffered = gb_trees:update(Pid, Q, Buffered0),
+ St = St0#st{buffered=Buffered},
+ {reply,ok,St};
+handle_call({set_footer,Footer}, _From, St) ->
+ {reply,ok,St#st{html_footer=Footer}};
+handle_call({set_job_name,Name}, _From, St) ->
+ {reply,ok,St#st{job_name=Name}};
+handle_call({set_gl_props,Props}, _From, #st{shared_gl=Shared}=St) ->
+ test_server_gl:set_props(Shared, Props),
+ {reply,ok,St#st{gl_props=Props}};
+handle_call(reset_state, From, #st{phase=stopping,pending_ops=Ops}=St) ->
+ %% can't reset during stopping phase, save op for later
+ Op = fun(NewSt) ->
+ {_,Result,NewSt1} = handle_call(reset_state, From, NewSt),
+ {Result,NewSt1}
+ end,
+ {noreply,St#st{pending_ops=[{From,Op}|Ops]}};
+handle_call(reset_state, _From, #st{fds=Fds,tags=Tags,gls=Gls,
+ offline_buffer=OfflineBuff}) ->
+ %% close open log files
+ lists:foreach(fun(Tag) ->
+ case gb_trees:lookup(Tag, Fds) of
+ none ->
+ ok;
+ {value,Fd} ->
+ file:close(Fd)
+ end
+ end, Tags),
+ GlList = gb_sets:to_list(Gls),
+ [test_server_gl:stop(GL) || GL <- GlList],
+ timer:sleep(100),
+ case lists:filter(fun(GlPid) -> is_process_alive(GlPid) end, GlList) of
+ [] ->
+ ok;
+ _ ->
+ timer:sleep(2000),
+ [exit(GL, kill) || GL <- GlList]
+ end,
+ Empty = gb_trees:empty(),
+ {ok,Shared} = test_server_gl:start_link(),
+ {reply,ok,#st{fds=Empty,shared_gl=Shared,gls=gb_sets:empty(),
+ io_buffering=gb_sets:empty(),
+ buffered=Empty,
+ html_footer="</body>\n</html>\n",
+ job_name="<name not set>",
+ gl_props=[],
+ phase=starting,
+ offline_buffer=OfflineBuff,
+ pending_ops=[]}};
+handle_call({stop,FdTags}, From, #st{fds=Fds0,tags=Tags0,
+ shared_gl=SGL,gls=Gls0}=St0) ->
+ St = St0#st{gls=gb_sets:insert(SGL, Gls0),phase=stopping,stopping=From},
+ gc(St),
+ %% close open log files
+ {Fds1,Tags1} = lists:foldl(fun(Tag, {Fds,Tags}) ->
+ case gb_trees:lookup(Tag, Fds) of
+ none ->
+ {Fds,Tags};
+ {value,Fd} ->
+ file:close(Fd),
+ {gb_trees:delete(Tag, Fds),
+ lists:delete(Tag, Tags)}
+ end
+ end, {Fds0,Tags0}, FdTags),
+ %% Give the users of the surviving group leaders some
+ %% time to finish.
+ erlang:send_after(1000, self(), stop_group_leaders),
+ {noreply,St#st{fds=Fds1,tags=Tags1}};
+handle_call(finish, From, St) ->
+ gen_server:reply(From, ok),
+ {stop,normal,St}.
+
+handle_info({'EXIT',Pid,normal}, #st{gls=Gls0,stopping=From}=St) ->
+ Gls = gb_sets:delete_any(Pid, Gls0),
+ case gb_sets:is_empty(Gls) andalso From =/= undefined of
+ true ->
+ %% No more group leaders left.
+ gen_server:reply(From, ok),
+ {noreply,St#st{gls=Gls,phase=stopping,stopping=undefined}};
+ false ->
+ %% Wait for more group leaders to finish.
+ {noreply,St#st{gls=Gls,phase=stopping}}
+ end;
+handle_info({'EXIT',_Pid,Reason}, _St) ->
+ exit(Reason);
+handle_info(stop_group_leaders, #st{gls=Gls}=St) ->
+ %% Stop the remaining group leaders.
+ GlPids = gb_sets:to_list(Gls),
+ [test_server_gl:stop(GL) || GL <- GlPids],
+ timer:sleep(100),
+ Wait =
+ case lists:filter(fun(GlPid) -> is_process_alive(GlPid) end, GlPids) of
+ [] -> 0;
+ _ -> 2000
+ end,
+ erlang:send_after(Wait, self(), kill_group_leaders),
+ {noreply,St};
+handle_info(kill_group_leaders, #st{gls=Gls,stopping=From,
+ pending_ops=Ops}=St) ->
+ [exit(GL, kill) || GL <- gb_sets:to_list(Gls)],
+ if From /= undefined ->
+ gen_server:reply(From, ok);
+ true -> % reply has been sent already
+ ok
+ end,
+ %% we're idle, check if any ops are pending
+ St1 = lists:foldr(fun({ReplyTo,Op},NewSt) ->
+ {Result,NewSt1} = Op(NewSt),
+ gen_server:reply(ReplyTo, Result),
+ NewSt1
+ end, St#st{phase=idle,pending_ops=[]}, Ops),
+ {noreply,St1};
+handle_info(Other, St) ->
+ io:format("Ignoring: ~p\n", [Other]),
+ {noreply,St}.
+
+terminate(_, _) ->
+ ok.
+
+output(From, Tag, Str, #st{io_buffering=Buffered,buffered=Buf0,
+ phase=Phase,offline_buffer=OfflineBuff}=St) ->
+ case gb_sets:is_member(From, Buffered) of
+ false ->
+ case do_output(Tag, Str, Phase, St) of
+ buffer when length(OfflineBuff)>500 ->
+ %% something's wrong, clear buffer
+ St#st{offline_buffer=[]};
+ buffer ->
+ St#st{offline_buffer=[{Tag,From,Str}|OfflineBuff]};
+ _ ->
+ St
+ end;
+ true ->
+ Q0 = gb_trees:get(From, Buf0),
+ Q = queue:in({Tag,Str}, Q0),
+ Buf = gb_trees:update(From, Q, Buf0),
+ St#st{buffered=Buf}
+ end.
+
+do_output(stdout, Str, _, #st{job_name=undefined}) ->
+ io:put_chars(Str);
+do_output(stdout, Str0, _, #st{job_name=Name}) ->
+ Str = io_lib:format("Testing ~ts: ~ts\n", [Name,Str0]),
+ io:put_chars(Str);
+do_output(Tag, Str, Phase, #st{fds=Fds}=St) ->
+ case gb_trees:lookup(Tag, Fds) of
+ none when Phase /= started ->
+ buffer;
+ none ->
+ S = io_lib:format("\n*** ERROR: ~w, line ~w: No known '~p' log file\n",
+ [?MODULE,?LINE,Tag]),
+ do_output(stdout, [S,Str], Phase, St);
+ {value,Fd} ->
+ try
+ io:put_chars(Fd, Str),
+ case Tag of
+ html -> finalise_table(Fd, St);
+ _ -> ok
+ end
+ catch _:Error ->
+ S = io_lib:format("\n*** ERROR: ~w, line ~w: Error writing to "
+ "log file '~p': ~p\n",
+ [?MODULE,?LINE,Tag,Error]),
+ do_output(stdout, [S,Str], Phase, St)
+ end
+ end.
+
+finalise_table(Fd, #st{html_footer=Footer}) ->
+ case file:position(Fd, {cur,0}) of
+ {ok,Pos} ->
+ %% We are writing to a seekable file. Finalise so
+ %% we get complete valid (and viewable) HTML code.
+ %% Then rewind to overwrite the finalising code.
+ io:put_chars(Fd, ["\n</table>\n",Footer]),
+ file:position(Fd, Pos);
+ {error,epipe} ->
+ %% The file is not seekable. We cannot erase what
+ %% we've already written --- so the reader will
+ %% have to wait until we're done.
+ ok
+ end.
+
+do_print_buffered(Q0, St) ->
+ Item = queue:get(Q0),
+ Q = queue:drop(Q0),
+ case Item of
+ eot ->
+ Q;
+ {Tag,Str} ->
+ do_output(Tag, Str, undefined, St),
+ do_print_buffered(Q, St)
+ end.
+
+gc(#st{gls=Gls0}) ->
+ InUse0 = [begin
+ case process_info(P, group_leader) of
+ {group_leader,GL} -> GL;
+ undefined -> undefined
+ end
+ end || P <- processes()],
+ InUse = ordsets:from_list(InUse0),
+ Gls = gb_sets:to_list(Gls0),
+ NotUsed = ordsets:subtract(Gls, InUse),
+ [test_server_gl:stop(Pid) || Pid <- NotUsed],
+ ok.
diff --git a/lib/common_test/src/test_server_node.erl b/lib/common_test/src/test_server_node.erl
new file mode 100644
index 0000000000..c64399e485
--- /dev/null
+++ b/lib/common_test/src/test_server_node.erl
@@ -0,0 +1,766 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2002-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_node).
+-compile(r12).
+
+%%%
+%%% The same compiled code for this module must be possible to load
+%%% in R12B and later.
+%%%
+
+%% Test Controller interface
+-export([is_release_available/1]).
+-export([start_tracer_node/2,trace_nodes/2,stop_tracer_node/1]).
+-export([start_node/5, stop_node/1]).
+-export([kill_nodes/0, nodedown/1]).
+%% Internal export
+-export([node_started/1,trc/1,handle_debug/4]).
+
+-include("test_server_internal.hrl").
+-record(slave_info, {name,socket,client}).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% %%%
+%%% All code in this module executes on the test_server_ctrl process %%%
+%%% except for node_started/1 and trc/1 which execute on a new node. %%%
+%%% %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+is_release_available(Rel) when is_atom(Rel) ->
+ is_release_available(atom_to_list(Rel));
+is_release_available(Rel) ->
+ case os:type() of
+ {unix,_} ->
+ Erl = find_release(Rel),
+ case Erl of
+ none -> false;
+ _ -> filelib:is_regular(Erl)
+ end;
+ _ ->
+ false
+ end.
+
+nodedown(Sock) ->
+ Match = #slave_info{name='$1',socket=Sock,client='$2',_='_'},
+ case ets:match(slave_tab,Match) of
+ [[Node,_Client]] -> % Slave node died
+ gen_tcp:close(Sock),
+ ets:delete(slave_tab,Node),
+ slave_died;
+ [] ->
+ ok
+ end.
+
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Start trace node
+%%%
+start_tracer_node(TraceFile,TI) ->
+ Match = #slave_info{name='$1',_='_'},
+ SlaveNodes = lists:map(fun([N]) -> [" ",N] end,
+ ets:match(slave_tab,Match)),
+ TargetNode = node(),
+ Cookie = TI#target_info.cookie,
+ {ok,LSock} = gen_tcp:listen(0,[binary,{reuseaddr,true},{packet,2}]),
+ {ok,TracePort} = inet:port(LSock),
+ Prog = quote_progname(pick_erl_program(default)),
+ Cmd = lists:concat([Prog, " -sname tracer -hidden -setcookie ", Cookie,
+ " -s ", ?MODULE, " trc ", TraceFile, " ",
+ TracePort, " ", TI#target_info.os_family]),
+ spawn(fun() -> print_data(open_port({spawn,Cmd},[stream])) end),
+%! open_port({spawn,Cmd},[stream]),
+ case gen_tcp:accept(LSock,?ACCEPT_TIMEOUT) of
+ {ok,Sock} ->
+ gen_tcp:close(LSock),
+ receive
+ {tcp,Sock,Result} when is_binary(Result) ->
+ case unpack(Result) of
+ error ->
+ gen_tcp:close(Sock),
+ {error,timeout};
+ {ok,started} ->
+ trace_nodes(Sock,[TargetNode | SlaveNodes]),
+ {ok,Sock};
+ {ok,Error} -> Error
+ end;
+ {tcp_closed,Sock} ->
+ gen_tcp:close(Sock),
+ {error,could_not_start_tracernode}
+ after ?ACCEPT_TIMEOUT ->
+ gen_tcp:close(Sock),
+ {error,timeout}
+ end;
+ Error ->
+ gen_tcp:close(LSock),
+ {error,{could_not_start_tracernode,Error}}
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Start a tracer on each of these nodes and set flags and patterns
+%%%
+trace_nodes(Sock,Nodes) ->
+ Bin = term_to_binary({add_nodes,Nodes}),
+ ok = gen_tcp:send(Sock, tag_trace_message(Bin)),
+ receive_ack(Sock).
+
+
+receive_ack(Sock) ->
+ receive
+ {tcp,Sock,Bin} when is_binary(Bin) ->
+ case unpack(Bin) of
+ error -> receive_ack(Sock);
+ {ok,_} -> ok
+ end;
+ _ ->
+ receive_ack(Sock)
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Stop trace node
+%%%
+stop_tracer_node(Sock) ->
+ Bin = term_to_binary(id(stop)),
+ ok = gen_tcp:send(Sock, tag_trace_message(Bin)),
+ receive {tcp_closed,Sock} -> gen_tcp:close(Sock) end,
+ ok.
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% trc([TraceFile,PortAtom,Type]) -> ok
+%%
+%% Start tracing on the given nodes
+%%
+%% This function executes on the new node
+%%
+trc([TraceFile, PortAtom, Type]) ->
+ {Result,Patterns} =
+ case file:consult(TraceFile) of
+ {ok,TI} ->
+ Pat = parse_trace_info(lists:flatten(TI)),
+ {started,Pat};
+ Error ->
+ {Error,[]}
+ end,
+ Port = list_to_integer(atom_to_list(PortAtom)),
+ case catch gen_tcp:connect("localhost", Port, [binary,
+ {reuseaddr,true},
+ {packet,2}]) of
+ {ok,Sock} ->
+ BinResult = term_to_binary(Result),
+ ok = gen_tcp:send(Sock,tag_trace_message(BinResult)),
+ trc_loop(Sock,Patterns,Type);
+ _else ->
+ ok
+ end,
+ erlang:halt().
+
+trc_loop(Sock,Patterns,Type) ->
+ receive
+ {tcp,Sock,Bin} ->
+ case unpack(Bin) of
+ error ->
+ ttb:stop(),
+ gen_tcp:close(Sock);
+ {ok,{add_nodes,Nodes}} ->
+ add_nodes(Nodes,Patterns,Type),
+ Bin1 = term_to_binary(id(ok)),
+ ok = gen_tcp:send(Sock, tag_trace_message(Bin1)),
+ trc_loop(Sock,Patterns,Type);
+ {ok,stop} ->
+ ttb:stop(),
+ gen_tcp:close(Sock)
+ end;
+ {tcp_closed,Sock} ->
+ ttb:stop(),
+ gen_tcp:close(Sock)
+ end.
+
+add_nodes(Nodes,Patterns,_Type) ->
+ ttb:tracer(Nodes,[{file,{local, test_server}},
+ {handler, {{?MODULE,handle_debug},initial}}]),
+ ttb:p(all,[call,timestamp]),
+ lists:foreach(fun({TP,M,F,A,Pat}) -> ttb:TP(M,F,A,Pat);
+ ({CTP,M,F,A}) -> ttb:CTP(M,F,A)
+ end,
+ Patterns).
+
+parse_trace_info([{TP,M,Pat}|Pats]) when TP=:=tp; TP=:=tpl ->
+ [{TP,M,'_','_',Pat}|parse_trace_info(Pats)];
+parse_trace_info([{TP,M,F,Pat}|Pats]) when TP=:=tp; TP=:=tpl ->
+ [{TP,M,F,'_',Pat}|parse_trace_info(Pats)];
+parse_trace_info([{TP,M,F,A,Pat}|Pats]) when TP=:=tp; TP=:=tpl ->
+ [{TP,M,F,A,Pat}|parse_trace_info(Pats)];
+parse_trace_info([CTP|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg ->
+ [{CTP,'_','_','_'}|parse_trace_info(Pats)];
+parse_trace_info([{CTP,M}|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg ->
+ [{CTP,M,'_','_'}|parse_trace_info(Pats)];
+parse_trace_info([{CTP,M,F}|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg ->
+ [{CTP,M,F,'_'}|parse_trace_info(Pats)];
+parse_trace_info([{CTP,M,F,A}|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg ->
+ [{CTP,M,F,A}|parse_trace_info(Pats)];
+parse_trace_info([]) ->
+ [];
+parse_trace_info([_other|Pats]) -> % ignore
+ parse_trace_info(Pats).
+
+handle_debug(Out,Trace,TI,initial) ->
+ handle_debug(Out,Trace,TI,0);
+handle_debug(_Out,end_of_trace,_TI,N) ->
+ N;
+handle_debug(Out,Trace,_TI,N) ->
+ print_trc(Out,Trace,N),
+ N+1.
+
+print_trc(Out,{trace_ts,P,call,{M,F,A},C,Ts},N) ->
+ io:format(Out,
+ "~w: ~s~n"
+ "Process : ~w~n"
+ "Call : ~w:~w/~w~n"
+ "Arguments : ~p~n"
+ "Caller : ~w~n~n",
+ [N,ts(Ts),P,M,F,length(A),A,C]);
+print_trc(Out,{trace_ts,P,call,{M,F,A},Ts},N) ->
+ io:format(Out,
+ "~w: ~s~n"
+ "Process : ~w~n"
+ "Call : ~w:~w/~w~n"
+ "Arguments : ~p~n~n",
+ [N,ts(Ts),P,M,F,length(A),A]);
+print_trc(Out,{trace_ts,P,return_from,{M,F,A},R,Ts},N) ->
+ io:format(Out,
+ "~w: ~s~n"
+ "Process : ~w~n"
+ "Return from : ~w:~w/~w~n"
+ "Return value : ~p~n~n",
+ [N,ts(Ts),P,M,F,A,R]);
+print_trc(Out,{drop,X},N) ->
+ io:format(Out,
+ "~w: Tracer dropped ~w messages - too busy~n~n",
+ [N,X]);
+print_trc(Out,Trace,N) ->
+ Ts = element(size(Trace),Trace),
+ io:format(Out,
+ "~w: ~s~n"
+ "Trace : ~p~n~n",
+ [N,ts(Ts),Trace]).
+
+ts({_, _, Micro} = Now) ->
+ {{Y,M,D},{H,Min,S}} = calendar:now_to_local_time(Now),
+ io_lib:format("~4.4.0w-~2.2.0w-~2.2.0w ~2.2.0w:~2.2.0w:~2.2.0w,~6.6.0w",
+ [Y,M,D,H,Min,S,Micro]).
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Start slave/peer nodes (initiated by test_server:start_node/5)
+%%%
+start_node(SlaveName, slave, Options, From, TI) when is_list(SlaveName) ->
+ start_node_slave(list_to_atom(SlaveName), Options, From, TI);
+start_node(SlaveName, slave, Options, From, TI) ->
+ start_node_slave(SlaveName, Options, From, TI);
+start_node(SlaveName, peer, Options, From, TI) when is_atom(SlaveName) ->
+ start_node_peer(atom_to_list(SlaveName), Options, From, TI);
+start_node(SlaveName, peer, Options, From, TI) ->
+ start_node_peer(SlaveName, Options, From, TI);
+start_node(_SlaveName, _Type, _Options, _From, _TI) ->
+ not_implemented_yet.
+
+%%
+%% Peer nodes are always started on the same host as test_server_ctrl
+%%
+%% (Socket communication is used since, in the early days, the test
+%% target and the test server controller node could be on different
+%% hosts and the target could not reach the controller node via
+%% Erlang distribution.)
+%%
+start_node_peer(SlaveName, OptList, From, TI) ->
+ SuppliedArgs = start_node_get_option_value(args, OptList, []),
+ Cleanup = start_node_get_option_value(cleanup, OptList, true),
+ HostStr = test_server_sup:hoststr(),
+ {ok,LSock} = gen_tcp:listen(0,[binary,
+ {reuseaddr,true},
+ {packet,2}]),
+ {ok,WaitPort} = inet:port(LSock),
+ NodeStarted = lists:concat([" -s ", ?MODULE, " node_started ",
+ HostStr, " ", WaitPort]),
+
+ % Support for erl_crash_dump files.
+ CrashDir = test_server_sup:crash_dump_dir(),
+ CrashFile = filename:join([CrashDir,
+ "erl_crash_dump."++cast_to_list(SlaveName)]),
+ CrashArgs = lists:concat([" -env ERL_CRASH_DUMP \"",CrashFile,"\" "]),
+ FailOnError = start_node_get_option_value(fail_on_error, OptList, true),
+ Prog0 = start_node_get_option_value(erl, OptList, default),
+ Prog = quote_progname(pick_erl_program(Prog0)),
+ Args =
+ case string:str(SuppliedArgs,"-setcookie") of
+ 0 -> "-setcookie " ++ TI#target_info.cookie ++ " " ++ SuppliedArgs;
+ _ -> SuppliedArgs
+ end,
+ Cmd = lists:concat([Prog,
+ " -detached ",
+ TI#target_info.naming, " ", SlaveName,
+ NodeStarted,
+ CrashArgs,
+ " ", Args]),
+ Opts = case start_node_get_option_value(env, OptList, []) of
+ [] -> [];
+ Env -> [{env, Env}]
+ end,
+ %% peer is always started on localhost
+ %%
+ %% Bad environment can cause open port to fail. If this happens,
+ %% we ignore it and let the testcase handle the situation...
+ catch open_port({spawn, Cmd}, [stream|Opts]),
+
+ Tmo = 60000 * test_server:timetrap_scale_factor(),
+
+ case start_node_get_option_value(wait, OptList, true) of
+ true ->
+ Ret = wait_for_node_started(LSock,Tmo,undefined,Cleanup,TI,self()),
+ case {Ret,FailOnError} of
+ {{{ok, Node}, Warning},_} ->
+ gen_server:reply(From,{{ok,Node},HostStr,Cmd,[],Warning});
+ {_,false} ->
+ gen_server:reply(From,{Ret, HostStr, Cmd});
+ {_,true} ->
+ gen_server:reply(From,{fail,{Ret, HostStr, Cmd}})
+ end;
+ false ->
+ Nodename = list_to_atom(SlaveName ++ "@" ++ HostStr),
+ I = "=== Not waiting for node",
+ gen_server:reply(From,{{ok, Nodename}, HostStr, Cmd, I, []}),
+ Self = self(),
+ spawn_link(wait_for_node_started_fun(LSock,Tmo,Cleanup,TI,Self)),
+ ok
+ end.
+
+-spec wait_for_node_started_fun(_, _, _, _, _) -> fun(() -> no_return()).
+wait_for_node_started_fun(LSock, Tmo, Cleanup, TI, Self) ->
+ fun() ->
+ wait_for_node_started(LSock,Tmo,undefined,
+ Cleanup,TI,Self),
+ receive after infinity -> ok end
+ end.
+
+%%
+%% Slave nodes are started on a remote host if
+%% - the option remote is given when calling test_server:start_node/3
+%%
+start_node_slave(SlaveName, OptList, From, _TI) ->
+ SuppliedArgs = start_node_get_option_value(args, OptList, []),
+ Cleanup = start_node_get_option_value(cleanup, OptList, true),
+
+ CrashDir = test_server_sup:crash_dump_dir(),
+ CrashFile = filename:join([CrashDir,
+ "erl_crash_dump."++cast_to_list(SlaveName)]),
+ CrashArgs = lists:concat([" -env ERL_CRASH_DUMP \"",CrashFile,"\" "]),
+ Args = lists:concat([" ", SuppliedArgs, CrashArgs]),
+
+ Prog0 = start_node_get_option_value(erl, OptList, default),
+ Prog = pick_erl_program(Prog0),
+ Ret =
+ case start_which_node(OptList) of
+ {error,Reason} -> {{error,Reason},undefined,undefined};
+ Host0 -> do_start_node_slave(Host0,SlaveName,Args,Prog,Cleanup)
+ end,
+ gen_server:reply(From,Ret).
+
+
+do_start_node_slave(Host0, SlaveName, Args, Prog, Cleanup) ->
+ Host =
+ case Host0 of
+ local -> test_server_sup:hoststr();
+ _ -> cast_to_list(Host0)
+ end,
+ Cmd = Prog ++ " " ++ Args,
+ case slave:start(Host, SlaveName, Args, no_link, Prog) of
+ {ok,Nodename} ->
+ case Cleanup of
+ true -> ets:insert(slave_tab,#slave_info{name=Nodename});
+ false -> ok
+ end,
+ {{ok,Nodename}, Host, Cmd, [], []};
+ Ret ->
+ {Ret, Host, Cmd}
+ end.
+
+
+wait_for_node_started(LSock,Timeout,Client,Cleanup,TI,CtrlPid) ->
+ case gen_tcp:accept(LSock,Timeout) of
+ {ok,Sock} ->
+ gen_tcp:close(LSock),
+ receive
+ {tcp,Sock,Started0} when is_binary(Started0) ->
+ case unpack(Started0) of
+ error ->
+ gen_tcp:close(Sock),
+ {error, connection_closed};
+ {ok,Started} ->
+ Version = TI#target_info.otp_release,
+ VsnStr = TI#target_info.system_version,
+ {ok,Nodename, W} =
+ handle_start_node_return(Version,
+ VsnStr,
+ Started),
+ case Cleanup of
+ true ->
+ ets:insert(slave_tab,#slave_info{name=Nodename,
+ socket=Sock,
+ client=Client});
+ false -> ok
+ end,
+ gen_tcp:controlling_process(Sock,CtrlPid),
+ test_server_ctrl:node_started(Nodename),
+ {{ok,Nodename},W}
+ end;
+ {tcp_closed,Sock} ->
+ gen_tcp:close(Sock),
+ {error, connection_closed}
+ after Timeout ->
+ gen_tcp:close(Sock),
+ {error, timeout}
+ end;
+ {error,Reason} ->
+ gen_tcp:close(LSock),
+ {error, {no_connection,Reason}}
+ end.
+
+
+
+handle_start_node_return(Version,VsnStr,{started, Node, Version, VsnStr}) ->
+ {ok, Node, []};
+handle_start_node_return(Version,VsnStr,{started, Node, OVersion, OVsnStr}) ->
+ Str = io_lib:format("WARNING: Started node "
+ "reports different system "
+ "version than current node! "
+ "Current node version: ~p, ~p "
+ "Started node version: ~p, ~p",
+ [Version, VsnStr,
+ OVersion, OVsnStr]),
+ Str1 = lists:flatten(Str),
+ {ok, Node, Str1}.
+
+
+%%
+%% This function executes on the new node
+%%
+node_started([Host,PortAtom]) ->
+ %% Must spawn a new process because the boot process should not
+ %% hang forever!!
+ spawn(node_started_fun(Host,PortAtom)).
+
+-spec node_started_fun(_, _) -> fun(() -> no_return()).
+node_started_fun(Host,PortAtom) ->
+ fun() -> node_started(Host,PortAtom) end.
+
+%% This process hangs forever, just waiting for the socket to be
+%% closed, at which point it terminates the node
+node_started(Host,PortAtom) ->
+ {_, Version} = init:script_id(),
+ VsnStr = erlang:system_info(system_version),
+ Port = list_to_integer(atom_to_list(PortAtom)),
+ case catch gen_tcp:connect(Host,Port, [binary,
+ {reuseaddr,true},
+ {packet,2}]) of
+
+ {ok,Sock} ->
+ Started = term_to_binary({started, node(), Version, VsnStr}),
+ ok = gen_tcp:send(Sock, tag_trace_message(Started)),
+ receive _Anything ->
+ gen_tcp:close(Sock),
+ erlang:halt()
+ end;
+ _else ->
+ erlang:halt()
+ end.
+
+
+-compile({inline, [tag_trace_message/1]}).
+-dialyzer({no_improper_lists, tag_trace_message/1}).
+tag_trace_message(M) ->
+ [1|M].
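+
+%% For illustration, the 1-byte tag round-trips through unpack/1:
+%%   unpack(iolist_to_binary(tag_trace_message(term_to_binary(stop))))
+%%       -> {ok,stop}
+%% while a message with any other leading byte yields 'error'.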
+
+% start_which_node(Optlist) -> hostname
+start_which_node(Optlist) ->
+ case start_node_get_option_value(remote, Optlist) of
+ undefined ->
+ local;
+ true ->
+ case find_remote_host() of
+ {error, Other} ->
+ {error, Other};
+ RHost ->
+ RHost
+ end
+ end.
+
+find_remote_host() ->
+ HostList=test_server_ctrl:get_hosts(),
+ case lists:delete(test_server_sup:hoststr(), HostList) of
+ [] ->
+ {error, no_remote_hosts};
+ [RHost|_Rest] ->
+ RHost
+ end.
+
+start_node_get_option_value(Key, List) ->
+ start_node_get_option_value(Key, List, undefined).
+
+start_node_get_option_value(Key, List, Default) ->
+ case lists:keysearch(Key, 1, List) of
+ {value, {Key, Value}} ->
+ Value;
+ false ->
+ Default
+ end.
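+
+%% Examples: start_node_get_option_value(wait, [{wait,false}], true)
+%% -> false, and start_node_get_option_value(erl, [], default)
+%% -> default.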
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% stop_node(Name) -> ok | {error,Reason}
+%%
+%% Clean up - test_server will stop this node
+stop_node(Name) ->
+ case ets:lookup(slave_tab,Name) of
+ [#slave_info{}] ->
+ ets:delete(slave_tab,Name),
+ ok;
+ [] ->
+ {error, not_a_slavenode}
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% kill_nodes() -> ok
+%%
+%% Brutally kill all slavenodes that were not stopped by test_server
+kill_nodes() ->
+ case ets:match_object(slave_tab,'_') of
+ [] -> [];
+ List ->
+ lists:map(fun(SI) -> kill_node(SI) end, List)
+ end.
+
+kill_node(SI) ->
+ Name = SI#slave_info.name,
+ ets:delete(slave_tab,Name),
+ case SI#slave_info.socket of
+ undefined ->
+ catch rpc:call(Name,erlang,halt,[]);
+ Sock ->
+ gen_tcp:close(Sock)
+ end,
+ Name.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% cast_to_list(X) -> string()
+%%% X = list() | atom() | void()
+%%% Returns a string representation of whatever was input
+
+cast_to_list(X) when is_list(X) -> X;
+cast_to_list(X) when is_atom(X) -> atom_to_list(X);
+cast_to_list(X) -> lists:flatten(io_lib:format("~w", [X])).
+
+
+%%% L contains elements of the forms
+%%% {prog, String}
+%%% {release, Rel} where Rel = String | latest | previous
+%%% this
+%%%
+pick_erl_program(default) ->
+ cast_to_list(lib:progname());
+pick_erl_program(L) ->
+ P = random_element(L),
+ case P of
+ {prog, S} ->
+ S;
+ {release, S} ->
+ find_release(S);
+ this ->
+ cast_to_list(lib:progname())
+ end.
+
+%% This is an attempt to distinguish between spaces in the program
+%% path and spaces that separate arguments. The program is quoted to
+%% allow spaces in the path.
+%%
+%% Arguments can be present either if the executable is explicitly given
+%% ({prog,String}) or if the -program switch to beam is used and
+%% includes arguments (typically done by cerl in the OTP test environment
+%% in order to ensure that slave/peer nodes are started with the same
+%% emulator and flags as the test node; the return from lib:progname()
+%% could then typically be '/<full_path_to>/cerl -gcov').
+quote_progname(Progname) ->
+ do_quote_progname(string:tokens(Progname," ")).
+
+do_quote_progname([Prog]) ->
+ "\""++Prog++"\"";
+do_quote_progname([Prog,Arg|Args]) ->
+ case os:find_executable(Prog) of
+ false ->
+ do_quote_progname([Prog++" "++Arg | Args]);
+ _ ->
+ %% this one has an executable - we assume the rest are arguments
+ "\""++Prog++"\""++
+ lists:flatten(lists:map(fun(X) -> [" ",X] end, [Arg|Args]))
+ end.
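+
+%% For illustration, assuming "/opt/my otp/bin/cerl" is an existing
+%% executable:
+%%   quote_progname("/opt/my otp/bin/cerl -gcov")
+%%       -> "\"/opt/my otp/bin/cerl\" -gcov"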
+
+random_element(L) ->
+ lists:nth(rand:uniform(length(L)), L).
+
+find_release(latest) ->
+ "/usr/local/otp/releases/latest/bin/erl";
+find_release(previous) ->
+ "kaka";
+find_release(Rel) ->
+ find_release(os:type(), Rel).
+
+find_release({unix,sunos}, Rel) ->
+ case os:cmd("uname -p") of
+ "sparc" ++ _ ->
+ "/usr/local/otp/releases/otp_beam_solaris8_" ++ Rel ++ "/bin/erl";
+ _ ->
+ none
+ end;
+find_release({unix,linux}, Rel) ->
+ Candidates = find_rel_linux(Rel),
+ case lists:dropwhile(fun(N) ->
+ not filelib:is_regular(N)
+ end, Candidates) of
+ [] -> none;
+ [Erl|_] -> Erl
+ end;
+find_release(_, _) -> none.
+
+find_rel_linux(Rel) ->
+ case suse_release() of
+ none -> [];
+ SuseRel -> find_rel_suse(Rel, SuseRel)
+ end.
+
+find_rel_suse(Rel, SuseRel) ->
+ Root = "/usr/local/otp/releases/sles",
+ case SuseRel of
+ "11" ->
+ %% Try SuSE 11, SuSE 10, and SuSE 9, in that order.
+ find_rel_suse_1(Rel, Root++"11") ++
+ find_rel_suse_1(Rel, Root++"10") ++
+ find_rel_suse_1(Rel, Root++"9");
+ "10" ->
+ %% Try both SuSE 10 and SuSE 9, in that order.
+ find_rel_suse_1(Rel, Root++"10") ++
+ find_rel_suse_1(Rel, Root++"9");
+ "9" ->
+ find_rel_suse_1(Rel, Root++"9");
+ _ ->
+ []
+ end.
+
+find_rel_suse_1(Rel, RootWc) ->
+ case erlang:system_info(wordsize) of
+ 4 ->
+ find_rel_suse_2(Rel, RootWc++"_32");
+ 8 ->
+ find_rel_suse_2(Rel, RootWc++"_64") ++
+ find_rel_suse_2(Rel, RootWc++"_32")
+ end.
+
+find_rel_suse_2(Rel, RootWc) ->
+ RelDir = filename:dirname(RootWc),
+ Pat = filename:basename(RootWc ++ "_" ++ Rel) ++ ".*",
+ case file:list_dir(RelDir) of
+ {ok,Dirs} ->
+ case lists:filter(fun(Dir) ->
+ case re:run(Dir, Pat) of
+ nomatch -> false;
+ _ -> true
+ end
+ end, Dirs) of
+ [] ->
+ [];
+ [R|_] ->
+ [filename:join([RelDir,R,"bin","erl"])]
+ end;
+ _ ->
+ []
+ end.
+
+%% suse_release() -> VersionString | none.
+%% Return the major SuSE version number for this platform or
+%% 'none' if this is not a SuSE platform.
+suse_release() ->
+ case file:open("/etc/SuSE-release", [read]) of
+ {ok,Fd} ->
+ try
+ suse_release(Fd)
+ after
+ file:close(Fd)
+ end;
+ {error,_} -> none
+ end.
+
+suse_release(Fd) ->
+ case io:get_line(Fd, '') of
+ eof -> none;
+ Line when is_list(Line) ->
+ case re:run(Line, "^VERSION\\s*=\\s*(\\d+)\\s*",
+ [{capture,all_but_first,list}]) of
+ nomatch ->
+ suse_release(Fd);
+ {match,[Version]} ->
+ Version
+ end
+ end.
+
+unpack(Bin) ->
+ {One,Term} = split_binary(Bin, 1),
+ case binary_to_list(One) of
+ [1] ->
+ case catch {ok,binary_to_term(Term)} of
+ {'EXIT',_} -> error;
+ {ok,_}=Res -> Res
+ end;
+ _ -> error
+ end.
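+
+%% The matching packing on the sending side would be (a sketch, not part
+%% of this module):
+%%   pack(Term) -> <<1, (term_to_binary(Term))/binary>>.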
+
+id(I) -> I.
+
+print_data(Port) ->
+ receive
+ {Port, {data, Bytes}} ->
+ io:put_chars(Bytes),
+ print_data(Port);
+ {Port, eof} ->
+ Port ! {self(), close},
+ receive
+ {Port, closed} ->
+ true
+ end,
+ receive
+ {'EXIT', Port, _} ->
+ ok
+ after 1 -> % force context switch
+ ok
+ end
+ end.
diff --git a/lib/common_test/src/test_server_sup.erl b/lib/common_test/src/test_server_sup.erl
new file mode 100644
index 0000000000..c4530ba62f
--- /dev/null
+++ b/lib/common_test/src/test_server_sup.erl
@@ -0,0 +1,943 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2014. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% Purpose: Test server support functions.
+%%%-------------------------------------------------------------------
+-module(test_server_sup).
+-export([timetrap/2, timetrap/3, timetrap/4,
+ timetrap_cancel/1, capture_get/1, messages_get/1,
+ timecall/3, call_crash/5, app_test/2, check_new_crash_dumps/0,
+ cleanup_crash_dumps/0, crash_dump_dir/0, tar_crash_dumps/0,
+ get_username/0, get_os_family/0,
+ hostatom/0, hostatom/1, hoststr/0, hoststr/1,
+ framework_call/2,framework_call/3,framework_call/4,
+ format_loc/1,
+ util_start/0, util_stop/0, unique_name/0,
+ call_trace/1,
+ appup_test/1]).
+-include("test_server_internal.hrl").
+-define(crash_dump_tar,"crash_dumps.tar.gz").
+-define(src_listing_ext, ".src.html").
+-record(util_state, {starter, latest_name}).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timetrap(Timeout,Scale,Pid) -> Handle
+%% Handle = term()
+%%
+%% Creates a time trap that will kill the given process if the
+%% trap is not cancelled with timetrap_cancel/1 within Timeout
+%% milliseconds.
+%% Scale says whether the time should be scaled up to compensate for
+%% delays during the test (e.g. if cover is running).
+
+timetrap(Timeout0, Pid) ->
+ timetrap(Timeout0, Timeout0, true, Pid).
+
+timetrap(Timeout0, Scale, Pid) ->
+ timetrap(Timeout0, Timeout0, Scale, Pid).
+
+timetrap(Timeout0, ReportTVal, Scale, Pid) ->
+ process_flag(priority, max),
+ Timeout = if not Scale -> Timeout0;
+ true -> test_server:timetrap_scale_factor() * Timeout0
+ end,
+ TruncTO = trunc(Timeout),
+ receive
+ after TruncTO ->
+ kill_the_process(Pid, Timeout0, TruncTO, ReportTVal)
+ end.
+
+kill_the_process(Pid, Timeout0, TruncTO, ReportTVal) ->
+ case is_process_alive(Pid) of
+ true ->
+ TimeToReport = if Timeout0 == ReportTVal -> TruncTO;
+ true -> ReportTVal end,
+ MFLs = test_server:get_loc(Pid),
+ Mon = erlang:monitor(process, Pid),
+ Trap = {timetrap_timeout,TimeToReport,MFLs},
+ exit(Pid, Trap),
+ receive
+ {'DOWN', Mon, process, Pid, _} ->
+ ok
+ after 10000 ->
+ %% Pid is probably trapping exits, hit it harder...
+ catch error_logger:warning_msg(
+ "Testcase process ~w not "
+ "responding to timetrap "
+ "timeout:~n"
+ " ~p.~n"
+ "Killing testcase...~n",
+ [Pid, Trap]),
+ exit(Pid, kill)
+ end;
+ false ->
+ ok
+ end.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% timetrap_cancel(Handle) -> ok
+%% Handle = term()
+%%
+%% Cancels a time trap.
+timetrap_cancel(Handle) ->
+ unlink(Handle),
+ MonRef = erlang:monitor(process, Handle),
+ exit(Handle, kill),
+ receive {'DOWN',MonRef,_,_,_} -> ok
+ after
+ 2000 ->
+ erlang:demonitor(MonRef, [flush]),
+ ok
+ end.
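+
+%% Typical usage sketch (the handle is the pid of a process running
+%% timetrap/4; details may differ in the actual callers):
+%%   Handle = spawn_link(test_server_sup, timetrap, [5000, 5000, true, self()]),
+%%   ... run the test case ...
+%%   timetrap_cancel(Handle).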
+
+capture_get(Msgs) ->
+ receive
+ {captured,Msg} ->
+ capture_get([Msg|Msgs])
+ after 0 ->
+ lists:reverse(Msgs)
+ end.
+
+messages_get(Msgs) ->
+ receive
+ Msg ->
+ messages_get([Msg|Msgs])
+ after 0 ->
+ lists:reverse(Msgs)
+ end.
+
+timecall(M, F, A) ->
+ {Elapsed, Val} = timer:tc(M, F, A),
+ {Elapsed / 1000000, Val}.
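+
+%% Example: timecall(timer, sleep, [100]) returns {Elapsed, ok} where
+%% Elapsed is the wall-clock time in seconds, roughly 0.1 here.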
+
+
+call_crash(Time,Crash,M,F,A) ->
+ OldTrapExit = process_flag(trap_exit,true),
+ Pid = spawn_link(M,F,A),
+ Answer =
+ receive
+ {'EXIT',Crash} ->
+ ok;
+ {'EXIT',Pid,Crash} ->
+ ok;
+ {'EXIT',_Reason} when Crash==any ->
+ ok;
+ {'EXIT',Pid,_Reason} when Crash==any ->
+ ok;
+ {'EXIT',Reason} ->
+ test_server:format(12, "Wrong crash reason. Wanted ~p, got ~p.",
+ [Crash, Reason]),
+ exit({wrong_crash_reason,Reason});
+ {'EXIT',Pid,Reason} ->
+ test_server:format(12, "Wrong crash reason. Wanted ~p, got ~p.",
+ [Crash, Reason]),
+ exit({wrong_crash_reason,Reason});
+ {'EXIT',OtherPid,Reason} when OldTrapExit == false ->
+ exit({'EXIT',OtherPid,Reason})
+ after do_trunc(Time) ->
+ exit(call_crash_timeout)
+ end,
+ process_flag(trap_exit,OldTrapExit),
+ Answer.
+
+do_trunc(infinity) -> infinity;
+do_trunc(T) -> trunc(T).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% app_test/2
+%%
+%% Checks one application's .app file for obvious errors.
+%% Checks..
+%% * .. required fields
+%% * .. that all modules specified actually exist
+%% * .. that all required applications exist
+%% * .. that no module included in the application has export_all
+%% * .. that all modules in the ebin/ dir are included
+%% (This only produces a warning, as not all modules have to
+%% be included unless the `pedantic' option is used)
+app_test(Application, Mode) ->
+ case is_app(Application) of
+ {ok, AppFile} ->
+ do_app_tests(AppFile, Application, Mode);
+ Error ->
+ test_server:fail(Error)
+ end.
+
+is_app(Application) ->
+ case file:consult(filename:join([code:lib_dir(Application),"ebin",
+ atom_to_list(Application)++".app"])) of
+ {ok, [{application, Application, AppFile}] } ->
+ {ok, AppFile};
+ _ ->
+ test_server:format(minor,
+ "Application (.app) file not found, "
+ "or it has very bad syntax.~n"),
+ {error, not_an_application}
+ end.
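+
+%% A minimal .app file that these checks accept could look like this
+%% (hypothetical application and contents):
+%%   {application, my_app,
+%%    [{description, "example"},
+%%     {vsn, "1.0"},
+%%     {modules, [my_app]},
+%%     {registered, []},
+%%     {applications, [kernel, stdlib]}]}.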
+
+
+do_app_tests(AppFile, AppName, Mode) ->
+ DictList=
+ [
+ {missing_fields, []},
+ {missing_mods, []},
+ {superfluous_mods_in_ebin, []},
+ {export_all_mods, []},
+ {missing_apps, []}
+ ],
+ fill_dictionary(DictList),
+
+ %% An app file must (?) have some fields..
+ check_fields([description, modules, registered, applications], AppFile),
+
+ %% Check for missing and extra modules.
+ {value, {modules, Mods}}=lists:keysearch(modules, 1, AppFile),
+ EBinList=lists:sort(get_ebin_modnames(AppName)),
+ {Missing, Extra} = common(lists:sort(Mods), EBinList),
+ put(superfluous_mods_in_ebin, Extra),
+ put(missing_mods, Missing),
+
+ %% Check that no module in the application has export_all.
+ app_check_export_all(Mods),
+
+ %% Check that all specified applications exist.
+ {value, {applications, Apps}}=
+ lists:keysearch(applications, 1, AppFile),
+ check_apps(Apps),
+
+ A=check_dict(missing_fields, "Inconsistent app file, "
+ "missing fields"),
+ B=check_dict(missing_mods, "Inconsistent app file, "
+ "missing modules"),
+ C=check_dict_tolerant(superfluous_mods_in_ebin, "Inconsistent app file, "
+ "Modules not included in app file.", Mode),
+ D=check_dict(export_all_mods, "Inconsistent app file, "
+ "Modules have `export_all'."),
+ E=check_dict(missing_apps, "Inconsistent app file, "
+ "missing applications."),
+
+ erase_dictionary(DictList),
+ case A+B+C+D+E of
+ 5 ->
+ ok;
+ _ ->
+ test_server:fail()
+ end.
+
+app_check_export_all([]) ->
+ ok;
+app_check_export_all([Mod|Mods]) ->
+ case catch apply(Mod, module_info, [compile]) of
+ {'EXIT', {undef,_}} ->
+ app_check_export_all(Mods);
+ COpts ->
+ case lists:keysearch(options, 1, COpts) of
+ false ->
+ app_check_export_all(Mods);
+ {value, {options, List}} ->
+ case lists:member(export_all, List) of
+ true ->
+ put(export_all_mods, [Mod|get(export_all_mods)]),
+ app_check_export_all(Mods);
+ false ->
+ app_check_export_all(Mods)
+ end
+ end
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% appup_test/1
+%%
+%% Checks one application's .appup file for obvious errors.
+%% Checks..
+%% * .. syntax
+%% * .. that version in app file matches appup file version
+%% * .. validity of appup instructions
+%%
+%% For a library application, this function checks that the proper
+%% 'restart_application' upgrade and downgrade clauses exist.
+appup_test(Application) ->
+ case is_app(Application) of
+ {ok, AppFile} ->
+ case is_appup(Application, proplists:get_value(vsn, AppFile)) of
+ {ok, Up, Down} ->
+ StartMod = proplists:get_value(mod, AppFile),
+ Modules = proplists:get_value(modules, AppFile),
+ do_appup_tests(StartMod, Application, Up, Down, Modules);
+ Error ->
+ test_server:fail(Error)
+ end;
+ Error ->
+ test_server:fail(Error)
+ end.
+
+is_appup(Application, Version) ->
+ AppupFile = atom_to_list(Application) ++ ".appup",
+ AppupPath = filename:join([code:lib_dir(Application), "ebin", AppupFile]),
+ case file:consult(AppupPath) of
+ {ok, [{Version, Up, Down}]} when is_list(Up), is_list(Down) ->
+ {ok, Up, Down};
+ _ ->
+ test_server:format(
+ minor,
+ "Application upgrade (.appup) file not found, "
+ "or it has very bad syntax.~n"),
+ {error, appup_not_readable}
+ end.
+
+do_appup_tests(undefined, Application, Up, Down, _Modules) ->
+ %% library application
+ case Up of
+ [{<<".*">>, [{restart_application, Application}]}] ->
+ case Down of
+ [{<<".*">>, [{restart_application, Application}]}] ->
+ ok;
+ _ ->
+ test_server:format(
+ minor,
+ "Library application needs restart_application "
+ "downgrade instruction.~n"),
+ {error, library_downgrade_instruction_malformed}
+ end;
+ _ ->
+ test_server:format(
+ minor,
+ "Library application needs restart_application "
+ "upgrade instruction.~n"),
+ {error, library_upgrade_instruction_malformed}
+ end;
+do_appup_tests(_, _Application, Up, Down, Modules) ->
+ %% normal application
+ case check_appup_clauses_plausible(Up, up, Modules) of
+ ok ->
+ case check_appup_clauses_plausible(Down, down, Modules) of
+ ok ->
+ test_server:format(minor, "OK~n");
+ Error ->
+ test_server:format(minor, "ERROR ~p~n", [Error]),
+ test_server:fail(Error)
+ end;
+ Error ->
+ test_server:format(minor, "ERROR ~p~n", [Error]),
+ test_server:fail(Error)
+ end.
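+
+%% For a library application the accepted .appup shape is thus
+%% (hypothetical version and application name):
+%%   {"1.2.0",
+%%    [{<<".*">>, [{restart_application, my_lib}]}],
+%%    [{<<".*">>, [{restart_application, my_lib}]}]}.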
+
+check_appup_clauses_plausible([], _Direction, _Modules) ->
+ ok;
+check_appup_clauses_plausible([{Re, Instrs} | Rest], Direction, Modules)
+ when is_binary(Re) ->
+ case re:compile(Re) of
+ {ok, _} ->
+ case check_appup_instructions(Instrs, Direction, Modules) of
+ ok ->
+ check_appup_clauses_plausible(Rest, Direction, Modules);
+ Error ->
+ Error
+ end;
+ {error, Error} ->
+ {error, {version_regex_malformed, Re, Error}}
+ end;
+check_appup_clauses_plausible([{V, Instrs} | Rest], Direction, Modules)
+ when is_list(V) ->
+ case check_appup_instructions(Instrs, Direction, Modules) of
+ ok ->
+ check_appup_clauses_plausible(Rest, Direction, Modules);
+ Error ->
+ Error
+ end;
+check_appup_clauses_plausible(Clause, _Direction, _Modules) ->
+ {error, {clause_malformed, Clause}}.
+
+check_appup_instructions(Instrs, Direction, Modules) ->
+ case check_instructions(Direction, Instrs, Instrs, [], [], Modules) of
+ {_Good, []} ->
+ ok;
+ {_, Bad} ->
+ {error, {bad_instructions, Bad}}
+ end.
+
+check_instructions(_, [], _, Good, Bad, _) ->
+ {lists:reverse(Good), lists:reverse(Bad)};
+check_instructions(UpDown, [Instr | Rest], All, Good, Bad, Modules) ->
+ case catch check_instruction(UpDown, Instr, All, Modules) of
+ ok ->
+ check_instructions(UpDown, Rest, All, [Instr | Good], Bad, Modules);
+ {error, Reason} ->
+ NewBad = [{Instr, Reason} | Bad],
+ check_instructions(UpDown, Rest, All, Good, NewBad, Modules)
+ end.
+
+check_instruction(up, {add_module, Module}, _, Modules) ->
+ %% A new module is added
+ check_module(Module, Modules);
+check_instruction(down, {add_module, Module}, _, Modules) ->
+ %% An old module is re-added
+ case (catch check_module(Module, Modules)) of
+ {error, {unknown_module, Module}} -> ok;
+ ok -> throw({error, {existing_readded_module, Module}})
+ end;
+check_instruction(_, {load_module, Module}, _, Modules) ->
+ check_module(Module, Modules);
+check_instruction(_, {load_module, Module, DepMods}, _, Modules) ->
+ check_module(Module, Modules),
+ check_depend(DepMods);
+check_instruction(_, {load_module, Module, Pre, Post, DepMods}, _, Modules) ->
+ check_module(Module, Modules),
+ check_depend(DepMods),
+ check_purge(Pre),
+ check_purge(Post);
+check_instruction(up, {delete_module, Module}, _, Modules) ->
+ case (catch check_module(Module, Modules)) of
+ {error, {unknown_module, Module}} ->
+ ok;
+ ok ->
+ throw({error,{existing_module_deleted, Module}})
+ end;
+check_instruction(down, {delete_module, Module}, _, Modules) ->
+ check_module(Module, Modules);
+check_instruction(_, {update, Module}, _, Modules) ->
+ check_module(Module, Modules);
+check_instruction(_, {update, Module, supervisor}, _, Modules) ->
+ check_module(Module, Modules);
+check_instruction(_, {update, Module, DepMods}, _, Modules)
+ when is_list(DepMods) ->
+ check_module(Module, Modules);
+check_instruction(_, {update, Module, Change}, _, Modules) ->
+ check_module(Module, Modules),
+ check_change(Change);
+check_instruction(_, {update, Module, Change, DepMods}, _, Modules) ->
+ check_module(Module, Modules),
+ check_change(Change),
+ check_depend(DepMods);
+check_instruction(_, {update, Module, Change, Pre, Post, DepMods}, _, Modules) ->
+ check_module(Module, Modules),
+ check_change(Change),
+ check_purge(Pre),
+ check_purge(Post),
+ check_depend(DepMods);
+check_instruction(_,
+ {update, Module, Timeout, Change, Pre, Post, DepMods},
+ _,
+ Modules) ->
+ check_module(Module, Modules),
+ check_timeout(Timeout),
+ check_change(Change),
+ check_purge(Pre),
+ check_purge(Post),
+ check_depend(DepMods);
+check_instruction(_,
+ {update, Module, ModType, Timeout, Change, Pre, Post, DepMods},
+ _,
+ Modules) ->
+ check_module(Module, Modules),
+ check_mod_type(ModType),
+ check_timeout(Timeout),
+ check_change(Change),
+ check_purge(Pre),
+ check_purge(Post),
+ check_depend(DepMods);
+check_instruction(_, {restart_application, Application}, _, _) ->
+ check_application(Application);
+check_instruction(_, {remove_application, Application}, _, _) ->
+ check_application(Application);
+check_instruction(_, {add_application, Application}, _, _) ->
+ check_application(Application);
+check_instruction(_, {add_application, Application, Type}, _, _) ->
+ check_application(Application),
+ check_restart_type(Type);
+check_instruction(_, Instr, _, _) ->
+ throw({error, {low_level_or_invalid_instruction, Instr}}).
+
+check_module(Module, Modules) ->
+ case {is_atom(Module), lists:member(Module, Modules)} of
+ {true, true} -> ok;
+ {true, false} -> throw({error, {unknown_module, Module}});
+ {false, _} -> throw({error, {bad_module, Module}})
+ end.
+
+check_application(App) ->
+ case is_atom(App) of
+ true -> ok;
+ false -> throw({error, {bad_application, App}})
+ end.
+
+check_depend(Dep) when is_list(Dep) -> ok;
+check_depend(Dep) -> throw({error, {bad_depend, Dep}}).
+
+check_restart_type(permanent) -> ok;
+check_restart_type(transient) -> ok;
+check_restart_type(temporary) -> ok;
+check_restart_type(load) -> ok;
+check_restart_type(none) -> ok;
+check_restart_type(Type) -> throw({error, {bad_restart_type, Type}}).
+
+check_timeout(T) when is_integer(T), T > 0 -> ok;
+check_timeout(default) -> ok;
+check_timeout(infinity) -> ok;
+check_timeout(T) -> throw({error, {bad_timeout, T}}).
+
+check_mod_type(static) -> ok;
+check_mod_type(dynamic) -> ok;
+check_mod_type(Type) -> throw({error, {bad_mod_type, Type}}).
+
+check_purge(soft_purge) -> ok;
+check_purge(brutal_purge) -> ok;
+check_purge(Purge) -> throw({error, {bad_purge, Purge}}).
+
+check_change(soft) -> ok;
+check_change({advanced, _}) -> ok;
+check_change(Change) -> throw({error, {bad_change, Change}}).
+
+%% Given two sorted lists, L1 and L2, returns {NotInL2, NotInL1}, where
+%% NotInL2 is the elements of L1 which do not occur in L2, and
+%% NotInL1 is the elements of L2 which do not occur in L1.
+
+common(L1, L2) ->
+ common(L1, L2, [], []).
+
+common([X|Rest1], [X|Rest2], A1, A2) ->
+ common(Rest1, Rest2, A1, A2);
+common([X|Rest1], [Y|Rest2], A1, A2) when X < Y ->
+ common(Rest1, [Y|Rest2], [X|A1], A2);
+common([X|Rest1], [Y|Rest2], A1, A2) ->
+ common([X|Rest1], Rest2, A1, [Y|A2]);
+common([], L, A1, A2) ->
+ {A1, L++A2};
+common(L, [], A1, A2) ->
+ {L++A1, A2}.
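+
+%% Example: common([a,b,d], [b,c,d]) -> {[a],[c]}.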
+
+check_apps([]) ->
+ ok;
+check_apps([App|Apps]) ->
+ case is_app(App) of
+ {ok, _AppFile} ->
+ ok;
+ {error, _} ->
+ put(missing_apps, [App|get(missing_apps)])
+ end,
+ check_apps(Apps).
+
+check_fields([], _AppFile) ->
+ ok;
+check_fields([L|Ls], AppFile) ->
+ check_field(L, AppFile),
+ check_fields(Ls, AppFile).
+
+check_field(FieldName, AppFile) ->
+ case lists:keymember(FieldName, 1, AppFile) of
+ true ->
+ ok;
+ false ->
+ put(missing_fields, [FieldName|get(missing_fields)]),
+ ok
+ end.
+
+check_dict(Dict, Reason) ->
+ case get(Dict) of
+ [] ->
+ 1; % All ok.
+ List ->
+ io:format("** ~ts (~ts) ->~n~p~n",[Reason, Dict, List]),
+ 0
+ end.
+
+check_dict_tolerant(Dict, Reason, Mode) ->
+ case get(Dict) of
+ [] ->
+ 1; % All ok.
+ List ->
+ io:format("** ~ts (~ts) ->~n~p~n",[Reason, Dict, List]),
+ case Mode of
+ pedantic ->
+ 0;
+ _ ->
+ 1
+ end
+ end.
+
+get_ebin_modnames(AppName) ->
+ Wc=filename:join([code:lib_dir(AppName),"ebin",
+ "*"++code:objfile_extension()]),
+ TheFun=fun(X, Acc) ->
+ [list_to_atom(filename:rootname(
+ filename:basename(X)))|Acc] end,
+ _Files=lists:foldl(TheFun, [], filelib:wildcard(Wc)).
+
+%%
+%% This function removes any erl_crash_dump* files found in the
+%% test server directory. Done only once when the test server
+%% is started.
+%%
+cleanup_crash_dumps() ->
+ Dir = crash_dump_dir(),
+ Dumps = filelib:wildcard(filename:join(Dir, "erl_crash_dump*")),
+ delete_files(Dumps).
+
+crash_dump_dir() ->
+ %% If no framework is known, then we use current working directory
+ %% - in most cases that will be the same as the default log
+ %% directory.
+ {ok,Dir} = test_server_sup:framework_call(get_log_dir,[],file:get_cwd()),
+ Dir.
+
+tar_crash_dumps() ->
+ Dir = crash_dump_dir(),
+ case filelib:wildcard(filename:join(Dir, "erl_crash_dump*")) of
+ [] -> {error,no_crash_dumps};
+ Dumps ->
+ TarFileName = filename:join(Dir,?crash_dump_tar),
+ {ok,Tar} = erl_tar:open(TarFileName,[write,compressed]),
+ lists:foreach(
+ fun(File) ->
+ ok = erl_tar:add(Tar,File,filename:basename(File),[])
+ end,
+ Dumps),
+ ok = erl_tar:close(Tar),
+ delete_files(Dumps),
+ {ok,TarFileName}
+ end.
+
+
+check_new_crash_dumps() ->
+ Dir = crash_dump_dir(),
+ Dumps = filelib:wildcard(filename:join(Dir, "erl_crash_dump*")),
+ case length(Dumps) of
+ 0 ->
+ ok;
+ Num ->
+ test_server_ctrl:format(minor,
+ "Found ~w crash dumps:~n", [Num]),
+ append_files_to_logfile(Dumps),
+ delete_files(Dumps)
+ end.
+
+append_files_to_logfile([]) -> ok;
+append_files_to_logfile([File|Files]) ->
+ NodeName=from($., File),
+ test_server_ctrl:format(minor, "Crash dump from node ~tp:~n",[NodeName]),
+ Fd=get(test_server_minor_fd),
+ case file:read_file(File) of
+ {ok, Bin} ->
+ case file:write(Fd, Bin) of
+ ok ->
+ ok;
+ {error,Error} ->
+ %% Write failed. The following io:format/3 will probably also
+ %% fail, but in that case it will throw an exception so that
+ %% we will be aware of the problem.
+ io:format(Fd, "Unable to write the crash dump "
+ "to this file: ~p~n", [file:format_error(Error)])
+ end;
+ _Error ->
+ io:format(Fd, "Failed to read: ~ts\n", [File])
+ end,
+ append_files_to_logfile(Files).
+
+delete_files([]) -> ok;
+delete_files([File|Files]) ->
+ io:format("Deleting file: ~ts~n", [File]),
+ case file:delete(File) of
+ {error, _} ->
+ case file:rename(File, File++".old") of
+ {error, Error} ->
+ io:format("Could neither delete nor rename file "
+ "~ts: ~ts.~n", [File, Error]);
+ _ ->
+ ok
+ end;
+ _ ->
+ ok
+ end,
+ delete_files(Files).
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% erase_dictionary(KeyVals) -> ok
+%% KeyVals = [{atom(),term()},...]
+%%
+%% Takes a list of Key-Value pairs, erases each key from the
+%% process dictionary and returns ok.
+erase_dictionary([{Var, _Val}|Vars]) ->
+ erase(Var),
+ erase_dictionary(Vars);
+erase_dictionary([]) ->
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% fill_dictionary(KeyVals) -> void()
+%% KeyVals = [{atom(),term()},...]
+%%
+%% Takes each Key-Value pair, and inserts it in the process dictionary.
+fill_dictionary([{Var,Val}|Vars]) ->
+ put(Var,Val),
+ fill_dictionary(Vars);
+fill_dictionary([]) ->
+ [].
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% get_username() -> UserName
+%%
+%% Returns the current user
+get_username() ->
+ getenv_any(["USER","USERNAME"]).
+
+getenv_any([Key|Rest]) ->
+ case catch os:getenv(Key) of
+ String when is_list(String) -> String;
+ false -> getenv_any(Rest)
+ end;
+getenv_any([]) -> "".
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% get_os_family() -> OsFamily
+%%
+%% Returns the OS family
+get_os_family() ->
+ {OsFamily,_OsName} = os:type(),
+ OsFamily.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% hostatom()/hostatom(Node) -> Host; atom()
+%% hoststr() | hoststr(Node) -> Host; string()
+%%
+%% Returns the host part of the (node) name
+hostatom() ->
+ hostatom(node()).
+hostatom(Node) ->
+ list_to_atom(hoststr(Node)).
+hoststr() ->
+ hoststr(node()).
+hoststr(Node) when is_atom(Node) ->
+ hoststr(atom_to_list(Node));
+hoststr(Node) when is_list(Node) ->
+ from($@, Node).
+
+from(H, [H | T]) -> T;
+from(H, [_ | T]) -> from(H, T);
+from(_H, []) -> [].
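+
+%% Example: from($@, "node@host") returns "host"; if H does not occur
+%% in the list, the result is [].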
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% framework_call(Callback,Func,Args,DefaultReturn) -> Return | DefaultReturn
+%%
+%% Calls the given Func in Callback
+framework_call(Func,Args) ->
+ framework_call(Func,Args,ok).
+framework_call(Func,Args,DefaultReturn) ->
+ CB = os:getenv("TEST_SERVER_FRAMEWORK"),
+ framework_call(CB,Func,Args,DefaultReturn).
+framework_call(FW,_Func,_Args,DefaultReturn)
+ when FW =:= false; FW =:= "undefined" ->
+ DefaultReturn;
+framework_call(Callback,Func,Args,DefaultReturn) ->
+ Mod = list_to_atom(Callback),
+ case code:is_loaded(Mod) of
+ false -> code:load_file(Mod);
+ _ -> ok
+ end,
+ case erlang:function_exported(Mod,Func,length(Args)) of
+ true ->
+ EH = fun(Reason) -> exit({fw_error,{Mod,Func,Reason}}) end,
+ SetTcState = case Func of
+ end_tc -> true;
+ init_tc -> true;
+ _ -> false
+ end,
+ case SetTcState of
+ true ->
+ test_server:set_tc_state({framework,Mod,Func});
+ false ->
+ ok
+ end,
+ try apply(Mod,Func,Args) of
+ Result ->
+ Result
+ catch
+ exit:Why ->
+ EH(Why);
+ error:Why ->
+ EH({Why,erlang:get_stacktrace()});
+ throw:Why ->
+ EH(Why)
+ end;
+ false ->
+ DefaultReturn
+ end.
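+
+%% Usage sketch: framework_call(get_log_dir, [], file:get_cwd()) calls
+%% Mod:get_log_dir() in the module named by the TEST_SERVER_FRAMEWORK
+%% environment variable, falling back to the (already evaluated) default
+%% if no framework is set or the function is not exported.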
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% format_loc(Loc) -> string()
+%%
+%% Formats the printout of the line of code read from
+%% process dictionary (test_server_loc). Adds link to
+%% correct line in source code.
+format_loc([{Mod,Func,Line}]) ->
+ [format_loc1({Mod,Func,Line})];
+format_loc([{Mod,Func,Line}|Rest]) ->
+ ["[",format_loc1({Mod,Func,Line}),",\n"|format_loc1(Rest)];
+format_loc([{Mod,LineOrFunc}]) ->
+ format_loc({Mod,LineOrFunc});
+format_loc({Mod,Func}) when is_atom(Func) ->
+ io_lib:format("{~w,~w}",[Mod,Func]);
+format_loc(Loc) ->
+ io_lib:format("~p",[Loc]).
+
+format_loc1([{Mod,Func,Line}]) ->
+ [" ",format_loc1({Mod,Func,Line}),"]"];
+format_loc1([{Mod,Func,Line}|Rest]) ->
+ [" ",format_loc1({Mod,Func,Line}),",\n"|format_loc1(Rest)];
+format_loc1({Mod,Func,Line}) ->
+ ModStr = atom_to_list(Mod),
+ case {lists:member(no_src, get(test_server_logopts)),
+ lists:reverse(ModStr)} of
+ {false,[$E,$T,$I,$U,$S,$_|_]} ->
+ Link = if is_integer(Line) ->
+ integer_to_list(Line);
+ Line == last_expr ->
+ list_to_atom(atom_to_list(Func)++"-last_expr");
+ is_atom(Line) ->
+ atom_to_list(Line);
+ true ->
+ Line
+ end,
+ io_lib:format("{~w,~w,<a href=\"~ts~ts#~s\">~w</a>}",
+ [Mod,Func,
+ test_server_ctrl:uri_encode(downcase(ModStr)),
+ ?src_listing_ext,Link,Line]);
+ _ ->
+ io_lib:format("{~w,~w,~w}",[Mod,Func,Line])
+ end.
+
+downcase(S) -> downcase(S, []).
+downcase([Uc|Rest], Result) when $A =< Uc, Uc =< $Z ->
+ downcase(Rest, [Uc-$A+$a|Result]);
+downcase([C|Rest], Result) ->
+ downcase(Rest, [C|Result]);
+downcase([], Result) ->
+ lists:reverse(Result).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% util_start() -> ok
+%%
+%% Start local utility process
+util_start() ->
+ Starter = self(),
+ case whereis(?MODULE) of
+ undefined ->
+ spawn_link(fun() ->
+ register(?MODULE, self()),
+ util_loop(#util_state{starter=Starter})
+ end);
+ _Pid ->
+ ok
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% util_stop() -> ok
+%%
+%% Stop local utility process
+util_stop() ->
+ try (?MODULE ! {self(),stop}) of
+ _ ->
+ receive {?MODULE,stopped} -> ok
+ after 5000 -> exit(whereis(?MODULE), kill)
+ end
+ catch
+ _:_ ->
+ ok
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% unique_name() -> string()
+%%
+unique_name() ->
+ ?MODULE ! {self(),unique_name},
+ receive {?MODULE,Name} -> Name
+ after 5000 -> exit({?MODULE,no_util_process})
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% util_loop(State) -> ok
+%%
+util_loop(State) ->
+ receive
+ {From,unique_name} ->
+ Nr = erlang:unique_integer([positive]),
+ Name = integer_to_list(Nr),
+ if Name == State#util_state.latest_name ->
+ timer:sleep(1),
+ self() ! {From,unique_name},
+ util_loop(State);
+ true ->
+ From ! {?MODULE,Name},
+ util_loop(State#util_state{latest_name = Name})
+ end;
+ {From,stop} ->
+ catch unlink(State#util_state.starter),
+ From ! {?MODULE,stopped},
+ ok
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% call_trace(TraceSpecFile) -> ok
+%%
+%% Read terms on the format {m,Mod} | {f,Mod,Func}
+%% from TraceSpecFile and enable call trace for
+%% specified functions.
+call_trace(TraceSpec) ->
+ case catch try_call_trace(TraceSpec) of
+ {'EXIT',Reason} ->
+ erlang:display(Reason),
+ exit(Reason);
+ Ok ->
+ Ok
+ end.
+
+try_call_trace(TraceSpec) ->
+ case file:consult(TraceSpec) of
+ {ok,Terms} ->
+ dbg:tracer(),
+ %% dbg:p(self(), [p, m, sos, call]),
+ dbg:p(self(), [sos, call]),
+ lists:foreach(fun({m,M}) ->
+ case dbg:tpl(M,[{'_',[],[{return_trace}]}]) of
+ {error,What} -> exit({error,{tracing_failed,What}});
+ _ -> ok
+ end;
+ ({f,M,F}) ->
+ case dbg:tpl(M,F,[{'_',[],[{return_trace}]}]) of
+ {error,What} -> exit({error,{tracing_failed,What}});
+ _ -> ok
+ end;
+ (Huh) ->
+ exit({error,{unrecognized_trace_term,Huh}})
+ end, Terms),
+ ok;
+ {_,Error} ->
+ exit({error,{tracing_failed,TraceSpec,Error}})
+ end.
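+
+%% A trace spec file consumed by call_trace/1 contains plain terms, e.g.
+%% (module and function names hypothetical):
+%%   {m, my_module}.
+%%   {f, my_module, my_function}.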
+
diff --git a/lib/common_test/test/Makefile b/lib/common_test/test/Makefile
index ff4495b104..fd96d06aab 100644
--- a/lib/common_test/test/Makefile
+++ b/lib/common_test/test/Makefile
@@ -65,9 +65,14 @@ MODULES= \
ct_cover_nomerge_SUITE \
ct_groups_search_SUITE \
ct_surefire_SUITE \
- ct_telnet_SUITE
+ ct_telnet_SUITE \
+ erl2html2_SUITE \
+ test_server_SUITE \
+ test_server_test_lib \
+ ct_release_test_SUITE
ERL_FILES= $(MODULES:%=%.erl)
+HRL_FILES= test_server_test_lib.hrl
TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
INSTALL_PROGS= $(TARGET_FILES)
@@ -84,8 +89,8 @@ RELSYSDIR = $(RELEASE_PATH)/common_test_test
# FLAGS
# ----------------------------------------------------
-ERL_MAKE_FLAGS += -pa $(ERL_TOP)/lib/test_server/ebin
-ERL_COMPILE_FLAGS += -I$(ERL_TOP)/lib/test_server/include
+ERL_MAKE_FLAGS +=
+ERL_COMPILE_FLAGS +=
EBIN = .
@@ -118,7 +123,7 @@ release_spec: opt
release_tests_spec:
$(INSTALL_DIR) "$(RELSYSDIR)"
- $(INSTALL_DATA) $(ERL_FILES) $(COVERFILE) "$(RELSYSDIR)"
+ $(INSTALL_DATA) $(ERL_FILES) $(HRL_FILES) $(COVERFILE) "$(RELSYSDIR)"
$(INSTALL_DATA) common_test.spec common_test.cover "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/common_test/test/common_test.cover b/lib/common_test/test/common_test.cover
index 87d00c420f..e8e01a9312 100644
--- a/lib/common_test/test/common_test.cover
+++ b/lib/common_test/test/common_test.cover
@@ -1,9 +1,2 @@
%% -*- erlang -*-
{incl_app,common_test,details}.
-{cross,common_test,[{test_server,[erl2html2,
- test_server,
- test_server_ctrl,
- test_server_gl,
- test_server_io,
- test_server_node,
- test_server_sup]}]}.
diff --git a/lib/common_test/test/ct_event_handler_SUITE_data/eh_A.erl b/lib/common_test/test/ct_event_handler_SUITE_data/eh_A.erl
index 28c85ee2db..29c25bf151 100644
--- a/lib/common_test/test/ct_event_handler_SUITE_data/eh_A.erl
+++ b/lib/common_test/test/ct_event_handler_SUITE_data/eh_A.erl
@@ -27,7 +27,7 @@
-behaviour(gen_event).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("common_test/include/ct_event.hrl").
%% gen_event callbacks
diff --git a/lib/common_test/test/ct_hooks_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE.erl
index 8353f5936c..5ec88b438e 100644
--- a/lib/common_test/test/ct_hooks_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE.erl
@@ -30,7 +30,7 @@
-compile(export_all).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("common_test/include/ct_event.hrl").
-define(eh, ct_test_support_eh).
diff --git a/lib/common_test/test/ct_master_SUITE.erl b/lib/common_test/test/ct_master_SUITE.erl
index 15b49c67c0..55837352e2 100644
--- a/lib/common_test/test/ct_master_SUITE.erl
+++ b/lib/common_test/test/ct_master_SUITE.erl
@@ -135,7 +135,7 @@ make_spec(DataDir, FileName, NodeNames, Suites, Config) ->
C = lists:map(
fun(NodeName) ->
- Rnd = random:uniform(2),
+ Rnd = rand:uniform(2),
if Rnd == 1->
{config,NodeName,filename:join(DataDir,
"master/config.txt")};
diff --git a/lib/common_test/test/ct_misc_1_SUITE.erl b/lib/common_test/test/ct_misc_1_SUITE.erl
index 1db8bcc794..a562719296 100644
--- a/lib/common_test/test/ct_misc_1_SUITE.erl
+++ b/lib/common_test/test/ct_misc_1_SUITE.erl
@@ -31,7 +31,6 @@
-compile(export_all).
-include_lib("common_test/include/ct.hrl").
--include_lib("test_server/include/test_server_line.hrl").
-include_lib("common_test/include/ct_event.hrl").
-define(eh, ct_test_support_eh).
diff --git a/lib/common_test/test/ct_release_test_SUITE.erl b/lib/common_test/test/ct_release_test_SUITE.erl
new file mode 100644
index 0000000000..66d07155ac
--- /dev/null
+++ b/lib/common_test/test/ct_release_test_SUITE.erl
@@ -0,0 +1,190 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2014. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File: ct_release_test_SUITE
+%%%
+%%% Description:
+%%% Test ct_release_test module
+%%%
+%%%-------------------------------------------------------------------
+-module(ct_release_test_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
+-define(eh, ct_test_support_eh).
+-define(suite, release_test_SUITE).
+
+%%--------------------------------------------------------------------
+%% TEST SERVER CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Description: Since Common Test starts another Test Server
+%% instance, the tests need to be performed on a separate node (or
+%% there will be clashes with logging processes etc).
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skipped, "Upgrade tests do currently not work on windows"};
+ _ ->
+ ct_test_support:init_per_suite(Config)
+ end.
+
+end_per_suite(Config) ->
+ ct_test_support:end_per_suite(Config).
+
+init_per_testcase(TestCase, Config) ->
+ ct_test_support:init_per_testcase(TestCase, Config).
+
+end_per_testcase(TestCase, Config) ->
+ ct_test_support:end_per_testcase(TestCase, Config).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ minor,
+ major,
+ major_fail_init,
+ major_fail_upgraded,
+ major_fail_downgraded,
+ major_fail_no_init
+ ].
+
+%%--------------------------------------------------------------------
+%% TEST CASES
+%%--------------------------------------------------------------------
+
+%%%-----------------------------------------------------------------
+%%%
+minor(Config) when is_list(Config) ->
+ {Suite,Cfg} = setup1(Config),
+ {Opts,ERPid} = setup([{suite,Suite},
+ {testcase,minor},
+ {label,minor}|Cfg], Config),
+ execute(minor, Opts, ERPid, Config).
+
+major(Config) when is_list(Config) ->
+ {Suite,Cfg} = setup1(Config),
+ {Opts,ERPid} = setup([{suite,Suite},
+ {testcase,major},
+ {label,major}|Cfg], Config),
+ execute(major, Opts, ERPid, Config).
+
+major_fail_init(Config) when is_list(Config) ->
+ {Suite,Cfg} = setup1(Config),
+ {Opts,ERPid} = setup([{suite,Suite},
+ {testcase,major_fail_init},
+ {label,major_fail_init}|Cfg], Config),
+ execute(major_fail_init, Opts, ERPid, Config).
+
+major_fail_upgraded(Config) when is_list(Config) ->
+ {Suite,Cfg} = setup1(Config),
+ {Opts,ERPid} = setup([{suite,Suite},
+ {testcase,major_fail_upgraded},
+ {label,major_fail_upgraded}|Cfg], Config),
+ execute(major_fail_upgraded, Opts, ERPid, Config).
+
+major_fail_downgraded(Config) when is_list(Config) ->
+ {Suite,Cfg} = setup1(Config),
+ {Opts,ERPid} = setup([{suite,Suite},
+ {testcase,major_fail_downgraded},
+ {label,major_fail_downgraded}|Cfg], Config),
+ execute(major_fail_downgraded, Opts, ERPid, Config).
+
+major_fail_no_init(Config) when is_list(Config) ->
+ {Suite,Cfg} = setup1(Config),
+ {Opts,ERPid} = setup([{suite,Suite},
+ {testcase,major_fail_no_init},
+ {label,major_fail_no_init}|Cfg], Config),
+ execute(major_fail_no_init, Opts, ERPid, Config).
+
+
+%%%-----------------------------------------------------------------
+%%% HELP FUNCTIONS
+%%%-----------------------------------------------------------------
+setup1(Config) ->
+ DataDir = ?config(data_dir, Config),
+ Suite = filename:join(DataDir, atom_to_list(?suite)),
+ Cfg = case ct:get_config(otp_releases) of
+ undefined ->
+ [];
+ Rels ->
+ CfgFile = filename:join(DataDir, "release_test.cfg"),
+ file:write_file(CfgFile,
+ io_lib:format("{otp_releases,~p}.",[Rels])),
+ [{config,CfgFile}]
+ end,
+ {Suite,Cfg}.
+
+setup(Test, Config) ->
+ Opts0 = ct_test_support:get_opts(Config),
+ Level = ?config(trace_level, Config),
+ EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ Opts = Opts0 ++ [{event_handler,{?eh,EvHArgs}}|Test],
+ ERPid = ct_test_support:start_event_receiver(Config),
+ {Opts,ERPid}.
+
+execute(Name, Opts, ERPid, Config) ->
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(Name,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+
+ verify_events(Name,Events,Config).
+
+reformat(Events, EH) ->
+ ct_test_support:reformat(Events, EH).
+
+%%%-----------------------------------------------------------------
+%%% TEST EVENTS
+%%%-----------------------------------------------------------------
+verify_events(TC,Events,Config) ->
+ Ok = expected_events(TC,ok),
+ case ct_test_support:verify_events(Ok, Events, Config) of
+ ok ->
+ ok;
+ {event_not_found,{?eh,tc_done,{_Suite,TC,ok}}}=R1 ->
+ ct:log("Did not find 'ok', checking if skipped...",[]),
+ Skipped = expected_events(TC,{skipped,"Old release not available"}),
+ case ct_test_support:verify_events(Skipped, Events, Config) of
+ ok ->
+ {skipped,"Old release not available"};
+ R2 ->
+ ct:log("Did not find skipped case either: ~n~p",[R2]),
+ exit(R1)
+ end
+ end.
+
+expected_events(TC,Result) ->
+ OneTest =
+ [{?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,tc_done,{?suite,TC,Result}},
+ {?eh,stop_logging,[]}],
+ %% 2 tests (ct:run_test + script_start) are run by default
+ OneTest ++ OneTest.
diff --git a/lib/common_test/test/ct_release_test_SUITE_data/release_test_SUITE.erl b/lib/common_test/test/ct_release_test_SUITE_data/release_test_SUITE.erl
new file mode 100644
index 0000000000..04c92be0d1
--- /dev/null
+++ b/lib/common_test/test/ct_release_test_SUITE_data/release_test_SUITE.erl
@@ -0,0 +1,118 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%%----------------------------------------------------------------
+%%% Purpose: Test the support for application upgrade/code_change test
+%%%-----------------------------------------------------------------
+-module(release_test_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(APP,runtime_tools). % "randomly" selected 'application under test'
+
+%%
+%% all/1
+%%
+all() ->
+ [minor,
+ major,
+ major_fail_init,
+ major_fail_upgraded,
+ major_fail_downgraded,
+ major_fail_no_init].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(major_fail_no_init, Config) ->
+ Config;
+init_per_testcase(_Case, Config) ->
+ ct_release_test:init(Config).
+end_per_testcase(_Case, Config) ->
+ ct_release_test:cleanup(Config).
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+minor(Config) ->
+ ct_release_test:upgrade(?APP,minor,{?MODULE,[]},Config).
+
+major(Config) ->
+ ct_release_test:upgrade(?APP,major,{?MODULE,[]},Config).
+
+major_fail_init(Config) ->
+ try ct_release_test:upgrade(?APP,major,{?MODULE,fail_init},Config)
+ catch exit:{test_case_failed,
+ {test_upgrade_callback,_Mod,_Func,_Args,
+ {'EXIT',{test_case_failed,upgrade_init_failed}}}} ->
+ ok
+ end.
+
+major_fail_upgraded(Config) ->
+ try ct_release_test:upgrade(?APP,major,{?MODULE,fail_upgraded},Config)
+ catch exit:{test_case_failed,
+ {test_upgrade_callback,_Mod,_Func,_Args,
+ {'EXIT',{test_case_failed,upgrade_upgraded_failed}}}} ->
+ ok
+ end.
+
+major_fail_downgraded(Config) ->
+ try ct_release_test:upgrade(?APP,major,{?MODULE,fail_downgraded},Config)
+ catch exit:{test_case_failed,
+ {test_upgrade_callback,_Mod,_Func,_Args,
+ {'EXIT',{test_case_failed,upgrade_downgraded_failed}}}} ->
+ ok
+ end.
+
+major_fail_no_init(Config) ->
+ try ct_release_test:upgrade(?APP,major,[],Config)
+ catch exit:{test_case_failed,"ct_release_test:init/1 not run"} ->
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% ct_release_test callbacks
+
+%% Version numbers are checked by ct_release_test, so there is nothing
+%% more to check here...
+upgrade_init(_CtData,fail_init) ->
+ ct:fail(upgrade_init_failed);
+upgrade_init(CtData,State) ->
+ {ok,{FromVsn,ToVsn}} = ct_release_test:get_app_vsns(CtData,?APP),
+ case ct_release_test:get_appup(CtData,?APP) of
+ {ok,{FromVsn,ToVsn,UpInstrs,DownInstrs}} ->
+ io:format("Upgrade/downgrade ~p: ~p <--> ~p~n"
+ "Upgrade instructions: ~p~n"
+ "Downgrade instructions: ~p",
+ [?APP,FromVsn,ToVsn,UpInstrs,DownInstrs]);
+ {error,{vsn_not_found,_}} when FromVsn==ToVsn ->
+ io:format("No upgrade test for ~p, same version",[?APP])
+ end,
+ State.
+upgrade_upgraded(_CtData,fail_upgraded) ->
+ ct:fail(upgrade_upgraded_failed);
+upgrade_upgraded(_CtData,State) ->
+ State.
+upgrade_downgraded(_CtData,fail_downgraded) ->
+ ct:fail(upgrade_downgraded_failed);
+upgrade_downgraded(_CtData,State) ->
+ State.
diff --git a/lib/common_test/test/ct_test_support.erl b/lib/common_test/test/ct_test_support.erl
index 248ec6c4df..0ee83f45b8 100644
--- a/lib/common_test/test/ct_test_support.erl
+++ b/lib/common_test/test/ct_test_support.erl
@@ -414,14 +414,14 @@ ct_rpc({M,F,A}, Config) ->
%%%-----------------------------------------------------------------
%%% random_error/1
random_error(Config) when is_list(Config) ->
- random:seed(os:timestamp()),
+ rand:seed(exsplus),
Gen = fun(0,_) -> ok; (N,Fun) -> Fun(N-1, Fun) end,
- Gen(random:uniform(100), Gen),
+ Gen(rand:uniform(100), Gen),
ErrorTypes = ['BADMATCH','BADARG','CASE_CLAUSE','FUNCTION_CLAUSE',
'EXIT','THROW','UNDEF'],
- Type = lists:nth(random:uniform(length(ErrorTypes)), ErrorTypes),
- Where = case random:uniform(2) of
+ Type = lists:nth(rand:uniform(length(ErrorTypes)), ErrorTypes),
+ Where = case rand:uniform(2) of
1 ->
io:format("ct_test_support *returning* error of type ~w",
[Type]),
@@ -1228,8 +1228,8 @@ log_events(TC, Events, EvLogDir, Opts) ->
file:close(Dev),
FullLogFile = join_abs_dirs(proplists:get_value(net_dir, Opts),
LogFile),
- io:format("Events written to logfile: <a href=\"file://~s\">~s</a>~n",
- [FullLogFile,FullLogFile]),
+ ct:log("Events written to logfile: <a href=\"file://~s\">~s</a>~n",
+ [FullLogFile,FullLogFile],[no_css]),
io:format(user, "Events written to logfile: ~p~n", [LogFile]).
log_events1(Evs, Dev, "") ->
diff --git a/lib/common_test/test/ct_test_support_eh.erl b/lib/common_test/test/ct_test_support_eh.erl
index 7c3d137901..f3d933da04 100644
--- a/lib/common_test/test/ct_test_support_eh.erl
+++ b/lib/common_test/test/ct_test_support_eh.erl
@@ -27,7 +27,7 @@
-behaviour(gen_event).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("common_test/include/ct_event.hrl").
%% gen_event callbacks
diff --git a/lib/common_test/test/erl2html2_SUITE.erl b/lib/common_test/test/erl2html2_SUITE.erl
new file mode 100644
index 0000000000..8e9f6e773a
--- /dev/null
+++ b/lib/common_test/test/erl2html2_SUITE.erl
@@ -0,0 +1,277 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(erl2html2_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+
+-define(HEADER(Src),
+ ["<!DOCTYPE HTML PUBLIC",
+ "\"-//W3C//DTD HTML 3.2 Final//EN\">\n",
+ "<!-- autogenerated by 'erl2html2' -->\n",
+ "<html>\n",
+ "<head><title>Module ", Src, "</title>\n",
+ "<meta http-equiv=\"cache-control\" ",
+ "content=\"no-cache\"></meta>\n",
+ "</head>\n",
+ "<body bgcolor=\"white\" text=\"black\" ",
+ "link=\"blue\" vlink=\"purple\" alink=\"red\">\n"]).
+
+%%--------------------------------------------------------------------
+%% @spec suite() -> Info
+%% Info = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[ts_install_cth,test_server_test_lib]}].
+
+%%--------------------------------------------------------------------
+%% @spec init_per_suite(Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_suite(Config0) -> void() | {save_config,Config1}
+%% Config0 = Config1 = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+end_per_suite(_Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1} | {fail,Reason}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%% @end
+%%--------------------------------------------------------------------
+groups() ->
+ [].
+
+%%--------------------------------------------------------------------
+%% @spec all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+all() ->
+ [macros_defined, macros_undefined].
+
+%%--------------------------------------------------------------------
+%% @spec TestCase(Config0) ->
+%% ok | exit() | {skip,Reason} | {comment,Comment} |
+%% {save_config,Config1} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% Comment = term()
+%% @end
+%%--------------------------------------------------------------------
+macros_defined(Config) ->
+ %% let erl2html2 use epp as parser
+ DataDir = ?config(data_dir,Config),
+ InclDir = filename:join(DataDir, "include"),
+ {Src,Dst} = convert_module("m1",[InclDir],Config),
+ {true,L} = check_line_numbers(Src,Dst),
+ ok = check_link_targets(Src,Dst,L,[{baz,0}],[]),
+ ok.
+
+macros_undefined(Config) ->
+ %% let erl2html2 use epp_dodger as parser
+ {Src,Dst} = convert_module("m1",[],Config),
+ {true,L} = check_line_numbers(Src,Dst),
+ ok = check_link_targets(Src,Dst,L,[{baz,0}],[{quux,0}]),
+ ok.
+
+convert_module(Mod,InclDirs,Config) ->
+ DataDir = ?config(data_dir,Config),
+ PrivDir = ?config(priv_dir,Config),
+ Src = filename:join(DataDir,Mod++".erl"),
+ Dst = filename:join(PrivDir,Mod++".erl.html"),
+ io:format("<a href=\"~s\">~s</a>\n",[Src,filename:basename(Src)]),
+ ok = erl2html2:convert(Src, Dst, InclDirs, "<html><body>"),
+ io:format("<a href=\"~s\">~s</a>\n",[Dst,filename:basename(Dst)]),
+ {Src,Dst}.
+
+%% Check that there are the same number of lines in each file, and
+%% that all line numbers are displayed in the dst file.
+check_line_numbers(Src,Dst) ->
+ {ok,SFd} = file:open(Src,[read]),
+ {ok,DFd} = file:open(Dst,[read]),
+ {ok,SN} = count_src_lines(SFd,0),
+ ok = file:close(SFd),
+ {ok,DN} = read_dst_line_numbers(DFd),
+ ok = file:close(DFd),
+ {SN == DN,SN}.
+
+count_src_lines(Fd,N) ->
+ case io:get_line(Fd,"") of
+ eof ->
+ {ok,N};
+ {error,Reason} ->
+ {error,Reason,N};
+ _Line ->
+ count_src_lines(Fd,N+1)
+ end.
+
+read_dst_line_numbers(Fd) ->
+ "<html><body><pre>\n" = io:get_line(Fd,""),
+ read_dst_line_numbers(Fd,0).
+read_dst_line_numbers(Fd,Last) when is_integer(Last) ->
+ case io:get_line(Fd,"") of
+ eof ->
+ {ok,Last};
+ {error,Reason} ->
+ {error,Reason,Last};
+ "</pre>"++_ ->
+ {ok,Last};
+ "</body>"++_ ->
+ {ok,Last};
+ Line ->
+ %% erlang:display(Line),
+ Num = check_line_number(Last,Line,Line),
+ read_dst_line_numbers(Fd,Num)
+ end.
+
+check_line_number(Last,Line,OrigLine) ->
+ case Line of
+ "<a name="++_ ->
+ [$>|Rest] = lists:dropwhile(fun($>) -> false; (_) -> true end,Line),
+ check_line_number(Last,Rest,OrigLine);
+ _ ->
+ [N |_] = string:tokens(Line,":"),
+% erlang:display(N),
+ Num =
+ try list_to_integer(string:strip(N))
+ catch _:_ -> ct:fail({no_line_number_after,Last,OrigLine})
+ end,
+ if Num == Last+1 ->
+ Num;
+ true ->
+ ct:fail({unexpected_integer,Num,Last})
+ end
+ end.
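+
+%% A generated line that passes this check could look like this (a
+%% sketch of the erl2html2 output format; the exact markup may vary):
+%%   <a name="12">12: foo(x) -></a>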
+
+
+%% Check that there is one link target for each line and one for each
+%% function.
+%% The test module has -compile(export_all), so all functions are
+%% found by listing the exported ones.
+check_link_targets(Src,Dst,L,RmFncs,ShouldRemain) ->
+ Mod = list_to_atom(filename:basename(filename:rootname(Src))),
+ Exports = Mod:module_info(exports)--[{module_info,0},{module_info,1}|RmFncs],
+ LastExprFuncs = [Func || {Func,_A} <- Exports],
+ {ok,{FAs,Fs,L},_} =
+ xmerl_sax_parser:file(Dst,
+ [{event_fun,fun sax_event/3},
+ {event_state,{Exports,LastExprFuncs,0}}]),
+ true = (length(FAs) == length(ShouldRemain)),
+ [] = [FA || FA <- FAs, not lists:member(FA,ShouldRemain)],
+ [] = [F || F <- Fs, not lists:keymember(F,1,ShouldRemain)],
+ ok.
+
+sax_event(Event,_Loc,State) ->
+ sax_event(Event,State).
+
+sax_event({startElement,_Uri,"a",_QN,Attrs},{Exports,LastExprFuncs,PrevLine}) ->
+ {_,_,"name",Name} = lists:keyfind("name",3,Attrs),
+ case catch list_to_integer(Name) of
+ Line when is_integer(Line) ->
+ case PrevLine + 1 of
+ Line ->
+ {Exports,LastExprFuncs,Line};
+ Other ->
+ ct:fail({unexpected_line_number_target,Other})
+ end;
+ {'EXIT',_} ->
+ {match,[FStr,EndStr]} =
+ re:run(Name,"^(.*)-(last_expr|[0-9]+)$",
+ [{capture,all_but_first,list}]),
+ F = list_to_atom(http_uri:decode(FStr)),
+ case EndStr of
+ "last_expr" ->
+ true = lists:member(F,LastExprFuncs),
+ {Exports,lists:delete(F,LastExprFuncs),PrevLine};
+ _ ->
+ A = list_to_integer(EndStr),
+ A = proplists:get_value(F,Exports),
+ {lists:delete({F,A},Exports),LastExprFuncs,PrevLine}
+ end
+ end;
+sax_event(_,State) ->
+ State.
diff --git a/lib/common_test/test/erl2html2_SUITE_data/Makefile.src b/lib/common_test/test/erl2html2_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..942ac0584b
--- /dev/null
+++ b/lib/common_test/test/erl2html2_SUITE_data/Makefile.src
@@ -0,0 +1,2 @@
+all:
+	erlc -Iinclude m1.erl
\ No newline at end of file
diff --git a/lib/common_test/test/erl2html2_SUITE_data/header1.hrl b/lib/common_test/test/erl2html2_SUITE_data/header1.hrl
new file mode 100644
index 0000000000..53d1b79ac5
--- /dev/null
+++ b/lib/common_test/test/erl2html2_SUITE_data/header1.hrl
@@ -0,0 +1,4 @@
+baz() ->
+ ok.
+
+-define(MACRO_DEFINING_A_FUNCTION,quux() -> ok).
diff --git a/lib/common_test/test/erl2html2_SUITE_data/include/header2.hrl b/lib/common_test/test/erl2html2_SUITE_data/include/header2.hrl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/common_test/test/erl2html2_SUITE_data/include/header2.hrl
diff --git a/lib/common_test/test/erl2html2_SUITE_data/include/header3.hrl b/lib/common_test/test/erl2html2_SUITE_data/include/header3.hrl
new file mode 100644
index 0000000000..2a20850a3a
--- /dev/null
+++ b/lib/common_test/test/erl2html2_SUITE_data/include/header3.hrl
@@ -0,0 +1 @@
+-define(EPP_SWITCH, on).
diff --git a/lib/common_test/test/erl2html2_SUITE_data/m1.erl b/lib/common_test/test/erl2html2_SUITE_data/m1.erl
new file mode 100644
index 0000000000..1d405963a5
--- /dev/null
+++ b/lib/common_test/test/erl2html2_SUITE_data/m1.erl
@@ -0,0 +1,52 @@
+%% Comment with <html> code &amp; </html>
+%% and also some "quotes" and 'single quotes'
+
+-module(m1).
+
+-compile(export_all).
+
+-include("header1.hrl").
+-include("header2.hrl").
+-include("header3.hrl").
+
+-define(MACRO1,value).
+
+%% This macro is used to select parser in erl2html2.
+%% If EPP_SWITCH is defined epp is used, else epp_dodger.
+epp_switch() ->
+ ?EPP_SWITCH.
+
+%%% Comment
+foo(x) ->
+ %% Comment
+ ok_x;
+foo(y) ->
+ %% Second clause
+ ok_y.
+
+'quoted_foo'() ->
+ ok.
+
+'quoted_foo_with_"_and_/'() ->
+ ok.
+
+'quoted_foo_with_(_and_)'() ->
+ ok.
+
+'quoted_foo_with_<_and_>'() ->
+ ok.
+
+bar() ->
+ do_something(),
+ok. % indentation error, OTP-9710
+
+%% Function inside macro definition
+?MACRO_DEFINING_A_FUNCTION.
+
+%% Two functions on one line
+quuux() -> ok. quuuux() -> ok.
+
+%% do_something/0 does something
+do_something() ->
+ ?MACRO1.
+%% comments after last line
diff --git a/lib/common_test/test/test_server_SUITE.erl b/lib/common_test/test/test_server_SUITE.erl
new file mode 100644
index 0000000000..d1c789f34c
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE.erl
@@ -0,0 +1,449 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%%-------------------------------------------------------------------
+%%% @author Lukas Larsson <[email protected]>
+%%% @copyright (C) 2011, Erlang Solutions Ltd.
+%%% @doc
+%%%
+%%% @end
+%%% Created : 15 Feb 2011 by Lukas Larsson <[email protected]>
+%%%-------------------------------------------------------------------
+-module(test_server_SUITE).
+
+%% Note: This directive should only be used in test suites.
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include("test_server_test_lib.hrl").
+-include_lib("kernel/include/file.hrl").
+
+%%--------------------------------------------------------------------
+%% COMMON TEST CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+%% @spec suite() -> Info
+suite() ->
+ [{ct_hooks,[ts_install_cth,test_server_test_lib]}].
+
+
+%% @spec init_per_suite(Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+init_per_suite(Config) ->
+ [{path_dirs,[proplists:get_value(data_dir,Config)]} | Config].
+
+%% @spec end_per_suite(Config) -> _
+end_per_suite(_Config) ->
+ io:format("TEST_SERVER_FRAMEWORK: ~p",[os:getenv("TEST_SERVER_FRAMEWORK")]),
+ ok.
+
+%% @spec init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%% @spec end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%% @spec init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+%% @spec end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1} | {fail,Reason}
+end_per_testcase(test_server_unicode, _Config) ->
+ [_,Host] = string:tokens(atom_to_list(node()), "@"),
+ N1 = list_to_atom("test_server_tester_latin1" ++ "@" ++ Host),
+ N2 = list_to_atom("test_server_tester_utf8" ++ "@" ++ Host),
+ test_server:stop_node(N1),
+ test_server:stop_node(N2),
+ ok;
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%% @spec groups() -> [Group]
+groups() ->
+ [].
+
+%% @spec all() -> GroupsAndTestCases | {skip,Reason}
+all() ->
+ [test_server_SUITE, test_server_parallel01_SUITE,
+ test_server_conf02_SUITE, test_server_conf01_SUITE,
+ test_server_skip_SUITE, test_server_shuffle01_SUITE,
+ test_server_break_SUITE, test_server_cover_SUITE,
+ test_server_unicode].
+
+
+%%--------------------------------------------------------------------
+%% TEST CASES
+%%--------------------------------------------------------------------
+%% @spec TestCase(Config0) ->
+%% ok | exit() | {skip,Reason} | {comment,Comment} |
+%% {save_config,Config1} | {skip_and_save,Reason,Config1}
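+%% The nine numeric arguments in the calls below map, in order, to:
+%% NCases, NFail, NExpected, NSucc, NUsrSkip, NAutoSkip, NActualSkip,
+%% NActualFail and NActualSucc - see the head of
+%% run_test_server_tests/12 further down in this file.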
+test_server_SUITE(Config) ->
+% rpc:call(Node,dbg, tracer,[]),
+% rpc:call(Node,dbg, p,[all,c]),
+% rpc:call(Node,dbg, tpl,[test_server_ctrl,x]),
+ run_test_server_tests("test_server_SUITE",
+ [{test_server_SUITE,skip_case7,"SKIPPED!"}],
+ 40, 1, 32, 21, 9, 1, 11, 2, 27, Config).
+
+test_server_parallel01_SUITE(Config) ->
+ run_test_server_tests("test_server_parallel01_SUITE", [],
+ 37, 0, 19, 19, 0, 0, 0, 0, 37, Config).
+
+test_server_shuffle01_SUITE(Config) ->
+ run_test_server_tests("test_server_shuffle01_SUITE", [],
+ 130, 0, 0, 76, 0, 0, 0, 0, 130, Config).
+
+test_server_skip_SUITE(Config) ->
+ run_test_server_tests("test_server_skip_SUITE", [],
+ 3, 0, 1, 0, 1, 0, 3, 0, 0, Config).
+
+test_server_conf01_SUITE(Config) ->
+ run_test_server_tests("test_server_conf01_SUITE", [],
+ 24, 0, 12, 12, 0, 0, 0, 0, 24, Config).
+
+test_server_conf02_SUITE(Config) ->
+ run_test_server_tests("test_server_conf02_SUITE", [],
+ 26, 0, 12, 12, 0, 0, 0, 0, 26, Config).
+
+test_server_break_SUITE(Config) ->
+ run_test_server_tests("test_server_break_SUITE", [],
+ 8, 2, 6, 4, 0, 0, 0, 2, 6, Config).
+
+test_server_cover_SUITE(Config) ->
+ case test_server:is_cover() of
+ true ->
+ {skip, "Cover already running"};
+ false ->
+ PrivDir = ?config(priv_dir,Config),
+
+	    %% The test suite has two test cases:
+	    %%   tc1 calls cover_helper:foo/0
+	    %%   tc2 calls cover_helper:bar/0
+	    %% Each function in cover_helper is one line.
+	    %%
+	    %% The first test run skips tc2, so only cover_helper:foo/0 is executed.
+	    %% The cover file specifies that cover_helper is to be included in this run.
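+	    %% (t1.cover will thus contain the single term
+	    %%  "{include,[cover_helper]}.")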
+ CoverFile1 = filename:join(PrivDir,"t1.cover"),
+ CoverSpec1 = {include,[cover_helper]},
+ file:write_file(CoverFile1,io_lib:format("~p.~n",[CoverSpec1])),
+ run_test_server_tests("test_server_cover_SUITE",
+ [{test_server_cover_SUITE,tc2,"SKIPPED!"}],
+ 4, 0, 2, 1, 1, 0, 1, 0, 3,
+ CoverFile1, Config),
+
+	    %% The next test run skips tc1, so only cover_helper:bar/0 is executed.
+	    %% The cover file marks cover_helper for cross cover analysis under tag t1.
+ CoverFile2 = filename:join(PrivDir,"t2.cover"),
+ CoverSpec2 = {cross,[{t1,[cover_helper]}]},
+ file:write_file(CoverFile2,io_lib:format("~p.~n",[CoverSpec2])),
+ run_test_server_tests("test_server_cover_SUITE",
+ [{test_server_cover_SUITE,tc1,"SKIPPED!"}],
+ 4, 0, 2, 1, 1, 0, 1, 0, 3, CoverFile2, Config),
+
+	    %% Cross cover analysis
+ WorkDir = ?config(work_dir,Config),
+ WC = filename:join([WorkDir,"test_server_cover_SUITE.logs","run.*"]),
+ [D2,D1|_] = lists:reverse(lists:sort(filelib:wildcard(WC))),
+ TagDirs = [{t1,D1},{t2,D2}],
+ test_server_ctrl:cross_cover_analyse(details,TagDirs),
+
+	    %% Check that the cover log shows only what is really included
+	    %% in the test, and that the cross cover log shows the
+	    %% accumulated result.
+ {ok,Cover1} = file:read_file(filename:join(D1,"cover.log")),
+ [{cover_helper,{1,1,_}}] = binary_to_term(Cover1),
+ {ok,Cover2} = file:read_file(filename:join(D2,"cover.log")),
+ [] = binary_to_term(Cover2),
+ {ok,Cross} = file:read_file(filename:join(D1,"cross_cover.log")),
+ [{cover_helper,{2,0,_}}] = binary_to_term(Cross),
+ ok
+ end.
+
+test_server_unicode(Config) ->
+ run_test_server_tests("test_server_unicode_SUITE", [],
+ 5, 0, 3, 3, 0, 0, 0, 0, 5, Config),
+
+    %% Create and run two test suites - one with file name and content
+    %% in latin1 (if the default file name mode is latin1) and one with
+    %% file name and content in utf8. Both have names and content
+    %% that include the letters äöå. Check that all logs are generated
+    %% with utf8 encoded file names.
+ case file:native_name_encoding() of
+ utf8 ->
+ ok;
+ latin1 ->
+ generate_and_run_unicode_test(Config,latin1)
+ end,
+ generate_and_run_unicode_test(Config,utf8).
+
+%%%-----------------------------------------------------------------
+run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc,
+ NUsrSkip, NAutoSkip,
+ NActualSkip, NActualFail, NActualSucc, Config) ->
+ run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc,
+ NUsrSkip, NAutoSkip,
+ NActualSkip, NActualFail, NActualSucc, false, Config).
+
+run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc,
+ NUsrSkip, NAutoSkip,
+ NActualSkip, NActualFail, NActualSucc, Cover, Config) ->
+ Node = proplists:get_value(node, Config),
+ Encoding = rpc:call(Node,file,native_name_encoding,[]),
+ WorkDir = proplists:get_value(work_dir, Config),
+ LogDir = filename:join(WorkDir, SuiteName++".logs"),
+ LogDirUri = test_server_ctrl:uri_encode(LogDir, Encoding),
+ ct:log("<a href=\"file://~s\">Test case log files</a>\n", [LogDirUri]),
+
+ {ok,_Pid} = rpc:call(Node,test_server_ctrl, start, []),
+ case Cover of
+ false ->
+ ok;
+ _ ->
+ rpc:call(Node,test_server_ctrl,cover,[Cover,details])
+ end,
+ rpc:call(Node,
+ test_server_ctrl,add_dir_with_skip,
+ [SuiteName,
+ [proplists:get_value(data_dir,Config)],SuiteName,
+ Skip]),
+
+ until(fun() ->
+ rpc:call(Node,test_server_ctrl,jobs,[]) =:= []
+ end),
+
+ rpc:call(Node,test_server_ctrl, stop, []),
+
+ LogDir1 = translate_filename(LogDir,Encoding),
+ LastRunDir = get_latest_run_dir(LogDir1),
+ LastSuiteLog = filename:join(LastRunDir,"suite.log"),
+ {ok,Data} = test_server_test_lib:parse_suite(LastSuiteLog),
+ check([{"Number of cases",NCases,Data#suite.n_cases},
+ {"Number failed",NFail,Data#suite.n_cases_failed},
+ {"Number expected",NExpected,Data#suite.n_cases_expected},
+ {"Number successful",NSucc,Data#suite.n_cases_succ},
+ {"Number user skipped",NUsrSkip,Data#suite.n_cases_user_skip},
+ {"Number auto skipped",NAutoSkip,Data#suite.n_cases_auto_skip}], ok),
+ {NActualSkip,NActualFail,NActualSucc} =
+ lists:foldl(fun(#tc{ result = skip },{S,F,Su}) ->
+ {S+1,F,Su};
+ (#tc{ result = auto_skip },{S,F,Su}) ->
+ {S+1,F,Su};
+ (#tc{ result = ok },{S,F,Su}) ->
+ {S,F,Su+1};
+ (#tc{ result = failed },{S,F,Su}) ->
+ {S,F+1,Su}
+ end,{0,0,0},Data#suite.cases),
+ Data.
+
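+%% Return the file name in the form needed to match names created on
+%% the test node: a raw binary in the test node's encoding if the two
+%% encodings differ, otherwise the name unchanged.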
+translate_filename(Filename,EncodingOnTestNode) ->
+ case {file:native_name_encoding(),EncodingOnTestNode} of
+ {X,X} -> Filename;
+ {utf8,latin1} -> list_to_binary(Filename);
+ {latin1,utf8} -> unicode:characters_to_binary(Filename)
+ end.
+
+get_latest_run_dir(Dir) ->
+    %% For the time being, filelib:wildcard cannot take a binary
+    %% argument, so we avoid using it here.
+ case file:list_dir(Dir) of
+ {ok,Files} ->
+ {ok,RE} = re:compile(<<"^run.[1-2][-_\.0-9]*$">>),
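+	    %% (The pattern is intended to match run directory names
+	    %%  such as "run.2013-05-03_14.22.07".)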
+ RunDirs = lists:filter(
+ fun(F) ->
+ L = l(F),
+ case re:run(F,RE) of
+ {match,[{0,L}]} -> true;
+ _ -> false
+ end
+ end, Files),
+ case RunDirs of
+ [] ->
+ Dir;
+ [H|T] ->
+ filename:join(Dir,get_latest_dir(T,H))
+ end;
+ _ ->
+ Dir
+ end.
+
+l(X) when is_binary(X) -> size(X);
+l(X) when is_list(X) -> length(X).
+
+get_latest_dir([H|T],Latest) when H>Latest ->
+ get_latest_dir(T,H);
+get_latest_dir([_|T],Latest) ->
+ get_latest_dir(T,Latest);
+get_latest_dir([],Latest) ->
+ Latest.
+
+check([{Str,Same,Same}|T], Status) ->
+ io:format("~s: ~p\n", [Str,Same]),
+ check(T, Status);
+check([{Str,Expected,Actual}|T], _) ->
+ io:format("~s: expected ~p, actual ~p\n", [Str,Expected,Actual]),
+ check(T, error);
+check([], ok) -> ok;
+check([], error) -> ?t:fail().
+
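+%% Poll Fun every 100 ms until it returns true.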
+until(Fun) ->
+ case Fun() of
+ true ->
+ ok;
+ false ->
+ timer:sleep(100),
+ until(Fun)
+ end.
+
+generate_and_run_unicode_test(Config0,Encoding) ->
+ DataDir = ?config(data_dir,Config0),
+ Suite = create_unicode_test_suite(DataDir,Encoding),
+
+    %% We cannot run this test on the default node, since the node
+    %% must be started with the correct file name mode (+fnu/+fnl).
+    %% Note: the nodes are stopped by end_per_testcase/2.
+ Config1 = lists:keydelete(node,1,Config0),
+ Config2 = lists:keydelete(work_dir,1,Config1),
+ NodeName = list_to_atom("test_server_tester_" ++ atom_to_list(Encoding)),
+ Config = start_node(Config2,NodeName,erts_switch(Encoding)),
+
+ %% Compile the suite
+ Node = proplists:get_value(node,Config),
+ {ok,Mod} = rpc:call(Node,compile,file,[Suite,[report,{outdir,DataDir}]]),
+ ModStr = atom_to_list(Mod),
+
+ %% Clean logdir
+ LogDir0 = filename:join(DataDir,ModStr++".logs"),
+ LogDir = translate_filename(LogDir0,Encoding),
+ rm_dir(LogDir),
+
+ %% Run the test
+ run_test_server_tests(ModStr, [], 3, 0, 1, 1, 0, 0, 0, 0, 3, Config),
+
+ %% Check that all logs are created with utf8 encoded filenames
+ true = filelib:is_dir(LogDir),
+
+ RunDir = get_latest_run_dir(LogDir),
+ true = filelib:is_dir(RunDir),
+
+ LowerModStr = string:to_lower(ModStr),
+ SuiteHtml = translate_filename(LowerModStr++".src.html",Encoding),
+ true = filelib:is_regular(filename:join(RunDir,SuiteHtml)),
+
+ TCLog = translate_filename(LowerModStr++".tc_äöå.html",Encoding),
+ true = filelib:is_regular(filename:join(RunDir,TCLog)),
+ ok.
+
+%% Same as test_server_test_lib:start_slave, but starts a peer node
+%% with additional arguments.
+%% The reason is that we need to start nodes with +fnu/+fnl, and that
+%% does not work well with a slave node, since slave nodes run a
+%% remote file system on the master - i.e. they use the same file
+%% name mode as the master.
+start_node(Config,Name,Args) ->
+ [_,Host] = string:tokens(atom_to_list(node()), "@"),
+ ct:log("Trying to start ~w@~s~n",[Name,Host]),
+ case test_server:start_node(Name, peer, [{args,Args}]) of
+ {error,Reason} ->
+ test_server:fail(Reason);
+ {ok,Node} ->
+ ct:log("Node ~p started~n", [Node]),
+ test_server_test_lib:prepare_tester_node(Node,Config)
+ end.
+
+create_unicode_test_suite(Dir,Encoding) ->
+ ModStr = "test_server_"++atom_to_list(Encoding)++"_äöå_SUITE",
+ File = filename:join(Dir,ModStr++".erl"),
+ Suite =
+ ["%% -*- ",epp:encoding_to_string(Encoding)," -*-\n",
+ "-module(",ModStr,").\n"
+ "\n"
+ "-export([all/1, init_per_suite/1, end_per_suite/1]).\n"
+ "-export([init_per_testcase/2, end_per_testcase/2]).\n"
+ "-export([tc_äöå/1]).\n"
+ "\n"
+ "-include_lib(\"common_test/include/ct.hrl\").\n"
+ "\n"
+ "all(suite) ->\n"
+ " [tc_äöå].\n"
+ "\n"
+ "init_per_suite(Config) ->\n"
+ " Config.\n"
+ "\n"
+ "end_per_suite(_Config) ->\n"
+ " ok.\n"
+ "\n"
+ "init_per_testcase(_Case,Config) ->\n"
+ " init_timetrap(500,Config).\n"
+ "\n"
+ "init_timetrap(T,Config) ->\n"
+ " Dog = ?t:timetrap(T),\n"
+ " [{watchdog, Dog}|Config].\n"
+ "\n"
+ "end_per_testcase(_Case,Config) ->\n"
+ " cancel_timetrap(Config).\n"
+ "\n"
+ "cancel_timetrap(Config) ->\n"
+ " Dog=?config(watchdog, Config),\n"
+ " ?t:timetrap_cancel(Dog),\n"
+ " ok.\n"
+ "\n"
+ "tc_äöå(Config) when is_list(Config) ->\n"
+ " true = filelib:is_dir(?config(priv_dir,Config)),\n"
+ " ok.\n"],
+ {ok,Fd} = file:open(raw_filename(File,Encoding),[write,{encoding,Encoding}]),
+ io:put_chars(Fd,Suite),
+ ok = file:close(Fd),
+ File.
+
+raw_filename(Name,latin1) -> list_to_binary(Name);
+raw_filename(Name,utf8) -> unicode:characters_to_binary(Name).
+
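+%% Recursively delete Dir and everything below it.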
+rm_dir(Dir) ->
+ case file:list_dir(Dir) of
+ {error,enoent} ->
+ ok;
+ {ok,Files} ->
+ rm_files([filename:join(Dir, F) || F <- Files]),
+ file:del_dir(Dir)
+ end.
+
+rm_files([F | Fs]) ->
+ case file:read_file_info(F) of
+ {ok,#file_info{type=directory}} ->
+ rm_dir(F),
+ rm_files(Fs);
+ {ok,_Regular} ->
+ case file:delete(F) of
+ ok ->
+ rm_files(Fs);
+ {error,Errno} ->
+ exit({del_failed,F,Errno})
+ end
+ end;
+rm_files([]) ->
+ ok.
+
+erts_switch(latin1) -> "+fnl";
+erts_switch(utf8) -> "+fnu".
diff --git a/lib/common_test/test/test_server_SUITE_data/Makefile.src b/lib/common_test/test/test_server_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..5aeb035572
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/Makefile.src
@@ -0,0 +1,11 @@
+all:
+ erlc test_server_SUITE.erl
+ erlc test_server_parallel01_SUITE.erl
+ erlc test_server_conf01_SUITE.erl
+ erlc test_server_shuffle01_SUITE.erl
+ erlc test_server_conf02_SUITE.erl
+ erlc test_server_skip_SUITE.erl
+ erlc test_server_break_SUITE.erl
+ erlc test_server_cover_SUITE.erl
+ erlc +debug_info test_server_cover_SUITE_data/cover_helper.erl
+ erlc test_server_unicode_SUITE.erl
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_SUITE.erl
new file mode 100644
index 0000000000..c3d4315cb8
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_SUITE.erl
@@ -0,0 +1,518 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%------------------------------------------------------------------
+%%% Test Server self test.
+%%%------------------------------------------------------------------
+-module(test_server_SUITE).
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-export([all/1]).
+
+-export([init_per_suite/1, end_per_suite/1]).
+-export([init_per_testcase/2, end_per_testcase/2, fin_per_testcase/2]).
+-export([config/1, comment/1, timetrap/1, timetrap_cancel/1, multiply_timetrap/1,
+ init_per_s/1, init_per_tc/1, end_per_tc/1,
+ timeconv/1, msgs/1, capture/1, timecall/1,
+ do_times/1, do_times_mfa/1, do_times_fun/1,
+ skip_cases/1, skip_case1/1, skip_case2/1, skip_case3/1,
+ skip_case4/1, skip_case5/1, skip_case6/1, skip_case7/1,
+ skip_case8/1, skip_case9/1,
+ conf_init/1, check_new_conf/1, conf_cleanup/1,
+ check_old_conf/1, conf_init_fail/1, start_stop_node/1,
+ cleanup_nodes_init/1, check_survive_nodes/1, cleanup_nodes_fin/1,
+ commercial/1,
+ io_invalid_data/1, print_unexpected/1]).
+
+-export([dummy_function/0,dummy_function/1,doer/1]).
+
+all(doc) -> ["Test Server self test"];
+all(suite) ->
+ [config, comment, timetrap, timetrap_cancel, multiply_timetrap,
+ init_per_s, init_per_tc, end_per_tc,
+ timeconv, msgs, capture, timecall, do_times, skip_cases,
+ commercial, io_invalid_data, print_unexpected,
+ {conf, conf_init, [check_new_conf], conf_cleanup},
+ check_old_conf,
+ {conf, conf_init_fail,[conf_member_skip],conf_cleanup_skip},
+ start_stop_node,
+ {conf, cleanup_nodes_init,[check_survive_nodes],cleanup_nodes_fin},
+ config
+ ].
+
+
+init_per_suite(Config) ->
+ [{init_per_suite_var,ok}|Config].
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
+ Dog = ?t:timetrap(?t:minutes(2)),
+ Config1 = [{watchdog, Dog}|Config],
+ case Func of
+ init_per_tc ->
+ [{strange_var, 1}|Config1];
+ skip_case8 ->
+ {skipped, "This case should be noted as `Skipped'"};
+ skip_case9 ->
+ {skip, "This case should be noted as `Skipped'"};
+ _ ->
+ Config1
+ end;
+init_per_testcase(Func, Config) ->
+ io:format("Func:~p",[Func]),
+ io:format("Config:~p",[Config]),
+ ?t:fail("Arguments to init_per_testcase not correct").
+
+end_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ case Func of
+ end_per_tc -> io:format("CLEANUP => this test case is ok\n");
+ _Other -> ok
+ end;
+end_per_testcase(Func, Config) ->
+ io:format("Func:~p",[Func]),
+ io:format("Config:~p",[Config]),
+ ?t:fail("Arguments to end_per_testcase not correct").
+
+fin_per_testcase(Func, Config) ->
+ io:format("Func:~p",[Func]),
+ io:format("Config:~p",[Config]),
+ ?t:fail("fin_per_testcase/2 called, should have called end_per_testcase/2").
+
+
+config(suite) -> [];
+config(doc) -> ["Test that the Config variable is decent, ",
+ "and that the std config variables are correct ",
+ "(check that data/priv dir exists)."
+ "Also check that ?config macro works."];
+config(Config) when is_list(Config) ->
+ is_tuplelist(Config),
+ {value,{data_dir,Dd}}=lists:keysearch(data_dir,1,Config),
+ {value,{priv_dir,Dp}}=lists:keysearch(priv_dir,1,Config),
+ true=is_dir(Dd),
+ {ok, _Bin}=file:read_file(filename:join(Dd, "dummy_file")),
+ true=is_dir(Dp),
+
+ Dd = ?config(data_dir,Config),
+ Dp = ?config(priv_dir,Config),
+ ok;
+config(_Config) ->
+ ?t:fail("Config variable is not a list.").
+
+is_tuplelist([]) ->
+ true;
+is_tuplelist([{_A,_B}|Rest]) ->
+ is_tuplelist(Rest);
+is_tuplelist(_) ->
+ false.
+
+is_dir(Dir) ->
+ case file:read_file_info(Dir) of
+ {ok, #file_info{type=directory}} ->
+ true;
+ _ ->
+ false
+ end.
+
+comment(suite) -> [];
+comment(doc) -> ["Print a comment in the HTML log"];
+comment(Config) when is_list(Config) ->
+ ?t:comment("This comment should not occur in the HTML log because a later"
+ " comment shall overwrite it"),
+ ?t:comment("This comment is printed with the comment/1 function."
+ " It should occur in the HTML log").
+
+
+
+timetrap(suite) -> [];
+timetrap(doc) -> ["Test that timetrap works."];
+timetrap(Config) when is_list(Config) ->
+ TrapAfter = 3000,
+ Dog=?t:timetrap(TrapAfter),
+ process_flag(trap_exit, true),
+ TimeOut = TrapAfter * test_server:timetrap_scale_factor() + 1000,
+ receive
+ {'EXIT', Dog, {timetrap_timeout, _, _}} ->
+ ok;
+ {'EXIT', _OtherPid, {timetrap_timeout, _, _}} ->
+ ?t:fail("EXIT signal from wrong process")
+ after
+ TimeOut ->
+ ?t:fail("Timetrap is not working.")
+ end,
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+
+timetrap_cancel(suite) -> [];
+timetrap_cancel(doc) -> ["Test that timetrap_cancel works."];
+timetrap_cancel(Config) when is_list(Config) ->
+ Dog=?t:timetrap(1000),
+ receive
+ after
+ 500 ->
+ ok
+ end,
+ ?t:timetrap_cancel(Dog),
+ receive
+ after 1000 ->
+ ok
+ end,
+ ok.
+
+multiply_timetrap(suite) -> [];
+multiply_timetrap(doc) -> ["Test multiply timetrap"];
+multiply_timetrap(Config) when is_list(Config) ->
+ %% This simulates the call to test_server_ctrl:multiply_timetraps/1:
+ put(test_server_multiply_timetraps,{2,true}),
+
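+    %% With the multiplier set to 2, the 500 ms timetrap below becomes
+    %% 1000 ms, so the 800 ms sleep must not cause a timetrap timeout.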
+ Dog = ?t:timetrap(500),
+ timer:sleep(800),
+ ?t:timetrap_cancel(Dog),
+
+ %% Reset
+ put(test_server_multiply_timetraps,1),
+ ok.
+
+
+init_per_s(suite) -> [];
+init_per_s(doc) -> ["Test that a Config that is altered in ",
+ "init_per_suite gets through to the testcases."];
+init_per_s(Config) ->
+ %% Check that the config var sent from init_per_suite
+ %% really exists.
+ {value, {init_per_suite_var, ok}} =
+ lists:keysearch(init_per_suite_var,1,Config),
+
+ %% Check that the other variables still exist.
+ {value,{data_dir,_Dd}}=lists:keysearch(data_dir,1,Config),
+ {value,{priv_dir,_Dp}}=lists:keysearch(priv_dir,1,Config),
+ ok.
+
+init_per_tc(suite) -> [];
+init_per_tc(doc) -> ["Test that a Config that is altered in ",
+ "init_per_testcase gets through to the ",
+ "actual testcase."];
+init_per_tc(Config) ->
+ %% Check that the config var sent from init_per_testcase
+ %% really exists.
+ {value, {strange_var, 1}} = lists:keysearch(strange_var,1,Config),
+
+ %% Check that the other variables still exist.
+ {value,{data_dir,_Dd}}=lists:keysearch(data_dir,1,Config),
+ {value,{priv_dir,_Dp}}=lists:keysearch(priv_dir,1,Config),
+ ok.
+
+end_per_tc(suite) -> [];
+end_per_tc(doc) -> ["Test that end_per_testcase/2 is called even if"
+		    " the test case fails"];
+end_per_tc(Config) when is_list(Config) ->
+ ?t:fail("This case should fail! Check that \"CLEANUP\" is"
+ " printed in the minor log file.").
+
+
+timeconv(suite) -> [];
+timeconv(doc) -> ["Test that the time unit conversion functions ",
+		  "work."];
+timeconv(Config) when is_list(Config) ->
+ Val=2,
+ Secs=Val*1000,
+ Mins=Secs*60,
+ Hrs=Mins*60,
+ Secs=?t:seconds(2),
+ Mins=?t:minutes(2),
+ Hrs=?t:hours(2),
+ ok.
+
+
+msgs(suite) -> [];
+msgs(doc) -> ["Tests the messages_get function."];
+msgs(Config) when is_list(Config) ->
+ self() ! {hej, du},
+ self() ! {lite, "data"},
+ self() ! en_atom,
+ [{hej, du}, {lite, "data"}, en_atom] = ?t:messages_get(),
+ ok.
+
+capture(suite) -> [];
+capture(doc) -> ["Test that the capture functions work properly."];
+capture(Config) when is_list(Config) ->
+ String1="abcedfghjiklmnopqrstuvwxyz",
+ String2="0123456789",
+ ?t:capture_start(),
+ io:format(String1),
+ [String1]=?t:capture_get(),
+ io:format(String2),
+ [String2]=?t:capture_get(),
+ ?t:capture_stop(),
+ []=?t:capture_get(),
+ io:format(String2),
+ []=?t:capture_get(),
+ ok.
+
+timecall(suite) -> [];
+timecall(doc) -> ["Tests that timed calls work."];
+timecall(Config) when is_list(Config) ->
+ {_Time1, liten_apa_e_oxo_farlig} = ?t:timecall(?MODULE, dummy_function, []),
+ {Time2, jag_ar_en_gorilla} = ?t:timecall(?MODULE, dummy_function, [gorilla]),
+ DTime=round(Time2),
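+    %% dummy_function(gorilla) sleeps for one second, so the rounded
+    %% measured time is expected to be exactly 1.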
+ if
+ DTime<1 ->
+ ?t:fail("Timecall reported a too low time.");
+ DTime==1 ->
+ ok;
+ DTime>1 ->
+ ?t:fail("Timecall reported a too high time.")
+ end,
+ ok.
+
+dummy_function() ->
+ liten_apa_e_oxo_farlig.
+dummy_function(gorilla) ->
+ receive after 1000 -> ok end,
+ jag_ar_en_gorilla.
+
+
+do_times(suite) -> [do_times_mfa, do_times_fun];
+do_times(doc) -> ["Test the do_times function."].
+
+do_times_mfa(suite) -> [];
+do_times_mfa(doc) -> ["Test the do_times function with M,F,A given."];
+do_times_mfa(Config) when is_list(Config) ->
+ ?t:do_times(100, ?MODULE, doer, [self()]),
+ 100=length(?t:messages_get()),
+ ok.
+
+do_times_fun(suite) -> [];
+do_times_fun(doc) -> ["Test the do_times function with fun given."];
+do_times_fun(Config) when is_list(Config) ->
+ Self = self(),
+ ?t:do_times(100, fun() -> doer(Self) end),
+ 100=length(?t:messages_get()),
+ ok.
+
+doer(From) ->
+ From ! a,
+ ok.
+
+skip_cases(doc) -> ["Test all possible ways to skip a test case."];
+skip_cases(suite) -> [skip_case1, skip_case2, skip_case3, skip_case4,
+ skip_case5, skip_case6, skip_case7, skip_case8,
+ skip_case9].
+
+skip_case1(suite) -> [];
+skip_case1(doc) -> ["Test that you can return {skipped, Reason},"
+ " and that Reason is in the comment field in the HTML log"];
+skip_case1(Config) when is_list(Config) ->
+ %% If this comment shows, the case failed!!
+ ?t:comment("ERROR: This case should have been noted as `Skipped'"),
+ %% The Reason in {skipped, Reason} should overwrite a 'comment'
+ {skipped, "This case should be noted as `Skipped'"}.
+
+skip_case2(suite) -> [];
+skip_case2(doc) -> ["Test that you can return {skipped, Reason},"
+ " and that Reason is in the comment field in the HTML log"];
+skip_case2(Config) when is_list(Config) ->
+ %% If this comment shows, the case failed!!
+ ?t:comment("ERROR: This case should have been noted as `Skipped'"),
+ %% The Reason in {skipped, Reason} should overwrite a 'comment'
+ exit({skipped, "This case should be noted as `Skipped'"}).
+
+skip_case3(suite) -> [];
+skip_case3(doc) -> ["Test that you can return {skip, Reason},"
+ " and that Reason is in the comment field in the HTML log"];
+skip_case3(Config) when is_list(Config) ->
+ %% If this comment shows, the case failed!!
+ ?t:comment("ERROR: This case should have been noted as `Skipped'"),
+ %% The Reason in {skip, Reason} should overwrite a 'comment'
+ {skip, "This case should be noted as `Skipped'"}.
+
+skip_case4(suite) -> [];
+skip_case4(doc) -> ["Test that you can return {skip, Reason},"
+ " and that Reason is in the comment field in the HTML log"];
+skip_case4(Config) when is_list(Config) ->
+ %% If this comment shows, the case failed!!
+ ?t:comment("ERROR: This case should have been noted as `Skipped'"),
+ %% The Reason in {skip, Reason} should overwrite a 'comment'
+ exit({skip, "This case should be noted as `Skipped'"}).
+
+skip_case5(suite) -> {skipped, "This case should be noted as `Skipped'"};
+skip_case5(doc) -> ["Test that you can return {skipped, Reason}"
+ " from the specification clause"].
+
+skip_case6(suite) -> {skip, "This case should be noted as `Skipped'"};
+skip_case6(doc) -> ["Test that you can return {skip, Reason}"
+ " from the specification clause"].
+
+skip_case7(suite) -> [];
+skip_case7(doc) -> ["Test that skip works from a test specification file"];
+skip_case7(Config) when is_list(Config) ->
+ %% This case shall be skipped by adding
+ %% {skip, {test_server_SUITE, skip_case7, Reason}}.
+ %% to the test specification file.
+ ?t:fail("This case should have been Skipped by the .spec file").
+
+skip_case8(suite) -> [];
+skip_case8(doc) -> ["Test that {skipped, Reason} works from"
+ " init_per_testcase/2"];
+skip_case8(Config) when is_list(Config) ->
+    %% This case shall be skipped by adding a specific clause
+    %% returning {skipped, Reason} from init_per_testcase/2 for this case.
+ ?t:fail("This case should have been Skipped by init_per_testcase/2").
+
+skip_case9(suite) -> [];
+skip_case9(doc) -> ["Test that {skip, Reason} works from init_per_testcase/2"];
+skip_case9(Config) when is_list(Config) ->
+    %% This case shall be skipped by adding a specific clause
+    %% returning {skip, Reason} from init_per_testcase/2 for this case.
+ ?t:fail("This case should have been Skipped by init_per_testcase/2").
+
+conf_init(doc) -> ["Test successful conf case: Change Config parameter"];
+conf_init(Config) when is_list(Config) ->
+ [{conf_init_var,1389}|Config].
+
+check_new_conf(suite) -> [];
+check_new_conf(doc) -> ["Check that Config parameter changed by"
+ " conf_init is used"];
+check_new_conf(Config) when is_list(Config) ->
+ 1389 = ?config(conf_init_var,Config),
+ ok.
+
+conf_cleanup(doc) -> ["Test successful conf case: Restore Config parameter"];
+conf_cleanup(Config) when is_list(Config) ->
+ lists:keydelete(conf_init_var,1,Config).
+
+check_old_conf(suite) -> [];
+check_old_conf(doc) -> ["Test that the restored Config is used after a"
+ " conf cleanup"];
+check_old_conf(Config) when is_list(Config) ->
+ undefined = ?config(conf_init_var,Config),
+ ok.
+
+conf_init_fail(doc) -> ["Test that config members are skipped if"
+			" the conf init function fails."];
+conf_init_fail(Config) when is_list(Config) ->
+ ?t:fail("This case should fail! Check that conf_member_skip and"
+ " conf_cleanup_skip are skipped.").
+
+
+
+start_stop_node(suite) -> [];
+start_stop_node(doc) -> ["Test start and stop of slave and peer nodes"];
+start_stop_node(Config) when is_list(Config) ->
+ {ok,Node2} = ?t:start_node(node2,peer,[]),
+ {error, _} = ?t:start_node(node2,peer,[{fail_on_error,false}]),
+ true = lists:member(Node2,nodes()),
+
+ {ok,Node3} = ?t:start_node(node3,slave,[]),
+ {error, _} = ?t:start_node(node3,slave,[]),
+ true = lists:member(Node3,nodes()),
+
+ {ok,Node4} = ?t:start_node(node4,peer,[{wait,false}]),
+ case lists:member(Node4,nodes()) of
+ true ->
+ ?t:comment("WARNING: Node started with {wait,false}"
+ " is up faster than expected...");
+ false ->
+ test_server:wait_for_node(Node4),
+ true = lists:member(Node4,nodes())
+ end,
+
+ true = ?t:stop_node(Node2),
+ false = lists:member(Node2,nodes()),
+
+ true = ?t:stop_node(Node3),
+ false = lists:member(Node3,nodes()),
+
+ true = ?t:stop_node(Node4),
+ false = lists:member(Node4,nodes()),
+ timer:sleep(2000),
+ false = ?t:stop_node(Node4),
+
+ ok.
+
+cleanup_nodes_init(doc) -> ["Test that nodes are terminated when the test case"
+			    " is finished, unless {cleanup,false} is given."];
+cleanup_nodes_init(Config) when is_list(Config) ->
+ {ok,DieSlave} = ?t:start_node(die_slave, slave, []),
+ {ok,SurviveSlave} = ?t:start_node(survive_slave, slave, [{cleanup,false}]),
+ {ok,DiePeer} = ?t:start_node(die_peer, peer, []),
+ {ok,SurvivePeer} = ?t:start_node(survive_peer, peer, [{cleanup,false}]),
+ [{die_slave,DieSlave},
+ {survive_slave,SurviveSlave},
+ {die_peer,DiePeer},
+ {survive_peer,SurvivePeer} | Config].
+
+
+
+check_survive_nodes(suite) -> [];
+check_survive_nodes(doc) -> ["Test that nodes with {cleanup,false} survived"];
+check_survive_nodes(Config) when is_list(Config) ->
+ timer:sleep(1000),
+ false = lists:member(?config(die_slave,Config),nodes()),
+ true = lists:member(?config(survive_slave,Config),nodes()),
+ false = lists:member(?config(die_peer,Config),nodes()),
+ true = lists:member(?config(survive_peer,Config),nodes()),
+ ok.
+
+
+cleanup_nodes_fin(doc) -> ["Test that nodes started with {cleanup,false}"
+ " can be stopped"];
+cleanup_nodes_fin(Config) when is_list(Config) ->
+ Slave = ?config(survive_slave,Config),
+ Peer = ?config(survive_peer,Config),
+
+ true = ?t:stop_node(Slave),
+ false = lists:member(Slave,nodes()),
+ true = ?t:stop_node(Peer),
+ false = lists:member(Peer,nodes()),
+
+ C1 = lists:keydelete(die_slave,1,Config),
+ C2 = lists:keydelete(survive_slave,1,C1),
+ C3 = lists:keydelete(die_peer,1,C2),
+ lists:keydelete(survive_peer,1,C3).
+
+commercial(Config) when is_list(Config) ->
+ case ?t:is_commercial() of
+ false -> {comment,"Open-source build"};
+ true -> {comment,"Commercial build"}
+ end.
+
+io_invalid_data(Config) when is_list(Config) ->
+ ok = io:put_chars("valid: " ++ [42]),
+ %% OTP-10991 caused this to hang and produce a timetrap timeout:
+ {'EXIT',{badarg,_}} = (catch io:put_chars("invalid: " ++ [42.0])),
+ ok.
+
+print_unexpected(Config) when is_list(Config) ->
+ Str = "-x-x-x- test_server_SUITE:print_unexpected -> Unexpected data -x-x-x-",
+ test_server_io:print_unexpected(Str),
+ UnexpectedLog = filename:join(filename:dirname(?config(tc_logfile,Config)),
+ "unexpected_io.log.html"),
+ {ok,Bin} = file:read_file(UnexpectedLog),
+ match = re:run(Bin, Str, [global,{capture,none}]),
+ ok.
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_SUITE_data/dummy_file b/lib/common_test/test/test_server_SUITE_data/test_server_SUITE_data/dummy_file
new file mode 100644
index 0000000000..65c88fbd75
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_SUITE_data/dummy_file
@@ -0,0 +1 @@
+Dummy file. \ No newline at end of file
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_break_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_break_SUITE.erl
new file mode 100644
index 0000000000..171f83df0f
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_break_SUITE.erl
@@ -0,0 +1,149 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_break_SUITE).
+
+-export([all/1, init_per_suite/1, end_per_suite/1]).
+-export([init_per_testcase/2, end_per_testcase/2]).
+-export([break_in_init_tc/1,
+ break_in_tc/1,
+ break_in_end_tc/1,
+ break_in_end_tc_after_fail/1,
+ break_in_end_tc_after_abort/1,
+ check_all_breaks/1]).
+
+-include_lib("common_test/include/ct.hrl").
+
+all(suite) ->
+ [break_in_init_tc,
+ break_in_tc,
+ break_in_end_tc,
+ break_in_end_tc_after_fail,
+ break_in_end_tc_after_abort,
+ check_all_breaks]. %must be the last test - checks result of previous tests
+
+init_per_suite(Config) ->
+ spawn(fun break_and_continue_sup/0),
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(Case,Config) when Case==break_in_init_tc ->
+ Config1 = init_timetrap(500,Config),
+ break_and_check(Case),
+ Config1;
+init_per_testcase(Case,Config) when Case==check_all_breaks ->
+ init_timetrap({seconds,20},Config);
+init_per_testcase(_Case,Config) ->
+ init_timetrap(500,Config).
+
+init_timetrap(T,Config) ->
+ Dog = ?t:timetrap(T),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(Case,Config) when Case==break_in_end_tc;
+ Case==break_in_end_tc_after_fail;
+ Case==break_in_end_tc_after_abort ->
+ break_and_check(Case),
+ cancel_timetrap(Config);
+end_per_testcase(_Case,Config) ->
+ cancel_timetrap(Config).
+
+cancel_timetrap(Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+
+break_in_init_tc(Config) when is_list(Config) ->
+ ok.
+
+break_in_tc(Config) when is_list(Config) ->
+ break_and_check(break_in_tc),
+ ok.
+
+break_in_end_tc(Config) when is_list(Config) ->
+ ok.
+
+break_in_end_tc_after_fail(Config) when is_list(Config) ->
+ ?t:fail(test_case_should_fail).
+
+break_in_end_tc_after_abort(Config) when is_list(Config) ->
+ ?t:adjusted_sleep(2000). % will cause a timetrap timeout
+
+%% This test case checks that all breaks in the previous test cases
+%% were also continued, and that each break lasted as long as expected.
+%% The reason for this is that some of the breaks above are in
+%% end_per_testcase, and failures there only produce a warning, not
+%% an error - so this case catches the error for real.
+check_all_breaks(Config) when is_list(Config) ->
+ break_and_continue_sup ! {done,self()},
+ receive {Breaks,Continued} ->
+ check_all_breaks(Breaks,Continued)
+ end.
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+
+check_all_breaks([{From,Case,T,Start}|Breaks],[{From,End}|Continued]) ->
+ Diff = timer:now_diff(End,Start),
+ DiffSec = round(Diff/1000000),
+ TSec = round(T/1000000),
+ if DiffSec==TSec ->
+ ?t:format("Break in ~p successfully continued after ~p second(s)~n",
+ [Case,DiffSec]),
+ check_all_breaks(Breaks,Continued);
+ true ->
+ ?t:format("Faulty duration of break in ~p: continued after ~p second(s)~n",
+ [Case,DiffSec]),
+ ?t:fail({faulty_diff,Case,DiffSec,TSec})
+ end;
+check_all_breaks([],[]) ->
+ ok;
+check_all_breaks(Breaks,Continued) ->
+ %% This is probably a case of a missing continue - i.e. a break
+ %% has been started, but it was never continued.
+ ?t:fail({no_match_in_breaks_and_continued,Breaks,Continued}).
+
+break_and_check(Case) ->
+ break_and_continue_sup ! {break,Case,1000,self()},
+ ?t:break(atom_to_list(Case)),
+ break_and_continue_sup ! {continued,self()},
+ ok.
+
+break_and_continue_sup() ->
+ register(break_and_continue_sup,self()),
+ break_and_continue_loop([],[]).
+
+break_and_continue_loop(Breaks,Continued) ->
+ receive
+ {break,Case,T,From} ->
+ Start = now(),
+ {RealT,_} = timer:tc(?t,adjusted_sleep,[T]),
+ ?t:continue(),
+ break_and_continue_loop([{From,Case,RealT,Start}|Breaks],Continued);
+ {continued,From} ->
+ break_and_continue_loop(Breaks,[{From,now()}|Continued]);
+ {done,From} ->
+ From ! {lists:reverse(Breaks),lists:reverse(Continued)}
+ end.
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_conf01_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_conf01_SUITE.erl
new file mode 100644
index 0000000000..e4f40f6c03
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_conf01_SUITE.erl
@@ -0,0 +1,192 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%------------------------------------------------------------------
+%%% Test Server self test.
+%%%------------------------------------------------------------------
+-module(test_server_conf01_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
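+%% A conf case is written as {conf,InitFunc,CaseList,EndFunc}, or as
+%% {conf,Props,InitFunc,CaseList,EndFunc} when a (possibly empty)
+%% list of execution properties is given. Conf cases may be nested,
+%% as the structure below shows.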
+all(doc) -> ["Test simple conf case structure, with and without nested cases"];
+all(suite) ->
+ [
+ {conf, conf1_init, [conf1_tc1, conf1_tc2], conf1_end},
+
+ {conf, [], conf2_init, [conf2_tc1, conf2_tc2], conf2_end},
+
+ {conf, conf3_init, [conf3_tc1,
+
+ {conf, [], conf4_init, [conf4_tc1, conf4_tc2], conf4_end},
+
+ conf3_tc2], conf3_end},
+
+ conf5
+ ].
+
+%%---------- conf cases ----------
+
+conf1_init(Config) when is_list(Config) ->
+ [{cc1,conf1}|Config].
+conf1_end(_Config) ->
+ ok.
+
+conf2_init(Config) when is_list(Config) ->
+ [{cc2,conf2}|Config].
+conf2_end(_Config) ->
+ ok.
+
+conf3_init(Config) when is_list(Config) ->
+ [{cc3,conf3}|Config].
+conf3_end(_Config) ->
+ ok.
+
+conf4_init(Config) when is_list(Config) ->
+ [{cc4,conf4}|Config].
+conf4_end(_Config) ->
+ ok.
+
+conf5_init(Config) when is_list(Config) ->
+ [{cc5,conf5}|Config].
+conf5_end(_Config) ->
+ ok.
+
+conf6_init(Config) when is_list(Config) ->
+ [{cc6,conf6}|Config].
+conf6_end(_Config) ->
+ ok.
+
+
+conf5(suite) -> % test specification
+ [{conf, conf5_init, [conf5_tc1,
+
+ {conf, [], conf6_init, [conf6_tc1, conf6_tc2], conf6_end},
+
+ conf5_tc2], conf5_end}].
+
+
+%%---------- test cases ----------
+
+conf1_tc1(Config) when is_list(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ conf1 = ?config(cc1,Config),
+ ok.
+conf1_tc2(Config) when is_list(Config) ->
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf1 = ?config(cc1,Config),
+ ok.
+
+conf2_tc1(Config) when is_list(Config) ->
+ undefined = ?config(cc1,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ conf2 = ?config(cc2,Config),
+ ok.
+conf2_tc2(Config) when is_list(Config) ->
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf2 = ?config(cc2,Config),
+ ok.
+
+conf3_tc1(Config) when is_list(Config) ->
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ conf3 = ?config(cc3,Config),
+ ok.
+conf3_tc2(Config) when is_list(Config) ->
+ conf3 = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ ok.
+
+conf4_tc1(Config) when is_list(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ ok.
+conf4_tc2(Config) when is_list(Config) ->
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ ok.
+
+conf5_tc1(Config) when is_list(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ ok.
+conf5_tc2(Config) when is_list(Config) ->
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ ok.
+
+conf6_tc1(Config) when is_list(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ ok.
+conf6_tc2(Config) when is_list(Config) ->
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ ok.
+
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_conf02_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_conf02_SUITE.erl
new file mode 100644
index 0000000000..1c6fe6dd0b
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_conf02_SUITE.erl
@@ -0,0 +1,295 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%------------------------------------------------------------------
+%%% Test Server self test.
+%%%------------------------------------------------------------------
+-module(test_server_conf02_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+all(doc) -> ["Test simple conf case structure, with and without nested cases"];
+all(suite) ->
+ [
+ {conf, conf1_init, [conf1_tc1, conf1_tc2], conf1_end},
+
+ {conf, [], conf2_init, [conf2_tc1, conf2_tc2], conf2_end},
+
+ {conf, conf3_init, [conf3_tc1,
+
+ {conf, [], conf4_init, [conf4_tc1, conf4_tc2], conf4_end},
+
+ conf3_tc2], conf3_end},
+
+ conf5
+ ].
+
+
+%%---------- conf cases ----------
+
+init_per_suite(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ [{suite,init}|Config].
+end_per_suite(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ ok.
+
+init_per_testcase(TC=conf1_tc1, Config) ->
+ init = ?config(suite,Config),
+ [{tc11,TC}|Config];
+init_per_testcase(TC=conf1_tc2, Config) ->
+ [{tc12,TC}|Config];
+init_per_testcase(TC=conf2_tc1, Config) ->
+ [{tc21,TC}|Config];
+init_per_testcase(TC=conf2_tc2, Config) ->
+ [{tc22,TC}|Config];
+init_per_testcase(TC=conf3_tc1, Config) ->
+ [{tc31,TC}|Config];
+init_per_testcase(TC=conf3_tc2, Config) ->
+ [{tc32,TC}|Config];
+init_per_testcase(TC=conf4_tc1, Config) ->
+ [{tc41,TC}|Config];
+init_per_testcase(TC=conf4_tc2, Config) ->
+ [{tc42,TC}|Config];
+init_per_testcase(TC=conf5_tc1, Config) ->
+ [{tc51,TC}|Config];
+init_per_testcase(TC=conf5_tc2, Config) ->
+ [{tc52,TC}|Config];
+init_per_testcase(TC=conf6_tc1, Config) ->
+ [{tc61,TC}|Config];
+init_per_testcase(TC=conf6_tc2, Config) ->
+ init = ?config(suite,Config),
+ [{tc62,TC}|Config].
+
+end_per_testcase(TC=conf1_tc1, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc11,Config),
+ ok;
+end_per_testcase(TC=conf1_tc2, Config) ->
+ TC = ?config(tc12,Config),
+ ok;
+end_per_testcase(TC=conf2_tc1, Config) ->
+ TC = ?config(tc21,Config),
+ ok;
+end_per_testcase(TC=conf2_tc2, Config) ->
+ TC = ?config(tc22,Config),
+ ok;
+end_per_testcase(TC=conf3_tc1, Config) ->
+ TC = ?config(tc31,Config),
+ ok;
+end_per_testcase(TC=conf3_tc2, Config) ->
+ TC = ?config(tc32,Config),
+ ok;
+end_per_testcase(TC=conf4_tc1, Config) ->
+ TC = ?config(tc41,Config),
+ ok;
+end_per_testcase(TC=conf4_tc2, Config) ->
+ TC = ?config(tc42,Config),
+ ok;
+end_per_testcase(TC=conf5_tc1, Config) ->
+ TC = ?config(tc51,Config),
+ ok;
+end_per_testcase(TC=conf5_tc2, Config) ->
+ TC = ?config(tc52,Config),
+ ok;
+end_per_testcase(TC=conf6_tc1, Config) ->
+ TC = ?config(tc61,Config),
+ ok;
+end_per_testcase(TC=conf6_tc2, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc62,Config),
+ ok.
+
+conf1_init(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ [{cc1,conf1}|Config].
+conf1_end(_Config) ->
+ ok.
+
+conf2_init(Config) when is_list(Config) ->
+ [{cc2,conf2}|Config].
+conf2_end(_Config) ->
+ ok.
+
+conf3_init(Config) when is_list(Config) ->
+ [{cc3,conf3}|Config].
+conf3_end(_Config) ->
+ ok.
+
+conf4_init(Config) when is_list(Config) ->
+ [{cc4,conf4}|Config].
+conf4_end(_Config) ->
+ ok.
+
+conf5_init(Config) when is_list(Config) ->
+ [{cc5,conf5}|Config].
+conf5_end(_Config) ->
+ ok.
+
+conf6_init(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ [{cc6,conf6}|Config].
+conf6_end(_Config) ->
+ ok.
+
+conf5(suite) -> % test specification
+ [{conf, conf5_init, [conf5_tc1,
+
+ {conf, [], conf6_init, [conf6_tc1, conf6_tc2], conf6_end},
+
+ conf5_tc2], conf5_end}].
+
+%%---------- test cases ----------
+
+conf1_tc1(Config) when is_list(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ conf1 = ?config(cc1,Config),
+ conf1_tc1 = ?config(tc11,Config),
+ ok.
+conf1_tc2(Config) when is_list(Config) ->
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ conf1 = ?config(cc1,Config),
+ conf1_tc2 = ?config(tc12,Config),
+ ok.
+
+conf2_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ undefined = ?config(cc1,Config),
+ undefined = ?config(tc11,Config),
+ conf2 = ?config(cc2,Config),
+ conf2_tc1 = ?config(tc21,Config),
+ ok.
+conf2_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ conf2 = ?config(cc2,Config),
+ undefined = ?config(tc21,Config),
+ conf2_tc2 = ?config(tc22,Config),
+ ok.
+
+conf3_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(tc22,Config),
+ conf3 = ?config(cc3,Config),
+ conf3_tc1 = ?config(tc31,Config),
+ ok.
+conf3_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ conf3 = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ undefined = ?config(tc31,Config),
+ undefined = ?config(tc41,Config),
+ conf3_tc2 = ?config(tc32,Config),
+ ok.
+
+conf4_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ undefined = ?config(tc32,Config),
+ conf4_tc1 = ?config(tc41,Config),
+ ok.
+conf4_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ undefined = ?config(tc41,Config),
+ conf4_tc2 = ?config(tc42,Config),
+ ok.
+
+conf5_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ undefined = ?config(tc42,Config),
+ conf5_tc1 = ?config(tc51,Config),
+ ok.
+conf5_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ undefined = ?config(tc51,Config),
+ undefined = ?config(tc62,Config),
+ conf5_tc2 = ?config(tc52,Config),
+ ok.
+
+conf6_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ undefined = ?config(tc52,Config),
+ conf6_tc1 = ?config(tc61,Config),
+ ok.
+conf6_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ undefined = ?config(tc61,Config),
+ conf6_tc2 = ?config(tc62,Config),
+ ok.
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE.erl
new file mode 100644
index 0000000000..3371418980
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE.erl
@@ -0,0 +1,59 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_cover_SUITE).
+
+-export([all/1, init_per_suite/1, end_per_suite/1]).
+-export([init_per_testcase/2, end_per_testcase/2]).
+-export([tc1/1, tc2/1]).
+
+-include_lib("common_test/include/ct.hrl").
+
+all(suite) ->
+ [tc1,tc2].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(_Case,Config) ->
+ Dog = ?t:timetrap({minutes,10}),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Case,Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+tc1(Config) when is_list(Config) ->
+ cover_helper:foo(),
+ ok.
+
+tc2(Config) when is_list(Config) ->
+ cover_helper:bar(),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl b/lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl
new file mode 100644
index 0000000000..6c74eb4e8a
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl
@@ -0,0 +1,4 @@
+-module(cover_helper).
+-compile(export_all).
+foo() -> ok.
+bar() -> ok.
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_parallel01_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_parallel01_SUITE.erl
new file mode 100644
index 0000000000..ad639b585d
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_parallel01_SUITE.erl
@@ -0,0 +1,527 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%------------------------------------------------------------------
+%%% Test Server self test.
+%%%------------------------------------------------------------------
+-module(test_server_parallel01_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% -------------------------------------------------------------------
+%% Notes on parallel execution of test cases
+%% -------------------------------------------------------------------
+%%
+%% A group nested under a parallel group will start executing in
+%% parallel with previous (parallel) test cases (no matter what
+%% properties the nested group has). Test cases are however never
+%% executed in parallel with the start or end conf case of the same
+%% group! Because of this, the test_server_ctrl loop waits at
+%% the end conf of a group for all parallel cases to finish
+%% before the end conf case actually executes. This has the effect
+%% that it's only after a nested group has finished that any
+%% remaining parallel cases in the previous group get spawned (*).
+%% Example (all parallel cases):
+%%
+%% group1_init |---->
+%% group1_case1 | --------->
+%% group1_case2 | --------------------------------->
+%% group2_init | ---->
+%% group2_case1 | ------>
+%% group2_case2 | ---------->
+%% group2_end | --->
+%% group1_case3 (*)| ---->
+%% group1_case4 (*)| -->
+%% group1_end | --->
+%%
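+%% The timeline above corresponds to a group structure like this
+%% (a sketch - the group/case names are made up for illustration):
+%%
+%%   {conf, [parallel], group1_init,
+%%    [group1_case1, group1_case2,
+%%     {conf, [parallel], group2_init,
+%%      [group2_case1, group2_case2], group2_end},
+%%     group1_case3, group1_case4], group1_end}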
+
+all(doc) -> ["Test simple conf case structure, with and without nested cases"];
+all(suite) ->
+ [
+ {conf, [parallel], conf1_init, [conf1_tc1, conf1_tc2], conf1_end},
+
+ {conf, [parallel], conf2_init, [conf2_tc1, conf2_tc2], conf2_end},
+
+ {conf, [parallel], conf3_init, [conf3_tc1, conf3_tc1,
+
+ {conf, [],
+ conf4_init, [conf4_tc1, conf4_tc2], conf4_end},
+
+ conf3_tc2], conf3_end},
+
+ conf5,
+
+ {conf, [parallel], conf7_init, [conf7_tc1, conf7_tc1,
+
+ {conf, [parallel],
+ conf8_init, [conf8_tc1, conf8_tc2], conf8_end},
+
+ conf7_tc2], conf7_end}
+
+ ].
+
+
+%%---------- conf cases ----------
+
+init_per_suite(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ [{suite,init}|Config].
+end_per_suite(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ ok.
+
+init_per_testcase(TC=conf1_tc1, Config) ->
+ init = ?config(suite,Config),
+ [{tc11,TC}|Config];
+init_per_testcase(TC=conf1_tc2, Config) ->
+ [{tc12,TC}|Config];
+init_per_testcase(TC=conf2_tc1, Config) ->
+ [{tc21,TC}|Config];
+init_per_testcase(TC=conf2_tc2, Config) ->
+ [{tc22,TC}|Config];
+init_per_testcase(TC=conf3_tc1, Config) ->
+ [{tc31,TC}|Config];
+init_per_testcase(TC=conf3_tc2, Config) ->
+ [{tc32,TC}|Config];
+init_per_testcase(TC=conf4_tc1, Config) ->
+ [{tc41,TC}|Config];
+init_per_testcase(TC=conf4_tc2, Config) ->
+ [{tc42,TC}|Config];
+init_per_testcase(TC=conf5_tc1, Config) ->
+ [{tc51,TC}|Config];
+init_per_testcase(TC=conf5_tc2, Config) ->
+ [{tc52,TC}|Config];
+init_per_testcase(TC=conf6_tc1, Config) ->
+ [{tc61,TC}|Config];
+init_per_testcase(TC=conf6_tc2, Config) ->
+ init = ?config(suite,Config),
+ [{tc62,TC}|Config];
+init_per_testcase(TC=conf7_tc1, Config) ->
+ [{tc71,TC}|Config];
+init_per_testcase(TC=conf7_tc2, Config) ->
+ [{tc72,TC}|Config];
+init_per_testcase(TC=conf8_tc1, Config) ->
+ [{tc81,TC}|Config];
+init_per_testcase(TC=conf8_tc2, Config) ->
+ init = ?config(suite,Config),
+ [{tc82,TC}|Config].
+
+end_per_testcase(TC=conf1_tc1, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc11,Config),
+ ok;
+end_per_testcase(TC=conf1_tc2, Config) ->
+ TC = ?config(tc12,Config),
+ ok;
+end_per_testcase(TC=conf2_tc1, Config) ->
+ TC = ?config(tc21,Config),
+ ok;
+end_per_testcase(TC=conf2_tc2, Config) ->
+ TC = ?config(tc22,Config),
+ ok;
+end_per_testcase(TC=conf3_tc1, Config) ->
+ TC = ?config(tc31,Config),
+ ok;
+end_per_testcase(TC=conf3_tc2, Config) ->
+ TC = ?config(tc32,Config),
+ ok;
+end_per_testcase(TC=conf4_tc1, Config) ->
+ TC = ?config(tc41,Config),
+ ok;
+end_per_testcase(TC=conf4_tc2, Config) ->
+ TC = ?config(tc42,Config),
+ ok;
+end_per_testcase(TC=conf5_tc1, Config) ->
+ TC = ?config(tc51,Config),
+ ok;
+end_per_testcase(TC=conf5_tc2, Config) ->
+ TC = ?config(tc52,Config),
+ ok;
+end_per_testcase(TC=conf6_tc1, Config) ->
+ TC = ?config(tc61,Config),
+ ok;
+end_per_testcase(TC=conf6_tc2, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc62,Config),
+ ok;
+end_per_testcase(TC=conf7_tc1, Config) ->
+ TC = ?config(tc71,Config),
+ ok;
+end_per_testcase(TC=conf7_tc2, Config) ->
+ TC = ?config(tc72,Config),
+ ok;
+end_per_testcase(TC=conf8_tc1, Config) ->
+ TC = ?config(tc81,Config),
+ ok;
+end_per_testcase(TC=conf8_tc2, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc82,Config),
+ ok.
+
+conf1_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [parallel] = ?config(tc_group_properties,Config),
+ init = ?config(suite,Config),
+ [{t0,now()},{cc1,conf1}|Config].
+conf1_end(Config) ->
+ %% check 2s & 3s < 4s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 4000000 -> exit({bad_parallel_exec,Ms});
+ Ms < 3000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+conf2_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [parallel] = ?config(tc_group_properties,Config),
+ [{t0,now()},{cc2,conf2}|Config].
+conf2_end(Config) ->
+ %% check 3s & 2s < 4s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 4000000 -> exit({bad_parallel_exec,Ms});
+ Ms < 3000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+conf3_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [parallel] = ?config(tc_group_properties,Config),
+ [{t0,now()},{cc3,conf3}|Config].
+conf3_end(Config) ->
+ %% check 6s & 6s & (2s & 3s) & 1s = ~6s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 7000000 -> exit({bad_parallel_exec,Ms});
+ Ms < 6000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+conf4_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [] = ?config(tc_group_properties,Config),
+ [{t0,now()},{cc4,conf4}|Config].
+conf4_end(Config) ->
+ %% check 2s & 3s >= 5s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 6000000 -> exit({bad_parallel_exec,Ms});
+ Ms < 5000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+conf5_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [] = ?config(tc_group_properties,Config),
+ [{t0,now()},{cc5,conf5}|Config].
+conf5_end(Config) ->
+ %% check 1s & 1s & (3s & 2s) & 1s = ~6s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 7500000 -> exit({bad_parallel_exec,Ms});
+ Ms < 6000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+conf6_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [parallel] = ?config(tc_group_properties,Config),
+ init = ?config(suite,Config),
+ [{t0,now()},{cc6,conf6}|Config].
+conf6_end(Config) ->
+ %% check 3s & 2s < 5s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 4500000 -> exit({bad_parallel_exec,Ms});
+ Ms < 3000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+conf5(suite) -> % test specification
+ [{conf, conf5_init, [conf5_tc1, conf5_tc1,
+
+ {conf, [parallel], conf6_init, [conf6_tc1, conf6_tc2], conf6_end},
+
+ conf5_tc2], conf5_end}].
+
+conf7_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [parallel] = ?config(tc_group_properties,Config),
+ [{t0,now()},{cc7,conf7}|Config].
+conf7_end(Config) ->
+ %% check 1s & 1s & (2s & 2s) & 1s = ~3s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 4000000 -> exit({bad_parallel_exec,Ms});
+ Ms < 3000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+conf8_init(Config) when is_list(Config) ->
+ test_server:comment(io_lib:format("~p",[now()])),
+ [parallel] = ?config(tc_group_properties,Config),
+ init = ?config(suite,Config),
+ [{t0,now()},{cc8,conf8}|Config].
+conf8_end(Config) ->
+ %% check 2s & 2s < 4s
+ Ms = timer:now_diff(now(),?config(t0,Config)),
+ test_server:comment(io_lib:format("~p",[now()])),
+ if Ms > 3000000 -> exit({bad_parallel_exec,Ms});
+ Ms < 2000000 -> exit({bad_parallel_exec,Ms});
+ true -> ok
+ end.
+
+
+%%---------- test cases ----------
+
+conf1_tc1(Config) when is_list(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ conf1 = ?config(cc1,Config),
+ conf1_tc1 = ?config(tc11,Config),
+ timer:sleep(2000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf1_tc2(Config) when is_list(Config) ->
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ conf1 = ?config(cc1,Config),
+ conf1_tc2 = ?config(tc12,Config),
+ timer:sleep(3000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+
+conf2_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ undefined = ?config(cc1,Config),
+ undefined = ?config(tc11,Config),
+ conf2 = ?config(cc2,Config),
+ conf2_tc1 = ?config(tc21,Config),
+ timer:sleep(3000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf2_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ conf2 = ?config(cc2,Config),
+ undefined = ?config(tc21,Config),
+ conf2_tc2 = ?config(tc22,Config),
+ timer:sleep(2000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+
+conf3_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(tc22,Config),
+ conf3 = ?config(cc3,Config),
+ conf3_tc1 = ?config(tc31,Config),
+ timer:sleep(6000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf3_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ conf3 = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ undefined = ?config(tc31,Config),
+ undefined = ?config(tc41,Config),
+ conf3_tc2 = ?config(tc32,Config),
+ timer:sleep(1000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+
+conf4_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ undefined = ?config(tc32,Config),
+ conf4_tc1 = ?config(tc41,Config),
+ timer:sleep(2000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf4_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ undefined = ?config(tc41,Config),
+ conf4_tc2 = ?config(tc42,Config),
+ timer:sleep(3000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+
+conf5_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ undefined = ?config(tc42,Config),
+ conf5_tc1 = ?config(tc51,Config),
+ timer:sleep(1000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf5_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ undefined = ?config(tc51,Config),
+ undefined = ?config(tc62,Config),
+ conf5_tc2 = ?config(tc52,Config),
+ timer:sleep(1000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+
+conf6_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ undefined = ?config(tc52,Config),
+ conf6_tc1 = ?config(tc61,Config),
+ timer:sleep(3000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf6_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ undefined = ?config(tc61,Config),
+ conf6_tc2 = ?config(tc62,Config),
+ timer:sleep(2000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+
+conf7_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ undefined = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ conf7 = ?config(cc7,Config),
+ undefined = ?config(tc62,Config),
+ conf7_tc1 = ?config(tc71,Config),
+ timer:sleep(1000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf7_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf7 = ?config(cc7,Config),
+ undefined = ?config(cc8,Config),
+ undefined = ?config(tc71,Config),
+ undefined = ?config(tc82,Config),
+ conf7_tc2 = ?config(tc72,Config),
+ timer:sleep(1000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+
+conf8_tc1(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ undefined = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ conf7 = ?config(cc7,Config),
+ conf8 = ?config(cc8,Config),
+ undefined = ?config(tc72,Config),
+ conf8_tc1 = ?config(tc81,Config),
+ timer:sleep(2000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
+conf8_tc2(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf7 = ?config(cc7,Config),
+ conf8 = ?config(cc8,Config),
+ undefined = ?config(tc81,Config),
+ conf8_tc2 = ?config(tc82,Config),
+ timer:sleep(2000),
+ test_server:comment(io_lib:format("~p",[now()])),
+ ok.
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_shuffle01_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_shuffle01_SUITE.erl
new file mode 100644
index 0000000000..0f7118a810
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_shuffle01_SUITE.erl
@@ -0,0 +1,477 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%------------------------------------------------------------------
+%%% Test Server self test.
+%%%------------------------------------------------------------------
+-module(test_server_shuffle01_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+all(doc) -> ["Test simple conf case structure, with and without nested cases"];
+all(suite) ->
+ [
+ {conf, [shuffle], conf1_init, [conf1_tc1, conf1_tc2, conf1_tc3], conf1_end},
+
+ {conf, [{shuffle,{1,2,3}}], conf2_init, [conf2_tc1, conf2_tc2, conf2_tc3], conf2_end},
+
+ {conf, [shuffle], conf3_init, [conf3_tc1, conf3_tc2, conf3_tc3,
+
+ {conf, [], conf4_init,
+ [conf4_tc1, conf4_tc2], conf4_end}],
+ conf3_end},
+
+ conf5,
+
+ {conf, [shuffle,{repeat,5},parallel], conf7_init, [conf7_tc1,
+
+ {conf, [{shuffle,{3,2,1}},{repeat,3}],
+ conf8_init, [conf8_tc1, conf8_tc2, conf8_tc3],
+ conf8_end},
+
+ conf7_tc2, conf7_tc3], conf7_end}
+
+ ].
+
+
+%%---------- conf cases ----------
+
+init_per_suite(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ [{suite,init}|Config].
+end_per_suite(Config) ->
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ ok.
+
+init_per_testcase(TC=conf1_tc1, Config) ->
+ init = ?config(suite,Config),
+ [{tc11,TC}|Config];
+init_per_testcase(TC=conf1_tc2, Config) ->
+ [{tc12,TC}|Config];
+init_per_testcase(TC=conf1_tc3, Config) ->
+ [{tc13,TC}|Config];
+init_per_testcase(TC=conf2_tc1, Config) ->
+ [{tc21,TC}|Config];
+init_per_testcase(TC=conf2_tc2, Config) ->
+ [{tc22,TC}|Config];
+init_per_testcase(TC=conf2_tc3, Config) ->
+ [{tc23,TC}|Config];
+init_per_testcase(TC=conf3_tc1, Config) ->
+ [{tc31,TC}|Config];
+init_per_testcase(TC=conf3_tc2, Config) ->
+ [{tc32,TC}|Config];
+init_per_testcase(TC=conf3_tc3, Config) ->
+ [{tc33,TC}|Config];
+init_per_testcase(TC=conf4_tc1, Config) ->
+ [{tc41,TC}|Config];
+init_per_testcase(TC=conf4_tc2, Config) ->
+ [{tc42,TC}|Config];
+init_per_testcase(TC=conf5_tc1, Config) ->
+ [{tc51,TC}|Config];
+init_per_testcase(TC=conf5_tc2, Config) ->
+ [{tc52,TC}|Config];
+init_per_testcase(TC=conf6_tc1, Config) ->
+ [{tc61,TC}|Config];
+init_per_testcase(TC=conf6_tc2, Config) ->
+ init = ?config(suite,Config),
+ [{tc62,TC}|Config];
+init_per_testcase(TC=conf6_tc3, Config) ->
+ [{tc63,TC}|Config];
+init_per_testcase(TC=conf7_tc1, Config) ->
+ [{tc71,TC}|Config];
+init_per_testcase(TC=conf7_tc2, Config) ->
+ [{tc72,TC}|Config];
+init_per_testcase(TC=conf7_tc3, Config) ->
+ [{tc73,TC}|Config];
+init_per_testcase(TC=conf8_tc1, Config) ->
+ [{tc81,TC}|Config];
+init_per_testcase(TC=conf8_tc2, Config) ->
+ init = ?config(suite,Config),
+ [{tc82,TC}|Config];
+init_per_testcase(TC=conf8_tc3, Config) ->
+ [{tc83,TC}|Config].
+
+end_per_testcase(TC=conf1_tc1, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc11,Config),
+ ok;
+end_per_testcase(TC=conf1_tc2, Config) ->
+ TC = ?config(tc12,Config),
+ ok;
+end_per_testcase(TC=conf1_tc3, Config) ->
+ TC = ?config(tc13,Config),
+ ok;
+end_per_testcase(TC=conf2_tc1, Config) ->
+ TC = ?config(tc21,Config),
+ ok;
+end_per_testcase(TC=conf2_tc2, Config) ->
+ TC = ?config(tc22,Config),
+ ok;
+end_per_testcase(TC=conf2_tc3, Config) ->
+ TC = ?config(tc23,Config),
+ ok;
+end_per_testcase(TC=conf3_tc1, Config) ->
+ TC = ?config(tc31,Config),
+ ok;
+end_per_testcase(TC=conf3_tc2, Config) ->
+ TC = ?config(tc32,Config),
+ ok;
+end_per_testcase(TC=conf3_tc3, Config) ->
+ TC = ?config(tc33,Config),
+ ok;
+end_per_testcase(TC=conf4_tc1, Config) ->
+ TC = ?config(tc41,Config),
+ ok;
+end_per_testcase(TC=conf4_tc2, Config) ->
+ TC = ?config(tc42,Config),
+ ok;
+end_per_testcase(TC=conf5_tc1, Config) ->
+ TC = ?config(tc51,Config),
+ ok;
+end_per_testcase(TC=conf5_tc2, Config) ->
+ TC = ?config(tc52,Config),
+ ok;
+end_per_testcase(TC=conf6_tc1, Config) ->
+ TC = ?config(tc61,Config),
+ ok;
+end_per_testcase(TC=conf6_tc2, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc62,Config),
+ ok;
+end_per_testcase(TC=conf6_tc3, Config) ->
+ TC = ?config(tc63,Config),
+ ok;
+end_per_testcase(TC=conf7_tc1, Config) ->
+ TC = ?config(tc71,Config),
+ ok;
+end_per_testcase(TC=conf7_tc2, Config) ->
+ TC = ?config(tc72,Config),
+ ok;
+end_per_testcase(TC=conf7_tc3, Config) ->
+ TC = ?config(tc73,Config),
+ ok;
+end_per_testcase(TC=conf8_tc1, Config) ->
+ TC = ?config(tc81,Config),
+ ok;
+end_per_testcase(TC=conf8_tc2, Config) ->
+ init = ?config(suite,Config),
+ TC = ?config(tc82,Config),
+ ok;
+end_per_testcase(TC=conf8_tc3, Config) ->
+ TC = ?config(tc83,Config),
+ ok.
+
+
+conf1_init(Config) when is_list(Config) ->
+ init = ?config(suite,Config),
+ [{shuffle,{_,_,_}}] = ?config(tc_group_properties,Config),
+ test_server:comment("Shuffle (random seed)"),
+ [{cc1,conf1}|Config].
+conf1_end(_Config) ->
+ ok.
+
+conf2_init(Config) when is_list(Config) ->
+ [{shuffle,{1,2,3}}] = ?config(tc_group_properties,Config),
+ test_server:comment("Shuffle (user seed)"),
+ [{cc2,conf2}|Config].
+conf2_end(_Config) ->
+ ok.
+
+conf3_init(Config) when is_list(Config) ->
+ [{shuffle,{_,_,_}}] = ?config(tc_group_properties,Config),
+ test_server:comment("Shuffle (random)"),
+ [{cc3,conf3}|Config].
+conf3_end(_Config) ->
+ ok.
+
+conf4_init(Config) when is_list(Config) ->
+ [] = ?config(tc_group_properties,Config),
+ test_server:comment("No shuffle"),
+ [{cc4,conf4}|Config].
+conf4_end(_Config) ->
+ ok.
+
+conf5_init(Config) when is_list(Config) ->
+ [] = ?config(tc_group_properties,Config),
+ test_server:comment("No shuffle"),
+ [{cc5,conf5}|Config].
+conf5_end(_Config) ->
+ ok.
+
+conf6_init(Config) when is_list(Config) ->
+ validate_shuffle(Config),
+ test_server:comment("Shuffle (random)"),
+ init = ?config(suite,Config),
+ [{cc6,conf6}|Config].
+conf6_end(_Config) ->
+ ok.
+
+conf5(suite) -> % test specification
+ [{conf, conf5_init, [conf5_tc1,
+
+ {conf, [shuffle], conf6_init,
+ [conf6_tc1, conf6_tc2, conf6_tc3],
+ conf6_end},
+
+ conf5_tc2], conf5_end}].
+
+conf7_init(Config) when is_list(Config) ->
+ test_server:comment("Group 7, Shuffle (random seed)"),
+ validate_shuffle(Config),
+ [{cc7,conf7}|Config].
+conf7_end(_Config) ->
+ ok.
+
+conf8_init(Config) when is_list(Config) ->
+ test_server:comment("Group 8, Shuffle (user start seed)"),
+ validate_shuffle(Config),
+ init = ?config(suite,Config),
+ [{cc8,conf8}|Config].
+conf8_end(_Config) ->
+ ok.
+
+validate_shuffle(Config) ->
+ %% The shuffle seed must have been expanded to a three-tuple by
+ %% the time the group runs.
+ case proplists:get_value(shuffle, ?config(tc_group_properties,Config)) of
+ {_,_,_} ->
+ ok;
+ Other ->
+ exit({invalid_shuffle_seed,Other})
+ end.
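+
+%% For reference: a plain 'shuffle' property, e.g. the group defined
+%% with [shuffle,{repeat,5},parallel] above, shows up here in
+%% tc_group_properties with the property expanded to {shuffle,Seed},
+%% where Seed is a three-tuple, e.g. (hypothetical values):
+%%
+%%   [{shuffle,{4381,32,28}},{repeat,5},parallel]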
+
+
+%%---------- test cases ----------
+
+conf1_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ conf1 = ?config(cc1,Config),
+ conf1_tc1 = ?config(tc11,Config),
+ ok.
+conf1_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ init = ?config(suite,Config),
+ conf1 = ?config(cc1,Config),
+ conf1_tc2 = ?config(tc12,Config),
+ ok.
+conf1_tc3(suite) -> [];
+conf1_tc3(_Config) ->
+ test_server:comment("Case 3"),
+ ok.
+
+conf2_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ init = ?config(suite,Config),
+ undefined = ?config(cc1,Config),
+ conf2 = ?config(cc2,Config),
+ conf2_tc1 = ?config(tc21,Config),
+ ok.
+conf2_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ init = ?config(suite,Config),
+ conf2 = ?config(cc2,Config),
+ conf2_tc2 = ?config(tc22,Config),
+ ok.
+conf2_tc3(suite) -> [];
+conf2_tc3(_Config) ->
+ test_server:comment("Case 3"),
+ ok.
+
+conf3_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ init = ?config(suite,Config),
+ undefined = ?config(cc2,Config),
+ conf3 = ?config(cc3,Config),
+ conf3_tc1 = ?config(tc31,Config),
+ ok.
+conf3_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ init = ?config(suite,Config),
+ conf3 = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf3_tc2 = ?config(tc32,Config),
+ ok.
+conf3_tc3(suite) -> [];
+conf3_tc3(_Config) ->
+ test_server:comment("Case 3"),
+ ok.
+
+conf4_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ conf4_tc1 = ?config(tc41,Config),
+ ok.
+conf4_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf3 = ?config(cc3,Config),
+ conf4 = ?config(cc4,Config),
+ conf4_tc2 = ?config(tc42,Config),
+ ok.
+
+conf5_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ conf5_tc1 = ?config(tc51,Config),
+ ok.
+conf5_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ conf5_tc2 = ?config(tc52,Config),
+ ok.
+
+conf6_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ conf6_tc1 = ?config(tc61,Config),
+ ok.
+conf6_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf5 = ?config(cc5,Config),
+ conf6 = ?config(cc6,Config),
+ conf6_tc2 = ?config(tc62,Config),
+ ok.
+conf6_tc3(suite) -> [];
+conf6_tc3(_Config) ->
+ test_server:comment("Case 3"),
+ ok.
+
+conf7_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ undefined = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ conf7 = ?config(cc7,Config),
+ conf7_tc1 = ?config(tc71,Config),
+ ok.
+conf7_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf7 = ?config(cc7,Config),
+ undefined = ?config(cc8,Config),
+ conf7_tc2 = ?config(tc72,Config),
+ ok.
+conf7_tc3(suite) -> [];
+conf7_tc3(_Config) ->
+ test_server:comment("Case 3"),
+ ok.
+
+conf8_tc1(Config) when is_list(Config) ->
+ test_server:comment("Case 1"),
+ init = ?config(suite,Config),
+ case ?config(data_dir,Config) of
+ undefined -> exit(no_data_dir);
+ _ -> ok
+ end,
+ undefined = ?config(cc1,Config),
+ undefined = ?config(cc2,Config),
+ undefined = ?config(cc3,Config),
+ undefined = ?config(cc4,Config),
+ undefined = ?config(cc5,Config),
+ undefined = ?config(cc6,Config),
+ conf7 = ?config(cc7,Config),
+ conf8 = ?config(cc8,Config),
+ conf8_tc1 = ?config(tc81,Config),
+ ok.
+conf8_tc2(Config) when is_list(Config) ->
+ test_server:comment("Case 2"),
+ init = ?config(suite,Config),
+ case ?config(priv_dir,Config) of
+ undefined -> exit(no_priv_dir);
+ _ -> ok
+ end,
+ conf7 = ?config(cc7,Config),
+ conf8 = ?config(cc8,Config),
+ conf8_tc2 = ?config(tc82,Config),
+ ok.
+conf8_tc3(suite) -> [];
+conf8_tc3(_Config) ->
+ test_server:comment("Case 3"),
+ ok.
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_skip_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_skip_SUITE.erl
new file mode 100644
index 0000000000..ae2321c6ad
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_skip_SUITE.erl
@@ -0,0 +1,43 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2004-2011. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_skip_SUITE).
+
+-export([all/1, init_per_suite/1, end_per_suite/1]).
+-export([dummy/1]).
+
+-include_lib("common_test/include/ct.hrl").
+
+all(suite) ->
+ [dummy].
+
+init_per_suite(Config) when is_list(Config) ->
+ {skip,"Skipping init_per_suite - check that \'dummy\' and"
+ " \'end_per_suite\' are also skipped"}.
+
+dummy(suite) -> [];
+dummy(doc) -> ["This testcase should never be executed"];
+dummy(Config) when is_list(Config) ->
+ ?t:fail("This testcase should be executed since"
+ " init_per_suite/1 is skipped").
+
+end_per_suite(doc) -> ["This testcase should never be executed"];
+end_per_suite(Config) when is_list(Config) ->
+ ?t:fail("end_per_suite/1 should not be executed when"
+ " init_per_suite/1 is skipped").
diff --git a/lib/common_test/test/test_server_SUITE_data/test_server_unicode_SUITE.erl b/lib/common_test/test/test_server_SUITE_data/test_server_unicode_SUITE.erl
new file mode 100644
index 0000000000..0cabce995f
--- /dev/null
+++ b/lib/common_test/test/test_server_SUITE_data/test_server_unicode_SUITE.erl
@@ -0,0 +1,82 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_unicode_SUITE).
+
+-export([all/1, init_per_suite/1, end_per_suite/1]).
+-export([init_per_testcase/2, end_per_testcase/2]).
+-export(['#=@: difficult_case_name_äöå'/1,
+ print_and_log_unicode/1,
+ print_and_log_latin1/1]).
+
+-include_lib("common_test/include/ct.hrl").
+
+all(suite) ->
+ ['#=@: difficult_case_name_äöå',
+ print_and_log_unicode,
+ print_and_log_latin1].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(_Case,Config) ->
+ init_timetrap(500,Config).
+
+init_timetrap(T,Config) ->
+ Dog = ?t:timetrap(T),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Case,Config) ->
+ cancel_timetrap(Config).
+
+cancel_timetrap(Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+
+'#=@: difficult_case_name_äöå'(Config) when is_list(Config) ->
+ ok.
+
+print_and_log_unicode(Config) when is_list(Config) ->
+ String = "שלום-שלום+של 日本語",
+ test_server:comment(String),
+ test_server:capture_start(),
+ io:format("String with ts: ~ts",[String]),
+ test_server:capture_stop(),
+ "String with ts: "++String = lists:flatten(test_server:capture_get()),
+ ok.
+
+print_and_log_latin1(Config) when is_list(Config) ->
+ String = "æøå",
+ test_server:comment(String),
+ test_server:capture_start(),
+ io:format("String with s: ~s",[String]),
+ io:format("String with ts: ~ts",[String]),
+ test_server:capture_stop(),
+ ["String with s: "++String,
+ "String with ts: "++String] =
+ [lists:flatten(L) || L<- test_server:capture_get()],
+ ok.
diff --git a/lib/common_test/test/test_server_test_lib.erl b/lib/common_test/test/test_server_test_lib.erl
new file mode 100644
index 0000000000..e2680938e0
--- /dev/null
+++ b/lib/common_test/test/test_server_test_lib.erl
@@ -0,0 +1,217 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_test_lib).
+
+-export([parse_suite/1]).
+-export([init/2, pre_init_per_testcase/3, post_end_per_testcase/4]).
+
+%% for test_server_SUITE, when the node cannot be started as a slave
+-export([prepare_tester_node/2]).
+
+-include("test_server_test_lib.hrl").
+
+%% This CTH hooks into all tests
+init(_Id, _Opts) ->
+ [].
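+
+%% The hook is typically enabled from a test specification, e.g.
+%% (sketch): {ct_hooks, [test_server_test_lib]}.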
+
+pre_init_per_testcase(_TC,Config,State) ->
+ case os:type() of
+ {win32, _} ->
+ %% Extend the timeout on Windows, as starting a node
+ %% can take a long time there
+ test_server:timetrap(120000 * test_server:timetrap_scale_factor());
+ _ ->
+ ok
+ end,
+ {start_slave(Config, 50),State}.
+
+start_slave(Config,_Level) ->
+ [_,Host] = string:tokens(atom_to_list(node()), "@"),
+
+ ct:log("Trying to start ~s~n",
+ ["test_server_tester@"++Host]),
+ case slave:start(Host, test_server_tester, []) of
+ {error,Reason} ->
+ test_server:fail(Reason);
+ {ok,Node} ->
+ ct:log("Node ~p started~n", [Node]),
+ IsCover = test_server:is_cover(),
+ if IsCover ->
+ cover:start(Node);
+ true->
+ ok
+ end,
+ prepare_tester_node(Node,Config)
+ end.
+
+prepare_tester_node(Node,Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ %% We would normally use priv_dir for temporary data,
+ %% but the pathnames get too long on Windows.
+ %% Until the run-time system supports long pathnames,
+ %% use the data dir.
+ WorkDir = DataDir,
+
+ %% WorkDir, as well as the directory of the Test Server suites,
+ %% has to be in the code path on the Test Server node.
+ [_ | Parts] = lists:reverse(filename:split(DataDir)),
+ TSDir = filename:join(lists:reverse(Parts)),
+ AddPathDirs = case proplists:get_value(path_dirs, Config) of
+ undefined -> [];
+ Ds -> Ds
+ end,
+ PathDirs = [WorkDir,TSDir | AddPathDirs],
+ [true = rpc:call(Node, code, add_patha, [D]) || D <- PathDirs],
+ io:format("Dirs added to code path (on ~w):~n",
+ [Node]),
+ [io:format("~s~n", [D]) || D <- PathDirs],
+
+ true = rpc:call(Node, os, putenv,
+ ["TEST_SERVER_FRAMEWORK", "undefined"]),
+
+ ok = rpc:call(Node, file, set_cwd, [WorkDir]),
+ [{node,Node}, {work_dir,WorkDir} | Config].
+
+post_end_per_testcase(_TC, Config, Return, State) ->
+ Node = proplists:get_value(node, Config),
+ Cover = test_server:is_cover(),
+ if Cover-> cover:flush(Node);
+ true -> ok
+ end,
+ erlang:monitor_node(Node, true),
+ slave:stop(Node),
+ receive
+ {nodedown, Node} ->
+ if Cover -> cover:stop(Node);
+ true -> ok
+ end
+ after 5000 ->
+ erlang:monitor_node(Node, false),
+ receive {nodedown, Node} -> ok after 0 -> ok end %flush
+ end,
+ {Return, State}.
+
+%% Parse a suite log file
+parse_suite(FileName) ->
+ case file:open(FileName, [read, raw, read_ahead]) of
+ {ok, Fd} ->
+ Data = parse_suite(Fd, #suite{ }),
+ file:close(Fd),
+ {ok, Data};
+ _ ->
+ error
+ end.
+
+fline(Fd) ->
+ case prim_file:read_line(Fd) of
+ eof -> eof;
+ {ok, Line} -> Line
+ end.
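+
+%% parse_suite/2 below relies on the fixed header layout written at
+%% the top of a suite log: two free-format lines (read and ignored)
+%% followed by, roughly (hypothetical values):
+%%
+%%   =cases 12
+%%   =user otptest
+%%   =host myhost
+%%   =hosts myhost
+%%   =emulator_vsn 6.4
+%%   =emulator beam
+%%   =otp_release 17
+%%   =started 2014-05-12 10:11:12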
+
+parse_suite(Fd, S) ->
+ _Started = fline(Fd),
+ _Starting = fline(Fd),
+ "=cases" ++ NCases = fline(Fd),
+ "=user" ++ _User = fline(Fd),
+ "=host" ++ Host = fline(Fd),
+ "=hosts" ++ _Hosts = fline(Fd),
+ "=emulator_vsn" ++ Evsn = fline(Fd),
+ "=emulator" ++ Emu = fline(Fd),
+ "=otp_release" ++ OtpRel = fline(Fd),
+ "=started" ++ Start = fline(Fd),
+ NewS = parse_cases(Fd, S#suite{
+ n_cases_expected = list_to_int(clean(NCases)),
+ host = list_to_binary(clean(Host)),
+ emulator_vsn = list_to_binary(clean(Evsn)),
+ emulator = list_to_binary(clean(Emu)),
+ otp_release = list_to_binary(clean(OtpRel)),
+ started = list_to_binary(clean(Start))
+ }),
+ "=failed" ++ Failed = fline(Fd),
+ "=successful" ++ Succ = fline(Fd),
+ "=user_skipped" ++ UsrSkip = fline(Fd),
+ "=auto_skipped" ++ AutSkip = fline(Fd),
+ NewS#suite{ n_cases_failed = list_to_int(clean(Failed)),
+ n_cases_succ = list_to_int(clean(Succ)),
+ n_cases_user_skip = list_to_int(clean(UsrSkip)),
+ n_cases_auto_skip = list_to_int(clean(AutSkip)) }.
+
+
+parse_cases(Fd, #suite{ n_cases = N,
+ cases = Cases } = S) ->
+ case parse_case(Fd) of
+ finished -> S#suite{ log_ok = true };
+ {eof, Tc} ->
+ S#suite{ n_cases = N + 1,
+ cases = [Tc#tc{ result = crashed }|Cases]};
+ {ok, Case} ->
+ parse_cases(Fd, S#suite{ n_cases = N + 1,
+ cases = [Case|Cases]})
+ end.
+
+parse_case(Fd) -> parse_case(Fd, #tc{}).
+parse_case(Fd, Tc) -> parse_case(fline(Fd), Fd, Tc).
+
+parse_case(eof, _, Tc) -> {eof, Tc};
+parse_case("=case" ++ Case, Fd, Tc) ->
+ Name = list_to_binary(clean(Case)),
+ parse_case(fline(Fd), Fd, Tc#tc{ name = Name });
+parse_case("=logfile" ++ File, Fd, Tc) ->
+ Log = list_to_binary(clean(File)),
+ parse_case(fline(Fd), Fd, Tc#tc{ logfile = Log });
+parse_case("=elapsed" ++ Elapsed, Fd, Tc) ->
+ {ok, [Time], _} = io_lib:fread("~f", clean(Elapsed)),
+ parse_case(fline(Fd), Fd, Tc#tc{ elapsed = Time });
+parse_case("=result" ++ Result, _, Tc) ->
+ case clean(Result) of
+ "ok" ++ _ ->
+ {ok, Tc#tc{ result = ok } };
+ "failed" ++ _ ->
+ {ok, Tc#tc{ result = failed } };
+ "skipped" ++ _ ->
+ {ok, Tc#tc{ result = skip } };
+ "auto_skipped" ++ _ ->
+ {ok, Tc#tc{ result = auto_skip } }
+ end;
+parse_case("=finished" ++ _ , _Fd, #tc{ name = undefined }) ->
+ finished;
+parse_case(_, Fd, Tc) ->
+ parse_case(fline(Fd), Fd, Tc).
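+
+%% Each case entry is expected to contain lines like the following
+%% (hypothetical values); unrecognized lines are skipped, and a
+%% "=finished" line seen before any "=case" ends the parsing:
+%%
+%%   =case my_SUITE:my_case
+%%   =logfile my_suite.my_case.html
+%%   =elapsed 0.123
+%%   =result ok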
+
+skip([]) -> [];
+skip([$ |Ts]) -> skip(Ts);
+skip(Ts) -> Ts.
+
+rmnl([]) -> [];
+rmnl([$\n | Ts]) -> rmnl(Ts);
+rmnl([T|Ts]) -> [T | rmnl(Ts)].
+
+clean(L) ->
+ rmnl(skip(L)).
+
+list_to_int(L) ->
+ try
+ list_to_integer(L)
+ catch
+ _:_ ->
+ 0
+ end.
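+
+%% Usage sketch (hypothetical log path):
+%%
+%%   {ok,#suite{n_cases=N, cases=Cases}} =
+%%       test_server_test_lib:parse_suite("suite.log")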
diff --git a/lib/common_test/test/test_server_test_lib.hrl b/lib/common_test/test/test_server_test_lib.hrl
new file mode 100644
index 0000000000..27b7be9618
--- /dev/null
+++ b/lib/common_test/test/test_server_test_lib.hrl
@@ -0,0 +1,23 @@
+-record(tc, {
+ name,
+ result,
+ elapsed,
+ logfile
+ }).
+
+-record(suite, {
+ application,
+ n_cases = 0,
+ n_cases_failed = 0,
+ n_cases_expected = 0,
+ n_cases_succ,
+ n_cases_user_skip,
+ n_cases_auto_skip,
+ cases = [],
+ host,
+ emulator_vsn,
+ emulator,
+ otp_release,
+ started,
+ log_ok = false
+ }).
diff --git a/lib/common_test/test_server/Makefile b/lib/common_test/test_server/Makefile
new file mode 100644
index 0000000000..0adf64b837
--- /dev/null
+++ b/lib/common_test/test_server/Makefile
@@ -0,0 +1,96 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 1996-2013. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# %CopyrightEnd%
+#
+
+include $(ERL_TOP)/make/target.mk
+
+# ----------------------------------------------------
+# Configuration info.
+# ----------------------------------------------------
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+
+# ----------------------------------------------------
+# Target Specs
+# ----------------------------------------------------
+
+EBIN=.
+
+TS_MODULES= \
+ ts \
+ ts_run \
+ ts_lib \
+ ts_make \
+ ts_erl_config \
+ ts_autoconf_win32 \
+ ts_install \
+ ts_install_cth \
+ ts_benchmark
+
+TARGET_MODULES= $(MODULES:%=$(EBIN)/%)
+TS_TARGET_MODULES= $(TS_MODULES:%=$(EBIN)/%)
+
+TS_ERL_FILES = $(TS_MODULES:=.erl)
+TS_HRL_FILES = ts.hrl
+AUTOCONF_FILES = configure.in conf_vars.in
+PROGRAMS = configure config.sub config.guess install-sh
+CONFIG = ts.config ts.unix.config ts.win32.config
+
+TS_TARGET_FILES = $(TS_MODULES:%=$(EBIN)/%.$(EMULATOR))
+
+TS_TARGETS = $(TS_MODULES:%=$(EBIN)/%.$(EMULATOR))
+
+# ----------------------------------------------------
+# FLAGS
+# ----------------------------------------------------
+ERL_COMPILE_FLAGS += -I../include -Werror
+
+# ----------------------------------------------------
+# Targets
+# ----------------------------------------------------
+
+tests debug opt: $(TS_TARGETS)
+
+clean:
+ rm -f $(TS_TARGET_FILES)
+ rm -f core
+
+docs:
+
+configure: configure.in
+ autoconf configure.in > configure
+
+# ----------------------------------------------------
+# Special Build Targets
+# ----------------------------------------------------
+
+# ----------------------------------------------------
+# Release Target
+# ----------------------------------------------------
+include $(ERL_TOP)/make/otp_release_targets.mk
+
+release_tests_spec: opt
+ $(INSTALL_DIR) "$(RELEASE_PATH)/test_server"
+ $(INSTALL_DATA) $(TS_ERL_FILES) $(TS_HRL_FILES) \
+ $(TS_TARGET_FILES) \
+ $(AUTOCONF_FILES) $(CONFIG) \
+ "$(RELEASE_PATH)/test_server"
+ $(INSTALL_SCRIPT) $(PROGRAMS) "$(RELEASE_PATH)/test_server"
+
+release_docs_spec:
+
diff --git a/lib/common_test/test_server/conf_vars.in b/lib/common_test/test_server/conf_vars.in
new file mode 100644
index 0000000000..7c55d7b9ed
--- /dev/null
+++ b/lib/common_test/test_server/conf_vars.in
@@ -0,0 +1,25 @@
+CC:@CC@
+LD:@LD@
+CFLAGS:@CFLAGS@
+EI_CFLAGS:@EI_CFLAGS@
+ERTS_CFLAGS:@ERTS_CFLAGS@
+CROSSLDFLAGS:@CROSSLDFLAGS@
+SHLIB_LD:@SHLIB_LD@
+SHLIB_LDFLAGS:@SHLIB_LDFLAGS@
+SHLIB_LDLIBS:@SHLIB_LDLIBS@
+SHLIB_CFLAGS:@SHLIB_CFLAGS@
+SHLIB_EXTRACT_ALL:@SHLIB_EXTRACT_ALL@
+dll:@SHLIB_SUFFIX@
+DEFS:@DEFS@
+ERTS_LIBS:@ERTS_LIBS@
+LIBS:@LIBS@
+target_host:@target_host@
+CPU:@host_cpu@
+os:@host_os@
+target:@host@
+obj:@obj@
+exe:@exe@
+SSLEAY_ROOT:@SSLEAY_ROOT@
+JAVAC:@JAVAC@
+make_command:@make_command@
+test_c_compiler:@test_c_compiler@
diff --git a/lib/common_test/test_server/configure.in b/lib/common_test/test_server/configure.in
new file mode 100644
index 0000000000..001de72a1e
--- /dev/null
+++ b/lib/common_test/test_server/configure.in
@@ -0,0 +1,509 @@
+dnl Process this file with autoconf to produce a configure script for Erlang.
+dnl
+dnl %CopyrightBegin%
+dnl
+dnl Copyright Ericsson AB 1997-2014. All Rights Reserved.
+dnl
+dnl Licensed under the Apache License, Version 2.0 (the "License");
+dnl you may not use this file except in compliance with the License.
+dnl You may obtain a copy of the License at
+dnl
+dnl http://www.apache.org/licenses/LICENSE-2.0
+dnl
+dnl Unless required by applicable law or agreed to in writing, software
+dnl distributed under the License is distributed on an "AS IS" BASIS,
+dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+dnl See the License for the specific language governing permissions and
+dnl limitations under the License.
+dnl
+dnl %CopyrightEnd%
+dnl
+
+AC_INIT(conf_vars.in)
+
+AC_CANONICAL_HOST
+
+dnl Checks for programs.
+AC_PROG_CC
+
+DEBUG_FLAGS="-g -DDEBUG"
+if test "$GCC" = yes; then
+ DEBUG_FLAGS="$DEBUG_FLAGS -Wall $CFLAGS"
+fi
+AC_SUBST(DEBUG_FLAGS)
+
+AC_ARG_ENABLE(debug-mode,
+[ --enable-debug-mode enable debug mode],
+[ case "$enableval" in
+ no) ;;
+ *) CFLAGS=$DEBUG_FLAGS ;;
+ esac ], )
+
+AC_ARG_ENABLE(m64-build,
+AS_HELP_STRING([--enable-m64-build],
+ [build 64-bit binaries using the -m64 flag to (g)cc]),
+[ case "$enableval" in
+ no) enable_m64_build=no ;;
+ *) enable_m64_build=yes ;;
+ esac
+],enable_m64_build=no)
+
+AC_ARG_ENABLE(m32-build,
+AS_HELP_STRING([--enable-m32-build],
+ [build 32-bit binaries using the -m32 flag to (g)cc]),
+[ case "$enableval" in
+ no) enable_m32_build=no ;;
+ *) enable_m32_build=yes ;;
+ esac
+],enable_m32_build=no)
+
+no_mXX_LDFLAGS="$LDFLAGS"
+
+if test X${enable_m64_build} = Xyes; then
+ CFLAGS="-m64 $CFLAGS"
+ LDFLAGS="-m64 $LDFLAGS"
+fi
+if test X${enable_m32_build} = Xyes; then
+ CFLAGS="-m32 $CFLAGS"
+ LDFLAGS="-m32 $LDFLAGS"
+fi
+
+AC_CHECK_LIB(m, sin)
+
+#--------------------------------------------------------------------
+# Interactive UNIX requires -linet instead of -lsocket, plus it
+# needs net/errno.h to define the socket-related error codes.
+#--------------------------------------------------------------------
+
+AC_CHECK_LIB(inet, main, [LIBS="$LIBS -linet"])
+AC_CHECK_HEADER(net/errno.h, AC_DEFINE(HAVE_NET_ERRNO_H))
+
+#--------------------------------------------------------------------
+# Linux/tcp.h may be needed for sockopt test in kernel
+#--------------------------------------------------------------------
+
+AC_CHECK_HEADER(linux/tcp.h, AC_DEFINE(HAVE_LINUX_TCP_H))
+AC_MSG_CHECKING(for sane linux/tcp.h)
+AC_TRY_COMPILE([#include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <unistd.h>
+ #include <stdarg.h>
+ #include <sys/types.h>
+ #include <sys/socket.h>
+ #include <sys/wait.h>
+ #include <linux/tcp.h>
+ #include <netinet/in.h>
+ #include <netdb.h>],
+ [return 0;],
+ have_sane_linux_tcp_h=yes,
+ have_sane_linux_tcp_h=no)
+
+if test $have_sane_linux_tcp_h = yes; then
+ AC_DEFINE(HAVE_SANE_LINUX_TCP_H,[1],
+ [Define if we have sane linux/tcp.h])
+ AC_MSG_RESULT(yes)
+else
+ AC_MSG_RESULT(no)
+fi
+
+
+
+#--------------------------------------------------------------------
+# Linux requires sys/socketio.h instead of sys/sockio.h
+#--------------------------------------------------------------------
+AC_CHECK_HEADER(sys/socketio.h, AC_DEFINE(HAVE_SOCKETIO_H))
+
+
+#--------------------------------------------------------------------
+# Misc
+#--------------------------------------------------------------------
+AC_CHECK_HEADER(poll.h, AC_DEFINE(HAVE_POLL_H))
+
+#--------------------------------------------------------------------
+# The statements below define a collection of symbols related to
+# dynamic loading and shared libraries:
+#
+# SHLIB_CFLAGS - Flags to pass to cc when compiling the components
+# of a shared library (may request position-independent
+# code, among other things).
+# SHLIB_LD - Base command to use for combining object files
+# into a shared library.
+# SHLIB_SUFFIX - Suffix to use for the names of dynamically loadable
+# extensions. An empty string means we don't know how
+# to use shared libraries on this platform.
+#--------------------------------------------------------------------
+
+# Step 1: set the variable "system" to hold the name and version number
+# for the system.
+
+AC_MSG_CHECKING([system version (for dynamic loading)])
+system=`./config.sub $host`
+AC_MSG_RESULT($system)
+
+# Step 2: check for existence of -ldl library. This is needed because
+# Linux can use either -ldl or -ldld for dynamic loading.
+
+AC_CHECK_LIB(dl, dlopen, have_dl=yes, have_dl=no)
+
+# Step 3: set configuration options based on system name and version.
+
+SHLIB_LDLIBS=
+fullSrcDir=`cd $srcdir; pwd`
+case $system in
+ *-linux-*)
+ SHLIB_CFLAGS="-fPIC"
+ SHLIB_SUFFIX=".so"
+ if test "$have_dl" = yes; then
+ SHLIB_LD="${CC}"
+ SHLIB_LDFLAGS="$LDFLAGS -shared"
+ LD_FLAGS="-rdynamic"
+ else
+ AC_CHECK_HEADER(dld.h, [
+ SHLIB_LD="ld"
+ SHLIB_LDFLAGS="-shared"])
+ if test X${enable_m64_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers)
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
+ fi
+ SHLIB_EXTRACT_ALL=""
+ ;;
+ *-openbsd*)
+ # Not available on all versions: check for include file.
+ AC_CHECK_HEADER(dlfcn.h, [
+ SHLIB_CFLAGS="-fpic"
+ SHLIB_LD="${CC}"
+ SHLIB_LDFLAGS="$LDFLAGS -shared"
+ SHLIB_SUFFIX=".so"
+ if test X${enable_m64_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers)
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
+ ], [
+ # No dynamic loading.
+ SHLIB_CFLAGS=""
+ SHLIB_LD="ld"
+ SHLIB_LDFLAGS=""
+ SHLIB_SUFFIX=""
+ AC_MSG_ERROR(don't know how to compile and link dynamic drivers)
+ ])
+ SHLIB_EXTRACT_ALL=""
+ ;;
+ *-netbsd*|*-freebsd*|*-dragonfly*)
+ # Not available on all versions: check for include file.
+ AC_CHECK_HEADER(dlfcn.h, [
+ SHLIB_CFLAGS="-fpic"
+ SHLIB_LD="ld"
+ SHLIB_LDFLAGS="$LDFLAGS -Bshareable -x"
+ SHLIB_SUFFIX=".so"
+ if test X${enable_m64_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers)
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
+ ], [
+ # No dynamic loading.
+ SHLIB_CFLAGS=""
+ SHLIB_LD="ld"
+ SHLIB_LDFLAGS=""
+ SHLIB_SUFFIX=""
+ AC_MSG_ERROR(don't know how to compile and link dynamic drivers)
+ ])
+ SHLIB_EXTRACT_ALL=""
+ ;;
+ *-solaris2*|*-sysv4*)
+ SHLIB_CFLAGS="-KPIC"
+ SHLIB_LD="/usr/ccs/bin/ld"
+ SHLIB_LDFLAGS="$no_mXX_LDFLAGS -G -z text"
+ if test X${enable_m64_build} = Xyes; then
+ SHLIB_LDFLAGS="-64 $SHLIB_LDFLAGS"
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
+ SHLIB_SUFFIX=".so"
+ SHLIB_EXTRACT_ALL="-z allextract"
+ ;;
+ *darwin*)
+ SHLIB_CFLAGS="-fno-common"
+ SHLIB_LD="cc"
+ SHLIB_LDFLAGS="$LDFLAGS -bundle -flat_namespace -undefined suppress"
+ SHLIB_SUFFIX=".so"
+ SHLIB_EXTRACT_ALL=""
+ ;;
+ *osf1*)
+ SHLIB_CFLAGS="-fPIC"
+ SHLIB_LD="ld"
+ SHLIB_LDFLAGS="$LDFLAGS -shared"
+ if test X${enable_m64_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers)
+ fi
+ if test X${enable_m32_build} = Xyes; then
+ AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers)
+ fi
+ SHLIB_SUFFIX=".so"
+ SHLIB_EXTRACT_ALL=""
+ ;;
+ *osf5*)
+ SHLIB_CFLAGS="-fPIC"
+ SHLIB_LD="${CC} -shared"
+ SHLIB_LDFLAGS="$LDFLAGS"
+ SHLIB_SUFFIX=".so"
+ SHLIB_EXTRACT_ALL=""
+ ;;
+ *)
+ # No dynamic loading.
+ SHLIB_CFLAGS=""
+ SHLIB_LD="ld"
+ SHLIB_LDFLAGS=""
+ SHLIB_LDLIBS=""
+ SHLIB_SUFFIX=""
+ SHLIB_EXTRACT_ALL=""
+ AC_MSG_ERROR(don't know how to compile and link dynamic drivers)
+ ;;
+esac
+
+# If we're running gcc, then change the C flags for compiling shared
+# libraries to the right flags for gcc, instead of those for the
+# standard manufacturer compiler.
+
+if test "$CC" = "gcc" -o `$CC -v 2>&1 | grep -c gcc` != "0" ; then
+ case $system in
+ *-aix)
+ ;;
+ *-bsd*)
+ ;;
+ *-irix)
+ ;;
+ *-netbsd|*-freebsd|*-openbsd)
+ ;;
+ *-riscos)
+ ;;
+ *ultrix4.*)
+ ;;
+ *darwin*)
+ ;;
+ *)
+ SHLIB_CFLAGS="-fPIC"
+ ;;
+ esac
+fi
+
+# Make it possible for erl_interface to use its own compiler options
+EI_CFLAGS="$CFLAGS"
+
+# Add thread-safety flags if requested
+AC_ARG_ENABLE(shlib-thread-safety,
+[ --enable-shlib-thread-safety enable thread safety for building shared libraries],
+[ case "$enableval" in
+ no) ;;
+ *) SHLIB_CFLAGS="$SHLIB_CFLAGS -D_THREAD_SAFE -D_REENTRANT"
+ CFLAGS="$CFLAGS -D_THREAD_SAFE -D_REENTRANT"
+ ;;
+ esac ], )
+
+SHLIB_CFLAGS="$SHLIB_CFLAGS $CFLAGS"
+
+
+AC_SUBST(CFLAGS)
+AC_SUBST(SHLIB_LD)
+AC_SUBST(SHLIB_LDFLAGS)
+AC_SUBST(SHLIB_LDLIBS)
+AC_SUBST(SHLIB_CFLAGS)
+AC_SUBST(SHLIB_SUFFIX)
+AC_SUBST(SHLIB_EXTRACT_ALL)
+AC_SUBST(EI_CFLAGS)
+
+#--------------------------------------------------------------------
+# Check for the existence of the -lsocket and -lnsl libraries.
+# The order here is important, so that they end up in the right
+# order in the command line generated by make. Here are some
+# special considerations:
+# 1. Use "connect" and "accept" to check for -lsocket, and
+# "gethostbyname" to check for -lnsl.
+# 2. Use each function name only once: can't redo a check because
+# autoconf caches the results of the last check and won't redo it.
+# 3. Use -lnsl and -lsocket only if they supply procedures that
+# aren't already present in the normal libraries. This is because
+# IRIX 5.2 has libraries, but they aren't needed and they're
+# bogus: they goof up name resolution if used.
+# 4. On some SVR4 systems, can't use -lsocket without -lnsl too.
+# To get around this problem, check for both libraries together
+# if -lsocket doesn't work by itself.
+#--------------------------------------------------------------------
+
+erl_checkBoth=0
+AC_CHECK_FUNC(connect, erl_checkSocket=0, erl_checkSocket=1)
+if test "$erl_checkSocket" = 1; then
+ AC_CHECK_LIB(socket, main, LIBS="$LIBS -lsocket", erl_checkBoth=1)
+fi
+if test "$erl_checkBoth" = 1; then
+ tk_oldLibs=$LIBS
+ LIBS="$LIBS -lsocket -lnsl"
+ AC_CHECK_FUNC(accept, erl_checkNsl=0, [LIBS=$tk_oldLibs])
+fi
+AC_CHECK_FUNC(gethostbyname, , AC_CHECK_LIB(nsl, main, [LIBS="$LIBS -lnsl"]))
+
+dnl Checks for library functions.
+AC_CHECK_FUNCS(strerror)
+AC_CHECK_FUNCS(vsnprintf)
+AC_CHECK_FUNCS(usleep)
+
+# First check if the library is available, then if we can choose between
+# two versions of gethostbyname
+AC_HAVE_LIBRARY(resolv)
+AC_CHECK_LIB(resolv, res_gethostbyname,[AC_DEFINE(HAVE_RES_GETHOSTBYNAME,1)])
+
+#--------------------------------------------------------------------
+# Check for isfinite
+#--------------------------------------------------------------------
+
+AC_MSG_CHECKING([for isfinite])
+AC_TRY_LINK([#include <math.h>],
+ [isfinite(0);], have_isfinite=yes, have_isfinite=no)
+
+if test $have_isfinite = yes; then
+ AC_DEFINE(HAVE_ISFINITE,1)
+ AC_MSG_RESULT(yes)
+else
+ AC_DEFINE(HAVE_FINITE,1)
+ AC_MSG_RESULT(no)
+fi
+
+#--------------------------------------------------------------------
+# Emulator compatible flags (for drivers)
+#--------------------------------------------------------------------
+
+ERTS_CFLAGS=$CFLAGS
+AC_SUBST(ERTS_CFLAGS)
+
+ERTS_LIBS=$LIBS
+AC_SUBST(ERTS_LIBS)
+
+#--------------------------------------------------------------------
+# Special compiler macro to handle cross compiling
+# (HCC) is used to compile tools run in the HOST environment
+#--------------------------------------------------------------------
+HCC='$(CC)'
+AC_SUBST(HCC)
+
+#--------------------------------------------------------------------
+# ld is used for linking on vxworks
+#--------------------------------------------------------------------
+LD='$(CC) $(CFLAGS)'
+AC_SUBST(LD)
+
+#--------------------------------------------------------------------
+# object file suffix
+#--------------------------------------------------------------------
+obj='.o'
+AC_SUBST(obj)
+
+#--------------------------------------------------------------------
+# executable file suffix
+#--------------------------------------------------------------------
+exe=''
+AC_SUBST(exe)
+
+#--------------------------------------------------------------------
+# flags when linking for cross platform targets (yet 'tis useful with
+# native builds)
+#--------------------------------------------------------------------
+CROSSLDFLAGS=''
+AC_SUBST(CROSSLDFLAGS)
+
+dnl
+dnl SSL and CRYPTO needs the library openSSL/ssleay
+dnl
+dnl Check flags --with-ssl, --without-ssl --with-ssl=PATH.
+dnl If no option is given or --with-ssl is set without a path then we
+dnl search for SSL libraries and header files in the standard locations.
+dnl If set to --without-ssl we disable the use of SSL
+dnl If set to --with-ssl=PATH we use that path as the prefix, i.e. we
+dnl use "PATH/include" and "PATH/lib".
+
+AC_SUBST(SSLEAY_ROOT)
+TARGET=$host
+
+# We used to search for SSL, first in the OTP team ClearCase standard
+# location and then in the common OS standard locations, but this
+# search is disabled (see the commented-out loop below).
+SSL_APP=ssl
+CRYPTO_APP=crypto
+SSLEAY_ROOT=$TARGET
+#for dir in /usr /usr/pkg /usr/local /usr/local/ssl /usr/lib/ssl /usr/ssl; do
+# AC_CHECK_HEADER($dir/include/openssl/opensslv.h,
+# ac_cv_openssl=yes, ac_cv_openssl=no)
+# if test $ac_cv_openssl = yes ; then
+# SSLEAY_ROOT="$dir"
+# ssl_found=yes
+# break
+# fi
+#done
+
+# Find a usable java compiler
+#
+# WARNING this code is copied from ERTS configure.in, and should be
+# updated if that code changes. I hate duplicating code, but what
+# can I do.
+#
+dnl ERL_TRY_LINK_JAVA(CLASSES, FUNCTION-BODY
+dnl [ACTION_IF_FOUND [, ACTION-IF-NOT-FOUND]])
+dnl Freely inspired by AC_TRY_LINK. (Maybe better to create a
+dnl AC_LANG_JAVA instead...)
+AC_DEFUN(ERL_TRY_LINK_JAVA,
+[java_link='$JAVAC conftest.java 1>&AC_FD_CC'
+changequote(«, »)dnl
+cat > conftest.java <<EOF
+«$1»
+class conftest { public static void main(String[] args) {
+ «$2»
+ ; return; }}
+EOF
+changequote([, ])dnl
+if AC_TRY_EVAL(java_link) && test -s conftest.class; then
+ ifelse([$3], , :, [rm -rf conftest*
+ $3])
+else
+ echo "configure: failed program was:" 1>&AC_FD_CC
+ cat conftest.java 1>&AC_FD_CC
+ echo "configure: PATH was $PATH" 1>&AC_FD_CC
+ifelse([$4], , , [ rm -rf conftest*
+ $4
+])dnl
+fi
+rm -f conftest*])
+dnl
+AC_CHECK_PROGS(JAVAC, javac guavac gcj jikes bock)
+if test -n "$JAVAC"; then
+ dnl Make sure it's at least JDK 1.5
+ AC_CACHE_CHECK(for JDK version 1.5,
+ ac_cv_prog_javac_ver_1_5,
+ [ERL_TRY_LINK_JAVA([], [for (String i : args);],
+ ac_cv_prog_javac_ver_1_5=yes, ac_cv_prog_javac_ver_1_5=no)])
+ if test $ac_cv_prog_javac_ver_1_5 = no; then
+ unset -v JAVAC
+ fi
+fi
+if test -n "$JAVAC"; then
+ AC_SUBST(JAVAC)
+ :
+fi
+
+AC_CHECK_PROGS([make_command], [make gmake], [false])
+AC_SUBST(make_command)
+
+if test "$GCC" = yes; then
+ test_c_compiler="{gnuc, undefined}"
+else
+ test_c_compiler="undefined"
+fi
+AC_SUBST(test_c_compiler)
+
+AC_OUTPUT(conf_vars)
diff --git a/lib/common_test/test_server/cross.cover b/lib/common_test/test_server/cross.cover
new file mode 100644
index 0000000000..07bf0bed5c
--- /dev/null
+++ b/lib/common_test/test_server/cross.cover
@@ -0,0 +1,20 @@
+%%% This is an -*- erlang -*- file.
+%%%
+%%% Elements in this file shall be of the form
+%%% {Application,Modules}.
+%%%
+%%% Application is the name of an application or the atom all.
+%%% Modules is a list of module names
+%%%
+%%% The Application shall include the listed Modules in its cover compilation,
+%%% but not in the cover analysis.
+%%% If Application=all it means that all applications shall include the listed
+%%% Modules in the cover compilation.
+%%%
+%%% After all tests are completed, the listed modules are analysed with cover
+%%% data from all tests and the result is stored under the application where
+%%% the modules belong.
+
+{all,[]}.
+
+{observer,[dbg]}.
diff --git a/lib/common_test/test_server/ts.config b/lib/common_test/test_server/ts.config
new file mode 100644
index 0000000000..cf3d269616
--- /dev/null
+++ b/lib/common_test/test_server/ts.config
@@ -0,0 +1,46 @@
+%% -*- erlang -*-
+
+%%% Change these to suit the environment. See the inet_SUITE for info about
+%%% what they are used for.
+%%% test_hosts are looked up using "ypmatch xx yy zz hosts.byname"
+%{test_hosts,[my_ip4_host]}.
+
+%% IPv4 host only - no ipv6 entry must exist!
+%{test_host_ipv4_only,
+% {"my_ip4_host", %Short hostname
+% "my_ip4_host.mydomain.com", %Long hostname
+% "10.10.0.1", %IP string
+% {10,10,0,1}, %IP tuple
+% ["my_ip4_host"], %Any aliases
+% "::ffff:10.10.0.1", %IPv6 string (compatibility addr)
+% {0,0,0,0,0,65535,2570,1} %IPv6 tuple
+% }}.
+
+%{test_dummy_host, {"dummy",
+% "dummy.mydomain.com",
+% "192.168.0.1",
+% {192,168,0,1},
+% ["dummy"],
+% "::ffff:192.168.0.1",
+% {0,0,0,0,0,65535,49320,1}
+% }}.
+
+
+%%% test_hosts are looked up using "ypmatch xx yy zz ipnodes.byname"
+%{ipv6_hosts,[my_ip6_host]}.
+
+
+%{test_host_ipv6_only,
+% {"my_ip6_host", %Short hostname
+% "my_ip6_host.mydomain.com", %Long hostname
+% "::2eff:f2b0:1ea0", %IPv6 string
+% {0,0,0,0,0,12031,62128,7840}, %IPv6 tuple
+% ["my_ip6_host"] %Aliases.
+% }}.
+
+%{test_dummy_ipv6_host, {"dummy6",
+% "dummy6.mydomain.com",
+% "127::1",
+% {295,0,0,0,0,0,0,1},
+% ["dummy6-ip6"]
+% }}.
diff --git a/lib/common_test/test_server/ts.erl b/lib/common_test/test_server/ts.erl
new file mode 100644
index 0000000000..8bbdc8f8cf
--- /dev/null
+++ b/lib/common_test/test_server/ts.erl
@@ -0,0 +1,1019 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2014. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File : ts.erl
+%%% Purpose : Frontend for running tests.
+%%%-------------------------------------------------------------------
+
+-module(ts).
+
+-export([cl_run/1,
+ run/0, run/1, run/2, run/3, run/4, run/5,
+ run_category/1, run_category/2, run_category/3,
+ tests/0, tests/1, suites/1, categories/1,
+ install/0, install/1,
+ estone/0, estone/1,
+ cross_cover_analyse/1,
+ compile_testcases/0, compile_testcases/1,
+ help/0]).
+
+%% Functions kept for backwards compatibility
+-export([bench/0, bench/1, bench/2, benchmarks/0,
+ smoke_test/0, smoke_test/1,smoke_test/2, smoke_tests/0]).
+
+-export([i/0, l/1, r/0, r/1, r/2, r/3]).
+
+%%%----------------------------------------------------------------------
+%%% This module, ts, is the interface to all of the functionality of
+%%% the TS framework. The picture below shows the relationship of
+%%% the modules:
+%%%
+%%% +-- ts_install --+------ ts_autoconf_win32
+%%% |
+%%% ts ---+ +------ ts_erl_config
+%%% | | ts_lib
+%%% +-- ts_run -----+------ ts_make
+%%% | | ts_filelib
+%%% | +------ ts_make_erl
+%%% |
+%%% +-- ts_benchmark
+%%%
+%%% The modules ts_lib and ts_filelib contain utilities used by
+%%% the other modules.
+%%%
+%%% Module Description
+%%% ------ -----------
+%%% ts Frontend to the test server framework. Contains all
+%%% interface functions.
+%%% ts_install Installs the test suite. On Unix, `autoconf' is
+%%% used; on Windows, ts_autoconf_win32 is used.
+%%% The result is written to the file `variables'.
+%%% ts_run Supervises running of the tests.
+%%% ts_autoconf_win32 An `autoconf' for Windows.
+%%% ts_autoconf_cross_env `autoconf' for other platforms (cross environment)
+%%% ts_erl_config Finds out information about the Erlang system,
+%%% for instance the location of erl_interface.
+%%% This works for either an installed OTP or an Erlang
+%%% system running in a git repository/source tree.
+%%% ts_make Interface to run the `make' program on Unix
+%%% and other platforms.
+%%% ts_make_erl A corrected version of the standard Erlang module
+%%% make (used for rebuilding test suites).
+%%% ts_lib Miscellaneous utility functions, each used by several
+%%% other modules.
+%%% ts_benchmark Supervises OTP benchmarks and collects results.
+%%%----------------------------------------------------------------------
+
+-include_lib("kernel/include/file.hrl").
+-include("ts.hrl").
+
+-define(
+ install_help,
+ [
+ " ts:install()\n",
+ " Install ts with no options.\n",
+ "\n",
+ " ts:install(Options)\n",
+ " Install ts with a list of options, see below.\n",
+ "\n",
+ "Installation options supported:\n\n",
+ " {longnames, true} - Use fully qualified hostnames\n",
+ " {verbose, Level} - Sets verbosity level for TS output (0,1,2), 0 is\n"
+ " quiet(default).\n"
+ " {crossroot, ErlTop}\n"
+ " - Erlang root directory on build host, ~n"
+ " normally same value as $ERL_TOP\n"
+ " {crossenv, [{Key,Val}]}\n"
+ " - Environmentals used by test configure on build host\n"
+ " {crossflags, FlagsString}\n"
+ " - Flags used by test configure on build host\n"
+ " {xcomp, XCompFile}\n"
+ " - The xcomp file to use for cross compiling the~n"
+ " testcases. Using this option will override any~n"
+ " cross* configurations given to ts. Note that you~n"
+ " have to have a correct ERL_TOP as well.~n"
+ ]).
+
+help() ->
+ case filelib:is_file(?variables) of
+ false -> help(uninstalled);
+ true -> help(installed)
+ end.
+
+help(uninstalled) ->
+ H = ["ts is not yet installed. To install use:\n\n"],
+ show_help([H,?install_help]);
+help(installed) ->
+ H = ["\n",
+ "Run functions:\n\n",
+ " ts:run()\n",
+ " Run the tests for all apps. The tests are defined by the\n",
+ " main test specification for each app: ../App_test/App.spec.\n",
+ "\n",
+ " ts:run(Apps)\n",
+ " Apps = atom() | [atom()]\n",
+ " Run the tests for an app, or set of apps. The tests are\n",
+ " defined by the main test specification for each app:\n",
+ " ../App_test/App.spec.\n",
+ "\n",
+ " ts:run(App, Suites)\n",
+ " App = atom(), Suites = atom() | [atom()]\n",
+ " Run one or more test suites for App (i.e. modules named\n",
+ " *_SUITE.erl, located in ../App_test/).\n",
+ "\n",
+ " ts:run(App, Suite, TestCases)\n",
+ " App = atom(), Suite = atom(),\n",
+ " TestCases = TCs | {testcase,TCs}, TCs = atom() | [atom()]\n",
+ " Run one or more test cases (functions) in Suite.\n",
+ "\n",
+ " ts:run(App, Suite, {group,Groups})\n",
+ " App = atom(), Suite = atom(), Groups = atom() | [atom()]\n",
+ " Run one or more test case groups in Suite.\n",
+ "\n",
+ " ts:run(App, Suite, {group,Group}, {testcase,TestCases})\n",
+ " App = atom(), Suite = atom(), Group = atom(),\n",
+ " TestCases = atom() | [atom()]\n",
+ " Run one or more test cases in a test case group in Suite.\n",
+ "\n",
+ " ts:run_category(TestCategory)\n",
+ " TestCategory = smoke | essential | bench | atom()\n",
+ " Run the specified category of tests for all apps.\n",
+ " For each app, the tests are defined by the specification:\n",
+ " ../App_test/App_TestCategory.spec.\n",
+ "\n",
+ " ts:run_category(Apps, TestCategory)\n",
+ " Apps = atom() | [atom()],\n",
+ " TestCategory = smoke | essential | bench | atom()\n",
+ " Run the specified category of tests for the given app or apps.\n",
+ "\n",
+ " Note that the test category parameter may have arbitrary value,\n",
+ " but should correspond to an existing test specification with file\n",
+ " name: ../App_test/App_TestCategory.spec.\n",
+ " Predefined categories exist for smoke tests, essential tests and\n",
+ " benchmark tests. The corresponding specs are:\n",
+ " ../*_test/Spec_smoke.spec, ../*_test/Spec_essential.spec and\n",
+ " ../*_test/Spec_bench.spec.\n",
+ "\n",
+ " All above run functions can take an additional last argument,\n",
+ " Options, which is a list of options (e.g. ts:run(App, Options),\n",
+ " or ts:run_category(Apps, TestCategory, Options)).\n",
+ "\n",
+ "Run options supported:\n\n",
+ " batch - Do not start a new xterm\n",
+ " {verbose, Level} - Same as the verbosity option for install\n",
+ " verbose - Same as {verbose, 1}\n",
+ " {vars, Vars} - Variables in addition to the 'variables' file\n",
+ " Can be any of the install options\n",
+ " {trace, TraceSpec}- Start call trace on target and slave nodes\n",
+ " TraceSpec is the name of a file containing\n",
+ " trace specifications or a list of trace\n",
+ " specification elements.\n",
+ " {config, Path} - Specify which directory ts should get it's \n"
+ " config files from. The files should follow\n"
+ " the convention lib/test_server/src/ts*.config.\n"
+ " These config files can also be specified by\n"
+ " setting the TEST_CONFIG_PATH environment\n"
+ " variable to the directory where the config\n"
+ " files are. The default location is\n"
+ " tests/test_server/.\n"
+ "\n",
+ "Supported trace information elements:\n\n",
+ " {tp | tpl, Mod, [] | match_spec()}\n",
+ " {tp | tpl, Mod, Func, [] | match_spec()}\n",
+ " {tp | tpl, Mod, Func, Arity, [] | match_spec()}\n",
+ " {ctp | ctpl, Mod}\n",
+ " {ctp | ctpl, Mod, Func}\n",
+ " {ctp | ctpl, Mod, Func, Arity}\n",
+ "\n\n",
+ "Support functions:\n\n",
+ " ts:tests()\n",
+ " Returns all apps available for testing.\n",
+ "\n",
+ " ts:tests(TestCategory)\n",
+ " Returns all apps that provide tests in the given category.\n",
+ "\n",
+ " ts:suites(App)\n",
+ " Returns all available test suites for App,\n",
+ " i.e. ../App_test/*_SUITE.erl\n",
+ "\n",
+ " ts:categories(App)\n",
+ " Returns all test categories available for App.\n",
+ "\n",
+ " ts:estone()\n",
+ " Runs estone_SUITE in the kernel application with no run options\n",
+ "\n",
+ " ts:estone(Opts)\n",
+ " Runs estone_SUITE in the kernel application with the given\n",
+ " run options\n",
+ "\n",
+ " ts:cross_cover_analyse(Level)\n",
+ " Use after ts:run with option cover or cover_details. Analyses\n",
+ " modules specified with a 'cross' statement in the cover spec file.\n",
+ " Level can be 'overview' or 'details'.\n",
+ "\n",
+ " ts:compile_testcases()\n",
+ " ts:compile_testcases(Apps)\n",
+ " Compiles all test cases for the given apps, for usage in a\n",
+ " cross compilation environment.\n",
+ "\n\n",
+ "Installation (already done):\n\n"
+ ],
+ show_help([H,?install_help]).
+
+show_help(H) ->
+ io:format(lists:flatten(H)).
+
+
+%% Installs tests.
+install() ->
+ ts_install:install(install_local,[]).
+install(Options) when is_list(Options) ->
+ ts_install:install(install_local,Options).
+
+%% run/0
+%% Runs all specs found by ts:tests(), if any, or returns
+%% {error, no_tests_available}. (batch)
+run() ->
+ case ts:tests() of
+ [] ->
+ {error, no_tests_available};
+ _ ->
+ check_and_run(fun(Vars) -> run_all(Vars) end)
+ end.
+run_all(_Vars) ->
+ run_some(tests(), [batch]).
+
+run_some([], _Opts) ->
+ ok;
+run_some(Apps, Opts) ->
+ case proplists:get_value(test_category, Opts) of
+ bench ->
+ check_and_run(fun(Vars) -> ts_benchmark:run(Apps, Opts, Vars) end);
+ _Other ->
+ run_some1(Apps, Opts)
+ end.
+
+run_some1([], _Opts) ->
+ ok;
+run_some1([{App,Mod}|Apps], Opts) ->
+ case run(App, Mod, Opts) of
+ ok -> ok;
+ Error -> io:format("~p: ~p~n",[{App,Mod},Error])
+ end,
+ run_some1(Apps, Opts);
+run_some1([App|Apps], Opts) ->
+ case run(App, Opts) of
+ ok -> ok;
+ Error -> io:format("~p: ~p~n",[App,Error])
+ end,
+ run_some1(Apps, Opts).
+
+%% This can be used from the command line. Both App and
+%% TestCategory must be specified. App may be 'all'
+%% and TestCategory may be 'main'. Examples:
+%% erl -s ts cl_run kernel smoke <options>
+%% erl -s ts cl_run kernel main <options>
+%% erl -s ts cl_run all essential <options>
+%% erl -s ts cl_run all main <options>
+%% When using the 'main' category and running with cover,
+%% one can also use the cross_cover_analysis flag.
+cl_run([App,Cat|Options0]) when is_atom(App) ->
+
+ AllAtomsFun = fun(X) when is_atom(X) -> true;
+ (_) -> false
+ end,
+ Options1 =
+ case lists:all(AllAtomsFun, Options0) of
+ true ->
+ %% Could be from command line
+ lists:map(fun(Opt) ->
+ to_erlang_term(Opt)
+ end, Options0) -- [batch];
+ false ->
+ Options0 -- [batch]
+ end,
+ %% Make sure there is exactly one occurrence of 'batch'
+ Options2 = [batch|Options1],
+
+ Result =
+ case {App,Cat} of
+ {all,main} ->
+ run(tests(), Options2);
+ {all,Cat} ->
+ run_category(Cat, Options2);
+ {_,main} ->
+ run(App, Options2);
+ {_,Cat} ->
+ run_category(App, Cat, Options2)
+ end,
+ case check_for_cross_cover_analysis_flag(Options2) of
+ false ->
+ ok;
+ Level ->
+ cross_cover_analyse(Level)
+ end,
+ Result.
+
+%% run/1
+%% Runs tests for one app (interactive).
+run(App) when is_atom(App) ->
+ Options = check_test_get_opts(App, []),
+ File = atom_to_list(App),
+ run_test(File, [{spec,[File++".spec"]},{allow_user_terms,true}], Options);
+
+%% This can be used from the command line, e.g.
+%% erl -s ts run all <options>
+%% erl -s ts run main <options>
+run([all,main|Opts]) ->
+ cl_run([all,main|Opts]);
+run([all|Opts]) ->
+ cl_run([all,main|Opts]);
+run([main|Opts]) ->
+ cl_run([all,main|Opts]);
+%% Backwards compatible
+run([all_tests|Opts]) ->
+ cl_run([all,main|Opts]);
+
+%% run/1
+%% Runs the main tests for all available apps
+run(Apps) when is_list(Apps) ->
+ run(Apps, [batch]).
+
+%% run/2
+%% Runs the main tests for all available apps
+run(Apps, Opts) when is_list(Apps), is_list(Opts) ->
+ run_some(Apps, Opts);
+
+%% Runs tests for one app with list of suites or with options
+run(App, ModsOrOpts) when is_atom(App),
+ is_list(ModsOrOpts) ->
+ case is_list_of_suites(ModsOrOpts) of
+ false ->
+ run(App, {opts_list,ModsOrOpts});
+ true ->
+ run_some([{App,M} || M <- ModsOrOpts],
+ [batch])
+ end;
+
+run(App, {opts_list,Opts}) ->
+ Options = check_test_get_opts(App, Opts),
+ File = atom_to_list(App),
+
+ %% check if other test category than main has been specified
+ {CatSpecName,TestCat} =
+ case proplists:get_value(test_category, Opts) of
+ undefined ->
+ {"",main};
+ Cat ->
+ {"_" ++ atom_to_list(Cat),Cat}
+ end,
+
+ WhatToDo =
+ case App of
+ %% Known to exist but fails generic tests below
+ emulator -> test;
+ system -> test;
+ erl_interface -> test;
+ epmd -> test;
+ _ ->
+ case code:lib_dir(App) of
+ {error,bad_name} ->
+ %% Application does not exist
+ skip;
+ Path ->
+ case file:read_file_info(filename:join(Path,"ebin")) of
+ {ok,#file_info{type=directory}} ->
+ %% Erlang application is built
+ test;
+ _ ->
+ case filelib:wildcard(
+ filename:join([Path,"priv","*.jar"])) of
+ [] ->
+ %% The application is not built
+ skip;
+ [_|_] ->
+ %% Java application is built
+ test
+ end
+ end
+ end
+ end,
+ case WhatToDo of
+ skip ->
+ SkipSpec = create_skip_spec(App, suites(App)),
+ run_test(File, [{spec,[SkipSpec]}], Options);
+ test when TestCat == bench ->
+ check_and_run(fun(Vars) ->
+ ts_benchmark:run([App], Options, Vars)
+ end);
+ test ->
+ Spec = File ++ CatSpecName ++ ".spec",
+ run_test(File, [{spec,[Spec]},{allow_user_terms,true}], Options)
+ end;
+
+%% Runs one module for an app (interactive)
+run(App, Mod) when is_atom(App), is_atom(Mod) ->
+ run_test({atom_to_list(App),Mod},
+ [{suite,Mod}],
+ [interactive]).
+
+%% run/3
+%% Run one module for an app with Opts
+run(App, Mod, Opts) when is_atom(App),
+ is_atom(Mod),
+ is_list(Opts) ->
+ Options = check_test_get_opts(App, Opts),
+ run_test({atom_to_list(App),Mod},
+ [{suite,Mod}], Options);
+
+%% Run multiple modules with Opts
+run(App, Mods, Opts) when is_atom(App),
+ is_list(Mods),
+ is_list(Opts) ->
+ run_some([{App,M} || M <- Mods], Opts);
+
+%% Runs one test case in a module.
+run(App, Mod, Case) when is_atom(App),
+ is_atom(Mod),
+ is_atom(Case) ->
+ Options = check_test_get_opts(App, []),
+ Args = [{suite,Mod},{testcase,Case}],
+ run_test(atom_to_list(App), Args, Options);
+
+%% Runs one or more groups in a module.
+run(App, Mod, Grs={group,_Groups}) when is_atom(App),
+ is_atom(Mod) ->
+ Options = check_test_get_opts(App, []),
+ Args = [{suite,Mod},Grs],
+ run_test(atom_to_list(App), Args, Options);
+
+%% Runs one or more test cases in a module.
+run(App, Mod, TCs={testcase,_Cases}) when is_atom(App),
+ is_atom(Mod) ->
+ Options = check_test_get_opts(App, []),
+ Args = [{suite,Mod},TCs],
+ run_test(atom_to_list(App), Args, Options).
+
+%% run/4
+%% Run one test case in a module with Options.
+run(App, Mod, Case, Opts) when is_atom(App),
+ is_atom(Mod),
+ is_atom(Case),
+ is_list(Opts) ->
+ Options = check_test_get_opts(App, Opts),
+ Args = [{suite,Mod},{testcase,Case}],
+ run_test(atom_to_list(App), Args, Options);
+
+%% Run one or more test cases in a module with Options.
+run(App, Mod, {testcase,Cases}, Opts) when is_atom(App),
+ is_atom(Mod) ->
+ run(App, Mod, Cases, Opts);
+run(App, Mod, Cases, Opts) when is_atom(App),
+ is_atom(Mod),
+ is_list(Cases),
+ is_list(Opts) ->
+ Options = check_test_get_opts(App, Opts),
+ Args = [{suite,Mod},Cases],
+ run_test(atom_to_list(App), Args, Options);
+
+%% Run one or more test cases in a group.
+run(App, Mod, Gr={group,_Group}, {testcase,Cases}) when is_atom(App),
+ is_atom(Mod) ->
+ run(App, Mod, Gr, Cases, [batch]);
+
+
+%% Run one or more groups in a module with Options.
+run(App, Mod, Grs={group,_Groups}, Opts) when is_atom(App),
+ is_atom(Mod),
+ is_list(Opts) ->
+ Options = check_test_get_opts(App, Opts),
+ Args = [{suite,Mod},Grs],
+ run_test(atom_to_list(App), Args, Options).
+
+%% run/5
+%% Run one or more test cases in a group with Options.
+run(App, Mod, Group, Cases, Opts) when is_atom(App),
+ is_atom(Mod),
+ is_list(Opts) ->
+ Group1 = if is_tuple(Group) -> Group; true -> {group,Group} end,
+ Cases1 = if is_tuple(Cases) -> Cases; true -> {testcase,Cases} end,
+ Options = check_test_get_opts(App, Opts),
+ Args = [{suite,Mod},Group1,Cases1],
+ run_test(atom_to_list(App), Args, Options).
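+
+%% Example (hypothetical group/case names): the two calls below are
+%% equivalent, since bare atoms are wrapped into {group,_} and
+%% {testcase,_} tuples:
+%%   ts:run(kernel, file_SUITE, my_group, my_case, [batch]).
+%%   ts:run(kernel, file_SUITE, {group,my_group}, {testcase,my_case}, [batch]).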
+
+%% run_category/1
+run_category(TestCategory) when is_atom(TestCategory) ->
+ run_category(TestCategory, [batch]).
+
+%% run_category/2
+run_category(TestCategory, Opts) when is_atom(TestCategory),
+ is_list(Opts) ->
+ case ts:tests(TestCategory) of
+ [] ->
+ {error, no_tests_available};
+ Apps ->
+ Opts1 = [{test_category,TestCategory} | Opts],
+ run_some(Apps, Opts1)
+ end;
+
+run_category(Apps, TestCategory) when is_atom(TestCategory) ->
+ run_category(Apps, TestCategory, [batch]).
+
+%% run_category/3
+run_category(App, TestCategory, Opts) ->
+ Apps = if is_atom(App) -> [App];
+ is_list(App) -> App
+ end,
+ Opts1 = [{test_category,TestCategory} | Opts],
+ run_some(Apps, Opts1).
+
+%%-----------------------------------------------------------------
+%% Functions kept for backwards compatibility
+
+bench() ->
+ run_category(bench, []).
+bench(Opts) when is_list(Opts) ->
+ run_category(bench, Opts);
+bench(App) ->
+ run_category(App, bench, []).
+bench(App, Opts) when is_atom(App) ->
+ run_category(App, bench, Opts);
+bench(Apps, Opts) when is_list(Apps) ->
+ run_category(Apps, bench, Opts).
+
+benchmarks() ->
+ tests(bench).
+
+smoke_test() ->
+ run_category(smoke, []).
+smoke_test(Opts) when is_list(Opts) ->
+ run_category(smoke, Opts);
+smoke_test(App) ->
+ run_category(App, smoke, []).
+smoke_test(App, Opts) when is_atom(App) ->
+ run_category(App, smoke, Opts);
+smoke_test(Apps, Opts) when is_list(Apps) ->
+ run_category(Apps, smoke, Opts).
+
+smoke_tests() ->
+ tests(smoke).
+
+%%-----------------------------------------------------------------
+
+is_list_of_suites(List) ->
+ lists:all(fun(Suite) ->
+ S = if is_atom(Suite) -> atom_to_list(Suite);
+ true -> Suite
+ end,
+ try lists:last(string:tokens(S,"_")) of
+ "SUITE" -> true;
+ "suite" -> true;
+ _ -> false
+ catch
+ _:_ -> false
+ end
+ end, List).
+
+%% Create a spec to skip all suites. This is used when the application
+%% to be tested is not part of the OTP release to be tested.
+create_skip_spec(App, SuitesToSkip) ->
+ {ok,Cwd} = file:get_cwd(),
+ AppString = atom_to_list(App),
+ Specname = AppString++"_skip.spec",
+ {ok,D} = file:open(filename:join([filename:dirname(Cwd),
+ AppString++"_test",Specname]),
+ [write]),
+ TestDir = "\"../"++AppString++"_test\"",
+ io:format(D,"{suites, "++TestDir++", all}.~n",[]),
+ io:format(D,"{skip_suites, "++TestDir++", ~w, \"Skipped as application"
+ " is not in path!\"}.",[SuitesToSkip]),
+ Specname.
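+
+%% For illustration (assuming App = foo with a single suite foo_SUITE),
+%% the generated foo_skip.spec would contain roughly:
+%%   {suites, "../foo_test", all}.
+%%   {skip_suites, "../foo_test", [foo_SUITE],
+%%    "Skipped as application is not in path!"}.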
+
+%% Check testspec for App to be valid and get possible options
+%% from the list.
+check_test_get_opts(App, Opts) ->
+ validate_test(App),
+ Mode = configmember(batch, {batch, interactive}, Opts),
+ Vars = configvars(Opts),
+ Trace = get_config(trace,Opts),
+ ConfigPath = get_config(config,Opts),
+ KeepTopcase = configmember(keep_topcase, {keep_topcase,[]}, Opts),
+ Cover = configcover(App,Opts),
+ lists:flatten([Vars,Mode,Trace,KeepTopcase,Cover,ConfigPath]).
+
+to_erlang_term(Atom) ->
+ String = atom_to_list(Atom),
+ {ok, Tokens, _} = erl_scan:string(lists:append([String, ". "])),
+ {ok, Term} = erl_parse:parse_term(Tokens),
+ Term.
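+
+%% Example (illustrative): a command line option arrives as the atom
+%% '{verbose,1}'; to_erlang_term('{verbose,1}') scans and parses it
+%% back into the tuple {verbose,1}.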
+
+%% Validate that Testspec really is a testspec,
+%% and exit if not.
+validate_test(Testspec) ->
+ case lists:member(Testspec, tests()) of
+ true ->
+ ok;
+ false ->
+ io:format("This testspec does not seem to be "
+ "available.~n Please try ts:tests() "
+ "to see available tests.~n"),
+ exit(self(), {error, test_not_available})
+ end.
+
+configvars(Opts) ->
+ case lists:keysearch(vars, 1, Opts) of
+ {value, {vars, List}} ->
+ List0 = special_vars(Opts),
+ Key = fun(T) -> element(1,T) end,
+ DelDupList =
+ lists:filter(fun(V) ->
+ case lists:keysearch(Key(V),1,List0) of
+ {value,_} -> false;
+ _ -> true
+ end
+ end, List),
+ {vars, [List0|DelDupList]};
+ _ ->
+ {vars, special_vars(Opts)}
+ end.
+
+%% Allow some shortcuts in the options...
+special_vars(Opts) ->
+ SpecVars =
+ case lists:member(verbose, Opts) of
+ true ->
+ [{verbose, 1}];
+ false ->
+ case lists:keysearch(verbose, 1, Opts) of
+ {value, {verbose, Lvl}} ->
+ [{verbose, Lvl}];
+ _ ->
+ [{verbose, 0}]
+ end
+ end,
+ SpecVars1 =
+ case lists:keysearch(diskless, 1, Opts) of
+ {value,{diskless, true}} ->
+ [{diskless, true} | SpecVars];
+ _ ->
+ SpecVars
+ end,
+ case lists:keysearch(testcase_callback, 1, Opts) of
+ {value,{testcase_callback, CBM, CBF}} ->
+ [{ts_testcase_callback, {CBM,CBF}} | SpecVars1];
+ {value,{testcase_callback, CB}} ->
+ [{ts_testcase_callback, CB} | SpecVars1];
+ _ ->
+ SpecVars1
+ end.
+
+get_config(Key,Config) ->
+ case lists:keysearch(Key,1,Config) of
+ {value,Value} -> Value;
+ false -> []
+ end.
+
+configcover(Testspec,[cover|_]) ->
+ {cover,Testspec,default_coverfile(Testspec),overview};
+configcover(Testspec,[cover_details|_]) ->
+ {cover,Testspec,default_coverfile(Testspec),details};
+configcover(Testspec,[{cover,File}|_]) ->
+ {cover,Testspec,File,overview};
+configcover(Testspec,[{cover_details,File}|_]) ->
+ {cover,Testspec,File,details};
+configcover(Testspec,[_H|T]) ->
+ configcover(Testspec,T);
+configcover(_Testspec,[]) ->
+ [].
+
+default_coverfile(Testspec) ->
+ {ok,Cwd} = file:get_cwd(),
+ CoverFile = filename:join([filename:dirname(Cwd),
+ atom_to_list(Testspec)++"_test",
+ atom_to_list(Testspec)++".cover"]),
+ case filelib:is_file(CoverFile) of
+ true ->
+ CoverFile;
+ false ->
+ none
+ end.
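+
+%% Illustration (hypothetical app): running ts:run(kernel, [cover]) makes
+%% configcover/2 return {cover,kernel,CoverFile,overview}, where CoverFile
+%% is the file kernel_test/kernel.cover under the parent of the current
+%% directory if it exists, and the atom 'none' otherwise.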
+
+configmember(Member, {True, False}, Config) ->
+ case lists:member(Member, Config) of
+ true ->
+ True;
+ false ->
+ False
+ end.
+
+
+check_for_cross_cover_analysis_flag(Config) ->
+ check_for_cross_cover_analysis_flag(Config,false,false).
+check_for_cross_cover_analysis_flag([cover|Config],false,false) ->
+ check_for_cross_cover_analysis_flag(Config,overview,false);
+check_for_cross_cover_analysis_flag([cover|_Config],false,true) ->
+ overview;
+check_for_cross_cover_analysis_flag([cover_details|Config],false,false) ->
+ check_for_cross_cover_analysis_flag(Config,details,false);
+check_for_cross_cover_analysis_flag([cover_details|_Config],false,true) ->
+ details;
+check_for_cross_cover_analysis_flag([cross_cover_analysis|Config],false,_) ->
+ check_for_cross_cover_analysis_flag(Config,false,true);
+check_for_cross_cover_analysis_flag([cross_cover_analysis|_Config],Level,_) ->
+ Level;
+check_for_cross_cover_analysis_flag([_|Config],Level,CrossFlag) ->
+ check_for_cross_cover_analysis_flag(Config,Level,CrossFlag);
+check_for_cross_cover_analysis_flag([],_,_) ->
+ false.
+
+
+%% Returns all available apps.
+tests() ->
+ {ok, Cwd} = file:get_cwd(),
+ ts_lib:specs(Cwd).
+
+%% Returns all apps that provide tests in the given test category
+tests(main) ->
+ {ok, Cwd} = file:get_cwd(),
+ ts_lib:specs(Cwd);
+tests(bench) ->
+ ts_benchmark:benchmarks();
+tests(TestCategory) ->
+ {ok, Cwd} = file:get_cwd(),
+ ts_lib:specialized_specs(Cwd, atom_to_list(TestCategory)).
+
+%% Returns a list of available test suites for App.
+suites(App) ->
+ {ok, Cwd} = file:get_cwd(),
+ ts_lib:suites(Cwd, atom_to_list(App)).
+
+%% Returns all available test categories for App
+categories(App) ->
+ {ok, Cwd} = file:get_cwd(),
+ ts_lib:test_categories(Cwd, atom_to_list(App)).
+
+%%
+%% estone/0, estone/1
+%% Opts = same as Opts or Config for the run(...) function,
+%% e.g. [batch]
+%%
+estone() -> run(emulator,estone_SUITE).
+estone(Opts) when is_list(Opts) -> run(emulator,estone_SUITE,Opts).
+
+%%
+%% cross_cover_analyse/1
+%% Level = details | overview
+%% Can be called on any node after a test (with cover) is
+%% completed. The node's current directory must be the same as when
+%% the tests were run.
+%%
+cross_cover_analyse([Level]) ->
+ cross_cover_analyse(Level);
+cross_cover_analyse(Level) ->
+ Apps = get_last_app_tests(),
+ test_server_ctrl:cross_cover_analyse(Level,Apps).
+
+get_last_app_tests() ->
+ AllTests = filelib:wildcard(filename:join(["*","*_test.logs"])),
+ {ok,RE} = re:compile("^[^/]*/[^\.]*\.(.*)_test\.logs$"),
+ get_last_app_tests(AllTests,RE,[]).
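+
+%% Illustration (hypothetical log directory): a path such as
+%% "somedir/anything.kernel_test.logs" matches the regexp above and
+%% contributes the pair {kernel,"somedir"}; only the latest (greatest)
+%% directory per application is kept.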
+
+get_last_app_tests([Dir|Dirs],RE,Acc) ->
+ NewAcc =
+ case re:run(Dir,RE,[{capture,all,list}]) of
+ {match,[Dir,AppStr]} ->
+ Dir1 = filename:dirname(Dir), % cover logs in ct_run.<t> dir
+ App = list_to_atom(AppStr),
+ case lists:keytake(App,1,Acc) of
+ {value,{App,LastDir},Rest} ->
+ if Dir1 > LastDir ->
+ [{App,Dir1}|Rest];
+ true ->
+ Acc
+ end;
+ false ->
+ [{App,Dir1} | Acc]
+ end;
+ _ ->
+ Acc
+ end,
+ get_last_app_tests(Dirs,RE,NewAcc);
+get_last_app_tests([],_,Acc) ->
+ Acc.
+
+%%% Implementation.
+
+check_and_run(Fun) ->
+ case file:consult(?variables) of
+ {ok, Vars} ->
+ check_and_run(Fun, Vars);
+ {error, Error} when is_atom(Error) ->
+ {error, not_installed};
+ {error, Reason} ->
+ {error, {bad_installation, file:format_error(Reason)}}
+ end.
+
+check_and_run(Fun, Vars) ->
+ Platform = ts_install:platform_id(Vars),
+ case lists:keysearch(platform_id, 1, Vars) of
+ {value, {_, Platform}} ->
+ case catch apply(Fun, [Vars]) of
+ {'EXIT', Reason} ->
+ exit(Reason);
+ Other ->
+ Other
+ end;
+ {value, {_, OriginalPlatform}} ->
+ io:format("These test suites were installed for '~s'.\n",
+ [OriginalPlatform]),
+ io:format("But the current platform is '~s'.\nPlease "
+ "install for this platform before running "
+ "any tests.\n", [Platform]),
+ {error, inconsistent_platforms};
+ false ->
+ {error, {bad_installation, no_platform}}
+ end.
+
+run_test(File, Args, Options) ->
+ check_and_run(fun(Vars) -> run_test(File, Args, Options, Vars) end).
+
+run_test(File, Args, Options, Vars) ->
+ ts_run:run(File, Args, Options, Vars).
+
+
+%% This module provides some convenient shortcuts to running
+%% the test server from within a started Erlang shell.
+%% (These are here for backwards compatibility.)
+%%
+%% r()
+%% r(Opts)
+%% r(SpecOrMod)
+%% r(SpecOrMod, Opts)
+%% r(Mod, Case)
+%% r(Mod, Case, Opts)
+%% Each of these functions starts the test server if it
+%% isn't already running, then runs the test case(s) selected
+%% by the arguments.
+%% SpecOrMod can be a module name or the name of a test spec file,
+%% with the extension .spec or .spec.OsType. The module Mod will
+%% be reloaded before running the test cases.
+%% Opts = [Opt],
+%% Opt = {Cover,AppOrCoverFile} | {Cover,App,CoverFile}
+%% Cover = cover | cover_details
+%% AppOrCoverFile = App | CoverFile
+%% App = atom(), an application name
+%% CoverFile = string(), name of a cover file
+%% (see doc of test_server_ctrl:cover/2/3)
+%%
+%% i()
+%% Shows information about the jobs being run, by dumping
+%% the process information for the test_server.
+%%
+%% l(Mod)
+%% This function reloads a module just like c:l/1, but works
+%% even for a module in one of the sticky library directories
+%% (for instance, lists can be reloaded).
+
+%% Runs all test cases in the current directory.
+
+r() ->
+ r([]).
+r(Opts) when is_list(Opts), is_atom(hd(Opts)) ->
+ ensure_ts_started(Opts),
+ test_server_ctrl:add_dir("current_dir", ".");
+
+%% Checks if argument is a spec file or a module
+%% (spec file must be named "*.spec" or "*.spec.OsType")
+%% If module, reloads module and runs all test cases in it.
+%% If spec, runs all test cases in it.
+
+r(SpecOrMod) ->
+ r(SpecOrMod,[]).
+r(SpecOrMod,Opts) when is_list(Opts) ->
+ ensure_ts_started(Opts),
+ case filename:extension(SpecOrMod) of
+ [] ->
+ l(SpecOrMod),
+ test_server_ctrl:add_module(SpecOrMod);
+ ".spec" ->
+ test_server_ctrl:add_spec(SpecOrMod);
+ _ ->
+ Spec2 = filename:rootname(SpecOrMod),
+ case filename:extension(Spec2) of
+ ".spec" ->
+ %% *.spec.Type
+ test_server_ctrl:add_spec(SpecOrMod);
+ _ ->
+ {error, unknown_filetype}
+ end
+ end;
+
+%% Reloads the given module and runs the given test case in it.
+
+r(Mod, Case) ->
+ r(Mod,Case,[]).
+r(Mod, Case, Opts) ->
+ ensure_ts_started(Opts),
+ l(Mod),
+ test_server_ctrl:add_case(Mod, Case).
+
+%% Shows information about the jobs being run.
+
+i() ->
+ ensure_ts_started([]),
+ hformat("Job", "Current", "Total", "Success", "Failed", "Skipped"),
+ i(test_server_ctrl:jobs()).
+
+i([{Name, Pid}|Rest]) when is_pid(Pid) ->
+ {dictionary, PI} = process_info(Pid, dictionary),
+ {value, {_, CaseNum}} = lists:keysearch(test_server_case_num, 1, PI),
+ {value, {_, Cases}} = lists:keysearch(test_server_cases, 1, PI),
+ {value, {_, Failed}} = lists:keysearch(test_server_failed, 1, PI),
+ {value, {_, {UserSkipped,AutoSkipped}}} = lists:keysearch(test_server_skipped, 1, PI),
+ {value, {_, Ok}} = lists:keysearch(test_server_ok, 1, PI),
+ nformat(Name, CaseNum, Cases, Ok, Failed, UserSkipped+AutoSkipped),
+ i(Rest);
+i([]) ->
+ ok.
+
+hformat(A1, A2, A3, A4, A5, A6) ->
+ io:format("~-20s ~8s ~8s ~8s ~8s ~8s~n", [A1,A2,A3,A4,A5,A6]).
+
+nformat(A1, A2, A3, A4, A5, A6) ->
+ io:format("~-20s ~8w ~8w ~8w ~8w ~8w~n", [A1,A2,A3,A4,A5,A6]).
+
+%% Force load of a module even if it is in a sticky directory.
+
+l(Mod) ->
+ case do_load(Mod) of
+ {error, sticky_directory} ->
+ Dir = filename:dirname(code:which(Mod)),
+ code:unstick_dir(Dir),
+ do_load(Mod),
+ code:stick_dir(Dir);
+ X ->
+ X
+ end.
+
+
+ensure_ts_started(Opts) ->
+ Pid = case whereis(test_server_ctrl) of
+ undefined ->
+ test_server_ctrl:start();
+ P when is_pid(P) ->
+ P
+ end,
+ case Opts of
+ [{Cover,AppOrCoverFile}] when Cover==cover; Cover==cover_details ->
+ test_server_ctrl:cover(AppOrCoverFile,cover_type(Cover));
+ [{Cover,App,CoverFile}] when Cover==cover; Cover==cover_details ->
+ test_server_ctrl:cover(App,CoverFile,cover_type(Cover));
+ _ ->
+ ok
+ end,
+ Pid.
+
+cover_type(cover) -> overview;
+cover_type(cover_details) -> details.
+
+do_load(Mod) ->
+ code:purge(Mod),
+ code:load_file(Mod).
+
+
+compile_testcases() ->
+ compile_datadirs("../*/*_data").
+
+compile_testcases(App) when is_atom(App) ->
+ compile_testcases([App]);
+compile_testcases([App | T]) ->
+ compile_datadirs(io_lib:format("../~s_test/*_data", [App])),
+ compile_testcases(T);
+compile_testcases([]) ->
+ ok.
+
+compile_datadirs(DataDirs) ->
+ {ok,Variables} = file:consult("variables"),
+
+ lists:foreach(fun(Dir) ->
+ ts_lib:make_non_erlang(Dir, Variables)
+ end,
+ filelib:wildcard(DataDirs)).
diff --git a/lib/common_test/test_server/ts.hrl b/lib/common_test/test_server/ts.hrl
new file mode 100644
index 0000000000..4c940fdc4f
--- /dev/null
+++ b/lib/common_test/test_server/ts.hrl
@@ -0,0 +1,38 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% Defines ripped out from test_server (these must remain the same
+%% as in test_server).
+
+-define(logdir_ext, ".logs").
+-define(suitelog_name, "suite.log").
+-define(last_file, "last_name").
+-define(last_link, "last_link").
+-define(last_test, "last_test").
+-define(run_summary, "suite.summary").
+-define(cover_total,"total_cover.log").
+-define(variables, "variables").
+-define(cross_variables, "variables-cross").
+-define(LF, [10]). % Newline in VxWorks script
+-define(CHAR_PER_LINE, 60). % Characters per VxWorks script building line
+-define(CROSS_COOKIE, "cross"). % cookie used when cross platform testing
+-define(TS_PORT, 7887).
+-define(TEST_SERVER_SCRIPT, "test_server_vx.script").
+
diff --git a/lib/common_test/test_server/ts.unix.config b/lib/common_test/test_server/ts.unix.config
new file mode 100644
index 0000000000..1ba5d9033e
--- /dev/null
+++ b/lib/common_test/test_server/ts.unix.config
@@ -0,0 +1,6 @@
+%% -*- erlang -*-
+
+%% Always run a (VNC) X server on host
+%% {xserver, "xserver.example.com:66"}.
+
+{unix,[{telnet,"belegost"},{username,"telnet-test"},{password,"tset-tenlet"},{keep_alive,true}]}.
diff --git a/lib/common_test/test_server/ts.win32.config b/lib/common_test/test_server/ts.win32.config
new file mode 100644
index 0000000000..cae587bea8
--- /dev/null
+++ b/lib/common_test/test_server/ts.win32.config
@@ -0,0 +1,8 @@
+%% -*- erlang -*-
+
+%%% There is no equivalent command to ypmatch on Win32... :-(
+%{hardcoded_hosts,
+% [{"127.0.0.1","localhost"}]}.
+
+%{hardcoded_ipv6_hosts,
+% [{"::1","localhost"}]}.
diff --git a/lib/common_test/test_server/ts_autoconf_win32.erl b/lib/common_test/test_server/ts_autoconf_win32.erl
new file mode 100644
index 0000000000..288305b406
--- /dev/null
+++ b/lib/common_test/test_server/ts_autoconf_win32.erl
@@ -0,0 +1,256 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%% Purpose : Autoconf for Windows.
+
+-module(ts_autoconf_win32).
+-export([configure/0]).
+
+-include("ts.hrl").
+
+configure() ->
+ case variables() of
+ {ok, Vars} ->
+ ts_lib:subst_file("conf_vars.in", "conf_vars", Vars);
+ Error ->
+ Error
+ end.
+
+variables() ->
+ run_tests(tests(), []).
+
+run_tests([{Prompt, Tester}|Rest], Vars) ->
+ io:format("checking ~s... ", [Prompt]),
+ case catch Tester(Vars) of
+ {'EXIT', Reason} ->
+ io:format("FAILED~nExit status: ~p~n", [Reason]),
+ {error, auto_conf_failed};
+ {Result, NewVars} ->
+ io:format("~s~n", [lists:concat([Result])]),
+ run_tests(Rest, NewVars)
+ end;
+run_tests([], Vars) ->
+ {ok, Vars}.
+
+%%% The tests.
+
+tests() ->
+ [{"host system type", fun system_type/1},
+ {"CPU type", fun cpu/1},
+ {"for C compiler", fun c_compiler/1},
+ {"for make program", fun make/1},
+ {"for location of SSL libraries", fun ssl/1},
+ {"for location of Java compiler", fun javac/1}].
+
+system_type(Vars) ->
+ Os = case os:type() of
+ {win32, nt} ->
+ case os:version() of
+ {4,_,_} -> "Windows NT";
+ {5,0,_} -> "Windows 2000";
+ {5,1,_} -> "Windows XP";
+ {5,2,_} -> "Windows 2003";
+ {6,0,_} -> "Windows Vista";
+ {6,1,_} -> "Windows 7";
+ {_,_,_} -> "Windows NCC-1701-D"
+ end;
+ {win32, windows} ->
+ case os:version() of
+ {4,0,_} -> "Windows 95";
+ {4,10,_} -> "Windows 98"
+ end;
+ {win32, _} -> "Windows"
+ end,
+ {Os, [{host_os, Os}, {host, "win32"}|Vars]}.
+
+cpu(Vars) ->
+ Arch = os:getenv("PROCESSOR_ARCHITECTURE"),
+ Level0 = os:getenv("PROCESSOR_Level"),
+ Cpu = case {Arch, Level0} of
+ {"x86", Level} when is_list(Level) ->
+ "i" ++ Level ++ "86";
+ {Other, _Level} when is_list(Other) ->
+ Other;
+ {false, _} ->
+ "i386"
+ end,
+ {Cpu, [{host_cpu, Cpu}|Vars]}.
+
+c_compiler(Vars) ->
+ try
+ CompTests = [{msc, fun visual_cxx/1},
+ {gnuc, fun mingw32/1}],
+ %% First try to find the same compiler that the system
+ %% was built with...
+ UsedCompiler = case erlang:system_info(c_compiler_used) of
+ {UsedCmplr, _} ->
+ case lists:keysearch(UsedCmplr, 1, CompTests) of
+ {value, {UsedCmplr, CompTest}} ->
+ CompTest(Vars);
+ _ ->
+ ok
+ end,
+ UsedCmplr;
+ undefined ->
+ undefined
+ end,
+ %% ... then try to find a compiler...
+ lists:foreach(fun ({Cmplr, _CmplrTst}) when Cmplr =:= UsedCompiler ->
+ ok; % Have already checked for this one
+ ({_Cmplr, CmplrTst}) ->
+ CmplrTst(Vars)
+ end,
+ CompTests),
+ {no, Vars}
+ catch
+ throw:{_Path, _NewVars} = Res -> Res
+ end.
+
+visual_cxx(Vars) ->
+ case os:find_executable("cl") of
+ false ->
+ {no, Vars};
+ Path when is_list(Path) ->
+ {DEFAULT_THR_LIB,
+ ERTS_THR_LIB,
+ DLL,
+ DBG_LINK,
+ DBG_COMP,
+ OPT} =
+ case is_debug_build() of
+ true ->
+ {"-MTd ",
+ "-MDd ",
+ "-LDd ",
+ "-debug -pdb:none ",
+ "-Z7 -DDEBUG",
+ " "};
+ false ->
+ {"-MT ",
+ "-MD ",
+ "-LD ",
+ " ",
+ " ",
+ "-Ox "}
+ end,
+ WIN32 = "-D__WIN32__ ",
+ ERTS_CFLAGS = ERTS_THR_LIB ++ WIN32 ++ OPT ++ DBG_COMP,
+ LIBS = "ws2_32.lib",
+ CC = "cl -nologo",
+ throw({Path, [{'CC', CC},
+ {'LD', CC},
+ {'SHLIB_LD', CC},
+ {'SHLIB_LDFLAGS', ERTS_THR_LIB ++ DLL},
+ {'SHLIB_LDLIBS', "-link " ++ DBG_LINK ++ "kernel32.lib"},
+ {'SHLIB_EXTRACT_ALL', ""},
+ {'CFLAGS', DEFAULT_THR_LIB ++ WIN32 ++ DBG_COMP},
+ {'EI_CFLAGS', DEFAULT_THR_LIB ++ WIN32 ++ DBG_COMP},
+ {'ERTS_CFLAGS', ERTS_CFLAGS},
+ {'SHLIB_CFLAGS', ERTS_CFLAGS++DLL},
+ {'CROSSLDFLAGS', ""},
+ {'DEFS', common_c_defs()},
+ {'SHLIB_SUFFIX', ".dll"},
+ {'ERTS_LIBS', ERTS_THR_LIB ++ LIBS},
+ {'LIBS', DEFAULT_THR_LIB ++ "-link " ++ DBG_LINK ++ LIBS},
+ {obj,".obj"},
+ {exe, ".exe"},
+ {test_c_compiler, "{msc, undefined}"}
+ | Vars]})
+ end.
+
+mingw32(Vars) ->
+ Gcc = "mingw32-gcc",
+ case os:find_executable(Gcc) of
+ false ->
+ {no, Vars};
+ Path when is_list(Path) ->
+ {DBG_COMP,
+ OPT} =
+ case is_debug_build() of
+ true ->
+ {"-DDEBUG",
+ " "};
+ false ->
+ {" ",
+ "-O2 "}
+ end,
+ WIN32 = "-D__WIN32__ ",
+ ERTS_CFLAGS = WIN32 ++ "-g " ++ OPT ++ DBG_COMP,
+ LIBS = "-lws2_32",
+ CC = Gcc,
+ throw({Path, [{'CC', CC},
+ {'LD', CC},
+ {'SHLIB_LD', CC},
+ {'SHLIB_LDFLAGS', "-shared "},
+ {'SHLIB_LDLIBS', " -lkernel32"},
+ {'SHLIB_EXTRACT_ALL', ""},
+ {'CFLAGS', WIN32 ++ DBG_COMP},
+ {'EI_CFLAGS', WIN32 ++ DBG_COMP},
+ {'ERTS_CFLAGS', ERTS_CFLAGS},
+ {'SHLIB_CFLAGS', ERTS_CFLAGS},
+ {'CROSSLDFLAGS', ""},
+ {'DEFS', common_c_defs()},
+ {'SHLIB_SUFFIX', ".dll"},
+ {'ERTS_LIBS', LIBS},
+ {'LIBS', LIBS},
+ {obj,".o"},
+ {exe, ".exe"},
+ {test_c_compiler, "{gnuc, undefined}"}
+ | Vars]})
+ end.
+
+common_c_defs() ->
+ "-DHAVE_STRERROR=1".
+
+make(Vars) ->
+ try
+ find_make("nmake -nologo", Vars),
+ find_make("mingw32-make", Vars)
+ catch
+ throw:{_Path, _NewVars} = Res -> Res
+ end.
+
+find_make(MakeCmd, Vars) ->
+ [Make|_] = string:tokens(MakeCmd, " \t"),
+ case os:find_executable(Make) of
+ false ->
+ {no, Vars};
+ Path when is_list(Path) ->
+ throw({Path, [{make_command, MakeCmd} | Vars]})
+ end.
+
+ssl(Vars) ->
+ {"win32",[{'SSLEAY_ROOT',"win32"}|Vars]}.
+
+javac(Vars) ->
+ case os:find_executable("javac") of
+ false ->
+ {no, Vars};
+ Path when is_list(Path) ->
+ {Path, [{'JAVAC', "javac"} | Vars]}
+ end.
+
+is_debug_build() ->
+ case catch string:str(erlang:system_info(system_version), "debug") of
+ Int when is_integer(Int), Int > 0 ->
+ true;
+ _ ->
+ false
+ end.
diff --git a/lib/common_test/test_server/ts_benchmark.erl b/lib/common_test/test_server/ts_benchmark.erl
new file mode 100644
index 0000000000..3e55edefb0
--- /dev/null
+++ b/lib/common_test/test_server/ts_benchmark.erl
@@ -0,0 +1,87 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012-2012. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(ts_benchmark).
+
+-include_lib("common_test/include/ct_event.hrl").
+-include_lib("kernel/include/file.hrl").
+-include("ts.hrl").
+
+-export([benchmarks/0,
+ run/3]).
+
+%% gen_event callbacks
+-export([init/1, handle_event/2]).
+
+benchmarks() ->
+ {ok, Cwd} = file:get_cwd(),
+ ts_lib:specialized_specs(Cwd,"bench").
+
+run(Specs, Opts, Vars) ->
+ {ok, Cwd} = file:get_cwd(),
+ {{YY,MM,DD},{HH,Mi,SS}} = calendar:local_time(),
+ BName = lists:concat([YY,"_",MM,"_",DD,"T",HH,"_",Mi,"_",SS]),
+ BDir = filename:join([Cwd,BName]),
+ file:make_dir(BDir),
+ [ts_run:run(atom_to_list(Spec),
+ [{spec, [atom_to_list(Spec)++"_bench.spec"]}],
+ [{event_handler, {ts_benchmark, [Spec,BDir]}}|Opts],Vars)
+ || Spec <- Specs],
+ file:delete(filename:join(Cwd,"latest_benchmark")),
+ {ok,D} = file:open(filename:join(Cwd,"latest_benchmark"),[write]),
+ io:format(D,BDir,[]),
+ file:close(D).
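+
+%% Note that the benchmark directory name is built from
+%% calendar:local_time() without zero padding; for example, a run started
+%% 2014-05-08 09:03:07 ends up in "2014_5_8T9_3_7". The path of that
+%% directory is written to the file "latest_benchmark".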
+
+
+%%%===================================================================
+%%% gen_event callbacks
+%%%===================================================================
+
+-record(state, { spec, suite, tc, stats_dir}).
+
+init([Spec,Dir]) ->
+ {ok, #state{ spec = Spec, stats_dir = Dir }}.
+
+handle_event(#event{name = tc_start, data = {Suite,Tc}}, State) ->
+ {ok,State#state{ suite = Suite, tc = Tc}};
+handle_event(#event{name = benchmark_data, data = Data}, State) ->
+ Spec = proplists:get_value(application, Data, State#state.spec),
+ Suite = proplists:get_value(suite, Data, State#state.suite),
+ Tc = proplists:get_value(name, Data, State#state.tc),
+ Value = proplists:get_value(value, Data),
+ {ok, D} = file:open(filename:join(
+ [State#state.stats_dir,
+ lists:concat([e(Spec),"-",e(Suite),"-",
+ e(Tc),".ebench"])]),
+ [append]),
+ io:format(D, "~p~n",[Value]),
+ file:close(D),
+ {ok, State};
+handle_event(_Event, State) ->
+ {ok, State}.
+
+
+e(Atom) when is_atom(Atom) ->
+ Atom;
+e(Str) when is_list(Str) ->
+ lists:map(fun($/) ->
+ $\\;
+ (C) ->
+ C
+ end,Str).
diff --git a/lib/common_test/test_server/ts_erl_config.erl b/lib/common_test/test_server/ts_erl_config.erl
new file mode 100644
index 0000000000..ab7363c106
--- /dev/null
+++ b/lib/common_test/test_server/ts_erl_config.erl
@@ -0,0 +1,403 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%% Purpose : Updates the variable list with variables depending on
+%%% the running Erlang system.
+
+-module(ts_erl_config).
+
+
+-export([variables/2]).
+
+%% Returns a list of key, value pairs.
+
+variables(Base0, OsType) ->
+ Base1 = erl_include(Base0),
+ Base2 = get_app_vars(fun erl_interface/2, Base1, OsType),
+ Base3 = get_app_vars(fun ic/2, Base2, OsType),
+ Base4 = get_app_vars(fun jinterface/2, Base3, OsType),
+ Base5 = dl_vars(Base4, Base3, OsType),
+ Base6 = emu_vars(Base5),
+ Base7 = get_app_vars(fun ssl/2, Base6, OsType),
+ Base8 = erts_lib(Base7, OsType),
+ Base = separators(Base8, OsType),
+ [{'EMULATOR', tl(code:objfile_extension())},
+ {emu_threads, atom_to_list(erlang:system_info(threads))},
+ {type_marker, case is_debug_build() of
+ true ->
+ ".debug";
+ false ->
+ ""
+ end}
+ | Base].
+
+get_app_vars(AppFun, Vars, OsType) ->
+ case catch AppFun(Vars,OsType) of
+ Res when is_list(Res) ->
+ Res;
+ {cannot_find_app, App} ->
+ io:format("* WARNING: Cannot find ~p!~n", [App]),
+ Vars;
+ {'EXIT', Reason} ->
+ exit(Reason);
+ Garbage ->
+ exit({unexpected_internal_error, Garbage})
+ end.
+
+dl_vars(Vars, Base3, OsType) ->
+ ShlibRules0 = ".SUFFIXES:\n" ++
+ ".SUFFIXES: @dll@ @obj@ .c\n\n" ++
+ ".c@dll@:\n" ++
+ "\t@CC@ -c @SHLIB_CFLAGS@ $(SHLIB_EXTRA_CFLAGS) -I@erl_include@ @DEFS@ $<\n" ++
+ "\t@SHLIB_LD@ @CROSSLDFLAGS@ @SHLIB_LDFLAGS@ $(SHLIB_EXTRA_LDFLAGS) -o $@ $*@obj@ @SHLIB_LDLIBS@ $(SHLIB_EXTRA_LDLIBS)",
+
+ ShlibRules = ts_lib:subst(ShlibRules0, Vars),
+ case get_app_vars2(fun jinterface/2, Base3, OsType) of
+ {App, not_found} ->
+ [{'SHLIB_RULES', ShlibRules}, {App, "not_found"}|Vars];
+ _ ->
+ [{'SHLIB_RULES', ShlibRules}|Vars]
+ end.
+get_app_vars2(AppFun, Vars, OsType) ->
+ case catch AppFun(Vars,OsType) of
+ Res when is_list(Res) ->
+ {jinterface, ok};
+ {cannot_find_app, App} ->
+ {App, not_found};
+ {'EXIT', Reason} ->
+ exit(Reason);
+ Garbage ->
+ exit({unexpected_internal_error, Garbage})
+ end.
+
+erts_lib_name(multi_threaded, {win32, V}) ->
+ link_library("erts_MD" ++ case is_debug_build() of
+ true -> "d";
+ false -> ""
+ end,
+ {win32, V});
+erts_lib_name(single_threaded, {win32, V}) ->
+ link_library("erts_ML" ++ case is_debug_build() of
+ true -> "d";
+ false -> ""
+ end,
+ {win32, V});
+erts_lib_name(multi_threaded, OsType) ->
+ link_library("erts_r", OsType);
+erts_lib_name(single_threaded, OsType) ->
+ link_library("erts", OsType).
+
+erts_lib(Vars,OsType) ->
+ {ErtsLibInclude,
+ ErtsLibIncludeGenerated,
+ ErtsLibIncludeInternal,
+ ErtsLibIncludeInternalGenerated,
+ ErtsLibPath,
+ ErtsLibInternalPath,
+ ErtsLibEthreadMake,
+ ErtsLibInternalMake
+ }
+ = case erl_root(Vars) of
+ {installed, _Root} ->
+ Erts = lib_dir(Vars, erts),
+ ErtsInclude = filename:join([Erts, "include"]),
+ ErtsIncludeInternal = filename:join([ErtsInclude, "internal"]),
+ ErtsLib = filename:join([Erts, "lib"]),
+ ErtsLibInternal = filename:join([ErtsLib, "internal"]),
+ ErtsEthreadMake = filename:join([ErtsIncludeInternal, "ethread.mk"]),
+ ErtsInternalMake = filename:join([ErtsIncludeInternal, "erts_internal.mk"]),
+
+ {ErtsInclude,
+ ErtsInclude,
+ ErtsIncludeInternal,
+ ErtsIncludeInternal,
+ ErtsLib,
+ ErtsLibInternal,
+ ErtsEthreadMake,
+ ErtsInternalMake};
+ {srctree, Root, Target} ->
+ Erts = filename:join([Root, "erts"]),
+ ErtsInclude = filename:join([Erts, "include"]),
+ ErtsIncludeTarget = filename:join([ErtsInclude, Target]),
+ ErtsIncludeInternal = filename:join([ErtsInclude,
+ "internal"]),
+ ErtsIncludeInternalTarget = filename:join([ErtsIncludeInternal,
+ Target]),
+ ErtsLib = filename:join([Erts, "lib", Target]),
+ ErtsLibInternal = filename:join([Erts,
+ "lib",
+ "internal",
+ Target]),
+ ErtsEthreadMake = filename:join([ErtsIncludeInternalTarget, "ethread.mk"]),
+ ErtsInternalMake = filename:join([ErtsIncludeInternalTarget, "erts_internal.mk"]),
+
+ {ErtsInclude,
+ ErtsIncludeTarget,
+ ErtsIncludeInternal,
+ ErtsIncludeInternalTarget,
+ ErtsLib,
+ ErtsLibInternal,
+ ErtsEthreadMake,
+ ErtsInternalMake}
+ end,
+ [{erts_lib_include,
+ quote(filename:nativename(ErtsLibInclude))},
+ {erts_lib_include_generated,
+ quote(filename:nativename(ErtsLibIncludeGenerated))},
+ {erts_lib_include_internal,
+ quote(filename:nativename(ErtsLibIncludeInternal))},
+ {erts_lib_include_internal_generated,
+ quote(filename:nativename(ErtsLibIncludeInternalGenerated))},
+ {erts_lib_path, quote(filename:nativename(ErtsLibPath))},
+ {erts_lib_internal_path, quote(filename:nativename(ErtsLibInternalPath))},
+ {erts_lib_multi_threaded, erts_lib_name(multi_threaded, OsType)},
+ {erts_lib_single_threaded, erts_lib_name(single_threaded, OsType)},
+ {erts_lib_make_ethread, quote(ErtsLibEthreadMake)},
+ {erts_lib_make_internal, quote(ErtsLibInternalMake)}
+ | Vars].
+
+erl_include(Vars) ->
+ Include =
+ case erl_root(Vars) of
+ {installed, Root} ->
+ quote(filename:join([Root, "usr", "include"]));
+ {srctree, Root, Target} ->
+ quote(filename:join([Root, "erts", "emulator", "beam"]))
+ ++ " -I" ++ quote(filename:join([Root, "erts", "emulator"]))
+ ++ system_include(Root, Vars)
+ ++ " -I" ++ quote(filename:join([Root, "erts", "include"]))
+ ++ " -I" ++ quote(filename:join([Root, "erts", "include", Target]))
+ end,
+ [{erl_include, filename:nativename(Include)}|Vars].
+
+
+system_include(Root, Vars) ->
+ SysDir =
+ case ts_lib:var(os, Vars) of
+ "Windows" ++ _T -> "sys/win32";
+ _ -> "sys/unix"
+ end,
+ " -I" ++ quote(filename:nativename(filename:join([Root, "erts", "emulator", SysDir]))).
+
+erl_interface(Vars,OsType) ->
+ {Incl, {LibPath, MkIncl}} =
+ case lib_dir(Vars, erl_interface) of
+ {error, bad_name} ->
+ throw({cannot_find_app, erl_interface});
+ Dir ->
+ {filename:join(Dir, "include"),
+ case erl_root(Vars) of
+ {installed, _Root} ->
+ {filename:join(Dir, "lib"),
+ filename:join([Dir, "src", "eidefs.mk"])};
+ {srctree, _Root, Target} ->
+ {filename:join([Dir, "obj", Target]),
+ filename:join([Dir, "src", Target, "eidefs.mk"])}
+ end}
+ end,
+ Lib = link_library("erl_interface",OsType),
+ Lib1 = link_library("ei",OsType),
+ {LibDrv, Lib1Drv} =
+ case erlang:system_info(threads) of
+ false ->
+ case OsType of
+ {unix,_} ->
+ {link_library("erl_interface_st",OsType),
+ link_library("ei_st",OsType)};
+ _ ->
+ {Lib, Lib1}
+ end;
+ true ->
+ case OsType of
+ {win32, _} ->
+ {link_library("erl_interface_md",OsType),
+ link_library("ei_md",OsType)};
+ _ ->
+ {Lib, Lib1}
+ end
+ end,
+ ThreadLib = case OsType of
+ % FIXME: FreeBSD uses the gcc flag '-pthread' or links with
+ % "libc_r", so it has to come last among the libs. This is a
+ % temporary solution that should be configured elsewhere.
+
+ % This temporary solution has now failed!
+ % A new temporary solution is installed ...
+ % {unix,freebsd} -> "-lc_r";
+ {unix,freebsd} ->
+ "-lpthread";
+ {unix,_} ->
+ "-lpthread";
+ _ ->
+ ""
+ end,
+ [{erl_interface_libpath, quote(filename:nativename(LibPath))},
+ {erl_interface_sock_libs, sock_libraries(OsType)},
+ {erl_interface_lib, quote(filename:join(LibPath, Lib))},
+ {erl_interface_eilib, quote(filename:join(LibPath, Lib1))},
+ {erl_interface_lib_drv, quote(filename:join(LibPath, LibDrv))},
+ {erl_interface_eilib_drv, quote(filename:join(LibPath, Lib1Drv))},
+ {erl_interface_threadlib, ThreadLib},
+ {erl_interface_include, quote(filename:nativename(Incl))},
+ {erl_interface_mk_include, quote(filename:nativename(MkIncl))}
+ | Vars].
+
+ic(Vars, OsType) ->
+ {ClassPath, LibPath, Incl} =
+ case lib_dir(Vars, ic) of
+ {error, bad_name} ->
+ throw({cannot_find_app, ic});
+ Dir ->
+ {filename:join([Dir, "priv", "ic.jar"]),
+ case erl_root(Vars) of
+ {installed, _Root} ->
+ filename:join([Dir, "priv", "lib"]);
+ {srctree, _Root, Target} ->
+ filename:join([Dir, "priv", "lib", Target])
+ end,
+ filename:join(Dir, "include")}
+ end,
+ [{ic_classpath, quote(filename:nativename(ClassPath))},
+ {ic_libpath, quote(filename:nativename(LibPath))},
+ {ic_lib, quote(filename:join(filename:nativename(LibPath),link_library("ic", OsType)))},
+ {ic_include_path, quote(filename:nativename(Incl))}|Vars].
+
+jinterface(Vars, _OsType) ->
+ ClassPath =
+ case lib_dir(Vars, jinterface) of
+ {error, bad_name} ->
+ throw({cannot_find_app, jinterface});
+ Dir ->
+ filename:join([Dir, "priv", "OtpErlang.jar"])
+ end,
+ [{jinterface_classpath, quote(filename:nativename(ClassPath))}|Vars].
+
+lib_dir(Vars, Lib) ->
+ LibLibDir = case Lib of
+ erts ->
+ filename:join([code:root_dir(),
+ "erts-" ++ erlang:system_info(version)]);
+ _ ->
+ code:lib_dir(Lib)
+ end,
+ case {get_var(crossroot, Vars), LibLibDir} of
+ {{error, _}, _} -> %no crossroot
+ LibLibDir;
+ {CrossRoot, _} ->
+ %% XXX: Ugly. So ugly I won't comment it
+ %% /Patrik
+ CLibDirList = case Lib of
+ erts ->
+ [CrossRoot, "erts"];
+ _ ->
+ [CrossRoot, "lib", atom_to_list(Lib)]
+ end,
+ CLibDir = filename:join(CLibDirList),
+ Cmd = "ls -d " ++ CLibDir ++ "*",
+ XLibDir = lists:last(string:tokens(os:cmd(Cmd),"\n")),
+ case file:list_dir(XLibDir) of
+ {error, enoent} ->
+ [];
+ _ ->
+ XLibDir
+ end
+ end.
+
+erl_root(Vars) ->
+ Root = case get_var(crossroot,Vars) of
+ {error, notfound} -> code:root_dir();
+ CrossRoot -> CrossRoot
+ end,
+ case ts_lib:erlang_type(Root) of
+ {srctree, _Version} ->
+ Target = get_var(target, Vars),
+ {srctree, Root, Target};
+ {_, _Version} ->
+ {installed, Root}
+ end.
+
+
+get_var(Key, Vars) ->
+ case lists:keysearch(Key, 1, Vars) of
+ {value, {Key, Value}} ->
+ Value;
+ _ ->
+ {error, notfound}
+ end.
+
+
+sock_libraries({win32, _}) ->
+ "ws2_32.lib";
+sock_libraries({unix, _}) ->
+ "". % Included in general libraries if needed.
+
+link_library(LibName,{win32, _}) ->
+ LibName ++ ".lib";
+link_library(LibName,{unix, _}) ->
+ "lib" ++ LibName ++ ".a";
+link_library(_LibName,_Other) ->
+ exit({link_library, not_supported}).
+
+%% Returns emulator specific variables.
+emu_vars(Vars) ->
+ [{is_source_build, is_source_build()},
+ {erl_name, atom_to_list(lib:progname())}|Vars].
+
+is_source_build() ->
+ string:str(erlang:system_info(system_version), "[source]") > 0.
+
+is_debug_build() ->
+ case catch string:str(erlang:system_info(system_version), "debug") of
+ Int when is_integer(Int), Int > 0 ->
+ true;
+ _ ->
+ false
+ end.
+%%
+%% ssl_libdir
+%%
+ssl(Vars, _OsType) ->
+ case lib_dir(Vars, ssl) of
+ {error, bad_name} ->
+ throw({cannot_find_app, ssl});
+ Dir ->
+ [{ssl_libdir, quote(filename:nativename(Dir))}| Vars]
+ end.
+
+separators(Vars, {win32,_}) ->
+ [{'DS',"\\"},{'PS',";"}|Vars];
+separators(Vars, _) ->
+ [{'DS',"/"},{'PS',":"}|Vars].
+
+quote(List) ->
+ case lists:member($ , List) of
+ false -> List;
+ true -> make_quote(List)
+ end.
+
+make_quote(List) ->
+ case os:type() of
+ {win32, _} -> %% nmake"
+ [$"] ++ List ++ [$"];
+ _ -> %% make
+ BackQuote = fun($ , Acc) -> [$\\ , $ |Acc];
+ (Char, Acc) -> [Char|Acc] end,
+ lists:foldr(BackQuote, [], List)
+ end.
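+
+%% Illustration: quote/1 only acts on paths that contain spaces. On win32
+%% (nmake) the whole path is double-quoted, so quote("C:/Program Files")
+%% gives "\"C:/Program Files\"", while on unix (make) each space is
+%% escaped with a backslash instead: quote("my dir") gives "my\\ dir".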
diff --git a/lib/common_test/test_server/ts_install.erl b/lib/common_test/test_server/ts_install.erl
new file mode 100644
index 0000000000..600a576820
--- /dev/null
+++ b/lib/common_test/test_server/ts_install.erl
@@ -0,0 +1,465 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(ts_install).
+
+-export([install/2, platform_id/1]).
+
+-include("ts.hrl").
+-include_lib("kernel/include/file.hrl").
+
+install(install_local, Options) ->
+ install(os:type(), Options);
+
+install(TargetSystem, Options) ->
+ case file:consult(?variables) of
+ {ok, Vars} ->
+ case proplists:get_value(cross,Vars) of
+ "yes" when Options == []->
+ target_install(Vars);
+ _ ->
+ build_install(TargetSystem, Options)
+ end;
+ _ ->
+ build_install(TargetSystem, Options)
+ end.
+
+
+build_install(TargetSystem, Options) ->
+ XComp = parse_xcomp_file(proplists:get_value(xcomp,Options)),
+ case autoconf(TargetSystem, XComp++Options) of
+ {ok, Vars0} ->
+ OsType = os_type(TargetSystem),
+ Vars1 = ts_erl_config:variables(Vars0++XComp++Options,OsType),
+ {Options1, Vars2} = add_vars(Vars1, Options),
+ Vars3 = lists:flatten([Options1|Vars2]),
+ write_terms(?variables, Vars3);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+os_type({unix,_}=OsType) -> OsType;
+os_type({win32,_}=OsType) -> OsType.
+
+target_install(CrossVars) ->
+ io:format("Cross installation detected, skipping configure and data_dir make~n"),
+ case file:rename(?variables,?cross_variables) of
+ ok ->
+ ok;
+ _ ->
+ io:format("Could not find variables file from cross make~n"),
+ throw(cross_installation_failed)
+ end,
+ CPU = proplists:get_value('CPU',CrossVars),
+ OS = proplists:get_value(os,CrossVars),
+ {Options,Vars} = add_vars([{cross,"yes"},{'CPU',CPU},{os,OS}],[]),
+ Variables = lists:flatten([Options|Vars]),
+ write_terms(?variables, Variables).
+
+%% Autoconf for various platforms.
+%% unix uses the configure script
+%% win32 uses ts_autoconf_win32
+
+autoconf(TargetSystem, XComp) ->
+ case autoconf1(TargetSystem, XComp) of
+ ok ->
+ autoconf2(file:read_file("conf_vars"));
+ Error ->
+ Error
+ end.
+
+autoconf1({win32, _},[{cross,"no"}]) ->
+ ts_autoconf_win32:configure();
+autoconf1({unix, _},XCompFile) ->
+ unix_autoconf(XCompFile);
+autoconf1(_,_) ->
+ io:format("cross compilation not supported for that this platform~n"),
+ throw(cross_installation_failed).
+
+autoconf2({ok, Bin}) ->
+ get_vars(ts_lib:b2s(Bin), name, [], []);
+autoconf2(Error) ->
+ Error.
+
+get_vars([$:|Rest], name, Current, Result) ->
+ Name = list_to_atom(lists:reverse(Current)),
+ get_vars(Rest, value, [], [Name|Result]);
+get_vars([$\r|Rest], value, Current, Result) ->
+ get_vars(Rest, value, Current, Result);
+get_vars([$\n|Rest], value, Current, [Name|Result]) ->
+ Value = lists:reverse(Current),
+ get_vars(Rest, name, [], [{Name, Value}|Result]);
+get_vars([C|Rest], State, Current, Result) ->
+ get_vars(Rest, State, [C|Current], Result);
+get_vars([], name, [], Result) ->
+ {ok, Result};
+get_vars(_, _, _, _) ->
+ {error, fatal_bad_conf_vars}.
+
+config_flags() ->
+ case os:getenv("CONFIG_FLAGS") of
+ false -> [];
+ CF -> string:tokens(CF, " \t\n")
+ end.
+
+unix_autoconf(XConf) ->
+ Configure = filename:absname("configure"),
+ Flags = proplists:get_value(crossflags,XConf,[]),
+ Env = proplists:get_value(crossenv,XConf,[]),
+ Host = get_xcomp_flag("host", Flags),
+ Build = get_xcomp_flag("build", Flags),
+ Threads = [" --enable-shlib-thread-safety" ||
+ erlang:system_info(threads) /= false],
+ Debug = [" --enable-debug-mode" ||
+ string:str(erlang:system_info(system_version),"debug") > 0],
+ MXX_Build = [Y || Y <- config_flags(),
+ Y == "--enable-m64-build"
+ orelse Y == "--enable-m32-build"],
+ Args = Host ++ Build ++ Threads ++ Debug ++ " " ++ MXX_Build,
+ case filelib:is_file(Configure) of
+ true ->
+ OSXEnv = macosx_cflags(),
+ UnQuotedEnv = assign_vars(unquote(Env++OSXEnv)),
+ io:format("Running ~s~nEnv: ~p~n",
+ [lists:flatten(Configure ++ Args),UnQuotedEnv]),
+ Port = open_port({spawn, lists:flatten(["\"",Configure,"\"",Args])},
+ [stream, eof, {env,UnQuotedEnv}]),
+ ts_lib:print_data(Port);
+ false ->
+ {error, no_configure_script}
+ end.
+
+unquote([{Var,Val}|T]) ->
+ [{Var,unquote(Val)}|unquote(T)];
+unquote([]) ->
+ [];
+unquote("\""++Rest) ->
+ lists:reverse(tl(lists:reverse(Rest)));
+unquote(String) ->
+ String.
+
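+%% Expand $VAR references in the flag strings with the corresponding
+%% OS environment values (or the empty string when a variable is
+%% unset).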
+assign_vars([]) ->
+ [];
+assign_vars([{VAR,FlagsStr} | VARs]) ->
+ [{VAR,assign_vars(FlagsStr)} | assign_vars(VARs)];
+assign_vars(FlagsStr) ->
+ Flags = [assign_all_vars(Str,[]) || Str <- string:tokens(FlagsStr, [$ ])],
+ string:strip(lists:flatten(lists:map(fun(Flag) ->
+ Flag ++ " "
+ end, Flags)), right).
+
+assign_all_vars([$$ | Rest], FlagSoFar) ->
+ {VarName,Rest1} = get_var_name(Rest, []),
+ assign_all_vars(Rest1, FlagSoFar ++ assign_var(VarName));
+assign_all_vars([Char | Rest], FlagSoFar) ->
+ assign_all_vars(Rest, FlagSoFar ++ [Char]);
+assign_all_vars([], Flag) ->
+ Flag.
+
+get_var_name([Ch | Rest] = Str, VarR) ->
+ case valid_char(Ch) of
+ true -> get_var_name(Rest, [Ch | VarR]);
+ false -> {lists:reverse(VarR),Str}
+ end;
+get_var_name([], VarR) ->
+ {lists:reverse(VarR),[]}.
+
+assign_var(VarName) ->
+ case os:getenv(VarName) of
+ false -> "";
+ Val -> Val
+ end.
+
+valid_char(Ch) when Ch >= $a, Ch =< $z -> true;
+valid_char(Ch) when Ch >= $A, Ch =< $Z -> true;
+valid_char(Ch) when Ch >= $0, Ch =< $9 -> true;
+valid_char($_) -> true;
+valid_char(_) -> false.
+
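+%% Turn a cross flag into a configure argument such as " --host=..."
+%% or " --build=..."; the special value "guess" runs config.guess to
+%% determine it.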
+get_xcomp_flag(Flag, Flags) ->
+ get_xcomp_flag(Flag, Flag, Flags).
+get_xcomp_flag(Flag, Tag, Flags) ->
+ case proplists:get_value(Flag,Flags) of
+ undefined -> "";
+ "guess" -> [" --",Tag,"=",os:cmd("$ERL_TOP/erts/autoconf/config.guess")];
+ HostVal -> [" --",Tag,"=",HostVal]
+ end.
+
+
+macosx_cflags() ->
+ case os:type() of
+ {unix, darwin} ->
+ %% To ensure that the drivers we build can be loaded
+ %% by the emulator, add either -m32 or -m64 to CFLAGS.
+ WordSize = erlang:system_info(wordsize),
+ Mflag = "-m" ++ integer_to_list(8*WordSize),
+ [{"CFLAGS", Mflag},{"LDFLAGS", Mflag}];
+ _ ->
+ []
+ end.
+
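+%% Read an xcomp spec file: lines starting with an uppercase letter
+%% are environment settings (VAR=value), lines starting with
+%% "erl_xcomp_" are cross flags, and all other lines are ignored.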
+parse_xcomp_file(undefined) ->
+ [{cross,"no"}];
+parse_xcomp_file(Filepath) ->
+ {ok,Bin} = file:read_file(Filepath),
+ Lines = binary:split(Bin,<<"\n">>,[global,trim]),
+ {Envs,Flags} = parse_xcomp_file(Lines,[],[]),
+ [{cross,"yes"},{crossroot,os:getenv("ERL_TOP")},
+ {crossenv,Envs},{crossflags,Flags}].
+
+parse_xcomp_file([<<A:8,_/binary>> = Line|R],Envs,Flags)
+ when $A =< A, A =< $Z ->
+ [Var,Value] = binary:split(Line,<<"=">>),
+ parse_xcomp_file(R,[{ts_lib:b2s(Var),
+ ts_lib:b2s(Value)}|Envs],Flags);
+parse_xcomp_file([<<"erl_xcomp_",Line/binary>>|R],Envs,Flags) ->
+ [Var,Value] = binary:split(Line,<<"=">>),
+ parse_xcomp_file(R,Envs,[{ts_lib:b2s(Var),
+ ts_lib:b2s(Value)}|Flags]);
+parse_xcomp_file([_|R],Envs,Flags) ->
+ parse_xcomp_file(R,Envs,Flags);
+parse_xcomp_file([],Envs,Flags) ->
+ {lists:reverse(Envs),lists:reverse(Flags)}.
+
+write_terms(Name, Terms) ->
+ case file:open(Name, [write]) of
+ {ok, Fd} ->
+ Result = write_terms1(Fd, remove_duplicates(Terms)),
+ file:close(Fd),
+ Result;
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+write_terms1(Fd, [Term|Rest]) ->
+ ok = io:format(Fd, "~p.\n", [Term]),
+ write_terms1(Fd, Rest);
+write_terms1(_, []) ->
+ ok.
+
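+%% Keep only the first occurrence of each key, preserving the order.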
+remove_duplicates(List) ->
+ lists:reverse(
+ lists:foldl(fun({Key,Val},Acc) ->
+ R = make_ref(),
+ case proplists:get_value(Key,Acc,R) of
+ R -> [{Key,Val}|Acc];
+ _Else ->
+ Acc
+ end
+ end,[],List)).
+
+add_vars(Vars0, Opts0) ->
+ {Opts,LongNames} =
+ case lists:keymember(longnames, 1, Opts0) of
+ true ->
+ {lists:keydelete(longnames, 1, Opts0),true};
+ false ->
+ {Opts0,false}
+ end,
+ {PlatformId, PlatformLabel, PlatformFilename, Version} =
+ platform([{longnames, LongNames}|Vars0]),
+ NetDir = lists:concat(["/net", hostname()]),
+ Mounted = case file:read_file_info(NetDir) of
+ {ok, #file_info{type = directory}} -> NetDir;
+ _ -> ""
+ end,
+ {Opts, [{longnames, LongNames},
+ {platform_id, PlatformId},
+ {platform_filename, PlatformFilename},
+ {rsh_name, get_rsh_name()},
+ {platform_label, PlatformLabel},
+ {ts_net_dir, Mounted},
+ {erl_flags, []},
+ {erl_release, Version},
+ {ts_testcase_callback, get_testcase_callback()} | Vars0]}.
+
+get_testcase_callback() ->
+ case os:getenv("TS_TESTCASE_CALLBACK") of
+ ModFunc when is_list(ModFunc), ModFunc /= "" ->
+ case string:tokens(ModFunc, " ") of
+ [_Mod,_Func] -> ModFunc;
+ _ -> ""
+ end;
+ _ ->
+ case init:get_argument(ts_testcase_callback) of
+ {ok,[[Mod,Func]]} -> Mod ++ " " ++ Func;
+ _ -> ""
+ end
+ end.
+
+get_rsh_name() ->
+ case os:getenv("ERL_RSH") of
+ false -> "rsh";
+ Str -> Str
+ end.
+
+platform_id(Vars) ->
+ {Id,_,_,_} = platform(Vars),
+ Id.
+
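+%% Build the platform identifiers from the Erlang type, release
+%% version, hostname and a number of emulator properties, e.g.
+%% (hypothetical values) "OTP 18.3/myhost/Linux/X86_64/64/S8/A10".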
+platform(Vars) ->
+ Hostname = hostname(),
+
+ {Type,Version} = ts_lib:erlang_type(),
+ Cpu = ts_lib:var('CPU', Vars),
+ Os = ts_lib:var(os, Vars),
+
+ ErlType = to_upper(atom_to_list(Type)),
+ OsType = ts_lib:initial_capital(Os),
+ CpuType = ts_lib:initial_capital(Cpu),
+ LinuxDist = linux_dist(),
+ ExtraLabel = extra_platform_label(),
+ Schedulers = schedulers(),
+ BindType = bind_type(),
+ KP = kernel_poll(),
+ IOTHR = io_thread(),
+ LC = lock_checking(),
+ MT = modified_timing(),
+ AsyncThreads = async_threads(),
+ Debug = debug(),
+ CpuBits = word_size(),
+ Common = lists:concat([Hostname,"/",OsType,"/",CpuType,CpuBits,LinuxDist,
+ Schedulers,BindType,KP,IOTHR,LC,MT,AsyncThreads,
+ Debug,ExtraLabel]),
+ PlatformId = lists:concat([ErlType, " ", Version, Common]),
+ PlatformLabel = ErlType ++ Common,
+ PlatformFilename = platform_as_filename(PlatformId),
+ {PlatformId, PlatformLabel, PlatformFilename, Version}.
+
+platform_as_filename(Label) ->
+ lists:map(fun($ ) -> $_;
+ ($/) -> $_;
+ (C) when $A =< C, C =< $Z -> C - $A + $a;
+ (C) -> C end,
+ Label).
+
+to_upper(String) ->
+ lists:map(fun(C) when $a =< C, C =< $z -> C - $a + $A;
+ (C) -> C end,
+ String).
+
+word_size() ->
+ case {erlang:system_info({wordsize,external}),
+ erlang:system_info({wordsize,internal})} of
+ {4,4} -> "";
+ {8,8} -> "/64";
+ {8,4} -> "/Halfword"
+ end.
+
+linux_dist() ->
+ case os:type() of
+ {unix,linux} ->
+ linux_dist_1([fun linux_dist_suse/0]);
+ _ -> ""
+ end.
+
+linux_dist_1([F|T]) ->
+ case F() of
+ "" -> linux_dist_1(T);
+ Str -> Str
+ end;
+linux_dist_1([]) -> "".
+
+linux_dist_suse() ->
+ case filelib:is_file("/etc/SuSE-release") of
+ false -> "";
+ true ->
+ Ver0 = os:cmd("awk '/^VERSION/ {print $3}' /etc/SuSE-release"),
+ [_|Ver1] = lists:reverse(Ver0),
+ Ver = lists:reverse(Ver1),
+ "/Suse" ++ Ver
+ end.
+
+hostname() ->
+ case catch inet:gethostname() of
+ {ok, Hostname} when is_list(Hostname) ->
+ "/" ++ lists:takewhile(fun (C) -> C /= $. end, Hostname);
+ _ ->
+ "/localhost"
+ end.
+
+async_threads() ->
+ case catch erlang:system_info(threads) of
+ true -> "/A"++integer_to_list(erlang:system_info(thread_pool_size));
+ _ -> ""
+ end.
+
+schedulers() ->
+ case catch erlang:system_info(smp_support) of
+ true ->
+ case {erlang:system_info(schedulers),
+ erlang:system_info(schedulers_online)} of
+ {S,S} ->
+ "/S"++integer_to_list(S);
+ {S,O} ->
+ "/S"++integer_to_list(S) ++ ":" ++
+ integer_to_list(O)
+ end;
+ _ -> ""
+ end.
+
+bind_type() ->
+ case catch erlang:system_info(scheduler_bind_type) of
+ thread_no_node_processor_spread -> "/sbttnnps";
+ no_node_processor_spread -> "/sbtnnps";
+ no_node_thread_spread -> "/sbtnnts";
+ processor_spread -> "/sbtps";
+ thread_spread -> "/sbtts";
+ no_spread -> "/sbtns";
+ _ -> ""
+ end.
+
+
+debug() ->
+ case string:str(erlang:system_info(system_version), "debug") of
+ 0 -> "";
+ _ -> "/Debug"
+ end.
+
+lock_checking() ->
+ case catch erlang:system_info(lock_checking) of
+ true -> "/LC";
+ _ -> ""
+ end.
+
+modified_timing() ->
+ case catch erlang:system_info(modified_timing_level) of
+ N when is_integer(N) ->
+ "/T" ++ integer_to_list(N);
+ _ -> ""
+ end.
+
+kernel_poll() ->
+ case catch erlang:system_info(kernel_poll) of
+ true -> "/KP";
+ _ -> ""
+ end.
+
+io_thread() ->
+ case catch erlang:system_info(io_thread) of
+ true -> "/IOTHR";
+ _ -> ""
+ end.
+
+extra_platform_label() ->
+ case os:getenv("TS_EXTRA_PLATFORM_LABEL") of
+ [] -> "";
+ [_|_]=Label -> "/" ++ Label;
+ false -> ""
+ end.
diff --git a/lib/common_test/test_server/ts_install_cth.erl b/lib/common_test/test_server/ts_install_cth.erl
new file mode 100644
index 0000000000..0462e62611
--- /dev/null
+++ b/lib/common_test/test_server/ts_install_cth.erl
@@ -0,0 +1,270 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%% @doc TS Installed SCB
+%%%
+%%% This module does what the make parts of the ts:run/X commands did,
+%%% but not the Makefile.first parts, so those still have to be done
+%%% by ts or manually.
+
+-module(ts_install_cth).
+
+%% Suite Callbacks
+-export([id/1]).
+-export([init/2]).
+
+-export([pre_init_per_suite/3]).
+-export([post_init_per_suite/4]).
+-export([pre_end_per_suite/3]).
+-export([post_end_per_suite/4]).
+
+-export([pre_init_per_group/3]).
+-export([post_init_per_group/4]).
+-export([pre_end_per_group/3]).
+-export([post_end_per_group/4]).
+
+-export([pre_init_per_testcase/3]).
+-export([post_init_per_testcase/4]).
+-export([pre_end_per_testcase/3]).
+-export([post_end_per_testcase/4]).
+
+-export([on_tc_fail/3]).
+-export([on_tc_skip/3]).
+
+-export([terminate/1]).
+
+-include_lib("kernel/include/file.hrl").
+
+-type config() :: proplists:proplist().
+-type reason() :: term().
+-type skip_or_fail() :: {skip, reason()} |
+ {auto_skip, reason()} |
+ {fail, reason()}.
+
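+%% ts_conf_dir is where the ts "variables" file is found,
+%% target_system and install_opts are the installation arguments the
+%% hook was started with, and nodenames is the number of node names
+%% to pre-generate for each suite, group and test case.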
+-record(state, { ts_conf_dir, target_system, install_opts, nodenames, nodes }).
+
+%% @doc The id of this SCB
+-spec id(Opts :: term()) ->
+ Id :: term().
+id(_Opts) ->
+ ?MODULE.
+
+%% @doc Always called before any other callback function.
+-spec init(Id :: term(), Opts :: proplists:proplist()) ->
+ {ok, State :: #state{}}.
+init(_Id, Opts) ->
+ Nodenames = proplists:get_value(nodenames, Opts, 0),
+ Nodes = proplists:get_value(nodes, Opts, 0),
+ TSConfDir = proplists:get_value(ts_conf_dir, Opts),
+ TargetSystem = proplists:get_value(target_system, Opts, install_local),
+ InstallOpts = proplists:get_value(install_opts, Opts, []),
+ {ok, #state{ nodenames = Nodenames,
+ nodes = Nodes,
+ ts_conf_dir = TSConfDir,
+ target_system = TargetSystem,
+ install_opts = InstallOpts } }.
+
+%% @doc Called before init_per_suite is called.
+-spec pre_init_per_suite(Suite :: atom(),
+ Config :: config(),
+ State :: #state{}) ->
+ {config() | skip_or_fail(), NewState :: #state{}}.
+pre_init_per_suite(Suite,Config,#state{ ts_conf_dir = undefined} = State) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ ParentDir = filename:join(
+ lists:reverse(
+ tl(lists:reverse(filename:split(DataDir))))),
+ TSConfDir = filename:join([ParentDir, "..","test_server"]),
+ pre_init_per_suite(Suite, Config, State#state{ ts_conf_dir = TSConfDir });
+pre_init_per_suite(_Suite,Config,State) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ try
+ {ok,Variables} =
+ file:consult(filename:join(State#state.ts_conf_dir,"variables")),
+ case proplists:get_value(cross,Variables) of
+ "yes" ->
+ ct:log("Not making data dir as tests have been cross compiled");
+ _ ->
+ ts_lib:make_non_erlang(DataDir, Variables)
+ end,
+
+ {add_node_name(Config, State), State}
+ catch error:{badmatch,{error,enoent}} ->
+ {add_node_name(Config, State), State};
+ Error:Reason ->
+ Stack = erlang:get_stacktrace(),
+ ct:pal("~p failed! ~p:{~p,~p}",[?MODULE,Error,Reason,Stack]),
+ {{fail,{?MODULE,{Error,Reason, Stack}}},State}
+ end.
+
+%% @doc Called after init_per_suite.
+-spec post_init_per_suite(Suite :: atom(),
+ Config :: config(),
+ Return :: config() | skip_or_fail(),
+ State :: #state{}) ->
+ {config() | skip_or_fail(), NewState :: #state{}}.
+post_init_per_suite(_Suite,_Config,Return,State) ->
+ test_server_ctrl:kill_slavenodes(),
+ {Return, State}.
+
+%% @doc Called before end_per_suite.
+-spec pre_end_per_suite(Suite :: atom(),
+ Config :: config() | skip_or_fail(),
+ State :: #state{}) ->
+ {ok | skip_or_fail(), NewState :: #state{}}.
+pre_end_per_suite(_Suite,Config,State) ->
+ {Config, State}.
+
+%% @doc Called after end_per_suite.
+-spec post_end_per_suite(Suite :: atom(),
+ Config :: config(),
+ Return :: term(),
+ State :: #state{}) ->
+ {ok | skip_or_fail(), NewState :: #state{}}.
+post_end_per_suite(_Suite,_Config,Return,State) ->
+ {Return, State}.
+
+%% @doc Called before each init_per_group.
+-spec pre_init_per_group(Group :: atom(),
+ Config :: config(),
+ State :: #state{}) ->
+ {config() | skip_or_fail(), NewState :: #state{}}.
+pre_init_per_group(_Group,Config,State) ->
+ {add_node_name(Config, State), State}.
+
+%% @doc Called after each init_per_group.
+-spec post_init_per_group(Group :: atom(),
+ Config :: config(),
+ Return :: config() | skip_or_fail(),
+ State :: #state{}) ->
+ {config() | skip_or_fail(), NewState :: #state{}}.
+post_init_per_group(_Group,_Config,Return,State) ->
+ {Return, State}.
+
+%% @doc Called after each end_per_group.
+-spec pre_end_per_group(Group :: atom(),
+ Config :: config() | skip_or_fail(),
+ State :: #state{}) ->
+ {ok | skip_or_fail(), NewState :: #state{}}.
+pre_end_per_group(_Group,Config,State) ->
+ {Config, State}.
+
+%% @doc Called after each end_per_group.
+-spec post_end_per_group(Group :: atom(),
+ Config :: config(),
+ Return :: term(),
+ State :: #state{}) ->
+ {ok | skip_or_fail(), NewState :: #state{}}.
+post_end_per_group(_Group,_Config,Return,State) ->
+ {Return, State}.
+
+%% @doc Called before each test case.
+-spec pre_init_per_testcase(TC :: atom(),
+ Config :: config(),
+ State :: #state{}) ->
+ {config() | skip_or_fail(), NewState :: #state{}}.
+pre_init_per_testcase(_TC,Config,State) ->
+ {add_node_name(Config, State), State}.
+
+-spec post_init_per_testcase(TC :: atom(),
+ Config :: config(),
+ Return :: term(),
+ State :: #state{}) ->
+ {ok | skip_or_fail(), NewState :: #state{}}.
+post_init_per_testcase(_TC,_Config,Return,State) ->
+ {Return, State}.
+
+%% @doc Called after each test case.
+-spec pre_end_per_testcase(TC :: atom(),
+ Config :: config(),
+ State :: #state{}) ->
+ {config() | skip_or_fail(), NewState :: #state{}}.
+pre_end_per_testcase(_TC,Config,State) ->
+ {Config, State}.
+
+-spec post_end_per_testcase(TC :: atom(),
+ Config :: config(),
+ Return :: term(),
+ State :: #state{}) ->
+ {ok | skip_or_fail(), NewState :: #state{}}.
+post_end_per_testcase(_TC,_Config,Return,State) ->
+ {Return, State}.
+
+%% @doc Called after a test case failed.
+-spec on_tc_fail(TC :: init_per_suite | end_per_suite |
+ init_per_group | end_per_group | atom(),
+ Reason :: term(), State :: #state{}) ->
+ NewState :: #state{}.
+on_tc_fail(_TC, _Reason, State) ->
+ State.
+
+%% @doc Called when a test case is skipped.
+-spec on_tc_skip(TC :: end_per_suite | init_per_group | end_per_group | atom(),
+ {tc_auto_skip, {failed, {Mod :: atom(), Function :: atom(),
+ Reason :: term()}}} |
+ {tc_user_skip, {skipped, Reason :: term()}},
+ State :: #state{}) ->
+ NewState :: #state{}.
+on_tc_skip(_TC, _Reason, State) ->
+ State.
+
+%% @doc Called when the scope of the SCB is done.
+-spec terminate(State :: #state{}) ->
+ term().
+terminate(_State) ->
+ ok.
+
+%%% ============================================================================
+%%% Local functions
+%%% ============================================================================
+
+%% Add a nodename to config if it does not exist
+add_node_name(Config, State) ->
+ case proplists:get_value(nodenames, Config) of
+ undefined ->
+ lists:keystore(
+ nodenames, 1, Config,
+ {nodenames,generate_nodenames(State#state.nodenames)});
+ _Else ->
+ Config
+ end.
+
+
+%% Copied from test_server_ctrl.erl
+generate_nodenames(Num) ->
+ {ok,Name} = inet:gethostname(),
+ generate_nodenames2(Num, [Name], []).
+
+generate_nodenames2(0, _Hosts, Acc) ->
+ Acc;
+generate_nodenames2(N, Hosts, Acc) ->
+ Host=lists:nth((N rem (length(Hosts)))+1, Hosts),
+ Name=list_to_atom(temp_nodename("nod",N) ++ "@" ++ Host),
+ generate_nodenames2(N-1, Hosts, [Name|Acc]).
+
+%% We cannot use erlang:unique_integer([positive])
+%% here since this code is run on older test releases as well.
+temp_nodename(Base,I) ->
+ {A,B,C} = os:timestamp(),
+ Nstr = integer_to_list(I),
+ Astr = integer_to_list(A),
+ Bstr = integer_to_list(B),
+ Cstr = integer_to_list(C),
+ Base++Nstr++Astr++Bstr++Cstr.
diff --git a/lib/common_test/test_server/ts_lib.erl b/lib/common_test/test_server/ts_lib.erl
new file mode 100644
index 0000000000..7c3f450194
--- /dev/null
+++ b/lib/common_test/test_server/ts_lib.erl
@@ -0,0 +1,372 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(ts_lib).
+
+-include_lib("kernel/include/file.hrl").
+-include("ts.hrl").
+
+%% Avoid warning for local function error/1 clashing with autoimported BIF.
+-compile({no_auto_import,[error/1]}).
+-export([error/1, var/2, erlang_type/0,
+ erlang_type/1,
+ initial_capital/1,
+ specs/1, suites/2,
+ test_categories/2, specialized_specs/2,
+ subst_file/3, subst/2, print_data/1,
+ make_non_erlang/2,
+ maybe_atom_to_list/1, progress/4,
+ b2s/1
+ ]).
+
+error(Reason) ->
+ throw({error, Reason}).
+
+%% Returns the value for a variable
+
+var(Name, Vars) ->
+ case lists:keysearch(Name, 1, Vars) of
+ {value, {Name, Value}} ->
+ Value;
+ false ->
+ error({bad_installation, {undefined_var, Name, Vars}})
+ end.
+
+%% Returns the level of verbosity (0-X)
+verbosity(Vars) ->
+ % Check for a single verbose option.
+ case lists:member(verbose, Vars) of
+ true ->
+ 1;
+ false ->
+ case lists:keysearch(verbose, 1, Vars) of
+ {value, {verbose, Level}} ->
+ Level;
+ _ ->
+ 0
+ end
+ end.
+
+% Displays output to the console if the verbosity is equal to or
+% greater than Level.
+progress(Vars, Level, Format, Args) ->
+ V=verbosity(Vars),
+ if
+ V>=Level ->
+ io:format(Format, Args);
+ true ->
+ ok
+ end.
+
+%% Returns: {Type, Version} where Type is otp|srctree
+
+erlang_type() ->
+ erlang_type(code:root_dir()).
+erlang_type(RootDir) ->
+ {_, Version} = init:script_id(),
+ RelDir = filename:join(RootDir, "releases"), % Only in installed
+ case filelib:is_file(RelDir) of
+ true -> {otp,Version}; % installed OTP
+ false -> {srctree,Version} % source code tree
+ end.
+
+%% Upcases the first letter in a string.
+
+initial_capital([C|Rest]) when $a =< C, C =< $z ->
+ [C-$a+$A|Rest];
+initial_capital(String) ->
+ String.
+
+specialized_specs(Dir,PostFix) ->
+ Specs = filelib:wildcard(filename:join([filename:dirname(Dir),
+ "*_test", "*_"++PostFix++".spec"])),
+ sort_tests([begin
+ DirPart = filename:dirname(Name),
+ AppTest = hd(lists:reverse(filename:split(DirPart))),
+ list_to_atom(string:substr(AppTest, 1, length(AppTest)-5))
+ end || Name <- Specs]).
+
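+%% Note that e.g. (illustrative) ".../kernel_test/kernel.spec" is a
+%% main spec and is included, while ".../kernel_test/kernel_bench.spec"
+%% is not.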
+specs(Dir) ->
+ Specs = filelib:wildcard(filename:join([filename:dirname(Dir),
+ "*_test", "*.{dyn,}spec"])),
+ %% Make sure only to include the main spec for each application
+ MainSpecs =
+ lists:flatmap(fun(FullName) ->
+ [Spec,TestDir|_] =
+ lists:reverse(filename:split(FullName)),
+ [_TestSuffix|TDParts] =
+ lists:reverse(string:tokens(TestDir,[$_,$.])),
+ [_SpecSuffix|SParts] =
+ lists:reverse(string:tokens(Spec,[$_,$.])),
+ if TDParts == SParts ->
+ [filename_to_atom(FullName)];
+ true ->
+ []
+ end
+ end, Specs),
+ sort_tests(MainSpecs).
+
+test_categories(Dir, App) ->
+ Specs = filelib:wildcard(filename:join([filename:dirname(Dir),
+ App++"_test", "*.spec"])),
+ lists:flatmap(fun(FullName) ->
+ [Spec,_TestDir|_] =
+ lists:reverse(filename:split(FullName)),
+ case filename:rootname(Spec -- App) of
+ "" ->
+ [];
+ [_Sep | Cat] ->
+ [list_to_atom(Cat)]
+ end
+ end, Specs).
+
+suites(Dir, App) ->
+ Glob=filename:join([filename:dirname(Dir), App++"_test",
+ "*_SUITE.erl"]),
+ Suites=filelib:wildcard(Glob),
+ [filename_to_atom(Name) || Name <- Suites].
+
+filename_to_atom(Name) ->
+ list_to_atom(filename:rootname(filename:basename(Name))).
+
+%% Sorts a list of either log file directories or spec files.
+
+sort_tests(Tests) ->
+ Sorted = lists:usort([{suite_order(filename_to_atom(X)), X} ||
+ X <- Tests]),
+ [X || {_, X} <- Sorted].
+
+%% This defines the order in which tests should be run and be presented
+%% in index files.
+
+suite_order(emulator) -> 0;
+suite_order(test_server) -> 1;
+suite_order(kernel) -> 4;
+suite_order(stdlib) -> 6;
+suite_order(compiler) -> 8;
+suite_order(hipe) -> 9;
+suite_order(erl_interface) -> 12;
+suite_order(jinterface) -> 14;
+suite_order(sasl) -> 16;
+suite_order(tools) -> 18;
+suite_order(runtime_tools) -> 19;
+suite_order(parsetools) -> 20;
+suite_order(debugger) -> 22;
+suite_order(ic) -> 24;
+suite_order(orber) -> 26;
+suite_order(inets) -> 28;
+suite_order(asn1) -> 30;
+suite_order(os_mon) -> 32;
+suite_order(snmp) -> 38;
+suite_order(mnesia) -> 44;
+suite_order(system) -> 999; %% IMPORTANT: system SHOULD always be last!
+suite_order(_) -> 200.
+
+%% Substitute all occurrences of @var@ in the In file, using
+%% the list of variables in Vars, producing the output file Out.
+%% Returns: ok | {error, Reason}
+
+subst_file(In, Out, Vars) ->
+ case file:read_file(In) of
+ {ok, Bin} ->
+ Subst = subst(b2s(Bin), Vars, []),
+ case file:write_file(Out, unicode:characters_to_binary(Subst)) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ {error, {file_write, Reason}}
+ end;
+ Error ->
+ Error
+ end.
+
+subst(String, Vars) ->
+ subst(String, Vars, []).
+
+subst([$@, $_|Rest], Vars, Result) ->
+ subst_var([$_|Rest], Vars, Result, []);
+subst([$@, C|Rest], Vars, Result) when $A =< C, C =< $Z ->
+ subst_var([C|Rest], Vars, Result, []);
+subst([$@, C|Rest], Vars, Result) when $a =< C, C =< $z ->
+ subst_var([C|Rest], Vars, Result, []);
+subst([C|Rest], Vars, Result) ->
+ subst(Rest, Vars, [C|Result]);
+subst([], _Vars, Result) ->
+ lists:reverse(Result).
+
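+%% Example: with Vars = [{'CPU',"x86_64"}], subst("cpu=@CPU@", Vars)
+%% returns "cpu=x86_64"; unknown variables are left as @Name@.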
+subst_var([$@|Rest], Vars, Result, VarAcc) ->
+ Key = list_to_atom(lists:reverse(VarAcc)),
+ {Result1,Rest1} = do_subst_var(Key, Rest, Vars, Result, VarAcc),
+ subst(Rest1, Vars, Result1);
+
+subst_var([C|Rest], Vars, Result, VarAcc) ->
+ subst_var(Rest, Vars, Result, [C|VarAcc]);
+subst_var([], Vars, Result, VarAcc) ->
+    subst([], Vars, VarAcc++[$@|Result]).
+
+%% handle conditional
+do_subst_var(Cond, Rest, Vars, Result, _VarAcc) when Cond == 'IFEQ' ;
+ Cond == 'IFNEQ' ->
+ {Bool,Comment,Rest1} = do_test(Rest, Vars, Cond),
+ Rest2 = extract_clause(Bool, Rest1),
+ {lists:reverse(Comment, Result),Rest2};
+%% variable substitution
+do_subst_var(Key, Rest, Vars, Result, VarAcc) ->
+ case lists:keysearch(Key, 1, Vars) of
+ {value, {Key, Value}} ->
+ {lists:reverse(Value, Result),Rest};
+ false ->
+ {[$@|VarAcc++[$@|Result]],Rest}
+ end.
+
+%% check arguments in "@IF[N]EQ@ (Arg1, Arg2)" for equality
+do_test(Rest, Vars, Test) ->
+ {Arg1,Rest1} = get_arg(Rest, Vars, $,, []),
+ {Arg2,Rest2} = get_arg(Rest1, Vars, 41, []), % $)
+ Result = case Arg1 of
+ Arg2 when Test == 'IFEQ' -> true;
+ Arg2 when Test == 'IFNEQ' -> false;
+ _ when Test == 'IFNEQ' -> true;
+ _ -> false
+ end,
+ Comment = io_lib:format("# Result of test: ~s (~s, ~s) -> ~w",
+ [atom_to_list(Test),Arg1,Arg2,Result]),
+ {Result,Comment,Rest2}.
+
+%% extract an argument
+get_arg([$(|Rest], Vars, Stop, _) ->
+ get_arg(Rest, Vars, Stop, []);
+get_arg([Stop|Rest], Vars, Stop, Acc) ->
+ Arg = string:strip(lists:reverse(Acc)),
+ Subst = subst(Arg, Vars),
+ {Subst,Rest};
+get_arg([C|Rest], Vars, Stop, Acc) ->
+ get_arg(Rest, Vars, Stop, [C|Acc]).
+
+%% keep only the true or false conditional clause
+extract_clause(true, Rest) ->
+ extract_clause(true, Rest, []);
+extract_clause(false, Rest) ->
+ Rest1 = discard_clause(Rest), % discard true clause
+ extract_clause(false, Rest1, []).
+
+%% true clause buffered, done
+extract_clause(true, [$@,$E,$L,$S,$E,$@|Rest], Acc) ->
+ Rest1 = discard_clause(Rest), % discard false clause
+ lists:reverse(Acc, Rest1);
+%% buffering of false clause starts now
+extract_clause(false, [$@,$E,$L,$S,$E,$@|Rest], _Acc) ->
+ extract_clause(false, Rest, []);
+%% true clause buffered, done
+extract_clause(true, [$@,$E,$N,$D,$I,$F,$@|Rest], Acc) ->
+ lists:reverse(Acc, Rest);
+%% false clause buffered, done
+extract_clause(false, [$@,$E,$N,$D,$I,$F,$@|Rest], Acc) ->
+ lists:reverse(Acc, Rest);
+%% keep buffering
+extract_clause(Bool, [C|Rest], Acc) ->
+ extract_clause(Bool, Rest, [C|Acc]);
+%% parse error
+extract_clause(_, [], Acc) ->
+ lists:reverse(Acc).
+
+discard_clause([$@,$E,$L,$S,$E,$@|Rest]) ->
+ Rest;
+discard_clause([$@,$E,$N,$D,$I,$F,$@|Rest]) ->
+ Rest;
+discard_clause([_C|Rest]) ->
+ discard_clause(Rest);
+discard_clause([]) -> % parse error
+ [].
+
+
+print_data(Port) ->
+ receive
+ {Port, {data, Bytes}} ->
+ io:put_chars(Bytes),
+ print_data(Port);
+ {Port, eof} ->
+ Port ! {self(), close},
+ receive
+ {Port, closed} ->
+ true
+ end,
+ receive
+ {'EXIT', Port, _} ->
+ ok
+ after 1 -> % force context switch
+ ok
+ end
+ end.
+
+maybe_atom_to_list(To_list) when is_list(To_list) ->
+ To_list;
+maybe_atom_to_list(To_list) when is_atom(To_list)->
+ atom_to_list(To_list).
+
+
+%% Configure and run all the Makefiles in the data dir of the suite
+%% in question
+make_non_erlang(DataDir, Variables) ->
+ %% Make the stuff in all_SUITE_data if it exists
+ AllDir = filename:join(DataDir,"../all_SUITE_data"),
+ case filelib:is_dir(AllDir) of
+ true ->
+ make_non_erlang_do(AllDir,Variables);
+ false ->
+ ok
+ end,
+ make_non_erlang_do(DataDir, Variables).
+
+make_non_erlang_do(DataDir, Variables) ->
+ try
+ MakeCommand = proplists:get_value(make_command,Variables),
+
+ FirstMakefile = filename:join(DataDir,"Makefile.first"),
+ case filelib:is_regular(FirstMakefile) of
+ true ->
+ io:format("Making ~p",[FirstMakefile]),
+ ok = ts_make:make(
+ MakeCommand, DataDir, filename:basename(FirstMakefile));
+ false ->
+ ok
+ end,
+
+ MakefileSrc = filename:join(DataDir,"Makefile.src"),
+ MakefileDest = filename:join(DataDir,"Makefile"),
+ case filelib:is_regular(MakefileSrc) of
+ true ->
+ ok = ts_lib:subst_file(MakefileSrc,MakefileDest,Variables),
+ io:format("Making ~p",[MakefileDest]),
+ ok = ts_make:make([{makefile,"Makefile"},{data_dir,DataDir}
+ | Variables]);
+ false ->
+ ok
+ end
+ after
+        timer:sleep(100) %% maybe unnecessary now that we no longer do set_cwd
+ end.
+
+b2s(Bin) ->
+ unicode:characters_to_list(Bin,default_encoding()).
+
+default_encoding() ->
+ try epp:default_encoding()
+ catch error:undef -> latin1
+ end.
diff --git a/lib/common_test/test_server/ts_make.erl b/lib/common_test/test_server/ts_make.erl
new file mode 100644
index 0000000000..921edb264a
--- /dev/null
+++ b/lib/common_test/test_server/ts_make.erl
@@ -0,0 +1,114 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(ts_make).
+
+-export([make/1,make/3,unmake/1]).
+
+-include_lib("common_test/include/ct.hrl").
+
+%% Functions to be called from make test cases.
+
+make(Config) when is_list(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ Makefile = proplists:get_value(makefile, Config),
+ Make = proplists:get_value(make_command, Config),
+ case make(Make, DataDir, Makefile) of
+ ok -> ok;
+ {error,Reason} -> exit({make_failed,Reason})
+ end.
+
+unmake(Config) when is_list(Config) ->
+ ok.
+
+%% Runs `make' in the given directory.
+%% Result: ok | {error, Reason}
+
+make(Make, Dir, Makefile) ->
+ {RunFile, RunCmd, Script} = run_make_script(os:type(), Make, Dir, Makefile),
+ case file:write_file(RunFile, Script) of
+ ok ->
+ Log = filename:join(Dir, "make.log"),
+ file:delete(Log),
+ Port = open_port({spawn, RunCmd}, [eof,stream,in,stderr_to_stdout]),
+ case get_port_data(Port, [], false) of
+ "*ok*" ++ _ -> ok;
+ "*error*" ++ _ -> {error, make};
+            Other -> {error,{make,Other}}
+ end;
+ Error -> Error
+ end.
+
+get_port_data(Port, Last0, Complete0) ->
+ receive
+ {Port,{data,Bytes}} ->
+ {Last, Complete} = update_last(Bytes, Last0, Complete0),
+ get_port_data(Port, Last, Complete);
+ {Port, eof} ->
+ Result = update_last(eof, Last0, Complete0),
+ unlink(Port),
+ exit(Port, die),
+ Result
+ end.
+
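+%% Accumulate port output one line at a time: when a complete line has
+%% been buffered, print it before consuming more bytes; CR is dropped
+%% and NL terminates a line. At eof the last buffered line is returned
+%% so that make/3 can match it against "*ok*" or "*error*".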
+update_last([C|Rest], Line, true) ->
+ try
+        %% UTF-8 list to UTF-8 binary
+        %% (i.e. we assume the port delivers UTF-8 bytes)
+ io:put_chars(list_to_binary(Line))
+ catch
+ error:badarg ->
+            %% io:put_chars/1 failed with badarg; this likely means
+            %% there were Unicode code points in our byte buffer
+            %% (e.g. a warning from gcc containing åäö)
+ io:put_chars(unicode:characters_to_binary(Line))
+ end,
+ io:nl(),
+ update_last([C|Rest], [], false);
+update_last([$\r|Rest], Result, Complete) ->
+ update_last(Rest, Result, Complete);
+update_last([$\n|Rest], Result, _Complete) ->
+ update_last(Rest, lists:reverse(Result), true);
+update_last([C|Rest], Result, Complete) ->
+ update_last(Rest, [C|Result], Complete);
+update_last([], Result, Complete) ->
+ {Result, Complete};
+update_last(eof, Result, _) ->
+ unicode:characters_to_list(list_to_binary(Result)).
+
+run_make_script({win32, _}, Make, Dir, Makefile) ->
+ {"run_make.bat",
+ ".\\run_make",
+ ["@echo off\r\n",
+ "cd \"", filename:nativename(Dir), "\"\r\n",
+ Make, " -f ", Makefile, " \r\n",
+ "if errorlevel 1 echo *error*\r\n",
+ "if not errorlevel 1 echo *ok*\r\n"]};
+run_make_script({unix, _}, Make, Dir, Makefile) ->
+ {"run_make",
+ "/bin/sh ./run_make",
+ ["#!/bin/sh\n",
+ "cd \"", Dir, "\"\n",
+ Make, " -f ", Makefile, " 2>&1\n",
+ "case $? in\n",
+ " 0) echo '*ok*';;\n",
+ " *) echo '*error*';;\n",
+ "esac\n"]};
+run_make_script(_Other, _Make, _Dir, _Makefile) ->
+ exit(dont_know_how_to_make_script_on_this_platform).
diff --git a/lib/common_test/test_server/ts_run.erl b/lib/common_test/test_server/ts_run.erl
new file mode 100644
index 0000000000..188094921d
--- /dev/null
+++ b/lib/common_test/test_server/ts_run.erl
@@ -0,0 +1,455 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%% Purpose : Supervises running of test cases.
+
+-module(ts_run).
+
+-export([run/4,ct_run_test/2]).
+
+-define(DEFAULT_MAKE_TIMETRAP_MINUTES, 60).
+-define(DEFAULT_UNMAKE_TIMETRAP_MINUTES, 15).
+
+-include("ts.hrl").
+
+-import(lists, [member/2,filter/2]).
+
+-record(state,
+ {file, % File given.
+ mod, % Module to run.
+ test_server_args, % Arguments to test server.
+ command, % Command to run.
+ test_dir, % Directory for test suite.
+ makefiles, % List of all makefiles.
+ makefile, % Current makefile.
+ batch, % Are we running in batch mode?
+ data_wc, % Wildcard for data dirs.
+ topcase, % Top case specification.
+ all % Set if we have all_SUITE_data
+ }).
+
+-define(tracefile,"traceinfo").
+
+%% Options is a slightly modified version of the options given to
+%% ts:run. Vars0 are from the variables file.
+run(File, Args0, Options, Vars0) ->
+ Vars=
+ case lists:keysearch(vars, 1, Options) of
+ {value, {vars, Vars1}} ->
+ Vars1++Vars0;
+ _ ->
+ Vars0
+ end,
+ {Batch,Runner} =
+ case {member(interactive, Options), member(batch, Options)} of
+ {false, true} ->
+ {true, fun run_batch/3};
+ _ ->
+ {false, fun run_interactive/3}
+ end,
+ Hooks = [fun init_state/3,
+ fun run_preinits/3,
+ fun make_command/3,
+ Runner],
+ Args = make_common_test_args(Args0,Options,Vars),
+ St = #state{file=File,test_server_args=Args,batch=Batch},
+ R = execute(Hooks, Vars, [], St),
+ case R of
+ {ok,_,_,_} -> ok;
+ Error -> Error
+ end.
+
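+%% Run the hooks in sequence, threading Vars, Spec and State through;
+%% the first hook returning an error aborts the pipeline.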
+execute([Hook|Rest], Vars0, Spec0, St0) ->
+ case Hook(Vars0, Spec0, St0) of
+ ok ->
+ execute(Rest, Vars0, Spec0, St0);
+ {ok, Vars, Spec, St} ->
+ execute(Rest, Vars, Spec, St);
+ Error ->
+ Error
+ end;
+execute([], Vars, Spec, St) ->
+ {ok, Vars, Spec, St}.
+
+%% Wrapper to run tests using ct:run_test/1 and handle any errors.
+
+ct_run_test(Dir, CommonTestArgs) ->
+ try
+ ok = file:set_cwd(Dir),
+ case ct:run_test(CommonTestArgs) of
+ {_,_,_} ->
+ ok;
+ {error,Error} ->
+ io:format("ERROR: ~P\n", [Error,20]);
+ Other ->
+ io:format("~P\n", [Other,20])
+ end
+ catch
+ _:Crash ->
+ io:format("CRASH: ~P\n", [Crash,20])
+ end.
+
+%%
+%% Deletes File from Files when File is of the form .../<SUITE>_data/<file>
+%% and all of <SUITE> has been skipped in Spec, i.e. there
+%% exists a {skip, {<SUITE>, _}} tuple in Spec.
+%%
+del_skipped_suite_data_dir(Files, Spec) ->
+ SkipDirNames = lists:foldl(fun ({skip, {SS, _C}}, SSs) ->
+ [atom_to_list(SS) ++ "_data" | SSs];
+ (_, SSs) ->
+ SSs
+ end,
+ [],
+ Spec),
+ filter(fun (File) ->
+ not member(filename:basename(filename:dirname(File)),
+ SkipDirNames)
+ end,
+ Files).
+
+%% Initialize our internal state.
+
+init_state(Vars, [], St0) ->
+ {FileBase,Wc0,Mod} =
+ case St0#state.file of
+ {Fil,Mod0} -> {Fil, atom_to_list(Mod0) ++ "*_data",Mod0};
+ Fil -> {Fil,"*_SUITE_data",[]}
+ end,
+ {ok,Cwd} = file:get_cwd(),
+ TestDir = filename:join(filename:dirname(Cwd), FileBase++"_test"),
+ case filelib:is_dir(TestDir) of
+ true ->
+ Wc = filename:join(TestDir, Wc0),
+ {ok,Vars,[],St0#state{file=FileBase,mod=Mod,
+ test_dir=TestDir,data_wc=Wc}};
+ false ->
+ {error,{no_test_directory,TestDir}}
+ end.
+
+%% Run any "Makefile.first" files first.
+%% XXX We should fake a failing test case if the make fails.
+
+run_preinits(Vars, Spec, St) ->
+ Wc = filename:join(St#state.data_wc, "Makefile.first"),
+ run_pre_makefiles(del_skipped_suite_data_dir(filelib:wildcard(Wc), Spec),
+ Vars, Spec, St),
+ {ok,Vars,Spec,St}.
+
+run_pre_makefiles([Makefile|Ms], Vars0, Spec0, St0) ->
+ Hooks = [fun run_pre_makefile/3],
+ case execute(Hooks, Vars0, Spec0, St0#state{makefile=Makefile}) of
+ {error,_Reason}=Error -> Error;
+ {ok,Vars,Spec,St} -> run_pre_makefiles(Ms, Vars, Spec, St)
+ end;
+run_pre_makefiles([], Vars, Spec, St) -> {ok,Vars,Spec,St}.
+
+run_pre_makefile(Vars, Spec, St) ->
+ Makefile = St#state.makefile,
+ Shortname = filename:basename(Makefile),
+ DataDir = filename:dirname(Makefile),
+ Make = ts_lib:var(make_command, Vars),
+ case ts_make:make(Make,DataDir, Shortname) of
+ ok -> {ok,Vars,Spec,St};
+ {error,_Reason}=Error -> Error
+ end.
+
+get_config_files() ->
+ TSConfig = "ts.config",
+ [TSConfig | case os:type() of
+ {unix,_} -> ["ts.unix.config"];
+ {win32,_} -> ["ts.win32.config"];
+ _ -> []
+ end].
+
+%% Makes the command to start up the Erlang node to run the tests.
+
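+%% Prefix every double quote with a backslash so that quoted arguments
+%% survive another level of shell quoting.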
+backslashify([$\\, $" | T]) ->
+ [$\\, $" | backslashify(T)];
+backslashify([$" | T]) ->
+ [$\\, $" | backslashify(T)];
+backslashify([H | T]) ->
+ [H | backslashify(T)];
+backslashify([]) ->
+ [].
+
+make_command(Vars, Spec, State) ->
+ {ok,Cwd} = file:get_cwd(),
+ TestDir = State#state.test_dir,
+ TestPath = filename:nativename(TestDir),
+ Erl = case os:getenv("TS_RUN_VALGRIND") of
+ false ->
+ atom_to_list(lib:progname());
+ _ ->
+ case State#state.file of
+ Dir when is_list(Dir) ->
+ os:putenv("VALGRIND_LOGFILE_PREFIX", Dir++"-");
+ _ ->
+ ok
+ end,
+ "cerl -valgrind" ++
+ case erlang:system_info(smp_support) of
+ true -> " -smp";
+ false -> ""
+ end
+ end,
+ Naming =
+ case ts_lib:var(longnames, Vars) of
+ true ->
+ " -name ";
+ false ->
+ " -sname "
+ end,
+ ExtraArgs =
+ case lists:keysearch(erl_start_args,1,Vars) of
+ {value,{erl_start_args,Args}} -> Args;
+ false -> ""
+ end,
+ CrashFile = filename:join(Cwd,State#state.file ++ "_erl_crash.dump"),
+ case filelib:is_file(CrashFile) of
+ true ->
+ io:format("ts_run: Deleting dump: ~ts\n",[CrashFile]),
+ file:delete(CrashFile);
+ false ->
+ ok
+ end,
+
+ %% If Common Test specific variables are needed, add them here
+    %% on the form: "{key1,value1}" "{key2,value2}" ...
+ NetDir = ts_lib:var(ts_net_dir, Vars),
+ TestVars = [ "\"{net_dir,\\\"",NetDir,"\\\"}\"" ],
+
+    %% NOTE: Do not use ' in these commands as it won't work on Windows
+ Cmd = [Erl, Naming, "test_server"
+ " -rsh ", ts_lib:var(rsh_name, Vars),
+ " -env PATH \"",
+ backslashify(lists:flatten([TestPath, path_separator(),
+ remove_path_spaces()])),
+ "\"",
+ " -env ERL_CRASH_DUMP ", CrashFile,
+ %% uncomment the line below to disable exception formatting
+ %% " -test_server_format_exception false",
+ " -boot start_sasl -sasl errlog_type error",
+ " -pz \"",Cwd,"\"",
+ " -ct_test_vars ",TestVars,
+ " -eval \"ts_run:ct_run_test(\\\"",TestDir,"\\\", ",
+ backslashify(lists:flatten(State#state.test_server_args)),")\""
+ " ",
+ ExtraArgs],
+ {ok, Vars, Spec, State#state{command=lists:flatten(Cmd)}}.
+
+
+run_batch(Vars, _Spec, State) ->
+ process_flag(trap_exit, true),
+ Command = State#state.command ++ " -noinput -s erlang halt",
+ ts_lib:progress(Vars, 1, "Command: ~ts~n", [Command]),
+ io:format(user, "Command: ~ts~n",[Command]),
+ Port = open_port({spawn, Command}, [stream, in, eof]),
+ Timeout = 30000 * case os:getenv("TS_RUN_VALGRIND") of
+ false -> 1;
+ _ -> 100
+ end,
+ tricky_print_data(Port, Timeout).
+
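+%% Print port output until eof. If nothing arrives within Timeout,
+%% ask epmd whether the test_server node is still registered and warn
+%% if it has died without sending eof.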
+tricky_print_data(Port, Timeout) ->
+ receive
+ {Port, {data, Bytes}} ->
+ io:put_chars(Bytes),
+ tricky_print_data(Port, Timeout);
+ {Port, eof} ->
+ Port ! {self(), close},
+ receive
+ {Port, closed} ->
+ true
+ end,
+ receive
+ {'EXIT', Port, _} ->
+ ok
+ after 1 -> % force context switch
+ ok
+ end
+ after Timeout ->
+ case erl_epmd:names() of
+ {ok,Names} ->
+ case is_testnode_dead(Names) of
+ true ->
+ io:put_chars("WARNING: No EOF, but "
+ "test_server node is down!\n");
+ false ->
+ tricky_print_data(Port, Timeout)
+ end;
+ _ ->
+ tricky_print_data(Port, Timeout)
+ end
+ end.
+
+is_testnode_dead([]) -> true;
+is_testnode_dead([{"test_server",_}|_]) -> false;
+is_testnode_dead([_|T]) -> is_testnode_dead(T).
+
+run_interactive(Vars, _Spec, State) ->
+ Command = State#state.command,
+ ts_lib:progress(Vars, 1, "Command: ~s~n", [Command]),
+ case ts_lib:var(os, Vars) of
+ "Windows 95" ->
+ %% Windows 95 strikes again! We must redirect standard
+ %% input and output for the `start' command, to force
+ %% standard input and output to the Erlang shell to be
+ %% connected to the newly started console.
+ %% Without these redirections, the Erlang shell would be
+ %% connected to the pipes provided by the port program
+ %% and there would be an inactive console window.
+ os:cmd("start < nul > nul w" ++ Command),
+ ok;
+ "Windows 98" ->
+ os:cmd("start < nul > nul w" ++ Command),
+ ok;
+ "Windows"++_ ->
+ os:cmd("start w" ++ Command),
+ ok;
+ _Other ->
+            %% Assuming ts and controller always run on Solaris
+ start_xterm(Command)
+ end.
+
+start_xterm(Command) ->
+ case os:find_executable("xterm") of
+ false ->
+ io:format("The `xterm' program was not found.\n"),
+ {error, no_xterm};
+ _Xterm ->
+ case os:getenv("DISPLAY") of
+ false ->
+ io:format("DISPLAY is not set.\n"),
+ {error, display_not_set};
+ Display ->
+ io:format("Starting xterm (DISPLAY=~s)...\n",
+ [Display]),
+ os:cmd("xterm -sl 10000 -e " ++ Command ++ "&"),
+ ok
+ end
+ end.
+
+path_separator() ->
+ case os:type() of
+ {win32, _} -> ";";
+ {unix, _} -> ":"
+ end.
+
+
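+%% Translate the ts options into the option list for ct:run_test/1,
+%% returned as a printed term so it can be embedded in the -eval
+%% argument built by make_command/3.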
+make_common_test_args(Args0, Options0, _Vars) ->
+ Trace =
+ case lists:keysearch(trace,1,Options0) of
+ {value,{trace,TI}} when is_tuple(TI); is_tuple(hd(TI)) ->
+ ok = file:write_file(?tracefile,io_lib:format("~p.~n",[TI])),
+ [{ct_trace,?tracefile}];
+ {value,{trace,TIFile}} when is_atom(TIFile) ->
+ [{ct_trace,atom_to_list(TIFile)}];
+ {value,{trace,TIFile}} ->
+ [{ct_trace,TIFile}];
+ false ->
+ []
+ end,
+ Cover =
+ case lists:keysearch(cover,1,Options0) of
+ {value,{cover, App, none, _Analyse}} ->
+ io:format("No cover file found for ~p~n",[App]),
+ [];
+ {value,{cover,_App,File,_Analyse}} ->
+ [{cover,to_list(File)},{cover_stop,false}];
+ false ->
+ []
+ end,
+
+ Logdir = case lists:keysearch(logdir, 1, Options0) of
+ {value,{logdir, _}} ->
+ [];
+ false ->
+ [{logdir,"../test_server"}]
+ end,
+
+ TimeTrap = [{scale_timetraps, true}],
+
+ {ConfigPath,
+ Options} = case {os:getenv("TEST_CONFIG_PATH"),
+ lists:keysearch(config, 1, Options0)} of
+ {_,{value, {config, Path}}} ->
+ {Path,lists:keydelete(config, 1, Options0)};
+ {false,false} ->
+ {"../test_server",Options0};
+ {Path,_} ->
+ {Path,Options0}
+ end,
+ ConfigFiles = [{config,[filename:join(ConfigPath,File)
+ || File <- get_config_files()]}],
+ io_lib:format("~100000p",[[{abort_if_missing_suites,true} |
+ Args0++Trace++Cover++Logdir++
+ ConfigFiles++Options++TimeTrap]]).
+
+to_list(X) when is_atom(X) ->
+ atom_to_list(X);
+to_list(X) when is_list(X) ->
+ X.
+
+%%
+%% Handling of paths containing spaces on Windows 2000 and XP
+%%
+remove_path_spaces() ->
+ Path = os:getenv("PATH"),
+ case os:type() of
+ {win32,nt} ->
+ remove_path_spaces(Path);
+ _ ->
+ Path
+ end.
+
+remove_path_spaces(Path) ->
+ SPath = split_path(Path),
+ [NSHead|NSTail] = lists:map(fun(X) -> filename:nativename(
+ filename:join(
+ translate_path(split_one(X))))
+ end,
+ SPath),
+ NSHead ++ lists:flatten([[$;|X] || X <- NSTail]).
+
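+%% Replace each path component with its alternate (short) name via
+%% file:altname/1 so that the entries contain no spaces.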
+translate_path(PList) ->
+ %io:format("translate_path([~p|~p]~n",[Base,PList]),
+ translate_path(PList,[]).
+
+
+translate_path([],_) ->
+ [];
+translate_path([PC | T],BaseList) ->
+ FullPath = filename:nativename(filename:join(BaseList ++ [PC])),
+ NewPC = case catch file:altname(FullPath) of
+ {ok,X} ->
+ X;
+ _ ->
+ PC
+ end,
+ %io:format("NewPC:~s, DirList:~p~n",[NewPC,DirList]),
+ NewBase = BaseList ++ [NewPC],
+ [NewPC | translate_path(T,NewBase)].
+
+split_one(Path) ->
+ filename:split(Path).
+
+split_path(Path) ->
+ string:tokens(Path,";").
diff --git a/lib/common_test/vsn.mk b/lib/common_test/vsn.mk
index 34ed657e01..bcd31293fb 100644
--- a/lib/common_test/vsn.mk
+++ b/lib/common_test/vsn.mk
@@ -1 +1 @@
-COMMON_TEST_VSN = 1.11.2
+COMMON_TEST_VSN = 1.12