aboutsummaryrefslogtreecommitdiffstats
path: root/lib/test_server/doc
diff options
context:
space:
mode:
Diffstat (limited to 'lib/test_server/doc')
-rw-r--r--lib/test_server/doc/html/.gitignore0
-rw-r--r--lib/test_server/doc/man3/.gitignore0
-rw-r--r--lib/test_server/doc/man6/.gitignore0
-rw-r--r--lib/test_server/doc/pdf/.gitignore0
-rw-r--r--lib/test_server/doc/src/Makefile138
-rw-r--r--lib/test_server/doc/src/basics_chapter.xml216
-rw-r--r--lib/test_server/doc/src/book.xml49
-rw-r--r--lib/test_server/doc/src/example_chapter.xml150
-rw-r--r--lib/test_server/doc/src/fascicules.xml18
-rw-r--r--lib/test_server/doc/src/make.dep24
-rw-r--r--lib/test_server/doc/src/notes.xml346
-rw-r--r--lib/test_server/doc/src/notes_history.xml112
-rw-r--r--lib/test_server/doc/src/part.xml45
-rw-r--r--lib/test_server/doc/src/part_notes.xml40
-rw-r--r--lib/test_server/doc/src/part_notes_history.xml38
-rw-r--r--lib/test_server/doc/src/ref_man.xml43
-rw-r--r--lib/test_server/doc/src/run_test_chapter.xml49
-rw-r--r--lib/test_server/doc/src/test_server.xml840
-rw-r--r--lib/test_server/doc/src/test_server_app.xml75
-rw-r--r--lib/test_server/doc/src/test_server_ctrl.xml771
-rw-r--r--lib/test_server/doc/src/test_spec_chapter.xml375
-rw-r--r--lib/test_server/doc/src/ts.xml592
-rw-r--r--lib/test_server/doc/src/why_test_chapter.xml140
-rw-r--r--lib/test_server/doc/src/write_framework_chapter.xml166
-rw-r--r--lib/test_server/doc/src/write_test_chapter.xml228
25 files changed, 4455 insertions, 0 deletions
diff --git a/lib/test_server/doc/html/.gitignore b/lib/test_server/doc/html/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/test_server/doc/html/.gitignore
diff --git a/lib/test_server/doc/man3/.gitignore b/lib/test_server/doc/man3/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/test_server/doc/man3/.gitignore
diff --git a/lib/test_server/doc/man6/.gitignore b/lib/test_server/doc/man6/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/test_server/doc/man6/.gitignore
diff --git a/lib/test_server/doc/pdf/.gitignore b/lib/test_server/doc/pdf/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/test_server/doc/pdf/.gitignore
diff --git a/lib/test_server/doc/src/Makefile b/lib/test_server/doc/src/Makefile
new file mode 100644
index 0000000000..e3c1b8ce92
--- /dev/null
+++ b/lib/test_server/doc/src/Makefile
@@ -0,0 +1,138 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2002-2009. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+include $(ERL_TOP)/make/target.mk
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+
+# ----------------------------------------------------
+# Application version
+# ----------------------------------------------------
+include ../../vsn.mk
+VSN=$(TEST_SERVER_VSN)
+APPLICATION=test_server
+
+# ----------------------------------------------------
+# Release directory specification
+# ----------------------------------------------------
+RELSYSDIR = $(RELEASE_PATH)/lib/$(APPLICATION)-$(VSN)
+
+# ----------------------------------------------------
+# Target Specs
+# ----------------------------------------------------
+XML_APPLICATION_FILES = ref_man.xml
+XML_REF3_FILES = \
+ test_server_ctrl.xml \
+ test_server.xml
+XML_REF3_INTERNAL = \
+ ts.xml
+XML_REF6_FILES = test_server_app.xml
+
+XML_PART_FILES = \
+ part.xml \
+ part_notes.xml \
+ part_notes_history.xml
+
+XML_CHAPTER_FILES = \
+ basics_chapter.xml \
+ run_test_chapter.xml \
+ write_test_chapter.xml \
+ test_spec_chapter.xml \
+ example_chapter.xml \
+ write_framework_chapter.xml \
+ notes.xml \
+ notes_history.xml
+
+BOOK_FILES = book.xml
+
+GIF_FILES =
+
+# ----------------------------------------------------
+
+HTML_FILES = $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%.html) \
+ $(XML_PART_FILES:%.xml=$(HTMLDIR)/%.html)
+
+HTML_INTERNAL = $(XML_REF3_INTERNAL:%.xml=$(HTMLDIR)/%.html)
+
+INFO_FILE = ../../info
+
+MAN3_FILES = $(XML_REF3_FILES:%.xml=$(MAN3DIR)/%.3)
+MAN3_INTERNAL = $(XML_REF3_INTERNAL:%.xml=$(MAN3DIR)/%.3)
+MAN6_FILES = $(XML_REF6_FILES:%_app.xml=$(MAN6DIR)/%.6)
+
+HTML_REF_MAN_FILE = $(HTMLDIR)/index.html
+
+TOP_PDF_FILE = $(PDFDIR)/test_server-$(VSN).pdf
+
+# ----------------------------------------------------
+# FLAGS
+# ----------------------------------------------------
+XML_FLAGS +=
+DVIPS_FLAGS +=
+
+# ----------------------------------------------------
+# Targets
+# ----------------------------------------------------
+$(HTMLDIR)/%.gif: %.gif
+ $(INSTALL_DATA) $< $@
+
+docs: pdf html man
+
+pdf: $(TOP_PDF_FILE)
+
+html: gifs $(HTML_REF_MAN_FILE)
+
+man: $(MAN3_FILES) $(MAN3_INTERNAL) $(MAN6_FILES)
+
+gifs: $(GIF_FILES:%=$(HTMLDIR)/%)
+
+debug opt:
+
+clean clean_docs:
+ rm -rf $(HTMLDIR)/*
+ rm -f $(MAN3DIR)/*
+ rm -f $(MAN6DIR)/*
+ rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
+ rm -f errs core *~
+
+# ----------------------------------------------------
+# Release Target
+# ----------------------------------------------------
+include $(ERL_TOP)/make/otp_release_targets.mk
+
+release_docs_spec: docs
+ $(INSTALL_DIR) $(RELSYSDIR)/doc/pdf
+ $(INSTALL_DATA) $(TOP_PDF_FILE) $(RELSYSDIR)/doc/pdf
+ $(INSTALL_DIR) $(RELSYSDIR)/doc/html
+ $(INSTALL_DATA) $(HTMLDIR)/* \
+ $(RELSYSDIR)/doc/html
+ $(INSTALL_DATA) $(INFO_FILE) $(RELSYSDIR)
+ $(INSTALL_DIR) $(RELEASE_PATH)/man/man3
+ $(INSTALL_DATA) $(MAN3_FILES) $(RELEASE_PATH)/man/man3
+ $(INSTALL_DIR) $(RELEASE_PATH)/man/man6
+ $(INSTALL_DATA) $(MAN6_FILES) $(RELEASE_PATH)/man/man6
+
+release_spec:
+
+release_tests_spec:
+
+# ----------------------------------------------------
+# Include dependency
+# ----------------------------------------------------
+
+include make.dep
diff --git a/lib/test_server/doc/src/basics_chapter.xml b/lib/test_server/doc/src/basics_chapter.xml
new file mode 100644
index 0000000000..a96cc88075
--- /dev/null
+++ b/lib/test_server/doc/src/basics_chapter.xml
@@ -0,0 +1,216 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server Basics</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>basics_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Introduction</title>
+ <p><em>Test Server</em> is a portable test tool for automated
+ testing of Erlang programs and OTP applications. It provides an
+ interface for running test programs directly with Test Server
+ as well as an interface for integrating Test Server
+ with a framework application. The latter makes it possible to use
+ Test Server as the engine of a higher level test tool
+ application.</p>
+
+ <p>It is strongly recommended that Test Server be used from inside
+ a framework application, rather than interfaced directly for
+ running test programs. Test Server can be pretty difficult to use
+ since it's a very general and quite extensive and complex
+ application. Furthermore, the <c>test_server_ctrl</c> functions
+ are not meant to be used from within the actual test programs. The
+ framework should handle communication with Test Server and deal
+ with the more complex aspects of this interaction automatically so
+ that a higher level interface may be provided for the tester. For
+ test tool usage to be productive, a simpler, more intuitive and
+ (if required) more specific interface is required than what Test Server
+ can provide.</p>
+
+ <p>OTP delivers a general purpose framework for Test Server, called
+ <em>Common Test</em>. This application is a tool well suited for
+ automated black box testing of target systems of <em>any kind</em>
+ (not necessarily implemented in Erlang). Common Test is also a very
+ useful tool for white box testing of Erlang programs and OTP
+ applications. Unless a more specific functionality and/or user
+ interface is required (in which case you might need to implement
+ your own framework), Common Test should do the job for
+ you. Please read the Common Test User's Guide and reference manual
+ for more information.</p>
+
+ <p>Under normal circumstances, knowledge about the Test Server
+ application is not required for using the Common Test framework.
+ However, if you want to use Test Server without a framework,
+ or learn how to integrate it with your own framework, please read on...
+ </p>
+ </section>
+ <section>
+ <title>Getting started</title>
+ <p>Testing when using Test Server is done by running test
+ suites. A test suite is a number of test cases, where each test
+ case tests one or more things. The test case is the smallest unit
+ that the test server deals with. One or more test cases are
+ grouped together into one ordinary Erlang module, which is called
+ a test suite. Several test suite modules can be grouped together
+ in special test specification files representing whole application
+ and/or system test "jobs".
+ </p>
+ <p>The test suite Erlang module must follow a certain interface,
+ which is specified by Test Server. See the section on writing
+ test suites for details about this.
+ </p>
+ <p>Each test case is considered a success if it returns to the
+ caller, no matter what the returned value is. An exception to this
+ is the return value <c>{skip, Reason}</c> which indicates that the
+ test case is skipped. A failure is specified as a crash, no matter
+ what the crash reason is.
+ </p>
+ <p>As a test suite runs, all information (including output to
+ stdout) is recorded in several different log files. A minimum of
+ information is displayed to the user console. This only include
+ start and stop information, plus a note for each failed test case.
+ </p>
+ <p>The result from each test case is recorded in an HTML log file
+ which is created for each test run. Every test case gets one row
+ in a table presenting total time, whether the case was successful
+ or not, if it was skipped, and possibly also a comment. The HTML
+ file has links to each test case's logfile, which may be viewed
+ from e.g. Netscape or any other HTML capable browser.
+ </p>
+ <p>The Test Server consists of three parts:
+ </p>
+ <list type="bulleted">
+ <item>The part that executes the test suites on target and
+ provides support for the test suite author is called
+ <c>test_server</c>. This is described in the chapter about
+ writing test cases in this user's guide, and in the reference
+ manual for the <c>test_server</c> module.</item>
+ <item>The controlling part, which provides the low level
+ operator interface, starts and stops the target node (if remote
+ target) and slave nodes and writes log files, is called
+ <c>test_server_ctrl</c>. The Test Server Controller should not
+ be used directly when running tests. Instead a framework built
+ on top of it should be used. More information
+ about how to write your own framework can be found
+ in this user's guide and in the reference manual for the
+ <c>test_server_ctrl</c> module.</item>
+ </list>
+ </section>
+
+ <section>
+ <title>Definition of terms</title>
+ <taglist>
+ <tag><em>conf(iguration) case</em></tag>
+ <item>This is a group of test cases which need some specific
+ configuration. A conf case contains an initiation function which
+ sets up a specific configuration, one or more test cases using
+ this configuration, and a cleanup function which restores the
+ configuration. A conf case is specified in a test specification
+ either like this:<c>{conf,InitFunc,ListOfCases,CleanupFunc}</c>,
+ or this: <c>{conf,Properties,InitFunc,ListOfCases,CleanupFunc}</c>
+ </item>
+ <tag><em>datadir</em></tag>
+ <item>Data directory for a test suite. This directory contains
+ any files used by the test suite, e.g. additional erlang
+ modules, c code or data files. If the data directory contains
+ code which must be compiled before the test suite is run, it
+ should also contain a makefile source called Makefile.src
+ defining how to compile.
+ </item>
+ <tag><em>documentation clause</em></tag>
+ <item>One of the function clauses in a test case. This clause
+ shall return a list of strings describing what the test case
+ tests.
+ </item>
+ <tag><em>execution clause</em></tag>
+ <item>One of the function clauses in a test case. This clause
+ implements the actual test case, i.e. calls the functions that
+ shall be tested and checks results. The clause shall crash if it
+ fails.
+ </item>
+ <tag><em>major log file</em></tag>
+ <item>This is the test suites log file.
+ </item>
+ <tag><em>Makefile.src</em></tag>
+ <item>This file is used by the test server framework to generate
+ a makefile for a datadir. It contains some special characters
+ which are replaced according to the platform currently tested.
+ </item>
+ <tag><em>minor log file</em></tag>
+ <item>This is a separate log file for each test case.
+ </item>
+ <tag><em>privdir</em></tag>
+ <item>Private directory for a test suite. This directory should
+ be used when the test suite needs to write to files.
+ </item>
+ <tag><em>skip case</em></tag>
+ <item>A test case which shall be skipped.
+ </item>
+ <tag><em>specification clause</em></tag>
+ <item>One of the function clauses in a test case. This clause
+ shall return an empty list, a test specification or
+ <c>{skip,Reason}</c>. If an empty list is returned, it means
+ that the test case shall be executed, and so it must also have
+ an execution clause. Note that the specification clause is
+ always executed on the controller node, i.e. not on the target
+ node.
+ </item>
+ <tag><em>test case</em></tag>
+ <item>A single test included in a test suite. Typically it tests
+ one function in a module or application. A test case is
+ implemented as a function in a test suite module. The function
+ can have three clauses, the documentation-, specification- and
+ execution clause.
+ </item>
+ <tag><em>test specification</em></tag>
+ <item>A specification of which test suites and test cases to
+ run. There can be test specifications on three different levels
+ in a test. The top level is a test specification file which
+ roughly specifies what to test for a whole application. Then
+ there is a test specification for each test suite returned from
+ the <c>all(suite)</c> function in the suite. And there can also
+ be a test specification returned from the specification clause
+ of a test case.
+ </item>
+ <tag><em>test specification file</em></tag>
+ <item>This is a text file containing the test specification for
+ an application. The file has the extension ".spec" or
+ ".spec.Platform", where Platform is e.g. "vxworks".
+ </item>
+ <tag><em>test suite</em></tag>
+ <item>An erlang module containing a collection of test cases for
+ a specific application or module.
+ </item>
+ <tag><em>topcase</em></tag>
+ <item>The first "command" in a test specification file. This
+ command contains the test specification, like this:
+ <c>{topcase,TestSpecification}</c></item>
+ </taglist>
+ </section>
+</chapter>
+
diff --git a/lib/test_server/doc/src/book.xml b/lib/test_server/doc/src/book.xml
new file mode 100644
index 0000000000..960ce48cf7
--- /dev/null
+++ b/lib/test_server/doc/src/book.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE book SYSTEM "book.dtd">
+
+<book xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header titlestyle="normal">
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date>2002-07-11</date>
+ <rev></rev>
+ <file>book.xml</file>
+ </header>
+ <insidecover>
+ </insidecover>
+ <pagetext>Test Server</pagetext>
+ <preamble>
+ <contents level="2"></contents>
+ </preamble>
+ <parts lift="no">
+ <xi:include href="part.xml"/>
+ </parts>
+ <applications>
+ <xi:include href="ref_man.xml"/>
+ </applications>
+ <releasenotes>
+ <xi:include href="notes.xml"/>
+ </releasenotes>
+ <listofterms></listofterms>
+ <index></index>
+</book>
+
diff --git a/lib/test_server/doc/src/example_chapter.xml b/lib/test_server/doc/src/example_chapter.xml
new file mode 100644
index 0000000000..8a06526528
--- /dev/null
+++ b/lib/test_server/doc/src/example_chapter.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Examples</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>example_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Test suite</title>
+ <code type="none">
+-module(my_SUITE).
+
+-export([all/1,
+ not_started/1, not_started_func1/1, not_started_func2/1,
+ start/1, stop/1,
+ func1/1, func2/1
+ ]).
+
+-export([init_per_testcase/2, end_per_testcase/2]).
+
+-include("test_server.hrl").
+
+-define(default_timeout, ?t:minutes(1)).
+
+init_per_testcase(_Case, Config) ->
+ ?line Dog=?t:timetrap(?default_timeout),
+ [{watchdog, Dog}|Config].
+end_per_testcase(_Case, Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+all(suite) ->
+ %% Test specification on test suite level
+ [not_started,
+ {conf, start, [func1, func2], stop}].
+
+not_started(suite) ->
+ %% Test specification on test case level
+ [not_started_func1, not_started_func2];
+not_started(doc) ->
+ ["Testing all functions when application is not started"].
+%% No execution clause unless the specification clause returns [].
+
+
+not_started_func1(suite) ->
+ [];
+not_started_func1(doc) ->
+ ["Testing function 1 when application is not started"].
+not_started_func1(Config) when list(Config) ->
+ ?line {error, not_started} = myapp:func1(dummy_ref,1),
+ ?line {error, not_started} = myapp:func1(dummy_ref,2),
+ ok.
+
+not_started_func2(suite) ->
+ [];
+not_started_func2(doc) ->
+ ["Testing function 2 when application is not started"].
+not_started_func2(Config) when list(Config) ->
+ ?line {error, not_started} = myapp:func2(dummy_ref,1),
+ ?line {error, not_started} = myapp:func2(dummy_ref,2),
+ ok.
+
+
+%% No specification clause needed for an init function in a conf case!!!
+start(doc) ->
+ ["Testing start of my application."];
+start(Config) when list(Config) ->
+ ?line Ref = myapp:start(),
+ case erlang:whereis(my_main_process) of
+ Pid when pid(Pid) ->
+ [{myapp_ref,Ref}|Config];
+ undefined ->
+ %% Since this is the init function in a conf case, the rest of the
+ %% cases in the conf case will be skipped if this case fails.
+ ?t:fail("my_main_process did not start")
+ end.
+
+func1(suite) ->
+ [];
+func1(doc) ->
+ ["Test that func1 returns ok when argument is 1 and error if argument is 2"];
+func1(Config) when list(Config) ->
+ ?line Ref = ?config(myapp_ref,Config),
+ ?line ok = myapp:func1(Ref,1),
+ ?line error = myapp:func1(Ref,2),
+ ok.
+
+func2(suite) ->
+ [];
+func2(doc) ->
+ ["Test that func1 returns ok when argument is 3 and error if argument is 4"];
+func2(Config) when list(Config) ->
+ ?line Ref = ?config(myapp_ref,Config),
+ ?line ok = myapp:func2(Ref,3),
+ ?line error = myapp:func2(Ref,4),
+ ok.
+
+%% No specification clause needed for a cleanup function in a conf case!!!
+stop(doc) ->
+ ["Testing termination of my application"];
+stop(Config) when list(Config) ->
+ ?line Ref = ?config(myapp_ref,Config),
+ ?line ok = myapp:stop(Ref),
+ case erlang:whereis(my_main_process) of
+ undefined ->
+ lists:keydelete(myapp_ref,1,Config);
+ Pid when pid(Pid) ->
+ ?t:fail("my_main_process did not stop")
+ end.
+ </code>
+ </section>
+
+ <section>
+ <title>Test specification file</title>
+ <p><em><c>myapp.spec:</c></em></p>
+ <code type="none">
+{topcase, {dir, "../myapp_test"}}. % Test specification on top level </code>
+ <p><em><c>myapp.spec.vxworks:</c></em></p>
+ <code type="none">
+{topcase, {dir, "../myapp_test"}}. % Test specification on top level
+{skip,{my_SUITE,func2,"Not applicable on VxWorks"}}. </code>
+ </section>
+</chapter>
+
+
diff --git a/lib/test_server/doc/src/fascicules.xml b/lib/test_server/doc/src/fascicules.xml
new file mode 100644
index 0000000000..0678195e07
--- /dev/null
+++ b/lib/test_server/doc/src/fascicules.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE fascicules SYSTEM "fascicules.dtd">
+
+<fascicules>
+ <fascicule file="part" href="part_frame.html" entry="no">
+ User's Guide
+ </fascicule>
+ <fascicule file="ref_man" href="ref_man_frame.html" entry="yes">
+ Reference Manual
+ </fascicule>
+ <fascicule file="part_notes" href="part_notes_frame.html" entry="no">
+ Release Notes
+ </fascicule>
+ <fascicule file="" href="../../../../doc/print.html" entry="no">
+ Off-Print
+ </fascicule>
+</fascicules>
+
diff --git a/lib/test_server/doc/src/make.dep b/lib/test_server/doc/src/make.dep
new file mode 100644
index 0000000000..ee9100bd08
--- /dev/null
+++ b/lib/test_server/doc/src/make.dep
@@ -0,0 +1,24 @@
+# ----------------------------------------------------
+# >>>> Do not edit this file <<<<
+# This file was automaticly generated by
+# /home/otp/bin/docdepend
+# ----------------------------------------------------
+
+
+# ----------------------------------------------------
+# TeX files that the DVI file depend on
+# ----------------------------------------------------
+
+book.dvi: basics_chapter.tex book.tex example_chapter.tex \
+ part.tex ref_man.tex run_test_chapter.tex \
+ test_server_app.tex test_server_ctrl.tex \
+ test_server.tex test_spec_chapter.tex \
+ write_framework_chapter.tex \
+ write_test_chapter.tex
+
+# ----------------------------------------------------
+# Source inlined when transforming from source to LaTeX
+# ----------------------------------------------------
+
+book.tex: ref_man.xml
+
diff --git a/lib/test_server/doc/src/notes.xml b/lib/test_server/doc/src/notes.xml
new file mode 100644
index 0000000000..a71c18b5b7
--- /dev/null
+++ b/lib/test_server/doc/src/notes.xml
@@ -0,0 +1,346 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2004</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>APPLICATION Release Notes</title>
+ <prepared>Peter Andersson</prepared>
+ <responsible>Peter Andersson</responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date>2007-11-30</date>
+ <rev>A</rev>
+ <file>notes.xml</file>
+ </header>
+
+<section><title>Test_Server 3.3.5</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ If the init_per_testcase/2 function fails, the test case
+ now gets marked and counted as auto skipped, not user
+ skipped (which would previously happen).</p>
+ <p>
+ Own Id: OTP-8289</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ The documentation is now built with open source tools
+ (xsltproc and fop) that exists on most platforms. One
+ visible change is that the frames are removed.</p>
+ <p>
+ Own Id: OTP-8201</p>
+ </item>
+ <item>
+ <p>
+ It is now possible to fail a test case from the
+ end_per_testcase/2 function, by returning {fail,Reason}.</p>
+ <p>
+ Own Id: OTP-8284</p>
+ </item>
+ <item>
+ <p>
+ It is now possible to fail a test case by having the
+ end_tc/3 framework function return {fail,Reason} for the
+ test case.</p>
+ <p>
+ Own Id: OTP-8285</p>
+ </item>
+ <item>
+ <p>
+ The test_server framework API (e.g. the end_tc/3
+ function) has been modified. See the test_server_ctrl
+ documentation for details.</p>
+ <p>
+ Own Id: OTP-8286 Aux Id: OTP-8285, OTP-8287 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.3.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ When running a suite starting with a test case group,
+ Test Server crashed if init_per_suite/1 exited or
+ returned skip. This has been fixed.</p>
+ <p>
+ Own Id: OTP-8105 Aux Id: OTP-8089 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Various updates and fixes in Common Test and Test Server.</p>
+ <p>
+ Own Id: OTP-8045 Aux Id: OTP-8089,OTP-8105,OTP-8163 </p>
+ </item>
+ <item>
+ <p>
+ Errors in coverage data collection and analysis were
+ difficult to detect. The logging has been improved so
+ that more information about e.g. imported and missing
+ modules is printed to the html log files.</p>
+ <p>
+ Own Id: OTP-8163 Aux Id: seq11374 </p>
+ </item>
+ <item>
+ <p>
+ The Common Test HTML overview pages have been improved.
+ It is now possible to see if a test case has been skipped
+ explicitly or because a configuration function has
+ failed. Also, the history page (all_runs.html) now has
+ scrolling text displaying the test names. The old format
+ (showing names as a truncated string) can still be
+ generated by means of the flag/option 'basic_html'.</p>
+ <p>
+ Own Id: OTP-8177</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.3.2</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Various corrections and improvements of Common Test and
+ Test Server.</p>
+ <p>
+ Own Id: OTP-7981</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.3.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Minor updates and corrections.</p>
+ <p>
+ Own Id: OTP-7897</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.3</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ The conf case in Test Server has been extended with
+ properties that make it possible to execute test cases in
+ parallel, in sequence and in shuffled order. It is now
+ also possible to repeat test cases according to different
+ criterias. The properties can be combined, making it
+ possible to e.g. repeat a conf case a certain number of
+ times and execute the test cases in different (random)
+ order every time. The properties are specified in a list
+ in the conf case definition: {conf, Properties, InitCase,
+ TestCases, EndCase}. The available properties are:
+ parallel, sequence, shuffle, repeat, repeat_until_all_ok,
+ repeat_until_any_ok, repeat_until_any_fail,
+ repeat_until_all_fail.</p>
+ <p>
+ Own Id: OTP-7511 Aux Id: OTP-7839 </p>
+ </item>
+ <item>
+ <p>The test server starts Cover on nodes of the same
+ version as the test server itself only.</p>
+ <p>
+ Own Id: OTP-7699</p>
+ </item>
+ <item>
+ <p>
+ The Erlang mode for Emacs has been updated with new and
+ modified skeletons for Common Test and TS. Syntax for
+ test case groups in Common Test (and conf cases with
+ properties in TS) has been added and a new minimal Common
+ Test suite skeleton has been introduced.</p>
+ <p>
+ Own Id: OTP-7856</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+<section><title>Test_Server 3.2.4.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ The step functionality in Common Test (based on
+ interaction with Debugger) was broken. This has been
+ fixed, and some new step features have also been added.
+ Please see the Common Test User's Guide for details.</p>
+ <p>
+ Own Id: OTP-7800 Aux Id: seq11106 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.2.4</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Miscellaneous updates.</p>
+ <p>
+ Own Id: OTP-7527</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.2.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ When a testcase terminated due to a timetrap, io sent to
+ the group leader from framework:end_tc/3 (using
+ ct:pal/2/3 or ct:log/2/3) would cause deadlock. This has
+ been fixed.</p>
+ <p>
+ Own Id: OTP-7447 Aux Id: seq11010 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Various updates and improvements, plus some minor bug
+ fixes, have been implemented in Common Test and Test
+ Server.</p>
+ <p>
+ Own Id: OTP-7112</p>
+ </item>
+ <item>
+ <p>
+ It is now possible, by means of the new function
+ ct:abort_current_testcase/1 or
+ test_server_ctrl:abort_current_testcase/1, to abort the
+ currently executing test case.</p>
+ <p>
+ Own Id: OTP-7518 Aux Id: OTP-7112 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.2.2</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p><c>erlang:system_info/1</c> now accepts the
+ <c>logical_processors</c>, and <c>debug_compiled</c>
+ arguments. For more info see the, <c>erlang(3)</c>
+ documentation.</p> <p>The scale factor returned by
+ <c>test_server:timetrap_scale_factor/0</c> is now also
+ effected if the emulator uses a larger amount of
+ scheduler threads than the amount of logical processors
+ on the system. </p>
+ <p>
+ Own Id: OTP-7175</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.2.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ When init_per_suite or end_per_suite terminated due to
+ runtime failure, test_server failed to format the line
+ number information properly and crashed. This error has
+ now been fixed.</p>
+ <p>
+ Own Id: OTP-7091</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Test_Server 3.2.0</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Test Server is a portable test server for automated
+ application testing. The server can run test suites on
+ local or remote targets and log progress and results to
+ HTML pages. The main purpose of Test Server is to act as
+ engine inside customized test tools. A callback interface
+ for such framework applications is provided.</p>
+ <p>
+ Own Id: OTP-6989</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+</chapter>
+
diff --git a/lib/test_server/doc/src/notes_history.xml b/lib/test_server/doc/src/notes_history.xml
new file mode 100644
index 0000000000..0392bd74a2
--- /dev/null
+++ b/lib/test_server/doc/src/notes_history.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2006</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server Release Notes History</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+
+ <section>
+ <title>Test Server 3.1.1</title>
+
+ <section>
+ <title>Improvements and new features</title>
+ <list type="bulleted">
+ <item>
+ <p>Added functions <c>test_server:break/1</c> and
+ <c>test_server:continue/0</c> for semiautomatic testing.</p>
+ <p><c>test_server:timetrap/1</c> can now also take
+          <c>{hours,H} | {minutes,M} | {seconds,S}</c>.</p>
+ <p>Added function
+ <c>test_server_ctrl:multiply_timetraps/1</c>,
+ <c>test_server_ctrl:add_case/3</c>,
+ <c>test_server_ctrl:add_cases/2/3</c>.</p>
+ <p>Added test suite functions <c>init_per_suite/1</c> and
+ <c>end_per_suite/1</c>.</p>
+ <p><c>fin_per_testcase/2</c> is changed to
+ <c>end_per_testcase/2</c>. <c>fin_per_testcase</c> is kept
+ for backwards compatibility.</p>
+ <p>Added support for writing own test server frameworks.
+ Callback functions <c>init_tc/1</c>, <c>end_tc/3</c>,
+ <c>get_suite/2</c>, <c>report/2</c>, <c>warn/1</c>.</p>
+ </item>
+ </list>
+ </section>
+ </section>
+
+ <section>
+ <title>Test Server 3.1</title>
+
+ <section>
+ <title>Improvements and New Features</title>
+ <list type="bulleted">
+ <item>
+ <p>Added the options <c>cover</c> and <c>cover_details</c>
+ to <c>ts:run</c>. When one of these options is used,
+ the tested application will be cover compiled
+ before the test is run. The cover compiled code will also
+ be loaded on all slave or peer nodes started with
+ <c>test_server:start_node</c>. When the test is completed
+ coverage data from all nodes is collected and merged, and
+ presented in the coverage log to which there will be a link
+ from the test suite result page (i.e. the one with the
+ heading "Test suite ... results").</p>
+ <p>The <c>cover_details</c> option will do
+ <c>cover:analyse_to_file</c> for each cover compiled module,
+ while the <c>cover</c> option only will produce a list of
+ modules and the number of covered/uncovered lines in each
+ module.</p>
+ <p>To make it possible to run all test from a script (like in
+ the OTP daily builds), the following is added:
+ <c>ts:run([all_tests | Options])</c>.</p>
+ <p>This means that e.g. the following is possible:
+ <c>erl -s ts run all_tests batch cover</c>.</p>
+ <p>Note that it is also possible to run tests with cover even
+ if you don't use <c>ts</c>.
+ See <c>test_server_ctrl:cover/2/3</c>.</p>
+ <p>Own Id: OTP-4703</p>
+ </item>
+ <item>
+ <p>Removed module <c>ts_save.erl</c> and function
+ <c>ts:save/0/1</c><em>(incompatible)</em>.</p>
+ <p>Added config variable <c>ipv6_hosts</c> to
+ <c>ts:install/1</c> and test spec file.</p>
+ <p>No longer removing duplicates of test cases from test spec
+ <em>(incompatible)</em>.</p>
+ <p>Added function <c>test_server:run_on_shielded_node/2</c>.</p>
+ <p>Creation of html files for test suite source does no longer
+ crash if suite contains more than 9999 lines of code.</p>
+ <p>Added functionality for cross cover compilation,
+ i.e. collection of cover data from all tests.</p>
+ <p>Multiplying timetrap times with 10 when running with cover.</p>
+ <p>Added <c>ts:r/3</c> for running tests with cover.</p>
+ <p>*** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>Own Id: OTP-5040</p>
+ </item>
+ </list>
+ </section>
+ </section>
+</chapter>
+
diff --git a/lib/test_server/doc/src/part.xml b/lib/test_server/doc/src/part.xml
new file mode 100644
index 0000000000..fdcd3d274e
--- /dev/null
+++ b/lib/test_server/doc/src/part.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server User's Guide</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date>2002-07-11</date>
+ <rev></rev>
+ </header>
+ <description>
+ <p><em>Test Server</em> is a portable test server for
+ automated application testing. The server can run test suites
+ on local or remote targets and log progress and results to HTML
+ pages. The main purpose of Test Server is to act as engine
+ inside customized test tools. A callback interface for
+ such framework applications is provided.</p>
+ </description>
+ <xi:include href="basics_chapter.xml"/>
+ <xi:include href="test_spec_chapter.xml"/>
+ <xi:include href="write_test_chapter.xml"/>
+ <xi:include href="run_test_chapter.xml"/>
+ <xi:include href="write_framework_chapter.xml"/>
+ <xi:include href="example_chapter.xml"/>
+</part>
+
diff --git a/lib/test_server/doc/src/part_notes.xml b/lib/test_server/doc/src/part_notes.xml
new file mode 100644
index 0000000000..2347f64ca1
--- /dev/null
+++ b/lib/test_server/doc/src/part_notes.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>2004</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server Release Notes</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <description>
+ <p>The <em>Test Server</em> is a portable test server for
+ application testing. The test server can run automatic test suites
+ on local or remote target and log progress and results to HTML
+ pages. It also provides some support for test suite authors.</p>
+ <p>For information about older versions, see
+ <url href="part_notes_history_frame.html">Release Notes History</url>.</p>
+ </description>
+ <xi:include href="notes.xml"/>
+</part>
+
diff --git a/lib/test_server/doc/src/part_notes_history.xml b/lib/test_server/doc/src/part_notes_history.xml
new file mode 100644
index 0000000000..556d172755
--- /dev/null
+++ b/lib/test_server/doc/src/part_notes_history.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part>
+ <header>
+ <copyright>
+ <year>2006</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server Release Notes History</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <description>
+ <p>The <em>Test Server</em> is a portable test server for
+ application testing. The test server can run automatic test suites
+ on local or remote target and log progress and results to HTML
+ pages. It also provides some support for test suite authors.</p>
+ </description>
+ <include file="notes_history"></include>
+</part>
+
diff --git a/lib/test_server/doc/src/ref_man.xml b/lib/test_server/doc/src/ref_man.xml
new file mode 100644
index 0000000000..17d6093dc0
--- /dev/null
+++ b/lib/test_server/doc/src/ref_man.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE application SYSTEM "application.dtd">
+
+<application xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server Reference Manual</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>ref_man.xml</file>
+ </header>
+ <description>
+ <p><em>Test Server</em> is a portable test server for
+ automated application testing. The server can run test suites
+ on local or remote targets and log progress and results to HTML
+ pages. The main purpose of Test Server is to act as engine
+ inside customized test tools. A callback interface for
+ such framework applications is provided.</p>
+ </description>
+ <xi:include href="test_server_app.xml"/>
+ <xi:include href="test_server_ctrl.xml"/>
+ <xi:include href="test_server.xml"/>
+</application>
+
diff --git a/lib/test_server/doc/src/run_test_chapter.xml b/lib/test_server/doc/src/run_test_chapter.xml
new file mode 100644
index 0000000000..36bd41da1f
--- /dev/null
+++ b/lib/test_server/doc/src/run_test_chapter.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Running Test Suites</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>run_test_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Using the test server controller</title>
+ <p>The test server controller provides a low level interface to
+ all the Test Server functionality. It is possible to use this
+ interface directly, but it is recommended to use a framework
+ such as <em>Common Test</em> instead. If no existing framework
+ suits your needs, you could of course build your own
+ on top of the test server controller. Some information about how
+      to do this can be found in the section named "Writing your own
+      test server framework" in the Test Server User's Guide.
+ </p>
+ <p>For information about using the controller directly, please see
+ all available functions in the reference manual for
+ <c>test_server_ctrl</c>.
+ </p>
+ </section>
+</chapter>
+
diff --git a/lib/test_server/doc/src/test_server.xml b/lib/test_server/doc/src/test_server.xml
new file mode 100644
index 0000000000..6e75425862
--- /dev/null
+++ b/lib/test_server/doc/src/test_server.xml
@@ -0,0 +1,840 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2007</year>
+ <year>2008</year>
+ <holder>Ericsson AB, All Rights Reserved</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ The Initial Developer of the Original Code is Ericsson AB.
+ </legalnotice>
+
+ <title>test_server</title>
+ <prepared>Siri Hansen</prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>test_server_ref.sgml</file>
+ </header>
+ <module>test_server</module>
+ <modulesummary>This module provides support for test suite authors.</modulesummary>
+ <description>
+ <p>The <c>test_server</c> module aids the test suite author by providing
+ various support functions. The supported functionality includes:
+ </p>
+ <list type="bulleted">
+ <item>Logging and timestamping
+ </item>
+ <item>Capturing output to stdout
+ </item>
+ <item>Retrieving and flushing the message queue of a process
+ </item>
+ <item>Watchdog timers, process sleep, time measurement and unit
+ conversion
+ </item>
+ <item>Private scratch directory for all test suites
+ </item>
+ <item>Start and stop of slave- or peer nodes</item>
+ </list>
+ <p>For more information on how to write test cases and for
+ examples, please see the Test Server User's Guide.
+ </p>
+ </description>
+
+ <section>
+ <title>TEST SUITE SUPPORT FUNCTIONS</title>
+ <p>The following functions are supposed to be used inside a test
+ suite.
+ </p>
+ </section>
+ <funcs>
+ <func>
+ <name>os_type() -> OSType</name>
+ <fsummary>Returns the OS type of the target node</fsummary>
+ <type>
+ <v>OSType = term()</v>
+ <d>This is the same as returned from <c>os:type/0</c></d>
+ </type>
+ <desc>
+ <p>This function can be called on controller or target node, and
+ it will always return the OS type of the target node.</p>
+ </desc>
+ </func>
+ <func>
+ <name>fail()</name>
+ <name>fail(Reason)</name>
+ <fsummary>Makes the test case fail.</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ <d>The reason why the test case failed.</d>
+ </type>
+ <desc>
+ <p>This will make the test suite fail with a given reason, or
+ with <c>suite_failed</c> if no reason was given. Use this
+ function if you want to terminate a test case, as this will
+ make it easier to read the log- and HTML files. <c>Reason</c>
+ will appear in the comment field in the HTML log.</p>
+ </desc>
+ </func>
+ <func>
+      <name>timetrap(Timeout) -> Handle</name>
+ <fsummary></fsummary>
+ <type>
+ <v>Timeout = integer() | {hours,H} | {minutes,M} | {seconds,S}</v>
+ <v>H = M = S = integer()</v>
+ <v>Pid = pid()</v>
+        <d>The process that is to be timetrapped (<c>self()</c> by default)</d>
+ </type>
+ <desc>
+ <p>Sets up a time trap for the current process. An expired
+ timetrap kills the process with reason
+ <c>timetrap_timeout</c>. The returned handle is to be given
+ as argument to <c>timetrap_cancel</c> before the timetrap
+ expires. If <c>Timeout</c> is an integer, it is expected to
+ be milliseconds.</p>
+ <note>
+ <p>If the current process is trapping exits, it will not be killed
+ by the exit signal with reason <c>timetrap_timeout</c>.
+ If this happens, the process will be sent an exit signal
+ with reason <c>kill</c> 10 seconds later which will kill the
+ process. Information about the timetrap timeout will in
+ this case not be found in the test logs. However, the
+ error_logger will be sent a warning.</p>
+ </note>
+ </desc>
+ </func>
+ <func>
+ <name>timetrap_cancel(Handle) -> ok</name>
+ <fsummary>Cancels a timetrap.</fsummary>
+ <type>
+ <v>Handle = term()</v>
+ <d>Handle returned from <c>timetrap</c></d>
+ </type>
+ <desc>
+ <p>This function cancels a timetrap. This must be done before
+ the timetrap expires.</p>
+ </desc>
+ </func>
+ <func>
+ <name>timetrap_scale_factor() -> ScaleFactor</name>
+ <fsummary>Returns the scale factor for timeouts.</fsummary>
+ <type>
+ <v>ScaleFactor = integer()</v>
+ </type>
+ <desc>
+ <p>This function returns the scale factor by which all timetraps
+ are scaled. It is normally 1, but can be greater than 1 if
+ the test_server is running <c>cover</c>, using a larger amount of
+ scheduler threads than the amount of logical processors on the
+ system, running under purify, valgrind or in a debug-compiled
+ emulator. The scale factor can be used if you need to scale you
+ own timeouts in test cases with same factor as the test_server
+ uses.</p>
+ </desc>
+ </func>
+ <func>
+ <name>sleep(MSecs) -> ok</name>
+      <fsummary>Suspends the calling task for a specified time.</fsummary>
+ <type>
+ <v>MSecs = integer() | float() | infinity</v>
+ <d>The number of milliseconds to sleep</d>
+ </type>
+ <desc>
+ <p>This function suspends the calling process for at least the
+ supplied number of milliseconds. There are two major reasons
+ why you should use this function instead of
+ <c>timer:sleep</c>, the first being that the module
+ <c>timer</c> may be unavailable at the time the test suite is
+ run, and the second that it also accepts floating point
+ numbers.</p>
+ </desc>
+ </func>
+ <func>
+ <name>hours(N) -> MSecs</name>
+ <name>minutes(N) -> MSecs</name>
+ <name>seconds(N) -> MSecs</name>
+ <fsummary></fsummary>
+ <type>
+ <v>N = integer()</v>
+ <d>Value to convert to milliseconds.</d>
+ </type>
+ <desc>
+        <p>These functions convert <c>N</c> number of hours, minutes
+ or seconds into milliseconds.
+ </p>
+ <p>Use this function when you want to
+ <c>test_server:sleep/1</c> for a number of seconds, minutes or
+ hours(!).</p>
+ </desc>
+ </func>
+ <func>
+ <name>format(Format) -> ok</name>
+ <name>format(Format, Args)</name>
+ <name>format(Pri,Format)</name>
+ <name>format(Pri, Format, Args)</name>
+ <fsummary></fsummary>
+ <type>
+ <v>Format = string()</v>
+        <d>Format as described for <c>io:format</c>.</d>
+ <v>Args = list()</v>
+ <d>List of arguments to format.</d>
+ </type>
+ <desc>
+ <p>Formats output just like <c>io:format</c> but sends the
+ formatted string to a logfile. If the urgency value,
+ <c>Pri</c>, is lower than some threshold value, it will also
+ be written to the test person's console. Default urgency is
+ 50, default threshold for display on the console is 1.
+ </p>
+        <p>Typically, the test person doesn't want to see everything a
+ test suite outputs, but is merely interested in if the test
+ cases succeeded or not, which the test server tells him. If he
+ would like to see more, he could manually change the threshold
+ values by using the <c>test_server_ctrl:set_levels/3</c>
+ function.</p>
+ </desc>
+ </func>
+ <func>
+ <name>capture_start() -> ok</name>
+ <name>capture_stop() -> ok</name>
+ <name>capture_get() -> list()</name>
+ <fsummary>Captures all output to stdout for a process.</fsummary>
+ <desc>
+ <p>These functions makes it possible to capture all output to
+ stdout from a process started by the test suite. The list of
+ characters captured can be purged by using <c>capture_get</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name>messages_get() -> list()</name>
+ <fsummary>Empty the message queue.</fsummary>
+ <desc>
+ <p>This function will empty and return all the messages
+ currently in the calling process' message queue.</p>
+ </desc>
+ </func>
+ <func>
+ <name>timecall(M, F, A) -> {Time, Value}</name>
+ <fsummary>Measures the time needed to call a function.</fsummary>
+ <type>
+ <v>M = atom()</v>
+ <d>The name of the module where the function resides.</d>
+ <v>F = atom()</v>
+ <d>The name of the function to call in the module.</d>
+ <v>A = list()</v>
+ <d>The arguments to supply the called function.</d>
+ <v>Time = integer()</v>
+ <d>The number of seconds it took to call the function.</d>
+ <v>Value = term()</v>
+ <d>Value returned from the called function.</d>
+ </type>
+ <desc>
+ <p>This function measures the time (in seconds) it takes to
+ call a certain function. The function call is <em>not</em>
+ caught within a catch.</p>
+ </desc>
+ </func>
+ <func>
+ <name>do_times(N, M, F, A) -> ok</name>
+ <name>do_times(N, Fun)</name>
+ <fsummary>Calls MFA or Fun N times.</fsummary>
+ <type>
+ <v>N = integer()</v>
+ <d>Number of times to call MFA.</d>
+ <v>M = atom()</v>
+ <d>Module name where the function resides.</d>
+ <v>F = atom()</v>
+ <d>Function name to call.</d>
+ <v>A = list()</v>
+ <d>Arguments to M:F.</d>
+ </type>
+ <desc>
+ <p>Calls MFA or Fun N times. Useful for extensive testing of a
+ sensitive function.</p>
+ </desc>
+ </func>
+ <func>
+ <name>m_out_of_n(M, N, Fun) -> ok | exit({m_out_of_n_failed, {R,left_to_do}}</name>
+ <fsummary>Fault tolerant <c>do_times</c>.</fsummary>
+ <type>
+ <v>N = integer()</v>
+ <d>Number of times to call the Fun.</d>
+ <v>M = integer()</v>
+ <d>Number of times to require a successful return.</d>
+ </type>
+ <desc>
+ <p>Repeatedly evaluates the given function until it succeeds
+ (doesn't crash) M times. If, after N times, M successful
+ attempts have not been accomplished, the process crashes with
+          reason {m_out_of_n_failed, {R,left_to_do}}, where R indicates
+          how many cases were still to be successfully completed.
+ </p>
+ <p>For example:
+ </p>
+ <p><c>m_out_of_n(1,4,fun() -> tricky_test_case() end)</c> <br></br>
+Tries to run tricky_test_case() up to 4 times, and is
+ happy if it succeeds once.
+ </p>
+ <p><c>m_out_of_n(7,8,fun() -> clock_sanity_check() end)</c> <br></br>
+Tries running clock_sanity_check() up to 8 times, and
+ allows the function to fail once. This might be useful if
+ clock_sanity_check/0 is known to fail if the clock crosses an
+ hour boundary during the test (and the up to 8 test runs could
+ never cross 2 boundaries)</p>
+ </desc>
+ </func>
+ <func>
+ <name>call_crash(M, F, A) -> Result</name>
+ <name>call_crash(Time, M, F, A) -> Result</name>
+ <name>call_crash(Time, Crash, M, F, A) -> Result</name>
+ <fsummary>Calls MFA and succeeds if it crashes.</fsummary>
+ <type>
+ <v>Result = ok | exit(call_crash_timeout) | exit({wrong_crash_reason, Reason})</v>
+ <v>Crash = term()</v>
+ <d>Crash return from the function.</d>
+ <v>Time = integer()</v>
+ <d>Timeout in milliseconds.</d>
+ <v>M = atom()</v>
+ <d>Module name where the function resides.</d>
+ <v>F = atom()</v>
+ <d>Function name to call.</d>
+ <v>A = list()</v>
+ <d>Arguments to M:F.</d>
+ </type>
+ <desc>
+ <p>Spawns a new process that calls MFA. The call is considered
+        successful if the call crashes with the given reason
+ (<c>Crash</c>) or any reason if not specified. The call must
+ terminate within the given time (default <c>infinity</c>), or
+ it is considered a failure.</p>
+ </desc>
+ </func>
+ <func>
+ <name>temp_name(Stem) -> Name</name>
+ <fsummary>Returns a unique filename.</fsummary>
+ <type>
+ <v>Stem = string()</v>
+ </type>
+ <desc>
+ <p>Returns a unique filename starting with <c>Stem</c> with
+ enough extra characters appended to make up a unique
+ filename. The filename returned is guaranteed not to exist in
+ the filesystem at the time of the call.</p>
+ </desc>
+ </func>
+ <func>
+ <name>break(Comment) -> ok</name>
+ <fsummary>Cancel all timetraps and wait for call to continue/0.</fsummary>
+ <type>
+ <v>Comment = string()</v>
+ </type>
+ <desc>
+ <p><c>Comment</c> is a string which will be written in
+ the shell, e.g. explaining what to do.</p>
+ <p>This function will cancel all timetraps and pause the
+ execution of the test case until the user executes the
+ <c>continue/0</c> function. It gives the user the opportunity
+ to interact with the erlang node running the tests, e.g. for
+ debugging purposes or for manually executing a part of the
+ test case.</p>
+ <p>When the <c>break/1</c> function is called, the shell will
+ look something like this:</p>
+ <code type="none"><![CDATA[
+ --- SEMIAUTOMATIC TESTING ---
+ The test case executes on process <0.51.0>
+
+
+ "Here is a comment, it could e.g. instruct to pull out a card"
+
+
+ -----------------------------
+
+ Continue with --> test_server:continue(). ]]></code>
+ <p>The user can now interact with the erlang node, and when
+ ready call <c>test_server:continue().</c></p>
+ <p>Note that this function can not be used if the test is
+ executed with <c>ts:run/0/1/2/3/4</c> in <c>batch</c> mode.</p>
+ </desc>
+ </func>
+ <func>
+ <name>continue() -> ok</name>
+ <fsummary>Continue after break/1.</fsummary>
+ <desc>
+ <p>This function must be called in order to continue after a
+ test case has called <c>break/1</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name>run_on_shielded_node(Fun, CArgs) -> term()</name>
+ <fsummary>Execute a function a shielded node.</fsummary>
+ <type>
+ <v>Fun = function() (arity 0)</v>
+ <d>Function to execute on the shielded node.</d>
+ <v>CArg = string()</v>
+ <d>Extra command line arguments to use when starting the shielded node.</d>
+ </type>
+ <desc>
+ <p><c>Fun</c> is executed in a process on a temporarily created
+ hidden node with a proxy for communication with the test server
+ node. The node is called a shielded node (should have been called
+ a shield node). If <c>Fun</c> is successfully executed, the result
+ is returned. A peer node (see <c>start_node/3</c>) started from
+ the shielded node will be shielded from test server node, i.e.
+ they will not be aware of each other. This is useful when you want
+ to start nodes from earlier OTP releases than the OTP release of
+ the test server node.</p>
+ <p>Nodes from an earlier OTP release can normally not be started
+ if the test server hasn't been started in compatibility mode
+ (see the <c>+R</c> flag in the <c>erl(1)</c> documentation) of
+ an earlier release. If a shielded node is started in compatibility
+ mode of an earlier OTP release than the OTP release of the test
+ server node, the shielded node can start nodes of an earlier OTP
+ release.</p>
+ <note>
+ <p>You <em>must</em> make sure that nodes started by the shielded
+ node never communicate directly with the test server node.</p>
+ </note>
+ <note>
+ <p>Slave nodes always communicate with the test server node;
+ therefore, <em>never</em> start <em>slave nodes</em> from the
+ shielded node, <em>always</em> start <em>peer nodes</em>.</p>
+ </note>
+ </desc>
+ </func>
+ <func>
+ <name>start_node(Name, Type, Options) -> {ok, Node} | {error, Reason}</name>
+ <fsummary>Start a node.</fsummary>
+ <type>
+ <v>Name = atom() | string()</v>
+        <d>Name of the slave node to start (as given to -sname or -name)</d>
+ <v>Type = slave | peer</v>
+ <d>The type of node to start.</d>
+        <v>Options = [{atom(), term()}]</v>
+ <d>Tuplelist of options</d>
+ </type>
+ <desc>
+        <p>This function starts a node, possibly on a remote machine,
+ and guarantees cross architecture transparency. Type is set to
+ either <c>slave</c> or <c>peer</c>.
+ </p>
+ <p><c>slave</c> means that the new node will have a master,
+ i.e. the slave node will terminate if the master terminates,
+ TTY output produced on the slave will be sent back to the
+ master node and file I/O is done via the master. The master is
+ normally the target node unless the target is itself a slave.
+ </p>
+ <p><c>peer</c> means that the new node is an independent node
+ with no master.
+ </p>
+ <p><c>Options</c> is a tuplelist which can contain one or more
+ of
+ </p>
+ <taglist>
+ <tag><c>{remote, true}</c></tag>
+ <item>Start the node on a remote host. If not specified, the
+ node will be started on the local host (with some
+ exceptions, as for the case of VxWorks, where
+ all nodes are started on a remote host). Test cases that
+ require a remote host will fail with a reasonable comment if
+ no remote hosts are available at the time they are run.
+ </item>
+ <tag><c>{args, Arguments}</c></tag>
+ <item>Arguments passed directly to the node. This is
+ typically a string appended to the command line.
+ </item>
+ <tag><c>{wait, false}</c></tag>
+ <item>Don't wait until the node is up. By default, this
+ function does not return until the node is up and running,
+ but this option makes it return as soon as the node start
+          command is given.
+ <br></br>
+Only valid for peer nodes
+ </item>
+ <tag><c>{fail_on_error, false}</c></tag>
+ <item>Returns <c>{error, Reason}</c> rather than failing the
+ test case.
+ <br></br>
+Only valid for peer nodes. Note that slave nodes always
+ act as if they had <c>fail_on_error=false</c></item>
+ <tag><c>{erl, ReleaseList}</c></tag>
+ <item>Use an Erlang emulator determined by ReleaseList when
+ starting nodes, instead of the same emulator as the test
+ server is running. ReleaseList is a list of specifiers,
+ where a specifier is either {release, Rel}, {prog, Prog}, or
+ 'this'. Rel is either the name of a release, e.g., "r12b_patched"
+ or 'latest'. 'this' means using the same emulator as the test
+ server. Prog is the name of an emulator executable. If the
+ list has more than one element, one of them is picked
+ randomly. (Only works on Solaris and Linux, and the test server
+ gives warnings when it notices that nodes are not of the same
+ version as itself.)
+ <br></br>
+ <br></br>
+
+ When specifying this option to run a previous release, use
+ <c>is_release_available/1</c> function to test if the given
+ release is available and skip the test case if not.
+ <br></br>
+ <br></br>
+
+ In order to avoid compatibility problems (may not appear right
+ away), use a shielded node (see <c>run_on_shielded_node/2</c>)
+ when starting nodes from different OTP releases than the test
+ server.
+ </item>
+ <tag><c>{cleanup, false}</c></tag>
+ <item>Tells the test server not to kill this node if it is
+ still alive after the test case is completed. This is useful
+ if the same node is to be used by a group of test cases.
+ </item>
+ <tag><c>{env, Env}</c></tag>
+ <item><c>Env</c> should be a list of tuples <c>{Name, Val}</c>,
+ where <c>Name</c> is the name of an environment variable, and
+ <c>Val</c> is the value it is to have in the started node.
+ Both <c>Name</c> and <c>Val</c> must be strings. The one
+ exception is <c>Val</c> being the atom <c>false</c> (in
+ analogy with <c>os:getenv/1</c>), which removes the
+ environment variable. Only valid for peer nodes. Not
+ available on VxWorks.</item>
+ </taglist>
+ </desc>
+ </func>
+ <func>
+ <name>stop_node(NodeName) -> bool()</name>
+ <fsummary>Stops a node</fsummary>
+ <type>
+ <v>NodeName = term()</v>
+ <d>Name of the node to stop</d>
+ </type>
+ <desc>
+	<p>This function stops a node previously started with
+ <c>start_node/3</c>. Use this function to stop any node you
+ start, or the test server will produce a warning message in
+ the test logs, and kill the nodes automatically unless it was
+ started with the <c>{cleanup, false}</c> option.</p>
+ </desc>
+ </func>
+ <func>
+ <name>is_commercial() -> bool()</name>
+ <fsummary>Tests whether the emulator is commercially supported</fsummary>
+ <desc>
+	<p>This function tests whether the emulator is a commercially
+	  supported emulator. The tests for a commercially supported emulator could be more
+ stringent (for instance, a commercial release should always contain
+ documentation for all applications).</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>is_release_available(Release) -> bool()</name>
+ <fsummary>Tests whether a release is available</fsummary>
+ <type>
+ <v>Release = string() | atom()</v>
+ <d>Release to test for</d>
+ </type>
+ <desc>
+	<p>This function tests whether the release given by
+ <c>Release</c> (for instance, "r12b_patched") is available
+ on the computer that the test_server controller is running on.
+ Typically, you should skip the test case if not.</p>
+ <p>Caution: This function may not be called from the <c>suite</c>
+ clause of a test case, as the test_server will deadlock.</p>
+ </desc>
+ </func>
+ <func>
+ <name>is_native(Mod) -> bool()</name>
+ <fsummary>Checks whether the module is natively compiled or not</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <d>A module name</d>
+ </type>
+ <desc>
+ <p>Checks whether the module is natively compiled or not</p>
+ </desc>
+ </func>
+ <func>
+ <name>app_test(App) -> ok | test_server:fail()</name>
+ <name>app_test(App,Mode)</name>
+      <fsummary>Checks an application's .app file for obvious errors</fsummary>
+ <type>
+ <v>App = term()</v>
+ <d>The name of the application to test</d>
+ <v>Mode = pedantic | tolerant</v>
+ <d>Default is pedantic</d>
+ </type>
+ <desc>
+	<p>Checks an application's .app file for obvious errors.
+ The following is checked:
+ </p>
+ <list type="bulleted">
+ <item>required fields
+ </item>
+	  <item>that all modules specified actually exist
+ </item>
+	  <item>that all required applications exist
+ </item>
+ <item>that no module included in the application has export_all
+ </item>
+	  <item>that all modules in the ebin/ dir are included (If
+	    <c>Mode==tolerant</c> this only produces a warning, as all
+	    modules do not have to be included)</item>
+ </list>
+ </desc>
+ </func>
+ <func>
+ <name>comment(Comment) -> ok</name>
+ <fsummary>Print a comment on the HTML result page</fsummary>
+ <type>
+ <v>Comment = string()</v>
+ </type>
+ <desc>
+ <p>The given String will occur in the comment field of the
+ table on the HTML result page. If called several times, only
+ the last comment is printed. comment/1 is also overwritten by
+ the return value {comment,Comment} from a test case or by
+ fail/1 (which prints Reason as a comment).</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>TEST SUITE EXPORTS</title>
+ <p>The following functions must be exported from a test suite
+ module.
+ </p>
+ </section>
+ <funcs>
+ <func>
+ <name>all(suite) -> TestSpec | {skip, Comment}</name>
+ <fsummary>Returns the module's test specification</fsummary>
+ <type>
+ <v>TestSpec = list()</v>
+ <v>Comment = string()</v>
+ <d>This comment will be printed on the HTML result page</d>
+ </type>
+ <desc>
+ <p>This function must return the test specification for the
+ test suite module. The syntax of a test specification is
+ described in the Test Server User's Guide.</p>
+ </desc>
+ </func>
+ <func>
+ <name>init_per_suite(Config0) -> Config1 | {skip, Comment}</name>
+ <fsummary>Test suite initiation</fsummary>
+ <type>
+ <v>Config0 = Config1 = [tuple()]</v>
+ <v>Comment = string()</v>
+ <d>Describes why the suite is skipped</d>
+ </type>
+ <desc>
+ <p>This function is called before all other test cases in the
+ suite. <c>Config</c> is the configuration which can be modified
+ here. Whatever is returned from this function is given as
+ <c>Config</c> to the test cases.
+ </p>
+ <p>If this function fails, all test cases in the suite will be
+ skipped.</p>
+ </desc>
+ </func>
+ <func>
+ <name>end_per_suite(Config) -> void()</name>
+ <fsummary>Test suite finalization</fsummary>
+ <type>
+ <v>Config = [tuple()]</v>
+ </type>
+ <desc>
+ <p>This function is called after the last test case in the
+ suite, and can be used to clean up whatever the test cases
+ have done. The return value is ignored.</p>
+ </desc>
+ </func>
+ <func>
+ <name>init_per_testcase(Case, Config0) -> Config1 | {skip, Comment}</name>
+ <fsummary>Test case initiation</fsummary>
+ <type>
+ <v>Case = atom()</v>
+ <v>Config0 = Config1 = [tuple()]</v>
+ <v>Comment = string()</v>
+ <d>Describes why the test case is skipped</d>
+ </type>
+ <desc>
+ <p>This function is called before each test case. The
+ <c>Case</c> argument is the name of the test case, and
+ <c>Config</c> is the configuration which can be modified
+ here. Whatever is returned from this function is given as
+ <c>Config</c> to the test case.</p>
+ </desc>
+ </func>
+ <func>
+ <name>end_per_testcase(Case, Config) -> void()</name>
+ <fsummary>Test case finalization</fsummary>
+ <type>
+ <v>Case = atom()</v>
+ <v>Config = [tuple()]</v>
+ </type>
+ <desc>
+ <p>This function is called after each test case, and can be
+ used to clean up whatever the test case has done. The return
+ value is ignored.</p>
+ </desc>
+ </func>
+ <func>
+      <name>Case(doc) -> [Description]</name>
+ <name>Case(suite) -> [] | TestSpec | {skip, Comment}</name>
+ <name>Case(Config) -> {skip, Comment} | {comment, Comment} | Ok</name>
+ <fsummary>A test case</fsummary>
+ <type>
+ <v>Description = string()</v>
+ <d>Short description of the test case</d>
+ <v>TestSpec = list()</v>
+ <v>Comment = string()</v>
+ <d>This comment will be printed on the HTML result page</d>
+ <v>Ok = term()</v>
+ <v>Config = [tuple()]</v>
+ <d>Elements from the Config parameter can be read with the ?config macro, see section about test suite support macros</d>
+ </type>
+ <desc>
+ <p>The <em>documentation clause</em> (argument <c>doc</c>) can
+ be used for automatic generation of test documentation or test
+ descriptions.
+ </p>
+	<p>The <em>specification clause</em> (argument <c>suite</c>)
+ shall return an empty list, the test specification for the
+ test case or <c>{skip,Comment}</c>. The syntax of a test
+ specification is described in the Test Server User's Guide.
+ </p>
+ <p><em>Note that the specification clause always is executed on the controller host.</em></p>
+ <p>The <em>execution clause</em> (argument <c>Config</c>) is
+ only called if the specification clause returns an empty list.
+ The execution clause is the real test case. Here you must call
+ the functions you want to test, and do whatever you need to
+ check the result. If something fails, make sure the process
+ crashes or call <c>test_server:fail/0/1</c> (which also will
+ cause the process to crash).
+ </p>
+ <p>You can return <c>{skip,Comment}</c> if you decide not to
+ run the test case after all, e.g. if it is not applicable on
+ this platform.
+ </p>
+ <p>You can return <c>{comment,Comment}</c> if you wish to
+ print some information in the 'Comment' field on the HTML
+ result page.
+ </p>
+ <p>If the execution clause returns anything else, it is
+ considered a success, unless it is <c>{'EXIT',Reason}</c> or
+ <c>{'EXIT',Pid,Reason}</c> which can't be distinguished from a
+ crash, and thus will be considered a failure.
+ </p>
+ <p>A <em>conf test case</em> is a group of test cases with an
+ init and a cleanup function. The init and cleanup functions
+ are also test cases, but they have special rules:</p>
+ <list type="bulleted">
+ <item>They do not need a specification clause.</item>
+ <item>They must always have the execution clause.</item>
+ <item>They must return the <c>Config</c> parameter, a modified
+ version of it or <c>{skip,Comment}</c> from the execution clause.</item>
+ <item>The cleanup function may also return a tuple
+ <c>{return_group_result,Status}</c>, which is used to return the
+ status of the conf case to Test Server and/or to a conf case on a
+ higher level. (<c>Status = ok | skipped | failed</c>).</item>
+ <item><c>init_per_testcase</c> and <c>end_per_testcase</c> are
+ not called before and after these functions.</item>
+ </list>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>TEST SUITE LINE NUMBERS</title>
+ <p>If a test case fails, the test server can report the exact line
+ number at which it failed. There are two ways of doing this,
+ either by using the <c>line</c> macro or by using the
+ <c>test_server_line</c> parse transform.
+ </p>
+ <p>The <c>line</c> macro is described under TEST SUITE SUPPORT
+ MACROS below. The <c>line</c> macro will only report the last line
+ executed when a test case failed.
+ </p>
+ <p>The <c>test_server_line</c> parse transform is activated by
+ including the headerfile <c>test_server_line.hrl</c> in the test
+ suite. When doing this, it is important that the
+ <c>test_server_line</c> module is in the code path of the erlang
+ node compiling the test suite. The parse transform will report a
+ history of a maximum of 10 lines when a test case
+ fails. Consecutive lines in the same function are not shown.
+ </p>
+ <p>The attribute <c>-no_lines(FuncList).</c> can be used in the
+ test suite to exclude specific functions from the parse
+ transform. This is necessary e.g. for functions that are executed
+ on old (i.e. &lt;R10B) OTP releases. <c>FuncList = [{Func,Arity}]</c>.
+ </p>
+    <p>If both the <c>line</c> macro and the parse transform are used in
+      the same module, the parse transform will overrule the macro.
+ </p>
+ </section>
+
+ <section>
+ <title>TEST SUITE SUPPORT MACROS</title>
+ <p>There are some macros defined in the <c>test_server.hrl</c>
+ that are quite useful for test suite programmers:
+ </p>
+ <p>The <em>line</em> macro, is quite
+ essential when writing test cases. It tells the test server
+ exactly what line of code that is being executed, so that it can
+ report this line back if the test case fails. Use this macro at
+ the beginning of every test case line of code.
+ </p>
+ <p>The <em>config</em> macro, is used to
+ retrieve information from the <c>Config</c> variable sent to all
+ test cases. It is used with two arguments, where the first is the
+ name of the configuration variable you wish to retrieve, and the
+ second is the <c>Config</c> variable supplied to the test case
+ from the test server.
+ </p>
+ <p>Possible configuration variables include:</p>
+ <list type="bulleted">
+ <item><c>data_dir</c> - Data file directory.</item>
+ <item><c>priv_dir</c> - Scratch file directory.</item>
+ <item><c>nodes</c> - Nodes specified in the spec file</item>
+ <item><c>nodenames</c> - Generated nodenames.</item>
+ <item>Whatever added by conf test cases or
+ <c>init_per_testcase/2</c></item>
+ </list>
+ <p>Examples of the <c>line</c> and <c>config</c> macros can be
+ seen in the Examples chapter in the user's guide.
+ </p>
+ <p>If the <c>line_trace</c> macro is defined, you will get a
+ timestamp (<c>erlang:now()</c>) in your minor log for each
+ <c>line</c> macro in your suite. This way you can at any time see
+ which line is currently being executed, and when the line was
+ called.
+ </p>
+ <p>The <c>line_trace</c> macro can also be used together with the
+ <c>test_server_line</c> parse transform described above. A
+ timestamp will then be written for each line in the suite, except
+ for functions stated in the <c>-no_lines</c> attribute.
+ </p>
+ <p>The <c>line_trace</c> macro can e.g. be defined as a compile
+ option, like this:
+ <br></br>
+<c>erlc -W -Dline_trace my_SUITE.erl</c></p>
+ </section>
+</erlref>
+
diff --git a/lib/test_server/doc/src/test_server_app.xml b/lib/test_server/doc/src/test_server_app.xml
new file mode 100644
index 0000000000..924cdc886b
--- /dev/null
+++ b/lib/test_server/doc/src/test_server_app.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE appref SYSTEM "appref.dtd">
+
+<appref>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Server Application</title>
+ <prepared>Siri Hansen</prepared>
+ <responsible>Peter Andersson</responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date>2002-07-12</date>
+ <rev>PA1</rev>
+ <file>test_server_app.xml</file>
+ </header>
+ <app>test_server</app>
+ <appsummary>Test Server for manual or automatic testing of Erlang code</appsummary>
+ <description>
+ <p><em>Test Server</em> is a portable test server for
+ automated application testing. The server can run test suites
+ on local or remote targets and log progress and results to HTML
+ pages. The main purpose of Test Server is to act as engine
+ inside customized test tools. A callback interface for
+ such framework applications is provided.</p>
+ <p>In brief the test server supports:</p>
+ <list type="bulleted">
+ <item>Running multiple, concurrent test suites</item>
+ <item>Running tests on remote and even diskless targets</item>
+ <item>Test suites may contain other test suites, in a tree fashion</item>
+ <item>Logging of the events in a test suite, on both suite and case levels</item>
+ <item>HTML presentation of test suite results</item>
+ <item>HTML presentation of test suite code</item>
+ <item>Support for test suite authors, e.g. start/stop slave nodes</item>
+ <item>Call trace on target and slave nodes</item>
+ </list>
+ <p>For information about how to write test cases and test suites,
+ please see the Test Server User's Guide and the reference
+ manual for the <c>test_server</c> module.
+ </p>
+ <p><em>Common Test</em> is an existing test tool application based on the
+ OTP Test Server. Please read the Common Test User's Guide for more information.
+ </p>
+ </description>
+
+ <section>
+ <title>Configuration</title>
+ <p>There are currently no configuration parameters available for
+ this application.
+ </p>
+ </section>
+
+ <section>
+ <title>SEE ALSO</title>
+ <p></p>
+ </section>
+</appref>
+
diff --git a/lib/test_server/doc/src/test_server_ctrl.xml b/lib/test_server/doc/src/test_server_ctrl.xml
new file mode 100644
index 0000000000..3d95813c14
--- /dev/null
+++ b/lib/test_server/doc/src/test_server_ctrl.xml
@@ -0,0 +1,771 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2007</year>
+ <year>2008</year>
+ <holder>Ericsson AB, All Rights Reserved</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ The Initial Developer of the Original Code is Ericsson AB.
+ </legalnotice>
+
+ <title>The Test Server Controller</title>
+ <prepared>Siri Hansen</prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>test_server_ctrl_ref.sgml</file>
+ </header>
+ <module>test_server_ctrl</module>
+ <modulesummary>This module provides a low level interface to the Test Server.</modulesummary>
+ <description>
+ <p>The <c>test_server_ctrl</c> module provides a low level
+ interface to the Test Server. This interface is normally
+ not used directly by the tester, but through a framework built
+ on top of <c>test_server_ctrl</c>.
+ </p>
+ <p>Common Test is such a framework, well suited for automated
+ black box testing of target systems of any kind (not necessarily
+ implemented in Erlang). Common Test is also a very useful tool for
+ white box testing Erlang programs and OTP applications.
+ Please see the Common Test User's Guide and reference manual for
+ more information.
+ </p>
+ <p>If you want to write your own framework, some more information
+ can be found in the chapter "Writing your own test server
+ framework" in the Test Server User's Guide. Details about the
+ interface provided by <c>test_server_ctrl</c> follows below.
+ </p>
+ </description>
+ <funcs>
+ <func>
+ <name>start() -> Result</name>
+ <name>start(ParameterFile) -> Result</name>
+ <fsummary>Starts the test server.</fsummary>
+ <type>
+ <v>Result = ok | {error, {already_started, pid()}</v>
+ <v>ParameterFile = atom() | string()</v>
+ </type>
+ <desc>
+ <p>This function starts the test server. If the parameter file
+ is given, it indicates that the target is remote. In that case
+ the target node is started and a socket connection is
+ established between the controller and the target node.
+ </p>
+ <p>The parameter file is a text file containing key-value
+ tuples. Each tuple must be followed by a dot-newline
+ sequence. The following key-value tuples are allowed:
+ </p>
+ <taglist>
+ <tag><c>{type,PlatformType}</c></tag>
+ <item>This is an atom indicating the target platform type,
+ currently supported: <c>PlatformType = vxworks</c> <br></br>
+Mandatory
+ </item>
+ <tag><c>{target,TargetHost}</c></tag>
+ <item>This is the name of the target host, can be atom or
+ string.
+ <br></br>
+Mandatory
+ </item>
+ <tag><c>{slavetargets,SlaveTargets}</c></tag>
+ <item>This is a list of available hosts where slave nodes
+ can be started. The hostnames are given as atoms or strings.
+ <br></br>
+Optional, default <c>SlaveTargets = []</c></item>
+ <tag><c>{longnames,Bool}</c></tag>
+ <item>This indicates if longnames shall be used, i.e. if the
+ <c>-name</c> option should be used for the target node
+ instead of <c>-sname</c> <br></br>
+Optional, default <c>Bool = false</c></item>
+ <tag><c>{master, {MasterHost, MasterCookie}}</c></tag>
+ <item>If target is remote and the target node is started as
+ a slave node, this option indicates which master and
+ cookie to use. The given master
+ will also be used as master for slave nodes started with
+ <c>test_server:start_node/3</c>. It is expected that the
+ <c>erl_boot_server</c> is started on the master node before
+ the <c>test_server_ctrl:start/1</c> function is called.
+ <br></br>
+Optional, if not given the test server controller node
+ is used as master and the <c>erl_boot_server</c> is
+ automatically started.</item>
+ </taglist>
+ </desc>
+ </func>
+ <func>
+ <name>stop() -> ok</name>
+ <fsummary>Stops the test server immediately.</fsummary>
+ <desc>
+ <p>This stops the test server (both controller and target) and
+ all its activity. The running test suite (if any) will be
+ halted.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_dir(Name, Dir) -> ok</name>
+ <name>add_dir(Name, Dir, Pattern) -> ok</name>
+ <name>add_dir(Name, [Dir|Dirs]) -> ok</name>
+ <name>add_dir(Name, [Dir|Dirs], Pattern) -> ok</name>
+ <fsummary>Add a directory to the job queue.</fsummary>
+ <type>
+ <v>Name = term()</v>
+ <d>The jobname for this directory.</d>
+ <v>Dir = term()</v>
+ <d>The directory to scan for test suites.</d>
+ <v>Dirs = [term()]</v>
+ <d>List of directories to scan for test suites.</d>
+ <v>Pattern = term()</v>
+ <d>Suite match pattern. Directories will be scanned for Pattern_SUITE.erl files.</d>
+ </type>
+ <desc>
+ <p>Puts a collection of suites matching (*_SUITE) in given
+ directories into the job queue. <c>Name</c> is an arbitrary
+ name for the job, it can be any erlang term. If <c>Pattern</c>
+ is given, only modules matching <c>Pattern*</c> will be added.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_module(Mod) -> ok</name>
+ <name>add_module(Name, [Mod|Mods]) -> ok</name>
+ <fsummary>Add a module to the job queue with or without a given name.</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <v>Mods = [atom()]</v>
+ <d>The name(s) of the module(s) to add.</d>
+ <v>Name = term()</v>
+ <d>Name for the job.</d>
+ </type>
+ <desc>
+ <p>This function adds a module or a list of modules, to the
+ test servers job queue. <c>Name</c> may be any Erlang
+ term. When <c>Name</c> is not given, the job gets the name of
+ the module.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_case(Mod, Case) -> ok</name>
+ <fsummary>Adds one test case to the job queue.</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <d>Name of the module the test case is in.</d>
+ <v>Case = atom() </v>
+ <d>Function name of the test case to add.</d>
+ </type>
+ <desc>
+ <p>This function will add one test case to the job queue. The
+ job will be given the module's name.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_case(Name, Mod, Case) -> ok</name>
+ <fsummary>Equivalent to add_case/2, but with specified name.</fsummary>
+ <type>
+ <v>Name = string()</v>
+ <d>Name to use for the test job.</d>
+ </type>
+ <desc>
+ <p>Equivalent to <c>add_case/2</c>, but the test job will get
+ the specified name.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_cases(Mod, Cases) -> ok</name>
+ <fsummary>Adds a list of test cases to the job queue.</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <d>Name of the module the test case is in.</d>
+ <v>Cases = [Case] </v>
+ <v>Case = atom() </v>
+ <d>Function names of the test cases to add.</d>
+ </type>
+ <desc>
+ <p>This function will add one or more test cases to the job
+ queue. The job will be given the module's name.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_cases(Name, Mod, Cases) -> ok</name>
+ <fsummary>Equivalent to add_cases/2, but with specified name.</fsummary>
+ <type>
+ <v>Name = string()</v>
+ <d>Name to use for the test job.</d>
+ </type>
+ <desc>
+ <p>Equivalent to <c>add_cases/2</c>, but the test job will get
+ the specified name.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_spec(TestSpecFile) -> ok | {error, nofile}</name>
+ <fsummary>Adds a test specification file to the job queue.</fsummary>
+ <type>
+ <v>TestSpecFile = string()</v>
+ <d>Name of the test specification file</d>
+ </type>
+ <desc>
+ <p>This function will add the content of the given test
+ specification file to the job queue. The job will be given the
+ name of the test specification file, e.g. if the file is
+ called <c>test.spec</c>, the job will be called <c>test</c>.
+ </p>
+ <p>See the reference manual for the test server application
+ for details about the test specification file.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_dir_with_skip(Name, [Dir|Dirs], Skip) -> ok</name>
+ <name>add_dir_with_skip(Name, [Dir|Dirs], Pattern, Skip) -> ok</name>
+ <name>add_module_with_skip(Mod, Skip) -> ok</name>
+ <name>add_module_with_skip(Name, [Mod|Mods], Skip) -> ok</name>
+ <name>add_case_with_skip(Mod, Case, Skip) -> ok</name>
+ <name>add_case_with_skip(Name, Mod, Case, Skip) -> ok</name>
+ <name>add_cases_with_skip(Mod, Cases, Skip) -> ok</name>
+ <name>add_cases_with_skip(Name, Mod, Cases, Skip) -> ok</name>
+ <fsummary>Same purpose as functions listed above, but with extra Skip argument.</fsummary>
+ <type>
+ <v>Skip = [SkipItem]</v>
+ <d>List of items to be skipped from the test.</d>
+ <v>SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment}</v>
+ <v>Mod = atom()</v>
+ <d>Test suite name.</d>
+ <v>Comment = string()</v>
+ <d>Reason why suite or case is being skipped.</d>
+ <v>Cases = [Case]</v>
+ <v>Case = atom()</v>
+ <d>Name of test case function.</d>
+ </type>
+ <desc>
+ <p>These functions add test jobs just like the add_dir, add_module,
+ add_case and add_cases functions above, but carry an additional
+ argument, Skip. Skip is a list of items that should be skipped
+ in the current test run. Test job items that occur in the Skip
+ list will be logged as SKIPPED with the associated Comment.</p>
+ </desc>
+ </func>
+ <func>
+ <name>add_tests_with_skip(Name, Tests, Skip) -> ok</name>
+ <fsummary>Adds different types of jobs to the run queue.</fsummary>
+ <type>
+ <v>Name = term()</v>
+ <d>The jobname for this directory.</d>
+ <v>Tests = [TestItem]</v>
+ <d>List of jobs to add to the run queue.</d>
+ <v>TestItem = {Dir,all,all} | {Dir,Mods,all} | {Dir,Mod,Cases}</v>
+ <v>Dir = term()</v>
+ <d>The directory to scan for test suites.</d>
+ <v>Mods = [Mod]</v>
+ <v>Mod = atom()</v>
+ <d>Test suite name.</d>
+ <v>Cases = [Case]</v>
+ <v>Case = atom()</v>
+ <d>Name of test case function.</d>
+ <v>Skip = [SkipItem]</v>
+ <d>List of items to be skipped from the test.</d>
+ <v>SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment}</v>
+ <v>Comment = string()</v>
+ <d>Reason why suite or case is being skipped.</d>
+ </type>
+ <desc>
+ <p>This function adds various test jobs to the test_server_ctrl
+ job queue. These jobs can be of different type (all or specific suites
+ in one directory, all or specific cases in one suite, etc). It is also
+ possible to get particular items skipped by passing them along in the
+ Skip list (see the add_*_with_skip functions above).</p>
+ </desc>
+ </func>
+ <func>
+ <name>abort_current_testcase(Reason) -> ok | {error,no_testcase_running}</name>
+ <fsummary>Aborts the test case currently executing.</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ <d>The reason for stopping the test case, which will be printed in the log.</d>
+ </type>
+ <desc>
+ <p>When calling this function, the currently executing test case will be aborted.
+ It is the user's responsibility to know for sure which test case is currently
+ executing. The function is therefore only safe to call from a function which
+ has been called (or synchronously invoked) by the test case.</p>
+ </desc>
+ </func>
+ <func>
+ <name>set_levels(Console, Major, Minor) -> ok</name>
+ <fsummary>Sets the levels of I/O.</fsummary>
+ <type>
+ <v>Console = integer()</v>
+ <d>Level for I/O to be sent to console.</d>
+ <v>Major = integer()</v>
+ <d>Level for I/O to be sent to the major logfile.</d>
+ <v>Minor = integer()</v>
+ <d>Level for I/O to be sent to the minor logfile.</d>
+ </type>
+ <desc>
+ <p>Determines where I/O from test suites/test server will
+ go. All text output from test suites and the test server is
+ tagged with a priority value which ranges from 0 to 100, 100
+ being the most detailed. (see the section about log files in
+ the user's guide). Output from the test cases (using
+ <c>io:format/2</c>) has a detail level of 50. Depending on the
+ levels set by this function, this I/O may be sent to the
+ console, the major log file (for the whole test suite) or to
+ the minor logfile (separate for each test case).
+ </p>
+ <p>All output with detail level:</p>
+ <list type="bulleted">
+ <item>Less than or equal to <c>Console</c> is displayed on
+ the screen (default 1)
+ </item>
+ <item>Less than or equal to <c>Major</c> is logged in the
+ major log file (default 19)
+ </item>
+ <item>Greater than or equal to <c>Minor</c> is logged in the
+ minor log files (default 10)
+ </item>
+ </list>
+ <p>To view the currently set thresholds, use the
+ <c>get_levels/0</c> function.</p>
+ </desc>
+ </func>
+ <func>
+ <name>get_levels() -> {Console, Major, Minor}</name>
+ <fsummary>Returns the current levels.</fsummary>
+ <desc>
+ <p>Returns the current levels. See <c>set_levels/3</c> for
+ types.</p>
+ </desc>
+ </func>
+ <func>
+ <name>jobs() -> JobQueue</name>
+ <fsummary>Returns the job queue.</fsummary>
+ <type>
+ <v>JobQueue = [{list(), pid()}]</v>
+ </type>
+ <desc>
+ <p>This function will return all the jobs currently in the job
+ queue.</p>
+ </desc>
+ </func>
+ <func>
+ <name>multiply_timetraps(N) -> ok</name>
+ <fsummary>All timetraps started after this will be multiplied by N.</fsummary>
+ <type>
+ <v>N = integer() | infinity</v>
+ </type>
+ <desc>
+ <p>This function should be called before a test is started
+ which requires extended timetraps, e.g. if extensive tracing
+ is used. All timetraps started after this call will be
+ multiplied by <c>N</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name>cover(Application,Analyse) -> ok</name>
+ <name>cover(CoverFile,Analyse) -> ok</name>
+ <name>cover(App,CoverFile,Analyse) -> ok</name>
+ <fsummary>Informs the test_server controller that next test shall run with code coverage analysis.</fsummary>
+ <type>
+ <v>Application = atom()</v>
+ <d>OTP application to cover compile</d>
+ <v>CoverFile = string()</v>
+ <d>Name of file listing modules to exclude from or include in cover compilation. The filename must include full path to the file.</d>
+ <v>Analyse = details | overview</v>
+ </type>
+ <desc>
+ <p>This function informs the test_server controller that next
+ test shall run with code coverage analysis. All timetraps will
+	  automatically be multiplied by 10 when cover is run.
+ </p>
+ <p><c>Application</c> and <c>CoverFile</c> indicates what to
+ cover compile. If <c>Application</c> is given, the default is
+ that all modules in the <c>ebin</c> directory of the
+ application will be cover compiled. The <c>ebin</c> directory
+ is found by adding <c>ebin</c> to
+ <c>code:lib_dir(Application)</c>.
+ </p>
+ <p>A <c>CoverFile</c> can have the following entries:</p>
+ <code type="none">
+{exclude, all | ExcludeModuleList}.
+{include, IncludeModuleList}. </code>
+ <p>Note that each line must end with a full
+ stop. <c>ExcludeModuleList</c> and <c>IncludeModuleList</c>
+ are lists of atoms, where each atom is a module name.
+ </p>
+	<p>If both an <c>Application</c> and a <c>CoverFile</c> are
+ given, all modules in the application are cover compiled,
+ except for the modules listed in <c>ExcludeModuleList</c>. The
+ modules in <c>IncludeModuleList</c> are also cover compiled.
+ </p>
+ <p>If a <c>CoverFile</c> is given, but no <c>Application</c>,
+ only the modules in <c>IncludeModuleList</c> are cover
+ compiled.
+ </p>
+ <p><c>Analyse</c> indicates the detail level of the cover
+ analysis. If <c>Analyse = details</c>, each cover compiled
+ module will be analysed with
+ <c>cover:analyse_to_file/1</c>. If <c>Analyse = overview</c>
+ an overview of all cover compiled modules is created, listing
+ the number of covered and not covered lines for each module.
+ </p>
+ <p>If the test following this call starts any slave or peer
+ nodes with <c>test_server:start_node/3</c>, the same cover
+ compiled code will be loaded on all nodes. If the loading
+ fails, e.g. if the node runs an old version of OTP, the node
+ will simply not be a part of the coverage analysis. Note that
+ slave or peer nodes must be stopped with
+ <c>test_server:stop_node/1</c> for the node to be part of the
+ coverage analysis, else the test server will not be able to
+ fetch coverage data from the node.
+ </p>
+ <p>When the test is finished, the coverage analysis is
+ automatically completed, logs are created and the cover
+ compiled modules are unloaded. If another test is to be run
+ with coverage analysis, <c>test_server_ctrl:cover/2/3</c> must
+ be called again.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name>cross_cover_analyse(Level) -> ok</name>
+ <fsummary>Analyse cover data collected from all tests</fsummary>
+ <type>
+ <v>Level = details | overview</v>
+ </type>
+ <desc>
+ <p>Analyse cover data collected from all tests. The modules
+ analysed are the ones listed in the cross cover file
+ <c>cross.cover</c> in the current directory of the test
+ server.</p>
+ <p>The modules listed in the <c>cross.cover</c> file are
+ modules that are heavily used by other applications than the
+ one they belong to. This function should be run after all
+ tests are completed, and the result will be stored in a file
+ called cross_cover.html in the run.&lt;timestamp&gt;
+ directory of the application the modules belong to.
+ </p>
+ <p>The <c>cross.cover</c> file contains elements like this:</p>
+ <pre>
+{App,Modules}. </pre>
+ <p>where <c>App</c> can be an application name or the atom
+ <c>all</c>. The application (or all applications) will cover
+ compile the listed <c>Modules</c>.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name>trc(TraceInfoFile) -> ok | {error, Reason}</name>
+ <fsummary>Starts call trace on target and slave nodes</fsummary>
+ <type>
+ <v>TraceInfoFile = atom() | string()</v>
+ <d>Name of a file defining which functions to trace and how</d>
+ </type>
+ <desc>
+ <p>This function starts call trace on target and on slave or
+ peer nodes that are started or will be started by the test
+ suites.
+ </p>
+ <p>Timetraps are not extended automatically when tracing is
+ used. Use <c>multiply_timetraps/1</c> if necessary.
+ </p>
+ <p>Note that the trace support in the test server is in a very
+ early stage of the implementation, and thus not yet as
+ powerful as one might wish for.
+ </p>
+ <p>The trace information file specified by the
+ <c>TraceInfoFile</c> argument is a text file containing one or
+ more of the following elements:
+ </p>
+ <list type="bulleted">
+ <item><c>{SetTP,Module,Pattern}.</c></item>
+ <item><c>{SetTP,Module,Function,Pattern}.</c></item>
+ <item><c>{SetTP,Module,Function,Arity,Pattern}.</c></item>
+ <item><c>ClearTP.</c></item>
+ <item><c>{ClearTP,Module}.</c></item>
+ <item><c>{ClearTP,Module,Function}.</c></item>
+ <item><c>{ClearTP,Module,Function,Arity}.</c></item>
+ </list>
+ <taglist>
+ <tag><c>SetTP = tp | tpl</c></tag>
+ <item>This maps to the corresponding functions in the
+ <c>ttb</c> module in the <c>observer</c>
+ application. <c>tp</c> means set trace pattern on global
+ function calls. <c>tpl</c> means set trace pattern on local
+ and global function calls.
+ </item>
+ <tag><c>ClearTP = ctp | ctpl | ctpg</c></tag>
+ <item>This maps to the corresponding functions in the
+ <c>ttb</c> module in the <c>observer</c>
+ application. <c>ctp</c> means clear trace pattern (i.e. turn
+ off) on global and local function calls. <c>ctpl</c> means
+ clear trace pattern on local function calls only and <c>ctpg</c>
+ means clear trace pattern on global function calls only.
+ </item>
+ <tag><c>Module = atom()</c></tag>
+ <item>The module to trace
+ </item>
+ <tag><c>Function = atom()</c></tag>
+ <item>The name of the function to trace
+ </item>
+ <tag><c>Arity = integer()</c></tag>
+ <item>The arity of the function to trace
+ </item>
+ <tag><c>Pattern = [] | match_spec()</c></tag>
+ <item>The trace pattern to set for the module or
+ function. For a description of the match_spec() syntax,
+ please turn to the User's guide for the runtime system
+ (erts). The chapter "Match Specification in Erlang" explains
+ the general match specification language.
+ </item>
+ </taglist>
+ <p>The trace result will be logged in a (binary) file called
+ <c>NodeName-test_server</c> in the current directory of the
+ test server controller node. The log must be formatted using
+ <c>ttb:format/1/2</c>.
+ </p>
+ <p>This is valid for all targets except the OSE/Delta target
+ for which all nodes will be logged and automatically formatted
+ in one single text file called <c>allnodes-test_server</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name>stop_trace() -> ok | {error, not_tracing}</name>
+ <fsummary>Stops tracing on target and slave nodes.</fsummary>
+ <desc>
+ <p>This function stops tracing on target, and on slave or peer
+ nodes that are currently running. New slave or peer nodes will
+ no longer be traced after this.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>FUNCTIONS INVOKED FROM COMMAND LINE</title>
+ <p>The following functions are supposed to be invoked from the
+ command line using the <c>-s</c> option when starting the erlang
+ node.</p>
+ </section>
+ <funcs>
+ <func>
+ <name>run_test(CommandLine) -> ok</name>
+ <fsummary>Runs the tests specified on the command line.</fsummary>
+ <type>
+ <v>CommandLine = FlagList</v>
+ </type>
+ <desc>
+ <p>This function is supposed to be invoked from the
+ commandline. It starts the test server, interprets the
+ argument supplied from the commandline, runs the tests
+ specified and when all tests are done, stops the test server
+ and returns to the Erlang prompt.
+ </p>
+ <p>The <c>CommandLine</c> argument is a list of command line
+ flags, typically <c>['KEY1', Value1, 'KEY2', Value2, ...]</c>.
+ The valid command line flags are listed below.
+ </p>
+ <p>Under a UNIX command prompt, this function can be invoked like this:
+ <br></br>
+<c>erl -noshell -s test_server_ctrl run_test KEY1 Value1 KEY2 Value2 ... -s erlang halt</c></p>
+ <p>Or make an alias (this is for unix/tcsh) <br></br>
+<c>alias erl_test 'erl -noshell -s test_server_ctrl run_test \\!* -s erlang halt'</c></p>
+ <p>And then use it like this <br></br>
+<c>erl_test KEY1 Value1 KEY2 Value2 ...</c> <br></br>
+</p>
+ <p>The valid command line flags are</p>
+ <taglist>
+ <tag><c>DIR dir</c></tag>
+ <item>Adds all test modules in the directory <c>dir</c> to
+ the job queue.
+ </item>
+ <tag><c>MODULE mod</c></tag>
+ <item>Adds the module <c>mod</c> to the job queue.
+ </item>
+ <tag><c>CASE mod case</c></tag>
+ <item>Adds the case <c>case</c> in module <c>mod</c> to the
+ job queue.
+ </item>
+ <tag><c>SPEC spec</c></tag>
+ <item>Runs the test specification file <c>spec</c>.
+ </item>
+ <tag><c>SKIPMOD mod</c></tag>
+ <item>Skips all test cases in the module <c>mod</c></item>
+ <tag><c>SKIPCASE mod case</c></tag>
+ <item>Skips the test case <c>case</c> in module <c>mod</c>.
+ </item>
+ <tag><c>NAME name</c></tag>
+ <item>Names the test suite to something else than the
+ default name. This does not apply to <c>SPEC</c> which keeps
+ its names.
+ </item>
+ <tag><c>PARAMETERS parameterfile</c></tag>
+ <item>Specifies the parameter file to use when starting
+ remote target
+ </item>
+ <tag><c>COVER app cover_file analyse</c></tag>
+ <item>Indicates that the test should be run with cover
+ analysis. <c>app</c>, <c>cover_file</c> and <c>analyse</c>
+ corresponds to the parameters to
+ <c>test_server_ctrl:cover/3</c>. If no cover file is used,
+ the atom <c>none</c> should be given.
+ </item>
+ <tag><c>TRACE traceinfofile</c></tag>
+ <item>Specifies a trace information file. When this option
+ is given, call tracing is started on the target node and all
+ slave or peer nodes that are started. The trace information
+ file specifies which modules and functions to trace. See the
+ function <c>trc/1</c> above for more information about the
+ syntax of this file.
+ </item>
+ </taglist>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>FRAMEWORK CALLBACK FUNCTIONS</title>
+ <p>A test server framework can be defined by setting the
+ environment variable <c>TEST_SERVER_FRAMEWORK</c> to a module
+ name. This module will then be framework callback module, and it
+ must export the following function:</p>
+ </section>
+ <funcs>
+ <func>
+ <name>get_suite(Mod,Func) -> TestCaseList</name>
+ <fsummary>Get subcases.</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <v>Func = atom()</v>
+ <v>TestCaseList = [SubCase]</v>
+ </type>
+ <desc>
+ <p>This function is called before a test case is started. The
+ purpose is to retrieve a list of subcases. The default
+ behaviour of this function should be to call
+ <c>Mod:Func(suite)</c> and return the result from this call.</p>
+ </desc>
+ </func>
+ <func>
+ <name>init_tc(Mod,Func,Args) -> {ok,Args}</name>
+ <fsummary>Preparation for a test case.</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <v>Func = atom()</v>
+ <v>Args = [tuple()]</v>
+ <d>Normally Args = [Config]</d>
+ </type>
+ <desc>
+ <p>This function is called when a test case is started. It is
+ called on the process executing the test case function
+ (<c>Mod:Func</c>). Typical use of this function can be to alter
+ the input parameters to the test case function (<c>Args</c>) or
+ to set properties for the executing process.</p>
+ </desc>
+ </func>
+ <func>
+ <name>end_tc(Mod,Func,Args) -> ok</name>
+ <fsummary>Cleanup after a test case.</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <v>Func = atom()</v>
+ <v>Args = [tuple()]</v>
+ <d>Normally Args = [Config]</d>
+ </type>
+ <desc>
+ <p>This function is called when a test case is completed. It is
+ called on the process where the test case function
+ (<c>Mod:Func</c>) was executed. Typical use of this function can
+ be to clean up stuff done by <c>init_tc/3</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name>report(What,Data) -> ok</name>
+ <fsummary>Progress report for test.</fsummary>
+ <type>
+ <v>What = atom()</v>
+ <v>Data = term()</v>
+ </type>
+ <desc>
+ <p>This function is called in order to keep the framework up to
+ date about the progress of the test. This is useful e.g. if the
+ framework implements a GUI where the progress information is
+ constantly updated. The following can be reported:
+ </p>
+ <p><c>What = tests_start, Data = {Name,NumCases}</c> <br></br>
+<c>What = tests_done, Data = {Ok,Failed,Skipped}</c> <br></br>
+<c>What = tc_start, Data = {Mod,Func}</c> <br></br>
+<c>What = tc_done, Data = {Mod,Func,Result}</c></p>
+ </desc>
+ </func>
+ <func>
+ <name>error_notification(Mod, Case, Args, Error) -> ok</name>
+ <fsummary>Inform framework of crashing testcase.</fsummary>
+ <type>
+ <v>Mod = atom()</v>
+ <d>Test suite name.</d>
+ <v>Case = atom()</v>
+ <d>Name of test case function.</d>
+ <v>Args = [tuple()]</v>
+ <d>Normally Args = [Config]</d>
+ <v>Error = {Reason,Location}</v>
+ <v>Reason = term()</v>
+ <d>Reason for termination.</d>
+ <v>Location = unknown | [{Mod,Case,Line}]</v>
+ <d>Last known position in Mod before termination.</d>
+ <v>Line = integer()</v>
+ <d>Line number in file Mod.erl.</d>
+ </type>
+ <desc>
+ <p>This function is called as the result of testcase Mod:Case failing
+ with Reason at Location. The function is intended mainly to aid
+ specific logging or error handling in the framework application. Note
+ that for Location to have relevant values (i.e. other than unknown),
+ the <c>line</c> macro or <c>test_server_line</c> parse transform must
+ be used. For details, please see the section about test suite line numbers
+ in the <c>test_server</c> reference manual page.</p>
+ </desc>
+ </func>
+ <func>
+ <name>warn(What) -> boolean()</name>
+ <fsummary>Ask framework if test server should issue a warning for What.</fsummary>
+ <type>
+ <v>What = processes | nodes</v>
+ </type>
+ <desc>
+ <p>The test server checks the number of processes and nodes
+ before and after the test is executed. This function is a
+ question to the framework if the test server should warn when
+ the number of processes or nodes has changed during the test
+ execution. If <c>true</c> is returned, a warning will be written
+ in the test case minor log file.</p>
+ </desc>
+ </func>
+ <func>
+ <name>target_info() -> InfoStr</name>
+ <fsummary>Print info about the target system to the test case log.</fsummary>
+ <type>
+ <v>InfoStr = string() | ""</v>
+ </type>
+ <desc>
+ <p>The test server will ask the framework for information about
+ the test target system and print InfoStr in the test case
+ log file below the host information.</p>
+ </desc>
+ </func>
+ </funcs>
+</erlref>
+
diff --git a/lib/test_server/doc/src/test_spec_chapter.xml b/lib/test_server/doc/src/test_spec_chapter.xml
new file mode 100644
index 0000000000..3a7730d61e
--- /dev/null
+++ b/lib/test_server/doc/src/test_spec_chapter.xml
@@ -0,0 +1,375 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Test Structure and Test Specifications</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>test_spec_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Test structure</title>
+ <p>A test consists of a set of test cases. Each test case is
+ implemented as an erlang function. An erlang module implementing
+ one or more test cases is called a test suite.
+ </p>
+ </section>
+
+ <section>
+ <title>Test specifications</title>
+ <p>A test specification is a specification of which test suites
+ and test cases to run and which to skip. A test specification can
+ also group several test cases into conf cases with init and
+ cleanup functions (see section about configuration cases
+ below). In a test there can be test specifications on three
+ different levels:
+ </p>
+ <p>The top level is a test specification file which roughly
+ specifies what to test for a whole application. The test
+ specification in such a file is encapsulated in a topcase
+ command.
+ </p>
+ <p>Then there is a test specification for each test suite,
+ specifying which test cases to run within the suite. The test
+ specification for a test suite is returned from the
+ <c>all(suite)</c> function in the test suite module.
+ </p>
+ <p>And finally there can be a test specification per test case,
+ specifying sub test cases to run. The test specification for a
+ test case is returned from the specification clause of the test
+ case.
+ </p>
+ <p>When a test starts, the total test specification is built in a
+ tree fashion, starting from the top level test specification.
+ </p>
+ <p>The following are the valid elements of a test
+ specification. The specification can be one of these elements or a
+ list with any combination of the elements:
+ </p>
+ <taglist>
+ <tag><c>{Mod, Case}</c></tag>
+ <item>This specifies the test case Mod:Case/1
+ </item>
+ <tag><c>{dir, Dir}</c></tag>
+ <item>This specifies all modules <c>*_SUITE</c> in the directory
+ <c>Dir</c></item>
+ <tag><c>{dir, Dir, Pattern}</c></tag>
+ <item>This specifies all modules <c>Pattern*</c> in the
+ directory <c>Dir</c></item>
+ <tag><c>{conf, Init, TestSpec, Fin}</c></tag>
+ <item>This is a configuration case. In a test specification
+ file, <c>Init</c> and <c>Fin</c> must be
+ <c>{Mod,Func}</c>. Inside a module they can also be just
+ <c>Func</c>. See the section named Configuration Cases below for
+ more information about this.
+ </item>
+ <tag><c>{conf, Properties, Init, TestSpec, Fin}</c></tag>
+ <item>This is a configuration case as explained above, but
+ which also takes a list of execution properties for its group
+ of test cases and nested sub-groups.
+ </item>
+ <tag><c>{make, Init, TestSpec, Fin}</c></tag>
+ <item>This is a special version of a conf case which is only
+ used by the test server framework <c>ts</c>. <c>Init</c> and
+ <c>Fin</c> are make and unmake functions for a data
+ directory. <c>TestSpec</c> is the test specification for the
+ test suite owning the data directory in question. If the make
+ function fails, all tests in the test suite are skipped. The
+ difference between this "make case" and a normal conf case is
+ that for the make case, <c>Init</c> and <c>Fin</c> are given with
+ arguments (<c>{Mod,Func,Args}</c>), and that they are executed
+ on the controller node (i.e. not on target).
+ </item>
+ <tag><c>Case</c></tag>
+ <item>This can only be used inside a module, i.e. not a test
+ specification file. It specifies the test case
+ <c>CurrentModule:Case</c>.
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <title>Test Specification Files</title>
+ <p>A test specification file is a text file containing the top
+ level test specification (a topcase command), and possibly one or
+ more additional commands. A "command" in a test specification file
+ means a key-value tuple ended by a dot-newline sequence.
+ </p>
+ <p>The following commands are valid:
+ </p>
+ <taglist>
+ <tag><c>{topcase, TestSpec}</c></tag>
+ <item>This command is mandatory in all test specification
+ files. <c>TestSpec</c> is the top level test specification of a
+ test.
+ </item>
+ <tag><c>{skip, {Mod, Comment}}</c></tag>
+ <item>This specifies that all cases in the module <c>Mod</c>
+ shall be skipped. <c>Comment</c> is a string.
+ </item>
+ <tag><c>{skip, {Mod, Case, Comment}}</c></tag>
+ <item>This specifies that the case <c>Mod:Case</c> shall be
+ skipped.
+ </item>
+ <tag><c>{skip, {Mod, CaseList, Comment}}</c></tag>
+ <item>This specifies that all cases <c>Mod:Case</c>, where
+ <c>Case</c> is in <c>CaseList</c>, shall be skipped.
+ </item>
+ <tag><c>{nodes, Nodes}</c></tag>
+ <item><c>Nodes</c> is a list of nodenames available to the test
+ suite. It will be added to the <c>Config</c> argument to all
+ test cases. <c>Nodes</c> is a list of atoms.
+ </item>
+ <tag><c>{require_nodenames, Num}</c></tag>
+ <item>Specifies how many nodenames the test suite will
+ need. These will be automatically generated and inserted into the
+ <c>Config</c> argument to all test cases. <c>Num</c> is an
+ integer.
+ </item>
+ <tag><c>{hosts, Hosts}</c></tag>
+ <item>This is a list of available hosts on which to start slave
+ nodes. It is used when the <c>{remote, true}</c> option is given
+ to the <c>test_server:start_node/3</c> function. Also, if
+ <c>{require_nodenames, Num}</c> is contained in a test
+ specification file, the generated nodenames will be spread over
+ all hosts given in this <c>Hosts</c> list. The hostnames are
+ atoms or strings.
+ </item>
+ <tag><c>{diskless, true}</c></tag>
+ <item>Adds <c>{diskless, true}</c> to the <c>Config</c> argument
+ to all test cases. This is kept for backwards compatibility and
+ should not be used. Use a configuration case instead.
+ </item>
+ <tag><c>{ipv6_hosts, Hosts}</c></tag>
+ <item>Adds <c>{ipv6_hosts, Hosts}</c> to the <c>Config</c>
+ argument to all test cases.</item>
+ </taglist>
+ <p>All test specification files shall have the extension
+ ".spec". If special test specification files are needed for
+ Windows or VxWorks platforms, additional files with the
+ extension ".spec.win" and ".spec.vxworks" shall be
+ used. This is useful e.g. if some test cases shall be skipped on
+ these platforms.
+ </p>
+ <p>Some examples for test specification files can be found in the
+ Examples section of this user's guide.
+ </p>
+ </section>
+
+ <section>
+ <title>Configuration cases</title>
+ <p>If a group of test cases need the same initialization, a so called
+ <em>configuration</em> or <em>conf</em> case can be used. A conf
+ case consists of an initialization function, the group of test cases
+ needing this initialization and a cleanup or finalization function.
+ </p>
+ <p>If the init function in a conf case fails or returns
+ <c>{skip,Comment}</c>, the rest of the test cases in the conf case
+ (including the cleanup function) are skipped. If the init function
+ succeeds, the cleanup function will always be called, even if some
+ of the test cases in between failed.
+ </p>
+ <p>Both the init function and the cleanup function in a conf case
+ get the <c>Config</c> parameter as only argument. This parameter
+ can be modified or returned as is. Whatever is returned by the
+ init function is given as <c>Config</c> parameter to the rest of
+ the test cases in the conf case, including the cleanup function.
+ </p>
+ <p>If the <c>Config</c> parameter is changed by the init function,
+ it must be restored by the cleanup function. Whatever is returned
+ by the cleanup function will be given to the next test case called.
+ </p>
+ <p>The optional <c>Properties</c> list can be used to specify
+ execution properties for the test cases and possibly nested
+ sub-groups of the configuration case. The available properties are:</p>
+ <pre>
+ Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+ Shuffle = shuffle | {shuffle,Seed}
+ Seed = {integer(),integer(),integer()}
+ RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+ repeat_until_any_ok | repeat_until_any_fail
+ N = integer() | forever</pre>
+
+ <p>If the <c>parallel</c> property is specified, Test Server will execute
+ all test cases in the group in parallel. If <c>sequence</c> is specified,
+ the cases will be executed in a sequence, meaning if one case fails, all
+ following cases will be skipped. If <c>shuffle</c> is specified, the cases
+ in the group will be executed in random order. The <c>repeat</c> property
+ orders Test Server to repeat execution of the cases in the group a given
+ number of times, or until any, or all, cases fail or succeed.</p>
+
+ <p>Properties may be combined so that e.g. if <c>shuffle</c>,
+ <c>repeat_until_any_fail</c> and <c>sequence</c> are all specified, the test
+ cases in the group will be executed repeatedly and in random order until
+ a test case fails, when execution is immediately stopped and the rest of
+ the cases skipped.</p>
+
+ <p>The properties for a conf case is always printed on the top of the HTML log
+ for the group's init function. Also, the total execution time for a conf case
+ can be found at the bottom of the log for the group's end function.</p>
+
+ <p>Configuration cases may be nested so that sets of grouped cases can be
+ configured with the same init- and end functions.</p>
+ </section>
+
+ <section>
+ <title>The parallel property and nested configuration cases</title>
+ <p>If a conf case has a parallel property, its test cases will be spawned
+ simultaneously and get executed in parallel. A test case is not allowed
+ to execute in parallel with the end function however, which means
+ that the time it takes to execute a set of parallel cases is equal to the
+ execution time of the slowest test case in the group. A negative side
+ effect of running test cases in parallel is that the HTML summary pages
+ are not updated with links to the individual test case logs until the
+ end function for the conf case has finished.</p>
+
+ <p>A conf case nested under a parallel conf case will start executing in
+ parallel with previous (parallel) test cases (no matter what properties the
+ nested conf case has). Since, however, test cases are never executed in
+ parallel with the init- or the end function of the same conf case, it's
+ only after a nested group of cases has finished that any remaining parallel
+ cases in the previous conf case get spawned.</p>
+ </section>
+
+ <section>
+ <title>Repeated execution of test cases</title>
+ <marker id="repeated_cases"></marker>
+ <p>A conf case may be repeated a certain number of times
+ (specified by an integer) or indefinitely (specified by <c>forever</c>).
+ The repetition may also be stopped prematurely if any or all cases
+ fail or succeed, i.e. if the property <c>repeat_until_any_fail</c>,
+ <c>repeat_until_any_ok</c>, <c>repeat_until_all_fail</c>, or
+ <c>repeat_until_all_ok</c> is used. If the basic <c>repeat</c>
+ property is used, status of test cases is irrelevant for the repeat
+ operation.</p>
+
+ <p>It is possible to return the status of a conf case (ok or
+ failed), to affect the execution of the conf case on the level above.
+ This is accomplished by, in the end function, looking up the value
+ of <c>tc_group_result</c> in the <c>Config</c> list and checking the
+ result of the finished test cases. If status <c>failed</c> should be
+ returned from the conf case as a result, the end function should return
+ the value <c>{return_group_result,failed}</c>. The status of a nested conf
+ case is taken into account by Test Server when deciding if execution
+ should be repeated or not (unless the basic <c>repeat</c> property is used).</p>
+
+ <p>The <c>tc_group_result</c> value is a list of status tuples,
+ each with one of the keys <c>ok</c>, <c>skipped</c> and <c>failed</c>. The
+ value of a status tuple is a list containing names of test cases
+ that have been executed with the corresponding status as result.</p>
+
+ <p>Here's an example of how to return the status from a conf case:</p>
+ <pre>
+ conf_end_function(Config) ->
+ Status = ?config(tc_group_result, Config),
+ case proplists:get_value(failed, Status) of
+ [] -> % no failed cases
+ {return_group_result,ok};
+ _Failed -> % one or more failed
+ {return_group_result,failed}
+ end.</pre>
+
+ <p>It is also possible in the end function to check the status of
+ a nested conf case (maybe to determine what status the current conf case should
+ return). This is as simple as illustrated in the example above, only the
+ name of the end function of the nested conf case is stored in a tuple
+ <c>{group_result,EndFunc}</c>, which can be searched for in the status lists.
+ Example:</p>
+ <pre>
+ conf_end_function_X(Config) ->
+ Status = ?config(tc_group_result, Config),
+ Failed = proplists:get_value(failed, Status),
+ case lists:member({group_result,conf_end_function_Y}, Failed) of
+ true ->
+ {return_group_result,failed};
+ false ->
+ {return_group_result,ok}
+ end;
+ ...</pre>
+
+ <note><p>When a conf case is repeated, the init- and end functions
+ are also always called with each repetition.</p></note>
+ </section>
+
+ <section>
+ <title>Shuffled test case order</title>
+ <p>The order that test cases in a conf case are executed, is under normal
+ circumstances the same as the order defined in the test specification.
+ With the <c>shuffle</c> property set, however, Test Server will instead
+ execute the test cases in random order.</p>
+
+ <p>The user may provide a seed value (a tuple of three integers) with
+ the shuffle property: <c>{shuffle,Seed}</c>. This way, the same shuffling
+ order can be created every time the conf case is executed. If no seed value
+ is given, Test Server creates a "random" seed for the shuffling operation
+ (using the return value of <c>erlang:now()</c>). The seed value is always
+ printed to the log file of the init function so that it can be used to
+ recreate the same execution order in subsequent test runs.</p>
+
+ <note><p>If execution of a conf case with shuffled test cases is repeated,
+ the seed will not be reset in between turns.</p></note>
+
+ <p>If a nested conf case is specified in a conf case with a <c>shuffle</c>
+ property, the execution order of the nested cases in relation to the test cases
+ (and other conf cases) is also random. The order of the test cases in the nested
+ conf case is however not random (unless, of course, this one also has a
+ <c>shuffle</c> property).</p>
+ </section>
+
+ <section>
+ <title>Skipping test cases</title>
+ <p>It is possible to skip certain test cases, for example if you
+ know beforehand that a specific test case fails. This might be
+ functionality which isn't yet implemented, a bug that is known but
+ not yet fixed or some functionality which doesn't work or isn't
+ applicable on a specific platform.
+ </p>
+ <p>There are several different ways to state that a test case
+ should be skipped:</p>
+ <list type="bulleted">
+ <item>Using the <c>{skip,What}</c> command in a test
+ specification file
+ </item>
+ <item>Returning <c>{skip,Reason}</c> from the
+ <c>init_per_testcase/2</c> function
+ </item>
+ <item>Returning <c>{skip,Reason}</c> from the specification
+ clause of the test case
+ </item>
+ <item>Returning <c>{skip,Reason}</c> from the execution clause
+ of the test case
+ </item>
+ </list>
+ <p>The latter of course means that the execution clause is
+ actually called, so the author must make sure that the test case
+ is not run. For more information about the different clauses in a
+ test case, see the chapter about writing test cases.
+ </p>
+ <p>When a test case is skipped, it will be noted as <c>SKIPPED</c>
+ in the HTML log.
+ </p>
+ </section>
+</chapter>
+
diff --git a/lib/test_server/doc/src/ts.xml b/lib/test_server/doc/src/ts.xml
new file mode 100644
index 0000000000..0f91d3eea2
--- /dev/null
+++ b/lib/test_server/doc/src/ts.xml
@@ -0,0 +1,592 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2007</year>
+ <year>2008</year>
+ <holder>Ericsson AB, All Rights Reserved</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ The Initial Developer of the Original Code is Ericsson AB.
+ </legalnotice>
+
+ <title>The OTP Test Server Framework</title>
+ <prepared>Mattias Nilsson</prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>ts.xml</file>
+ </header>
+ <module>ts</module>
+ <modulesummary>Test Server Framework for testing OTP</modulesummary>
+ <description>
+ <p>This is a framework for testing OTP. The <c>ts</c> module
+ implements the interface to all the functionality in the
+ framework.
+ </p>
+ <p>The framework is built on top of the Test Server Controller,
+ <c>test_server_ctrl</c>, and provides a high level operator
+ interface. The main features added by the framework are:
+ </p>
+ <list type="bulleted">
+ <item>Automatic compilation of test suites and data directories
+ </item>
+ <item>Collection of files in central directories and creation of
+ additional HTML pages for better overview.
+ </item>
+ <item>Single command interface for running all available tests
+ </item>
+ <item>Spawns a new node with correct parameters before starting
+ the test server
+ </item>
+      <item>Automatically creates the parameter file needed when
+       running tests on remote target
+ </item>
+ </list>
+ <p>More information about the Test Server Framework and how to run
+ test cases can be found in the Test Server User's Guide.
+ </p>
+    <p>For writing your own test server framework, please turn to the
+ reference manual for the Test Server Controller and chapter named
+ "Writing your own test server framework" in the Test Server User's
+ Guide.
+ </p>
+ <p>SETUP</p>
+ <p>To be able to run <c>ts</c>, you must first `install'
+ <c>ts</c> for the current environment. This is done by calling
+ <c>ts:install/0/1/2</c>. A file called `variables' is created
+ and used by <c>ts</c> when running test suites. It is not
+ recommended to edit this file, but it is possible to alter if
+ <c>ts</c> gets the wrong idea about your environment.
+ </p>
+ <p><c>ts:install/0</c> is used if the target platform is the
+ same as the controller host, i.e. if you run on "local target"
+      and no options are needed. Then running <c>ts:install/0</c>
+      will run an autoconf script for your current
+ environment and set up the necessary variables needed by the
+ test suites.
+ </p>
+ <p><c>ts:install/1</c> or <c>ts:install/2</c> is used if the
+ target platform is different from the controller host, i.e. if
+ you run on "remote target" or if special options are required
+ for your system. VxWorks is currently supported
+ as remote target platform.
+ </p>
+ <p>See the reference manual for detailed information about
+ <c>ts:install/0/1/2</c>.
+ </p>
+ <p>Some of the common variables in the 'variables' file are
+      described below. Do not make any assumptions as to what is found
+ in this file, as it may change at any time.
+ </p>
+ <list>
+ <item><c>longnames</c><br></br>
+ Set to true if the system is using fully qualified
+ nodenames.
+ </item>
+ <item><c>platform_id</c><br></br>
+ This is the currently installed platform identification
+ string.
+ </item>
+ <item><c>platform_filename</c><br></br>
+ This is the name used to create the final save directory
+ for test runs.
+ </item>
+ <item><c>platform_label</c><br></br>
+ This is the string presented in the generated test
+ results index page.
+ </item>
+ <item><c>rsh_name</c><br></br>
+ This is the rsh program to use when starting slave or
+ peer nodes on a remote host.
+ </item>
+ <item><c>erl_flags</c><br></br>
+ Compile time flags used when compiling test suites.
+ </item>
+ <item><c>erl_release</c><br></br>
+ The Erlang/OTP release being tested.
+ </item>
+ <item><c>'EMULATOR'</c><br></br>
+ The emulator being tested (e.g. beam)
+ </item>
+ <item><c>'CPU'</c><br></br>
+ The CPU in the machine running the tests, e.g. sparc.
+ </item>
+ <item><c>target_host</c><br></br>
+ The target host name
+ </item>
+ <item><c>os</c><br></br>
+ The target operating system, e.g. solaris2.8
+ </item>
+ <item><c>target</c><br></br>
+ The current target platform, e.g. sparc-sun-solaris2.8
+ </item>
+ </list>
+ <p>RUNNING TESTS</p>
+ <p>After installing <c>ts</c>, you can run your test with the
+ <c>ts:run/0/1/2/3/4</c> functions. These functions, however,
+ require a special directory structure to be able to find your
+ test suites. Both the test server and all tests must be located
+ under your $TESTROOT directory. The test server implementation
+ shall be located in the directory <c>$TESTROOT/test_server</c>
+ and for each application there must be a directory named
+ <c><![CDATA[$TESTROOT/<application>_test]]></c> containing the .spec file
+ and all test suites and data directories for the
+ application. Note that there shall only be one .spec file for
+ each application.
+ </p>
+ <p><c>$TESTROOT/test_server</c> must be the current directory
+ when calling the <c>ts:run/*</c> function.
+ </p>
+ <p>All available tests can be found with <c>ts:tests()</c>. This
+ will list all applications for which a test specification file
+ <c><![CDATA[../<application>_test/<application>.spec]]></c> can be found.
+ </p>
+ <p>To run all these tests, use <c>ts:run()</c>.
+ </p>
+ <p>To run one or some of the tests, use <c>ts:run(Tests)</c>,
+ where <c>Tests</c> is the name of the application you want to
+ test, or a list of such names.
+ </p>
+ <p>To run one test suite within a test, use
+ <c>ts:run(Test,Suite)</c>.
+ </p>
+ <p>To run one test case within a suite, use
+ <c>ts:run(Test,Suite,Case)</c></p>
+ <p>To all these functions, you can also add a list of
+ options. Please turn to the reference manual for the <c>ts</c>
+ module to see the valid options to use.
+ </p>
+ <p>The function <c>ts:help()</c> displays some simple help for
+ the functions in <c>ts</c>. Use this for quick reference.
+ </p>
+ <p>LOG FILES</p>
+    <p>As the execution of the test suites goes on, events are logged in
+ four different ways:
+ </p>
+ <list type="bulleted">
+ <item>Text to the operator's console.</item>
+ <item>Suite related information is sent to the major log file.</item>
+ <item>Case related information is sent to the minor log file.</item>
+ <item>The HTML log file gets updated with test results.</item>
+ </list>
+ <p>Typically the operator, who may run hundreds or thousands of
+ test cases, doesn't want to fill the screen with details
+ about/from the specific test cases. By default, the operator will
+ only see:
+ </p>
+ <list type="bulleted">
+ <item>A confirmation that the test has started.
+ </item>
+ <item>A small note about each failed test case.
+ </item>
+ <item>A summary of all the run test cases.
+ </item>
+ <item>A confirmation that the test run is complete
+ </item>
+ <item>Some special information like error reports and progress
+ reports, printouts written with erlang:display/1 or io:format/3
+ specifically addressed to somewhere other than
+ <c>standard_io</c>.</item>
+ </list>
+    <p>This is enough for the operator to know, and if he wants to dig
+      deeper into a specific test case result, he can do so by
+ following the links in the HTML presentation to take a look in the
+ major or minor log files.
+ </p>
+ <p>A detailed report of the entire test suite is stored in the
+ major logfile, the exact reason for failure, time spent etc.
+ </p>
+ <p>The HTML log file is a summary of the major log file, but gives
+ a much better overview of the test run. It also has links to every
+ test case's log file for quick viewing with a HTML browser.
+ </p>
+    <p>The minor log file contains full details of every single test
+ case, each one in a separate file. This way the files should be
+ easy to compare with previous test runs, even if the set of test
+ cases change.
+ </p>
+    <p>What information goes where is user configurable via the
+ test server controller. Three threshold values determine what
+ comes out on screen, and in the major or minor log files. The
+ contents that goes to the HTML log file is fixed, and cannot be
+ altered.
+ </p>
+
+ </description>
+ <funcs>
+ <func>
+ <name>install() -> ok | {error, Reason}</name>
+ <name>install(TargetSystem) -> ok | {error, Reason}</name>
+ <name>install(Opts) -> ok | {error, Reason}</name>
+ <name>install(TargetSystem,Opts) -> ok | {error, Reason}</name>
+ <fsummary>Installs the Test Server Framework</fsummary>
+ <type>
+ <v>TargetSystem = {Architecture, TargetHost}</v>
+ <v>Architecture = atom() or string()</v>
+ <d>e.g. "ose" or "vxworks_ppc603"</d>
+ <v>TargetHost = atom() or string()</v>
+ <d>The name of the target host</d>
+ <v>Opts = list()</v>
+ </type>
+ <desc>
+ <p>Installs and configures the Test Server Framework for
+ running test suites. If a remote host is to be used, the
+ <c>TargetSystem</c> argument must be given so that "cross
+ installation" can be done. This should be used for testing on
+ VxWorks or OSE/Delta. Installation is required for any of the
+ functions in <c>ts</c> to work.
+ </p>
+ <p>Opts may be one or more of
+ </p>
+ <list>
+ <item><c>{longnames, Bool}</c><br></br>
+ Use fully qualified hostnames for test_server and
+ slave nodes. Bool is <c>true</c> or <c>false</c> (default).
+ </item>
+ <item><c>{verbose, Level}</c><br></br>
+ Verbosity level for test server output, set to 0, 1 or
+ 2, where 0 is quiet(default).
+ </item>
+ <item><c>{hosts, Hosts}</c><br></br>
+ This is a list of available hosts on which to start
+ slave nodes. It is used when the <c>{remote, true}</c>
+ option is given to the <c>test_server:start_node/3</c>
+ function. Also, if <c>{require_nodenames, Num}</c> is
+ contained in a test specification file, the generated
+ nodenames will be spread over all hosts given in this
+ <c>Hosts</c> list. The hostnames are given as atoms or
+ strings.
+ </item>
+	<item><c>{slavetargets, SlaveTargets}</c><br></br>
+ For VxWorks and OSE/Delta only. This is a list of
+ available hosts where slave nodes can be started. This is
+ necessary because only one node can run per host in the
+ VxWorks environment. This is not the same as
+ <c>{hosts, Hosts}</c> because it is used for all slave nodes
+ - not only the ones started with <c>{remote, true}</c>. The
+ hostnames are given as atoms or strings.
+ </item>
+ <item><c>{crossroot, TargetErlRoot}</c><br></br>
+ Erlang root directory on target host
+ <br></br>
+This option is mandatory for remote targets
+ </item>
+ <item><c>{master, {MasterHost, MasterCookie}}</c><br></br>
+ If target is remote and the target node is started as
+ a slave node, this option
+ indicates which master and cookie to use. The given master
+ will also be used as master for slave nodes started with
+ <c>test_server:start_node/3</c>. It is expected that the
+ <c>erl_boot_server</c> is started on the master node before
+ the test is run. If this option is not given, the test
+ server controller node is used as master and the
+ <c>erl_boot_server</c> is automatically started.
+ </item>
+ <item><c>{erl_start_args, ArgString}</c><br></br>
+ Additional arguments to be used when starting the test
+ server controller node. <c>ArgString</c> will be appended to
+ the command line when starting the erlang node. Note that
+ this will only affect the startup of the <em>controller node</em>,
+ i.e. not the target node or any slave nodes
+	  started from a test case.
+ </item>
+ <item><c>{ipv6_hosts, HostList}</c><br></br>
+ This option will be inserted in the
+ <c>Config</c> parameter for each test case. <c>HostList</c>
+ is a list of hosts supporting IPv6.
+ </item>
+ </list>
+ </desc>
+ </func>
+ <func>
+ <name>help() -> ok</name>
+ <fsummary>Presents simple help on the functions in <c>ts</c></fsummary>
+ <desc>
+ <p>Presents simple help on the functions in <c>ts</c>. Useful
+ for quick reference.</p>
+ </desc>
+ </func>
+ <func>
+ <name>tests() -> Tests</name>
+ <fsummary>Returns the list of available tests</fsummary>
+ <desc>
+ <p>Returns the list of available tests. This is actually just
+ a list of all test specification files found by looking up
+ "../*_test/*.spec".
+ </p>
+ <p>In each ../Name_test/ directory there should be one test
+ specification file named Name.spec.</p>
+ </desc>
+ </func>
+ <func>
+ <name>run() -> ok | {error, Reason}</name>
+ <name>run([all_tests|Opts])</name>
+ <name>run(Specs)</name>
+ <name>run(Specs, Opts)</name>
+ <name>run(Spec, Module)</name>
+ <name>run(Spec, Module, Opts)</name>
+ <name>run(Spec, Module, Case)</name>
+ <name>run(Spec, Module, Case, Opts)</name>
+ <fsummary>Runs (specified) test suite(s)</fsummary>
+ <type>
+ <v>Specs = Spec | [Spec]</v>
+ <v>Spec = atom()</v>
+ <v>Module = atom()</v>
+ <v>Case = atom()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = batch | verbose | {verbose, Level} | {vars, Vars} | keep_topcase | cover | cover_details |{cover,CoverFile} | {cover_details,CoverFile} | {trace, TraceSpec}</v>
+ <v>Level = integer(); 0 means silent</v>
+ <v>Vars = list() of key-value tuples</v>
+ <v>CoverFile = string(); name of file listing modules to exclude from or include in cover compilation. The name must include full path to the file.</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This function runs test suite(s)/case(s). To be able to run
+ any tests, ts:install must first be called to create the
+ <c>variables</c> file needed. To run a whole test specification,
+ only specify the name of the test specification, and all test
+ suite modules belonging to that test spec will be run. To run
+ a single module in a test specification, use the Module
+ argument to specify the name of the module to run and all test
+ cases in that module will be run, and to run a specified test
+ case, specify the name of the test case using the Case
+ argument. If called with no argument, all test specifications
+ available will be run. Use ts:tests/0 to see the available
+ test specifications.
+ </p>
+ <p>If the <c>batch</c> option is not given, a new xterm is
+ started (unix) when <c>ts:run</c> is called.
+ </p>
+ <p>The <c>verbose</c> option sets the verbosity level for test
+ server output. This has the same effect as if given to
+ <c>ts:install/1/2</c></p>
+ <p>The <c>vars</c> option can be used for adding configuration
+ variables that are not in the <c>variables</c> file generated
+ during installation. Can be any of the <c>Opts</c> valid for
+ <c>ts:install/1/2</c>.
+ </p>
+ <p>The <c>keep_topcase</c> option forces <c>ts</c> to keep the
+ topcase in your test specification file as is. This option can
+ only be used if you don't give the <c>Module</c> or
+ <c>Case</c> parameters to <c>ts:run</c>. The
+ <c>keep_topcase</c> option is necessary if your topcase
+ contains anything other than <c><![CDATA[{dir,"../<Name>_test"}]]></c>. If
+ the option is not used, <c>ts</c> will modify your topcase.
+ </p>
+	<p>The <c>cover</c> and <c>cover_details</c> options indicate
+ that the test shall be run with code coverage
+ analysis. <c>cover_details</c> means that analysis shall be
+ done on the most detailed level. If the test is run with a
+ remote target, this option creates a list of uncovered lines
+ in each cover compiled module. If the test is run with a local
+ target, each cover compiled module will be analysed with
+	  <c>cover:analyse_to_file/1</c>. The <c>cover</c> option will
+ only create an overview of all cover compiled modules with the
+ number of covered and not covered lines.
+ </p>
+ <p>The <c>CoverFile</c> which can be given with the
+ <c>cover</c> and <c>cover_details</c> options must be the
+ filename of a file listing modules to be excluded from or
+ included in the cover compilation. By default, <c>ts</c>
+ believes that <c>Spec</c> is the name of an OTP application
+ and that all modules in this application shall be cover
+ compiled. The <c>CoverFile</c> can exclude modules that belong
+ to the application and add modules that don't belong to the
+ application. The file can have the following entries:</p>
+ <code type="none">
+{exclude, all | ExcludeModuleList}.
+{include, IncludeModuleList}. </code>
+ <p>Note that each line must end with a full
+ stop. <c>ExcludeModuleList</c> and <c>IncludeModuleList</c>
+ are lists of atoms, where each atom is a module name.
+ </p>
+ <p>If the <c>cover</c> or <c>cover_details</c> options are
+ given on their own, the directory <c><![CDATA[../<Spec>_test]]></c> is
+ searched for a <c>CoverFile</c> named <c><![CDATA[<Spec>.cover]]></c>. If
+ this file is not found, <c>Spec</c> is assumed to be the name
+ of an OTP application, and all modules in the <c>ebin</c>
+ directory for the application are cover compiled. The
+ <c>ebin</c> directory is found by adding <c>ebin</c> to
+ <c>code:lib_dir(Spec)</c>.
+ </p>
+ <p>The same cover compiled code will be loaded on all slave or
+ peer nodes started with <c>test_server:start_node/3</c>. The
+ exception is nodes that run an old version of OTP. If the loading
+ fails, the node will simply not be a part of the coverage
+ analysis. Note that slave and peer nodes must be stopped with
+ <c>test_server:stop_node/1</c> for the node to be part of the
+ coverage analysis, else the test server will not be able to
+ fetch coverage data from the node.
+ </p>
+ <p>The <c>trace</c> option is used to turn on call trace on
+ target and on slave or peer nodes started with
+ <c>test_server:start_node/3</c>. <c>TraceSpec</c> can be the
+ name of a trace information file, or a list of elements like
+ the ones in a trace information file. Please turn to the
+ reference manual for <c>test_server_ctrl:trc/1</c> for details
+ about the trace information file.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name>cross_cover_analyse(Level) -> ok</name>
+ <name>cross_cover_analyse([Level]) -> ok</name>
+ <fsummary>Analyse cover data collected from all tests</fsummary>
+ <desc>
+ <p>Analyse cover data collected from all tests.
+ </p>
+ <p>See test_server_ctrl:cross_cover_analyse/1
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name>r() -> ok</name>
+ <name>r(Opts) -> ok</name>
+ <name>r(SpecOrSuite) -> ok</name>
+ <name>r(SpecOrSuite,Opts) -> ok</name>
+ <name>r(Suite,Case) -> ok</name>
+ <name>r(Suite,Case,Opts) -> ok</name>
+      <fsummary>Run test suite or test case without <c>ts</c> installed</fsummary>
+ <type>
+ <v>SpecOrSuite = Spec | Suite</v>
+ <v>Spec = string()</v>
+ <d>"Name.spec" or "Name.spec.OsType", where OsType is vxworks</d>
+ <v>Suite = atom()</v>
+ <v>Case = atom()</v>
+ <v>Opts = [Opt]</v>
+ <v>Opt = {Cover,AppOrCoverFile} | {Cover,Application,CoverFile}</v>
+ <v>Cover = cover | cover_details</v>
+ <v>AppOrCoverFile = Application | CoverFile</v>
+ <v>Application = atom()</v>
+ <d>OTP application to cover compile</d>
+ <v>CoverFile = string()</v>
+ <d>Name of file listing modules to exclude from or include in cover compilation</d>
+ </type>
+ <desc>
+	<p>This function can be used to run test suites or test
+ cases directly, without any of the additional features added
+ by the test server framework. It is simply a wrapper function
+ for the <c>add_dir</c>, <c>add_spec</c>, <c>add_module</c> and
+ <c>add_case</c> functions in <c>test_server_ctrl</c>:
+ </p>
+ <p><c>r() -> add_dir(".")</c> <br></br>
+<c>r(Spec) -> add_spec(Spec)</c> <br></br>
+<c>r(Suite) -> add_module(Suite)</c> <br></br>
+<c>r(Suite,Case) -> add_case(Suite,Case)</c></p>
+ <p>To use this function, it is required that the test suite is
+ compiled and in the code path of the node where the function
+ is called. The function can be used without having <c>ts</c>
+ installed.
+ </p>
+ <p>For information about the <c>cover</c> and
+ <c>cover_details</c> options, see <c>test_server_ctrl:cover/2/3</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name>index() -> ok | {error, Reason}</name>
+ <fsummary>Updates local index page</fsummary>
+ <type>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This function updates the local index page. This can be
+ useful if a previous test run was not completed and the index
+ is incomplete.</p>
+ </desc>
+ </func>
+ <func>
+ <name>clean() -> ok</name>
+ <name>clean(all) -> ok</name>
+ <fsummary>Cleans up the log directories created when running tests. </fsummary>
+ <desc>
+ <p>This function cleans up log directories created when
+ running test cases. <c>clean/0</c> cleans up all but the last
+ run of each application. <c>clean/1</c> cleans up all test
+ runs found.</p>
+ </desc>
+ </func>
+ <func>
+ <name>estone() -> ok | {error, Reason}</name>
+ <name>estone(Opts) -> ok</name>
+ <fsummary>Runs the EStone test</fsummary>
+ <desc>
+ <p>This function runs the EStone test. It is a shortcut for
+ running the test suite <c>estone_SUITE</c> in the
+ <c>kernel</c> application.
+ </p>
+ <p><c>Opts</c> is the same as the <c>Opts</c> argument for the
+ <c>ts:run</c> functions.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+    <title>Makefile.src in Data Directory</title>
+ <p>If a data directory contains code which must be compiled before
+ the test suite is run, a makefile source called
+ <c>Makefile.src</c> can be placed in the data directory. This file
+ will be converted to a valid makefile by <c>ts:run/0/1/2/3/4</c>.
+ </p>
+ <p>The reason for generating the makefile is that you can use
+ variables from the <c>variables</c> file which was generated by
+ <c>ts:install/0/1/2</c>. All occurrences of <c>@Key@</c> in
+      <c>Makefile.src</c> are substituted by the <c>Value</c> from
+ <c>{Key,Value}</c> found in the <c>variables</c> file. Example:
+ </p>
+ <p>Cut from <c>variables</c>:</p>
+ <code type="none">
+ ...
+ {'EMULATOR',"beam"}.
+ {'CFLAGS',"-g -O2"}.
+ {'LD',"$(CC) $(CFLAGS)"}.
+ {'CC',"gcc"}.
+ ...
+ </code>
+ <p><c>Makefile.src</c> for compiling erlang code could look
+ something like this:</p>
+ <code type="none">
+ EFLAGS=+debug_info
+
+ all: ordsets1.@EMULATOR@
+
+ ordsets1.@EMULATOR@: ordsets1.erl
+ erlc $(EFLAGS) ordsets1.erl
+ </code>
+ <p><c>Makefile.src</c> for compiling c code could look
+ something like this:</p>
+ <code type="none">
+ CC = @CC@
+ LD = @LD@
+ CFLAGS = @CFLAGS@ -I@erl_include@ @DEFS@
+ CROSSLDFLAGS = @CROSSLDFLAGS@
+
+ PROGS = nfs_check@exe@
+
+ all: $(PROGS)
+
+ nfs_check@exe@: nfs_check@obj@
+ $(LD) $(CROSSLDFLAGS) -o nfs_check nfs_check@obj@ @LIBS@
+
+ nfs_check@obj@: nfs_check.c
+ $(CC) -c -o nfs_check@obj@ $(CFLAGS) nfs_check.c
+ </code>
+ </section>
+</erlref>
+
diff --git a/lib/test_server/doc/src/why_test_chapter.xml b/lib/test_server/doc/src/why_test_chapter.xml
new file mode 100644
index 0000000000..745d4218f1
--- /dev/null
+++ b/lib/test_server/doc/src/why_test_chapter.xml
@@ -0,0 +1,140 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Why Test</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+
+ <section>
+ <title>Goals</title>
+ <p>It's not possible to prove that a program is correct by
+ testing. On the contrary, it has been formally proven that it is
+ impossible to prove programs in general by testing. Theoretical
+ program proofs or plain examination of code may be viable options
+ for those that wish to certify that a program is correct. The test
+ server, as it is based on testing, cannot be used for
+ certification. Its intended use is instead to (cost effectively)
+ <em>find bugs</em>. A successful test suite is one that reveals a
+ bug. If a test suite results in Ok, then we know very little that
+ we didn't know before.
+ </p>
+ </section>
+
+ <section>
+ <title>What to test?</title>
+ <p>There are many kinds of test suites. Some concentrate on
+ calling every function in the interface to some module or
+      server. Some others do the same, but use all kinds of illegal
+ parameters, and verifies that the server stays alive and rejects
+ the requests with reasonable error codes. Some test suites
+ simulate an application (typically consisting of a few modules of
+ an application), some try to do tricky requests in general, some
+ test suites even test internal functions.
+ </p>
+ <p>Another interesting category of test suites are the ones that
+ check that fixed bugs don't reoccur. When a bugfix is introduced,
+ a test case that checks for that specific bug should be written
+ and submitted to the affected test suite(s).
+ </p>
+ <p>Aim for finding bugs. Write whatever test that has the highest
+ probability of finding a bug, now or in the future. Concentrate
+ more on the critical parts. Bugs in critical subsystems are a lot
+ more expensive than others.
+ </p>
+ <p>Aim for functionality testing rather than implementation
+ details. Implementation details change quite often, and the test
+ suites should be long lived. Often implementation details differ
+ on different platforms and versions. If implementation details
+ have to be tested, try to factor them out into separate test
+ cases. Later on these test cases may be rewritten, or just
+ skipped.
+ </p>
+ <p>Also, aim for testing everything once, no less, no more. It's
+ not effective having every test case fail just because one
+ function in the interface changed.
+ </p>
+ </section>
+
+ <section>
+ <title>How much to test</title>
+ <p>There is a unix shell script that counts the number of non
+ commented words (lines and characters too) of source code in each
+      application's test directory and divides by the number of such
+ source words in the src directory. This is a measure of how much
+ test code there is.
+ </p>
+ <p>There has been much debate over how much test code, compared to
+ production code, should be written in a project. More test code
+ finds more bugs, but test code needs to be maintained just like
+ the production code, and it's expensive to write it in the first
+ place. In several articles from relatively mature software
+ organizations that I have read, the amount of test code has been
+ about the same as the production code. </p>
+ <p>In OTP, at the time of
+ writing, few applications come even close to this, some have no
+ test code at all.
+ </p>
+
+ <section>
+ <title>Full coverage</title>
+ <p>It is possible to cover compile the modules being tested
+ before running the test suites. Doing so displays which branches
+ of the code that are tested by the test suite, and which are
+ not. Many use this as a measure of a good test suite. When every
+ single line of source code is covered once by the test suite,
+ the test suite is finished.
+ </p>
+ <p>A coverage of 100% still proves nothing, though. It doesn't
+ mean that the code is error free, that everything is tested. For
+ instance, if a function contains a division, it has to be
+ executed at least twice. Once with parameters that cause
+ division by zero, and once with other parameters.
+ </p>
+ <p>High degree of coverage is good of course, it means that no
+ major parts of the code has been left untested. It's another
+ question whether it's cost effective. You're only likely to find
+ 50% more bugs when going from 67% to 100% coverage, but the work
+ (cost) is maybe 200% as large, or more, because reaching all of
+ those obscure branches is usually complicated.
+ </p>
+ <p>Again, the reason for testing with the test server is to find
+ bugs, not to create certificates of valid code. Maximizing the
+ number of found bugs per hour probably means not going for 100%
+ coverage. For some module the optimum may be 70%, for some other
+ maybe 250%. 100% shouldn't be a goal in itself.</p>
+ </section>
+
+ <section>
+ <title>User interface testing</title>
+ <p>It is very difficult to do sensible testing of user
+ interfaces, especially the graphic ones. The test server has
+ some support for capturing the text I/O that goes to the user,
+ but none for graphics. There are several tools on the market
+ that help with this.</p>
+ </section>
+ </section>
+</chapter>
+
diff --git a/lib/test_server/doc/src/write_framework_chapter.xml b/lib/test_server/doc/src/write_framework_chapter.xml
new file mode 100644
index 0000000000..2fde67132e
--- /dev/null
+++ b/lib/test_server/doc/src/write_framework_chapter.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Write your own test server framework</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>write_framework_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Introduction</title>
+ <p>The test server controller can be interfaced from the operating
+ system or from within Erlang. The nature of your new framework
+ will decide which interface to use. If you want your framework to
+ start a new node for each test, the operating system interface is
+ very convenient. If your node is already started, going from
+ within Erlang might be a more flexible solution.
+ </p>
+ <p>The two methods are described below.
+ </p>
+ </section>
+
+ <section>
+ <title>Interfacing the test server controller from Erlang</title>
+ <p>Using the test server from Erlang means that you have to start
+ the test server and then add test jobs. Use
+ <c>test_server_ctrl:start/0</c> to start a local target or
+ <c>test_server_ctrl:start/1</c> to start a remote target. The test
+ server is stopped by <c>test_server_ctrl:stop/0</c>.
+ </p>
+ <p>The argument to <c>test_server_ctrl:start/1</c> is the name of a
+ parameter file. The parameter file specifies what type of target
+ to start and where to start it, as well as some additional
+ parameters needed for different target types. See the reference
+ manual for a detailed description of all valid parameters.
+ </p>
+
+ <section>
+ <title>Adding test jobs</title>
+ <p>There are many commands available for adding test cases to
+ the test server's job queue: <br></br>
+</p>
+ <list type="bulleted">
+ <item>Single test case <br></br>
+<c>test_server_ctrl:add_case/2/3</c></item>
+ <item>Multiple test cases from same suite <br></br>
+<c>test_server_ctrl:add_cases/2/3</c></item>
+ <item>Test suite module or modules <br></br>
+<c>test_server_ctrl:add_module/1/2</c></item>
+ <item>Some or all test suite modules in a directory <br></br>
+<c>test_server_ctrl:add_dir/2/3</c></item>
+ <item>Test cases specified in a test specification file <br></br>
+<c>test_server_ctrl:add_spec/1</c></item>
+ </list>
+ <p>All test suites are given a unique name, which is usually
+ given when the test suite is added to the job queue. In some
+ cases, a default name is used, as in the case when a module is
+ added without a specified name. The test job name is used to
+ store logfiles, which are stored in the `name.logs' directory
+ under the current directory.
+ </p>
+ <p>See the reference manual for details about the functions for
+ adding test jobs.
+ </p>
+ </section>
+ </section>
+
+ <section>
+ <title>Interfacing the test server controller from the operating system.</title>
+ <p>The function <c>run_test/1</c> is your interface in the test
+ server controller if you wish to use it from the operating
+ system. You simply start an erlang shell and invoke this function
+ with the <c>-s</c> option. <c>run_test/1</c> starts the test
+ server, runs the test specified by the command line and stops the
+ test server. The argument to <c>run_test/1</c> is a list of
+ command line flags, typically
+ <c>['KEY1', Value1, 'KEY2', Value2, ...]</c>.
+ The valid command line flags are listed in the reference manual
+ for <c>test_server_ctrl</c>.
+ </p>
+ <p>A typical command line may look like this <br></br>
+<c>erl -noshell -s test_server_ctrl run_test KEY1 Value1 KEY2 Value2 ... -s erlang halt</c></p>
+ <p>Or make an alias (this is for unix/tcsh) <br></br>
+<c>alias erl_test 'erl -noshell -s test_server_ctrl run_test \\!* -s erlang halt'</c></p>
+ <p>And then use it like this <br></br>
+<c>erl_test KEY1 Value1 KEY2 Value2 ...</c> <br></br>
+</p>
+
+ <section>
+ <title>An Example</title>
+ <p>An example of starting a test run from the command line <br></br>
+</p>
+ <p><c>erl -name test_srv -noshell -rsh /home/super/otp/bin/ctrsh </c> <br></br>
+<c>-pa /clearcase/otp/erts/lib/kernel/test </c> <br></br>
+<c>-boot start_sasl -sasl errlog_type error </c> <br></br>
+<c>-s test_server_ctrl run_test SPEC kernel.spec -s erlang halt</c> <br></br>
+</p>
+ </section>
+ </section>
+
+ <section>
+ <title>Framework callback functions</title>
+ <p>By defining the environment variable
+ <c>TEST_SERVER_FRAMEWORK</c> to a module name, the framework
+ callback functions can be used. The framework callback functions
+ are called by the test server in order to let the framework interact
+ with the execution of the tests and to keep the framework up to
+ date with information about the test progress.
+ </p>
+ <p>The framework callback functions are described in the reference
+ manual for <c>test_server_ctrl</c>.
+ </p>
+ <p>Note that this topic is in an early stage of development, and
+ changes might occur.
+ </p>
+ </section>
+
+ <section>
+ <title>Other concerns</title>
+ <p>Some things to think about when writing your own test server
+ framework:
+ </p>
+ <list type="bulleted">
+ <item><c>emulator version</c> - Make sure that the intended
+ version of the emulator is started.
+ </item>
+ <item><c>operating system path</c> - If test cases use port
+ programs, make sure the paths are correct.
+ </item>
+ <item><c>recompilation</c> - Make sure all test suites are fresh
+ compiled.
+ </item>
+ <item><c>test_server.hrl</c> - Make sure the
+ <c>test_server.hrl</c> file is in the include path when
+ compiling test suites.
+ </item>
+ <item><c>running applications</c> - Some test suites require
+ some applications to be running (e.g. sasl). Make sure they are
+ started.
+ </item>
+ </list>
+ </section>
+</chapter>
+
diff --git a/lib/test_server/doc/src/write_test_chapter.xml b/lib/test_server/doc/src/write_test_chapter.xml
new file mode 100644
index 0000000000..12f0dfc361
--- /dev/null
+++ b/lib/test_server/doc/src/write_test_chapter.xml
@@ -0,0 +1,228 @@
+<?xml version="1.0" encoding="latin1" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2002</year><year>2009</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ The contents of this file are subject to the Erlang Public License,
+ Version 1.1, (the "License"); you may not use this file except in
+ compliance with the License. You should have received a copy of the
+ Erlang Public License along with this software. If not, it can be
+ retrieved online at http://www.erlang.org/.
+
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License.
+
+ </legalnotice>
+
+ <title>Writing Test Suites</title>
+ <prepared>Siri Hansen</prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>write_test_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Support for test suite authors</title>
+ <p>The <c>test_server</c> module provides some useful functions
+ to support the test suite author. This includes:
+ </p>
+ <list type="bulleted">
+ <item>Starting and stopping slave or peer nodes</item>
+ <item>Capturing and checking stdout output</item>
+ <item>Retrieving and flushing process message queue</item>
+ <item>Watchdog timers</item>
+ <item>Checking that a function crashes</item>
+ <item>Checking that a function succeeds at least m out of n times</item>
+ <item>Checking .app files</item>
+ </list>
+ <p>Please turn to the reference manual for the <c>test_server</c>
+ module for details about these functions.
+ </p>
+ </section>
+
+ <section>
+ <title>Test suites</title>
+ <p>A test suite is an ordinary Erlang module that contains test
+ cases. It's recommended that the module has a name of the form
+ *_SUITE.erl. Otherwise, the directory function will not find the
+ modules (by default).
+ </p>
+ <p>For some of the test server support, the test server include
+ file <c>test_server.hrl</c> must be included. Never include it
+ with the full path, for portability reasons. Use the compiler
+ include directive instead.
+ </p>
+ <p>The special function <c>all(suite)</c> in each module is called
+ to get the test specification for that module. The function
+ typically returns a list of test cases in that module, but any
+ test specification could be returned. Please see the chapter
+ about test specifications for details about this.
+ </p>
+ </section>
+
+ <section>
+ <title>Init per test case</title>
+ <p>In each test suite module, the functions
+ <c>init_per_testcase/2</c> and <c>end_per_testcase/2</c> must be
+ implemented.
+ </p>
+ <p><c>init_per_testcase</c> is called before each test case in the
+ test suite, giving a (limited) possibility for initialization.
+ </p>
+ <p><c>end_per_testcase/2</c> is called after each test case is
+ completed, giving a possibility to clean up.
+ </p>
+ <p>The first argument to these functions is the name of the test
+ case. This can be used to do individual initialization and cleanup for
+ each test case.
+ </p>
+ <p>The second argument is a list of tuples called
+ <c>Config</c>. The first element in a <c>Config</c> tuple
+ should be an atom - a key value to be used for searching.
+ <c>init_per_testcase/2</c> may modify the <c>Config</c>
+ parameter or just return it as is. Whatever is returned by
+ <c>init_per_testcase/2</c> is given as <c>Config</c> parameter to
+ the test case itself.
+ </p>
+ <p>The return value of <c>end_per_testcase/2</c> is ignored by the
+ test server.
+ </p>
+ </section>
+
+ <section>
+ <title>Test cases</title>
+ <p>The smallest unit that the test server is concerned with is a
+ test case. Each test case can in turn test many things, for
+ example make several calls to the same interface function with
+ different parameters.
+ </p>
+ <p>It is possible to put many or few tests into each test
+ case. How many things each test case tests is up to the author,
+ but here are some things to keep in mind.
+ </p>
+ <p>Very small test cases often lead to more code, since
+ initialization has to be duplicated. Larger code, especially with
+ a lot of duplication, increases maintenance and reduces
+ readability.
+ </p>
+ <p>Larger test cases make it harder to tell what went wrong if it
+ fails, and force us to skip larger portions of test code if a
+ specific part fails. These effects are accentuated when running on
+ multiple platforms because test cases often have to be skipped.
+ </p>
+ <p>A test case generally consists of three parts, the
+ documentation part, the specification part and the execution
+ part. These are implemented as three clauses of the same function.
+ </p>
+ <p>The documentation clause matches the argument '<c>doc</c>' and
+ returns a list of strings describing what the test case tests.
+ </p>
+ <p>The specification clause matches the argument '<c>suite</c>'
+ and returns the test specification for this particular test
+ case. If the test specification is an empty list, this indicates
+ that the test case is a leaf test case, i.e. one to be executed.
+ </p>
+ <p><em>Note that the specification clause of a test case is executed on the test server controller host. This means that if target is remote, the specification clause is probably executed on a different platform than the one tested.</em></p>
+ <p>The execution clause implements the actual test case. It takes
+ one argument, <c>Config</c>, which contains configuration
+ information like <c>data_dir</c> and <c>priv_dir</c>. See <seealso marker="#data_priv_dir">Data and Private Directories</seealso> for
+ more information about these.
+ </p>
+ <p>The <c>Config</c> variable can also contain the
+ <c>nodenames</c> key, if requested by the <c>require_nodenames</c>
+ command in the test suite specification file. All <c>Config</c>
+ items should be extracted using the <c>?config</c> macro. This is
+ to ensure future compatibility if the <c>Config</c> format
+ changes. See the reference manual for <c>test_server</c> for
+ details about this macro.
+ </p>
+ <p>If the execution clause crashes or exits, it is considered a
+ failure. If it returns <c>{skip,Reason}</c>, the test case is
+ considered skipped. If it returns <c>{comment,String}</c>,
+ the string will be added in the 'Comment' field on the HTML
+ result page. If the execution clause returns anything else, it is
+ considered a success, unless it is <c>{'EXIT',Reason}</c> or
+ <c>{'EXIT',Pid,Reason}</c> which can't be distinguished from a
+ crash, and thus will be considered a failure.
+ </p>
+ </section>
+
+ <section>
+ <marker id="data_priv_dir"></marker>
+ <title>Data and Private Directories</title>
+ <p>The data directory (<c>data_dir</c>) is the directory where the test
+ module has its own files needed for the testing. A compiler test
+ case may have source files to feed into the compiler, a release
+ upgrade test case may have some old and new release of
+ something. A graphics test case may have some icons and a test
+ case doing a lot of math with bignums might store the correct
+ answers there. The name of the <c>data_dir</c> is the name of
+ the test suite and then "_data". For example,
+ <c>"some_path/foo_SUITE.beam"</c> has the data directory
+ <c>"some_path/foo_SUITE_data/"</c>.
+ </p>
+ <p>The <c>priv_dir</c> is the test suite's private directory. This
+ directory should be used when a test case needs to write to
+ files. The name of the private directory is generated by the test
+ server, which also creates the directory.
+ </p>
+ <p><em>Warning:</em> Do not depend on current directory to be
+ writable, or to point to anything in particular. All scratch files
+ are to be written in the <c>priv_dir</c>, and all data files found
+ in <c>data_dir</c>. If the current directory has to be something
+ specific, it must be set with <c>file:set_cwd/1</c>.
+ </p>
+ </section>
+
+ <section>
+ <title>Execution environment</title>
+ <p>Each time a test case is about to be executed, a new process is
+ created with <c>spawn_link</c>. This is so that the test case will
+ have no dependencies to earlier tests, with respect to process flags,
+ process links, messages in the queue, other processes having registered
+ the process, etc. As little as possible is done to change the initial
+ context of the process (what is created by plain spawn). Here is a
+ list of differences:
+ </p>
+ <list type="bulleted">
+ <item>It has a link to the test server. If this link is removed,
+ the test server will not know when the test case is finished,
+ just wait infinitely.
+ </item>
+ <item>It often holds a few items in the process dictionary, all
+ with names starting with '<c>test_server_</c>'. This is to keep
+ track of if/where a test case fails.
+ </item>
+ <item>There is a top-level catch. All of the test case code is
+ caught, so that the location of a crash can be reported back to
+ the test server. If the test case process is killed by another
+ process (thus the catch code is never executed) the test server
+ is not able to tell where the test case was executing.
+ </item>
+ <item>It has a special group leader implemented by the test
+ server. This way the test server is able to capture the io that
+ the test case provokes. This is also used by some of the test
+ server support functions.
+ </item>
+ </list>
+ <p>There is no time limit for a test case, unless the test case
+ itself imposes such a limit, by calling
+ <c>test_server:timetrap/1</c> for example. The call can be made
+ in each test case, or in the <c>init_per_testcase/2</c>
+ function. Make sure to call the corresponding
+ <c>test_server:timetrap_cancel/1</c> function as well, e.g in the
+ <c>end_per_testcase/2</c> function, or else the test cases will
+ always fail.
+ </p>
+ </section>
+
+</chapter>
+