From 84adefa331c4159d432d22840663c38f155cd4c1 Mon Sep 17 00:00:00 2001 From: Erlang/OTP Date: Fri, 20 Nov 2009 14:54:40 +0000 Subject: The R13B03 release. --- lib/test_server/doc/html/.gitignore | 0 lib/test_server/doc/man3/.gitignore | 0 lib/test_server/doc/man6/.gitignore | 0 lib/test_server/doc/pdf/.gitignore | 0 lib/test_server/doc/src/Makefile | 138 ++++ lib/test_server/doc/src/basics_chapter.xml | 216 ++++++ lib/test_server/doc/src/book.xml | 49 ++ lib/test_server/doc/src/example_chapter.xml | 150 ++++ lib/test_server/doc/src/fascicules.xml | 18 + lib/test_server/doc/src/make.dep | 24 + lib/test_server/doc/src/notes.xml | 346 +++++++++ lib/test_server/doc/src/notes_history.xml | 112 +++ lib/test_server/doc/src/part.xml | 45 ++ lib/test_server/doc/src/part_notes.xml | 40 + lib/test_server/doc/src/part_notes_history.xml | 38 + lib/test_server/doc/src/ref_man.xml | 43 ++ lib/test_server/doc/src/run_test_chapter.xml | 49 ++ lib/test_server/doc/src/test_server.xml | 840 +++++++++++++++++++++ lib/test_server/doc/src/test_server_app.xml | 75 ++ lib/test_server/doc/src/test_server_ctrl.xml | 771 +++++++++++++++++++ lib/test_server/doc/src/test_spec_chapter.xml | 375 +++++++++ lib/test_server/doc/src/ts.xml | 592 +++++++++++++++ lib/test_server/doc/src/why_test_chapter.xml | 140 ++++ .../doc/src/write_framework_chapter.xml | 166 ++++ lib/test_server/doc/src/write_test_chapter.xml | 228 ++++++ 25 files changed, 4455 insertions(+) create mode 100644 lib/test_server/doc/html/.gitignore create mode 100644 lib/test_server/doc/man3/.gitignore create mode 100644 lib/test_server/doc/man6/.gitignore create mode 100644 lib/test_server/doc/pdf/.gitignore create mode 100644 lib/test_server/doc/src/Makefile create mode 100644 lib/test_server/doc/src/basics_chapter.xml create mode 100644 lib/test_server/doc/src/book.xml create mode 100644 lib/test_server/doc/src/example_chapter.xml create mode 100644 lib/test_server/doc/src/fascicules.xml create mode 100644 
lib/test_server/doc/src/make.dep create mode 100644 lib/test_server/doc/src/notes.xml create mode 100644 lib/test_server/doc/src/notes_history.xml create mode 100644 lib/test_server/doc/src/part.xml create mode 100644 lib/test_server/doc/src/part_notes.xml create mode 100644 lib/test_server/doc/src/part_notes_history.xml create mode 100644 lib/test_server/doc/src/ref_man.xml create mode 100644 lib/test_server/doc/src/run_test_chapter.xml create mode 100644 lib/test_server/doc/src/test_server.xml create mode 100644 lib/test_server/doc/src/test_server_app.xml create mode 100644 lib/test_server/doc/src/test_server_ctrl.xml create mode 100644 lib/test_server/doc/src/test_spec_chapter.xml create mode 100644 lib/test_server/doc/src/ts.xml create mode 100644 lib/test_server/doc/src/why_test_chapter.xml create mode 100644 lib/test_server/doc/src/write_framework_chapter.xml create mode 100644 lib/test_server/doc/src/write_test_chapter.xml (limited to 'lib/test_server/doc') diff --git a/lib/test_server/doc/html/.gitignore b/lib/test_server/doc/html/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/test_server/doc/man3/.gitignore b/lib/test_server/doc/man3/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/test_server/doc/man6/.gitignore b/lib/test_server/doc/man6/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/test_server/doc/pdf/.gitignore b/lib/test_server/doc/pdf/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/test_server/doc/src/Makefile b/lib/test_server/doc/src/Makefile new file mode 100644 index 0000000000..e3c1b8ce92 --- /dev/null +++ b/lib/test_server/doc/src/Makefile @@ -0,0 +1,138 @@ +# +# %CopyrightBegin% +# +# Copyright Ericsson AB 2002-2009. All Rights Reserved. +# +# The contents of this file are subject to the Erlang Public License, +# Version 1.1, (the "License"); you may not use this file except in +# compliance with the License. 
You should have received a copy of the +# Erlang Public License along with this software. If not, it can be +# retrieved online at http://www.erlang.org/. +# +# Software distributed under the License is distributed on an "AS IS" +# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +# the License for the specific language governing rights and limitations +# under the License. +# +# %CopyrightEnd% +# + +include $(ERL_TOP)/make/target.mk +include $(ERL_TOP)/make/$(TARGET)/otp.mk + +# ---------------------------------------------------- +# Application version +# ---------------------------------------------------- +include ../../vsn.mk +VSN=$(TEST_SERVER_VSN) +APPLICATION=test_server + +# ---------------------------------------------------- +# Release directory specification +# ---------------------------------------------------- +RELSYSDIR = $(RELEASE_PATH)/lib/$(APPLICATION)-$(VSN) + +# ---------------------------------------------------- +# Target Specs +# ---------------------------------------------------- +XML_APPLICATION_FILES = ref_man.xml +XML_REF3_FILES = \ + test_server_ctrl.xml \ + test_server.xml +XML_REF3_INTERNAL = \ + ts.xml +XML_REF6_FILES = test_server_app.xml + +XML_PART_FILES = \ + part.xml \ + part_notes.xml \ + part_notes_history.xml + +XML_CHAPTER_FILES = \ + basics_chapter.xml \ + run_test_chapter.xml \ + write_test_chapter.xml \ + test_spec_chapter.xml \ + example_chapter.xml \ + write_framework_chapter.xml \ + notes.xml \ + notes_history.xml + +BOOK_FILES = book.xml + +GIF_FILES = + +# ---------------------------------------------------- + +HTML_FILES = $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%.html) \ + $(XML_PART_FILES:%.xml=$(HTMLDIR)/%.html) + +HTML_INTERNAL = $(XML_REF3_INTERNAL:%.xml=$(HTMLDIR)/%.html) + +INFO_FILE = ../../info + +MAN3_FILES = $(XML_REF3_FILES:%.xml=$(MAN3DIR)/%.3) +MAN3_INTERNAL = $(XML_REF3_INTERNAL:%.xml=$(MAN3DIR)/%.3) +MAN6_FILES = $(XML_REF6_FILES:%_app.xml=$(MAN6DIR)/%.6) + +HTML_REF_MAN_FILE = 
$(HTMLDIR)/index.html + +TOP_PDF_FILE = $(PDFDIR)/test_server-$(VSN).pdf + +# ---------------------------------------------------- +# FLAGS +# ---------------------------------------------------- +XML_FLAGS += +DVIPS_FLAGS += + +# ---------------------------------------------------- +# Targets +# ---------------------------------------------------- +$(HTMLDIR)/%.gif: %.gif + $(INSTALL_DATA) $< $@ + +docs: pdf html man + +pdf: $(TOP_PDF_FILE) + +html: gifs $(HTML_REF_MAN_FILE) + +man: $(MAN3_FILES) $(MAN3_INTERNAL) $(MAN6_FILES) + +gifs: $(GIF_FILES:%=$(HTMLDIR)/%) + +debug opt: + +clean clean_docs: + rm -rf $(HTMLDIR)/* + rm -f $(MAN3DIR)/* + rm -f $(MAN6DIR)/* + rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo) + rm -f errs core *~ + +# ---------------------------------------------------- +# Release Target +# ---------------------------------------------------- +include $(ERL_TOP)/make/otp_release_targets.mk + +release_docs_spec: docs + $(INSTALL_DIR) $(RELSYSDIR)/doc/pdf + $(INSTALL_DATA) $(TOP_PDF_FILE) $(RELSYSDIR)/doc/pdf + $(INSTALL_DIR) $(RELSYSDIR)/doc/html + $(INSTALL_DATA) $(HTMLDIR)/* \ + $(RELSYSDIR)/doc/html + $(INSTALL_DATA) $(INFO_FILE) $(RELSYSDIR) + $(INSTALL_DIR) $(RELEASE_PATH)/man/man3 + $(INSTALL_DATA) $(MAN3_FILES) $(RELEASE_PATH)/man/man3 + $(INSTALL_DIR) $(RELEASE_PATH)/man/man6 + $(INSTALL_DATA) $(MAN6_FILES) $(RELEASE_PATH)/man/man6 + +release_spec: + +release_tests_spec: + +# ---------------------------------------------------- +# Include dependency +# ---------------------------------------------------- + +include make.dep diff --git a/lib/test_server/doc/src/basics_chapter.xml b/lib/test_server/doc/src/basics_chapter.xml new file mode 100644 index 0000000000..a96cc88075 --- /dev/null +++ b/lib/test_server/doc/src/basics_chapter.xml @@ -0,0 +1,216 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server Basics + Siri Hansen + + + + basics_chapter.xml +
+ +
+ Introduction +

Test Server is a portable test tool for automated + testing of Erlang programs and OTP applications. It provides an + interface for running test programs directly with Test Server + as well as an interface for integrating Test Server + with a framework application. The latter makes it possible to use + Test Server as the engine of a higher level test tool + application.

+ +

It is strongly recommended that Test Server be used from inside + a framework application, rather than interfaced directly for + running test programs. Test Server can be pretty difficult to use + since it's a very general and quite extensive and complex + application. Furthermore, the test_server_ctrl functions + are not meant to be used from within the actual test programs. The + framework should handle communication with Test Server and deal + with the more complex aspects of this interaction automatically so + that a higher level interface may be provided for the tester. For + test tool usage to be productive, a simpler, more intuitive and + (if required) more specific interface is required than what Test Server + can provide.

+ +

OTP delivers a general purpose framework for Test Server, called + Common Test. This application is a tool well suited for + automated black box testing of target systems of any kind + (not necessarily implemented in Erlang). Common Test is also a very + useful tool for white box testing of Erlang programs and OTP + applications. Unless a more specific functionality and/or user + interface is required (in which case you might need to implement + your own framework), Common Test should do the job for + you. Please read the Common Test User's Guide and reference manual + for more information.

+ +

Under normal circumstances, knowledge about the Test Server + application is not required for using the Common Test framework. + However, if you want to use Test Server without a framework, + or learn how to integrate it with your own framework, please read on... +

+
+
+ Getting started +

Testing when using Test Server is done by running test + suites. A test suite is a number of test cases, where each test + case tests one or more things. The test case is the smallest unit + that the test server deals with. One or more test cases are + grouped together into one ordinary Erlang module, which is called + a test suite. Several test suite modules can be grouped together + in special test specification files representing whole application + and/or system test "jobs". +

+

The test suite Erlang module must follow a certain interface, + which is specified by Test Server. See the section on writing + test suites for details about this. +

+

Each test case is considered a success if it returns to the + caller, no matter what the returned value is. An exception to this + is the return value {skip, Reason} which indicates that the + test case is skipped. A failure is specified as a crash, no matter + what the crash reason is. +

+

As a test suite runs, all information (including output to + stdout) is recorded in several different log files. A minimum of + information is displayed to the user console. This only includes + start and stop information, plus a note for each failed test case. +

+

The result from each test case is recorded in an HTML log file + which is created for each test run. Every test case gets one row + in a table presenting total time, whether the case was successful + or not, if it was skipped, and possibly also a comment. The HTML + file has links to each test case's logfile, which may be viewed + from e.g. Netscape or any other HTML capable browser. +

+

The Test Server consists of three parts: +

+ + The part that executes the test suites on target and + provides support for the test suite author is called + test_server. This is described in the chapter about + writing test cases in this user's guide, and in the reference + manual for the test_server module. + The controlling part, which provides the low level + operator interface, starts and stops the target node (if remote + target) and slave nodes and writes log files, is called + test_server_ctrl. The Test Server Controller should not + be used directly when running tests. Instead a framework built + on top of it should be used. More information + about how to write your own framework can be found + in this user's guide and in the reference manual for the + test_server_ctrl module. + +
+ +
+ Definition of terms + + conf(iguration) case + This is a group of test cases which need some specific + configuration. A conf case contains an initiation function which + sets up a specific configuration, one or more test cases using + this configuration, and a cleanup function which restores the + configuration. A conf case is specified in a test specification + either like this:{conf,InitFunc,ListOfCases,CleanupFunc}, + or this: {conf,Properties,InitFunc,ListOfCases,CleanupFunc} + + datadir + Data directory for a test suite. This directory contains + any files used by the test suite, e.g. additional erlang + modules, c code or data files. If the data directory contains + code which must be compiled before the test suite is run, it + should also contain a makefile source called Makefile.src + defining how to compile. + + documentation clause + One of the function clauses in a test case. This clause + shall return a list of strings describing what the test case + tests. + + execution clause + One of the function clauses in a test case. This clause + implements the actual test case, i.e. calls the functions that + shall be tested and checks results. The clause shall crash if it + fails. + + major log file + This is the test suites log file. + + Makefile.src + This file is used by the test server framework to generate + a makefile for a datadir. It contains some special characters + which are replaced according to the platform currently tested. + + minor log file + This is a separate log file for each test case. + + privdir + Private directory for a test suite. This directory should + be used when the test suite needs to write to files. + + skip case + A test case which shall be skipped. + + specification clause + One of the function clauses in a test case. This clause + shall return an empty list, a test specification or + {skip,Reason}. If an empty list is returned, it means + that the test case shall be executed, and so it must also have + an execution clause. 
Note that the specification clause is + always executed on the controller node, i.e. not on the target + node. + + test case + A single test included in a test suite. Typically it tests + one function in a module or application. A test case is + implemented as a function in a test suite module. The function + can have three clauses, the documentation-, specification- and + execution clause. + + test specification + A specification of which test suites and test cases to + run. There can be test specifications on three different levels + in a test. The top level is a test specification file which + roughly specifies what to test for a whole application. Then + there is a test specification for each test suite returned from + the all(suite) function in the suite. And there can also + be a test specification returned from the specification clause + of a test case. + + test specification file + This is a text file containing the test specification for + an application. The file has the extension ".spec" or + ".spec.Platform", where Platform is e.g. "vxworks". + + test suite + An erlang module containing a collection of test cases for + a specific application or module. + + topcase + The first "command" in a test specification file. This + command contains the test specification, like this: + {topcase,TestSpecification} + +
+
+ diff --git a/lib/test_server/doc/src/book.xml b/lib/test_server/doc/src/book.xml new file mode 100644 index 0000000000..960ce48cf7 --- /dev/null +++ b/lib/test_server/doc/src/book.xml @@ -0,0 +1,49 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server + Siri Hansen + + 2002-07-11 + + book.xml +
+ + + Test Server + + + + + + + + + + + + + + +
+ diff --git a/lib/test_server/doc/src/example_chapter.xml b/lib/test_server/doc/src/example_chapter.xml new file mode 100644 index 0000000000..8a06526528 --- /dev/null +++ b/lib/test_server/doc/src/example_chapter.xml @@ -0,0 +1,150 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Examples + Siri Hansen + + + + example_chapter.xml +
+ +
+ Test suite + +-module(my_SUITE). + +-export([all/1, + not_started/1, not_started_func1/1, not_started_func2/1, + start/1, stop/1, + func1/1, func2/1 + ]). + +-export([init_per_testcase/2, end_per_testcase/2]). + +-include("test_server.hrl"). + +-define(default_timeout, ?t:minutes(1)). + +init_per_testcase(_Case, Config) -> + ?line Dog=?t:timetrap(?default_timeout), + [{watchdog, Dog}|Config]. +end_per_testcase(_Case, Config) -> + Dog=?config(watchdog, Config), + ?t:timetrap_cancel(Dog), + ok. + +all(suite) -> + %% Test specification on test suite level + [not_started, + {conf, start, [func1, func2], stop}]. + +not_started(suite) -> + %% Test specification on test case level + [not_started_func1, not_started_func2]; +not_started(doc) -> + ["Testing all functions when application is not started"]. +%% No execution clause unless the specification clause returns []. + + +not_started_func1(suite) -> + []; +not_started_func1(doc) -> + ["Testing function 1 when application is not started"]. +not_started_func1(Config) when list(Config) -> + ?line {error, not_started} = myapp:func1(dummy_ref,1), + ?line {error, not_started} = myapp:func1(dummy_ref,2), + ok. + +not_started_func2(suite) -> + []; +not_started_func2(doc) -> + ["Testing function 2 when application is not started"]. +not_started_func2(Config) when list(Config) -> + ?line {error, not_started} = myapp:func2(dummy_ref,1), + ?line {error, not_started} = myapp:func2(dummy_ref,2), + ok. + + +%% No specification clause needed for an init function in a conf case!!! +start(doc) -> + ["Testing start of my application."]; +start(Config) when list(Config) -> + ?line Ref = myapp:start(), + case erlang:whereis(my_main_process) of + Pid when pid(Pid) -> + [{myapp_ref,Ref}|Config]; + undefined -> + %% Since this is the init function in a conf case, the rest of the + %% cases in the conf case will be skipped if this case fails. + ?t:fail("my_main_process did not start") + end. 
+ +func1(suite) -> + []; +func1(doc) -> + ["Test that func1 returns ok when argument is 1 and error if argument is 2"]; +func1(Config) when list(Config) -> + ?line Ref = ?config(myapp_ref,Config), + ?line ok = myapp:func1(Ref,1), + ?line error = myapp:func1(Ref,2), + ok. + +func2(suite) -> + []; +func2(doc) -> + ["Test that func1 returns ok when argument is 3 and error if argument is 4"]; +func2(Config) when list(Config) -> + ?line Ref = ?config(myapp_ref,Config), + ?line ok = myapp:func2(Ref,3), + ?line error = myapp:func2(Ref,4), + ok. + +%% No specification clause needed for a cleanup function in a conf case!!! +stop(doc) -> + ["Testing termination of my application"]; +stop(Config) when list(Config) -> + ?line Ref = ?config(myapp_ref,Config), + ?line ok = myapp:stop(Ref), + case erlang:whereis(my_main_process) of + undefined -> + lists:keydelete(myapp_ref,1,Config); + Pid when pid(Pid) -> + ?t:fail("my_main_process did not stop") + end. + +
+ +
+ Test specification file +

myapp.spec:

+ +{topcase, {dir, "../myapp_test"}}. % Test specification on top level +

myapp.spec.vxworks:

+ +{topcase, {dir, "../myapp_test"}}. % Test specification on top level +{skip,{my_SUITE,func2,"Not applicable on VxWorks"}}. +
+
+ + diff --git a/lib/test_server/doc/src/fascicules.xml b/lib/test_server/doc/src/fascicules.xml new file mode 100644 index 0000000000..0678195e07 --- /dev/null +++ b/lib/test_server/doc/src/fascicules.xml @@ -0,0 +1,18 @@ + + + + + + User's Guide + + + Reference Manual + + + Release Notes + + + Off-Print + + + diff --git a/lib/test_server/doc/src/make.dep b/lib/test_server/doc/src/make.dep new file mode 100644 index 0000000000..ee9100bd08 --- /dev/null +++ b/lib/test_server/doc/src/make.dep @@ -0,0 +1,24 @@ +# ---------------------------------------------------- +# >>>> Do not edit this file <<<< +# This file was automaticly generated by +# /home/otp/bin/docdepend +# ---------------------------------------------------- + + +# ---------------------------------------------------- +# TeX files that the DVI file depend on +# ---------------------------------------------------- + +book.dvi: basics_chapter.tex book.tex example_chapter.tex \ + part.tex ref_man.tex run_test_chapter.tex \ + test_server_app.tex test_server_ctrl.tex \ + test_server.tex test_spec_chapter.tex \ + write_framework_chapter.tex \ + write_test_chapter.tex + +# ---------------------------------------------------- +# Source inlined when transforming from source to LaTeX +# ---------------------------------------------------- + +book.tex: ref_man.xml + diff --git a/lib/test_server/doc/src/notes.xml b/lib/test_server/doc/src/notes.xml new file mode 100644 index 0000000000..a71c18b5b7 --- /dev/null +++ b/lib/test_server/doc/src/notes.xml @@ -0,0 +1,346 @@ + + + + +
+ + 20042009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + APPLICATION Release Notes + Peter Andersson + Peter Andersson + + + + 2007-11-30 + A + notes.xml +
+ +
Test_Server 3.3.5 + +
Fixed Bugs and Malfunctions + + +

+ If the init_per_testcase/2 function fails, the test case + now gets marked and counted as auto skipped, not user + skipped (which would previously happen).

+

+ Own Id: OTP-8289

+
+
+
+ + +
Improvements and New Features + + +

+ The documentation is now built with open source tools + (xsltproc and fop) that exist on most platforms. One + visible change is that the frames are removed.

+

+ Own Id: OTP-8201

+
+ +

+ It is now possible to fail a test case from the + end_per_testcase/2 function, by returning {fail,Reason}.

+

+ Own Id: OTP-8284

+
+ +

+ It is now possible to fail a test case by having the + end_tc/3 framework function return {fail,Reason} for the + test case.

+

+ Own Id: OTP-8285

+
+ +

+ The test_server framework API (e.g. the end_tc/3 + function) has been modified. See the test_server_ctrl + documentation for details.

+

+ Own Id: OTP-8286 Aux Id: OTP-8285, OTP-8287

+
+
+
+ +
+ +
Test_Server 3.3.4 + +
Fixed Bugs and Malfunctions + + +

+ When running a suite starting with a test case group, + Test Server crashed if init_per_suite/1 exited or + returned skip. This has been fixed.

+

+ Own Id: OTP-8105 Aux Id: OTP-8089

+
+
+
+ + +
Improvements and New Features + + +

+ Various updates and fixes in Common Test and Test Server.

+

+ Own Id: OTP-8045 Aux Id: OTP-8089,OTP-8105,OTP-8163

+
+ +

+ Errors in coverage data collection and analysis were + difficult to detect. The logging has been improved so + that more information about e.g. imported and missing + modules is printed to the html log files.

+

+ Own Id: OTP-8163 Aux Id: seq11374

+
+ +

+ The Common Test HTML overview pages have been improved. + It is now possible to see if a test case has been skipped + explicitly or because a configuration function has + failed. Also, the history page (all_runs.html) now has + scrolling text displaying the test names. The old format + (showing names as a truncated string) can still be + generated by means of the flag/option 'basic_html'.

+

+ Own Id: OTP-8177

+
+
+
+ +
+ +
Test_Server 3.3.2 + +
Improvements and New Features + + +

+ Various corrections and improvements of Common Test and + Test Server.

+

+ Own Id: OTP-7981

+
+
+
+ +
+ +
Test_Server 3.3.1 + +
Improvements and New Features + + +

+ Minor updates and corrections.

+

+ Own Id: OTP-7897

+
+
+
+ +
+ +
Test_Server 3.3 + +
Improvements and New Features + + +

+ The conf case in Test Server has been extended with + properties that make it possible to execute test cases in + parallel, in sequence and in shuffled order. It is now + also possible to repeat test cases according to different + criteria. The properties can be combined, making it + possible to e.g. repeat a conf case a certain number of + times and execute the test cases in different (random) + order every time. The properties are specified in a list + in the conf case definition: {conf, Properties, InitCase, + TestCases, EndCase}. The available properties are: + parallel, sequence, shuffle, repeat, repeat_until_all_ok, + repeat_until_any_ok, repeat_until_any_fail, + repeat_until_all_fail.

+

+ Own Id: OTP-7511 Aux Id: OTP-7839

+
+ +

The test server starts Cover on nodes of the same + version as the test server itself only.

+

+ Own Id: OTP-7699

+
+ +

+ The Erlang mode for Emacs has been updated with new and + modified skeletons for Common Test and TS. Syntax for + test case groups in Common Test (and conf cases with + properties in TS) has been added and a new minimal Common + Test suite skeleton has been introduced.

+

+ Own Id: OTP-7856

+
+
+
+ +
+
Test_Server 3.2.4.1 + +
Fixed Bugs and Malfunctions + + +

+ The step functionality in Common Test (based on + interaction with Debugger) was broken. This has been + fixed, and some new step features have also been added. + Please see the Common Test User's Guide for details.

+

+ Own Id: OTP-7800 Aux Id: seq11106

+
+
+
+ +
+ +
Test_Server 3.2.4 + +
Improvements and New Features + + +

+ Miscellaneous updates.

+

+ Own Id: OTP-7527

+
+
+
+ +
+ +
Test_Server 3.2.3 + +
Fixed Bugs and Malfunctions + + +

+ When a testcase terminated due to a timetrap, io sent to + the group leader from framework:end_tc/3 (using + ct:pal/2/3 or ct:log/2/3) would cause deadlock. This has + been fixed.

+

+ Own Id: OTP-7447 Aux Id: seq11010

+
+
+
+ + +
Improvements and New Features + + +

+ Various updates and improvements, plus some minor bug + fixes, have been implemented in Common Test and Test + Server.

+

+ Own Id: OTP-7112

+
+ +

+ It is now possible, by means of the new function + ct:abort_current_testcase/1 or + test_server_ctrl:abort_current_testcase/1, to abort the + currently executing test case.

+

+ Own Id: OTP-7518 Aux Id: OTP-7112

+
+
+
+ +
+ +
Test_Server 3.2.2 + +
Improvements and New Features + + +

erlang:system_info/1 now accepts the + logical_processors, and debug_compiled + arguments. For more info see the erlang(3) + documentation.

The scale factor returned by + test_server:timetrap_scale_factor/0 is now also + affected if the emulator uses a larger amount of + scheduler threads than the amount of logical processors + on the system.

+

+ Own Id: OTP-7175

+
+
+
+ +
+ +
Test_Server 3.2.1 + +
Improvements and New Features + + +

+ When init_per_suite or end_per_suite terminated due to + runtime failure, test_server failed to format the line + number information properly and crashed. This error has + now been fixed.

+

+ Own Id: OTP-7091

+
+
+
+ +
+ +
Test_Server 3.2.0 + +
Improvements and New Features + + +

+ Test Server is a portable test server for automated + application testing. The server can run test suites on + local or remote targets and log progress and results to + HTML pages. The main purpose of Test Server is to act as + engine inside customized test tools. A callback interface + for such framework applications is provided.

+

+ Own Id: OTP-6989

+
+
+
+ +
+ +
+ diff --git a/lib/test_server/doc/src/notes_history.xml b/lib/test_server/doc/src/notes_history.xml new file mode 100644 index 0000000000..0392bd74a2 --- /dev/null +++ b/lib/test_server/doc/src/notes_history.xml @@ -0,0 +1,112 @@ + + + + +
+ + 20062009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server Release Notes History + + + + +
+ +
+ Test Server 3.1.1 + +
+ Improvements and new features + + +

Added functions test_server:break/1 and + test_server:continue/0 for semiautomatic testing.

+

test_server:timetrap/1 can now also take + {hours,H} | {minutes,M | {seconds,S}.

+

Added function + test_server_ctrl:multiply_timetraps/1, + test_server_ctrl:add_case/3, + test_server_ctrl:add_cases/2/3.

+

Added test suite functions init_per_suite/1 and + end_per_suite/1.

+

fin_per_testcase/2 is changed to + end_per_testcase/2. fin_per_testcase is kept + for backwards compatibility.

+

Added support for writing own test server frameworks. + Callback functions init_tc/1, end_tc/3, + get_suite/2, report/2, warn/1.

+
+
+
+
+ +
+ Test Server 3.1 + +
+ Improvements and New Features + + +

Added the options cover and cover_details + to ts:run. When one of these options is used, + the tested application will be cover compiled + before the test is run. The cover compiled code will also + be loaded on all slave or peer nodes started with + test_server:start_node. When the test is completed + coverage data from all nodes is collected and merged, and + presented in the coverage log to which there will be a link + from the test suite result page (i.e. the one with the + heading "Test suite ... results").

+

The cover_details option will do + cover:analyse_to_file for each cover compiled module, + while the cover option only will produce a list of + modules and the number of covered/uncovered lines in each + module.

+

To make it possible to run all test from a script (like in + the OTP daily builds), the following is added: + ts:run([all_tests | Options]).

+

This means that e.g. the following is possible: + erl -s ts run all_tests batch cover.

+

Note that it is also possible to run tests with cover even + if you don't use ts. + See test_server_ctrl:cover/2/3.

+

Own Id: OTP-4703

+
+ +

Removed module ts_save.erl and function + ts:save/0/1(incompatible).

+

Added config variable ipv6_hosts to + ts:install/1 and test spec file.

+

No longer removing duplicates of test cases from test spec + (incompatible).

+

Added function test_server:run_on_shielded_node/2.

+

Creation of html files for test suite source does no longer + crash if suite contains more than 9999 lines of code.

+

Added functionality for cross cover compilation, + i.e. collection of cover data from all tests.

+

Multiplying timetrap times with 10 when running with cover.

+

Added ts:r/3 for running tests with cover.

+

*** POTENTIAL INCOMPATIBILITY ***

+

Own Id: OTP-5040

+
+
+
+
+
+ diff --git a/lib/test_server/doc/src/part.xml b/lib/test_server/doc/src/part.xml new file mode 100644 index 0000000000..fdcd3d274e --- /dev/null +++ b/lib/test_server/doc/src/part.xml @@ -0,0 +1,45 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server User's Guide + + + 2002-07-11 + +
+ +

Test Server is a portable test server for + automated application testing. The server can run test suites + on local or remote targets and log progress and results to HTML + pages. The main purpose of Test Server is to act as an engine + inside customized test tools. A callback interface for + such framework applications is provided.

+
+ + + + + + +
+ diff --git a/lib/test_server/doc/src/part_notes.xml b/lib/test_server/doc/src/part_notes.xml new file mode 100644 index 0000000000..2347f64ca1 --- /dev/null +++ b/lib/test_server/doc/src/part_notes.xml @@ -0,0 +1,40 @@ + + + + +
+ + 20042009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server Release Notes + + + + +
+ +

The Test Server is a portable test server for + application testing. The test server can run automatic test suites + on local or remote target and log progress and results to HTML + pages. It also provides some support for test suite authors.

+

For information about older versions, see + Release Notes History.

+
+ +
+ diff --git a/lib/test_server/doc/src/part_notes_history.xml b/lib/test_server/doc/src/part_notes_history.xml new file mode 100644 index 0000000000..556d172755 --- /dev/null +++ b/lib/test_server/doc/src/part_notes_history.xml @@ -0,0 +1,38 @@ + + + + +
+ + 20062009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server Release Notes History + + + + +
+ +

The Test Server is a portable test server for + application testing. The test server can run automatic test suites + on local or remote target and log progress and results to HTML + pages. It also provides some support for test suite authors.

+
+ +
+ diff --git a/lib/test_server/doc/src/ref_man.xml b/lib/test_server/doc/src/ref_man.xml new file mode 100644 index 0000000000..17d6093dc0 --- /dev/null +++ b/lib/test_server/doc/src/ref_man.xml @@ -0,0 +1,43 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server Reference Manual + + + + + ref_man.xml +
+ +

Test Server is a portable test server for + automated application testing. The server can run test suites + on local or remote targets and log progress and results to HTML + pages. The main purpose of Test Server is to act as an engine + inside customized test tools. A callback interface for + such framework applications is provided.

+
+ + + +
+ diff --git a/lib/test_server/doc/src/run_test_chapter.xml b/lib/test_server/doc/src/run_test_chapter.xml new file mode 100644 index 0000000000..36bd41da1f --- /dev/null +++ b/lib/test_server/doc/src/run_test_chapter.xml @@ -0,0 +1,49 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Running Test Suites + Siri Hansen + + + + run_test_chapter.xml +
+ +
+ Using the test server controller +

The test server controller provides a low-level interface to + all the Test Server functionality. It is possible to use this + interface directly, but it is recommended to use a framework + such as Common Test instead. If no existing framework + suits your needs, you could of course build your own + on top of the test server controller. Some information about how + to do this can be found in the section named "Writing your own + test server framework" in the Test Server User's Guide. +

+

For information about using the controller directly, please see + all available functions in the reference manual for + test_server_ctrl. +

+
+
+ diff --git a/lib/test_server/doc/src/test_server.xml b/lib/test_server/doc/src/test_server.xml new file mode 100644 index 0000000000..6e75425862 --- /dev/null +++ b/lib/test_server/doc/src/test_server.xml @@ -0,0 +1,840 @@ + + + + +
+ + 2007 + 2008 + Ericsson AB, All Rights Reserved + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + The Initial Developer of the Original Code is Ericsson AB. + + + test_server + Siri Hansen + + + + + + + test_server_ref.sgml +
+ test_server + This module provides support for test suite authors. + +

The test_server module aids the test suite author by providing + various support functions. The supported functionality includes: +

+ + Logging and timestamping + + Capturing output to stdout + + Retrieving and flushing the message queue of a process + + Watchdog timers, process sleep, time measurement and unit + conversion + + Private scratch directory for all test suites + + Start and stop of slave- or peer nodes + +

For more information on how to write test cases and for + examples, please see the Test Server User's Guide. +

+
+ +
+ TEST SUITE SUPPORT FUNCTIONS +

The following functions are supposed to be used inside a test + suite. +

+
+ + + os_type() -> OSType + Returns the OS type of the target node + + OSType = term() + This is the same as returned from os:type/0 + + +

This function can be called on controller or target node, and + it will always return the OS type of the target node.

+
+
+ + fail() + fail(Reason) + Makes the test case fail. + + Reason = term() + The reason why the test case failed. + + +

This will make the test suite fail with a given reason, or + with suite_failed if no reason was given. Use this + function if you want to terminate a test case, as this will + make it easier to read the log- and HTML files. Reason + will appear in the comment field in the HTML log.

+
+
+ + timetrap(Timeout) -> Handle + + Timeout = integer() | {hours,H} | {minutes,M} | {seconds,S} + H = M = S = integer() + Pid = pid() + The process that is to be timetrapped (self() by default) + +

Sets up a time trap for the current process. An expired + timetrap kills the process with reason + timetrap_timeout. The returned handle is to be given + as argument to timetrap_cancel before the timetrap + expires. If Timeout is an integer, it is expected to + be milliseconds.

+ +

If the current process is trapping exits, it will not be killed + by the exit signal with reason timetrap_timeout. + If this happens, the process will be sent an exit signal + with reason kill 10 seconds later which will kill the + process. Information about the timetrap timeout will in + this case not be found in the test logs. However, the + error_logger will be sent a warning.

+
+
+
+ + timetrap_cancel(Handle) -> ok + Cancels a timetrap. + + Handle = term() + Handle returned from timetrap + + +

This function cancels a timetrap. This must be done before + the timetrap expires.

+
+
+ + timetrap_scale_factor() -> ScaleFactor + Returns the scale factor for timeouts. + + ScaleFactor = integer() + + +

This function returns the scale factor by which all timetraps + are scaled. It is normally 1, but can be greater than 1 if + the test_server is running cover, using a larger amount of + scheduler threads than the amount of logical processors on the + system, running under purify, valgrind or in a debug-compiled + emulator. The scale factor can be used if you need to scale you + own timeouts in test cases with same factor as the test_server + uses.

+
+
+ + sleep(MSecs) -> ok + Suspends the calling task for a specified time. + + MSecs = integer() | float() | infinity + The number of milliseconds to sleep + +

This function suspends the calling process for at least the + supplied number of milliseconds. There are two major reasons + why you should use this function instead of + timer:sleep, the first being that the module + timer may be unavailable at the time the test suite is + run, and the second that it also accepts floating point + numbers.

+
+
+ + hours(N) -> MSecs + minutes(N) -> MSecs + seconds(N) -> MSecs + + + N = integer() + Value to convert to milliseconds. + + +

These functions convert N number of hours, minutes + or seconds into milliseconds. +

+

Use this function when you want to + test_server:sleep/1 for a number of seconds, minutes or + hours(!).

+
+
+ + format(Format) -> ok + format(Format, Args) + format(Pri,Format) + format(Pri, Format, Args) + + + Format = string() + Format as described for io:format. + Args = list() + List of arguments to format. + +

Formats output just like io:format but sends the + formatted string to a logfile. If the urgency value, + Pri, is lower than some threshold value, it will also + be written to the test person's console. Default urgency is + 50, default threshold for display on the console is 1. +

+

Typically, the test person doesn't want to see everything a + test suite outputs, but is merely interested in whether the test + cases succeeded or not, which the test server tells him. If he + would like to see more, he could manually change the threshold + values by using the test_server_ctrl:set_levels/3 + function.

+
+
+ + capture_start() -> ok + capture_stop() -> ok + capture_get() -> list() + Captures all output to stdout for a process. + +

These functions make it possible to capture all output to + stdout from a process started by the test suite. The list of + characters captured can be purged by using capture_get.

+
+
+ + messages_get() -> list() + Empty the message queue. + +

This function will empty and return all the messages + currently in the calling process' message queue.

+
+
+ + timecall(M, F, A) -> {Time, Value} + Measures the time needed to call a function. + + M = atom() + The name of the module where the function resides. + F = atom() + The name of the function to call in the module. + A = list() + The arguments to supply the called function. + Time = integer() + The number of seconds it took to call the function. + Value = term() + Value returned from the called function. + + +

This function measures the time (in seconds) it takes to + call a certain function. The function call is not + caught within a catch.

+
+
+ + do_times(N, M, F, A) -> ok + do_times(N, Fun) + Calls MFA or Fun N times. + + N = integer() + Number of times to call MFA. + M = atom() + Module name where the function resides. + F = atom() + Function name to call. + A = list() + Arguments to M:F. + + +

Calls MFA or Fun N times. Useful for extensive testing of a + sensitive function.

+
+
+ + m_out_of_n(M, N, Fun) -> ok | exit({m_out_of_n_failed, {R,left_to_do}} + Fault tolerant do_times. + + N = integer() + Number of times to call the Fun. + M = integer() + Number of times to require a successful return. + + +

Repeatedly evaluates the given function until it succeeds + (doesn't crash) M times. If, after N times, M successful + attempts have not been accomplished, the process crashes with + reason {m_out_of_n_failed, {R,left_to_do}}, where R indicates + how many cases were still to be successfully completed. +

+

For example: +

+

m_out_of_n(1,4,fun() -> tricky_test_case() end)

+Tries to run tricky_test_case() up to 4 times, and is + happy if it succeeds once. +

+

m_out_of_n(7,8,fun() -> clock_sanity_check() end)

+Tries running clock_sanity_check() up to 8 times, and + allows the function to fail once. This might be useful if + clock_sanity_check/0 is known to fail if the clock crosses an + hour boundary during the test (and the up to 8 test runs could + never cross 2 boundaries)

+
+
+ + call_crash(M, F, A) -> Result + call_crash(Time, M, F, A) -> Result + call_crash(Time, Crash, M, F, A) -> Result + Calls MFA and succeeds if it crashes. + + Result = ok | exit(call_crash_timeout) | exit({wrong_crash_reason, Reason}) + Crash = term() + Crash return from the function. + Time = integer() + Timeout in milliseconds. + M = atom() + Module name where the function resides. + F = atom() + Function name to call. + A = list() + Arguments to M:F. + + +

Spawns a new process that calls MFA. The call is considered + successful if the call crashes with the given reason + (Crash) or any reason if not specified. The call must + terminate within the given time (default infinity), or + it is considered a failure.

+
+
+ + temp_name(Stem) -> Name + Returns a unique filename. + + Stem = string() + + +

Returns a unique filename starting with Stem with + enough extra characters appended to make up a unique + filename. The filename returned is guaranteed not to exist in + the filesystem at the time of the call.

+
+
+ + break(Comment) -> ok + Cancel all timetraps and wait for call to continue/0. + + Comment = string() + + +

Comment is a string which will be written in + the shell, e.g. explaining what to do.

+

This function will cancel all timetraps and pause the + execution of the test case until the user executes the + continue/0 function. It gives the user the opportunity + to interact with the erlang node running the tests, e.g. for + debugging purposes or for manually executing a part of the + test case.

+

When the break/1 function is called, the shell will + look something like this:

+ + + + "Here is a comment, it could e.g. instruct to pull out a card" + + + ----------------------------- + + Continue with --> test_server:continue(). ]]> +

The user can now interact with the erlang node, and when + ready call test_server:continue().

+

Note that this function can not be used if the test is + executed with ts:run/0/1/2/3/4 in batch mode.

+
+
+ + continue() -> ok + Continue after break/1. + +

This function must be called in order to continue after a + test case has called break/1.

+
+
+ + run_on_shielded_node(Fun, CArgs) -> term() + Execute a function on a shielded node. + + Fun = function() (arity 0) + Function to execute on the shielded node. + CArgs = string() + Extra command line arguments to use when starting the shielded node. + +

Fun is executed in a process on a temporarily created + hidden node with a proxy for communication with the test server + node. The node is called a shielded node (should have been called + a shield node). If Fun is successfully executed, the result + is returned. A peer node (see start_node/3) started from + the shielded node will be shielded from test server node, i.e. + they will not be aware of each other. This is useful when you want + to start nodes from earlier OTP releases than the OTP release of + the test server node.

+

Nodes from an earlier OTP release can normally not be started + if the test server hasn't been started in compatibility mode + (see the +R flag in the erl(1) documentation) of + an earlier release. If a shielded node is started in compatibility + mode of an earlier OTP release than the OTP release of the test + server node, the shielded node can start nodes of an earlier OTP + release.

+ +

You must make sure that nodes started by the shielded + node never communicate directly with the test server node.

+
+ +

Slave nodes always communicate with the test server node; + therefore, never start slave nodes from the + shielded node, always start peer nodes.

+
+
+
+ + start_node(Name, Type, Options) -> {ok, Node} | {error, Reason} + Start a node. + + Name = atom() | string() + Name of the slave node to start (as given to -sname or -name) + Type = slave | peer + The type of node to start. + Options = [{atom(), term()}] + Tuple list of options + +

This function starts a node, possibly on a remote machine, + and guarantees cross-architecture transparency. Type is set to + either slave or peer. +

+

slave means that the new node will have a master, + i.e. the slave node will terminate if the master terminates, + TTY output produced on the slave will be sent back to the + master node and file I/O is done via the master. The master is + normally the target node unless the target is itself a slave. +

+

peer means that the new node is an independent node + with no master. +

+

Options is a tuplelist which can contain one or more + of +

+ + {remote, true} + Start the node on a remote host. If not specified, the + node will be started on the local host (with some + exceptions, as for the case of VxWorks, where + all nodes are started on a remote host). Test cases that + require a remote host will fail with a reasonable comment if + no remote hosts are available at the time they are run. + + {args, Arguments} + Arguments passed directly to the node. This is + typically a string appended to the command line. + + {wait, false} + Don't wait until the node is up. By default, this + function does not return until the node is up and running, + but this option makes it return as soon as the node start + command is given.

+Only valid for peer nodes +
+ {fail_on_error, false} + Returns {error, Reason} rather than failing the + test case. +

+Only valid for peer nodes. Note that slave nodes always + act as if they had fail_on_error=false
+ {erl, ReleaseList} + Use an Erlang emulator determined by ReleaseList when + starting nodes, instead of the same emulator as the test + server is running. ReleaseList is a list of specifiers, + where a specifier is either {release, Rel}, {prog, Prog}, or + 'this'. Rel is either the name of a release, e.g., "r12b_patched" + or 'latest'. 'this' means using the same emulator as the test + server. Prog is the name of an emulator executable. If the + list has more than one element, one of them is picked + randomly. (Only works on Solaris and Linux, and the test server + gives warnings when it notices that nodes are not of the same + version as itself.) +

+

+ + When specifying this option to run a previous release, use + is_release_available/1 function to test if the given + release is available and skip the test case if not. +

+

+ + In order to avoid compatibility problems (may not appear right + away), use a shielded node (see run_on_shielded_node/2) + when starting nodes from different OTP releases than the test + server. +
+ {cleanup, false} + Tells the test server not to kill this node if it is + still alive after the test case is completed. This is useful + if the same node is to be used by a group of test cases. + + {env, Env} + Env should be a list of tuples {Name, Val}, + where Name is the name of an environment variable, and + Val is the value it is to have in the started node. + Both Name and Val must be strings. The one + exception is Val being the atom false (in + analogy with os:getenv/1), which removes the + environment variable. Only valid for peer nodes. Not + available on VxWorks. +
+
+
+ + stop_node(NodeName) -> bool() + Stops a node + + NodeName = term() + Name of the node to stop + + +

This function stops a node previously started with + start_node/3. Use this function to stop any node you + start, or the test server will produce a warning message in + the test logs, and kill the nodes automatically unless it was + started with the {cleanup, false} option.

+
+
+ + is_commercial() -> bool() + Tests whether the emulator is commercially supported + +

This function tests whether the emulator is a commercially supported + emulator. The tests for a commercially supported emulator could be more + stringent (for instance, a commercial release should always contain + documentation for all applications).

+
+
+ + + is_release_available(Release) -> bool() + Tests whether a release is available + + Release = string() | atom() + Release to test for + + +

This function tests whether the release given by + Release (for instance, "r12b_patched") is available + on the computer that the test_server controller is running on. + Typically, you should skip the test case if not.

+

Caution: This function may not be called from the suite + clause of a test case, as the test_server will deadlock.

+
+
+ + is_native(Mod) -> bool() + Checks whether the module is natively compiled or not + + Mod = atom() + A module name + + +

Checks whether the module is natively compiled or not

+
+
+ + app_test(App) -> ok | test_server:fail() + app_test(App,Mode) + Checks an application's .app file for obvious errors + + App = term() + The name of the application to test + Mode = pedantic | tolerant + Default is pedantic + +

Checks an application's .app file for obvious errors. + The following is checked: +

+ + required fields + + that all modules specified actually exist + + that all required applications exist + + that no module included in the application has export_all + + that all modules in the ebin/ dir are included (If + Mode==tolerant this only produces a warning, as all + modules do not have to be included) + +
+
+ + comment(Comment) -> ok + Print a comment on the HTML result page + + Comment = string() + + +

The given String will occur in the comment field of the + table on the HTML result page. If called several times, only + the last comment is printed. comment/1 is also overwritten by + the return value {comment,Comment} from a test case or by + fail/1 (which prints Reason as a comment).

+
+
+
+ +
+ TEST SUITE EXPORTS +

The following functions must be exported from a test suite + module. +

+
+ + + all(suite) -> TestSpec | {skip, Comment} + Returns the module's test specification + + TestSpec = list() + Comment = string() + This comment will be printed on the HTML result page + + +

This function must return the test specification for the + test suite module. The syntax of a test specification is + described in the Test Server User's Guide.

+
+
+ + init_per_suite(Config0) -> Config1 | {skip, Comment} + Test suite initiation + + Config0 = Config1 = [tuple()] + Comment = string() + Describes why the suite is skipped + + +

This function is called before all other test cases in the + suite. Config is the configuration which can be modified + here. Whatever is returned from this function is given as + Config to the test cases. +

+

If this function fails, all test cases in the suite will be + skipped.

+
+
+ + end_per_suite(Config) -> void() + Test suite finalization + + Config = [tuple()] + + +

This function is called after the last test case in the + suite, and can be used to clean up whatever the test cases + have done. The return value is ignored.

+
+
+ + init_per_testcase(Case, Config0) -> Config1 | {skip, Comment} + Test case initiation + + Case = atom() + Config0 = Config1 = [tuple()] + Comment = string() + Describes why the test case is skipped + + +

This function is called before each test case. The + Case argument is the name of the test case, and + Config is the configuration which can be modified + here. Whatever is returned from this function is given as + Config to the test case.

+
+
+ + end_per_testcase(Case, Config) -> void() + Test case finalization + + Case = atom() + Config = [tuple()] + + +

This function is called after each test case, and can be + used to clean up whatever the test case has done. The return + value is ignored.

+
+
+ + Case(doc) -> [Description] + Case(suite) -> [] | TestSpec | {skip, Comment} + Case(Config) -> {skip, Comment} | {comment, Comment} | Ok + A test case + + Description = string() + Short description of the test case + TestSpec = list() + Comment = string() + This comment will be printed on the HTML result page + Ok = term() + Config = [tuple()] + Elements from the Config parameter can be read with the ?config macro, see section about test suite support macros + +

The documentation clause (argument doc) can + be used for automatic generation of test documentation or test + descriptions. +

+

The specification clause (argument spec) + shall return an empty list, the test specification for the + test case or {skip,Comment}. The syntax of a test + specification is described in the Test Server User's Guide. +

+

Note that the specification clause always is executed on the controller host.

+

The execution clause (argument Config) is + only called if the specification clause returns an empty list. + The execution clause is the real test case. Here you must call + the functions you want to test, and do whatever you need to + check the result. If something fails, make sure the process + crashes or call test_server:fail/0/1 (which also will + cause the process to crash). +

+

You can return {skip,Comment} if you decide not to + run the test case after all, e.g. if it is not applicable on + this platform. +

+

You can return {comment,Comment} if you wish to + print some information in the 'Comment' field on the HTML + result page. +

+

If the execution clause returns anything else, it is + considered a success, unless it is {'EXIT',Reason} or + {'EXIT',Pid,Reason} which can't be distinguished from a + crash, and thus will be considered a failure. +

+

A conf test case is a group of test cases with an + init and a cleanup function. The init and cleanup functions + are also test cases, but they have special rules:

+ + They do not need a specification clause. + They must always have the execution clause. + They must return the Config parameter, a modified + version of it or {skip,Comment} from the execution clause. + The cleanup function may also return a tuple + {return_group_result,Status}, which is used to return the + status of the conf case to Test Server and/or to a conf case on a + higher level. (Status = ok | skipped | failed). + init_per_testcase and end_per_testcase are + not called before and after these functions. + +
+
+
+ +
+ TEST SUITE LINE NUMBERS +

If a test case fails, the test server can report the exact line + number at which it failed. There are two ways of doing this, + either by using the line macro or by using the + test_server_line parse transform. +

+

The line macro is described under TEST SUITE SUPPORT + MACROS below. The line macro will only report the last line + executed when a test case failed. +

+

The test_server_line parse transform is activated by + including the headerfile test_server_line.hrl in the test + suite. When doing this, it is important that the + test_server_line module is in the code path of the erlang + node compiling the test suite. The parse transform will report a + history of a maximum of 10 lines when a test case + fails. Consecutive lines in the same function are not shown. +

+

The attribute -no_lines(FuncList). can be used in the + test suite to exclude specific functions from the parse + transform. This is necessary e.g. for functions that are executed + on old (i.e. <R10B) OTP releases. FuncList = [{Func,Arity}]. +

+

If both the line macro and the parse transform is used in + the same module, the parse transform will overrule the macro. +

+
+ +
+ TEST SUITE SUPPORT MACROS +

There are some macros defined in the test_server.hrl + that are quite useful for test suite programmers: +

+

The line macro, is quite + essential when writing test cases. It tells the test server + exactly what line of code that is being executed, so that it can + report this line back if the test case fails. Use this macro at + the beginning of every test case line of code. +

+

The config macro, is used to + retrieve information from the Config variable sent to all + test cases. It is used with two arguments, where the first is the + name of the configuration variable you wish to retrieve, and the + second is the Config variable supplied to the test case + from the test server. +

+

Possible configuration variables include:

+ + data_dir - Data file directory. + priv_dir - Scratch file directory. + nodes - Nodes specified in the spec file + nodenames - Generated nodenames. + Whatever added by conf test cases or + init_per_testcase/2 + +

Examples of the line and config macros can be + seen in the Examples chapter in the user's guide. +

+

If the line_trace macro is defined, you will get a + timestamp (erlang:now()) in your minor log for each + line macro in your suite. This way you can at any time see + which line is currently being executed, and when the line was + called. +

+

The line_trace macro can also be used together with the + test_server_line parse transform described above. A + timestamp will then be written for each line in the suite, except + for functions stated in the -no_lines attribute. +

+

The line_trace macro can e.g. be defined as a compile + option, like this: +

+erlc -W -Dline_trace my_SUITE.erl

+
+
+ diff --git a/lib/test_server/doc/src/test_server_app.xml b/lib/test_server/doc/src/test_server_app.xml new file mode 100644 index 0000000000..924cdc886b --- /dev/null +++ b/lib/test_server/doc/src/test_server_app.xml @@ -0,0 +1,75 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Server Application + Siri Hansen + Peter Andersson + + + + 2002-07-12 + PA1 + test_server_app.xml +
+ test_server + Test Server for manual or automatic testing of Erlang code + +

Test Server is a portable test server for + automated application testing. The server can run test suites + on local or remote targets and log progress and results to HTML + pages. The main purpose of Test Server is to act as an engine + inside customized test tools. A callback interface for + such framework applications is provided.

+

In brief the test server supports:

+ + Running multiple, concurrent test suites + Running tests on remote and even diskless targets + Test suites may contain other test suites, in a tree fashion + Logging of the events in a test suite, on both suite and case levels + HTML presentation of test suite results + HTML presentation of test suite code + Support for test suite authors, e.g. start/stop slave nodes + Call trace on target and slave nodes + +

For information about how to write test cases and test suites, + please see the Test Server User's Guide and the reference + manual for the test_server module. +

+

Common Test is an existing test tool application based on the + OTP Test Server. Please read the Common Test User's Guide for more information. +

+
+ +
+ Configuration +

There are currently no configuration parameters available for + this application. +

+
+ +
+ SEE ALSO +

+
+
+ diff --git a/lib/test_server/doc/src/test_server_ctrl.xml b/lib/test_server/doc/src/test_server_ctrl.xml new file mode 100644 index 0000000000..3d95813c14 --- /dev/null +++ b/lib/test_server/doc/src/test_server_ctrl.xml @@ -0,0 +1,771 @@ + + + + +
+ + 2007 + 2008 + Ericsson AB, All Rights Reserved + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + The Initial Developer of the Original Code is Ericsson AB. + + + The Test Server Controller + Siri Hansen + + + + + + + test_server_ctrl_ref.sgml +
+ test_server_ctrl + This module provides a low level interface to the Test Server. + +

The test_server_ctrl module provides a low level + interface to the Test Server. This interface is normally + not used directly by the tester, but through a framework built + on top of test_server_ctrl. +

+

Common Test is such a framework, well suited for automated + black box testing of target systems of any kind (not necessarily + implemented in Erlang). Common Test is also a very useful tool for + white box testing Erlang programs and OTP applications. + Please see the Common Test User's Guide and reference manual for + more information. +

+

If you want to write your own framework, some more information + can be found in the chapter "Writing your own test server + framework" in the Test Server User's Guide. Details about the + interface provided by test_server_ctrl follows below. +

+
+ + + start() -> Result + start(ParameterFile) -> Result + Starts the test server. + + Result = ok | {error, {already_started, pid()} + ParameterFile = atom() | string() + + +

This function starts the test server. If the parameter file + is given, it indicates that the target is remote. In that case + the target node is started and a socket connection is + established between the controller and the target node. +

+

The parameter file is a text file containing key-value + tuples. Each tuple must be followed by a dot-newline + sequence. The following key-value tuples are allowed: +

+ + {type,PlatformType} + This is an atom indicating the target platform type, + currently supported: PlatformType = vxworks

+Mandatory +
+ {target,TargetHost} + This is the name of the target host, can be atom or + string. +

+Mandatory +
+ {slavetargets,SlaveTargets} + This is a list of available hosts where slave nodes + can be started. The hostnames are given as atoms or strings. +

+Optional, default SlaveTargets = []
+ {longnames,Bool} + This indicates if longnames shall be used, i.e. if the + -name option should be used for the target node + instead of -sname

+Optional, default Bool = false
+ {master, {MasterHost, MasterCookie}} + If target is remote and the target node is started as + a slave node, this option indicates which master and + cookie to use. The given master + will also be used as master for slave nodes started with + test_server:start_node/3. It is expected that the + erl_boot_server is started on the master node before + the test_server_ctrl:start/1 function is called. +

+Optional, if not given the test server controller node + is used as master and the erl_boot_server is + automatically started.
+
+
+
+ + stop() -> ok + Stops the test server immediately. + +

This stops the test server (both controller and target) and + all its activity. The running test suite (if any) will be + halted.

+
+
+ + add_dir(Name, Dir) -> ok + add_dir(Name, Dir, Pattern) -> ok + add_dir(Name, [Dir|Dirs]) -> ok + add_dir(Name, [Dir|Dirs], Pattern) -> ok + Add a directory to the job queue. + + Name = term() + The jobname for this directory. + Dir = term() + The directory to scan for test suites. + Dirs = [term()] + List of directories to scan for test suites. + Pattern = term() + Suite match pattern. Directories will be scanned for Pattern_SUITE.erl files. + + +

Puts a collection of suites matching (*_SUITE) in given + directories into the job queue. Name is an arbitrary + name for the job, it can be any erlang term. If Pattern + is given, only modules matching Pattern* will be added.

+
+
+ + add_module(Mod) -> ok + add_module(Name, [Mod|Mods]) -> ok + Add a module to the job queue with or without a given name. + + Mod = atom() + Mods = [atom()] + The name(s) of the module(s) to add. + Name = term() + Name for the job. + + +

This function adds a module or a list of modules, to the + test servers job queue. Name may be any Erlang + term. When Name is not given, the job gets the name of + the module.

+
+
+ + add_case(Mod, Case) -> ok + Adds one test case to the job queue. + + Mod = atom() + Name of the module the test case is in. + Case = atom() + Function name of the test case to add. + + +

This function will add one test case to the job queue. The + job will be given the module's name.

+
+
+ + add_case(Name, Mod, Case) -> ok + Equivalent to add_case/2, but with specified name. + + Name = string() + Name to use for the test job. + + +

Equivalent to add_case/2, but the test job will get + the specified name.

+
+
+ + add_cases(Mod, Cases) -> ok + Adds a list of test cases to the job queue. + + Mod = atom() + Name of the module the test case is in. + Cases = [Case] + Case = atom() + Function names of the test cases to add. + + +

This function will add one or more test cases to the job + queue. The job will be given the module's name.

+
+
+ + add_cases(Name, Mod, Cases) -> ok + Equivalent to add_cases/2, but with specified name. + + Name = string() + Name to use for the test job. + + +

Equivalent to add_cases/2, but the test job will get + the specified name.

+
+
+ + add_spec(TestSpecFile) -> ok | {error, nofile} + Adds a test specification file to the job queue. + + TestSpecFile = string() + Name of the test specification file + + +

This function will add the content of the given test + specification file to the job queue. The job will be given the + name of the test specification file, e.g. if the file is + called test.spec, the job will be called test. +

+

See the reference manual for the test server application + for details about the test specification file.

+
+
+ + add_dir_with_skip(Name, [Dir|Dirs], Skip) -> ok + add_dir_with_skip(Name, [Dir|Dirs], Pattern, Skip) -> ok + add_module_with_skip(Mod, Skip) -> ok + add_module_with_skip(Name, [Mod|Mods], Skip) -> ok + add_case_with_skip(Mod, Case, Skip) -> ok + add_case_with_skip(Name, Mod, Case, Skip) -> ok + add_cases_with_skip(Mod, Cases, Skip) -> ok + add_cases_with_skip(Name, Mod, Cases, Skip) -> ok + Same purpose as functions listed above, but with extra Skip argument. + + Skip = [SkipItem] + List of items to be skipped from the test. + SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment} + Mod = atom() + Test suite name. + Comment = string() + Reason why suite or case is being skipped. + Cases = [Case] + Case = atom() + Name of test case function. + + +

These functions add test jobs just like the add_dir, add_module, + add_case and add_cases functions above, but carry an additional + argument, Skip. Skip is a list of items that should be skipped + in the current test run. Test job items that occur in the Skip + list will be logged as SKIPPED with the associated Comment.

+
+
+ + add_tests_with_skip(Name, Tests, Skip) -> ok + Adds different types of jobs to the run queue. + + Name = term() + The jobname for this directory. + Tests = [TestItem] + List of jobs to add to the run queue. + TestItem = {Dir,all,all} | {Dir,Mods,all} | {Dir,Mod,Cases} + Dir = term() + The directory to scan for test suites. + Mods = [Mod] + Mod = atom() + Test suite name. + Cases = [Case] + Case = atom() + Name of test case function. + Skip = [SkipItem] + List of items to be skipped from the test. + SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment} + Comment = string() + Reason why suite or case is being skipped. + + +

This function adds various test jobs to the test_server_ctrl + job queue. These jobs can be of different type (all or specific suites + in one directory, all or specific cases in one suite, etc). It is also + possible to get particular items skipped by passing them along in the + Skip list (see the add_*_with_skip functions above).

+
+
+ + abort_current_testcase(Reason) -> ok | {error,no_testcase_running} + Aborts the test case currently executing. + + Reason = term() + The reason for stopping the test case, which will be printed in the log. + + +

When calling this function, the currently executing test case will be aborted. + It is the user's responsibility to know for sure which test case is currently + executing. The function is therefore only safe to call from a function which + has been called (or synchronously invoked) by the test case.

+
+
+ + set_levels(Console, Major, Minor) -> ok + Sets the levels of I/O. + + Console = integer() + Level for I/O to be sent to console. + Major = integer() + Level for I/O to be sent to the major logfile. + Minor = integer() + Level for I/O to be sent to the minor logfile. + + +

Determines where I/O from test suites/test server will + go. All text output from test suites and the test server is + tagged with a priority value which ranges from 0 to 100, 100 + being the most detailed. (see the section about log files in + the user's guide). Output from the test cases (using + io:format/2) has a detail level of 50. Depending on the + levels set by this function, this I/O may be sent to the + console, the major log file (for the whole test suite) or to + the minor logfile (separate for each test case). +

+

All output with detail level:

+ + Less than or equal to Console is displayed on + the screen (default 1) + + Less than or equal to Major is logged in the + major log file (default 19) + + Greater than or equal to Minor is logged in the + minor log files (default 10) + + +

To view the currently set thresholds, use the + get_levels/0 function.

+
+
+ + get_levels() -> {Console, Major, Minor} + Returns the current levels. + +

Returns the current levels. See set_levels/3 for + types.

+
+
+ + jobs() -> JobQueue + Returns the job queue. + + JobQueue = [{list(), pid()}] + + +

This function will return all the jobs currently in the job + queue.

+
+
+ + multiply_timetraps(N) -> ok + All timetraps started after this will be multiplied by N. + + N = integer() | infinity + + +

This function should be called before a test is started + which requires extended timetraps, e.g. if extensive tracing + is used. All timetraps started after this call will be + multiplied by N.

+
+
+ + cover(Application,Analyse) -> ok + cover(CoverFile,Analyse) -> ok + cover(App,CoverFile,Analyse) -> ok + Informs the test_server controller that next test shall run with code coverage analysis. + + Application = atom() + OTP application to cover compile + CoverFile = string() + Name of file listing modules to exclude from or include in cover compilation. The filename must include full path to the file. + Analyse = details | overview + + +

This function informs the test_server controller that the next + test shall run with code coverage analysis. All timetraps will + automatically be multiplied by 10 when cover is run. +

+

Application and CoverFile indicates what to + cover compile. If Application is given, the default is + that all modules in the ebin directory of the + application will be cover compiled. The ebin directory + is found by adding ebin to + code:lib_dir(Application). +

+

A CoverFile can have the following entries:

+ +{exclude, all | ExcludeModuleList}. +{include, IncludeModuleList}. +

Note that each line must end with a full + stop. ExcludeModuleList and IncludeModuleList + are lists of atoms, where each atom is a module name. +

+

If both an Application and a CoverFile is + given, all modules in the application are cover compiled, + except for the modules listed in ExcludeModuleList. The + modules in IncludeModuleList are also cover compiled. +

+

If a CoverFile is given, but no Application, + only the modules in IncludeModuleList are cover + compiled. +

+

Analyse indicates the detail level of the cover + analysis. If Analyse = details, each cover compiled + module will be analysed with + cover:analyse_to_file/1. If Analyse = overview + an overview of all cover compiled modules is created, listing + the number of covered and not covered lines for each module. +

+

If the test following this call starts any slave or peer + nodes with test_server:start_node/3, the same cover + compiled code will be loaded on all nodes. If the loading + fails, e.g. if the node runs an old version of OTP, the node + will simply not be a part of the coverage analysis. Note that + slave or peer nodes must be stopped with + test_server:stop_node/1 for the node to be part of the + coverage analysis, else the test server will not be able to + fetch coverage data from the node. +

+

When the test is finished, the coverage analysis is + automatically completed, logs are created and the cover + compiled modules are unloaded. If another test is to be run + with coverage analysis, test_server_ctrl:cover/2/3 must + be called again. +

+
+
+ + cross_cover_analyse(Level) -> ok + Analyse cover data collected from all tests + + Level = details | overview + + +

Analyse cover data collected from all tests. The modules + analysed are the ones listed in the cross cover file + cross.cover in the current directory of the test + server.

+

The modules listed in the cross.cover file are + modules that are heavily used by other applications than the + one they belong to. This function should be run after all + tests are completed, and the result will be stored in a file + called cross_cover.html in the run.<timestamp> + directory of the application the modules belong to. +

+

The cross.cover file contains elements like this:

+
+{App,Modules}.        
+

where App can be an application name or the atom + all. The application (or all applications) will cover + compile the listed Modules. +

+
+
+ + trc(TraceInfoFile) -> ok | {error, Reason} + Starts call trace on target and slave nodes + + TraceInfoFile = atom() | string() + Name of a file defining which functions to trace and how + + +

This function starts call trace on target and on slave or + peer nodes that are started or will be started by the test + suites. +

+

Timetraps are not extended automatically when tracing is + used. Use multiply_timetraps/1 if necessary. +

+

Note that the trace support in the test server is in a very + early stage of the implementation, and thus not yet as + powerful as one might wish for. +

+

The trace information file specified by the + TraceInfoFile argument is a text file containing one or + more of the following elements: +

+ + {SetTP,Module,Pattern}. + {SetTP,Module,Function,Pattern}. + {SetTP,Module,Function,Arity,Pattern}. + ClearTP. + {ClearTP,Module}. + {ClearTP,Module,Function}. + {ClearTP,Module,Function,Arity}. + + + SetTP = tp | tpl + This maps to the corresponding functions in the + ttb module in the observer + application. tp means set trace pattern on global + function calls. tpl means set trace pattern on local + and global function calls. + + ClearTP = ctp | ctpl | ctpg + This maps to the corresponding functions in the + ttb module in the observer + application. ctp means clear trace pattern (i.e. turn + off) on global and local function calls. ctpl means + clear trace pattern on local function calls only and ctpg + means clear trace pattern on global function calls only. + + Module = atom() + The module to trace + + Function = atom() + The name of the function to trace + + Arity = integer() + The arity of the function to trace + + Pattern = [] | match_spec() + The trace pattern to set for the module or + function. For a description of the match_spec() syntax, + please turn to the User's guide for the runtime system + (erts). The chapter "Match Specification in Erlang" explains + the general match specification language. + +

The trace result will be logged in a (binary) file called + NodeName-test_server in the current directory of the + test server controller node. The log must be formatted using + ttb:format/1/2. +

+

This is valid for all targets except the OSE/Delta target + for which all nodes will be logged and automatically formatted + in one single text file called allnodes-test_server.

+
+
+ + stop_trace() -> ok | {error, not_tracing} + Stops tracing on target and slave nodes. + +

This function stops tracing on target, and on slave or peer + nodes that are currently running. New slave or peer nodes will + no longer be traced after this.

+
+
+
+ +
+ FUNCTIONS INVOKED FROM COMMAND LINE +

The following functions are supposed to be invoked from the + command line using the -s option when starting the erlang + node.

+
+ + + run_test(CommandLine) -> ok + Runs the tests specified on the command line. + + CommandLine = FlagList + + +

This function is supposed to be invoked from the + commandline. It starts the test server, interprets the + argument supplied from the commandline, runs the tests + specified and when all tests are done, stops the test server + and returns to the Erlang prompt. +

+

The CommandLine argument is a list of command line + flags, typically ['KEY1', Value1, 'KEY2', Value2, ...]. + The valid command line flags are listed below. +

+

Under a UNIX command prompt, this function can be invoked like this: +

+erl -noshell -s test_server_ctrl run_test KEY1 Value1 KEY2 Value2 ... -s erlang halt

+

Or make an alias (this is for unix/tcsh)

+alias erl_test 'erl -noshell -s test_server_ctrl run_test \\!* -s erlang halt'

+

And then use it like this

+erl_test KEY1 Value1 KEY2 Value2 ...

+

+

The valid command line flags are

+ + DIR dir + Adds all test modules in the directory dir to + the job queue. + + MODULE mod + Adds the module mod to the job queue. + + CASE mod case + Adds the case case in module mod to the + job queue. + + SPEC spec + Runs the test specification file spec. + + SKIPMOD mod + Skips all test cases in the module mod + SKIPCASE mod case + Skips the test case case in module mod. + + NAME name + Names the test suite to something else than the + default name. This does not apply to SPEC which keeps + its names. + + PARAMETERS parameterfile + Specifies the parameter file to use when starting + remote target + + COVER app cover_file analyse + Indicates that the test should be run with cover + analysis. app, cover_file and analyse + correspond to the parameters to + test_server_ctrl:cover/3. If no cover file is used, + the atom none should be given. + + TRACE traceinfofile + Specifies a trace information file. When this option + is given, call tracing is started on the target node and all + slave or peer nodes that are started. The trace information + file specifies which modules and functions to trace. See the + function trc/1 above for more information about the + syntax of this file. + +
+
+
+ +
+ FRAMEWORK CALLBACK FUNCTIONS +

A test server framework can be defined by setting the + environment variable TEST_SERVER_FRAMEWORK to a module + name. This module will then be the framework callback module, and it + must export the following functions:

+
+ + + get_suite(Mod,Func) -> TestCaseList + Get subcases. + + Mod = atom() + Func = atom() + TestCaseList = [SubCase] + + +

This function is called before a test case is started. The + purpose is to retrieve a list of subcases. The default + behaviour of this function should be to call + Mod:Func(suite) and return the result from this call.

+
+
+ + init_tc(Mod,Func,Args) -> {ok,Args} + Preparation for a test case. + + Mod = atom() + Func = atom() + Args = [tuple()] + Normally Args = [Config] + + +

This function is called when a test case is started. It is + called on the process executing the test case function + (Mod:Func). Typical use of this function can be to alter + the input parameters to the test case function (Args) or + to set properties for the executing process.

+
+
+ + end_tc(Mod,Func,Args) -> ok + Cleanup after a test case. + + Mod = atom() + Func = atom() + Args = [tuple()] + Normally Args = [Config] + + +

This function is called when a test case is completed. It is + called on the process where the test case function + (Mod:Func) was executed. Typical use of this function can + be to clean up stuff done by init_tc/3.

+
+
+ + report(What,Data) -> ok + Progress report for test. + + What = atom() + Data = term() + + +

This function is called in order to keep the framework up to + date about the progress of the test. This is useful e.g. if the + framework implements a GUI where the progress information is + constantly updated. The following can be reported: +

+

What = tests_start, Data = {Name,NumCases}

+What = tests_done, Data = {Ok,Failed,Skipped}

+What = tc_start, Data = {Mod,Func}

+What = tc_done, Data = {Mod,Func,Result}

+
+
+ + error_notification(Mod, Case, Args, Error) -> ok + Inform framework of crashing testcase. + + Mod = atom() + Test suite name. + Case = atom() + Name of test case function. + Args = [tuple()] + Normally Args = [Config] + Error = {Reason,Location} + Reason = term() + Reason for termination. + Location = unknown | [{Mod,Case,Line}] + Last known position in Mod before termination. + Line = integer() + Line number in file Mod.erl. + + +

This function is called as the result of testcase Mod:Case failing + with Reason at Location. The function is intended mainly to aid + specific logging or error handling in the framework application. Note + that for Location to have relevant values (i.e. other than unknown), + the line macro or test_server_line parse transform must + be used. For details, please see the section about test suite line numbers + in the test_server reference manual page.

+
+
+ + warn(What) -> boolean() + Ask framework if test server should issue a warning for What. + + What = processes | nodes + + +

The test server checks the number of processes and nodes + before and after the test is executed. This function is a + question to the framework if the test server should warn when + the number of processes or nodes has changed during the test + execution. If true is returned, a warning will be written + in the test case minor log file.

+
+
+ + target_info() -> InfoStr + Print info about the target system to the test case log. + + InfoStr = string() | "" + + +

The test server will ask the framework for information about + the test target system and print InfoStr in the test case + log file below the host information.

+
+
+
+
+ diff --git a/lib/test_server/doc/src/test_spec_chapter.xml b/lib/test_server/doc/src/test_spec_chapter.xml new file mode 100644 index 0000000000..3a7730d61e --- /dev/null +++ b/lib/test_server/doc/src/test_spec_chapter.xml @@ -0,0 +1,375 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Test Structure and Test Specifications + Siri Hansen + + + + test_spec_chapter.xml +
+ +
+ Test structure +

A test consists of a set of test cases. Each test case is + implemented as an erlang function. An erlang module implementing + one or more test cases is called a test suite. +

+
+ +
+ Test specifications +

A test specification is a specification of which test suites + and test cases to run and which to skip. A test specification can + also group several test cases into conf cases with init and + cleanup functions (see section about configuration cases + below). In a test there can be test specifications on three + different levels: +

+

The top level is a test specification file which roughly + specifies what to test for a whole application. The test + specification in such a file is encapsulated in a topcase + command. +

+

Then there is a test specification for each test suite, + specifying which test cases to run within the suite. The test + specification for a test suite is returned from the + all(suite) function in the test suite module. +

+

And finally there can be a test specification per test case, + specifying sub test cases to run. The test specification for a + test case is returned from the specification clause of the test + case. +

+

When a test starts, the total test specification is built in a + tree fashion, starting from the top level test specification. +

+

The following are the valid elements of a test + specification. The specification can be one of these elements or a + list with any combination of the elements: +

+ + {Mod, Case} + This specifies the test case Mod:Case/1 + + {dir, Dir} + This specifies all modules *_SUITE in the directory + Dir + {dir, Dir, Pattern} + This specifies all modules Pattern* in the + directory Dir + {conf, Init, TestSpec, Fin} + This is a configuration case. In a test specification + file, Init and Fin must be + {Mod,Func}. Inside a module they can also be just + Func. See the section named Configuration Cases below for + more information about this. + + {conf, Properties, Init, TestSpec, Fin} + This is a configuration case as explained above, but + which also takes a list of execution properties for its group + of test cases and nested sub-groups. + + {make, Init, TestSpec, Fin} + This is a special version of a conf case which is only + used by the test server framework ts. Init and + Fin are make and unmake functions for a data + directory. TestSpec is the test specification for the + test suite owning the data directory in question. If the make + function fails, all tests in the test suite are skipped. The + difference between this "make case" and a normal conf case is + that for the make case, Init and Fin are given with + arguments ({Mod,Func,Args}), and that they are executed + on the controller node (i.e. not on target). + + Case + This can only be used inside a module, i.e. not a test + specification file. It specifies the test case + CurrentModule:Case. + + +
+ +
+ Test Specification Files +

A test specification file is a text file containing the top + level test specification (a topcase command), and possibly one or + more additional commands. A "command" in a test specification file + means a key-value tuple ended by a dot-newline sequence. +

+

The following commands are valid: +

+ + {topcase, TestSpec} + This command is mandatory in all test specification + files. TestSpec is the top level test specification of a + test. + + {skip, {Mod, Comment}} + This specifies that all cases in the module Mod + shall be skipped. Comment is a string. + + {skip, {Mod, Case, Comment}} + This specifies that the case Mod:Case shall be + skipped. + + {skip, {Mod, CaseList, Comment}} + This specifies that all cases Mod:Case, where + Case is in CaseList, shall be skipped. + + {nodes, Nodes} + Nodes is a list of nodenames available to the test + suite. It will be added to the Config argument to all + test cases. Nodes is a list of atoms. + + {require_nodenames, Num} + Specifies how many nodenames the test suite will + need. These will be automatically generated and inserted into the + Config argument to all test cases. Num is an + integer. + + {hosts, Hosts} + This is a list of available hosts on which to start slave + nodes. It is used when the {remote, true} option is given + to the test_server:start_node/3 function. Also, if + {require_nodenames, Num} is contained in a test + specification file, the generated nodenames will be spread over + all hosts given in this Hosts list. The hostnames are + atoms or strings. + + {diskless, true} + Adds {diskless, true} to the Config argument + to all test cases. This is kept for backwards compatibility and + should not be used. Use a configuration case instead. + + {ipv6_hosts, Hosts} + Adds {ipv6_hosts, Hosts} to the Config + argument to all test cases. +

All test specification files shall have the extension + ".spec". If special test specification files are needed for + Windows or VxWorks platforms, additional files with the + extension ".spec.win" and ".spec.vxworks" shall be + used. This is useful e.g. if some test cases shall be skipped on + these platforms. +

+

Some examples for test specification files can be found in the + Examples section of this user's guide. +

+
+ +
+ Configuration cases +

If a group of test cases need the same initialization, a so called + configuration or conf case can be used. A conf + case consists of an initialization function, the group of test cases + needing this initialization and a cleanup or finalization function. +

+

If the init function in a conf case fails or returns + {skip,Comment}, the rest of the test cases in the conf case + (including the cleanup function) are skipped. If the init function + succeeds, the cleanup function will always be called, even if some + of the test cases in between failed. +

+

Both the init function and the cleanup function in a conf case + get the Config parameter as only argument. This parameter + can be modified or returned as is. Whatever is returned by the + init function is given as Config parameter to the rest of + the test cases in the conf case, including the cleanup function. +

+

If the Config parameter is changed by the init function, + it must be restored by the cleanup function. Whatever is returned + by the cleanup function will be given to the next test case called. +

+

The optional Properties list can be used to specify + execution properties for the test cases and possibly nested + sub-groups of the configuration case. The available properties are:

+
+      Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+      Shuffle = shuffle | {shuffle,Seed}
+      Seed = {integer(),integer(),integer()}
+      RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+                   repeat_until_any_ok | repeat_until_any_fail
+      N = integer() | forever
+ +

If the parallel property is specified, Test Server will execute + all test cases in the group in parallel. If sequence is specified, + the cases will be executed in a sequence, meaning if one case fails, all + following cases will be skipped. If shuffle is specified, the cases + in the group will be executed in random order. The repeat property + orders Test Server to repeat execution of the cases in the group a given + number of times, or until any, or all, cases fail or succeed.

+ +

Properties may be combined so that e.g. if shuffle, + repeat_until_any_fail and sequence are all specified, the test + cases in the group will be executed repeatedly and in random order until + a test case fails, when execution is immediately stopped and the rest of + the cases skipped.

+ +

The properties for a conf case is always printed on the top of the HTML log + for the group's init function. Also, the total execution time for a conf case + can be found at the bottom of the log for the group's end function.

+ +

Configuration cases may be nested so that sets of grouped cases can be + configured with the same init- and end functions.

+
+ +
+ The parallel property and nested configuration cases +

If a conf case has a parallel property, its test cases will be spawned + simultaneously and get executed in parallel. A test case is not allowed + to execute in parallel with the end function however, which means + that the time it takes to execute a set of parallel cases is equal to the + execution time of the slowest test case in the group. A negative side + effect of running test cases in parallel is that the HTML summary pages + are not updated with links to the individual test case logs until the + end function for the conf case has finished.

+ +

A conf case nested under a parallel conf case will start executing in + parallel with previous (parallel) test cases (no matter what properties the + nested conf case has). Since, however, test cases are never executed in + parallel with the init- or the end function of the same conf case, it's + only after a nested group of cases has finished that any remaining parallel + cases in the previous conf case get spawned.

+
+ +
+ Repeated execution of test cases + +

A conf case may be repeated a certain number of times + (specified by an integer) or indefinitely (specified by forever). + The repetition may also be stopped prematurely if any or all cases + fail or succeed, i.e. if the property repeat_until_any_fail, + repeat_until_any_ok, repeat_until_all_fail, or + repeat_until_all_ok is used. If the basic repeat + property is used, status of test cases is irrelevant for the repeat + operation.

+ +

It is possible to return the status of a conf case (ok or + failed), to affect the execution of the conf case on the level above. + This is accomplished by, in the end function, looking up the value + of tc_group_properties in the Config list and checking the + result of the finished test cases. If status failed should be + returned from the conf case as a result, the end function should return + the value {return_group_result,failed}. The status of a nested conf + case is taken into account by Test Server when deciding if execution + should be repeated or not (unless the basic repeat property is used).

+ +

The tc_group_properties value is a list of status tuples, + each with the key ok, skipped and failed. The + value of a status tuple is a list containing names of test cases + that have been executed with the corresponding status as result.

+ +

Here's an example of how to return the status from a conf case:

+
+      conf_end_function(Config) ->
+          Status = ?config(tc_group_result, Config),
+          case proplists:get_value(failed, Status) of
+              [] ->                                   % no failed cases 
+	          {return_group_result,ok};
+	      _Failed ->                              % one or more failed
+	          {return_group_result,failed}
+          end.
+ +

It is also possible in the end function to check the status of + a nested conf case (maybe to determine what status the current conf case should + return). This is as simple as illustrated in the example above, only the + name of the end function of the nested conf case is stored in a tuple + {group_result,EndFunc}, which can be searched for in the status lists. + Example:

+
+      conf_end_function_X(Config) ->
+          Status = ?config(tc_group_result, Config),
+          Failed = proplists:get_value(failed, Status),
+          case lists:member({group_result,conf_end_function_Y}, Failed) of
+	        true ->
+		    {return_group_result,failed};
+                false ->                                                    
+	            {return_group_result,ok}
+          end; 
+      ...
+ +

When a conf case is repeated, the init- and end functions + are also always called with each repetition.

+
+ +
+ Shuffled test case order +

The order that test cases in a conf case are executed, is under normal + circumstances the same as the order defined in the test specification. + With the shuffle property set, however, Test Server will instead + execute the test cases in random order.

+ +

The user may provide a seed value (a tuple of three integers) with + the shuffle property: {shuffle,Seed}. This way, the same shuffling + order can be created every time the conf case is executed. If no seed value + is given, Test Server creates a "random" seed for the shuffling operation + (using the return value of erlang:now()). The seed value is always + printed to the log file of the init function so that it can be used to + recreate the same execution order in subsequent test runs.

+ +

If execution of a conf case with shuffled test cases is repeated, + the seed will not be reset in between turns.

+ +

If a nested conf case is specified in a conf case with a shuffle + property, the execution order of the nested cases in relation to the test cases + (and other conf cases) is also random. The order of the test cases in the nested + conf case is however not random (unless, of course, this one also has a + shuffle property).

+
+ +
+ Skipping test cases +

It is possible to skip certain test cases, for example if you + know beforehand that a specific test case fails. This might be + functionality which isn't yet implemented, a bug that is known but + not yet fixed or some functionality which doesn't work or isn't + applicable on a specific platform. +

+

There are several different ways to state that a test case + should be skipped:

+ + Using the {skip,What} command in a test + specification file + + Returning {skip,Reason} from the + init_per_testcase/2 function + + Returning {skip,Reason} from the specification + clause of the test case + + Returning {skip,Reason} from the execution clause + of the test case + + +

The latter of course means that the execution clause is + actually called, so the author must make sure that the test case + is not run. For more information about the different clauses in a + test case, see the chapter about writing test cases. +

+

When a test case is skipped, it will be noted as SKIPPED + in the HTML log. +

+
+
+ diff --git a/lib/test_server/doc/src/ts.xml b/lib/test_server/doc/src/ts.xml new file mode 100644 index 0000000000..0f91d3eea2 --- /dev/null +++ b/lib/test_server/doc/src/ts.xml @@ -0,0 +1,592 @@ + + + + +
+ + 2007 + 2008 + Ericsson AB, All Rights Reserved + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + The Initial Developer of the Original Code is Ericsson AB. + + + The OTP Test Server Framework + Mattias Nilsson + + + + + + + ts.xml +
+ ts + Test Server Framework for testing OTP + +

This is a framework for testing OTP. The ts module + implements the interface to all the functionality in the + framework. +

+

The framework is built on top of the Test Server Controller, + test_server_ctrl, and provides a high level operator + interface. The main features added by the framework are: +

+ + Automatic compilation of test suites and data directories + Collection of files in central directories and creation of + additional HTML pages for better overview. + Single command interface for running all available tests + Spawns a new node with correct parameters before starting + the test server + Automatically creates the parameter file needed when + running tests on remote target + +

More information about the Test Server Framework and how to run + test cases can be found in the Test Server User's Guide. +

+

For writing your own test server framework, please turn to the + reference manual for the Test Server Controller and chapter named + "Writing your own test server framework" in the Test Server User's + Guide. +

+

SETUP

+

To be able to run ts, you must first `install' + ts for the current environment. This is done by calling + ts:install/0/1/2. A file called `variables' is created + and used by ts when running test suites. It is not + recommended to edit this file, but it is possible to alter if + ts gets the wrong idea about your environment. +

+

ts:install/0 is used if the target platform is the + same as the controller host, i.e. if you run on "local target" + and no options are needed. Then running ts:install/0, ts + will run an autoconf script for your current + environment and set up the necessary variables needed by the + test suites. +

+

ts:install/1 or ts:install/2 is used if the + target platform is different from the controller host, i.e. if + you run on "remote target" or if special options are required + for your system. VxWorks is currently supported + as remote target platform. +

+

See the reference manual for detailed information about + ts:install/0/1/2. +

+

Some of the common variables in the 'variables' file are + described below. Do not make any assumptions as of what is found + in this file, as it may change at any time. +

+ + longnames

+ Set to true if the system is using fully qualified + nodenames. +
+ platform_id

+ This is the currently installed platform identification + string. +
+ platform_filename

+ This is the name used to create the final save directory + for test runs. +
+ platform_label

+ This is the string presented in the generated test + results index page. +
+ rsh_name

+ This is the rsh program to use when starting slave or + peer nodes on a remote host. +
+ erl_flags

+ Compile time flags used when compiling test suites. +
+ erl_release

+ The Erlang/OTP release being tested. +
+ 'EMULATOR'

+ The emulator being tested (e.g. beam) +
+ 'CPU'

+ The CPU in the machine running the tests, e.g. sparc. +
+ target_host

+ The target host name +
+ os

+ The target operating system, e.g. solaris2.8 +
+ target

+ The current target platform, e.g. sparc-sun-solaris2.8 +
+
+

RUNNING TESTS

+

After installing ts, you can run your test with the + ts:run/0/1/2/3/4 functions. These functions, however, + require a special directory structure to be able to find your + test suites. Both the test server and all tests must be located + under your $TESTROOT directory. The test server implementation + shall be located in the directory $TESTROOT/test_server + and for each application there must be a directory named + _test]]> containing the .spec file + and all test suites and data directories for the + application. Note that there shall only be one .spec file for + each application. +

+

$TESTROOT/test_server must be the current directory + when calling the ts:run/* function. +

+

All available tests can be found with ts:tests(). This + will list all applications for which a test specification file + _test/.spec]]> can be found. +

+

To run all these tests, use ts:run(). +

+

To run one or some of the tests, use ts:run(Tests), + where Tests is the name of the application you want to + test, or a list of such names. +

+

To run one test suite within a test, use + ts:run(Test,Suite). +

+

To run one test case within a suite, use + ts:run(Test,Suite,Case)

+

To all these functions, you can also add a list of + options. Please turn to the reference manual for the ts + module to see the valid options to use. +

+

The function ts:help() displays some simple help for + the functions in ts. Use this for quick reference. +

+

LOG FILES

+

As the execution of the test suites goes on, events are logged in + four different ways: +

+ + Text to the operator's console. + Suite related information is sent to the major log file. + Case related information is sent to the minor log file. + The HTML log file gets updated with test results. + +

Typically the operator, who may run hundreds or thousands of + test cases, doesn't want to fill the screen with details + about/from the specific test cases. By default, the operator will + only see: +

+ + A confirmation that the test has started. + + A small note about each failed test case. + + A summary of all the run test cases. + + A confirmation that the test run is complete + + Some special information like error reports and progress + reports, printouts written with erlang:display/1 or io:format/3 + specifically addressed to somewhere other than + standard_io. + +

This is enough for the operator to know, and if he wants to dig + in deeper into a specific test case result, he can do so by + following the links in the HTML presentation to take a look in the + major or minor log files. +

+

A detailed report of the entire test suite is stored in the + major logfile, the exact reason for failure, time spent etc. +

+

The HTML log file is a summary of the major log file, but gives + a much better overview of the test run. It also has links to every + test case's log file for quick viewing with a HTML browser. +

+

The minor log file contains full details of every single test + case, each one in a separate file. This way the files should be + easy to compare with previous test runs, even if the set of test + cases changes. +

+

Which information that goes where is user configurable via the + test server controller. Three threshold values determine what + comes out on screen, and in the major or minor log files. The + contents that goes to the HTML log file is fixed, and cannot be + altered. +

+ +
+ + + install() -> ok | {error, Reason} + install(TargetSystem) -> ok | {error, Reason} + install(Opts) -> ok | {error, Reason} + install(TargetSystem,Opts) -> ok | {error, Reason} + Installs the Test Server Framework + + TargetSystem = {Architecture, TargetHost} + Architecture = atom() or string() + e.g. "ose" or "vxworks_ppc603" + TargetHost = atom() or string() + The name of the target host + Opts = list() + + +

Installs and configures the Test Server Framework for + running test suites. If a remote host is to be used, the + TargetSystem argument must be given so that "cross + installation" can be done. This should be used for testing on + VxWorks or OSE/Delta. Installation is required for any of the + functions in ts to work. +

+

Opts may be one or more of +

+ + {longnames, Bool}

+ Use fully qualified hostnames for test_server and + slave nodes. Bool is true or false (default). +
+ {verbose, Level}

+ Verbosity level for test server output, set to 0, 1 or + 2, where 0 is quiet(default). +
+ {hosts, Hosts}

+ This is a list of available hosts on which to start + slave nodes. It is used when the {remote, true} + option is given to the test_server:start_node/3 + function. Also, if {require_nodenames, Num} is + contained in a test specification file, the generated + nodenames will be spread over all hosts given in this + Hosts list. The hostnames are given as atoms or + strings. +
+ {slavetargets, SlaveTargets}

+ For VxWorks and OSE/Delta only. This is a list of + available hosts where slave nodes can be started. This is + necessary because only one node can run per host in the + VxWorks environment. This is not the same as + {hosts, Hosts} because it is used for all slave nodes + - not only the ones started with {remote, true}. The + hostnames are given as atoms or strings. +
+ {crossroot, TargetErlRoot}

+ Erlang root directory on target host +

+This option is mandatory for remote targets +
+ {master, {MasterHost, MasterCookie}}

+ If target is remote and the target node is started as + a slave node, this option + indicates which master and cookie to use. The given master + will also be used as master for slave nodes started with + test_server:start_node/3. It is expected that the + erl_boot_server is started on the master node before + the test is run. If this option is not given, the test + server controller node is used as master and the + erl_boot_server is automatically started. +
+ {erl_start_args, ArgString}

Additional arguments to be used when starting the test + server controller node. ArgString will be appended to + the command line when starting the erlang node. Note that + this will only affect the startup of the controller node, + i.e. not the target node or any slave nodes + started from a test case. +
+ {ipv6_hosts, HostList}

+ This option will be inserted in the + Config parameter for each test case. HostList + is a list of hosts supporting IPv6. +
+
+
+
+ + help() -> ok + Presents simple help on the functions in ts + +

Presents simple help on the functions in ts. Useful + for quick reference.

+
+
+ + tests() -> Tests + Returns the list of available tests + +

Returns the list of available tests. This is actually just + a list of all test specification files found by looking up + "../*_test/*.spec". +

+

In each ../Name_test/ directory there should be one test + specification file named Name.spec.

+
+
+ + run() -> ok | {error, Reason} + run([all_tests|Opts]) + run(Specs) + run(Specs, Opts) + run(Spec, Module) + run(Spec, Module, Opts) + run(Spec, Module, Case) + run(Spec, Module, Case, Opts) + Runs (specified) test suite(s) + + Specs = Spec | [Spec] + Spec = atom() + Module = atom() + Case = atom() + Opts = [Opt] + Opt = batch | verbose | {verbose, Level} | {vars, Vars} | keep_topcase | cover | cover_details |{cover,CoverFile} | {cover_details,CoverFile} | {trace, TraceSpec} + Level = integer(); 0 means silent + Vars = list() of key-value tuples + CoverFile = string(); name of file listing modules to exclude from or include in cover compilation. The name must include full path to the file. + Reason = term() + + +

This function runs test suite(s)/case(s). To be able to run + any tests, ts:install must first be called to create the + variables file needed. To run a whole test specification, + only specify the name of the test specification, and all test + suite modules belonging to that test spec will be run. To run + a single module in a test specification, use the Module + argument to specify the name of the module to run and all test + cases in that module will be run, and to run a specified test + case, specify the name of the test case using the Case + argument. If called with no argument, all test specifications + available will be run. Use ts:tests/0 to see the available + test specifications. +

+

If the batch option is not given, a new xterm is + started (unix) when ts:run is called. +

+

The verbose option sets the verbosity level for test + server output. This has the same effect as if given to + ts:install/1/2

+

The vars option can be used for adding configuration + variables that are not in the variables file generated + during installation. Can be any of the Opts valid for + ts:install/1/2. +

+

The keep_topcase option forces ts to keep the + topcase in your test specification file as is. This option can + only be used if you don't give the Module or + Case parameters to ts:run. The + keep_topcase option is necessary if your topcase + contains anything other than _test"}]]>. If + the option is not used, ts will modify your topcase. +

+

The cover and cover_details options indicates + that the test shall be run with code coverage + analysis. cover_details means that analysis shall be + done on the most detailed level. If the test is run with a + remote target, this option creates a list of uncovered lines + in each cover compiled module. If the test is run with a local + target, each cover compiled module will be analysed with + cover:analyse_to_file/1. The cover options will + only create an overview of all cover compiled modules with the + number of covered and not covered lines. +

+

The CoverFile which can be given with the + cover and cover_details options must be the + filename of a file listing modules to be excluded from or + included in the cover compilation. By default, ts + believes that Spec is the name of an OTP application + and that all modules in this application shall be cover + compiled. The CoverFile can exclude modules that belong + to the application and add modules that don't belong to the + application. The file can have the following entries:

+ +{exclude, all | ExcludeModuleList}. +{include, IncludeModuleList}. +

Note that each line must end with a full + stop. ExcludeModuleList and IncludeModuleList + are lists of atoms, where each atom is a module name. +

+

If the cover or cover_details options are + given on their own, the directory _test]]> is + searched for a CoverFile named .cover]]>. If + this file is not found, Spec is assumed to be the name + of an OTP application, and all modules in the ebin + directory for the application are cover compiled. The + ebin directory is found by adding ebin to + code:lib_dir(Spec). +

+

The same cover compiled code will be loaded on all slave or + peer nodes started with test_server:start_node/3. The + exception is nodes that run an old version of OTP. If the loading + fails, the node will simply not be a part of the coverage + analysis. Note that slave and peer nodes must be stopped with + test_server:stop_node/1 for the node to be part of the + coverage analysis, else the test server will not be able to + fetch coverage data from the node. +

+

The trace option is used to turn on call trace on + target and on slave or peer nodes started with + test_server:start_node/3. TraceSpec can be the + name of a trace information file, or a list of elements like + the ones in a trace information file. Please turn to the + reference manual for test_server_ctrl:trc/1 for details + about the trace information file. +

+
+
+ + cross_cover_analyse(Level) -> ok + cross_cover_analyse([Level]) -> ok + Analyse cover data collected from all tests + +

Analyse cover data collected from all tests. +

+

See test_server_ctrl:cross_cover_analyse/1 +

+
+
+ + r() -> ok + r(Opts) -> ok + r(SpecOrSuite) -> ok + r(SpecOrSuite,Opts) -> ok + r(Suite,Case) -> ok + r(Suite,Case,Opts) -> ok + Run test suite or test case without ts installed + + SpecOrSuite = Spec | Suite + Spec = string() + "Name.spec" or "Name.spec.OsType", where OsType is vxworks + Suite = atom() + Case = atom() + Opts = [Opt] + Opt = {Cover,AppOrCoverFile} | {Cover,Application,CoverFile} + Cover = cover | cover_details + AppOrCoverFile = Application | CoverFile + Application = atom() + OTP application to cover compile + CoverFile = string() + Name of file listing modules to exclude from or include in cover compilation + + +

This function can be used to run test suites or test + cases directly, without any of the additional features added + by the test server framework. It is simply a wrapper function + for the add_dir, add_spec, add_module and + add_case functions in test_server_ctrl: +

+

r() -> add_dir(".")

+r(Spec) -> add_spec(Spec)

+r(Suite) -> add_module(Suite)

+r(Suite,Case) -> add_case(Suite,Case)

+

To use this function, it is required that the test suite is + compiled and in the code path of the node where the function + is called. The function can be used without having ts + installed. +

+

For information about the cover and + cover_details options, see test_server_ctrl:cover/2/3.

+
+
+ + index() -> ok | {error, Reason} + Updates local index page + + Reason = term() + + +

This function updates the local index page. This can be + useful if a previous test run was not completed and the index + is incomplete.

+
+
+ + clean() -> ok + clean(all) -> ok + Cleans up the log directories created when running tests. + +

This function cleans up log directories created when + running test cases. clean/0 cleans up all but the last + run of each application. clean/1 cleans up all test + runs found.

+
+
+ + estone() -> ok | {error, Reason} + estone(Opts) -> ok + Runs the EStone test + +

This function runs the EStone test. It is a shortcut for + running the test suite estone_SUITE in the + kernel application. +

+

Opts is the same as the Opts argument for the + ts:run functions.

+
+
+
+ +
+ Makefile.src in Data Directory +

If a data directory contains code which must be compiled before + the test suite is run, a makefile source called + Makefile.src can be placed in the data directory. This file + will be converted to a valid makefile by ts:run/0/1/2/3/4. +

+

The reason for generating the makefile is that you can use + variables from the variables file which was generated by + ts:install/0/1/2. All occurrences of @Key@ in + Makefile.src are substituted by the Value from + {Key,Value} found in the variables file. Example: +

+

Cut from variables:

+ + ... + {'EMULATOR',"beam"}. + {'CFLAGS',"-g -O2"}. + {'LD',"$(CC) $(CFLAGS)"}. + {'CC',"gcc"}. + ... + +

Makefile.src for compiling erlang code could look + something like this:

+ + EFLAGS=+debug_info + + all: ordsets1.@EMULATOR@ + + ordsets1.@EMULATOR@: ordsets1.erl + erlc $(EFLAGS) ordsets1.erl + +

Makefile.src for compiling c code could look + something like this:

+ + CC = @CC@ + LD = @LD@ + CFLAGS = @CFLAGS@ -I@erl_include@ @DEFS@ + CROSSLDFLAGS = @CROSSLDFLAGS@ + + PROGS = nfs_check@exe@ + + all: $(PROGS) + + nfs_check@exe@: nfs_check@obj@ + $(LD) $(CROSSLDFLAGS) -o nfs_check nfs_check@obj@ @LIBS@ + + nfs_check@obj@: nfs_check.c + $(CC) -c -o nfs_check@obj@ $(CFLAGS) nfs_check.c + +
+
+ diff --git a/lib/test_server/doc/src/why_test_chapter.xml b/lib/test_server/doc/src/why_test_chapter.xml new file mode 100644 index 0000000000..745d4218f1 --- /dev/null +++ b/lib/test_server/doc/src/why_test_chapter.xml @@ -0,0 +1,140 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Why Test + Siri Hansen + + + +
+ +
+ Goals +

It's not possible to prove that a program is correct by + testing. On the contrary, it has been formally proven that it is + impossible to prove programs in general by testing. Theoretical + program proofs or plain examination of code may be viable options + for those that wish to certify that a program is correct. The test + server, as it is based on testing, cannot be used for + certification. Its intended use is instead to (cost effectively) + find bugs. A successful test suite is one that reveals a + bug. If a test suite results in Ok, then we know very little that + we didn't know before. +

+
+ +
+ What to test? +

There are many kinds of test suites. Some concentrate on + calling every function in the interface to some module or + server. Some others do the same, but use all kinds of illegal + parameters, and verify that the server stays alive and rejects + the requests with reasonable error codes. Some test suites + simulate an application (typically consisting of a few modules of + an application), some try to do tricky requests in general, some + test suites even test internal functions. +

+

Another interesting category of test suites are the ones that + check that fixed bugs don't reoccur. When a bugfix is introduced, + a test case that checks for that specific bug should be written + and submitted to the affected test suite(s). +

+

Aim for finding bugs. Write whatever test that has the highest + probability of finding a bug, now or in the future. Concentrate + more on the critical parts. Bugs in critical subsystems are a lot + more expensive than others. +

+

Aim for functionality testing rather than implementation + details. Implementation details change quite often, and the test + suites should be long lived. Often implementation details differ + on different platforms and versions. If implementation details + have to be tested, try to factor them out into separate test + cases. Later on these test cases may be rewritten, or just + skipped. +

+

Also, aim for testing everything once, no less, no more. It's + not effective having every test case fail just because one + function in the interface changed. +

+
+ +
+ How much to test +

There is a unix shell script that counts the number of non + commented words (lines and characters too) of source code in each + application's test directory and divides with the number of such + source words in the src directory. This is a measure of how much + test code there is. +

+

There has been much debate over how much test code, compared to + production code, should be written in a project. More test code + finds more bugs, but test code needs to be maintained just like + the production code, and it's expensive to write it in the first + place. In several articles from relatively mature software + organizations that I have read, the amount of test code has been + about the same as the production code.

+

In OTP, at the time of + writing, few applications come even close to this, some have no + test code at all. +

+ +
+ Full coverage +

It is possible to cover compile the modules being tested + before running the test suites. Doing so displays which branches + of the code that are tested by the test suite, and which are + not. Many use this as a measure of a good test suite. When every + single line of source code is covered once by the test suite, + the test suite is finished. +

+

A coverage of 100% still proves nothing, though. It doesn't + mean that the code is error free, that everything is tested. For + instance, if a function contains a division, it has to be + executed at least twice. Once with parameters that cause + division by zero, and once with other parameters. +

+

High degree of coverage is good of course, it means that no + major parts of the code have been left untested. It's another + question whether it's cost effective. You're only likely to find + 50% more bugs when going from 67% to 100% coverage, but the work + (cost) is maybe 200% as large, or more, because reaching all of + those obscure branches is usually complicated. +

+

Again, the reason for testing with the test server is to find + bugs, not to create certificates of valid code. Maximizing the + number of found bugs per hour probably means not going for 100% + coverage. For some module the optimum may be 70%, for some other + maybe 250%. 100% shouldn't be a goal in itself.

+
+ +
+ User interface testing +

It is very difficult to do sensible testing of user + interfaces, especially the graphic ones. The test server has + some support for capturing the text I/O that goes to the user, + but none for graphics. There are several tools on the market + that help with this.

+
+
+
+ diff --git a/lib/test_server/doc/src/write_framework_chapter.xml b/lib/test_server/doc/src/write_framework_chapter.xml new file mode 100644 index 0000000000..2fde67132e --- /dev/null +++ b/lib/test_server/doc/src/write_framework_chapter.xml @@ -0,0 +1,166 @@ + + + + +
+ + 2002-2009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Write your own test server framework + Siri Hansen + + + + write_framework_chapter.xml
+ +
+ Introduction +

The test server controller can be interfaced from the operating + system or from within Erlang. The nature of your new framework + will decide which interface to use. If you want your framework to + start a new node for each test, the operating system interface is + very convenient. If your node is already started, going from + within Erlang might be a more flexible solution. +

+

The two methods are described below. +

+
+ +
+ Interfacing the test server controller from Erlang +

Using the test server from Erlang means that you have to start + the test server and then add test jobs. Use + test_server_ctrl:start/0 to start a local target or + test_server_ctrl:start/1 to start a remote target. The test + server is stopped by test_server_ctrl:stop/0. +

+

The argument to test_server_ctrl:start/1 is the name of a + parameter file. The parameter file specifies what type of target + to start and where to start it, as well as some additional + parameters needed for different target types. See the reference + manual for a detailed description of all valid parameters. +

+ +
+ Adding test jobs +

There are many commands available for adding test cases to + the test server's job queue:

+

+ + Single test case

+test_server_ctrl:add_case/2/3
+ Multiple test cases from same suite

+test_server_ctrl:add_cases/2/3
+ Test suite module or modules

+test_server_ctrl:add_module/1/2
+ Some or all test suite modules in a directory

+test_server_ctrl:add_dir/2/3
+ Test cases specified in a test specification file

+test_server_ctrl:add_spec/1
+
+

All test suites are given a unique name, which is usually + given when the test suite is added to the job queue. In some + cases, a default name is used, as in the case when a module is + added without a specified name. The test job name is used to + store logfiles, which are stored in the `name.logs' directory + under the current directory. +

+

See the reference manual for details about the functions for + adding test jobs. +

+
+
+ +
+ Interfacing the test server controller from the operating system. +

The function run_test/1 is your interface to the test + server controller if you wish to use it from the operating + system. You simply start an Erlang shell and invoke this function + with the -s option. run_test/1 starts the test + server, runs the test specified by the command line and stops the + test server. The argument to run_test/1 is a list of + command line flags, typically + ['KEY1', Value1, 'KEY2', Value2, ...]. + The valid command line flags are listed in the reference manual + for test_server_ctrl. +

+

A typical command line may look like this

+erl -noshell -s test_server_ctrl run_test KEY1 Value1 KEY2 Value2 ... -s erlang halt

+

Or make an alias (this is for unix/tcsh)

+alias erl_test 'erl -noshell -s test_server_ctrl run_test \\!* -s erlang halt'

+

And then use it like this

+erl_test KEY1 Value1 KEY2 Value2 ...

+

+ +
+ An Example +

An example of starting a test run from the command line

+

+

erl -name test_srv -noshell -rsh /home/super/otp/bin/ctrsh

+-pa /clearcase/otp/erts/lib/kernel/test

+-boot start_sasl -sasl errlog_type error

+-s test_server_ctrl run_test SPEC kernel.spec -s erlang halt

+

+
+
+ +
+ Framework callback functions +

By defining the environment variable + TEST_SERVER_FRAMEWORK to a module name, the framework + callback functions can be used. The framework callback functions + are called by the test server in order to let the framework interact + with the execution of the tests and to keep the framework up to + date with information about the test progress. +

+

The framework callback functions are described in the reference + manual for test_server_ctrl. +

+

Note that this topic is in an early stage of development, and + changes might occur. +

+
+ +
+ Other concerns +

Some things to think about when writing your own test server + framework: +

+ + emulator version - Make sure that the intended + version of the emulator is started. + + operating system path - If test cases use port + programs, make sure the paths are correct. + + recompilation - Make sure all test suites are fresh + compiled. + + test_server.hrl - Make sure the + test_server.hrl file is in the include path when + compiling test suites. + + running applications - Some test suites require + some applications to be running (e.g. sasl). Make sure they are + started. + + +
+
+ diff --git a/lib/test_server/doc/src/write_test_chapter.xml b/lib/test_server/doc/src/write_test_chapter.xml new file mode 100644 index 0000000000..12f0dfc361 --- /dev/null +++ b/lib/test_server/doc/src/write_test_chapter.xml @@ -0,0 +1,228 @@ + + + + +
+ + 20022009 + Ericsson AB. All Rights Reserved. + + + The contents of this file are subject to the Erlang Public License, + Version 1.1, (the "License"); you may not use this file except in + compliance with the License. You should have received a copy of the + Erlang Public License along with this software. If not, it can be + retrieved online at http://www.erlang.org/. + + Software distributed under the License is distributed on an "AS IS" + basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + the License for the specific language governing rights and limitations + under the License. + + + + Writing Test Suites + Siri Hansen + + + + write_test_chapter.xml +
+ +
+ Support for test suite authors +

The test_server module provides some useful functions + to support the test suite author. This includes: +

+ + Starting and stopping slave or peer nodes + Capturing and checking stdout output + Retrieving and flushing process message queue + Watchdog timers + Checking that a function crashes + Checking that a function succeeds at least m out of n times + Checking .app files + +

Please turn to the reference manual for the test_server + module for details about these functions. +

+
+ +
+ Test suites +

A test suite is an ordinary Erlang module that contains test + cases. It's recommended that the module has a name of the form + *_SUITE.erl. Otherwise, the directory function will not find the + modules (by default). +

+

For some of the test server support, the test server include + file test_server.hrl must be included. Never include it + with the full path, for portability reasons. Use the compiler + include directive instead. +

+

The special function all(suite) in each module is called + to get the test specification for that module. The function + typically returns a list of test cases in that module, but any + test specification could be returned. Please see the chapter + about test specifications for details about this. +

+
+ +
+ Init per test case +

In each test suite module, the functions + init_per_testcase/2 and end_per_testcase/2 must be + implemented. +

+

init_per_testcase is called before each test case in the + test suite, giving a (limited) possibility for initialization. +

+

end_per_testcase/2 is called after each test case is + completed, giving a possibility to clean up. +

+

The first argument to these functions is the name of the test + case. This can be used to do individual initialization and cleanup for + each test case. +

+

The second argument is a list of tuples called + Config. The first element in a Config tuple + should be an atom - a key value to be used for searching. + init_per_testcase/2 may modify the Config + parameter or just return it as is. Whatever is returned by + init_per_testcase/2 is given as the Config parameter to + the test case itself. +

+

The return value of end_per_testcase/2 is ignored by the + test server. +

+
+ +
+ Test cases +

The smallest unit that the test server is concerned with is a + test case. Each test case can in turn test many things, for + example make several calls to the same interface function with + different parameters. +

+

It is possible to put many or few tests into each test + case. How many things each test case tests is up to the author, + but here are some things to keep in mind. +

+

Very small test cases often leads to more code, since + initialization has to be duplicated. Larger code, especially with + a lot of duplication, increases maintenance and reduces + readability. +

+

Larger test cases make it harder to tell what went wrong if it + fails, and force us to skip larger portions of test code if a + specific part fails. These effects are accentuated when running on + multiple platforms because test cases often have to be skipped. +

+

A test case generally consists of three parts, the + documentation part, the specification part and the execution + part. These are implemented as three clauses of the same function. +

+

The documentation clause matches the argument 'doc' and + returns a list of strings describing what the test case tests. +

+

The specification clause matches the argument 'suite' + and returns the test specification for this particular test + case. If the test specification is an empty list, this indicates + that the test case is a leaf test case, i.e. one to be executed. +

+

Note that the specification clause of a test case is executed on the test server controller host. This means that if target is remote, the specification clause is probably executed on a different platform than the one tested.

+

The execution clause implements the actual test case. It takes + one argument, Config, which contains configuration + information like data_dir and priv_dir. See Data and Private Directories for + more information about these. +

+

The Config variable can also contain the + nodenames key, if requested by the require_nodenames + command in the test suite specification file. All Config + items should be extracted using the ?config macro. This is + to ensure future compatibility if the Config format + changes. See the reference manual for test_server for + details about this macro. +

+

If the execution clause crashes or exits, it is considered a + failure. If it returns {skip,Reason}, the test case is + considered skipped. If it returns {comment,String}, + the string will be added in the 'Comment' field on the HTML + result page. If the execution clause returns anything else, it is + considered a success, unless it is {'EXIT',Reason} or + {'EXIT',Pid,Reason} which can't be distinguished from a + crash, and thus will be considered a failure. +

+
+ +
+ + Data and Private Directories +

The data directory (data_dir) is the directory where the test + module has its own files needed for the testing. A compiler test + case may have source files to feed into the compiler, a release + upgrade test case may have some old and new release of + something. A graphics test case may have some icons and a test + case doing a lot of math with bignums might store the correct + answers there. The name of the data_dir is the name of + the test suite and then "_data". For example, + "some_path/foo_SUITE.beam" has the data directory + "some_path/foo_SUITE_data/". +

+

The priv_dir is the test suite's private directory. This + directory should be used when a test case needs to write to + files. The name of the private directory is generated by the test + server, which also creates the directory. +

+

Warning: Do not depend on current directory to be + writable, or to point to anything in particular. All scratch files + are to be written in the priv_dir, and all data files found + in data_dir. If the current directory has to be something + specific, it must be set with file:set_cwd/1. +

+
+ +
+ Execution environment +

Each time a test case is about to be executed, a new process is + created with spawn_link. This is so that the test case will + have no dependencies to earlier tests, with respect to process flags, + process links, messages in the queue, other processes having registered + the process, etc. As little as possible is done to change the initial + context of the process (what is created by plain spawn). Here is a + list of differences: +

+ + It has a link to the test server. If this link is removed, + the test server will not know when the test case is finished, + just wait infinitely. + + It often holds a few items in the process dictionary, all + with names starting with 'test_server_'. This is to keep + track of if/where a test case fails. + + There is a top-level catch. All of the test case code is + caught, so that the location of a crash can be reported back to + the test server. If the test case process is killed by another + process (thus the catch code is never executed) the test server + is not able to tell where the test case was executing. + + It has a special group leader implemented by the test + server. This way the test server is able to capture the io that + the test case provokes. This is also used by some of the test + server support functions. + + +

There is no time limit for a test case, unless the test case + itself imposes such a limit, by calling + test_server:timetrap/1 for example. The call can be made + in each test case, or in the init_per_testcase/2 + function. Make sure to call the corresponding + test_server:timetrap_cancel/1 function as well, e.g. in the + end_per_testcase/2 function, or else the test cases will + always fail. +

+
+ +
+ -- cgit v1.2.3