Diffstat (limited to 'lib/test_server')
89 files changed, 0 insertions, 25582 deletions
diff --git a/lib/test_server/AUTHORS b/lib/test_server/AUTHORS deleted file mode 100644 index 3212999174..0000000000 --- a/lib/test_server/AUTHORS +++ /dev/null @@ -1,12 +0,0 @@ -Original Authors and Contributors: - -Mattias Nilsson -Björn Gustavsson -Janne Lindblad -Patrik Winroth -Claes Wikström -Siri Hansen -Peter Andersson - -...and others. - diff --git a/lib/test_server/Makefile b/lib/test_server/Makefile deleted file mode 100644 index 382749d1fc..0000000000 --- a/lib/test_server/Makefile +++ /dev/null @@ -1,39 +0,0 @@ -# -# %CopyrightBegin% -# -# Copyright Ericsson AB 1996-2009. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# %CopyrightEnd% -# - -include $(ERL_TOP)/make/target.mk -include $(ERL_TOP)/make/$(TARGET)/otp.mk - -# -# Macros -# - -SUB_DIRECTORIES = src doc/src - -include vsn.mk -VSN = $(TEST_SERVER_VSN) - -SPECIAL_TARGETS = - -# -# Default Subdir Targets -# -include $(ERL_TOP)/make/otp_subdir.mk - diff --git a/lib/test_server/README b/lib/test_server/README deleted file mode 100644 index fc71c90ca8..0000000000 --- a/lib/test_server/README +++ /dev/null @@ -1,113 +0,0 @@ -=========================================================================== - OTP Test Server -=========================================================================== - -To compile the 'test_server' application you need to build and install -an Erlang/OTP system from source. Get your open source Erlang/OTP from -http://www.erlang.org/. The resulting "erlc" command must be in the -search path for commands. - -The Erlang test_server application and the example tests are to be -inserted into an existing source tree for Erlang/OTP. - -You don't run the Test Server or the tests from the source tree. -Instead, a test installation area $TESTROOT is used, with the resulting -directory structure - - $TESTROOT/test_server - $TESTROOT/<app1>_test - $TESTROOT/<app2>_test - . - . - -For more details, see the test_server documentation, which can be found in -the "$ERL_TOP/lib/test_server/doc/html" directory. 
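For example, after releasing the test_server framework and the stdlib and emulator example tests into $TESTROOT (as described in the sections below), the installation area would look roughly like this; the exact set of <app>_test directories depends on which test suites you release:

   $TESTROOT/test_server      (the Test Server framework; run ts from here)
   $TESTROOT/emulator_test    (example tests released from erts/emulator/test)
   $TESTROOT/stdlib_test      (example tests released from lib/stdlib/test)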
- - -Unpacking the sources ---------------------- -Enter your Erlang/OTP source tree and unpack the OTP Test Server and -optionally the test examples - - % cd otp_src_RXX - % gunzip -c test_server-<VSN>.tar.gz | tar xf - - % gunzip -c emulator-YYYY-MM-DD.tar.gz | tar xf - - % gunzip -c stdlib-YYYY-MM-DD.tar.gz | tar xf - - -How to build and install the OTP Test Server -------------------------------------------- -Set the ERL_TOP variable to the top directory of the source tree - - % cd otp_src_RXX - - % setenv ERL_TOP `pwd` - or - % export ERL_TOP=`pwd` - -If not done before, you need to run the configure script - - % ./configure - -Then build and install from the base directory of the test_server -application - - % cd lib/test_server - % gmake release_tests TESTROOT=<some dir> - - -How to build and install the example test suites ------------------------------------------------- -If you want to build and install the example test suites, -you build and install from the test directories - - % cd $ERL_TOP/lib/stdlib/test - % gmake release_tests TESTROOT=<some dir> - - % cd $ERL_TOP/erts/emulator/test - % gmake release_tests TESTROOT=<some dir> - - -How to run OTP test suites -------------------------- -First cd into $TESTROOT/test_server - - % cd $TESTROOT/test_server - -Install the OTP Test Server framework - - % erl - 1> ts:install(). - -Check which tests are available - - 2> ts:tests(). - [...] - -Run the collections of test suites one at a time - - 3> ts:run(emulator). (starts an xterm with an Erlang shell) - 4> ts:run(stdlib). (starts an xterm with an Erlang shell) - -or all at once - - 5> ts:run(). (the node running the tests will be in the background) - -Note that it is normal to see lots of error messages in the Erlang -shell. The tests will stress the system with lots of invalid input to -find problems in the error handling. - -Also note that a failing test case does not always indicate a bug in -Erlang/OTP. Differences in the network setup, machine configuration -etc. may cause a test case to fail or time out. - -The results of the tests are recorded in the file named "index.html" in -the "$TESTROOT/test_server" directory. You can follow the progress of -test suites not yet completed from "last_test.html". - -For more details, see the test_server documentation, which can be found in -the "$ERL_TOP/lib/test_server/doc/html" directory. diff --git a/lib/test_server/doc/html/.gitignore b/lib/test_server/doc/html/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 --- a/lib/test_server/doc/html/.gitignore +++ /dev/null diff --git a/lib/test_server/doc/man3/.gitignore b/lib/test_server/doc/man3/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 --- a/lib/test_server/doc/man3/.gitignore +++ /dev/null diff --git a/lib/test_server/doc/man6/.gitignore b/lib/test_server/doc/man6/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 --- a/lib/test_server/doc/man6/.gitignore +++ /dev/null diff --git a/lib/test_server/doc/pdf/.gitignore b/lib/test_server/doc/pdf/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 --- a/lib/test_server/doc/pdf/.gitignore +++ /dev/null diff --git a/lib/test_server/doc/src/Makefile b/lib/test_server/doc/src/Makefile deleted file mode 100644 index 8c5418aee5..0000000000 --- a/lib/test_server/doc/src/Makefile +++ /dev/null @@ -1,140 +0,0 @@ -# -# %CopyrightBegin% -# -# Copyright Ericsson AB 2002-2012. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# %CopyrightEnd% -# - -include $(ERL_TOP)/make/target.mk -include $(ERL_TOP)/make/$(TARGET)/otp.mk - -# ---------------------------------------------------- -# Application version -# ---------------------------------------------------- -include ../../vsn.mk -VSN=$(TEST_SERVER_VSN) -APPLICATION=test_server - -DOC_EXTRA_FRONT_PAGE_INFO=Important note: \ -The Test Server application is obsolete and will be removed \ -in the next major OTP release - -# ---------------------------------------------------- -# Release directory specification -# ---------------------------------------------------- -RELSYSDIR = $(RELEASE_PATH)/lib/$(APPLICATION)-$(VSN) - -# ---------------------------------------------------- -# Target Specs -# ---------------------------------------------------- -XML_APPLICATION_FILES = ref_man.xml -XML_REF3_FILES = \ - test_server_ctrl.xml \ - test_server.xml -XML_REF3_INTERNAL = \ - ts.xml -XML_REF6_FILES = test_server_app.xml - -XML_PART_FILES = \ - part.xml \ - part_notes.xml \ - part_notes_history.xml - -XML_CHAPTER_FILES = \ - basics_chapter.xml \ - run_test_chapter.xml \ - write_test_chapter.xml \ - test_spec_chapter.xml \ - example_chapter.xml \ - write_framework_chapter.xml \ - notes.xml \ - notes_history.xml - -BOOK_FILES = book.xml - -XML_FILES = $(BOOK_FILES) $(XML_APPLICATION_FILES) $(XML_REF3_FILES) $(XML_REF6_FILES) \ - $(XML_PART_FILES) $(XML_CHAPTER_FILES) - -GIF_FILES = - -# ---------------------------------------------------- - -HTML_FILES = $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%.html) \ - $(XML_PART_FILES:%.xml=$(HTMLDIR)/%.html) - -HTML_INTERNAL = $(XML_REF3_INTERNAL:%.xml=$(HTMLDIR)/%.html) - -INFO_FILE = ../../info - -MAN3_FILES = $(XML_REF3_FILES:%.xml=$(MAN3DIR)/%.3) -MAN3_INTERNAL = $(XML_REF3_INTERNAL:%.xml=$(MAN3DIR)/%.3) -MAN6_FILES = $(XML_REF6_FILES:%_app.xml=$(MAN6DIR)/%.6) - -HTML_REF_MAN_FILE = $(HTMLDIR)/index.html - -TOP_PDF_FILE = $(PDFDIR)/test_server-$(VSN).pdf - -# ---------------------------------------------------- -# FLAGS -# ---------------------------------------------------- -XML_FLAGS += -DVIPS_FLAGS += - -# ---------------------------------------------------- -# Targets -# ---------------------------------------------------- -$(HTMLDIR)/%.gif: %.gif - $(INSTALL_DATA) $< $@ - -docs: pdf html man - -pdf: $(TOP_PDF_FILE) - -html: gifs $(HTML_REF_MAN_FILE) - -man: $(MAN3_FILES) $(MAN3_INTERNAL) $(MAN6_FILES) - -gifs: $(GIF_FILES:%=$(HTMLDIR)/%) - -debug opt: - -clean clean_docs: - rm -rf $(HTMLDIR)/* - rm -f $(MAN3DIR)/* - rm -f $(MAN6DIR)/* - rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo) - rm -f errs core *~ - -# ---------------------------------------------------- -# Release Target -# ---------------------------------------------------- -include $(ERL_TOP)/make/otp_release_targets.mk - -release_docs_spec: docs - $(INSTALL_DIR) "$(RELSYSDIR)/doc/pdf" - $(INSTALL_DATA) $(TOP_PDF_FILE) "$(RELSYSDIR)/doc/pdf" - $(INSTALL_DIR) "$(RELSYSDIR)/doc/html" - $(INSTALL_DATA) $(HTMLDIR)/* \ - 
"$(RELSYSDIR)/doc/html" - $(INSTALL_DATA) $(INFO_FILE) "$(RELSYSDIR)" - $(INSTALL_DIR) "$(RELEASE_PATH)/man/man3" - $(INSTALL_DATA) $(MAN3_FILES) "$(RELEASE_PATH)/man/man3" - $(INSTALL_DIR) "$(RELEASE_PATH)/man/man6" - $(INSTALL_DATA) $(MAN6_FILES) "$(RELEASE_PATH)/man/man6" - -release_spec: - -release_tests_spec: diff --git a/lib/test_server/doc/src/basics_chapter.xml b/lib/test_server/doc/src/basics_chapter.xml deleted file mode 100644 index 9e9f38aab4..0000000000 --- a/lib/test_server/doc/src/basics_chapter.xml +++ /dev/null @@ -1,215 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server Basics</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date></date> - <rev></rev> - <file>basics_chapter.xml</file> - </header> - - <section> - <title>Introduction</title> - <p><em>Test Server</em> is a portable test tool for automated - testing of Erlang programs and OTP applications. It provides an - interface for running test programs directly with Test Server - as well as an interface for integrating Test Server - with a framework application. The latter makes it possible to use - Test Server as the engine of a higher level test tool - application.</p> - - <p>It is strongly recommended that Test Server be used from inside - a framework application, rather than interfaced directly for - running test programs. Test Server can be pretty difficult to use - since it's a very general and quite extensive and complex - application. Furthermore, the <c>test_server_ctrl</c> functions - are not meant to be used from within the actual test programs. The - framework should handle communication with Test Server and deal - with the more complex aspects of this interaction automatically so - that a higher level interface may be provided for the tester. For - test tool usage to be productive, a simpler, more intuitive and - (if required) more specific interface is required than what Test Server - can provide.</p> - - <p>OTP delivers a general purpose framework for Test Server, called - <em>Common Test</em>. This application is a tool well suited for - automated black box testing of target systems of <em>any kind</em> - (not necessarily implemented in Erlang). Common Test is also a very - useful tool for white box testing of Erlang programs and OTP - applications. Unless a more specific functionality and/or user - interface is required (in which case you might need to implement - your own framework), Common Test should do the job for - you. Please read the Common Test User's Guide and reference manual - for more information.</p> - - <p>Under normal circumstances, knowledge about the Test Server - application is not required for using the Common Test framework. 
- However, if you want to use Test Server without a framework, - or learn how to integrate it with your own framework, please read on... - </p> - </section> - <section> - <title>Getting started</title> - <p>Testing when using Test Server is done by running test - suites. A test suite is a number of test cases, where each test - case tests one or more things. The test case is the smallest unit - that the test server deals with. One or more test cases are - grouped together into one ordinary Erlang module, which is called - a test suite. Several test suite modules can be grouped together - in special test specification files representing whole application - and/or system test "jobs". - </p> - <p>The test suite Erlang module must follow a certain interface, - which is specified by Test Server. See the section on writing - test suites for details about this. - </p> - <p>Each test case is considered a success if it returns to the - caller, no matter what the returned value is. An exception to this - is the return value <c>{skip, Reason}</c> which indicates that the - test case is skipped. A failure is specified as a crash, no matter - what the crash reason is. - </p> - <p>As a test suite runs, all information (including output to - stdout) is recorded in several different log files. A minimum of - information is displayed to the user console. This only includes - start and stop information, plus a note for each failed test case. - </p> - <p>The result from each test case is recorded in an HTML log file - which is created for each test run. Every test case gets one row - in a table presenting total time, whether the case was successful - or not, if it was skipped, and possibly also a comment. The HTML - file has links to each test case's logfile, which may be viewed - from e.g. Netscape or any other HTML-capable browser. - </p> - <p>The Test Server consists of two parts: - </p> - <list type="bulleted"> - <item>The part that executes the test suites and - provides support for the test suite author is called - <c>test_server</c>. This is described in the chapter about - writing test cases in this user's guide, and in the reference - manual for the <c>test_server</c> module.</item> - <item>The controlling part, which provides the low level - operator interface, starts and stops slave nodes and writes - log files, is called - <c>test_server_ctrl</c>. The Test Server Controller should not - be used directly when running tests. Instead, a framework built - on top of it should be used. More information - about how to write your own framework can be found - in this user's guide and in the reference manual for the - <c>test_server_ctrl</c> module.</item> - </list> - </section> - - <section> - <title>Definition of terms</title> - <taglist> - <tag><em>conf(iguration) case</em></tag> - <item>This is a group of test cases which need some specific - configuration. A conf case contains an initiation function which - sets up a specific configuration, one or more test cases using - this configuration, and a cleanup function which restores the - configuration. A conf case is specified in a test specification - either like this: <c>{conf,InitFunc,ListOfCases,CleanupFunc}</c>, - or this: <c>{conf,Properties,InitFunc,ListOfCases,CleanupFunc}</c> - </item> - <tag><em>datadir</em></tag> - <item>Data directory for a test suite. This directory contains - any files used by the test suite, e.g. additional Erlang - modules, C code or data files. 
If the data directory contains - code which must be compiled before the test suite is run, it - should also contain a makefile source called Makefile.src - defining how to compile. - </item> - <tag><em>documentation clause</em></tag> - <item>One of the function clauses in a test case. This clause - shall return a list of strings describing what the test case - tests. - </item> - <tag><em>execution clause</em></tag> - <item>One of the function clauses in a test case. This clause - implements the actual test case, i.e. calls the functions that - shall be tested and checks results. The clause shall crash if it - fails. - </item> - <tag><em>major log file</em></tag> - <item>This is the test suites log file. - </item> - <tag><em>Makefile.src</em></tag> - <item>This file is used by the test server framework to generate - a makefile for a datadir. It contains some special characters - which are replaced according to the platform currently tested. - </item> - <tag><em>minor log file</em></tag> - <item>This is a separate log file for each test case. - </item> - <tag><em>privdir</em></tag> - <item>Private directory for a test suite. This directory should - be used when the test suite needs to write to files. - </item> - <tag><em>skip case</em></tag> - <item>A test case which shall be skipped. - </item> - <tag><em>specification clause</em></tag> - <item>One of the function clauses in a test case. This clause - shall return an empty list, a test specification or - <c>{skip,Reason}</c>. If an empty list is returned, it means - that the test case shall be executed, and so it must also have - an execution clause. - </item> - <tag><em>test case</em></tag> - <item>A single test included in a test suite. Typically it tests - one function in a module or application. A test case is - implemented as a function in a test suite module. The function - can have three clauses, the documentation-, specification- and - execution clause. - </item> - <tag><em>test specification</em></tag> - <item>A specification of which test suites and test cases to - run. There can be test specifications on three different levels - in a test. The top level is a test specification file which - roughly specifies what to test for a whole application. Then - there is a test specification for each test suite returned from - the <c>all(suite)</c> function in the suite. And there can also - be a test specification returned from the specification clause - of a test case. - </item> - <tag><em>test specification file</em></tag> - <item>This is a text file containing the test specification for - an application. The file has the extension ".spec" or - ".spec.Platform", where Platform is e.g. "vxworks". - </item> - <tag><em>test suite</em></tag> - <item>An erlang module containing a collection of test cases for - a specific application or module. - </item> - <tag><em>topcase</em></tag> - <item>The first "command" in a test specification file. This - command contains the test specification, like this: - <c>{topcase,TestSpecification}</c></item> - </taglist> - </section> -</chapter> - diff --git a/lib/test_server/doc/src/book.xml b/lib/test_server/doc/src/book.xml deleted file mode 100644 index 6eb7daae1a..0000000000 --- a/lib/test_server/doc/src/book.xml +++ /dev/null @@ -1,50 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE book SYSTEM "book.dtd"> - -<book xmlns:xi="http://www.w3.org/2001/XInclude"> - <header titlestyle="normal"> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. 
All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date>2002-07-11</date> - <rev></rev> - <file>book.xml</file> - </header> - <insidecover> - </insidecover> - <pagetext>Test Server</pagetext> - <preamble> - <contents level="2"></contents> - </preamble> - <parts lift="no"> - <xi:include href="part.xml"/> - </parts> - <applications> - <xi:include href="ref_man.xml"/> - </applications> - <releasenotes> - <xi:include href="notes.xml"/> - </releasenotes> - <listofterms></listofterms> - <index></index> -</book> - diff --git a/lib/test_server/doc/src/example_chapter.xml b/lib/test_server/doc/src/example_chapter.xml deleted file mode 100644 index ec152fdd6c..0000000000 --- a/lib/test_server/doc/src/example_chapter.xml +++ /dev/null @@ -1,151 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Examples</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date></date> - <rev></rev> - <file>example_chapter.xml</file> - </header> - - <section> - <title>Test suite</title> - <code type="none"> --module(my_SUITE). - --export([all/1, - not_started/1, not_started_func1/1, not_started_func2/1, - start/1, stop/1, - func1/1, func2/1 - ]). - --export([init_per_testcase/2, end_per_testcase/2]). - --include("test_server.hrl"). - --define(default_timeout, ?t:minutes(1)). - -init_per_testcase(_Case, Config) -> - Dog=?t:timetrap(?default_timeout), - [{watchdog, Dog}|Config]. -end_per_testcase(_Case, Config) -> - Dog=?config(watchdog, Config), - ?t:timetrap_cancel(Dog), - ok. - -all(suite) -> - %% Test specification on test suite level - [not_started, - {conf, start, [func1, func2], stop}]. - -not_started(suite) -> - %% Test specification on test case level - [not_started_func1, not_started_func2]; -not_started(doc) -> - ["Testing all functions when application is not started"]. -%% No execution clause unless the specification clause returns []. - - -not_started_func1(suite) -> - []; -not_started_func1(doc) -> - ["Testing function 1 when application is not started"]. 
-not_started_func1(Config) when list(Config) -> - {error, not_started} = myapp:func1(dummy_ref,1), - {error, not_started} = myapp:func1(dummy_ref,2), - ok. - -not_started_func2(suite) -> - []; -not_started_func2(doc) -> - ["Testing function 2 when application is not started"]. -not_started_func2(Config) when list(Config) -> - {error, not_started} = myapp:func2(dummy_ref,1), - {error, not_started} = myapp:func2(dummy_ref,2), - ok. - - -%% No specification clause needed for an init function in a conf case!!! -start(doc) -> - ["Testing start of my application."]; -start(Config) when list(Config) -> - Ref = myapp:start(), - case erlang:whereis(my_main_process) of - Pid when pid(Pid) -> - [{myapp_ref,Ref}|Config]; - undefined -> - %% Since this is the init function in a conf case, the rest of the - %% cases in the conf case will be skipped if this case fails. - ?t:fail("my_main_process did not start") - end. - -func1(suite) -> - []; -func1(doc) -> - ["Test that func1 returns ok when argument is 1 and error if argument is 2"]; -func1(Config) when list(Config) -> - Ref = ?config(myapp_ref,Config), - ok = myapp:func1(Ref,1), - error = myapp:func1(Ref,2), - ok. - -func2(suite) -> - []; -func2(doc) -> - ["Test that func1 returns ok when argument is 3 and error if argument is 4"]; -func2(Config) when list(Config) -> - Ref = ?config(myapp_ref,Config), - ok = myapp:func2(Ref,3), - error = myapp:func2(Ref,4), - ok. - -%% No specification clause needed for a cleanup function in a conf case!!! -stop(doc) -> - ["Testing termination of my application"]; -stop(Config) when list(Config) -> - Ref = ?config(myapp_ref,Config), - ok = myapp:stop(Ref), - case erlang:whereis(my_main_process) of - undefined -> - lists:keydelete(myapp_ref,1,Config); - Pid when pid(Pid) -> - ?t:fail("my_main_process did not stop") - end. - </code> - </section> - - <section> - <title>Test specification file</title> - <p><em><c>myapp.spec:</c></em></p> - <code type="none"> -{topcase, {dir, "../myapp_test"}}. % Test specification on top level </code> - <p><em><c>myapp.spec.vxworks:</c></em></p> - <code type="none"> -{topcase, {dir, "../myapp_test"}}. % Test specification on top level -{skip,{my_SUITE,func2,"Not applicable on VxWorks"}}. </code> - </section> -</chapter> - - diff --git a/lib/test_server/doc/src/fascicules.xml b/lib/test_server/doc/src/fascicules.xml deleted file mode 100644 index 37feca543f..0000000000 --- a/lib/test_server/doc/src/fascicules.xml +++ /dev/null @@ -1,18 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE fascicules SYSTEM "fascicules.dtd"> - -<fascicules> - <fascicule file="part" href="part_frame.html" entry="no"> - User's Guide - </fascicule> - <fascicule file="ref_man" href="ref_man_frame.html" entry="yes"> - Reference Manual - </fascicule> - <fascicule file="part_notes" href="part_notes_frame.html" entry="no"> - Release Notes - </fascicule> - <fascicule file="" href="../../../../doc/print.html" entry="no"> - Off-Print - </fascicule> -</fascicules> - diff --git a/lib/test_server/doc/src/notes.xml b/lib/test_server/doc/src/notes.xml deleted file mode 100644 index b48bda94d0..0000000000 --- a/lib/test_server/doc/src/notes.xml +++ /dev/null @@ -1,1694 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2004</year><year>2013</year> - <holder>Ericsson AB. 
All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>APPLICATION Release Notes</title> - <prepared>Peter Andersson</prepared> - <responsible>Peter Andersson</responsible> - <docno></docno> - <approved></approved> - <checked></checked> - <date>2007-11-30</date> - <rev>A</rev> - <file>notes.xml</file> - </header> - -<section><title>Test_Server 3.9.1</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - When generating Makefile from Makefile.src, - ts_lib:get_arg/4 earlier removed all spaces in the - extracted argument. The code was probably meant for - removing leading and trailing spaces only, and is now - corrected to do so.</p> - <p> - Own Id: OTP-13015</p> - </item> - <item> - <p> - With the Common Test 'create_priv_dir' start option set - to 'auto_per_tc', the name of the priv directory for a - configuration function could clash with the name of the - priv directory for a test case, which would cause Test - Server failure. This error has been corrected.</p> - <p> - Own Id: OTP-13181</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.9</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - The status of an aborted test due to test suite - compilation error has changed from 'auto_skipped' to - 'failed'. This affects both the textual log file, event - handling and CT hook callbacks. The logging of - compilation failures has also been improved, especially - in the case of multiple test suites failing compilation.</p> - <p> - Own Id: OTP-10816</p> - </item> - <item> - <p> - The Test Server source code parser (erl2html2) failed to - handle the macro tuple in the syntax tree returned by - epp_dodger. This error has been corrected.</p> - <p> - Own Id: OTP-12740</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - The Test Server application has been marked as obsolete - and will be removed from OTP in the next major release - (OTP 19.0).</p> - <p> - Own Id: OTP-10923 Aux Id: OTP-12705 </p> - </item> - <item> - <p> - When running OTP tests using the ts interface, it is now - possible to specify so called test categories per OTP - application. A test category is represented by a CT test - specification and defines an arbitrary subset of existing - test suites, groups and cases. Examples of test - categories are 'smoke' (smoke tests) and 'bench' - (benchmarks). (Call ts:help() for more info). 
Also, - functions for reading terms from the current test - specification during test, ct:get_testspec_terms/0 and - ct:get_testspec_terms/1, have been implemented.</p> - <p> - Own Id: OTP-11962</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.8.1</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - If the last expression in a test case causes a timetrap - timeout, the stack trace is ignored and not printed to - the test case log file. This happens because the - {Suite,TestCase,Line} info is not available in the stack - trace in this scenario, due to tail call elimination. - Common Test has been modified to handle this situation by - inserting a {Suite,TestCase,last_expr} tuple in the - correct place and printing the stack trace as expected.</p> - <p> - Own Id: OTP-12697 Aux Id: seq12848 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.8</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - When installing test suites in a cross compilation - environment, ts_install was not able to read the values - of the environment variables specified in the - configuration file. This has been fixed.</p> - <p> - Own Id: OTP-11441</p> - </item> - <item> - <p> - Printouts by means of ct:log/2/3 or ct:pal/2/3 from the - hook functions on_tc_fail/2 and on_tc_skip/2 would (quite - unexpectedly) end up in the "unexpected i/o" log file - instead of in the test case log file. This behaviour has - been changed so that now, all printouts (including stdio - printouts) from these hook functions will be routed to - the test case log file.</p> - <p> - Own Id: OTP-12468</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - The format of the information printed on top of the test - case (and configuration function) log file has been - slightly modified, mainly in order to make the start - configuration data easier to read and interpret.</p> - <p> - Own Id: OTP-12518 Aux Id: seq12808 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.7.2</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - The source code to html code generator in Test Server - (and Common Test) would fail to generate anchors in the - html code for functions with non-expandable macros, - resulting in bad html links to such functions. This - correction lets the code generator ignore macros that - can't be expanded (i.e. not pre-process them), so that - correct anchors will always be produced.</p> - <p> - Own Id: OTP-11766 Aux Id: seq12556 </p> - </item> - <item> - <p> - Make sure to install .hrl files when needed</p> - <p> - Own Id: OTP-12197</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Distribute <c>autoconf</c> helpers to applications at - build time instead of having multiple identical copies - committed in the repository.</p> - <p> - Own Id: OTP-12348</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.7.1</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - The mechanism for running code cover analysis with - common_test has been improved. Earlier, if a test run - consisted of multiple tests, cover would be started and - stopped for each test. 
This would give "intermediate" - cover logs available from the "Coverage log" link on the - test suite result pages. To accumulate cover data over - all tests, the 'export' option had to be used in the - cover spec file. This was not well documented, and the - functionality was quite confusing.</p> - <p> - Using the 'nodes' option in the cover spec file would - fail when the test run consisted of multiple tests, since - the specified nodes would only be included in the cover - analysis of the first test.</p> - <p> - The repeated compilation and analysis of the same modules - was also very time consuming.</p> - <p> - To overcome these problems, ct will now only cover - compile and analyze modules once per test run, i.e. once - for each cover spec file. The log file is available via a - new button on the top level index page. The old "Coverage - log" links on the test suite result pages still exist, - but they all point to the same log containing the - accumulated result.</p> - <p> - Own Id: OTP-11971</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.7</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - Application upgrade (appup) files are corrected for the - following applications: </p> - <p> - <c>asn1, common_test, compiler, crypto, debugger, - dialyzer, edoc, eldap, erl_docgen, et, eunit, gs, hipe, - inets, observer, odbc, os_mon, otp_mibs, parsetools, - percept, public_key, reltool, runtime_tools, ssh, - syntax_tools, test_server, tools, typer, webtool, wx, - xmerl</c></p> - <p> - A new test utility for testing appup files is added to - test_server. This is now used by most applications in - OTP.</p> - <p> - (Thanks to Tobias Schlager)</p> - <p> - Own Id: OTP-11744</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Calls to erlang:open_port/2 with 'spawn' are updated to - handle space in the command path.</p> - <p> - Own Id: OTP-10842</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.6.4</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p>The way Common Test handles skipping of test cases has - been updated. In previous versions, returning - <c>{skip,Reason}</c> from a configuration function (such - as init_per_suite or init_per_group), resulted in all - affected test cases getting skipped with status - <c>auto_skipped</c>. This was inappropriate, since this - status is supposed to be used to inform that Common Test - has taken the initiative to skip something (e.g. a test - case group if init_per_group failed). 
Therefore, in this - version of Common Test, whenever the user skips a suite, - group, or individual test case (by means of a - configuration function or test specification term), the - affected test cases get the status <c>user_skipped</c> - instead.</p> <p>This update has meant a few changes that - may affect Common Test users in various ways:</p> <list> - <item>The test results and statistics will be affected, - which is important to know when running regression tests - and comparing results to previous test runs.</item> - <item>Users that read or parse the textual log file - <c>suite.log</c> will notice that an auto skipped - function is now reported as <c>auto_skipped</c> rather - than <c>skipped</c> as before.</item> <item>When - <c>require</c> fails in an info function (such as suite/0 - or group/1), all affected configuration functions and - test cases are marked as <c>auto_skipped</c>.</item> - <item>If Common Test detects an error in the test suite - (such as e.g. an invalid all/0 function), all affected - configuration functions and test cases are marked as - <c>auto_skipped</c>.</item> <item>If a repeated test run - session reaches a deadline with <c>force_stop</c> - enabled, all remaining test cases are marked as - <c>auto_skipped</c> rather than <c>user_skipped</c> as - before.</item> <item>The event messages that Common Test - generates during test runs have been affected by this - update. For details see OTP-11524.</item> </list> - <p> - Own Id: OTP-11305 Aux Id: OTP-11524 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.6.3</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - Test Server installed an error handler (test_server_h) - only to be able to write the name of the current test - case to stdout whenever it received an error- or progress - report. This functionality was not useful and has been - removed. The built-in Common Test hook, cth_log_redirect, - has instead been improved to now also tag all error- and - progress reports in the log with suite-, group-, and/or - test case name.</p> - <p> - Own Id: OTP-11263 Aux Id: seq12251 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - A new log, the "Pre- and Post Test I/O Log", has been - introduced, which makes it possible to capture error- and - progress reports, as well as printouts made with ct:log/2 - and ct:pal/2, before and after a test run. (Some minor - improvements of the logging system have been made at the - same time). Links to the new log are found on the Common - Test Framework Log page. The Common Test User's Guide has - been updated with information about the new log and also - with a new section on how to synchronize external - applications with Common Test by means of the CT Hook - init and terminate functions.</p> - <p> - Own Id: OTP-11272</p> - </item> - </list> - </section> - - - <section><title>Known Bugs and Problems</title> - <list> - <item> - <p> - Test Server: Report auto_skipped in major log.</p> - <p> - Own Id: OTP-11297</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.6.2</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - Some unused code related to remote targets is removed, - and documentation is updated.</p> - <p> - Own Id: OTP-10607 Aux Id: kunagi-338 [249] </p> - </item> - <item> - <p> - A bug in test_server_gl caused io requests containing - invalid data (i.e. 
not unicode:chardata()) to hang, since - no io reply was sent. This has been corrected.</p> - <p> - Own Id: OTP-10991</p> - </item> - <item> - <p> - Common Test would, in case of timetrap error, print a - warning in the log if end_per_testcase wasn't implemented - in the suite, even though it's an optional function. This - printout has been removed.</p> - <p> - Own Id: OTP-11052</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - The '-force_stop' flag to use with time-limited repeats - of test runs can now be used with a new 'skip_rest' - option which causes the rest of the test cases in the - ongoing test job to be skipped when the time limit is - reached. E.g. 'ct_run -spec xxx -duration 010000 - -force_stop skip_rest'</p> - <p> - Own Id: OTP-10856 Aux Id: OTP-10832 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.6.1</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - The unicode update of test_server for R16A introduced a - few potential errors when logging to files. Sometimes ~tp - or ~ts was used for formatting also when writing to files - that were not opened with the {encoding,utf8} option. If - then the argument contained unicode characters above 255, - the file descriptor would crash. This has been corrected - by the following modifications:</p> <list> <item> Since the - 'unexpected_io' log file is used only when the test case - HTML file is not available (e.g. between test cases), - this file is now also a HTML file and as other - test_server HTML logs it is always UTF-8 encoded </item> - <item> Since it is possible to change which information - is going to which log file (with - test_server_ctrl:set_levels/3), we do not have full - control over which information is written to which file. - This means that any printout could be written to the - 'major' log file (suite.log), which was earlier encoded - as latin1. To avoid crashing this file descriptor due to - unicode strings, the 'major' log file is now also encoded - in UTF-8 (possible incopatibility). </item> <item> The - cross_cover.info file is no longer a text file which can - be read with file:consult/1, instead it is written as a - pure binary file using term_to_binary when writing and - binary_to_term when reading. </item> <item> The encoding - of the file named 'last_name', which only content is the - path to the last run.<timestamp> directory, is now - dependent on the file name mode of the VM. If file names - are expected to be unicode, then the 'last_name' file is - UTF-8 encoded, else it is latin1 encoded. </item> </list> - <p> - Also, ~tp has been changed back to ~p unless it is - somehow likely that the argument includes strings. It is - not obvious that this is the correct thing to do, but - some decission had to be taken...</p> - <p> - Own Id: OTP-10780</p> - </item> - <item> - <p> - Using the force_stop flag/option to interrupt a test run - caused a crash in Common Test. This problem has been - solved.</p> - <p> - Own Id: OTP-10832</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.6</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - Line numbering of erlang files that were not correctly - indented could be wrong after coverting to html with - erl2html2:convert/[2,3] (the source code pointed to from - the test case). 
This has been corrected.</p> - <p> - Also, there are now link targets for each line and not - only for each 10th line, and link targets for functions - now include the arity and not only the function name - (e.g. func/1 has a link target "func-1").</p> - <p> - Own Id: OTP-9710 Aux Id: seq11945, kunagi-201 [112] </p> - </item> - <item> - <p> - Severe errors detected by <c>test_server</c> (e.g. if log - files directories cannot be created) will now be reported - to <c>common_test</c> and noted in the <c>common_test</c> - logs.</p> - <p> - Own Id: OTP-9769 Aux Id: kunagi-202 [113] </p> - </item> - <item> - <p> - The earlier undocumented cross cover feature for - accumulating cover data over multiple tests has now been - fixed and documented.</p> - <p> - Own Id: OTP-9870 Aux Id: kunagi-206 [117] </p> - </item> - <item> - <p> - If the test suite itself was included in code coverage - analysis, then the test_server would not manage to set - data_dir correctly for the test. This has been corrected.</p> - <p> - Own Id: OTP-9956 Aux Id: kunagi-207 [118] </p> - </item> - <item> - <p> - Any call to test_server:break/1 should cancel all active - timetramps. However, in some cases - Suite:end_per_testcase/2 is executed on a different - process than the test case itself, and if - test_server:break/1 was called from there, the timetraps - were not cancelled. This has been corrected.</p> - <p> - Own Id: OTP-10046 Aux Id: kunagi-174 [85] </p> - </item> - <item> - <p>When a test case failed because of a timetrap time - out, the <c>Config</c> data for the case was lost in the - following call to <c>end_per_testcase/2</c>, and also in - calls to the CT Hook function - <c>post_end_per_testcase/4</c>. This problem has been - solved and the <c>Config</c> data is now correctly passed - to the above functions after a timetrap timeout - failure.</p> - <p> - Own Id: OTP-10070 Aux Id: kunagi-175 [86] </p> - </item> - <item> - <p>In test_server, the same process would supervise the - currently running test case and be group leader (and IO - server) for the test case. Furthermore, when running - parallel test cases, new temporary supervisor/group - leader processes were spawned and the process that was - group leader for sequential test cases would not be - active. That would lead to several problems:</p> - <p>* Processes started by init_per_suite will inherit the - group leader of the init_per_suite process (and that - group leader would not process IO requests when parallel - test cases was running). If later a parallel test case - caused such a processto print using (for example) - io:format/2, the calling would hang.</p> - <p>* Similarly, if a process was spawned from a parallel - test case, it would inherit the temporary group leader - for that parallel test case. If that spawned process - later - when the group of parallel tests have finished - - attempted to print something, its group leader would be - dead and there would be <c>badarg</c> exception.</p> - <p>Those problems have been solved by having group - leaders separate from the processes that supervises the - test cases, and keeping temporary group leader process - for parallel test cases alive until no more process in - the system use them as group leaders.</p> - <p>Also, a new <c>unexpected_io.log</c> log file - (reachable from the summary page of each test suite) has - been introduced. 
All unexpected IO will be printed into - it(for example, IO to a group leader for a parallel test - case that has finished).</p> - <p> - Own Id: OTP-10101 Aux Id: OTP-10125 </p> - </item> - <item> - <p> - The stability of <c>common_test</c> and - <c>test_server</c> when running test cases in parallel - has been improved.</p> - <p> - Own Id: OTP-10480 Aux Id: kunagi-318 [229] </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Added a general framework for executing benchmarks of - Erlang/OTP. Benchmarks for the Erlang VM and mnesia have - been incorporated in the framework. </p> - <p> - For details about how to add more benchmarks see - $ERL_TOP/HOWTO/BENCHMARKS.md in the source distribution.</p> - <p> - Own Id: OTP-10156</p> - </item> - <item> - <p> - Update common test modules to handle Unicode:</p> <list> - <item> Use UTF-8 encoding for all HTML files, except the - HTML version of the test suite generated with - erl2html2:convert, which will have the same encoding as - the original test suite (.erl) file. </item> <item> - Encode link targets in HTML files with - test_server_ctrl:uri_encode/1. </item> <item> Use unicode - modifier 't' with ~s when appropriate. </item> <item> Use - unicode:characters_to_list and - unicode:characters_to_binary for conversion between - binaries and strings instead of binary_to_list and - list_to_binary. </item> </list> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.5.3</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - test_server_h will now recognize info_reports written by - ct connection handlers (according to the description in - cth_conn_log) and ignore them as they will be completely - handled by by ct_conn_log_h.</p> - <p> - Earlier test_server_h would print a tag (testcase name) - before forwarding the report to error_logger_tty_h. This - would cause lots of tags in the log with no info report - following (since error_logger_tty_h did not handle them).</p> - <p> - Own Id: OTP-10571</p> - </item> - </list> - </section> - - - <section><title>Known Bugs and Problems</title> - <list> - <item> - <p> - Restore Config data if lost when test case fails.</p> - <p> - Own Id: OTP-10070 Aux Id: kunagi-175 [86] </p> - </item> - <item> - <p> - IO server error in test_server.</p> - <p> - Own Id: OTP-10125 Aux Id: OTP-10101, kunagi-177 [88] </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.5.2</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - The documentation has been updated with the latest - changes for the test_server_ctrl:report/2 function.</p> - <p> - Own Id: OTP-10086 Aux Id: seq12066 </p> - </item> - <item> - <p> - The ct:get_status/0 function failed to report status if a - parallel test case group was running at the time of the - call. This has been fixed and the return value for the - function has been updated. Please see the ct reference - manual for details.</p> - <p> - Own Id: OTP-10172</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - It is now possible to sort the HTML tables by clicking on - the header elements. In order to reset a sorted table, - the browser window should simply be refreshed. This - feature requires that the browser supports javascript, - and has javascript execution enabled. 
If the 'ct_run - -basic_html' flag is used, no javascript code is included - in the generated HTML code.</p> - <p> - Own Id: OTP-9896 Aux Id: seq12034, OTP-9835 </p> - </item> - <item> - <p> - Verbosity levels for log printouts has been added. This - makes it possible to specify preferred verbosity for - different categories of log printouts, as well as general - printouts (such as standard IO), to allow control over - which strings get printed and which get ignored. New - versions of the Common Test logging functions, ct:log, - ct:pal and ct:print, have been introduced, with a new - Importance argument added. The Importance value is - compared to the verbosity level at runtime. More - information can be found in the chapter about Logging in - the Common Test User's Guide.</p> - <p> - Own Id: OTP-10067 Aux Id: seq12050 </p> - </item> - <item> - <p> - The Erlang/OTP test runner ts has been extended to allow - cross compilation of test suites. To cross compile the - test suites first follow the normal cross compilation - procedures and release the tests on the build host. Then - install ts using an xcomp specification file and compile - test suites using ts:compile_testcases/0. For more - details see $ERL_TOP/xcomp/README.md.</p> - <p> - Own Id: OTP-10074</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.5.1</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - After a test case timeout or abortion, the - end_per_testcase function executes on a new dedicated - process. The group leader for this process should be set - to the IO server for the test case, which was not done - properly. The result of this error was that no warnings - about end_per_testcase failing or timing out were ever - printed in the test case log. Also, help functions such - as e.g. test_server:stop_node/1, attempting to - synchronize with the IO server, would hang. The fault has - been corrected.</p> - <p> - Own Id: OTP-9666</p> - </item> - <item> - <p> - A deadlock situation could occur if Common Test is - forwarding error_handler printouts to Test Server at the - same time a new test case is starting. This error has - been fixed.</p> - <p> - Own Id: OTP-9894</p> - </item> - <item> - <p> - When a test case was killed because of a timetrap - timeout, the current location (suite, case and line) was - not printed correctly in the log files. This has been - corrected.</p> - <p> - Own Id: OTP-9930 Aux Id: seq12002 </p> - </item> - <item> - <p> - Test Server and Common Test would add new error handlers - with each test run and fail to remove previously added - ones. In the case of Test Server, this would only happen - if SASL was not running on the test node. This has been - fixed.</p> - <p> - Own Id: OTP-9941 Aux Id: seq12009 </p> - </item> - <item> - <p> - If a test case process was terminated due to an exit - signal from a linked process, Test Server failed to - report the correct name of the suite and case to the - framework. This has been corrected.</p> - <p> - Own Id: OTP-9958 Aux Id: OTP-9855 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - A new optional feature has been introduced that enables - Common Test to generate priv_dir directory names that are - unique for each test case or config function. The name of - the option/flag is 'create_priv_dir' and it can be set to - value 'auto_per_run' (which is the default, existing, - behaviour), or 'auto_per_tc' or 'manual_per_tc'. 
If - 'auto_per_tc' is used, Test Server creates a dedicated - priv_dir automatically for each test case (which can be - very expensive in case of many and/or repeated cases). If - 'manual_per_tc' is used, the user needs to create the - priv_dir explicitly by calling the new function - ct:make_priv_dir/0.</p> - <p> - Own Id: OTP-9659 Aux Id: seq11930 </p> - </item> - <item> - <p> - A column for test case group name has been added to the - suite overview HTML log file.</p> - <p> - Own Id: OTP-9730 Aux Id: seq11952 </p> - </item> - <item> - <p> - It is now possible to use the post_end_per_testcase CT - hook function to print a comment for a test case in the - overview log file, even if the test case gets killed by a - timetrap or unknown exit signal, or if the - end_per_testcase function times out.</p> - <p> - Own Id: OTP-9855 Aux Id: seq11979 </p> - </item> - <item> - <p> - Common Test will now print error information (with a time - stamp) in the test case log file immediately when a test - case fails. This makes it easier to see when, in time, - the fault actually occured, and aid the job of locating - relevant trace and debug printouts in the log.</p> - <p> - Own Id: OTP-9904 Aux Id: seq11985, OTP-9900 </p> - </item> - <item> - <p> - Test Server has been modified to check the SASL - errlog_type parameter when receiving an error logger - event, so that it doesn't print reports of type that the - user has disabled.</p> - <p> - Own Id: OTP-9955 Aux Id: seq12013 </p> - </item> - <item> - <p> - If an application cannot be found by ts it is - automatically skipped when testing.</p> - <p> - Own Id: OTP-9971</p> - </item> - <item> - <p> - By specifying a user defined function ({M,F,A} or fun) as - timetrap value, either by means of an info function or by - calling ct:timetrap/1, it is now possible to set a - timetrap that will be triggered when the user function - returns.</p> - <p> - Own Id: OTP-9988 Aux Id: OTP-9501, seq11894 </p> - </item> - <item> - <p> - If the optional configuration functions init_per_suite/1 - and end_per_suite/1 are not implemented in the test - suite, local Common Test versions of these functions are - called instead, and will be displayed in the overview log - file. Any printouts made by the pre- or - post_init_per_suite and pre- or post_end_per_suite hook - functions are saved in the log files for these functions.</p> - <p> - Own Id: OTP-9992</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.5</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - The test case group info function has been implemented in - Common Test. Before execution of a test case group, a - call is now made to <c>TestSuite:group(GroupName)</c>. - The function returns a list of test properties, e.g. to - specify timetrap values, require configuration data, etc - (analogue to the test suite- and test case info - function). 
The scope of the properties set by - <c>group(GroupName)</c> is all test cases and sub-groups - of group <c>GroupName</c>.</p> - <p> - Own Id: OTP-9235</p> - </item> - <item> - <p> - The look of the HTML log files generated by Common Test - and Test Server has been improved (and made easier to - customize) by means of a CSS file.</p> - <p> - Own Id: OTP-9706</p> - </item> - </list> - </section> - - - <section><title>Known Bugs and Problems</title> - <list> - <item> - <p> - Fix problems in CT/TS due to line numbers in exceptions.</p> - <p> - Own Id: OTP-9203</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.4.5</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - An error in how comments are colored in the test suite - overview html log file has been corrected. As result, a - new framework callback function, format_comment/1, has - been introduced.</p> - <p> - Own Id: OTP-9237</p> - </item> - <item> - <p> - Test Server did not release SASL TTY handlers - (sasl_report_tty_h and error_logger_tty_h) properly after - each test run. This error has been fixed.</p> - <p> - Own Id: OTP-9311</p> - </item> - <item> - <p> - Automatically generated init- and end-configuration - functions for test case groups caused incorrect execution - order of test cases. This has been corrected.</p> - <p> - Own Id: OTP-9369</p> - </item> - <item> - <p> - If ct:log/2 was called with bad arguments, this could - cause the Common Test IO handling process to crash. This - fault has been corrected.</p> - <p> - Own Id: OTP-9371 Aux Id: OTP-8933 </p> - </item> - <item> - <p> - A bug has been fixed that made Test Server call the - end_tc/3 framework function with an incorrect module name - as first argument.</p> - <p> - Own Id: OTP-9379 Aux Id: seq11863 </p> - </item> - <item> - <p> - If end_per_testcase caused a timetrap timeout, the actual - test case status was discarded and the test case logged - as successful (even if the case had actually failed - before the call to end_per_testcase). This fault has been - fixed.</p> - <p> - Own Id: OTP-9397</p> - </item> - <item> - <p> - If a timetrap timeout occured during execution of of a - function in a lib module (i.e. a function called directly - or indirectly from a test case), the Suite argument in - the end_tc/3 framework callback function would not - correctly contain the name of the test suite, but the lib - module. (This would only happen if the lib module was - compiled with ct.hrl included). This error has been - solved.</p> - <p> - Own Id: OTP-9398</p> - </item> - <item> - <p> - Add a proplist() type</p> - <p> - Recently I was adding specs to an API and found that - there is no canonical proplist() type defined. (Thanks to - Ryan Zezeski)</p> - <p> - Own Id: OTP-9499</p> - </item> - <item> - <p> XML files have been corrected. </p> - <p> - Own Id: OTP-9550 Aux Id: OTP-9541 </p> - </item> - <item> - <p> - If a test suite would start with a test case group - defined without the init_per_group/2 and end_per_group/2 - function, init_per_suite/1 would not execute initially - and logging of the test run would fail. This error has - been fixed.</p> - <p> - Own Id: OTP-9584</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - A new option, 'logopts', has been introduced, to make it - possible to modify some aspects of the logging behaviour - in Common Test (or Test Server). 
For example, whenever an - io printout is made, test_server adds newline (\n) to the - end of the output string. This may not always be a - preferred action and can therefore be disabled by means - of "ct_run ... -logopts no_nl" (or ct:run_test([..., - {logopts,[no_nl]}])). A new framework callback function, - get_logopts/0, has been introduced (see the ct_framework - module for details).</p> - <p> - Own Id: OTP-9372 Aux Id: OTP-9396 </p> - </item> - <item> - <p> - A new option, 'logopts', has been introduced, to make it - possible to modify some aspects of the logging behaviour - in Common Test (or Test Server). For example, if the html - version of the test suite source code should not be - generated during the test run (and consequently be - unavailable in the log file system), the feature may be - disabled by means of "ct_run ... -logopts no_src" (or - ct:run_test([..., {logopts,[no_src]}])). A new framework - callback function, get_logopts/0, has been introduced - (see the ct_framework module for details).</p> - <p> - Own Id: OTP-9396 Aux Id: seq11869, OTP-9372 </p> - </item> - <item> - <p> - It is now possible to use a tuple {M,F,A}, or a fun, as - timetrap specification in the suite info function or test - case info functions. The function must return a valid - timeout value, as documented in the common_test man page - and in the User's Guide.</p> - <p> - Own Id: OTP-9501 Aux Id: seq11894 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.4.4</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - It was previously not possible to use timetrap value - 'infinity' with ct:timetrap/1. This has been fixed.</p> - <p> - Own Id: OTP-9159</p> - </item> - <item> - <p> - A bug that made it impossible to cancel the previous - timetrap when calling ct:timetrap/1 has been corrected.</p> - <p> - Own Id: OTP-9233 Aux Id: OTP-9159 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - When running tests with auto-compilation disabled, Common - Test could only display the test suite source code on - html format in the test case log if the source file was - located in the same directory as the pre-compiled suite. - This has been modified so that Common Test now tries to - locate the source file by means of the test suite module - info (Suite:module_info/1). As a result, a suite may now - be compiled to a different output directory (e.g. - $MYTEST/bin) than the source code directory (e.g. - $MYTEST/src), without the source-code-to-html generation - being affected.</p> - <p> - Own Id: OTP-9138</p> - </item> - <item> - <p> - It is now possible to return a tuple {fail,Reason} from - init_per_testcase/2. The result is that the associated - test case gets logged as failed without ever executing.</p> - <p> - Own Id: OTP-9160 Aux Id: seq11502 </p> - </item> - <item> - <p> - Added DragonflyBSD check in test_server configure.</p> - <p> - Own Id: OTP-9249</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.4.3</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - Updated the ts*.config files to contain information - relevant to testing Erlang/OTP in an open source - environment.</p> - <p> - Own Id: OTP-9017</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Alpha release of Common Test Hooks (CTH). 
CTHs allow the - users of common test to abtract out common behaviours - from test suites in a much more elegant and flexible way - than was possible before. Note that the addition of this - feature may introduce minor changes in the undocumented - behaviour of the interface inbetween common_test and - test_server.</p> - <p> - *** POTENTIAL INCOMPATIBILITY ***</p> - <p> - Own Id: OTP-8851</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.4.2</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p>Miscellaneous updates</p> - <p> - Own Id: OTP-8976</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.4.1</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - Returning {return_group_result,failed} from end_per_group - in a group that is part of a sequence, did not cause the - proceeding cases (or groups) to get skipped. This has - been fixed.</p> - <p> - Own Id: OTP-8753 Aux Id: seq11644 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Common Test has been updated to handle start options and - test specification terms for test case groups (and test - cases in groups). Also, an option named 'label', has been - added that associates the test run with a name that - Common Test prints in the overview HTML logs.</p> - <p> - Own Id: OTP-8725 Aux Id: OTP-8727 </p> - </item> - <item> - <p> - It is now possible to skip all tests in a suite, or a - group, by returning {fail,Reason} from the end_tc/5 - framework function for init_per_suite, or init_per_group.</p> - <p> - Own Id: OTP-8805 Aux Id: seq11664 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.4</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - Returning {fail,Reason} from the framework end_tc - function was not handled properly by Test Server for all - test suite functions.</p> - <p> - Own Id: OTP-8492 Aux Id: seq11502 </p> - </item> - <item> - <p> - If the framework end_tc function would hang and get - aborted by Test Server, there was no indication of - failure in the logs. This has been fixed.</p> - <p> - Own Id: OTP-8682 Aux Id: seq11504 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - It is now possible for the Test Server framework end_tc - function to change the status of the test case from ok or - auto-skipped to failed by returning {fail,Reason}.</p> - <p> - Own Id: OTP-8495 Aux Id: seq11502 </p> - </item> - <item> - <p> - Test Server will now call the end_per_testcase/2 function - even if the test case has been terminated explicitly - (with abort_current_testcase/1), or after a timetrap - timeout. Under these circumstances the return value of - end_per_testcase is completely ignored. Therefore the - function will not be able to change the reason for test - case termination by returning {fail,Reason}, nor will it - be able to save data with {save_config,Data}.</p> - <p> - Own Id: OTP-8500 Aux Id: seq11521 </p> - </item> - <item> - <p> - Previously, a repeat property of a test case group - specified the number of times the group should be - repeated after the main test run. I.e. {repeat,N} would - case the group to execute 1+N times. To be consistent - with the behaviour of the run_test repeat option, this - has been changed. 
N now specifies the absolute number of - executions instead.</p> - <p> - Own Id: OTP-8689 Aux Id: seq11502 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.3.6</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - The Test Server parse transform did not handle bit string - comprehensions. This has been fixed.</p> - <p> - Own Id: OTP-8458 Aux Id: OTP-8311 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - The tc_status value in the Config list for a test case - that has failed because of a timetrap timeout, has - changed from {tc_status,timeout} to - {tc_status,timetrap_timeout}.</p> - <p> - Own Id: OTP-8302</p> - </item> - <item> - <p>The documentation is now possible to build in an open - source environment after a number of bugs are fixed and - some features are added in the documentation build - process. </p> - <p>- The arity calculation is updated.</p> - <p>- The module prefix used in the function names for - bif's are removed in the generated links so the links - will look like - "http://www.erlang.org/doc/man/erlang.html#append_element-2" - instead of - "http://www.erlang.org/doc/man/erlang.html#erlang:append_element-2".</p> - <p>- Enhanced the menu positioning in the html - documentation when a new page is loaded.</p> - <p>- A number of corrections in the generation of man - pages (thanks to Sergei Golovan)</p> - <p>- The legal notice is taken from the xml book file so - OTP's build process can be used for non OTP - applications.</p> - <p> - Own Id: OTP-8343</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.3.5</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - If the init_per_testcase/2 function fails, the test case - now gets marked and counted as auto skipped, not user - skipped (which would previously happen).</p> - <p> - Own Id: OTP-8289</p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - The documentation is now built with open source tools - (xsltproc and fop) that exists on most platforms. One - visible change is that the frames are removed.</p> - <p> - Own Id: OTP-8201</p> - </item> - <item> - <p> - It is now possible to fail a test case from the - end_per_testcase/2 function, by returning {fail,Reason}.</p> - <p> - Own Id: OTP-8284</p> - </item> - <item> - <p> - It is now possible to fail a test case by having the - end_tc/3 framework function return {fail,Reason} for the - test case.</p> - <p> - Own Id: OTP-8285</p> - </item> - <item> - <p> - The test_server framework API (e.g. the end_tc/3 - function) has been modified. See the test_server_ctrl - documentation for details.</p> - <p> - Own Id: OTP-8286 Aux Id: OTP-8285, OTP-8287 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.3.4</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - When running a suite starting with a test case group, - Test Server crashed if init_per_suite/1 exited or - returned skip. 
This has been fixed.</p> - <p> - Own Id: OTP-8105 Aux Id: OTP-8089 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Various updates and fixes in Common Test and Test Server.</p> - <p> - Own Id: OTP-8045 Aux Id: OTP-8089,OTP-8105,OTP-8163 </p> - </item> - <item> - <p> - Errors in coverage data collection and analysis were - difficult to detect. The logging has been improved so - that more information about e.g. imported and missing - modules is printed to the html log files.</p> - <p> - Own Id: OTP-8163 Aux Id: seq11374 </p> - </item> - <item> - <p> - The Common Test HTML overview pages have been improved. - It is now possible to see if a test case has been skipped - explicitly or because a configuration function has - failed. Also, the history page (all_runs.html) now has - scrolling text displaying the test names. The old format - (showing names as a truncated string) can still be - generated by means of the flag/option 'basic_html'.</p> - <p> - Own Id: OTP-8177</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.3.2</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Various corrections and improvements of Common Test and - Test Server.</p> - <p> - Own Id: OTP-7981</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.3.1</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Minor updates and corrections.</p> - <p> - Own Id: OTP-7897</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.3</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - The conf case in Test Server has been extended with - properties that make it possible to execute test cases in - parallel, in sequence and in shuffled order. It is now - also possible to repeat test cases according to different - criterias. The properties can be combined, making it - possible to e.g. repeat a conf case a certain number of - times and execute the test cases in different (random) - order every time. The properties are specified in a list - in the conf case definition: {conf, Properties, InitCase, - TestCases, EndCase}. The available properties are: - parallel, sequence, shuffle, repeat, repeat_until_all_ok, - repeat_until_any_ok, repeat_until_any_fail, - repeat_until_all_fail.</p> - <p> - Own Id: OTP-7511 Aux Id: OTP-7839 </p> - </item> - <item> - <p>The test server starts Cover on nodes of the same - version as the test server itself only.</p> - <p> - Own Id: OTP-7699</p> - </item> - <item> - <p> - The Erlang mode for Emacs has been updated with new and - modified skeletons for Common Test and TS. Syntax for - test case groups in Common Test (and conf cases with - properties in TS) has been added and a new minimal Common - Test suite skeleton has been introduced.</p> - <p> - Own Id: OTP-7856</p> - </item> - </list> - </section> - -</section> -<section><title>Test_Server 3.2.4.1</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - The step functionality in Common Test (based on - interaction with Debugger) was broken. This has been - fixed, and some new step features have also been added. 
- Please see the Common Test User's Guide for details.</p> - <p> - Own Id: OTP-7800 Aux Id: seq11106 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.2.4</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Miscellaneous updates.</p> - <p> - Own Id: OTP-7527</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.2.3</title> - - <section><title>Fixed Bugs and Malfunctions</title> - <list> - <item> - <p> - When a testcase terminated due to a timetrap, io sent to - the group leader from framework:end_tc/3 (using - ct:pal/2/3 or ct:log/2/3) would cause deadlock. This has - been fixed.</p> - <p> - Own Id: OTP-7447 Aux Id: seq11010 </p> - </item> - </list> - </section> - - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Various updates and improvements, plus some minor bug - fixes, have been implemented in Common Test and Test - Server.</p> - <p> - Own Id: OTP-7112</p> - </item> - <item> - <p> - It is now possible, by means of the new function - ct:abort_current_testcase/1 or - test_server_ctrl:abort_current_testcase/1, to abort the - currently executing test case.</p> - <p> - Own Id: OTP-7518 Aux Id: OTP-7112 </p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.2.2</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p><c>erlang:system_info/1</c> now accepts the - <c>logical_processors</c>, and <c>debug_compiled</c> - arguments. For more info see the, <c>erlang(3)</c> - documentation.</p> <p>The scale factor returned by - <c>test_server:timetrap_scale_factor/0</c> is now also - effected if the emulator uses a larger amount of - scheduler threads than the amount of logical processors - on the system. </p> - <p> - Own Id: OTP-7175</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.2.1</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - When init_per_suite or end_per_suite terminated due to - runtime failure, test_server failed to format the line - number information properly and crashed. This error has - now been fixed.</p> - <p> - Own Id: OTP-7091</p> - </item> - </list> - </section> - -</section> - -<section><title>Test_Server 3.2.0</title> - - <section><title>Improvements and New Features</title> - <list> - <item> - <p> - Test Server is a portable test server for automated - application testing. The server can run test suites on - local or remote targets and log progress and results to - HTML pages. The main purpose of Test Server is to act as - engine inside customized test tools. A callback interface - for such framework applications is provided.</p> - <p> - Own Id: OTP-6989</p> - </item> - </list> - </section> - -</section> - -</chapter> - diff --git a/lib/test_server/doc/src/notes_history.xml b/lib/test_server/doc/src/notes_history.xml deleted file mode 100644 index ca7880d74f..0000000000 --- a/lib/test_server/doc/src/notes_history.xml +++ /dev/null @@ -1,113 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2006</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server Release Notes History</title> - <prepared></prepared> - <docno></docno> - <date></date> - <rev></rev> - </header> - - <section> - <title>Test Server 3.1.1</title> - - <section> - <title>Improvements and new features</title> - <list type="bulleted"> - <item> - <p>Added functions <c>test_server:break/1</c> and - <c>test_server:continue/0</c> for semiautomatic testing.</p> - <p><c>test_server:timetrap/1</c> can now also take - <c>{hours,H} | {minutes,M | {seconds,S}</c>.</p> - <p>Added function - <c>test_server_ctrl:multiply_timetraps/1</c>, - <c>test_server_ctrl:add_case/3</c>, - <c>test_server_ctrl:add_cases/2/3</c>.</p> - <p>Added test suite functions <c>init_per_suite/1</c> and - <c>end_per_suite/1</c>.</p> - <p><c>fin_per_testcase/2</c> is changed to - <c>end_per_testcase/2</c>. <c>fin_per_testcase</c> is kept - for backwards compatibility.</p> - <p>Added support for writing own test server frameworks. - Callback functions <c>init_tc/1</c>, <c>end_tc/3</c>, - <c>get_suite/2</c>, <c>report/2</c>, <c>warn/1</c>.</p> - </item> - </list> - </section> - </section> - - <section> - <title>Test Server 3.1</title> - - <section> - <title>Improvements and New Features</title> - <list type="bulleted"> - <item> - <p>Added the options <c>cover</c> and <c>cover_details</c> - to <c>ts:run</c>. When one of these options is used, - the tested application will be cover compiled - before the test is run. The cover compiled code will also - be loaded on all slave or peer nodes started with - <c>test_server:start_node</c>. When the test is completed - coverage data from all nodes is collected and merged, and - presented in the coverage log to which there will be a link - from the test suite result page (i.e. the one with the - heading "Test suite ... results").</p> - <p>The <c>cover_details</c> option will do - <c>cover:analyse_to_file</c> for each cover compiled module, - while the <c>cover</c> option only will produce a list of - modules and the number of covered/uncovered lines in each - module.</p> - <p>To make it possible to run all test from a script (like in - the OTP daily builds), the following is added: - <c>ts:run([all_tests | Options])</c>.</p> - <p>This means that e.g. the following is possible: - <c>erl -s ts run all_tests batch cover</c>.</p> - <p>Note that it is also possible to run tests with cover even - if you don't use <c>ts</c>. - See <c>test_server_ctrl:cover/2/3</c>.</p> - <p>Own Id: OTP-4703</p> - </item> - <item> - <p>Removed module <c>ts_save.erl</c> and function - <c>ts:save/0/1</c><em>(incompatible)</em>.</p> - <p>Added config variable <c>ipv6_hosts</c> to - <c>ts:install/1</c> and test spec file.</p> - <p>No longer removing duplicates of test cases from test spec - <em>(incompatible)</em>.</p> - <p>Added function <c>test_server:run_on_shielded_node/2</c>.</p> - <p>Creation of html files for test suite source does no longer - crash if suite contains more than 9999 lines of code.</p> - <p>Added functionality for cross cover compilation, - i.e. 
collection of cover data from all tests.</p> - <p>Multiplying timetrap times with 10 when running with cover.</p> - <p>Added <c>ts:r/3</c> for running tests with cover.</p> - <p>*** POTENTIAL INCOMPATIBILITY ***</p> - <p>Own Id: OTP-5040</p> - </item> - </list> - </section> - </section> -</chapter> - diff --git a/lib/test_server/doc/src/part.xml b/lib/test_server/doc/src/part.xml deleted file mode 100644 index 685ed16a94..0000000000 --- a/lib/test_server/doc/src/part.xml +++ /dev/null @@ -1,46 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE part SYSTEM "part.dtd"> - -<part xmlns:xi="http://www.w3.org/2001/XInclude"> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server User's Guide</title> - <prepared></prepared> - <docno></docno> - <date>2002-07-11</date> - <rev></rev> - </header> - <description> - <p><em>Test Server</em> is a portable test server for - automated application testing. The server can run test suites - and log progress and results to HTML - pages. The main purpose of Test Server is to act as engine - inside customized test tools. A callback interface for - such framework applications is provided.</p> - </description> - <xi:include href="basics_chapter.xml"/> - <xi:include href="test_spec_chapter.xml"/> - <xi:include href="write_test_chapter.xml"/> - <xi:include href="run_test_chapter.xml"/> - <xi:include href="write_framework_chapter.xml"/> - <xi:include href="example_chapter.xml"/> -</part> - diff --git a/lib/test_server/doc/src/part_notes.xml b/lib/test_server/doc/src/part_notes.xml deleted file mode 100644 index 8cb9b6c591..0000000000 --- a/lib/test_server/doc/src/part_notes.xml +++ /dev/null @@ -1,41 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE part SYSTEM "part.dtd"> - -<part xmlns:xi="http://www.w3.org/2001/XInclude"> - <header> - <copyright> - <year>2004</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server Release Notes</title> - <prepared></prepared> - <docno></docno> - <date></date> - <rev></rev> - </header> - <description> - <p>The <em>Test Server</em> is a portable test server for - application testing. The test server can run automatic test suites - and log progress and results to HTML - pages. 
It also provides some support for test suite authors.</p> - <p>For information about older versions, see - <url href="part_notes_history_frame.html">Release Notes History</url>.</p> - </description> - <xi:include href="notes.xml"/> -</part> - diff --git a/lib/test_server/doc/src/part_notes_history.xml b/lib/test_server/doc/src/part_notes_history.xml deleted file mode 100644 index 468b5aa8ba..0000000000 --- a/lib/test_server/doc/src/part_notes_history.xml +++ /dev/null @@ -1,39 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE part SYSTEM "part.dtd"> - -<part> - <header> - <copyright> - <year>2006</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server Release Notes History</title> - <prepared></prepared> - <docno></docno> - <date></date> - <rev></rev> - </header> - <description> - <p>The <em>Test Server</em> is a portable test server for - application testing. The test server can run automatic test suites - and log progress and results to HTML - pages. It also provides some support for test suite authors.</p> - </description> - <include file="notes_history"></include> -</part> - diff --git a/lib/test_server/doc/src/ref_man.xml b/lib/test_server/doc/src/ref_man.xml deleted file mode 100644 index 1b06d9750b..0000000000 --- a/lib/test_server/doc/src/ref_man.xml +++ /dev/null @@ -1,44 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE application SYSTEM "application.dtd"> - -<application xmlns:xi="http://www.w3.org/2001/XInclude"> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server Reference Manual</title> - <prepared></prepared> - <docno></docno> - <date></date> - <rev></rev> - <file>ref_man.xml</file> - </header> - <description> - <p><em>Test Server</em> is a portable test server for - automated application testing. The server can run test suites - and log progress and results to HTML - pages. The main purpose of Test Server is to act as engine - inside customized test tools. 
A callback interface for - such framework applications is provided.</p> - </description> - <xi:include href="test_server_app.xml"/> - <xi:include href="test_server_ctrl.xml"/> - <xi:include href="test_server.xml"/> -</application> - diff --git a/lib/test_server/doc/src/run_test_chapter.xml b/lib/test_server/doc/src/run_test_chapter.xml deleted file mode 100644 index cb5b29c993..0000000000 --- a/lib/test_server/doc/src/run_test_chapter.xml +++ /dev/null @@ -1,50 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Running Test Suites</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date></date> - <rev></rev> - <file>run_test_chapter.xml</file> - </header> - - <section> - <title>Using the test server controller</title> - <p>The test server controller provides a low level interface to - all the Test Server functionality. It is possible to use this - interface directly, but it is recommended to use a framework - such as <em>Common Test</em> instead. If no existing framework - suits your needs, you could of course build your own - on top of the test server controller. Some information about how - to do this can be found in the section named "Writing you own - test server framework" in the Test Server User's Guide. - </p> - <p>For information about using the controller directly, please see - all available functions in the reference manual for - <c>test_server_ctrl</c>. - </p> - </section> -</chapter> - diff --git a/lib/test_server/doc/src/test_server.xml b/lib/test_server/doc/src/test_server.xml deleted file mode 100644 index 96ff6de3ba..0000000000 --- a/lib/test_server/doc/src/test_server.xml +++ /dev/null @@ -1,853 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE erlref SYSTEM "erlref.dtd"> - -<erlref> - <header> - <copyright> - <year>2007</year> - <year>2013</year> - <holder>Ericsson AB, All Rights Reserved</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - The Initial Developer of the Original Code is Ericsson AB. 
- </legalnotice> - - <title>test_server</title> - <prepared>Siri Hansen</prepared> - <responsible></responsible> - <docno></docno> - <approved></approved> - <checked></checked> - <date></date> - <rev></rev> - <file>test_server_ref.sgml</file> - </header> - <module>test_server</module> - <modulesummary>This module provides support for test suite authors.</modulesummary> - <description> - <p>The <c>test_server</c> module aids the test suite author by providing - various support functions. The supported functionality includes: - </p> - <list type="bulleted"> - <item>Logging and timestamping - </item> - <item>Capturing output to stdout - </item> - <item>Retrieving and flushing the message queue of a process - </item> - <item>Watchdog timers, process sleep, time measurement and unit - conversion - </item> - <item>Private scratch directory for all test suites - </item> - <item>Start and stop of slave- or peer nodes</item> - </list> - <p>For more information on how to write test cases and for - examples, please see the Test Server User's Guide. - </p> - </description> - - <section> - <title>TEST SUITE SUPPORT FUNCTIONS</title> - <p>The following functions are supposed to be used inside a test - suite. - </p> - </section> - <funcs> - <func> - <name>os_type() -> OSType</name> - <fsummary>Returns the OS type of the target node</fsummary> - <type> - <v>OSType = term()</v> - <d>This is the same as returned from <c>os:type/0</c></d> - </type> - <desc> - <p>This function is equivalent to <c>os:type/0</c>. It is kept - for backwards compatibility.</p> - </desc> - </func> - <func> - <name>fail()</name> - <name>fail(Reason)</name> - <fsummary>Makes the test case fail.</fsummary> - <type> - <v>Reason = term()</v> - <d>The reason why the test case failed.</d> - </type> - <desc> - <p>This will make the test suite fail with a given reason, or - with <c>suite_failed</c> if no reason was given. Use this - function if you want to terminate a test case, as this will - make it easier to read the log- and HTML files. <c>Reason</c> - will appear in the comment field in the HTML log.</p> - </desc> - </func> - <func> - <name>timetrap(Timout) -> Handle</name> - <fsummary></fsummary> - <type> - <v>Timeout = integer() | {hours,H} | {minutes,M} | {seconds,S}</v> - <v>H = M = S = integer()</v> - <v>Pid = pid()</v> - <d>The process that is to be timetrapped (<c>self()</c>by default)</d> - </type> - <desc> - <p>Sets up a time trap for the current process. An expired - timetrap kills the process with reason - <c>timetrap_timeout</c>. The returned handle is to be given - as argument to <c>timetrap_cancel</c> before the timetrap - expires. If <c>Timeout</c> is an integer, it is expected to - be milliseconds.</p> - <note> - <p>If the current process is trapping exits, it will not be killed - by the exit signal with reason <c>timetrap_timeout</c>. - If this happens, the process will be sent an exit signal - with reason <c>kill</c> 10 seconds later which will kill the - process. Information about the timetrap timeout will in - this case not be found in the test logs. However, the - error_logger will be sent a warning.</p> - </note> - </desc> - </func> - <func> - <name>timetrap_cancel(Handle) -> ok</name> - <fsummary>Cancels a timetrap.</fsummary> - <type> - <v>Handle = term()</v> - <d>Handle returned from <c>timetrap</c></d> - </type> - <desc> - <p>This function cancels a timetrap. 
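A minimal sketch of how the two functions are typically paired inside a test case (the 30 second value and the do_the_work/0 helper are placeholders):

    my_case(Config) when is_list(Config) ->
        %% Guard the rest of the test case with a timetrap.
        Handle = test_server:timetrap({seconds, 30}),
        try
            do_the_work()
        after
            %% Cancel the timetrap even if do_the_work/0 crashes.
            test_server:timetrap_cancel(Handle)
        end.
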
This must be done before - the timetrap expires.</p> - </desc> - </func> - <func> - <name>timetrap_scale_factor() -> ScaleFactor</name> - <fsummary>Returns the scale factor for timeouts.</fsummary> - <type> - <v>ScaleFactor = integer()</v> - </type> - <desc> - <p>This function returns the scale factor by which all timetraps - are scaled. It is normally 1, but can be greater than 1 if - the test_server is running <c>cover</c>, using a larger amount of - scheduler threads than the amount of logical processors on the - system, running under purify, valgrind or in a debug-compiled - emulator. The scale factor can be used if you need to scale you - own timeouts in test cases with same factor as the test_server - uses.</p> - </desc> - </func> - <func> - <name>sleep(MSecs) -> ok</name> - <fsummary>Suspens the calling task for a specified time.</fsummary> - <type> - <v>MSecs = integer() | float() | infinity</v> - <d>The number of milliseconds to sleep</d> - </type> - <desc> - <p>This function suspends the calling process for at least the - supplied number of milliseconds. There are two major reasons - why you should use this function instead of - <c>timer:sleep</c>, the first being that the module - <c>timer</c> may be unavailable at the time the test suite is - run, and the second that it also accepts floating point - numbers.</p> - </desc> - </func> - <func> - <name>adjusted_sleep(MSecs) -> ok</name> - <fsummary>Suspens the calling task for a specified time.</fsummary> - <type> - <v>MSecs = integer() | float() | infinity</v> - <d>The default number of milliseconds to sleep</d> - </type> - <desc> - <p>This function suspends the calling process for at least the - supplied number of milliseconds. The function behaves the same - way as <c>test_server:sleep/1</c>, only <c>MSecs</c> - will be multiplied by the 'multiply_timetraps' value, if set, - and also automatically scaled up if 'scale_timetraps' is set - to true (which it is by default).</p> - </desc> - </func> - <func> - <name>hours(N) -> MSecs</name> - <name>minutes(N) -> MSecs</name> - <name>seconds(N) -> MSecs</name> - <fsummary></fsummary> - <type> - <v>N = integer()</v> - <d>Value to convert to milliseconds.</d> - </type> - <desc> - <p>Theese functions convert <c>N</c> number of hours, minutes - or seconds into milliseconds. - </p> - <p>Use this function when you want to - <c>test_server:sleep/1</c> for a number of seconds, minutes or - hours(!).</p> - </desc> - </func> - <func> - <name>format(Format) -> ok</name> - <name>format(Format, Args)</name> - <name>format(Pri, Format)</name> - <name>format(Pri, Format, Args)</name> - <fsummary></fsummary> - <type> - <v>Format = string()</v> - <d>Format as described for <c>io_:format</c>.</d> - <v>Args = list()</v> - <d>List of arguments to format.</d> - </type> - <desc> - <p>Formats output just like <c>io:format</c> but sends the - formatted string to a logfile. If the urgency value, - <c>Pri</c>, is lower than some threshold value, it will also - be written to the test person's console. Default urgency is - 50, default threshold for display on the console is 1. - </p> - <p>Typically, the test person don't want to see everything a - test suite outputs, but is merely interested in if the test - cases succeeded or not, which the test server tells him. 
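For instance, a small helper along these lines (the function and variable names are made up) writes to the test case log only, given the default urgency (50) and console threshold (1) described above:

    log_roundtrip(Request, Reply) ->
        %% Urgency 50 is not below the default console threshold,
        %% so the line goes to the log file but not the console.
        test_server:format(50, "sent ~p, got ~p", [Request, Reply]).
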
If he - would like to see more, he could manually change the threshold - values by using the <c>test_server_ctrl:set_levels/3</c> - function.</p> - </desc> - </func> - <func> - <name>capture_start() -> ok</name> - <name>capture_stop() -> ok</name> - <name>capture_get() -> list()</name> - <fsummary>Captures all output to stdout for a process.</fsummary> - <desc> - <p>These functions makes it possible to capture all output to - stdout from a process started by the test suite. The list of - characters captured can be purged by using <c>capture_get</c>.</p> - </desc> - </func> - <func> - <name>messages_get() -> list()</name> - <fsummary>Empty the message queue.</fsummary> - <desc> - <p>This function will empty and return all the messages - currently in the calling process' message queue.</p> - </desc> - </func> - <func> - <name>timecall(M, F, A) -> {Time, Value}</name> - <fsummary>Measures the time needed to call a function.</fsummary> - <type> - <v>M = atom()</v> - <d>The name of the module where the function resides.</d> - <v>F = atom()</v> - <d>The name of the function to call in the module.</d> - <v>A = list()</v> - <d>The arguments to supply the called function.</d> - <v>Time = integer()</v> - <d>The number of seconds it took to call the function.</d> - <v>Value = term()</v> - <d>Value returned from the called function.</d> - </type> - <desc> - <p>This function measures the time (in seconds) it takes to - call a certain function. The function call is <em>not</em> - caught within a catch.</p> - </desc> - </func> - <func> - <name>do_times(N, M, F, A) -> ok</name> - <name>do_times(N, Fun)</name> - <fsummary>Calls MFA or Fun N times.</fsummary> - <type> - <v>N = integer()</v> - <d>Number of times to call MFA.</d> - <v>M = atom()</v> - <d>Module name where the function resides.</d> - <v>F = atom()</v> - <d>Function name to call.</d> - <v>A = list()</v> - <d>Arguments to M:F.</d> - </type> - <desc> - <p>Calls MFA or Fun N times. Useful for extensive testing of a - sensitive function.</p> - </desc> - </func> - <func> - <name>m_out_of_n(M, N, Fun) -> ok | exit({m_out_of_n_failed, {R,left_to_do}}</name> - <fsummary>Fault tolerant <c>do_times</c>.</fsummary> - <type> - <v>N = integer()</v> - <d>Number of times to call the Fun.</d> - <v>M = integer()</v> - <d>Number of times to require a successful return.</d> - </type> - <desc> - <p>Repeatedly evaluates the given function until it succeeds - (doesn't crash) M times. If, after N times, M successful - attempts have not been accomplished, the process crashes with - reason {m_out_of_n_failed, {R,left_to_do}}, where R indicates - how many cases that was still to be successfully completed. - </p> - <p>For example: - </p> - <p><c>m_out_of_n(1,4,fun() -> tricky_test_case() end)</c> <br></br> -Tries to run tricky_test_case() up to 4 times, and is - happy if it succeeds once. - </p> - <p><c>m_out_of_n(7,8,fun() -> clock_sanity_check() end)</c> <br></br> -Tries running clock_sanity_check() up to 8 times,and - allows the function to fail once. 
This might be useful if - clock_sanity_check/0 is known to fail if the clock crosses an - hour boundary during the test (and the up to 8 test runs could - never cross 2 boundaries)</p> - </desc> - </func> - <func> - <name>call_crash(M, F, A) -> Result</name> - <name>call_crash(Time, M, F, A) -> Result</name> - <name>call_crash(Time, Crash, M, F, A) -> Result</name> - <fsummary>Calls MFA and succeeds if it crashes.</fsummary> - <type> - <v>Result = ok | exit(call_crash_timeout) | exit({wrong_crash_reason, Reason})</v> - <v>Crash = term()</v> - <d>Crash return from the function.</d> - <v>Time = integer()</v> - <d>Timeout in milliseconds.</d> - <v>M = atom()</v> - <d>Module name where the function resides.</d> - <v>F = atom()</v> - <d>Function name to call.</d> - <v>A = list()</v> - <d>Arguments to M:F.</d> - </type> - <desc> - <p>Spawns a new process that calls MFA. The call is considered - successful if the call crashes with the gives reason - (<c>Crash</c>) or any reason if not specified. The call must - terminate within the given time (default <c>infinity</c>), or - it is considered a failure.</p> - </desc> - </func> - <func> - <name>temp_name(Stem) -> Name</name> - <fsummary>Returns a unique filename.</fsummary> - <type> - <v>Stem = string()</v> - </type> - <desc> - <p>Returns a unique filename starting with <c>Stem</c> with - enough extra characters appended to make up a unique - filename. The filename returned is guaranteed not to exist in - the filesystem at the time of the call.</p> - </desc> - </func> - <func> - <name>break(Comment) -> ok</name> - <fsummary>Cancel all timetraps and wait for call to continue/0.</fsummary> - <type> - <v>Comment = string()</v> - </type> - <desc> - <p><c>Comment</c> is a string which will be written in - the shell, e.g. explaining what to do.</p> - <p>This function will cancel all timetraps and pause the - execution of the test case until the user executes the - <c>continue/0</c> function. It gives the user the opportunity - to interact with the erlang node running the tests, e.g. for - debugging purposes or for manually executing a part of the - test case.</p> - <p>When the <c>break/1</c> function is called, the shell will - look something like this:</p> - <code type="none"><![CDATA[ - --- SEMIAUTOMATIC TESTING --- - The test case executes on process <0.51.0> - - - "Here is a comment, it could e.g. instruct to pull out a card" - - - ----------------------------- - - Continue with --> test_server:continue(). ]]></code> - <p>The user can now interact with the erlang node, and when - ready call <c>test_server:continue().</c></p> - <p>Note that this function can not be used if the test is - executed with <c>ts:run/0/1/2/3/4</c> in <c>batch</c> mode.</p> - </desc> - </func> - <func> - <name>continue() -> ok</name> - <fsummary>Continue after break/1.</fsummary> - <desc> - <p>This function must be called in order to continue after a - test case has called <c>break/1</c>.</p> - </desc> - </func> - <func> - <name>run_on_shielded_node(Fun, CArgs) -> term()</name> - <fsummary>Execute a function a shielded node.</fsummary> - <type> - <v>Fun = function() (arity 0)</v> - <d>Function to execute on the shielded node.</d> - <v>CArg = string()</v> - <d>Extra command line arguments to use when starting the shielded node.</d> - </type> - <desc> - <p><c>Fun</c> is executed in a process on a temporarily created - hidden node with a proxy for communication with the test server - node. The node is called a shielded node (should have been called - a shield node). 
If <c>Fun</c> is successfully executed, the result - is returned. A peer node (see <c>start_node/3</c>) started from - the shielded node will be shielded from test server node, i.e. - they will not be aware of each other. This is useful when you want - to start nodes from earlier OTP releases than the OTP release of - the test server node.</p> - <p>Nodes from an earlier OTP release can normally not be started - if the test server hasn't been started in compatibility mode - (see the <c>+R</c> flag in the <c>erl(1)</c> documentation) of - an earlier release. If a shielded node is started in compatibility - mode of an earlier OTP release than the OTP release of the test - server node, the shielded node can start nodes of an earlier OTP - release.</p> - <note> - <p>You <em>must</em> make sure that nodes started by the shielded - node never communicate directly with the test server node.</p> - </note> - <note> - <p>Slave nodes always communicate with the test server node; - therefore, <em>never</em> start <em>slave nodes</em> from the - shielded node, <em>always</em> start <em>peer nodes</em>.</p> - </note> - </desc> - </func> - <func> - <name>start_node(Name, Type, Options) -> {ok, Node} | {error, Reason}</name> - <fsummary>Start a node.</fsummary> - <type> - <v>Name = atom() | string()</v> - <d>Name of the slavenode to start (as given to -sname or -name)</d> - <v>Type = slave | peer</v> - <d>The type of node to start.</d> - <v>Options = [{atom(), term()]</v> - <d>Tuplelist of options</d> - </type> - <desc> - <p>This functions starts a node, possibly on a remote machine, - and guarantees cross architecture transparency. Type is set to - either <c>slave</c> or <c>peer</c>. - </p> - <p><c>slave</c> means that the new node will have a master, - i.e. the slave node will terminate if the master terminates, - TTY output produced on the slave will be sent back to the - master node and file I/O is done via the master. The master is - normally the target node unless the target is itself a slave. - </p> - <p><c>peer</c> means that the new node is an independent node - with no master. - </p> - <p><c>Options</c> is a tuplelist which can contain one or more - of - </p> - <taglist> - <tag><c>{remote, true}</c></tag> - <item>Start the node on a remote host. If not specified, the - node will be started on the local host. Test cases that - require a remote host will fail with a reasonable comment if - no remote hosts are available at the time they are run. - </item> - <tag><c>{args, Arguments}</c></tag> - <item>Arguments passed directly to the node. This is - typically a string appended to the command line. - </item> - <tag><c>{wait, false}</c></tag> - <item>Don't wait until the node is up. By default, this - function does not return until the node is up and running, - but this option makes it return as soon as the node start - command is given.. - <br></br> -Only valid for peer nodes - </item> - <tag><c>{fail_on_error, false}</c></tag> - <item>Returns <c>{error, Reason}</c> rather than failing the - test case. - <br></br> -Only valid for peer nodes. Note that slave nodes always - act as if they had <c>fail_on_error=false</c></item> - <tag><c>{erl, ReleaseList}</c></tag> - <item>Use an Erlang emulator determined by ReleaseList when - starting nodes, instead of the same emulator as the test - server is running. ReleaseList is a list of specifiers, - where a specifier is either {release, Rel}, {prog, Prog}, or - 'this'. Rel is either the name of a release, e.g., "r12b_patched" - or 'latest'. 
'this' means using the same emulator as the test - server. Prog is the name of an emulator executable. If the - list has more than one element, one of them is picked - randomly. (Only works on Solaris and Linux, and the test server - gives warnings when it notices that nodes are not of the same - version as itself.) - <br></br> - <br></br> - - When specifying this option to run a previous release, use - <c>is_release_available/1</c> function to test if the given - release is available and skip the test case if not. - <br></br> - <br></br> - - In order to avoid compatibility problems (may not appear right - away), use a shielded node (see <c>run_on_shielded_node/2</c>) - when starting nodes from different OTP releases than the test - server. - </item> - <tag><c>{cleanup, false}</c></tag> - <item>Tells the test server not to kill this node if it is - still alive after the test case is completed. This is useful - if the same node is to be used by a group of test cases. - </item> - <tag><c>{env, Env}</c></tag> - <item><c>Env</c> should be a list of tuples <c>{Name, Val}</c>, - where <c>Name</c> is the name of an environment variable, and - <c>Val</c> is the value it is to have in the started node. - Both <c>Name</c> and <c>Val</c> must be strings. The one - exception is <c>Val</c> being the atom <c>false</c> (in - analogy with <c>os:getenv/1</c>), which removes the - environment variable. Only valid for peer nodes. Not - available on VxWorks.</item> - <tag><c>{start_cover, false}</c></tag> - <item>By default the test server will start cover on all nodes - when the test is run with code coverage analysis. To make - sure cover is not started on a new node, set this option to - <c>false</c>. This can be necessary if the connection to - the node at some point will be broken but the node is - expected to stay alive. The reason is that a remote cover - node can not continue to run without its main node. Another - solution would be to explicitly stop cover on the node - before breaking the connection, but in some situations (if - old code resides in one or more processes) this is not - possible.</item> - </taglist> - </desc> - </func> - <func> - <name>stop_node(NodeName) -> bool()</name> - <fsummary>Stops a node</fsummary> - <type> - <v>NodeName = term()</v> - <d>Name of the node to stop</d> - </type> - <desc> - <p>This functions stops a node previously started with - <c>start_node/3</c>. Use this function to stop any node you - start, or the test server will produce a warning message in - the test logs, and kill the nodes automatically unless it was - started with the <c>{cleanup, false}</c> option.</p> - </desc> - </func> - <func> - <name>is_commercial() -> bool()</name> - <fsummary>Tests whether the emulator is commercially supported</fsummary> - <desc> - <p>This function test whether the emulator is commercially supported - emulator. The tests for a commercially supported emulator could be more - stringent (for instance, a commercial release should always contain - documentation for all applications).</p> - </desc> - </func> - - <func> - <name>is_release_available(Release) -> bool()</name> - <fsummary>Tests whether a release is available</fsummary> - <type> - <v>Release = string() | atom()</v> - <d>Release to test for</d> - </type> - <desc> - <p>This function test whether the release given by - <c>Release</c> (for instance, "r12b_patched") is available - on the computer that the test_server controller is running on. 
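As a sketch (the release string and the run_against_old_node/1 helper are only examples), a test case could guard on the result like this:

    old_release_case(Config) when is_list(Config) ->
        case test_server:is_release_available("r12b_patched") of
            true  -> run_against_old_node(Config);
            false -> {skip, "r12b_patched not available on this host"}
        end.
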
- Typically, you should skip the test case if not.</p> - <p>Caution: This function may not be called from the <c>suite</c> - clause of a test case, as the test_server will deadlock.</p> - </desc> - </func> - <func> - <name>is_native(Mod) -> bool()</name> - <fsummary>Checks whether the module is natively compiled or not</fsummary> - <type> - <v>Mod = atom()</v> - <d>A module name</d> - </type> - <desc> - <p>Checks whether the module is natively compiled or not</p> - </desc> - </func> - <func> - <name>app_test(App) -> ok | test_server:fail()</name> - <name>app_test(App,Mode)</name> - <fsummary>Checks an applications .app file for obvious errors</fsummary> - <type> - <v>App = term()</v> - <d>The name of the application to test</d> - <v>Mode = pedantic | tolerant</v> - <d>Default is pedantic</d> - </type> - <desc> - <p>Checks an applications .app file for obvious errors. - The following is checked: - </p> - <list type="bulleted"> - <item>required fields - </item> - <item>that all modules specified actually exists - </item> - <item>that all requires applications exists - </item> - <item>that no module included in the application has export_all - </item> - <item>that all modules in the ebin/ dir is included (If - <c>Mode==tolerant</c> this only produces a warning, as all - modules does not have to be included)</item> - </list> - </desc> - </func> - <func> - <name>appup_test(App) -> ok | test_server:fail()</name> - <fsummary>Checks an applications .appup file for obvious errors</fsummary> - <type> - <v>App = term()</v> - <d>The name of the application to test</d> - </type> - <desc> - <p>Checks an applications .appup file for obvious errors. - The following is checked: - </p> - <list type="bulleted"> - <item>syntax - </item> - <item>that .app file version and .appup file version match - </item> - <item>for non-library applications: validity of high-level upgrade - instructions, specifying no instructions is explicitly allowed - (in this case the application is not upgradeable)</item> - <item>for library applications: that there is exactly one wildcard - regexp clause restarting the application when upgrading or - downgrading from any version</item> - </list> - </desc> - </func> - <func> - <name>comment(Comment) -> ok</name> - <fsummary>Print a comment on the HTML result page</fsummary> - <type> - <v>Comment = string()</v> - </type> - <desc> - <p>The given String will occur in the comment field of the - table on the HTML result page. If called several times, only - the last comment is printed. comment/1 is also overwritten by - the return value {comment,Comment} from a test case or by - fail/1 (which prints Reason as a comment).</p> - </desc> - </func> - </funcs> - - <section> - <title>TEST SUITE EXPORTS</title> - <p>The following functions must be exported from a test suite - module. - </p> - </section> - <funcs> - <func> - <name>all(suite) -> TestSpec | {skip, Comment}</name> - <fsummary>Returns the module's test specification</fsummary> - <type> - <v>TestSpec = list()</v> - <v>Comment = string()</v> - <d>This comment will be printed on the HTML result page</d> - </type> - <desc> - <p>This function must return the test specification for the - test suite module. 
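In its simplest form this is just a list of test case names, as in the following sketch (the case names are placeholders):

    all(suite) ->
        [start_stop_case, error_handling_case].
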
The syntax of a test specification is - described in the Test Server User's Guide.</p> - </desc> - </func> - <func> - <name>init_per_suite(Config0) -> Config1 | {skip, Comment}</name> - <fsummary>Test suite initiation</fsummary> - <type> - <v>Config0 = Config1 = [tuple()]</v> - <v>Comment = string()</v> - <d>Describes why the suite is skipped</d> - </type> - <desc> - <p>This function is called before all other test cases in the - suite. <c>Config</c> is the configuration which can be modified - here. Whatever is returned from this function is given as - <c>Config</c> to the test cases. - </p> - <p>If this function fails, all test cases in the suite will be - skipped.</p> - </desc> - </func> - <func> - <name>end_per_suite(Config) -> void()</name> - <fsummary>Test suite finalization</fsummary> - <type> - <v>Config = [tuple()]</v> - </type> - <desc> - <p>This function is called after the last test case in the - suite, and can be used to clean up whatever the test cases - have done. The return value is ignored.</p> - </desc> - </func> - <func> - <name>init_per_testcase(Case, Config0) -> Config1 | {skip, Comment}</name> - <fsummary>Test case initiation</fsummary> - <type> - <v>Case = atom()</v> - <v>Config0 = Config1 = [tuple()]</v> - <v>Comment = string()</v> - <d>Describes why the test case is skipped</d> - </type> - <desc> - <p>This function is called before each test case. The - <c>Case</c> argument is the name of the test case, and - <c>Config</c> is the configuration which can be modified - here. Whatever is returned from this function is given as - <c>Config</c> to the test case.</p> - </desc> - </func> - <func> - <name>end_per_testcase(Case, Config) -> void()</name> - <fsummary>Test case finalization</fsummary> - <type> - <v>Case = atom()</v> - <v>Config = [tuple()]</v> - </type> - <desc> - <p>This function is called after each test case, and can be - used to clean up whatever the test case has done. The return - value is ignored.</p> - </desc> - </func> - <func> - <name>Case(doc) -> [Decription]</name> - <name>Case(suite) -> [] | TestSpec | {skip, Comment}</name> - <name>Case(Config) -> {skip, Comment} | {comment, Comment} | Ok</name> - <fsummary>A test case</fsummary> - <type> - <v>Description = string()</v> - <d>Short description of the test case</d> - <v>TestSpec = list()</v> - <v>Comment = string()</v> - <d>This comment will be printed on the HTML result page</d> - <v>Ok = term()</v> - <v>Config = [tuple()]</v> - <d>Elements from the Config parameter can be read with the ?config macro, see section about test suite support macros</d> - </type> - <desc> - <p>The <em>documentation clause</em> (argument <c>doc</c>) can - be used for automatic generation of test documentation or test - descriptions. - </p> - <p>The <em>specification clause</em> (argument <c>spec</c>) - shall return an empty list, the test specification for the - test case or <c>{skip,Comment}</c>. The syntax of a test - specification is described in the Test Server User's Guide. - </p> - <p>The <em>execution clause</em> (argument <c>Config</c>) is - only called if the specification clause returns an empty list. - The execution clause is the real test case. Here you must call - the functions you want to test, and do whatever you need to - check the result. If something fails, make sure the process - crashes or call <c>test_server:fail/0/1</c> (which also will - cause the process to crash). - </p> - <p>You can return <c>{skip,Comment}</c> if you decide not to - run the test case after all, e.g. 
if it is not applicable on - this platform. - </p> - <p>You can return <c>{comment,Comment}</c> if you wish to - print some information in the 'Comment' field on the HTML - result page. - </p> - <p>If the execution clause returns anything else, it is - considered a success, unless it is <c>{'EXIT',Reason}</c> or - <c>{'EXIT',Pid,Reason}</c> which can't be distinguished from a - crash, and thus will be considered a failure. - </p> - <p>A <em>conf test case</em> is a group of test cases with an - init and a cleanup function. The init and cleanup functions - are also test cases, but they have special rules:</p> - <list type="bulleted"> - <item>They do not need a specification clause.</item> - <item>They must always have the execution clause.</item> - <item>They must return the <c>Config</c> parameter, a modified - version of it or <c>{skip,Comment}</c> from the execution clause.</item> - <item>The cleanup function may also return a tuple - <c>{return_group_result,Status}</c>, which is used to return the - status of the conf case to Test Server and/or to a conf case on a - higher level. (<c>Status = ok | skipped | failed</c>).</item> - <item><c>init_per_testcase</c> and <c>end_per_testcase</c> are - not called before and after these functions.</item> - </list> - </desc> - </func> - </funcs> - - - <section> - <title>TEST SUITE SUPPORT MACROS</title> - <p>There are some macros defined in the <c>test_server.hrl</c> - that are quite useful for test suite programmers: - </p> - <p>The <em>config</em> macro, is used to - retrieve information from the <c>Config</c> variable sent to all - test cases. It is used with two arguments, where the first is the - name of the configuration variable you wish to retrieve, and the - second is the <c>Config</c> variable supplied to the test case - from the test server. - </p> - <p>Possible configuration variables include:</p> - <list type="bulleted"> - <item><c>data_dir</c> - Data file directory.</item> - <item><c>priv_dir</c> - Scratch file directory.</item> - <item><c>nodes</c> - Nodes specified in the spec file</item> - <item><c>nodenames</c> - Generated nodenames.</item> - <item>Whatever added by conf test cases or - <c>init_per_testcase/2</c></item> - </list> - <p>Examples of the <c>config</c> macro can be seen in the Examples chapter - in the user's guide.</p> - <p>The <em>line</em> and <em>line_trace</em> macros are deprecated, see - below.</p> - </section> - - <section> - <title>TEST SUITE LINE NUMBERS</title> - <p>In the past, ERTS did not produce line numbers when generating - stacktraces, test_server was thus unable to provide them when reporting - test failures. It had instead two different mecanisms to do it: either by - using the <c>line</c> macro or by using the <c>test_server_line</c> parse - transform. Both are deprecated and should not be used in new tests - anymore.</p> - </section> -</erlref> - diff --git a/lib/test_server/doc/src/test_server_app.xml b/lib/test_server/doc/src/test_server_app.xml deleted file mode 100644 index 4830916561..0000000000 --- a/lib/test_server/doc/src/test_server_app.xml +++ /dev/null @@ -1,75 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE appref SYSTEM "appref.dtd"> - -<appref> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Server Application</title> - <prepared>Siri Hansen</prepared> - <responsible>Peter Andersson</responsible> - <docno></docno> - <approved></approved> - <checked></checked> - <date>2002-07-12</date> - <rev>PA1</rev> - <file>test_server_app.xml</file> - </header> - <app>test_server</app> - <appsummary>Test Server for manual or automatic testing of Erlang code</appsummary> - <description> - <p><em>Test Server</em> is a portable test server for - automated application testing. The server can run test suites - and log progress and results to HTML - pages. The main purpose of Test Server is to act as engine - inside customized test tools. A callback interface for - such framework applications is provided.</p> - <p>In brief the test server supports:</p> - <list type="bulleted"> - <item>Running multiple, concurrent test suites</item> - <item>Test suites may contain other test suites, in a tree fashion</item> - <item>Logging of the events in a test suite, on both suite and case levels</item> - <item>HTML presentation of test suite results</item> - <item>HTML presentation of test suite code</item> - <item>Support for test suite authors, e.g. start/stop slave nodes</item> - <item>Call trace on target and slave nodes</item> - </list> - <p>For information about how to write test cases and test suites, - please see the Test Server User's Guide and the reference - manual for the <c>test_server</c> module. - </p> - <p><em>Common Test</em> is an existing test tool application based on the - OTP Test Server. Please read the Common Test User's Guide for more information. - </p> - </description> - - <section> - <title>Configuration</title> - <p>There are currently no configuration parameters available for - this application. - </p> - </section> - - <section> - <title>SEE ALSO</title> - <p></p> - </section> -</appref> - diff --git a/lib/test_server/doc/src/test_server_ctrl.xml b/lib/test_server/doc/src/test_server_ctrl.xml deleted file mode 100644 index 2762997ece..0000000000 --- a/lib/test_server/doc/src/test_server_ctrl.xml +++ /dev/null @@ -1,844 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE erlref SYSTEM "erlref.dtd"> - -<erlref> - <header> - <copyright> - <year>2007</year> - <year>2013</year> - <holder>Ericsson AB, All Rights Reserved</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - The Initial Developer of the Original Code is Ericsson AB. 
- </legalnotice> - - <title>The Test Server Controller</title> - <prepared>Siri Hansen, Peter Andersson</prepared> - <responsible></responsible> - <docno></docno> - <approved></approved> - <checked></checked> - <date></date> - <rev></rev> - <file>test_server_ctrl_ref.sgml</file> - </header> - <module>test_server_ctrl</module> - <modulesummary>This module provides a low level interface to the Test Server.</modulesummary> - <description> - <p>The <c>test_server_ctrl</c> module provides a low level - interface to the Test Server. This interface is normally - not used directly by the tester, but through a framework built - on top of <c>test_server_ctrl</c>. - </p> - <p>Common Test is such a framework, well suited for automated - black box testing of target systems of any kind (not necessarily - implemented in Erlang). Common Test is also a very useful tool for - white box testing Erlang programs and OTP applications. - Please see the Common Test User's Guide and reference manual for - more information. - </p> - <p>If you want to write your own framework, some more information - can be found in the chapter "Writing your own test server - framework" in the Test Server User's Guide. Details about the - interface provided by <c>test_server_ctrl</c> follows below. - </p> - </description> - <funcs> - <func> - <name>start() -> Result</name> - <fsummary>Starts the test server.</fsummary> - <type> - <v>Result = ok | {error, {already_started, pid()}</v> - </type> - <desc> - <p>This function starts the test server.</p> - </desc> - </func> - <func> - <name>stop() -> ok</name> - <fsummary>Stops the test server immediately.</fsummary> - <desc> - <p>This stops the test server and - all its activity. The running test suite (if any) will be - halted.</p> - </desc> - </func> - <func> - <name>add_dir(Name, Dir) -> ok</name> - <name>add_dir(Name, Dir, Pattern) -> ok</name> - <name>add_dir(Name, [Dir|Dirs]) -> ok</name> - <name>add_dir(Name, [Dir|Dirs], Pattern) -> ok</name> - <fsummary>Add a directory to the job queue.</fsummary> - <type> - <v>Name = term()</v> - <d>The jobname for this directory.</d> - <v>Dir = term()</v> - <d>The directory to scan for test suites.</d> - <v>Dirs = [term()]</v> - <d>List of directories to scan for test suites.</d> - <v>Pattern = term()</v> - <d>Suite match pattern. Directories will be scanned for Pattern_SUITE.erl files.</d> - </type> - <desc> - <p>Puts a collection of suites matching (*_SUITE) in given - directories into the job queue. <c>Name</c> is an arbitrary - name for the job, it can be any erlang term. If <c>Pattern</c> - is given, only modules matching <c>Pattern*</c> will be added.</p> - </desc> - </func> - <func> - <name>add_module(Mod) -> ok</name> - <name>add_module(Name, [Mod|Mods]) -> ok</name> - <fsummary>Add a module to the job queue with or without a given name.</fsummary> - <type> - <v>Mod = atom()</v> - <v>Mods = [atom()]</v> - <d>The name(s) of the module(s) to add.</d> - <v>Name = term()</v> - <d>Name for the job.</d> - </type> - <desc> - <p>This function adds a module or a list of modules, to the - test servers job queue. <c>Name</c> may be any Erlang - term. 
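- For example (the module and job names below are hypothetical):</p>
- <code type="none">
-test_server_ctrl:add_module(my_app_SUITE).
-test_server_ctrl:add_module(my_suites, [my_app_SUITE, my_lib_SUITE]).</code>
- <p>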
When <c>Name</c> is not given, the job gets the name of - the module.</p> - </desc> - </func> - <func> - <name>add_case(Mod, Case) -> ok</name> - <fsummary>Adds one test case to the job queue.</fsummary> - <type> - <v>Mod = atom()</v> - <d>Name of the module the test case is in.</d> - <v>Case = atom() </v> - <d>Function name of the test case to add.</d> - </type> - <desc> - <p>This function will add one test case to the job queue. The - job will be given the module's name.</p> - </desc> - </func> - <func> - <name>add_case(Name, Mod, Case) -> ok</name> - <fsummary>Equivalent to add_case/2, but with specified name.</fsummary> - <type> - <v>Name = string()</v> - <d>Name to use for the test job.</d> - </type> - <desc> - <p>Equivalent to <c>add_case/2</c>, but the test job will get - the specified name.</p> - </desc> - </func> - <func> - <name>add_cases(Mod, Cases) -> ok</name> - <fsummary>Adds a list of test cases to the job queue.</fsummary> - <type> - <v>Mod = atom()</v> - <d>Name of the module the test case is in.</d> - <v>Cases = [Case] </v> - <v>Case = atom() </v> - <d>Function names of the test cases to add.</d> - </type> - <desc> - <p>This function will add one or more test cases to the job - queue. The job will be given the module's name.</p> - </desc> - </func> - <func> - <name>add_cases(Name, Mod, Cases) -> ok</name> - <fsummary>Equivalent to add_cases/2, but with specified name.</fsummary> - <type> - <v>Name = string()</v> - <d>Name to use for the test job.</d> - </type> - <desc> - <p>Equivalent to <c>add_cases/2</c>, but the test job will get - the specified name.</p> - </desc> - </func> - <func> - <name>add_spec(TestSpecFile) -> ok | {error, nofile}</name> - <fsummary>Adds a test specification file to the job queue.</fsummary> - <type> - <v>TestSpecFile = string()</v> - <d>Name of the test specification file</d> - </type> - <desc> - <p>This function will add the content of the given test - specification file to the job queue. The job will be given the - name of the test specification file, e.g. if the file is - called <c>test.spec</c>, the job will be called <c>test</c>. - </p> - <p>See the reference manual for the test server application - for details about the test specification file.</p> - </desc> - </func> - <func> - <name>add_dir_with_skip(Name, [Dir|Dirs], Skip) -> ok</name> - <name>add_dir_with_skip(Name, [Dir|Dirs], Pattern, Skip) -> ok</name> - <name>add_module_with_skip(Mod, Skip) -> ok</name> - <name>add_module_with_skip(Name, [Mod|Mods], Skip) -> ok</name> - <name>add_case_with_skip(Mod, Case, Skip) -> ok</name> - <name>add_case_with_skip(Name, Mod, Case, Skip) -> ok</name> - <name>add_cases_with_skip(Mod, Cases, Skip) -> ok</name> - <name>add_cases_with_skip(Name, Mod, Cases, Skip) -> ok</name> - <fsummary>Same purpose as functions listed above, but with extra Skip argument.</fsummary> - <type> - <v>Skip = [SkipItem]</v> - <d>List of items to be skipped from the test.</d> - <v>SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment}</v> - <v>Mod = atom()</v> - <d>Test suite name.</d> - <v>Comment = string()</v> - <d>Reason why suite or case is being skipped.</d> - <v>Cases = [Case]</v> - <v>Case = atom()</v> - <d>Name of test case function.</d> - </type> - <desc> - <p>These functions add test jobs just like the add_dir, add_module, - add_case and add_cases functions above, but carry an additional - argument, Skip. Skip is a list of items that should be skipped - in the current test run. 
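- A small sketch (the suite, case and directory names are hypothetical):</p>
- <code type="none">
-Skip = [{my_app_SUITE, "Suite not applicable on this host"},
-        {my_lib_SUITE, old_format_case, "Known bug"}],
-test_server_ctrl:add_dir_with_skip(my_job, ["../my_app_test"], Skip).</code>
- <p>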
Test job items that occur in the Skip - list will be logged as SKIPPED with the associated Comment.</p> - </desc> - </func> - <func> - <name>add_tests_with_skip(Name, Tests, Skip) -> ok</name> - <fsummary>Adds different types of jobs to the run queue.</fsummary> - <type> - <v>Name = term()</v> - <d>The jobname for this directory.</d> - <v>Tests = [TestItem]</v> - <d>List of jobs to add to the run queue.</d> - <v>TestItem = {Dir,all,all} | {Dir,Mods,all} | {Dir,Mod,Cases}</v> - <v>Dir = term()</v> - <d>The directory to scan for test suites.</d> - <v>Mods = [Mod]</v> - <v>Mod = atom()</v> - <d>Test suite name.</d> - <v>Cases = [Case]</v> - <v>Case = atom()</v> - <d>Name of test case function.</d> - <v>Skip = [SkipItem]</v> - <d>List of items to be skipped from the test.</d> - <v>SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment}</v> - <v>Comment = string()</v> - <d>Reason why suite or case is being skipped.</d> - </type> - <desc> - <p>This function adds various test jobs to the test_server_ctrl - job queue. These jobs can be of different type (all or specific suites - in one directory, all or specific cases in one suite, etc). It is also - possible to get particular items skipped by passing them along in the - Skip list (see the add_*_with_skip functions above).</p> - </desc> - </func> - <func> - <name>abort_current_testcase(Reason) -> ok | {error,no_testcase_running}</name> - <fsummary>Aborts the test case currently executing.</fsummary> - <type> - <v>Reason = term()</v> - <d>The reason for stopping the test case, which will be printed in the log.</d> - </type> - <desc> - <p>When calling this function, the currently executing test case will be aborted. - It is the user's responsibility to know for sure which test case is currently - executing. The function is therefore only safe to call from a function which - has been called (or synchronously invoked) by the test case.</p> - </desc> - </func> - <func> - <name>set_levels(Console, Major, Minor) -> ok</name> - <fsummary>Sets the levels of I/O.</fsummary> - <type> - <v>Console = integer()</v> - <d>Level for I/O to be sent to console.</d> - <v>Major = integer()</v> - <d>Level for I/O to be sent to the major logfile.</d> - <v>Minor = integer()</v> - <d>Level for I/O to be sent to the minor logfile.</d> - </type> - <desc> - <p>Determines where I/O from test suites/test server will - go. All text output from test suites and the test server is - tagged with a priority value which ranges from 0 to 100, 100 - being the most detailed. (see the section about log files in - the user's guide). Output from the test cases (using - <c>io:format/2</c>) has a detail level of 50. Depending on the - levels set by this function, this I/O may be sent to the - console, the major log file (for the whole test suite) or to - the minor logfile (separate for each test case). - </p> - <p>All output with detail level:</p> - <list type="bulleted"> - <item>Less than or equal to <c>Console</c> is displayed on - the screen (default 1) - </item> - <item>Less than or equal to <c>Major</c> is logged in the - major log file (default 19) - </item> - <item>Greater than or equal to <c>Minor</c> is logged in the - minor log files (default 10) - </item> - </list> - <p>To view the currently set thresholds, use the - <c>get_levels/0</c> function.</p> - </desc> - </func> - <func> - <name>get_levels() -> {Console, Major, Minor}</name> - <fsummary>Returns the current levels.</fsummary> - <desc> - <p>Returns the current levels. 
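- With the default thresholds mentioned above still in effect, a call
- would give (sketch):</p>
- <code type="none">
-{1, 19, 10} = test_server_ctrl:get_levels().</code>
- <p>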
See <c>set_levels/3</c> for - types.</p> - </desc> - </func> - <func> - <name>jobs() -> JobQueue</name> - <fsummary>Returns the job queue.</fsummary> - <type> - <v>JobQueue = [{list(), pid()}]</v> - </type> - <desc> - <p>This function will return all the jobs currently in the job - queue.</p> - </desc> - </func> - <func> - <name>multiply_timetraps(N) -> ok</name> - <fsummary>All timetraps started after this will be multiplied by N.</fsummary> - <type> - <v>N = integer() | infinity</v> - </type> - <desc> - <p>This function should be called before a test is started - which requires extended timetraps, e.g. if extensive tracing - is used. All timetraps started after this call will be - multiplied by <c>N</c>.</p> - </desc> - </func> - <func> - <name>scale_timetraps(Bool) -> ok</name> - <fsummary>.</fsummary> - <type> - <v>Bool = true | false</v> - </type> - <desc> - <p>This function should be called before a test is started. - The parameter specifies if test_server should attempt - to automatically scale the timetrap value in order to compensate - for delays caused by e.g. the cover tool.</p> - </desc> - </func> - <func> - <name>get_timetrap_parameters() -> {N,Bool} </name> - <fsummary>Read the parameter values that affect timetraps.</fsummary> - <type> - <v>N = integer() | infinity</v> - <v>Bool = true | false</v> - </type> - <desc> - <p>This function may be called to read the values set by - <c>multiply_timetraps/1</c> and <c>scale_timetraps/1</c>.</p> - </desc> - </func> - <func> - <name>cover(Application,Analyse) -> ok</name> - <name>cover(CoverFile,Analyse) -> ok</name> - <name>cover(App,CoverFile,Analyse) -> ok</name> - <fsummary>Informs the test_server controller that next test shall run with code coverage analysis.</fsummary> - <type> - <v>Application = atom()</v> - <d>OTP application to cover compile</d> - <v>CoverFile = string()</v> - <d>Name of file listing modules to exclude from or include in cover compilation. The filename must include full path to the file.</d> - <v>Analyse = details | overview</v> - </type> - <desc> - <p>This function informs the test_server controller that next - test shall run with code coverage analysis. All timetraps will - automatically be multiplied by 10 when cover i run. - </p> - <p><c>Application</c> and <c>CoverFile</c> indicates what to - cover compile. If <c>Application</c> is given, the default is - that all modules in the <c>ebin</c> directory of the - application will be cover compiled. The <c>ebin</c> directory - is found by adding <c>ebin</c> to - <c>code:lib_dir(Application)</c>. - </p> - <p>A <c>CoverFile</c> can have the following entries:</p> - <code type="none"> -{exclude, all | ExcludeModuleList}. -{include, IncludeModuleList}. -{cross, CrossCoverInfo}.</code> - <p>Note that each line must end with a full - stop. <c>ExcludeModuleList</c> and <c>IncludeModuleList</c> - are lists of atoms, where each atom is a module name. - </p> - - <p><c>CrossCoverInfo</c> is used when collecting cover data - over multiple tests. Modules listed here are compiled, but - they will not be analysed when the test is finished. See - <seealso - marker="#cross_cover_analyse-2">cross_cover_analyse/2</seealso> - for more information about the cross cover mechanism and the - format of <c>CrossCoverInfo</c>. - </p> - <p>If both an <c>Application</c> and a <c>CoverFile</c> is - given, all modules in the application are cover compiled, - except for the modules listed in <c>ExcludeModuleList</c>. The - modules in <c>IncludeModuleList</c> are also cover compiled. 
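- As an illustration (the application name, file path and module names
- are hypothetical), a cover file and the corresponding call could look
- like this:</p>
- <code type="none">
-%% my_app.cover
-{exclude,[my_app_generated]}.
-{include,[my_app_helper]}.</code>
- <code type="none">
-test_server_ctrl:cover(my_app, "/full/path/to/my_app.cover", details).</code>
- <p>This would give a detailed analysis of all modules in the
- <c>ebin</c> directory of <c>my_app</c> except <c>my_app_generated</c>,
- plus the extra module <c>my_app_helper</c>.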
- </p>
- <p>If a <c>CoverFile</c> is given, but no <c>Application</c>,
- only the modules in <c>IncludeModuleList</c> are cover
- compiled.
- </p>
- <p><c>Analyse</c> indicates the detail level of the cover
- analysis. If <c>Analyse = details</c>, each cover compiled
- module will be analysed with
- <c>cover:analyse_to_file/1</c>. If <c>Analyse = overview</c>,
- an overview of all cover compiled modules is created, listing
- the number of covered and not covered lines for each module.
- </p>
- <p>If the test following this call starts any slave or peer
- nodes with <c>test_server:start_node/3</c>, the same cover
- compiled code will be loaded on all nodes. If the loading
- fails, e.g. if the node runs an old version of OTP, the node
- will simply not be a part of the coverage analysis. Note that
- slave or peer nodes must be stopped with
- <c>test_server:stop_node/1</c> for the node to be part of the
- coverage analysis, otherwise the test server will not be able to
- fetch coverage data from the node.
- </p>
- <p>When the test is finished, the coverage analysis is
- automatically completed, logs are created and the cover
- compiled modules are unloaded. If another test is to be run
- with coverage analysis, <c>test_server_ctrl:cover/2/3</c> must
- be called again.
- </p>
- </desc>
- </func>
- <func>
- <name>cross_cover_analyse(Level, Tests) -> ok</name>
- <fsummary>Analyse cover data collected from multiple tests</fsummary>
- <type>
- <v>Level = details | overview</v>
- <v>Tests = [{Tag,LogDir}]</v>
- <v>Tag = atom()</v>
- <d>Test identifier.</d>
- <v>LogDir = string()</v>
- <d>Log directory for the test identified by <c>Tag</c>. This
- can either be the <c>run.<timestamp></c> directory or
- the parent directory of this (in which case the latest
- <c>run.<timestamp></c> directory is chosen).</d>
- </type>
- <desc>
- <p>Analyse cover data collected from multiple tests. The modules
- analysed are the ones listed in <c>cross</c> statements in
- the cover files. These are modules that are heavily used by
- tests other than the one where they belong or are explicitly
- tested. They should then be listed as cross modules in the
- cover file for the test where they are used but do not
- belong. See the example below.</p>
- <p>This function should be run after all tests are completed,
- and the result will be stored in a file called
- <c>cross_cover.html</c> in the <c>run.<timestamp></c>
- directory of the test the modules belong to.</p>
- <p>Note that the function can be executed on any node, and it
- does not require <c>test_server_ctrl</c> to be started first.</p>
- <p>The <c>cross</c> statement in the cover file must be like this:</p>
- <code type="none">
-{cross,[{Tag,Modules}]}.</code>
- <p>where <c>Tag</c> is the same as <c>Tag</c> in the
- <c>Tests</c> parameter to this function and <c>Modules</c> is a
- list of module names (atoms).</p>
- <p><em>Example:</em></p>
- <p>If the module <c>m1</c> belongs to system <c>s1</c> but is
- heavily used also in the tests for another system <c>s2</c>,
- then the cover files for the two systems' tests could be like
- this:</p>
-<code type="none">
-s1.cover:
- {include,[m1]}.
-
-s2.cover:
- {include,[....]}.
% modules belonging to system s2 - {cross,[{s1,[m1]}]}.</code> - <p>When the tests for both <c>s1</c> and <c>s2</c> are completed, run</p> -<code type="none"> -test_server_ctrl:cross_cover_analyse(Level,[{s1,S1LogDir},{s2,S2LogDir}]) -</code> - - <p>and the accumulated cover data for <c>m1</c> will be written to - <c>S1LogDir/[run.<timestamp>/]cross_cover.html</c>.</p> - <p>Note that the <c>m1</c> module will also be presented in the - normal coverage log for <c>s1</c> (due to the include statement in - <c>s1.cover</c>), but that only includes the coverage achieved by the - <c>s1</c> test itself.</p> - <p>The Tag in the <c>cross</c> statement in the cover file has - no other purpose than mapping the list of modules - (<c>[m1]</c> in the example above) to the correct log - directory where it should be included in the - <c>cross_cover.html</c> file (<c>S1LogDir</c> in the example - above). I.e. the value of <c>Tag</c> has no meaning, it - could be <c>foo</c> as well as <c>s1</c> above, as long as - the same <c>Tag</c> is used in the cover file and in the - call to this function.</p> - </desc> - </func> - <func> - <name>trc(TraceInfoFile) -> ok | {error, Reason}</name> - <fsummary>Starts call trace on target and slave nodes</fsummary> - <type> - <v>TraceInfoFile = atom() | string()</v> - <d>Name of a file defining which functions to trace and how</d> - </type> - <desc> - <p>This function starts call trace on target and on slave or - peer nodes that are started or will be started by the test - suites. - </p> - <p>Timetraps are not extended automatically when tracing is - used. Use <c>multiply_timetraps/1</c> if necessary. - </p> - <p>Note that the trace support in the test server is in a very - early stage of the implementation, and thus not yet as - powerful as one might wish for. - </p> - <p>The trace information file specified by the - <c>TraceInfoFile</c> argument is a text file containing one or - more of the following elements: - </p> - <list type="bulleted"> - <item><c>{SetTP,Module,Pattern}.</c></item> - <item><c>{SetTP,Module,Function,Pattern}.</c></item> - <item><c>{SetTP,Module,Function,Arity,Pattern}.</c></item> - <item><c>ClearTP.</c></item> - <item><c>{ClearTP,Module}.</c></item> - <item><c>{ClearTP,Module,Function}.</c></item> - <item><c>{ClearTP,Module,Function,Arity}.</c></item> - </list> - <taglist> - <tag><c>SetTP = tp | tpl</c></tag> - <item>This is maps to the corresponding functions in the - <c>ttb</c> module in the <c>observer</c> - application. <c>tp</c> means set trace pattern on global - function calls. <c>tpl</c> means set trace pattern on local - and global function calls. - </item> - <tag><c>ClearTP = ctp | ctpl | ctpg</c></tag> - <item>This is maps to the corresponding functions in the - <c>ttb</c> module in the <c>observer</c> - application. <c>ctp</c> means clear trace pattern (i.e. turn - off) on global and local function calls. <c>ctpl</c> means - clear trace pattern on local function calls only and <c>ctpg</c> - means clear trace pattern on global function calls only. - </item> - <tag><c>Module = atom()</c></tag> - <item>The module to trace - </item> - <tag><c>Function = atom()</c></tag> - <item>The name of the function to trace - </item> - <tag><c>Arity = integer()</c></tag> - <item>The arity of the function to trace - </item> - <tag><c>Pattern = [] | match_spec()</c></tag> - <item>The trace pattern to set for the module or - function. For a description of the match_spec() syntax, - please turn to the User's guide for the runtime system - (erts). 
The chapter "Match Specification in Erlang" explains - the general match specification language. - </item> - </taglist> - <p>The trace result will be logged in a (binary) file called - <c>NodeName-test_server</c> in the current directory of the - test server controller node. The log must be formatted using - <c>ttb:format/1/2</c>. - </p> - </desc> - </func> - <func> - <name>stop_trace() -> ok | {error, not_tracing}</name> - <fsummary>Stops tracing on target and slave nodes.</fsummary> - <desc> - <p>This function stops tracing on target, and on slave or peer - nodes that are currently running. New slave or peer nodes will - no longer be traced after this.</p> - </desc> - </func> - </funcs> - - <section> - <title>FUNCTIONS INVOKED FROM COMMAND LINE</title> - <p>The following functions are supposed to be invoked from the - command line using the <c>-s</c> option when starting the erlang - node.</p> - </section> - <funcs> - <func> - <name>run_test(CommandLine) -> ok</name> - <fsummary>Runs the tests specified on the command line.</fsummary> - <type> - <v>CommandLine = FlagList</v> - </type> - <desc> - <p>This function is supposed to be invoked from the - commandline. It starts the test server, interprets the - argument supplied from the commandline, runs the tests - specified and when all tests are done, stops the test server - and returns to the Erlang prompt. - </p> - <p>The <c>CommandLine</c> argument is a list of command line - flags, typically <c>['KEY1', Value1, 'KEY2', Value2, ...]</c>. - The valid command line flags are listed below. - </p> - <p>Under a UNIX command prompt, this function can be invoked like this: - <br></br> -<c>erl -noshell -s test_server_ctrl run_test KEY1 Value1 KEY2 Value2 ... -s erlang halt</c></p> - <p>Or make an alias (this is for unix/tcsh) <br></br> -<c>alias erl_test 'erl -noshell -s test_server_ctrl run_test \!* -s erlang halt'</c></p> - <p>And then use it like this <br></br> -<c>erl_test KEY1 Value1 KEY2 Value2 ...</c> <br></br> -</p> - <p>The valid command line flags are</p> - <taglist> - <tag><c>DIR dir</c></tag> - <item>Adds all test modules in the directory <c>dir</c> to - the job queue. - </item> - <tag><c>MODULE mod</c></tag> - <item>Adds the module <c>mod</c> to the job queue. - </item> - <tag><c>CASE mod case</c></tag> - <item>Adds the case <c>case</c> in module <c>mod</c> to the - job queue. - </item> - <tag><c>SPEC spec</c></tag> - <item>Runs the test specification file <c>spec</c>. - </item> - <tag><c>SKIPMOD mod</c></tag> - <item>Skips all test cases in the module <c>mod</c></item> - <tag><c>SKIPCASE mod case</c></tag> - <item>Skips the test case <c>case</c> in module <c>mod</c>. - </item> - <tag><c>NAME name</c></tag> - <item>Names the test suite to something else than the - default name. This does not apply to <c>SPEC</c> which keeps - its names. - </item> - <tag><c>COVER app cover_file analyse</c></tag> - <item>Indicates that the test should be run with cover - analysis. <c>app</c>, <c>cover_file</c> and <c>analyse</c> - corresponds to the parameters to - <c>test_server_ctrl:cover/3</c>. If no cover file is used, - the atom <c>none</c> should be given. - </item> - <tag><c>TRACE traceinfofile</c></tag> - <item>Specifies a trace information file. When this option - is given, call tracing is started on the target node and all - slave or peer nodes that are started. The trace information - file specifies which modules and functions to trace. See the - function <c>trc/1</c> above for more information about the - syntax of this file. 
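- A small example of such a file, using hypothetical module and function
- names, could be:
- <code type="none">
-{tpl, my_app_server, []}.
-{tp, my_app_lib, encode, 1, []}.
-{ctpl, my_app_server, handle_info}.</code>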
- </item> - </taglist> - </desc> - </func> - </funcs> - - <section> - <title>FRAMEWORK CALLBACK FUNCTIONS</title> - <p>A test server framework can be defined by setting the - environment variable <c>TEST_SERVER_FRAMEWORK</c> to a module - name. This module will then be framework callback module, and it - must export the following function:</p> - </section> - <funcs> - <func> - <name>get_suite(Mod,Func) -> TestCaseList</name> - <fsummary>Get subcases.</fsummary> - <type> - <v>Mod = atom()</v> - <d>Test suite name.</d> - <v>Func = atom()</v> - <d>Name of test case.</d> - <v>TestCaseList = [SubCase]</v> - <d>List of test cases.</d> - <v>SubCase = atom()</v> - <d>Name of a case.</d> - </type> - <desc> - <p>This function is called before a test case is started. The - purpose is to retrieve a list of subcases. The default - behaviour of this function should be to call - <c>Mod:Func(suite)</c> and return the result from this call.</p> - </desc> - </func> - <func> - <name>init_tc(Mod,Func,Args0) -> {ok,Args1} | {skip,ReasonToSkip} | {auto_skip,ReasonToSkip} | {fail,ReasonToFail}</name> - <fsummary>Preparation for a test case or configuration function.</fsummary> - <type> - <v>Mod = atom()</v> - <d>Test suite name.</d> - <v>Func = atom()</v> - <d>Name of test case or configuration function.</d> - <v>Args0 = Args1 = [tuple()]</v> - <d>Normally Args = [Config]</d> - <v>ReasonToSkip = term()</v> - <d>Reason to skip the test case or configuration function.</d> - <v>ReasonToFail = term()</v> - <d>Reason to fail the test case or configuration function.</d> - </type> - <desc> - <p>This function is called before a test case or configuration - function starts. It is called on the process executing the function - <c>Mod:Func</c>. Typical use of this function can be to alter - the input parameters to the test case function (<c>Args</c>) or - to set properties for the executing process.</p> - <p>By returning <c>{skip,Reason}</c>, <c>Func</c> gets skipped. - <c>Func</c> also gets skipped if <c>{auto_skip,Reason}</c> is returned, - but then gets an auto skipped status (rather than user skipped).</p> - <p>To fail <c>Func</c> immediately instead of executing it, return - <c>{fail,ReasonToFail}.</c></p> - </desc> - </func> - <func> - <name>end_tc(Mod,Func,Status) -> ok | {fail,ReasonToFail}</name> - <fsummary>Cleanup after a test case or configuration function.</fsummary> - <type> - <v>Mod = atom()</v> - <d>Test suite name.</d> - <v>Func = atom()</v> - <d>Name of test case or configuration function.</d> - <v>Status = {Result,Args} | {TCPid,Result,Args}</v> - <d>The status of the test case or configuration function.</d> - <v>ReasonToFail = term()</v> - <d>Reason to fail the test case or configuration function.</d> - <v>Result = ok | Skip | Fail</v> - <d>The final result of the test case or configuration function.</d> - <v>TCPid = pid()</v> - <d>Pid of the process executing Func</d> - <v>Skip = {skip,SkipReason}</v> - <v>SkipReason = term() | {failed,{Mod,init_per_testcase,term()}}</v> - <d>Reason why the function was skipped.</d> - <v>Fail = {error,term()} | {'EXIT',term()} | {timetrap_timeout,integer()} | - {testcase_aborted,term()} | testcase_aborted_or_killed | - {failed,term()} | {failed,{Mod,end_per_testcase,term()}}</v> - <d>Reason why the function failed.</d> - <v>Args = [tuple()]</v> - <d>Normally Args = [Config]</d> - </type> - <desc> - <p>This function is called when a test case, or a configuration function, - is finished. 
It is normally called on the process where the function - <c>Mod:Func</c> has been executing, but if not, the pid of the test - case process is passed with the <c>Status</c> argument.</p> - <p>Typical use of the <c>end_tc/3</c> function can be to clean up - after <c>init_tc/3</c>.</p> - <p>If <c>Func</c> is a test case, it is possible to analyse the value of - <c>Result</c> to verify that <c>init_per_testcase/2</c> and - <c>end_per_testcase/2</c> executed successfully.</p> - <p>It is possible with <c>end_tc/3</c> to fail an otherwise successful - test case, by returning <c>{fail,ReasonToFail}</c>. The test case <c>Func</c> - will be logged as failed with the provided term as reason.</p> - </desc> - </func> - <func> - <name>report(What,Data) -> ok</name> - <fsummary>Progress report for test.</fsummary> - <type> - <v>What = atom()</v> - <v>Data = term()</v> - </type> - <desc> - <p>This function is called in order to keep the framework up-to-date with - the progress of the test. This is useful e.g. if the - framework implements a GUI where the progress information is - constantly updated. The following can be reported: - </p> - <p><c>What = tests_start, Data = {Name,NumCases}</c><br></br> - <c>What = loginfo, Data = [{topdir,TestRootDir},{rundir,CurrLogDir}]</c><br></br> - <c>What = tests_done, Data = {Ok,Failed,{UserSkipped,AutoSkipped}}</c><br></br> - <c>What = tc_start, Data = {{Mod,{Func,GroupName}},TCLogFile}</c><br></br> - <c>What = tc_done, Data = {Mod,{Func,GroupName},Result}</c><br></br> - <c>What = tc_user_skip, Data = {Mod,{Func,GroupName},Comment}</c><br></br> - <c>What = tc_auto_skip, Data = {Mod,{Func,GroupName},Comment}</c><br></br> - <c>What = framework_error, Data = {{FWMod,FWFunc},Error}</c></p> - <p>Note that for a test case function that doesn't belong to a group, - <c>GroupName</c> has value <c>undefined</c>, otherwise the name of the test - case group.</p> - </desc> - </func> - <func> - <name>error_notification(Mod, Func, Args, Error) -> ok</name> - <fsummary>Inform framework of crashing testcase or configuration function.</fsummary> - <type> - <v>Mod = atom()</v> - <d>Test suite name.</d> - <v>Func = atom()</v> - <d>Name of test case or configuration function.</d> - <v>Args = [tuple()]</v> - <d>Normally Args = [Config]</d> - <v>Error = {Reason,Location}</v> - <v>Reason = term()</v> - <d>Reason for termination.</d> - <v>Location = unknown | [{Mod,Func,Line}]</v> - <d>Last known position in Mod before termination.</d> - <v>Line = integer()</v> - <d>Line number in file Mod.erl.</d> - </type> - <desc> - <p>This function is called as the result of function <c>Mod:Func</c> failing - with Reason at Location. The function is intended mainly to aid - specific logging or error handling in the framework application. Note - that for Location to have relevant values (i.e. other than unknown), - the <c>line</c> macro or <c>test_server_line</c> parse transform must - be used. For details, please see the section about test suite line numbers - in the <c>test_server</c> reference manual page.</p> - </desc> - </func> - <func> - <name>warn(What) -> boolean()</name> - <fsummary>Ask framework if test server should issue a warning for What.</fsummary> - <type> - <v>What = processes | nodes</v> - </type> - <desc> - <p>The test server checks the number of processes and nodes - before and after the test is executed. This function is a - question to the framework if the test server should warn when - the number of processes or nodes has changed during the test - execution. 
If <c>true</c> is returned, a warning will be written - in the test case minor log file.</p> - </desc> - </func> - <func> - <name>target_info() -> InfoStr</name> - <fsummary>Print info about the target system to the test case log.</fsummary> - <type> - <v>InfoStr = string() | ""</v> - </type> - <desc> - <p>The test server will ask the framework for information about - the test target system and print InfoStr in the test case - log file below the host information.</p> - </desc> - </func> - </funcs> -</erlref> - diff --git a/lib/test_server/doc/src/test_spec_chapter.xml b/lib/test_server/doc/src/test_spec_chapter.xml deleted file mode 100644 index 0a62010364..0000000000 --- a/lib/test_server/doc/src/test_spec_chapter.xml +++ /dev/null @@ -1,375 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Test Structure and Test Specifications</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date></date> - <rev></rev> - <file>test_spec_chapter.xml</file> - </header> - - <section> - <title>Test structure</title> - <p>A test consists of a set of test cases. Each test case is - implemented as an erlang function. An erlang module implementing - one or more test cases is called a test suite. - </p> - </section> - - <section> - <title>Test specifications</title> - <p>A test specification is a specification of which test suites - and test cases to run and which to skip. A test specification can - also group several test cases into conf cases with init and - cleanup functions (see section about configuration cases - below). In a test there can be test specifications on three - different levels: - </p> - <p>The top level is a test specification file which roughly - specifies what to test for a whole application. The test - specification in such a file is encapsulated in a topcase - command. - </p> - <p>Then there is a test specification for each test suite, - specifying which test cases to run within the suite. The test - specification for a test suite is returned from the - <c>all(suite)</c> function in the test suite module. - </p> - <p>And finally there can be a test specification per test case, - specifying sub test cases to run. The test specification for a - test case is returned from the specification clause of the test - case. - </p> - <p>When a test starts, the total test specification is built in a - tree fashion, starting from the top level test specification. - </p> - <p>The following are the valid elements of a test - specification. 
The specification can be one of these elements or a - list with any combination of the elements: - </p> - <taglist> - <tag><c>{Mod, Case}</c></tag> - <item>This specifies the test case Mod:Case/1 - </item> - <tag><c>{dir, Dir}</c></tag> - <item>This specifies all modules <c>*_SUITE</c> in the directory - <c>Dir</c></item> - <tag><c>{dir, Dir, Pattern}</c></tag> - <item>This specifies all modules <c>Pattern*</c> in the - directory <c>Dir</c></item> - <tag><c>{conf, Init, TestSpec, Fin}</c></tag> - <item>This is a configuration case. In a test specification - file, <c>Init</c> and <c>Fin</c> must be - <c>{Mod,Func}</c>. Inside a module they can also be just - <c>Func</c>. See the section named Configuration Cases below for - more information about this. - </item> - <tag><c>{conf, Properties, Init, TestSpec, Fin}</c></tag> - <item>This is a configuration case as explained above, but - which also takes a list of execution properties for its group - of test cases and nested sub-groups. - </item> - <tag><c>{make, Init, TestSpec, Fin}</c></tag> - <item>This is a special version of a conf case which is only - used by the test server framework <c>ts</c>. <c>Init</c> and - <c>Fin</c> are make and unmake functions for a data - directory. <c>TestSpec</c> is the test specification for the - test suite owning the data directory in question. If the make - function fails, all tests in the test suite are skipped. The - difference between this "make case" and a normal conf case is - that for the make case, <c>Init</c> and <c>Fin</c> are given with - arguments (<c>{Mod,Func,Args}</c>). - </item> - <tag><c>Case</c></tag> - <item>This can only be used inside a module, i.e. not a test - specification file. It specifies the test case - <c>CurrentModule:Case</c>. - </item> - </taglist> - </section> - - <section> - <title>Test Specification Files</title> - <p>A test specification file is a text file containing the top - level test specification (a topcase command), and possibly one or - more additional commands. A "command" in a test specification file - means a key-value tuple ended by a dot-newline sequence. - </p> - <p>The following commands are valid: - </p> - <taglist> - <tag><c>{topcase, TestSpec}</c></tag> - <item>This command is mandatory in all test specification - files. <c>TestSpec</c> is the top level test specification of a - test. - </item> - <tag><c>{skip, {Mod, Comment}}</c></tag> - <item>This specifies that all cases in the module <c>Mod</c> - shall be skipped. <c>Comment</c> is a string. - </item> - <tag><c>{skip, {Mod, Case, Comment}}</c></tag> - <item>This specifies that the case <c>Mod:Case</c> shall be - skipped. - </item> - <tag><c>{skip, {Mod, CaseList, Comment}}</c></tag> - <item>This specifies that all cases <c>Mod:Case</c>, where - <c>Case</c> is in <c>CaseList</c>, shall be skipped. - </item> - <tag><c>{nodes, Nodes}</c></tag> - <item><c>Nodes</c> is a list of nodenames available to the test - suite. It will be added to the <c>Config</c> argument to all - test cases. <c>Nodes</c> is a list of atoms. - </item> - <tag><c>{require_nodenames, Num}</c></tag> - <item>Specifies how many nodenames the test suite will - need. Theese will be automatically generated and inserted into the - <c>Config</c> argument to all test cases. <c>Num</c> is an - integer. - </item> - <tag><c>{hosts, Hosts}</c></tag> - <item>This is a list of available hosts on which to start slave - nodes. It is used when the <c>{remote, true}</c> option is given - to the <c>test_server:start_node/3</c> function. 
Also, if - <c>{require_nodenames, Num}</c> is contained in a test - specification file, the generated nodenames will be spread over - all hosts given in this <c>Hosts</c> list. The hostnames are - atoms or strings. - </item> - <tag><c>{diskless, true}</c></tag> - <item>Adds <c>{diskless, true}</c> to the <c>Config</c> argument - to all test cases. This is kept for backwards compatibility and - should not be used. Use a configuration case instead. - </item> - <tag><c>{ipv6_hosts, Hosts}</c></tag> - <item>Adds <c>{ipv6_hosts, Hosts}</c> to the <c>Config</c> - argument to all test cases.</item> - </taglist> - <p>All test specification files shall have the extension - ".spec". If special test specification files are needed for - Windows or VxWorks platforms, additional files with the - extension ".spec.win" and ".spec.vxworks" shall be - used. This is useful e.g. if some test cases shall be skipped on - these platforms. - </p> - <p>Some examples for test specification files can be found in the - Examples section of this user's guide. - </p> - </section> - - <section> - <title>Configuration cases</title> - <p>If a group of test cases need the same initialization, a so called - <em>configuration</em> or <em>conf</em> case can be used. A conf - case consists of an initialization function, the group of test cases - needing this initialization and a cleanup or finalization function. - </p> - <p>If the init function in a conf case fails or returns - <c>{skip,Comment}</c>, the rest of the test cases in the conf case - (including the cleanup function) are skipped. If the init function - succeeds, the cleanup function will always be called, even if some - of the test cases in between failed. - </p> - <p>Both the init function and the cleanup function in a conf case - get the <c>Config</c> parameter as only argument. This parameter - can be modified or returned as is. Whatever is returned by the - init function is given as <c>Config</c> parameter to the rest of - the test cases in the conf case, including the cleanup function. - </p> - <p>If the <c>Config</c> parameter is changed by the init function, - it must be restored by the cleanup function. Whatever is returned - by the cleanup function will be given to the next test case called. - </p> - <p>The optional <c>Properties</c> list can be used to specify - execution properties for the test cases and possibly nested - sub-groups of the configuration case. The available properties are:</p> - <pre> - Properties = [parallel | sequence | Shuffle | {RepeatType,N}] - Shuffle = shuffle | {shuffle,Seed} - Seed = {integer(),integer(),integer()} - RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail | - repeat_until_any_ok | repeat_until_any_fail - N = integer() | forever</pre> - - <p>If the <c>parallel</c> property is specified, Test Server will execute - all test cases in the group in parallel. If <c>sequence</c> is specified, - the cases will be executed in a sequence, meaning if one case fails, all - following cases will be skipped. If <c>shuffle</c> is specified, the cases - in the group will be executed in random order. The <c>repeat</c> property - orders Test Server to repeat execution of the cases in the group a given - number of times, or until any, or all, cases fail or succeed.</p> - - <p>Properties may be combined so that e.g. 
if <c>shuffle</c>,
- <c>repeat_until_any_fail</c> and <c>sequence</c> are all specified, the test
- cases in the group will be executed repeatedly and in random order until
- a test case fails, at which point execution is immediately stopped and the
- rest of the cases are skipped.</p>
-
- <p>The properties for a conf case are always printed at the top of the HTML log
- for the group's init function. Also, the total execution time for a conf case
- can be found at the bottom of the log for the group's end function.</p>
-
- <p>Configuration cases may be nested so that sets of grouped cases can be
- configured with the same init- and end functions.</p>
- </section>
-
- <section>
- <title>The parallel property and nested configuration cases</title>
- <p>If a conf case has a parallel property, its test cases will be spawned
- simultaneously and executed in parallel. A test case is, however, not allowed
- to execute in parallel with the end function, which means
- that the time it takes to execute a set of parallel cases is equal to the
- execution time of the slowest test case in the group. A negative side
- effect of running test cases in parallel is that the HTML summary pages
- are not updated with links to the individual test case logs until the
- end function for the conf case has finished.</p>
-
- <p>A conf case nested under a parallel conf case will start executing in
- parallel with previous (parallel) test cases (no matter what properties the
- nested conf case has). Since, however, test cases are never executed in
- parallel with the init- or the end function of the same conf case, it is
- only after a nested group of cases has finished that any remaining parallel
- cases in the previous conf case get spawned.</p>
- </section>
-
- <section>
- <title>Repeated execution of test cases</title>
- <marker id="repeated_cases"></marker>
- <p>A conf case may be repeated a certain number of times
- (specified by an integer) or indefinitely (specified by <c>forever</c>).
- The repetition may also be stopped prematurely if any or all cases
- fail or succeed, i.e. if the property <c>repeat_until_any_fail</c>,
- <c>repeat_until_any_ok</c>, <c>repeat_until_all_fail</c>, or
- <c>repeat_until_all_ok</c> is used. If the basic <c>repeat</c>
- property is used, the status of the test cases is irrelevant to the repeat
- operation.</p>
-
- <p>It is possible to return the status of a conf case (ok or
- failed) to affect the execution of the conf case on the level above.
- This is accomplished by, in the end function, looking up the value
- of <c>tc_group_result</c> in the <c>Config</c> list and checking the
- result of the finished test cases. If the status <c>failed</c> should be
- returned from the conf case as a result, the end function should return
- the value <c>{return_group_result,failed}</c>. The status of a nested conf
- case is taken into account by Test Server when deciding if execution
- should be repeated or not (unless the basic <c>repeat</c> property is used).</p>
-
- <p>The <c>tc_group_result</c> value is a list of status tuples
- with the keys <c>ok</c>, <c>skipped</c> and <c>failed</c>.
The - value of a status tuple is a list containing names of test cases - that have been executed with the corresponding status as result.</p> - - <p>Here's an example of how to return the status from a conf case:</p> - <pre> - conf_end_function(Config) -> - Status = ?config(tc_group_result, Config), - case proplists:get_value(failed, Status) of - [] -> % no failed cases - {return_group_result,ok}; - _Failed -> % one or more failed - {return_group_result,failed} - end.</pre> - - <p>It is also possible in the end function to check the status of - a nested conf case (maybe to determine what status the current conf case should - return). This is as simple as illustrated in the example above, only the - name of the end function of the nested conf case is stored in a tuple - <c>{group_result,EndFunc}</c>, which can be searched for in the status lists. - Example:</p> - <pre> - conf_end_function_X(Config) -> - Status = ?config(tc_group_result, Config), - Failed = proplists:get_value(failed, Status), - case lists:member({group_result,conf_end_function_Y}, Failed) of - true -> - {return_group_result,failed}; - false -> - {return_group_result,ok} - end; - ...</pre> - - <note><p>When a conf case is repeated, the init- and end functions - are also always called with each repetition.</p></note> - </section> - - <section> - <title>Shuffled test case order</title> - <p>The order that test cases in a conf case are executed, is under normal - circumstances the same as the order defined in the test specification. - With the <c>shuffle</c> property set, however, Test Server will instead - execute the test cases in random order.</p> - - <p>The user may provide a seed value (a tuple of three integers) with - the shuffle property: <c>{shuffle,Seed}</c>. This way, the same shuffling - order can be created every time the conf case is executed. If no seed value - is given, Test Server creates a "random" seed for the shuffling operation - (using the return value of <c>erlang:now()</c>). The seed value is always - printed to the log file of the init function so that it can be used to - recreate the same execution order in subsequent test runs.</p> - - <note><p>If execution of a conf case with shuffled test cases is repeated, - the seed will not be reset in between turns.</p></note> - - <p>If a nested conf case is specified in a conf case with a <c>shuffle</c> - property, the execution order of the nested cases in relation to the test cases - (and other conf cases) is also random. The order of the test cases in the nested - conf case is however not random (unless, of course, this one also has a - <c>shuffle</c> property).</p> - </section> - - <section> - <title>Skipping test cases</title> - <p>It is possible to skip certain test cases, for example if you - know beforehand that a specific test case fails. This might be - functionality which isn't yet implemented, a bug that is known but - not yet fixed or some functionality which doesn't work or isn't - applicable on a specific platform. 
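- As a small sketch (the test case and the platform check are hypothetical),
- a case that does not apply on Windows could be skipped from its
- specification clause like this:</p>
- <code type="none">
-my_case(suite) ->
-    case os:type() of
-        {win32, _} -> {skip, "Not applicable on Windows"};
-        _ -> []
-    end;
-my_case(Config) when is_list(Config) ->
-    ok.</code>
- <p>The same <c>{skip,Reason}</c> tuple can also be returned from the other
- places listed below.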
- </p>
- <p>There are several different ways to state that a test case
- should be skipped:</p>
- <list type="bulleted">
- <item>Using the <c>{skip,What}</c> command in a test
- specification file
- </item>
- <item>Returning <c>{skip,Reason}</c> from the
- <c>init_per_testcase/2</c> function
- </item>
- <item>Returning <c>{skip,Reason}</c> from the specification
- clause of the test case
- </item>
- <item>Returning <c>{skip,Reason}</c> from the execution clause
- of the test case
- </item>
- </list>
- <p>The latter of course means that the execution clause is
- actually called, so the author must make sure that the test case
- is not run. For more information about the different clauses in a
- test case, see the chapter about writing test cases.
- </p>
- <p>When a test case is skipped, it will be noted as <c>SKIPPED</c>
- in the HTML log.
- </p>
- </section>
-</chapter>
-
diff --git a/lib/test_server/doc/src/ts.xml b/lib/test_server/doc/src/ts.xml
deleted file mode 100644
index 60dfdbc545..0000000000
--- a/lib/test_server/doc/src/ts.xml
+++ /dev/null
@@ -1,568 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
-
-<erlref>
- <header>
- <copyright>
- <year>2007</year>
- <year>2013</year>
- <holder>Ericsson AB, All Rights Reserved</holder>
- </copyright>
- <legalnotice>
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- The Initial Developer of the Original Code is Ericsson AB.
- </legalnotice>
-
- <title>The OTP Test Server Framework</title>
- <prepared>Mattias Nilsson</prepared>
- <responsible></responsible>
- <docno></docno>
- <approved></approved>
- <checked></checked>
- <date></date>
- <rev></rev>
- <file>ts.xml</file>
- </header>
- <module>ts</module>
- <modulesummary>Test Server Framework for testing OTP</modulesummary>
- <description>
- <p>This is a framework for testing OTP. The <c>ts</c> module
- implements the interface to all the functionality in the
- framework.
- </p>
- <p>The framework is built on top of the Test Server Controller,
- <c>test_server_ctrl</c>, and provides a high level operator
- interface. The main features added by the framework are:
- </p>
- <list type="bulleted">
- <item>Automatic compilation of test suites and data directories
- </item>
- <item>Collection of files in central directories and creation of
- additional HTML pages for better overview.
- </item>
- <item>Single command interface for running all available tests
- </item>
- <item>Spawns a new node with correct parameters before starting
- the test server
- </item>
- <item>Automatically creates the parameter file needed when
- running tests on a remote target
- </item>
- </list>
- <p>More information about the Test Server Framework and how to run
- test cases can be found in the Test Server User's Guide.
- </p>
- <p>For writing your own test server framework, please turn to the
- reference manual for the Test Server Controller and the chapter named
- "Writing your own test server framework" in the Test Server User's
- Guide.
- </p> - <p>SETUP</p> - <p>To be able to run <c>ts</c>, you must first `install' - <c>ts</c> for the current environment. This is done by calling - <c>ts:install/0/1/2</c>. A file called `variables' is created - and used by <c>ts</c> when running test suites. It is not - recommended to edit this file, but it is possible to alter if - <c>ts</c> gets the wrong idea about your environment. - </p> - <p><c>ts:install/0</c> is used if the target platform is the - same as the controller host, i.e. if you run on "local target" - and no options are needed. Then running <c>ts:install/0</c> <c>ts</c> - will run an autoconf script for your current - environment and set up the necessary variables needed by the - test suites. - </p> - <p><c>ts:install/1</c> or <c>ts:install/2</c> is used if the - target platform is different from the controller host, i.e. if - you run on "remote target" or if special options are required - for your system. - </p> - <p>See the reference manual for detailed information about - <c>ts:install/0/1/2</c>. - </p> - <p>Some of the common variables in the 'variables' file are - described below. Do not make any assumptions as of what is found - in this file, as it may change at any time. - </p> - <list> - <item><c>longnames</c><br></br> - Set to true if the system is using fully qualified - nodenames. - </item> - <item><c>platform_id</c><br></br> - This is the currently installed platform identification - string. - </item> - <item><c>platform_filename</c><br></br> - This is the name used to create the final save directory - for test runs. - </item> - <item><c>platform_label</c><br></br> - This is the string presented in the generated test - results index page. - </item> - <item><c>rsh_name</c><br></br> - This is the rsh program to use when starting slave or - peer nodes on a remote host. - </item> - <item><c>erl_flags</c><br></br> - Compile time flags used when compiling test suites. - </item> - <item><c>erl_release</c><br></br> - The Erlang/OTP release being tested. - </item> - <item><c>'EMULATOR'</c><br></br> - The emulator being tested (e.g. beam) - </item> - <item><c>'CPU'</c><br></br> - The CPU in the machine running the tests, e.g. sparc. - </item> - <item><c>target_host</c><br></br> - The target host name - </item> - <item><c>os</c><br></br> - The target operating system, e.g. solaris2.8 - </item> - <item><c>target</c><br></br> - The current target platform, e.g. sparc-sun-solaris2.8 - </item> - </list> - <p>RUNNING TESTS</p> - <p>After installing <c>ts</c>, you can run your test with the - <c>ts:run/0/1/2/3/4</c> functions. These functions, however, - require a special directory structure to be able to find your - test suites. Both the test server and all tests must be located - under your $TESTROOT directory. The test server implementation - shall be located in the directory <c>$TESTROOT/test_server</c> - and for each application there must be a directory named - <c><![CDATA[$TESTROOT/<application>_test]]></c> containing the .spec file - and all test suites and data directories for the - application. Note that there shall only be one .spec file for - each application. - </p> - <p><c>$TESTROOT/test_server</c> must be the current directory - when calling the <c>ts:run/*</c> function. - </p> - <p>All available tests can be found with <c>ts:tests()</c>. This - will list all applications for which a test specification file - <c><![CDATA[../<application>_test/<application>.spec]]></c> can be found. - </p> - <p>To run all these tests, use <c>ts:run()</c>. 
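</p>
    <p>A typical session, started from <c>$TESTROOT/test_server</c>, might
      look as follows. The application names and the returned list are only
      examples, and the <c>batch</c> option is used here to avoid starting
      a new xterm:</p>
    <pre>
    1> ts:install().
    ok
    2> ts:tests().
    [emulator,kernel,stdlib|...]
    3> ts:run(stdlib, [batch]).</pre>
    <p>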
- </p> - <p>To run one or some of the tests, use <c>ts:run(Tests)</c>, - where <c>Tests</c> is the name of the application you want to - test, or a list of such names. - </p> - <p>To run one test suite within a test, use - <c>ts:run(Test,Suite)</c>. - </p> - <p>To run one test case within a suite, use - <c>ts:run(Test,Suite,Case)</c></p> - <p>To all these functions, you can also add a list of - options. Please turn to the reference manual for the <c>ts</c> - module to see the valid options to use. - </p> - <p>The function <c>ts:help()</c> displays some simple help for - the functions in <c>ts</c>. Use this for quick reference. - </p> - <p>LOG FILES</p> - <p>As the execution of the test suites go on, events are logged in - four different ways: - </p> - <list type="bulleted"> - <item>Text to the operator's console.</item> - <item>Suite related information is sent to the major log file.</item> - <item>Case related information is sent to the minor log file.</item> - <item>The HTML log file gets updated with test results.</item> - </list> - <p>Typically the operator, who may run hundreds or thousands of - test cases, doesn't want to fill the screen with details - about/from the specific test cases. By default, the operator will - only see: - </p> - <list type="bulleted"> - <item>A confirmation that the test has started. - </item> - <item>A small note about each failed test case. - </item> - <item>A summary of all the run test cases. - </item> - <item>A confirmation that the test run is complete - </item> - <item>Some special information like error reports and progress - reports, printouts written with erlang:display/1 or io:format/3 - specifically addressed to somewhere other than - <c>standard_io</c>.</item> - </list> - <p>This is enough for the operator to know, and if he wants to dig - in deeper into a specific test case result, he can do so by - following the links in the HTML presentation to take a look in the - major or minor log files. - </p> - <p>A detailed report of the entire test suite is stored in the - major logfile, the exact reason for failure, time spent etc. - </p> - <p>The HTML log file is a summary of the major log file, but gives - a much better overview of the test run. It also has links to every - test case's log file for quick viewing with a HTML browser. - </p> - <p>The minor log file contain full details of every single test - case, each one in a separate file. This way the files should be - easy to compare with previous test runs, even if the set of test - cases change. - </p> - <p>Which information that goes where is user configurable via the - test server controller. Three threshold values determine what - comes out on screen, and in the major or minor log files. The - contents that goes to the HTML log file is fixed, and cannot be - altered. - </p> - - </description> - <funcs> - <func> - <name>install() -> ok | {error, Reason}</name> - <name>install(TargetSystem) -> ok | {error, Reason}</name> - <name>install(Opts) -> ok | {error, Reason}</name> - <name>install(TargetSystem,Opts) -> ok | {error, Reason}</name> - <fsummary>Installs the Test Server Framework</fsummary> - <type> - <v>TargetSystem = {Architecture, TargetHost}</v> - <v>Architecture = atom() or string()</v> - <d>e.g. "ose" or "vxworks_ppc603"</d> - <v>TargetHost = atom() or string()</v> - <d>The name of the target host</d> - <v>Opts = list()</v> - </type> - <desc> - <p>Installs and configures the Test Server Framework for - running test suites. 
If a remote host is to be used, the - <c>TargetSystem</c> argument must be given so that "cross - installation" can be done. Installation is required for - any of the functions in <c>ts</c> to work. - </p> - <p>Opts may be one or more of - </p> - <list> - <item><c>{longnames, Bool}</c><br></br> - Use fully qualified hostnames for test_server and - slave nodes. Bool is <c>true</c> or <c>false</c> (default). - </item> - <item><c>{verbose, Level}</c><br></br> - Verbosity level for test server output, set to 0, 1 or - 2, where 0 is quiet(default). - </item> - <item><c>{hosts, Hosts}</c><br></br> - This is a list of available hosts on which to start - slave nodes. It is used when the <c>{remote, true}</c> - option is given to the <c>test_server:start_node/3</c> - function. Also, if <c>{require_nodenames, Num}</c> is - contained in a test specification file, the generated - nodenames will be spread over all hosts given in this - <c>Hosts</c> list. The hostnames are given as atoms or - strings. - </item> - <item><c>{slavetargets, SlaveTarges}</c><br></br> - For VxWorks only. This is a list of - available hosts where slave nodes can be started. This is - necessary because only one node can run per host in the - VxWorks environment. This is not the same as - <c>{hosts, Hosts}</c> because it is used for all slave nodes - - not only the ones started with <c>{remote, true}</c>. The - hostnames are given as atoms or strings. - </item> - <item><c>{crossroot, TargetErlRoot}</c><br></br> - Erlang root directory on target host - <br></br> -This option is mandatory for remote targets - </item> - <item><c>{master, {MasterHost, MasterCookie}}</c><br></br> - If target is remote and the target node is started as - a slave node, this option - indicates which master and cookie to use. The given master - will also be used as master for slave nodes started with - <c>test_server:start_node/3</c>. It is expected that the - <c>erl_boot_server</c> is started on the master node before - the test is run. If this option is not given, the test - server controller node is used as master and the - <c>erl_boot_server</c> is automatically started. - </item> - <item><c>{erl_start_args, ArgString}</c><br></br> - Additional arguments to be used when starting the test - server controller node. <c>ArgString</c> will be appended to - the command line when starting the erlang node. Note that - this will only affect the startup of the <em>controller node</em>, - i.e. not the target node or any slave nodes - startet from a test case. - </item> - <item><c>{ipv6_hosts, HostList}</c><br></br> - This option will be inserted in the - <c>Config</c> parameter for each test case. <c>HostList</c> - is a list of hosts supporting IPv6. - </item> - </list> - </desc> - </func> - <func> - <name>help() -> ok</name> - <fsummary>Presents simple help on the functions in <c>ts</c></fsummary> - <desc> - <p>Presents simple help on the functions in <c>ts</c>. Useful - for quick reference.</p> - </desc> - </func> - <func> - <name>tests() -> Tests</name> - <fsummary>Returns the list of available tests</fsummary> - <desc> - <p>Returns the list of available tests. This is actually just - a list of all test specification files found by looking up - "../*_test/*.spec". 
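</p>
    <p>For example, with a (hypothetical) layout like the one below,
      <c>ts:tests()</c> would return something like <c>[kernel,stdlib]</c>:</p>
    <pre>
    $TESTROOT/test_server/
    $TESTROOT/kernel_test/kernel.spec
    $TESTROOT/stdlib_test/stdlib.spec</pre>
    <p>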
- </p> - <p>In each ../Name_test/ directory there should be one test - specification file named Name.spec.</p> - </desc> - </func> - <func> - <name>run() -> ok | {error, Reason}</name> - <name>run([all_tests|Opts])</name> - <name>run(Specs)</name> - <name>run(Specs, Opts)</name> - <name>run(Spec, Module)</name> - <name>run(Spec, Module, Opts)</name> - <name>run(Spec, Module, Case)</name> - <name>run(Spec, Module, Case, Opts)</name> - <fsummary>Runs (specified) test suite(s)</fsummary> - <type> - <v>Specs = Spec | [Spec]</v> - <v>Spec = atom()</v> - <v>Module = atom()</v> - <v>Case = atom()</v> - <v>Opts = [Opt]</v> - <v>Opt = batch | verbose | {verbose, Level} | {vars, Vars} | keep_topcase | cover | cover_details |{cover,CoverFile} | {cover_details,CoverFile} | {trace, TraceSpec}</v> - <v>Level = integer(); 0 means silent</v> - <v>Vars = list() of key-value tuples</v> - <v>CoverFile = string(); name of file listing modules to exclude from or include in cover compilation. The name must include full path to the file.</v> - <v>Reason = term()</v> - </type> - <desc> - <p>This function runs test suite(s)/case(s). To be able to run - any tests, ts:install must first be called to create the - <c>variables</c> file needed. To run a whole test specification, - only specify the name of the test specification, and all test - suite modules belonging to that test spec will be run. To run - a single module in a test specification, use the Module - argument to specify the name of the module to run and all test - cases in that module will be run, and to run a specified test - case, specify the name of the test case using the Case - argument. If called with no argument, all test specifications - available will be run. Use ts:tests/0 to see the available - test specifications. - </p> - <p>If the <c>batch</c> option is not given, a new xterm is - started (unix) when <c>ts:run</c> is called. - </p> - <p>The <c>verbose</c> option sets the verbosity level for test - server output. This has the same effect as if given to - <c>ts:install/1/2</c></p> - <p>The <c>vars</c> option can be used for adding configuration - variables that are not in the <c>variables</c> file generated - during installation. Can be any of the <c>Opts</c> valid for - <c>ts:install/1/2</c>. - </p> - <p>The <c>keep_topcase</c> option forces <c>ts</c> to keep the - topcase in your test specification file as is. This option can - only be used if you don't give the <c>Module</c> or - <c>Case</c> parameters to <c>ts:run</c>. The - <c>keep_topcase</c> option is necessary if your topcase - contains anything other than <c><![CDATA[{dir,"../<Name>_test"}]]></c>. If - the option is not used, <c>ts</c> will modify your topcase. - </p> - <p>The <c>cover</c> and <c>cover_details</c> options indicates - that the test shall be run with code coverage - analysis. <c>cover_details</c> means that analysis shall be - done on the most detailed level. If the test is run with a - remote target, this option creates a list of uncovered lines - in each cover compiled module. If the test is run with a local - target, each cover compiled module will be analysed with - <c>cover:analyse_to_file/1</c>. The <c>cover</c> options will - only create an overview of all cover compiled modules with the - number of covered and not covered lines. - </p> - <p>The <c>CoverFile</c> which can be given with the - <c>cover</c> and <c>cover_details</c> options must be the - filename of a file listing modules to be excluded from or - included in the cover compilation. 
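</p>
    <p>For example, a cover run over one application could be started like
      this (the path is hypothetical; note that it must be the full path to
      the file):</p>
    <pre>
    ts:run(kernel, [batch, {cover,"/home/tester/kernel.cover"}]).</pre>
    <p>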
By default, <c>ts</c> - believes that <c>Spec</c> is the name of an OTP application - and that all modules in this application shall be cover - compiled. The <c>CoverFile</c> can exclude modules that belong - to the application and add modules that don't belong to the - application. The file can have the following entries:</p> - <code type="none"> -{exclude, all | ExcludeModuleList}. -{include, IncludeModuleList}. </code> - <p>Note that each line must end with a full - stop. <c>ExcludeModuleList</c> and <c>IncludeModuleList</c> - are lists of atoms, where each atom is a module name. - </p> - <p>If the <c>cover</c> or <c>cover_details</c> options are - given on their own, the directory <c><![CDATA[../<Spec>_test]]></c> is - searched for a <c>CoverFile</c> named <c><![CDATA[<Spec>.cover]]></c>. If - this file is not found, <c>Spec</c> is assumed to be the name - of an OTP application, and all modules in the <c>ebin</c> - directory for the application are cover compiled. The - <c>ebin</c> directory is found by adding <c>ebin</c> to - <c>code:lib_dir(Spec)</c>. - </p> - <p>The same cover compiled code will be loaded on all slave or - peer nodes started with <c>test_server:start_node/3</c>. The - exception is nodes that run an old version of OTP. If the loading - fails, the node will simply not be a part of the coverage - analysis. Note that slave and peer nodes must be stopped with - <c>test_server:stop_node/1</c> for the node to be part of the - coverage analysis, else the test server will not be able to - fetch coverage data from the node. - </p> - <p>The <c>trace</c> option is used to turn on call trace on - target and on slave or peer nodes started with - <c>test_server:start_node/3</c>. <c>TraceSpec</c> can be the - name of a trace information file, or a list of elements like - the ones in a trace information file. Please turn to the - reference manual for <c>test_server_ctrl:trc/1</c> for details - about the trace information file. - </p> - </desc> - </func> - <func> - <name>cross_cover_analyse(Level) -> ok</name> - <name>cross_cover_analyse([Level]) -> ok</name> - <fsummary>Analyse cover data collected from all tests</fsummary> - <desc> - <p>Analyse cover data collected from all tests. - </p> - <p>See test_server_ctrl:cross_cover_analyse/2 - </p> - </desc> - </func> - <func> - <name>r() -> ok</name> - <name>r(Opts) -> ok</name> - <name>r(SpecOrSuite) -> ok</name> - <name>r(SpecOrSuite,Opts) -> ok</name> - <name>r(Suite,Case) -> ok</name> - <name>r(Suite,Case,Opts) -> ok</name> - <fsummary>Run test suite or test case without <c>ts</c>installed</fsummary> - <type> - <v>SpecOrSuite = Spec | Suite</v> - <v>Spec = string()</v> - <d>"Name.spec" or "Name.spec.OsType", where OsType is vxworks</d> - <v>Suite = atom()</v> - <v>Case = atom()</v> - <v>Opts = [Opt]</v> - <v>Opt = {Cover,AppOrCoverFile} | {Cover,Application,CoverFile}</v> - <v>Cover = cover | cover_details</v> - <v>AppOrCoverFile = Application | CoverFile</v> - <v>Application = atom()</v> - <d>OTP application to cover compile</d> - <v>CoverFile = string()</v> - <d>Name of file listing modules to exclude from or include in cover compilation</d> - </type> - <desc> - <p>This function can be used to run a test suites or test - cases directly, without any of the additional features added - by the test server framework. 
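</p>
    <p>A quick sketch of running one compiled suite, and then a single case
      in it, with <c>ts:r</c> (the suite and case names are hypothetical):</p>
    <pre>
    1> ts:r(my_SUITE).
    2> ts:r(my_SUITE, my_case).</pre>
    <p>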
It is simply a wrapper function - for the <c>add_dir</c>, <c>add_spec</c>, <c>add_module</c> and - <c>add_case</c> functions in <c>test_server_ctrl</c>: - </p> - <p><c>r() -> add_dir(".")</c> <br></br> -<c>r(Spec) -> add_spec(Spec)</c> <br></br> -<c>r(Suite) -> add_module(Suite)</c> <br></br> -<c>r(Suite,Case) -> add_case(Suite,Case)</c></p> - <p>To use this function, it is required that the test suite is - compiled and in the code path of the node where the function - is called. The function can be used without having <c>ts</c> - installed. - </p> - <p>For information about the <c>cover</c> and - <c>cover_details</c> options, see <c>test_server_ctrl:cover/2/3</c>.</p> - </desc> - </func> - <func> - <name>estone() -> ok | {error, Reason}</name> - <name>estone(Opts) -> ok</name> - <fsummary>Runs the EStone test</fsummary> - <desc> - <p>This function runs the EStone test. It is a shortcut for - running the test suite <c>estone_SUITE</c> in the - <c>kernel</c> application. - </p> - <p><c>Opts</c> is the same as the <c>Opts</c> argument for the - <c>ts:run</c> functions.</p> - </desc> - </func> - </funcs> - - <section> - <title>Makfile.src in Data Directory</title> - <p>If a data directory contains code which must be compiled before - the test suite is run, a makefile source called - <c>Makefile.src</c> can be placed in the data directory. This file - will be converted to a valid makefile by <c>ts:run/0/1/2/3/4</c>. - </p> - <p>The reason for generating the makefile is that you can use - variables from the <c>variables</c> file which was generated by - <c>ts:install/0/1/2</c>. All occurrences of <c>@Key@</c> in - <c>Makefile.src</c> is substituted by the <c>Value</c> from - <c>{Key,Value}</c> found in the <c>variables</c> file. Example: - </p> - <p>Cut from <c>variables</c>:</p> - <code type="none"> - ... - {'EMULATOR',"beam"}. - {'CFLAGS',"-g -O2"}. - {'LD',"$(CC) $(CFLAGS)"}. - {'CC',"gcc"}. - ... - </code> - <p><c>Makefile.src</c> for compiling erlang code could look - something like this:</p> - <code type="none"> - EFLAGS=+debug_info - - all: ordsets1.@EMULATOR@ - - ordsets1.@EMULATOR@: ordsets1.erl - erlc $(EFLAGS) ordsets1.erl - </code> - <p><c>Makefile.src</c> for compiling c code could look - something like this:</p> - <code type="none"> - CC = @CC@ - LD = @LD@ - CFLAGS = @CFLAGS@ -I@erl_include@ @DEFS@ - CROSSLDFLAGS = @CROSSLDFLAGS@ - - PROGS = nfs_check@exe@ - - all: $(PROGS) - - nfs_check@exe@: nfs_check@obj@ - $(LD) $(CROSSLDFLAGS) -o nfs_check nfs_check@obj@ @LIBS@ - - nfs_check@obj@: nfs_check.c - $(CC) -c -o nfs_check@obj@ $(CFLAGS) nfs_check.c - </code> - </section> -</erlref> - diff --git a/lib/test_server/doc/src/why_test_chapter.xml b/lib/test_server/doc/src/why_test_chapter.xml deleted file mode 100644 index 3d0e8271b1..0000000000 --- a/lib/test_server/doc/src/why_test_chapter.xml +++ /dev/null @@ -1,141 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Why Test</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date></date> - <rev></rev> - </header> - - <section> - <title>Goals</title> - <p>It's not possible to prove that a program is correct by - testing. On the contrary, it has been formally proven that it is - impossible to prove programs in general by testing. Theoretical - program proofs or plain examination of code may be viable options - for those that wish to certify that a program is correct. The test - server, as it is based on testing, cannot be used for - certification. Its intended use is instead to (cost effectively) - <em>find bugs</em>. A successful test suite is one that reveals a - bug. If a test suite results in Ok, then we know very little that - we didn't know before. - </p> - </section> - - <section> - <title>What to test?</title> - <p>There are many kinds of test suites. Some concentrate on - calling every function in the interface to some module or - server. Some other do the same, but uses all kinds of illegal - parameters, and verifies that the server stays alive and rejects - the requests with reasonable error codes. Some test suites - simulate an application (typically consisting of a few modules of - an application), some try to do tricky requests in general, some - test suites even test internal functions. - </p> - <p>Another interesting category of test suites are the ones that - check that fixed bugs don't reoccur. When a bugfix is introduced, - a test case that checks for that specific bug should be written - and submitted to the affected test suite(s). - </p> - <p>Aim for finding bugs. Write whatever test that has the highest - probability of finding a bug, now or in the future. Concentrate - more on the critical parts. Bugs in critical subsystems are a lot - more expensive than others. - </p> - <p>Aim for functionality testing rather than implementation - details. Implementation details change quite often, and the test - suites should be long lived. Often implementation details differ - on different platforms and versions. If implementation details - have to be tested, try to factor them out into separate test - cases. Later on these test cases may be rewritten, or just - skipped. - </p> - <p>Also, aim for testing everything once, no less, no more. It's - not effective having every test case fail just because one - function in the interface changed. - </p> - </section> - - <section> - <title>How much to test</title> - <p>There is a unix shell script that counts the number of non - commented words (lines and characters too) of source code in each - application's test directory and divides with the number of such - source words in the src directory. This is a measure of how much - test code there is. - </p> - <p>There has been much debate over how much test code, compared to - production code, should be written in a project. More test code - finds more bugs, but test code needs to be maintained just like - the production code, and it's expensive to write it in the first - place. 
In several articles from relatively mature software - organizations that I have read, the amount of test code has been - about the same as the production code. </p> - <p>In OTP, at the time of - writing, few applications come even close to this, some have no - test code at all. - </p> - - <section> - <title>Full coverage</title> - <p>It is possible to cover compile the modules being tested - before running the test suites. Doing so displays which branches - of the code that are tested by the test suite, and which are - not. Many use this as a measure of a good test suite. When every - single line of source code is covered once by the test suite, - the test suite is finished. - </p> - <p>A coverage of 100% still proves nothing, though. It doesn't - mean that the code is error free, that everything is tested. For - instance, if a function contains a division, it has to be - executed at least twice. Once with parameters that cause - division by zero, and once with other parameters. - </p> - <p>High degree of coverage is good of course, it means that no - major parts of the code has been left untested. It's another - question whether it's cost effective. You're only likely to find - 50% more bugs when going from 67% to 100% coverage, but the work - (cost) is maybe 200% as large, or more, because reaching all of - those obscure branches is usually complicated. - </p> - <p>Again, the reason for testing with the test server is to find - bugs, not to create certificates of valid code. Maximizing the - number of found bugs per hour probably means not going for 100% - coverage. For some module the optimum may be 70%, for some other - maybe 250%. 100% shouldn't be a goal in itself.</p> - </section> - - <section> - <title>User interface testing</title> - <p>It is very difficult to do sensible testing of user - interfaces, especially the graphic ones. The test server has - some support for capturing the text I/O that goes to the user, - but none for graphics. There are several tools on the market - that help with this.</p> - </section> - </section> -</chapter> - diff --git a/lib/test_server/doc/src/write_framework_chapter.xml b/lib/test_server/doc/src/write_framework_chapter.xml deleted file mode 100644 index d10b580c34..0000000000 --- a/lib/test_server/doc/src/write_framework_chapter.xml +++ /dev/null @@ -1,160 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Write you own test server framework</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date></date> - <rev></rev> - <file>write_framework_chapter.xml</file> - </header> - - <section> - <title>Introduction</title> - <p>The test server controller can be interfaced from the operating - system or from within Erlang. 
The nature of your new framework - will decide which interface to use. If you want your framework to - start a new node for each test, the operating system interface is - very convenient. If your node is already started, going from - within Erlang might be a more flexible solution. - </p> - <p>The two methods are described below. - </p> - </section> - - <section> - <title>Interfacing the test server controller from Erlang</title> - <p>Using the test server from Erlang means that you have to start - the test server and then add test jobs. Use - <c>test_server_ctrl:start/0</c> to start the test server, and - <c>test_server_ctrl:stop/0</c> to stop it. - </p> - - <section> - <title>Adding test jobs</title> - <p>There are many commands available for adding test cases to - the test server's job queue: <br></br> -</p> - <list type="bulleted"> - <item>Single test case <br></br> -<c>test_server_ctrl:add_case/2/3</c></item> - <item>Multiple test cases from same suite <br></br> -<c>test_server_ctrl:add_cases/2/3</c></item> - <item>Test suite module or modules <br></br> -<c>test_server_ctrl:add_module/1/2</c></item> - <item>Some or all test suite modules in a directory <br></br> -<c>test_server_ctrl:add_dir/2/3</c></item> - <item>Test cases specified in a test specification file <br></br> -<c>test_server_ctrl:add_spec/1</c></item> - </list> - <p>All test suites are given a unique name, which is usually - given when the test suite is added to the job queue. In some - cases, a default name is used, as in the case when a module is - added without a specified name. The test job name is used to - store logfiles, which are stored in the `name.logs' directory - under the current directory. - </p> - <p>See the reference manual for details about the functions for - adding test jobs. - </p> - </section> - </section> - - <section> - <title>Interfacing the test server controller from the operating system.</title> - <p>The function <c>run_test/1</c> is your interface in the test - server controller if you wish to use it from the operating - system. You simply start an erlang shell and invoke this function - with the <c>-s</c> option. <c>run_test/1</c> starts the test - server, runs the test specified by the command line and stops the - test server. The argument to <c>run_test/1</c> is a list of - command line flags, typically - <c>['KEY1', Value1, 'KEY2', Value2, ...]</c>. - The valid command line flags are listed in the reference manual - for <c>test_server_ctrl</c>. - </p> - <p>A typical command line may look like this <br></br> -<c>erl -noshell -s test_server_ctrl run_test KEY1 Value1 KEY2 Value2 ... -s erlang halt</c></p> - <p>Or make an alias (this is for unix/tcsh) <br></br> -<c>alias erl_test 'erl -noshell -s test_server_ctrl run_test \!* -s erlang halt'</c></p> - <p>And then use it like this <br></br> -<c>erl_test KEY1 Value1 KEY2 Value2 ...</c> <br></br> -</p> - - <section> - <title>An Example</title> - <p>An example of starting a test run from the command line <br></br> -</p> - <p><c>erl -name test_srv -noshell -rsh /home/super/otp/bin/ctrsh </c> <br></br> -<c>-pa /clearcase/otp/erts/lib/kernel/test </c> <br></br> -<c>-boot start_sasl -sasl errlog_type error </c> <br></br> -<c>-s test_server_ctrl run_test SPEC kernel.spec -s erlang halt</c> <br></br> -</p> - </section> - </section> - - <section> - <title>Framework callback functions</title> - <p>By defining the environment variable - <c>TEST_SERVER_FRAMEWORK</c> to a module name, the framework - callback functions can be used. 
The framework callback functions - are called by the test server in order let the framework interact - with the execution of the tests and to keep the framework upto - date with information about the test progress. - </p> - <p>The framework callback functions are described in the reference - manual for <c>test_server_ctrl</c>. - </p> - <p>Note that this topic is in an early stage of development, and - changes might occur. - </p> - </section> - - <section> - <title>Other concerns</title> - <p>Some things to think about when writing you own test server - framework: - </p> - <list type="bulleted"> - <item><c>emulator version</c> - Make sure that the intended - version of the emulator is started. - </item> - <item><c>operating system path</c> - If test cases use port - programs, make sure the paths are correct. - </item> - <item><c>recompilation</c> - Make sure all test suites are fresh - compiled. - </item> - <item><c>test_server.hrl</c> - Make sure the - <c>test_server.hrl</c> file is in the include path when - compiling test suites. - </item> - <item><c>running applications</c> - Some test suites require - some applications to be running (e.g. sasl). Make sure they are - started. - </item> - </list> - </section> -</chapter> - diff --git a/lib/test_server/doc/src/write_test_chapter.xml b/lib/test_server/doc/src/write_test_chapter.xml deleted file mode 100644 index c3e1881b8a..0000000000 --- a/lib/test_server/doc/src/write_test_chapter.xml +++ /dev/null @@ -1,228 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE chapter SYSTEM "chapter.dtd"> - -<chapter> - <header> - <copyright> - <year>2002</year><year>2013</year> - <holder>Ericsson AB. All Rights Reserved.</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - </legalnotice> - - <title>Writing Test Suites</title> - <prepared>Siri Hansen</prepared> - <docno></docno> - <date></date> - <rev></rev> - <file>write_test_chapter.xml</file> - </header> - - <section> - <title>Support for test suite authors</title> - <p>The <c>test_server</c> module provides some useful functions - to support the test suite author. This includes: - </p> - <list type="bulleted"> - <item>Starting and stopping slave or peer nodes</item> - <item>Capturing and checking stdout output</item> - <item>Retrieving and flushing process message queue</item> - <item>Watchdog timers</item> - <item>Checking that a function crashes</item> - <item>Checking that a function succeeds at least m out of n times</item> - <item>Checking .app files</item> - </list> - <p>Please turn to the reference manual for the <c>test_server</c> - module for details about these functions. - </p> - </section> - - <section> - <title>Test suites</title> - <p>A test suite is an ordinary Erlang module that contains test - cases. It's recommended that the module has a name on the form - *_SUITE.erl. Otherwise, the directory function will not find the - modules (by default). 
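</p>
    <p>A minimal skeleton, assuming a hypothetical module <c>my_SUITE</c>,
      might look as follows. The individual parts (the include file,
      <c>all(suite)</c>, the init and end functions and the test cases
      themselves) are explained in the sections below:</p>
    <pre>
    -module(my_SUITE).
    -include_lib("test_server/include/test_server.hrl").

    -export([all/1, init_per_testcase/2, end_per_testcase/2, my_case/1]).

    all(suite) -> [my_case].

    init_per_testcase(_Case, Config) ->
        %% per-case watchdog timer, cancelled in end_per_testcase/2
        Dog = test_server:timetrap(test_server:seconds(30)),
        [{watchdog,Dog}|Config].

    end_per_testcase(_Case, Config) ->
        test_server:timetrap_cancel(?config(watchdog, Config)),
        ok.

    my_case(Config) when is_list(Config) ->
        ok.</pre>
    <p>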
- </p> - <p>For some of the test server support, the test server include - file <c>test_server.hrl</c> must be included. Never include it - with the full path, for portability reasons. Use the compiler - include directive instead. - </p> - <p>The special function <c>all(suite)</c> in each module is called - to get the test specification for that module. The function - typically returns a list of test cases in that module, but any - test specification could be returned. Please see the chapter - about test specifications for details about this. - </p> - </section> - - <section> - <title>Init per test case</title> - <p>In each test suite module, the functions - <c>init_per_testcase/2</c> and <c>end_per_testcase/2</c> must be - implemented. - </p> - <p><c>init_per_testcase</c> is called before each test case in the - test suite, giving a (limited) possibility for initialization. - </p> - <p><c>end_per_testcase/2</c> is called after each test case is - completed, giving a possibility to clean up. - </p> - <p>The first argument to these functions is the name of the test - case. This can be used to do individual initialization and cleanup for - each test cases. - </p> - <p>The second argument is a list of tuples called - <c>Config</c>. The first element in a <c>Config</c> tuple - should be an atom - a key value to be used for searching. - <c>init_per_testcase/2</c> may modify the <c>Config</c> - parameter or just return it as is. Whatever is retuned by - <c>init_per_testcase/2</c> is given as <c>Config</c> parameter to - the test case itself. - </p> - <p>The return value of <c>end_per_testcase/2</c> is ignored by the - test server. - </p> - </section> - - <section> - <title>Test cases</title> - <p>The smallest unit that the test server is concerned with is a - test case. Each test case can in turn test many things, for - example make several calls to the same interface function with - different parameters. - </p> - <p>It is possible to put many or few tests into each test - case. How many things each test case tests is up to the author, - but here are some things to keep in mind. - </p> - <p>Very small test cases often leads to more code, since - initialization has to be duplicated. Larger code, especially with - a lot of duplication, increases maintenance and reduces - readability. - </p> - <p>Larger test cases make it harder to tell what went wrong if it - fails, and force us to skip larger portions of test code if a - specific part fails. These effects are accentuated when running on - multiple platforms because test cases often have to be skipped. - </p> - <p>A test case generally consists of three parts, the - documentation part, the specification part and the execution - part. These are implemented as three clauses of the same function. - </p> - <p>The documentation clause matches the argument '<c>doc</c>' and - returns a list for strings describing what the test case tests. - </p> - <p>The specification clause matches the argument '<c>suite</c>' - and returns the test specification for this particular test - case. If the test specification is an empty list, this indicates - that the test case is a leaf test case, i.e. one to be executed. - </p> - <p>The execution clause implements the actual test case. It takes - one argument, <c>Config</c>, which contain configuration - information like <c>data_dir</c> and <c>priv_dir</c>. See <seealso marker="#data_priv_dir">Data and Private Directories</seealso> for - more information about these. 
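</p>
    <p>As a sketch (the case and file names are made up), a complete test
      case with its three clauses could look like this, reading an input
      file from <c>data_dir</c> and writing scratch output under
      <c>priv_dir</c>:</p>
    <pre>
    ordsets1(doc) -> ["Simple test reading from data_dir"];
    ordsets1(suite) -> [];
    ordsets1(Config) when is_list(Config) ->
        DataDir = ?config(data_dir, Config),
        PrivDir = ?config(priv_dir, Config),
        {ok,Bin} = file:read_file(filename:join(DataDir, "input.txt")),
        ok = file:write_file(filename:join(PrivDir, "result.txt"), Bin),
        ok.</pre>
    <p>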
- </p> - <p>The <c>Config</c> variable can also contain the - <c>nodenames</c> key, if requested by the <c>require_nodenames</c> - command in the test suite specification file. All <c>Config</c> - items should be extracted using the <c>?config</c> macro. This is - to ensure future compatibility if the <c>Config</c> format - changes. See the reference manual for <c>test_server</c> for - details about this macro. - </p> - <p>If the execution clause crashes or exits, it is considered a - failure. If it returns <c>{skip,Reason}</c>, the test case is - considered skipped. If it returns <c>{comment,String}</c>, - the string will be added in the 'Comment' field on the HTML - result page. If the execution clause returns anything else, it is - considered a success, unless it is <c>{'EXIT',Reason}</c> or - <c>{'EXIT',Pid,Reason}</c> which can't be distinguished from a - crash, and thus will be considered a failure. - </p> - </section> - - <section> - <marker id="data_priv_dir"></marker> - <title>Data and Private Directories</title> - <p>The data directory (<c>data_dir</c>) is the directory where the test - module has its own files needed for the testing. A compiler test - case may have source files to feed into the compiler, a release - upgrade test case may have some old and new release of - something. A graphics test case may have some icons and a test - case doing a lot of math with bignums might store the correct - answers there. The name of the <c>data_dir</c> is the the name of - the test suite and then "_data". For example, - <c>"some_path/foo_SUITE.beam"</c> has the data directory - <c>"some_path/foo_SUITE_data/"</c>. - </p> - <p>The <c>priv_dir</c> is the test suite's private directory. This - directory should be used when a test case needs to write to - files. The name of the private directory is generated by the test - server, which also creates the directory. - </p> - <p><em>Warning:</em> Do not depend on current directory to be - writable, or to point to anything in particular. All scratch files - are to be written in the <c>priv_dir</c>, and all data files found - in <c>data_dir</c>. If the current directory has to be something - specific, it must be set with <c>file:set_cwd/1</c>. - </p> - </section> - - <section> - <title>Execution environment</title> - <p>Each time a test case is about to be executed, a new process is - created with <c>spawn_link</c>. This is so that the test case will - have no dependencies to earlier tests, with respect to process flags, - process links, messages in the queue, other processes having registered - the process, etc. As little as possible is done to change the initial - context of the process (what is created by plain spawn). Here is a - list of differences: - </p> - <list type="bulleted"> - <item>It has a link to the test server. If this link is removed, - the test server will not know when the test case is finished, - just wait infinitely. - </item> - <item>It often holds a few items in the process dictionary, all - with names starting with '<c>test_server_</c>'. This is to keep - track of if/where a test case fails. - </item> - <item>There is a top-level catch. All of the test case code is - catched, so that the location of a crash can be reported back to - the test server. If the test case process is killed by another - process (thus the catch code is never executed) the test server - is not able to tell where the test case was executing. - </item> - <item>It has a special group leader implemented by the test - server. 
This way the test server is able to capture the io that - the test case provokes. This is also used by some of the test - server support functions. - </item> - </list> - <p>There is no time limit for a test case, unless the test case - itself imposes such a limit, by calling - <c>test_server:timetrap/1</c> for example. The call can be made - in each test case, or in the <c>init_per_testcase/2</c> - function. Make sure to call the corresponding - <c>test_server:timetrap_cancel/1</c> function as well, e.g in the - <c>end_per_testcase/2</c> function, or else the test cases will - always fail. - </p> - </section> - -</chapter> - diff --git a/lib/test_server/ebin/.gitignore b/lib/test_server/ebin/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 --- a/lib/test_server/ebin/.gitignore +++ /dev/null diff --git a/lib/test_server/include/test_server.hrl b/lib/test_server/include/test_server.hrl deleted file mode 100644 index 77864ef3b5..0000000000 --- a/lib/test_server/include/test_server.hrl +++ /dev/null @@ -1,32 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2011. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - --ifdef(line_trace). --line_trace(true). --define(line, - io:format(lists:concat([?MODULE,",",integer_to_list(?LINE),": ~p"]), - [erlang:monotonic_time()-erlang:system_info(start_time)]),). --else. --define(line,). --endif. --define(t,test_server). --define(config,test_server:lookup_config). - - diff --git a/lib/test_server/include/test_server_line.hrl b/lib/test_server/include/test_server_line.hrl deleted file mode 100644 index 37da956cd0..0000000000 --- a/lib/test_server/include/test_server_line.hrl +++ /dev/null @@ -1,20 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2004-2011. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. 
-%% -%% %CopyrightEnd% -%% - diff --git a/lib/test_server/info b/lib/test_server/info deleted file mode 100644 index 7a9ed6c700..0000000000 --- a/lib/test_server/info +++ /dev/null @@ -1,2 +0,0 @@ -group: test Test Applications -short: The OTP Test Server diff --git a/lib/test_server/prebuild.skip b/lib/test_server/prebuild.skip deleted file mode 100644 index 8ee4101f6a..0000000000 --- a/lib/test_server/prebuild.skip +++ /dev/null @@ -1 +0,0 @@ -src/autom4te.cache diff --git a/lib/test_server/src/Makefile b/lib/test_server/src/Makefile deleted file mode 100644 index 6a26ee2933..0000000000 --- a/lib/test_server/src/Makefile +++ /dev/null @@ -1,144 +0,0 @@ -# -# %CopyrightBegin% -# -# Copyright Ericsson AB 1996-2013. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# %CopyrightEnd% -# - -include $(ERL_TOP)/make/target.mk - -# ---------------------------------------------------- -# Configuration info. -# ---------------------------------------------------- -include $(ERL_TOP)/make/$(TARGET)/otp.mk - -# ---------------------------------------------------- -# Application version -# ---------------------------------------------------- -include ../vsn.mk -VSN=$(TEST_SERVER_VSN) - -# ---------------------------------------------------- -# Release directory specification -# ---------------------------------------------------- -RELSYSDIR = $(RELEASE_PATH)/lib/test_server-$(VSN) - -# ---------------------------------------------------- -# Target Specs -# ---------------------------------------------------- - -MODULES= test_server_ctrl \ - test_server_gl \ - test_server_io \ - test_server_node \ - test_server \ - test_server_sup \ - erl2html2 - -TS_MODULES= \ - ts \ - ts_run \ - ts_lib \ - ts_make \ - ts_erl_config \ - ts_autoconf_win32 \ - ts_install \ - ts_install_cth \ - ts_benchmark - -TARGET_MODULES= $(MODULES:%=$(EBIN)/%) -TS_TARGET_MODULES= $(TS_MODULES:%=$(EBIN)/%) - -ERL_FILES= $(MODULES:=.erl) -TS_ERL_FILES= $(TS_MODULES:=.erl) -HRL_FILES = ../include/test_server.hrl ../include/test_server_line.hrl -INTERNAL_HRL_FILES = test_server_internal.hrl -TS_HRL_FILES= ts.hrl -C_FILES = -AUTOCONF_FILES = configure.in conf_vars.in -PROGRAMS = configure config.sub config.guess install-sh -CONFIG = ts.config ts.unix.config ts.win32.config - -TARGET_FILES = $(MODULES:%=$(EBIN)/%.$(EMULATOR)) \ - $(APP_TARGET) $(APPUP_TARGET) -TS_TARGET_FILES = $(TS_MODULES:%=$(EBIN)/%.$(EMULATOR)) - -TARGETS = $(MODULES:%=$(EBIN)/%.$(EMULATOR)) $(PROGRAMS) \ - $(APP_TARGET) $(APPUP_TARGET) -TS_TARGETS = $(TS_MODULES:%=$(EBIN)/%.$(EMULATOR)) - -APP_FILE= test_server.app -APP_SRC= $(APP_FILE).src -APP_TARGET= $(EBIN)/$(APP_FILE) - -APPUP_FILE= test_server.appup -APPUP_SRC= $(APPUP_FILE).src -APPUP_TARGET= $(EBIN)/$(APPUP_FILE) - -# ---------------------------------------------------- -# FLAGS -# ---------------------------------------------------- -ERL_COMPILE_FLAGS += -I../include -Werror - -# ---------------------------------------------------- -# Targets -# 
---------------------------------------------------- - -tests debug opt: $(TARGETS) $(TS_TARGETS) - -clean: - rm -f $(TARGET_FILES) $(TS_TARGET_FILES) - rm -f core - -docs: - -configure: configure.in - autoconf configure.in > configure - -# ---------------------------------------------------- -# Special Build Targets -# ---------------------------------------------------- -$(APP_TARGET): $(APP_SRC) ../vsn.mk - $(vsn_verbose)sed -e 's;%VSN%;$(VSN);' $< > $@ - -$(APPUP_TARGET): $(APPUP_SRC) ../vsn.mk - $(vsn_verbose)sed -e 's;%VSN%;$(VSN);' $< > $@ - -# ---------------------------------------------------- -# Release Target -# ---------------------------------------------------- -include $(ERL_TOP)/make/otp_release_targets.mk - -release_spec: opt - $(INSTALL_DIR) "$(RELSYSDIR)/src" - $(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)/src" - $(INSTALL_DATA) $(INTERNAL_HRL_FILES) $(TS_HRL_FILES) "$(RELSYSDIR)/src" - $(INSTALL_DIR) "$(RELSYSDIR)/include" - $(INSTALL_DATA) $(HRL_FILES) "$(RELSYSDIR)/include" - $(INSTALL_DIR) "$(RELSYSDIR)/ebin" - $(INSTALL_DATA) $(TARGET_FILES) "$(RELSYSDIR)/ebin" - -release_tests_spec: opt - $(INSTALL_DIR) "$(RELEASE_PATH)/test_server" - $(INSTALL_DATA) $(ERL_FILES) $(TS_ERL_FILES) \ - $(HRL_FILES) $(INTERNAL_HRL_FILES) $(TS_HRL_FILES) \ - $(TS_TARGET_FILES) \ - $(AUTOCONF_FILES) $(C_FILES) $(CONFIG) \ - "$(RELEASE_PATH)/test_server" - $(INSTALL_SCRIPT) $(PROGRAMS) "$(RELEASE_PATH)/test_server" - -release_docs_spec: - diff --git a/lib/test_server/src/conf_vars.in b/lib/test_server/src/conf_vars.in deleted file mode 100644 index 7c55d7b9ed..0000000000 --- a/lib/test_server/src/conf_vars.in +++ /dev/null @@ -1,25 +0,0 @@ -CC:@CC@ -LD:@LD@ -CFLAGS:@CFLAGS@ -EI_CFLAGS:@EI_CFLAGS@ -ERTS_CFLAGS:@ERTS_CFLAGS@ -CROSSLDFLAGS:@CROSSLDFLAGS@ -SHLIB_LD:@SHLIB_LD@ -SHLIB_LDFLAGS:@SHLIB_LDFLAGS@ -SHLIB_LDLIBS:@SHLIB_LDLIBS@ -SHLIB_CFLAGS:@SHLIB_CFLAGS@ -SHLIB_EXTRACT_ALL:@SHLIB_EXTRACT_ALL@ -dll:@SHLIB_SUFFIX@ -DEFS:@DEFS@ -ERTS_LIBS:@ERTS_LIBS@ -LIBS:@LIBS@ -target_host:@target_host@ -CPU:@host_cpu@ -os:@host_os@ -target:@host@ -obj:@obj@ -exe:@exe@ -SSLEAY_ROOT:@SSLEAY_ROOT@ -JAVAC:@JAVAC@ -make_command:@make_command@ -test_c_compiler:@test_c_compiler@ diff --git a/lib/test_server/src/configure.in b/lib/test_server/src/configure.in deleted file mode 100644 index 001de72a1e..0000000000 --- a/lib/test_server/src/configure.in +++ /dev/null @@ -1,509 +0,0 @@ -dnl Process this file with autoconf to produce a configure script for Erlang. -dnl -dnl %CopyrightBegin% -dnl -dnl Copyright Ericsson AB 1997-2014. All Rights Reserved. -dnl -dnl Licensed under the Apache License, Version 2.0 (the "License"); -dnl you may not use this file except in compliance with the License. -dnl You may obtain a copy of the License at -dnl -dnl http://www.apache.org/licenses/LICENSE-2.0 -dnl -dnl Unless required by applicable law or agreed to in writing, software -dnl distributed under the License is distributed on an "AS IS" BASIS, -dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -dnl See the License for the specific language governing permissions and -dnl limitations under the License. -dnl -dnl %CopyrightEnd% -dnl - -AC_INIT(conf_vars.in) - -AC_CANONICAL_HOST - -dnl Checks for programs. 
-AC_PROG_CC - -DEBUG_FLAGS="-g -DDEBUG" -if test "$GCC" = yes; then - DEBUG_FLAGS="$DEBUG_FLAGS -Wall $CFLAGS" -fi -AC_SUBST(DEBUG_FLAGS) - -AC_ARG_ENABLE(debug-mode, -[ --enable-debug-mode enable debug mode], -[ case "$enableval" in - no) ;; - *) CFLAGS=$DEBUG_FLAGS ;; - esac ], ) - -AC_ARG_ENABLE(m64-build, -AS_HELP_STRING([--enable-m64-build], - [build 64-bit binaries using the -m64 flag to (g)cc]), -[ case "$enableval" in - no) enable_m64_build=no ;; - *) enable_m64_build=yes ;; - esac -],enable_m64_build=no) - -AC_ARG_ENABLE(m32-build, -AS_HELP_STRING([--enable-m32-build], - [build 32-bit binaries using the -m32 flag to (g)cc]), -[ case "$enableval" in - no) enable_m32_build=no ;; - *) enable_m32_build=yes ;; - esac -],enable_m32_build=no) - -no_mXX_LDFLAGS="$LDFLAGS" - -if test X${enable_m64_build} = Xyes; then - CFLAGS="-m64 $CFLAGS" - LDFLAGS="-m64 $LDFLAGS" -fi -if test X${enable_m32_build} = Xyes; then - CFLAGS="-m32 $CFLAGS" - LDFLAGS="-m32 $LDFLAGS" -fi - -AC_CHECK_LIB(m, sin) - -#-------------------------------------------------------------------- -# Interactive UNIX requires -linet instead of -lsocket, plus it -# needs net/errno.h to define the socket-related error codes. -#-------------------------------------------------------------------- - -AC_CHECK_LIB(inet, main, [LIBS="$LIBS -linet"]) -AC_CHECK_HEADER(net/errno.h, AC_DEFINE(HAVE_NET_ERRNO_H)) - -#-------------------------------------------------------------------- -# Linux/tcp.h may be needed for sockopt test in kernel -#-------------------------------------------------------------------- - -AC_CHECK_HEADER(linux/tcp.h, AC_DEFINE(HAVE_LINUX_TCP_H)) -AC_MSG_CHECKING(for sane linux/tcp.h) -AC_TRY_COMPILE([#include <stdio.h> - #include <stdlib.h> - #include <string.h> - #include <unistd.h> - #include <stdarg.h> - #include <sys/types.h> - #include <sys/socket.h> - #include <sys/wait.h> - #include <linux/tcp.h> - #include <netinet/in.h> - #include <netdb.h>], - [return 0;], - have_sane_linux_tcp_h=yes, - have_sane_linux_tcp_h=no) - -if test $have_sane_linux_tcp_h = yes; then - AC_DEFINE(HAVE_SANE_LINUX_TCP_H,[1], - [Define if we have sane linux/tcp.h]) - AC_MSG_RESULT(yes) -else - AC_MSG_RESULT(no) -fi - - - -#-------------------------------------------------------------------- -# Linux requires sys/socketio.h instead of sys/sockio.h -#-------------------------------------------------------------------- -AC_CHECK_HEADER(sys/socketio.h, AC_DEFINE(HAVE_SOCKETIO_H)) - - -#-------------------------------------------------------------------- -# Misc -#-------------------------------------------------------------------- -AC_CHECK_HEADER(poll.h, AC_DEFINE(HAVE_POLL_H)) - -#-------------------------------------------------------------------- -# The statements below define a collection of symbols related to -# dynamic loading and shared libraries: -# -# SHLIB_CFLAGS - Flags to pass to cc when compiling the components -# of a shared library (may request position-independent -# code, among other things). -# SHLIB_LD - Base command to use for combining object files -# into a shared library. -# SHLIB_SUFFIX - Suffix to use for the names of dynamically loadable -# extensions. An empty string means we don't know how -# to use shared libraries on this platform. -#-------------------------------------------------------------------- - -# Step 1: set the variable "system" to hold the name and version number -# for the system. 
- -AC_MSG_CHECKING([system version (for dynamic loading)]) -system=`./config.sub $host` -AC_MSG_RESULT($system) - -# Step 2: check for existence of -ldl library. This is needed because -# Linux can use either -ldl or -ldld for dynamic loading. - -AC_CHECK_LIB(dl, dlopen, have_dl=yes, have_dl=no) - -# Step 3: set configuration options based on system name and version. - -SHLIB_LDLIBS= -fullSrcDir=`cd $srcdir; pwd` -case $system in - *-linux-*) - SHLIB_CFLAGS="-fPIC" - SHLIB_SUFFIX=".so" - if test "$have_dl" = yes; then - SHLIB_LD="${CC}" - SHLIB_LDFLAGS="$LDFLAGS -shared" - LD_FLAGS="-rdynamic" - else - AC_CHECK_HEADER(dld.h, [ - SHLIB_LD="ld" - SHLIB_LDFLAGS="-shared"]) - if test X${enable_m64_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers) - fi - if test X${enable_m32_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers) - fi - fi - SHLIB_EXTRACT_ALL="" - ;; - *-openbsd*) - # Not available on all versions: check for include file. - AC_CHECK_HEADER(dlfcn.h, [ - SHLIB_CFLAGS="-fpic" - SHLIB_LD="${CC}" - SHLIB_LDFLAGS="$LDFLAGS -shared" - SHLIB_SUFFIX=".so" - if test X${enable_m64_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers) - fi - if test X${enable_m32_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers) - fi - ], [ - # No dynamic loading. - SHLIB_CFLAGS="" - SHLIB_LD="ld" - SHLIB_LDFLAGS="" - SHLIB_SUFFIX="" - AC_MSG_ERROR(don't know how to compile and link dynamic drivers) - ]) - SHLIB_EXTRACT_ALL="" - ;; - *-netbsd*|*-freebsd*|*-dragonfly*) - # Not available on all versions: check for include file. - AC_CHECK_HEADER(dlfcn.h, [ - SHLIB_CFLAGS="-fpic" - SHLIB_LD="ld" - SHLIB_LDFLAGS="$LDFLAGS -Bshareable -x" - SHLIB_SUFFIX=".so" - if test X${enable_m64_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers) - fi - if test X${enable_m32_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers) - fi - ], [ - # No dynamic loading. - SHLIB_CFLAGS="" - SHLIB_LD="ld" - SHLIB_LDFLAGS="" - SHLIB_SUFFIX="" - AC_MSG_ERROR(don't know how to compile and link dynamic drivers) - ]) - SHLIB_EXTRACT_ALL="" - ;; - *-solaris2*|*-sysv4*) - SHLIB_CFLAGS="-KPIC" - SHLIB_LD="/usr/ccs/bin/ld" - SHLIB_LDFLAGS="$no_mXX_LDFLAGS -G -z text" - if test X${enable_m64_build} = Xyes; then - SHLIB_LDFLAGS="-64 $SHLIB_LDFLAGS" - fi - if test X${enable_m32_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers) - fi - SHLIB_SUFFIX=".so" - SHLIB_EXTRACT_ALL="-z allextract" - ;; - *darwin*) - SHLIB_CFLAGS="-fno-common" - SHLIB_LD="cc" - SHLIB_LDFLAGS="$LDFLAGS -bundle -flat_namespace -undefined suppress" - SHLIB_SUFFIX=".so" - SHLIB_EXTRACT_ALL="" - ;; - *osf1*) - SHLIB_CFLAGS="-fPIC" - SHLIB_LD="ld" - SHLIB_LDFLAGS="$LDFLAGS -shared" - if test X${enable_m64_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 64-bit dynamic drivers) - fi - if test X${enable_m32_build} = Xyes; then - AC_MSG_ERROR(don't know how to link 32-bit dynamic drivers) - fi - SHLIB_SUFFIX=".so" - SHLIB_EXTRACT_ALL="" - ;; - *osf5*) - SHLIB_CFLAGS="-fPIC" - SHLIB_LD="${CC} -shared" - SHLIB_LDFLAGS="$LDFLAGS" - SHLIB_SUFFIX=".so" - SHLIB_EXTRACT_ALL="" - ;; - *) - # No dynamic loading. 
- SHLIB_CFLAGS="" - SHLIB_LD="ld" - SHLIB_LDFLAGS="" - SHLIB_LDLIBS="" - SHLIB_SUFFIX="" - SHLIB_EXTRACT_ALL="" - AC_MSG_ERROR(don't know how to compile and link dynamic drivers) - ;; -esac - -# If we're running gcc, then change the C flags for compiling shared -# libraries to the right flags for gcc, instead of those for the -# standard manufacturer compiler. - -if test "$CC" = "gcc" -o `$CC -v 2>&1 | grep -c gcc` != "0" ; then - case $system in - *-aix) - ;; - *-bsd*) - ;; - *-irix) - ;; - *-netbsd|*-freebsd|*-openbsd) - ;; - *-riscos) - ;; - *ultrix4.*) - ;; - *darwin*) - ;; - *) - SHLIB_CFLAGS="-fPIC" - ;; - esac -fi - -# Make it possible for erl_interface to use it's own compiler options -EI_CFLAGS="$CFLAGS" - -# Add thread-safety flags if requested -AC_ARG_ENABLE(shlib-thread-safety, -[ --enable-shlib-thread-safety enable thread safety for build shared libraries], -[ case "$enableval" in - no) ;; - *) SHLIB_CFLAGS="$SHLIB_CFLAGS -D_THREAD_SAFE -D_REENTRANT" - CFLAGS="$CFLAGS -D_THREAD_SAFE -D_REENTRANT" - ;; - esac ], ) - -SHLIB_CFLAGS="$SHLIB_CFLAGS $CFLAGS" - - -AC_SUBST(CFLAGS) -AC_SUBST(SHLIB_LD) -AC_SUBST(SHLIB_LDFLAGS) -AC_SUBST(SHLIB_LDLIBS) -AC_SUBST(SHLIB_CFLAGS) -AC_SUBST(SHLIB_SUFFIX) -AC_SUBST(SHLIB_EXTRACT_ALL) -AC_SUBST(EI_CFLAGS) - -#-------------------------------------------------------------------- -# Check for the existence of the -lsocket and -lnsl libraries. -# The order here is important, so that they end up in the right -# order in the command line generated by make. Here are some -# special considerations: -# 1. Use "connect" and "accept" to check for -lsocket, and -# "gethostbyname" to check for -lnsl. -# 2. Use each function name only once: can't redo a check because -# autoconf caches the results of the last check and won't redo it. -# 3. Use -lnsl and -lsocket only if they supply procedures that -# aren't already present in the normal libraries. This is because -# IRIX 5.2 has libraries, but they aren't needed and they're -# bogus: they goof up name resolution if used. -# 4. On some SVR4 systems, can't use -lsocket without -lnsl too. -# To get around this problem, check for both libraries together -# if -lsocket doesn't work by itself. -#-------------------------------------------------------------------- - -erl_checkBoth=0 -AC_CHECK_FUNC(connect, erl_checkSocket=0, erl_checkSocket=1) -if test "$erl_checkSocket" = 1; then - AC_CHECK_LIB(socket, main, LIBS="$LIBS -lsocket", erl_checkBoth=1) -fi -if test "$erl_checkBoth" = 1; then - tk_oldLibs=$LIBS - LIBS="$LIBS -lsocket -lnsl" - AC_CHECK_FUNC(accept, erl_checkNsl=0, [LIBS=$tk_oldLibs]) -fi -AC_CHECK_FUNC(gethostbyname, , AC_CHECK_LIB(nsl, main, [LIBS="$LIBS -lnsl"])) - -dnl Checks for library functions. 
-AC_CHECK_FUNCS(strerror) -AC_CHECK_FUNCS(vsnprintf) -AC_CHECK_FUNCS(usleep) - -# First check if the library is available, then if we can choose between -# two versions of gethostbyname -AC_HAVE_LIBRARY(resolv) -AC_CHECK_LIB(resolv, res_gethostbyname,[AC_DEFINE(HAVE_RES_GETHOSTBYNAME,1)]) - -#-------------------------------------------------------------------- -# Check for isfinite -#-------------------------------------------------------------------- - -AC_MSG_CHECKING([for isfinite]) -AC_TRY_LINK([#include <math.h>], - [isfinite(0);], have_isfinite=yes, have_isfinite=no) - -if test $have_isfinite = yes; then - AC_DEFINE(HAVE_ISFINITE,1) - AC_MSG_RESULT(yes) -else - AC_DEFINE(HAVE_FINITE,1) - AC_MSG_RESULT(no) -fi - -#-------------------------------------------------------------------- -# Emulator compatible flags (for drivers) -#-------------------------------------------------------------------- - -ERTS_CFLAGS=$CFLAGS -AC_SUBST(ERTS_CFLAGS) - -ERTS_LIBS=$LIBS -AC_SUBST(ERTS_LIBS) - -#-------------------------------------------------------------------- -# Special compiler macro to handle cross compiling -# (HCC) is used to compile tools run in the HOST environment -#-------------------------------------------------------------------- -HCC='$(CC)' -AC_SUBST(HCC) - -#-------------------------------------------------------------------- -# ld is used for linking on vxworks -#-------------------------------------------------------------------- -LD='$(CC) $(CFLAGS)' -AC_SUBST(LD) - -#-------------------------------------------------------------------- -# object file suffix -#-------------------------------------------------------------------- -obj='.o' -AC_SUBST(obj) - -#-------------------------------------------------------------------- -# executable file suffix -#-------------------------------------------------------------------- -exe='' -AC_SUBST(exe) - -#-------------------------------------------------------------------- -# flags when linking for cross platform targets (yet 'tis useful with -# native builds) -#-------------------------------------------------------------------- -CROSSLDFLAGS='' -AC_SUBST(CROSSLDFLAGS) - -dnl -dnl SSL and CRYPTO needs the library openSSL/ssleay -dnl -dnl Check flags --with-ssl, --without-ssl --with-ssl=PATH. -dnl If no option is given or --with-ssl is set without a path then we -dnl search for SSL libraries and header files in the standard locations. -dnl If set to --without-ssl we disable the use of SSL -dnl If set to --with-ssl=PATH we use that path as the prefix, i.e. we -dnl use "PATH/include" and "PATH/lib". - -AC_SUBST(SSLEAY_ROOT) -TARGET=$host - -# We search for SSL. First in the OTP team ClearCase standard location, -# then in the common OS standard locations -# No we do not. -SSL_APP=ssl -CRYPTO_APP=crypto -SSLEAY_ROOT=$TARGET -#for dir in /usr /usr/pkg /usr/local /usr/local/ssl /usr/lib/ssl /usr/ssl; do -# AC_CHECK_HEADER($dir/include/openssl/opensslv.h, -# ac_cv_openssl=yes, ac_cv_openssl=no) -# if test $ac_cv_openssl = yes ; then -# SSLEAY_ROOT="$dir" -# ssl_found=yes -# break -# fi -#done - -# Find a usable java compiler -# -# WARNING this code is copied from ERTS configure.in, and should be -# updated if that code changes. I hate duplicating code, but what -# can I do. -# -dnl ERL_TRY_LINK_JAVA(CLASSES, FUNCTION-BODY -dnl [ACTION_IF_FOUND [, ACTION-IF-NOT-FOUND]]) -dnl Freely inspired by AC_TRY_LINK. (Maybe better to create a -dnl AC_LANG_JAVA instead...) 
-AC_DEFUN(ERL_TRY_LINK_JAVA, -[java_link='$JAVAC conftest.java 1>&AC_FD_CC' -changequote(�, �)dnl -cat > conftest.java <<EOF -�$1� -class conftest { public static void main(String[] args) { - �$2� - ; return; }} -EOF -changequote([, ])dnl -if AC_TRY_EVAL(java_link) && test -s conftest.class; then - ifelse([$3], , :, [rm -rf conftest* - $3]) -else - echo "configure: failed program was:" 1>&AC_FD_CC - cat conftest.java 1>&AC_FD_CC - echo "configure: PATH was $PATH" 1>&AC_FD_CC -ifelse([$4], , , [ rm -rf conftest* - $4 -])dnl -fi -rm -f conftest*]) -dnl -AC_CHECK_PROGS(JAVAC, javac guavac gcj jikes bock) -if test -n "$JAVAC"; then - dnl Make sure it's at least JDK 1.5 - AC_CACHE_CHECK(for JDK version 1.5, - ac_cv_prog_javac_ver_1_5, - [ERL_TRY_LINK_JAVA([], [for (String i : args);], - ac_cv_prog_javac_ver_1_5=yes, ac_cv_prog_javac_ver_1_5=no)]) - if test $ac_cv_prog_javac_ver_1_5 = no; then - unset -v JAVAC - fi -fi -if test -n "$JAVAC"; then - AC_SUBST(JAVAC) - : -fi - -AC_CHECK_PROGS([make_command], [make gmake], [false]) -AC_SUBST(make_command) - -if test "$GCC" = yes; then - test_c_compiler="{gnuc, undefined}" -else - test_c_compiler="undefined" -fi -AC_SUBST(test_c_compiler) - -AC_OUTPUT(conf_vars) diff --git a/lib/test_server/src/cross.cover b/lib/test_server/src/cross.cover deleted file mode 100644 index 07bf0bed5c..0000000000 --- a/lib/test_server/src/cross.cover +++ /dev/null @@ -1,20 +0,0 @@ -%%% This is an -*- erlang -*- file. -%%% -%%% Elements in this file shall be on the form -%%% {Application,Modules}. -%%% -%%% Application is the name of an application or the atom all. -%%% Modules is a list of module names -%%% -%%% The Application shall include the listed Modules in its cover compilation, -%%% but not in the cover analysis. -%%% If Application=all it means that all application shall include the listed -%%% Modules in the cover compilation. -%%% -%%% After all tests are completed, the listed modules are analysed with cover -%%% data from all tests and the result is stored under the application where -%%% the modules belong. - -{all,[]}. - -{observer,[dbg]}. diff --git a/lib/test_server/src/erl2html2.erl b/lib/test_server/src/erl2html2.erl deleted file mode 100644 index 2c63103264..0000000000 --- a/lib/test_server/src/erl2html2.erl +++ /dev/null @@ -1,302 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2015. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------ -%%% Purpose:Convert Erlang files to html. -%%%------------------------------------------------------------------ - --module(erl2html2). --export([convert/3, convert/4]). - -convert([], _Dest, _InclPath) -> % Fake clause. - ok; -convert(File, Dest, InclPath) -> - %% The generated code uses the BGCOLOR attribute in the - %% BODY tag, which wasn't valid until HTML 3.2. Also, - %% good HTML should either override all colour attributes - %% or none of them -- *never* just a few. 
- %% - %% FIXME: The colours should *really* be set with - %% stylesheets... - %% - %% The html file is written with the same encoding as the input file. - Encoding = encoding(File), - Header = ["<!DOCTYPE HTML PUBLIC " - "\"-//W3C//DTD HTML 3.2 Final//EN\">\n" - "<!-- autogenerated by '",atom_to_list(?MODULE),"'. -->\n" - "<html>\n" - "<head>\n" - "<meta http-equiv=\"Content-Type\" content=\"text/html;" - "charset=",html_encoding(Encoding),"\"/>\n" - "<title>", to_raw_list(File,Encoding), "</title>\n" - "</head>\n\n" - "<body bgcolor=\"white\" text=\"black\"" - " link=\"blue\" vlink=\"purple\" alink=\"red\">\n"], - convert(File, Dest, InclPath, Header). - - -convert(File, Dest, InclPath, Header) -> - %% statistics(runtime), - case parse_file(File, InclPath) of - {ok,Functions} -> - %% {_, Time1} = statistics(runtime), - %% io:format("Parsed file in ~.2f Seconds.~n",[Time1/1000]), - case file:open(File,[raw,{read_ahead,10000}]) of - {ok,SFd} -> - case file:open(Dest,[write,raw]) of - {ok,DFd} -> - file:write(DFd,[Header,"<pre>\n"]), - _Lines = build_html(SFd,DFd,encoding(File),Functions), - file:write(DFd,["</pre>\n",footer(), - "</body>\n</html>\n"]), - %% {_, Time2} = statistics(runtime), - %% io:format("Converted ~p lines in ~.2f Seconds.~n", - %% [_Lines, Time2/1000]), - file:close(SFd), - file:close(DFd), - ok; - Error -> - Error - end; - Error -> - Error - end; - Error -> - Error - end. - -%%%----------------------------------------------------------------- -%%% Parse the input file to get the line numbers for all function -%%% definitions. This will be used when creating link targets for each -%%% function in build_html/5. -%%% -%%% All function clauses are also marked in order to allow -%%% possibly_enhance/2 to write these in bold. -%%% -%%% Use expanded preprocessor directives if possible (epp). Only if -%%% this fails, fall back on using non-expanded code (epp_dodger). - -parse_file(File, InclPath) -> - case epp:open(File, InclPath, []) of - {ok,Epp} -> - try parse_preprocessed_file(Epp,File,false) of - Forms -> - epp:close(Epp), - {ok,Forms} - catch - _:{error,_Reason,true} -> - parse_non_preprocessed_file(File); - _:{error,_Reason,false} -> - {ok,[]} - end; - Error = {error,_} -> - Error - end. - -parse_preprocessed_file(Epp, File, InCorrectFile) -> - case epp:parse_erl_form(Epp) of - {ok,Form} -> - case Form of - {attribute,_,file,{File,_}} -> - parse_preprocessed_file(Epp, File, true); - {attribute,_,file,{_OtherFile,_}} -> - parse_preprocessed_file(Epp, File, false); - {function,L,F,A,Cs} when InCorrectFile -> - {CLs,LastCL} = find_clause_lines(Cs, []), - %% tl(CLs) cause we know the start line already - [{atom_to_list(F),A,get_line(L),LastCL} | tl(CLs)] ++ - parse_preprocessed_file(Epp, File, true); - _ -> - parse_preprocessed_file(Epp, File, InCorrectFile) - end; - {error,Reason={_L,epp,{undefined,_Macro,none}}} -> - throw({error,Reason,InCorrectFile}); - {error,_Reason} -> - parse_preprocessed_file(Epp, File, InCorrectFile); - {eof,_Location} -> - [] - end. - -parse_non_preprocessed_file(File) -> - case file:open(File, []) of - {ok,Epp} -> - Forms = parse_non_preprocessed_file(Epp, File, 1), - file:close(Epp), - {ok,Forms}; - Error = {error,_E} -> - Error - end. 
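Both the epp-based parser above and the epp_dodger fallback that follows produce the same structure for build_html/4 to consume: one {Name,Arity,DefLine,LastExprLine} entry per function plus a {clause,Line} marker for every clause after the first, all in source order. A hedged illustration with invented names and line numbers:

    %% Hedged sample of the parser output for a two-clause foo/1 defined
    %% on line 12 (second clause on line 17, last expression on line 20)
    %% followed by a single-clause bar/0 on lines 23-25.
    sample_parse_result() ->
        [{"foo",1,12,20},
         {clause,17},
         {"bar",0,23,25}].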
- -parse_non_preprocessed_file(Epp, File, Location) -> - case epp_dodger:parse_form(Epp, Location) of - {ok,Tree,Location1} -> - try erl_syntax:revert(Tree) of - {function,L,F,A,Cs} -> - {CLs,LastCL} = find_clause_lines(Cs, []), - %% tl(CLs) cause we know the start line already - [{atom_to_list(F),A,get_line(L),LastCL} | tl(CLs)] ++ - parse_non_preprocessed_file(Epp, File, Location1); - _ -> - parse_non_preprocessed_file(Epp, File, Location1) - catch - _:_ -> parse_non_preprocessed_file(Epp, File, Location1) - end; - {error,_E,Location1} -> - parse_non_preprocessed_file(Epp, File, Location1); - {eof,_Location} -> - [] - end. - -get_line(Anno) -> - erl_anno:line(Anno). - -%%%----------------------------------------------------------------- -%%% Find the line number of the last expression in the function -find_clause_lines([{clause,CL,_Params,_Op,Exprs}], CLs) -> % last clause - try tuple_to_list(lists:last(Exprs)) of - [_Type,ExprLine | _] when is_integer(ExprLine) -> - {lists:reverse([{clause,get_line(CL)}|CLs]), get_line(ExprLine)}; - [tree,_ | Exprs1] -> - find_clause_lines([{clause,CL,undefined,undefined,Exprs1}], CLs); - [macro,{_var,ExprLine,_MACRO} | _] when is_integer(ExprLine) -> - {lists:reverse([{clause,get_line(CL)}|CLs]), get_line(ExprLine)}; - _ -> - {lists:reverse([{clause,get_line(CL)}|CLs]), get_line(CL)} - catch - _:_ -> - {lists:reverse([{clause,get_line(CL)}|CLs]), get_line(CL)} - end; - -find_clause_lines([{clause,CL,_Params,_Op,_Exprs} | Cs], CLs) -> - find_clause_lines(Cs, [{clause,get_line(CL)}|CLs]). - -%%%----------------------------------------------------------------- -%%% Add a link target for each line and one for each function definition. -build_html(SFd,DFd,Encoding,FuncsAndCs) -> - build_html(SFd,DFd,Encoding,file:read_line(SFd),1,FuncsAndCs, - false,undefined). - -%% line of last expression in function found -build_html(SFd,DFd,Enc,{ok,Str},LastL,FuncsAndCs,_IsFuncDef,{F,LastL}) -> - LastLineLink = test_server_ctrl:uri_encode(F++"-last_expr",utf8), - file:write(DFd,["<a name=\"", - to_raw_list(LastLineLink,Enc),"\"/>"]), - build_html(SFd,DFd,Enc,{ok,Str},LastL,FuncsAndCs,true,undefined); -%% function start line found -build_html(SFd,DFd,Enc,{ok,Str},L0,[{F,A,L0,LastL}|FuncsAndCs], - _IsFuncDef,_FAndLastL) -> - FALink = test_server_ctrl:uri_encode(F++"-"++integer_to_list(A),utf8), - file:write(DFd,["<a name=\"",to_raw_list(FALink,Enc),"\"/>"]), - build_html(SFd,DFd,Enc,{ok,Str},L0,FuncsAndCs,true,{F,LastL}); -build_html(SFd,DFd,Enc,{ok,Str},L,[{clause,L}|FuncsAndCs], - _IsFuncDef,FAndLastL) -> - build_html(SFd,DFd,Enc,{ok,Str},L,FuncsAndCs,true,FAndLastL); -build_html(SFd,DFd,Enc,{ok,Str},L,FuncsAndCs,IsFuncDef,FAndLastL) -> - LStr = line_number(L), - Str1 = line(Str,IsFuncDef), - file:write(DFd,[LStr,Str1]), - build_html(SFd,DFd,Enc,file:read_line(SFd),L+1,FuncsAndCs,false,FAndLastL); -build_html(_SFd,_DFd,_Enc,eof,L,_FuncsAndCs,_IsFuncDef,_FAndLastL) -> - L. - -line_number(L) -> - LStr = integer_to_list(L), - Pred = - case length(LStr) of - Length when Length < 5 -> - lists:duplicate(5-Length,$\s); - _ -> - [] - end, - ["<a name=\"",LStr,"\"/>",Pred,LStr,": "]. - -line(Str,IsFuncDef) -> - Str1 = htmlize(Str), - possibly_enhance(Str1,IsFuncDef). 
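With the parsing machinery above and the htmlize/possibly_enhance helpers below, everything is in place for the exported entry point. A hedged usage sketch; the file names and the include path are examples only:

    %% Hedged usage sketch: render a source file to HTML, letting epp
    %% search "include" for header files when expanding the module.
    render_suite() ->
        ok = erl2html2:convert("src/my_SUITE.erl",
                               "log/my_SUITE.erl.html",
                               ["include"]).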
- -%%%----------------------------------------------------------------- -%%% Substitute special characters that should not appear in HTML -htmlize([$<|Str]) -> - [$&,$l,$t,$;|htmlize(Str)]; -htmlize([$>|Str]) -> - [$&,$g,$t,$;|htmlize(Str)]; -htmlize([$&|Str]) -> - [$&,$a,$m,$p,$;|htmlize(Str)]; -htmlize([$"|Str]) -> - [$&,$q,$u,$o,$t,$;|htmlize(Str)]; -htmlize([Ch|Str]) -> - [Ch|htmlize(Str)]; -htmlize([]) -> - []. - -%%%----------------------------------------------------------------- -%%% Write comments in italic and function definitions in bold. -possibly_enhance(Str,true) -> - case lists:splitwith(fun($() -> false; (_) -> true end, Str) of - {_,[]} -> Str; - {F,A} -> ["<b>",F,"</b>",A] - end; -possibly_enhance([$%|_]=Str,_) -> - ["<i>",Str--"\n","</i>","\n"]; -possibly_enhance([$-|_]=Str,_) -> - possibly_enhance(Str,true); -possibly_enhance(Str,false) -> - Str. - -%%%----------------------------------------------------------------- -%%% End of the file -footer() -> - "". - -%%%----------------------------------------------------------------- -%%% Read encoding from source file -encoding(File) -> - case epp:read_encoding(File) of - none -> - epp:default_encoding(); - E -> - E - end. - -%%%----------------------------------------------------------------- -%%% Covert encoding atom to string for use in HTML header -html_encoding(latin1) -> - "iso-8859-1"; -html_encoding(utf8) -> - "utf-8". - -%%%----------------------------------------------------------------- -%%% Convert a string to a list of raw printable characters in the -%%% given encoding. This is necessary since the files (source and -%%% destination) are both opened in raw mode (default encoding). Byte -%%% by byte is read from source and written to the destination. This -%%% conversion is needed when printing data that is not first read -%%% from the source. -%%% -%%% Example: if the encoding of the file is utf8, and we have a string -%%% containing "å" = [229], then we need to convert this to [195,165] -%%% before writing. Note that this conversion is only necessary -%%% because the destination file is not (necessarily) opened with utf8 -%%% encoding - it is opened with default encoding in order to allow -%%% raw file mode and byte by byte copying from source. -to_raw_list(X,latin1) when is_list(X) -> - X; -to_raw_list(X,utf8) when is_list(X) -> - binary_to_list(unicode:characters_to_binary(X)). diff --git a/lib/test_server/src/test_server.app.src b/lib/test_server/src/test_server.app.src deleted file mode 100644 index 334be8109d..0000000000 --- a/lib/test_server/src/test_server.app.src +++ /dev/null @@ -1,39 +0,0 @@ -% This is an -*- erlang -*- file. -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2009-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. 
-%% -%% %CopyrightEnd% - -{application, test_server, - [{description, "The OTP Test Server application"}, - {vsn, "%VSN%"}, - {modules, [ - erl2html2, - test_server_ctrl, - test_server, - test_server_io, - test_server_node, - test_server_sup - ]}, - {registered, [test_server_ctrl, - test_server, - test_server_break_process]}, - {applications, [kernel,stdlib]}, - {env, []}, - {runtime_dependencies, ["tools-2.8","stdlib-2.5","runtime_tools-1.8.16", - "observer-2.1","kernel-4.0","inets-6.0", - "syntax_tools-1.7","erts-7.0"]}]}. - diff --git a/lib/test_server/src/test_server.appup.src b/lib/test_server/src/test_server.appup.src deleted file mode 100644 index 7c4aa630ae..0000000000 --- a/lib/test_server/src/test_server.appup.src +++ /dev/null @@ -1,22 +0,0 @@ -%% -*- erlang -*- -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2014. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -{"%VSN%", - [{<<".*">>,[{restart_application, test_server}]}], - [{<<".*">>,[{restart_application, test_server}]}] -}. diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl deleted file mode 100644 index da6bf491ac..0000000000 --- a/lib/test_server/src/test_server.erl +++ /dev/null @@ -1,2655 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2015. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% --module(test_server). - --define(DEFAULT_TIMETRAP_SECS, 60). - -%%% TEST_SERVER_CTRL INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([run_test_case_apply/1,init_target_info/0,init_purify/0]). --export([cover_compile/1,cover_analyse/2]). - -%%% TEST_SERVER_SUP INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([get_loc/1,set_tc_state/1]). - -%%% TEST SUITE INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([lookup_config/2]). --export([fail/0,fail/1,format/1,format/2,format/3]). --export([capture_start/0,capture_stop/0,capture_get/0]). --export([messages_get/0]). --export([permit_io/2]). --export([hours/1,minutes/1,seconds/1,sleep/1,adjusted_sleep/1,timecall/3]). --export([timetrap_scale_factor/0,timetrap/1,get_timetrap_info/0, - timetrap_cancel/1,timetrap_cancel/0]). --export([m_out_of_n/3,do_times/4,do_times/2]). --export([call_crash/3,call_crash/4,call_crash/5]). --export([temp_name/1]). --export([start_node/3, stop_node/1, wait_for_node/1, is_release_available/1]). --export([app_test/1, app_test/2, appup_test/1]). 
--export([is_native/1]). --export([comment/1, make_priv_dir/0]). --export([os_type/0]). --export([run_on_shielded_node/2]). --export([is_cover/0,is_debug/0,is_commercial/0]). - --export([break/1,break/2,break/3,continue/0,continue/1]). - -%%% DEBUGGER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([purify_new_leaks/0, purify_format/2, purify_new_fds_inuse/0, - purify_is_running/0]). - -%%% PRIVATE EXPORTED %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --include("test_server_internal.hrl"). --include_lib("kernel/include/file.hrl"). - -init_target_info() -> - [$.|Emu] = code:objfile_extension(), - {_, OTPRel} = init:script_id(), - TestServerDir = filename:absname(filename:dirname(code:which(?MODULE))), - #target_info{os_family=test_server_sup:get_os_family(), - os_type=os:type(), - version=erlang:system_info(version), - system_version=erlang:system_info(system_version), - root_dir=code:root_dir(), - test_server_dir=TestServerDir, - emulator=Emu, - otp_release=OTPRel, - username=test_server_sup:get_username(), - cookie=atom_to_list(erlang:get_cookie())}. - -init_purify() -> - purify_new_leaks(). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% cover_compile(#cover{app=App,incl=Include,excl=Exclude,cross=Cross}) -> -%% {ok,#cover{mods=AnalyseModules}} | {error,Reason} -%% -%% App = atom() , name of application to be compiled -%% Exclude = [atom()], list of modules to exclude -%% Include = [atom()], list of modules outside of App that should be included -%% in the cover compilation -%% Cross = [atoms()], list of modules outside of App shat should be included -%% in the cover compilation, but that shall not be part of -%% the cover analysis for this application. -%% AnalyseModules = [atom()], list of successfully compiled modules -%% -%% Cover compile the given application. Return {ok,CoverInfo} if -%% compilation succeeds, else (if application is not found and there -%% are no modules to compile) {error,application_not_found}. - -cover_compile(CoverInfo=#cover{app=none,incl=Include,cross=Cross}) -> - CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross), - CompileMods = Include++CrossMods, - case length(CompileMods) of - 0 -> - io:fwrite("WARNING: No modules to cover compile!\n\n",[]), - cover:start(), % start cover server anyway - {ok,CoverInfo#cover{mods=[]}}; - N -> - io:fwrite("Cover compiling ~w modules - " - "this may take some time... ",[N]), - do_cover_compile(CompileMods), - io:fwrite("done\n\n",[]), - {ok,CoverInfo#cover{mods=Include}} - end; -cover_compile(CoverInfo=#cover{app=App,excl=all,incl=Include,cross=Cross}) -> - CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross), - CompileMods = Include++CrossMods, - case length(CompileMods) of - 0 -> - io:fwrite("WARNING: No modules to cover compile!\n\n",[]), - cover:start(), % start cover server anyway - {ok,CoverInfo#cover{mods=[]}}; - N -> - io:fwrite("Cover compiling '~w' (~w files) - " - "this may take some time... 
",[App,N]), - io:format("\nWARNING: All modules in \'~w\' are excluded\n" - "Only cover compiling modules in include list " - "and the modules\nin the cross cover file:\n" - "~tp\n", [App,CompileMods]), - do_cover_compile(CompileMods), - io:fwrite("done\n\n",[]), - {ok,CoverInfo#cover{mods=Include}} - end; -cover_compile(CoverInfo=#cover{app=App,excl=Exclude, - incl=Include,cross=Cross}) -> - CrossMods = lists:flatmap(fun({_,M}) -> M end,Cross), - case code:lib_dir(App) of - {error,bad_name} -> - case Include++CrossMods of - [] -> - io:format("\nWARNING: Can't find lib_dir for \'~w\'\n" - "Not cover compiling!\n\n",[App]), - {error,application_not_found}; - CompileMods -> - io:fwrite("Cover compiling '~w' (~w files) - " - "this may take some time... ", - [App,length(CompileMods)]), - io:format("\nWARNING: Can't find lib_dir for \'~w\'\n" - "Only cover compiling modules in include list: " - "~tp\n", [App,Include]), - do_cover_compile(CompileMods), - io:fwrite("done\n\n",[]), - {ok,CoverInfo#cover{mods=Include}} - end; - LibDir -> - EbinDir = filename:join([LibDir,"ebin"]), - WC = filename:join(EbinDir,"*.beam"), - AllMods = module_names(filelib:wildcard(WC)), - AnalyseMods = (AllMods ++ Include) -- Exclude, - CompileMods = AnalyseMods ++ CrossMods, - case length(CompileMods) of - 0 -> - io:fwrite("WARNING: No modules to cover compile!\n\n",[]), - cover:start(), % start cover server anyway - {ok,CoverInfo#cover{mods=[]}}; - N -> - io:fwrite("Cover compiling '~w' (~w files) - " - "this may take some time... ",[App,N]), - do_cover_compile(CompileMods), - io:fwrite("done\n\n",[]), - {ok,CoverInfo#cover{mods=AnalyseMods}} - end - end. - - -module_names(Beams) -> - [list_to_atom(filename:basename(filename:rootname(Beam))) || Beam <- Beams]. - - -do_cover_compile(Modules) -> - cover:start(), - Sticky = prepare_cover_compile(Modules,[]), - R = cover:compile_beam(Modules), - [warn_compile(Error) || Error <- R,element(1,Error)=/=ok], - [code:stick_mod(M) || M <- Sticky], - ok. - -warn_compile({error,{Reason,Module}}) -> - io:fwrite("\nWARNING: Could not cover compile ~ts: ~p\n", - [Module,{error,Reason}]). - -%% Make sure all modules are loaded and unstick if sticky -prepare_cover_compile([M|Ms],Sticky) -> - case {code:is_sticky(M),code:is_loaded(M)} of - {true,_} -> - code:unstick_mod(M), - prepare_cover_compile(Ms,[M|Sticky]); - {false,false} -> - case code:load_file(M) of - {module,_} -> - prepare_cover_compile([M|Ms],Sticky); - Error -> - io:fwrite("\nWARNING: Could not load ~w: ~p\n",[M,Error]), - prepare_cover_compile(Ms,Sticky) - end; - {false,_} -> - prepare_cover_compile(Ms,Sticky) - end; -prepare_cover_compile([],Sticky) -> - Sticky. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% cover_analyse(Dir,#cover{level=Analyse,mods=Modules,stop=Stop) -> -%% [{M,{Cov,NotCov,Details}}] -%% -%% Dir = string() -%% Analyse = details | overview -%% Modules = [atom()], the modules to analyse -%% -%% Cover analysis. If Analyse==details analyse_to_file is used. -%% -%% If Analyse==overview analyse_to_file is not used, only an overview -%% containing the number of covered/not covered lines in each module. -%% -%% Also, cover data will be exported to a file called all.coverdata in -%% the given directory. -%% -%% Finally, if Stop==true, then cover will be stopped after the -%% analysis is completed. Stopping cover causes the original (non -%% cover compiled) modules to be loaded back in. 
If a process at this -%% point is still running old code of any of the cover compiled -%% modules, meaning that is has not done any fully qualified function -%% call after the cover compilation, the process will now be -%% killed. To avoid this scenario, it is possible to set Stop=false, -%% which means that the modules will stay cover compiled. Note that -%% this is only recommended if the erlang node is being terminated -%% after the test is completed. -cover_analyse(Dir,#cover{level=Analyse,mods=Modules,stop=Stop}) -> - io:fwrite(user, "Cover analysing... ", []), - {ATFOk,ATFFail} = - case Analyse of - details -> - case cover:export(filename:join(Dir,"all.coverdata")) of - ok -> - {result,Ok1,Fail1} = - cover:analyse_to_file(Modules,[{outdir,Dir},html]), - {lists:map(fun(OutFile) -> - M = list_to_atom( - filename:basename( - filename:rootname(OutFile, - ".COVER.html") - ) - ), - {M,{file,OutFile}} - end, Ok1), - lists:map(fun({Reason,M}) -> - {M,{error,Reason}} - end, Fail1)}; - Error -> - {[],lists:map(fun(M) -> {M,Error} end, Modules)} - end; - overview -> - case cover:export(filename:join(Dir,"all.coverdata")) of - ok -> - {[],lists:map(fun(M) -> {M,undefined} end, Modules)}; - Error -> - {[],lists:map(fun(M) -> {M,Error} end, Modules)} - end - end, - {result,AOk,AFail} = cover:analyse(Modules,module), - R0 = merge_analysis_results(AOk,ATFOk++ATFFail,[]) ++ - [{M,{error,Reason}} || {Reason,M} <- AFail], - R = lists:sort(R0), - io:fwrite(user, "done\n\n", []), - - case Stop of - true -> - Sticky = unstick_all_sticky(node()), - cover:stop(), - stick_all_sticky(node(),Sticky); - false -> - ok - end, - R. - -merge_analysis_results([{M,{Cov,NotCov}}|T],ATF,Acc) -> - case lists:keytake(M,1,ATF) of - {value,{_,R},ATF1} -> - merge_analysis_results(T,ATF1,[{M,{Cov,NotCov,R}}|Acc]); - false -> - merge_analysis_results(T,ATF,Acc) - end; -merge_analysis_results([],_,Acc) -> - Acc. - -do_cover_for_node(Node,CoverFunc) -> - do_cover_for_node(Node,CoverFunc,true). -do_cover_for_node(Node,CoverFunc,StickUnstick) -> - %% In case a slave node is starting another slave node! I.e. this - %% function is executed on a slave node - then the cover function - %% must be executed on the master node. This is for instance the - %% case in test_server's own tests. - MainCoverNode = cover:get_main_node(), - Sticky = - if StickUnstick -> unstick_all_sticky(MainCoverNode,Node); - true -> ok - end, - rpc:call(MainCoverNode,cover,CoverFunc,[Node]), - if StickUnstick -> stick_all_sticky(Node,Sticky); - true -> ok - end. - -unstick_all_sticky(Node) -> - unstick_all_sticky(node(),Node). -unstick_all_sticky(MainCoverNode,Node) -> - lists:filter( - fun(M) -> - case code:is_sticky(M) of - true -> - rpc:call(Node,code,unstick_mod,[M]), - true; - false -> - false - end - end, - rpc:call(MainCoverNode,cover,modules,[])). - -stick_all_sticky(Node,Sticky) -> - lists:foreach( - fun(M) -> - rpc:call(Node,code,stick_mod,[M]) - end, - Sticky). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% run_test_case_apply(Mod,Func,Args,Name,RunInit,TimetrapData) -> -%% {Time,Value,Loc,Opts,Comment} | {died,Reason,unknown,Comment} -%% -%% Time = float() (seconds) -%% Value = term() -%% Loc = term() -%% Comment = string() -%% Reason = term() -%% -%% Spawns off a process (case process) that actually runs the test suite. -%% The case process will have the job process as group leader, which makes -%% it possible to capture all it's output from io:format/2, etc. 
-%% -%% The job process then sits down and waits for news from the case process. -%% -%% Returns a tuple with the time spent (in seconds) in the test case, -%% the return value from the test case or an {'EXIT',Reason} if the case -%% failed, Loc points out where the test case crashed (if it did). Loc -%% is either the name of the function, or {<Module>,<Line>} of the last -%% line executed that had a ?line macro. If the test case did execute -%% erase/0 or similar, it may be empty. Comment is the last comment added -%% by test_server:comment/1, the reason if test_server:fail has been -%% called or the comment given by the return value {comment,Comment} from -%% a test case. -%% -%% {died,Reason,unknown,Comment} is returned if the test case was killed -%% by some other process. Reason is the kill reason provided. -%% -%% TimetrapData = {MultiplyTimetrap,ScaleTimetrap}, which indicates a -%% possible extension of all timetraps. Timetraps will be multiplied by -%% MultiplyTimetrap. If it is infinity, no timetraps will be started at all. -%% ScaleTimetrap indicates if test_server should attemp to automatically -%% compensate timetraps for runtime delays introduced by e.g. tools like -%% cover. - -run_test_case_apply({CaseNum,Mod,Func,Args,Name, - RunInit,TimetrapData}) -> - purify_format("Test case #~w ~w:~w/1", [CaseNum, Mod, Func]), - case os:getenv("TS_RUN_VALGRIND") of - false -> - ok; - _ -> - os:putenv("VALGRIND_LOGFILE_INFIX",atom_to_list(Mod)++"."++ - atom_to_list(Func)++"-") - end, - ProcBef = erlang:system_info(process_count), - Result = run_test_case_apply(Mod, Func, Args, Name, RunInit, - TimetrapData), - ProcAft = erlang:system_info(process_count), - purify_new_leaks(), - DetFail = get(test_server_detected_fail), - {Result,DetFail,ProcBef,ProcAft}. - --type tc_status() :: 'starting' | 'running' | 'init_per_testcase' | - 'end_per_testcase' | {'framework',atom(),atom()} | - 'tc'. --record(st, - { - ref :: reference(), - pid :: pid(), - mf :: {atom(),atom()}, - last_known_loc :: term(), - status :: tc_status() | 'undefined', - ret_val :: term(), - comment :: list(char()), - timeout :: non_neg_integer() | 'infinity', - config :: list() | 'undefined', - end_conf_pid :: pid() | 'undefined' - }). - -run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) -> - print_timestamp(minor,"Started at "), - print(minor, "", [], internal_raw), - TCCallback = get(test_server_testcase_callback), - LogOpts = get(test_server_logopts), - Ref = make_ref(), - Pid = - spawn_link( - fun() -> - run_test_case_eval(Mod, Func, Args, Name, Ref, - RunInit, TimetrapData, - LogOpts, TCCallback) - end), - put(test_server_detected_fail, []), - St = #st{ref=Ref,pid=Pid,mf={Mod,Func},last_known_loc=unknown, - status=starting,ret_val=[],comment="",timeout=infinity, - config=hd(Args)}, - run_test_case_msgloop(St). - -%% Ugly bug (pre R5A): -%% If this process (group leader of the test case) terminates before -%% all messages have been replied back to the io server, the io server -%% hangs. Fixed by the 20 milli timeout check here, and by using monitor in -%% io.erl. -%% -%% A test case is known to have failed if it returns {'EXIT', _} tuple, -%% or sends a message {failed, File, Line} to it's group_leader -%% -run_test_case_msgloop(#st{ref=Ref,pid=Pid,end_conf_pid=EndConfPid0}=St0) -> - receive - {set_tc_state=Tag,From,{Status,Config0}} -> - Config = case Config0 of - unknown -> St0#st.config; - _ -> Config0 - end, - St = St0#st{status=Status,config=Config}, - From ! 
{self(),Tag,ok}, - run_test_case_msgloop(St); - {abort_current_testcase,_,_}=Abort when St0#st.status =:= starting -> - %% we're in init phase, must must postpone this operation - %% until test case execution is in progress (or FW:init_tc - %% gets killed) - self() ! Abort, - erlang:yield(), - run_test_case_msgloop(St0); - {abort_current_testcase,Reason,From} -> - Line = case is_process_alive(Pid) of - true -> get_loc(Pid); - false -> unknown - end, - Mon = erlang:monitor(process, Pid), - exit(Pid,{testcase_aborted,Reason,Line}), - erlang:yield(), - From ! {self(),abort_current_testcase,ok}, - St = receive - {'DOWN', Mon, process, Pid, _} -> - St0 - after 10000 -> - %% Pid is probably trapping exits, hit it harder... - exit(Pid, kill), - %% here's the only place we know Reason, so we save - %% it as a comment, potentially replacing user data - Error = lists:flatten(io_lib:format("Aborted: ~p", - [Reason])), - Error1 = lists:flatten([string:strip(S,left) || - S <- string:tokens(Error, - [$\n])]), - Comment = if length(Error1) > 63 -> - string:substr(Error1,1,60) ++ "..."; - true -> - Error1 - end, - St0#st{comment=Comment} - end, - run_test_case_msgloop(St); - {sync_apply,From,MFA} -> - do_sync_apply(false,From,MFA), - run_test_case_msgloop(St0); - {sync_apply_proxy,Proxy,From,MFA} -> - do_sync_apply(Proxy,From,MFA), - run_test_case_msgloop(St0); - {comment,NewComment0} -> - NewComment1 = test_server_ctrl:to_string(NewComment0), - NewComment = test_server_sup:framework_call(format_comment, - [NewComment1], - NewComment1), - run_test_case_msgloop(St0#st{comment=NewComment}); - {read_comment,From} -> - From ! {self(),read_comment,St0#st.comment}, - run_test_case_msgloop(St0); - {make_priv_dir,From} -> - Config = case St0#st.config of - undefined -> []; - Config0 -> Config0 - end, - Result = - case proplists:get_value(priv_dir, Config) of - undefined -> - {error,no_priv_dir_in_config}; - PrivDir -> - case file:make_dir(PrivDir) of - ok -> - ok; - {error, eexist} -> - ok; - MkDirError -> - {error,{MkDirError,PrivDir}} - end - end, - From ! {self(),make_priv_dir,Result}, - run_test_case_msgloop(St0); - {'EXIT',Pid,{Ref,Time,Value,Loc,Opts}} -> - RetVal = {Time/1000000,Value,Loc,Opts}, - St = setup_termination(RetVal, St0#st{config=undefined}), - run_test_case_msgloop(St); - {'EXIT',Pid,Reason} -> - %% This exit typically happens when an unknown external process - %% has caused a test case process to terminate (e.g. if a linked - %% process has crashed). 
- St = - case Reason of - {What,[Loc0={_M,_F,A,[{file,_}|_]}|_]} when - is_integer(A) -> - Loc = rewrite_loc_item(Loc0), - handle_tc_exit(What, St0#st{last_known_loc=[Loc]}); - {What,[Details,Loc0={_M,_F,A,[{file,_}|_]}|_]} when - is_integer(A) -> - Loc = rewrite_loc_item(Loc0), - handle_tc_exit({What,Details}, St0#st{last_known_loc=[Loc]}); - _ -> - handle_tc_exit(Reason, St0) - end, - run_test_case_msgloop(St); - {EndConfPid0,{call_end_conf,Data,_Result}} -> - #st{mf={Mod,Func},config=CurrConf} = St0, - case CurrConf of - _ when is_list(CurrConf) -> - {_Mod,_Func,TCPid,TCExitReason,Loc} = Data, - spawn_fw_call(Mod,Func,CurrConf,TCPid, - TCExitReason,Loc,self()), - St = St0#st{config=undefined,end_conf_pid=undefined}, - run_test_case_msgloop(St); - _ -> - run_test_case_msgloop(St0) - end; - {_FwCallPid,fw_notify_done,{T,Value,Loc,Opts,AddToComment}} -> - %% the framework has been notified, we're finished - RetVal = {T,Value,Loc,Opts}, - Comment0 = St0#st.comment, - Comment = case AddToComment of - undefined -> - Comment0; - _ -> - if Comment0 =:= "" -> - AddToComment; - true -> - Comment0 ++ - test_server_ctrl:xhtml("<br>", - "<br />") ++ - AddToComment - end - end, - St = setup_termination(RetVal, St0#st{comment=Comment, - config=undefined}), - run_test_case_msgloop(St); - {'EXIT',_FwCallPid,{fw_notify_done,Func,Error}} -> - %% a framework function failed - CB = os:getenv("TEST_SERVER_FRAMEWORK"), - Loc = case CB of - FW when FW =:= false; FW =:= "undefined" -> - [{test_server,Func}]; - _ -> - [{list_to_atom(CB),Func}] - end, - RetVal = {died,{framework_error,Loc,Error},Loc}, - St = setup_termination(RetVal, St0#st{comment="Framework error", - config=undefined}), - run_test_case_msgloop(St); - {failed,File,Line} -> - put(test_server_detected_fail, - [{File, Line}| get(test_server_detected_fail)]), - run_test_case_msgloop(St0); - - {user_timetrap,Pid,_TrapTime,StartTime,E={user_timetrap_error,_},_} -> - case update_user_timetraps(Pid, StartTime) of - proceed -> - self() ! {abort_current_testcase,E,Pid}; - ignore -> - ok - end, - run_test_case_msgloop(St0); - {user_timetrap,Pid,TrapTime,StartTime,ElapsedTime,Scale} -> - %% a user timetrap is triggered, ignore it if new - %% timetrap has been started since - case update_user_timetraps(Pid, StartTime) of - proceed -> - TotalTime = if is_integer(TrapTime) -> - TrapTime + ElapsedTime; - true -> - TrapTime - end, - timetrap(TrapTime, TotalTime, Pid, Scale); - ignore -> - ok - end, - run_test_case_msgloop(St0); - {timetrap_cancel_one,Handle,_From} -> - timetrap_cancel_one(Handle, false), - run_test_case_msgloop(St0); - {timetrap_cancel_all,TCPid,_From} -> - timetrap_cancel_all(TCPid, false), - run_test_case_msgloop(St0); - {get_timetrap_info,From,TCPid} -> - Info = get_timetrap_info(TCPid, false), - From ! {self(),get_timetrap_info,Info}, - run_test_case_msgloop(St0); - _Other when not is_tuple(_Other) -> - %% ignore anything not generated by test server - run_test_case_msgloop(St0); - _Other when element(1, _Other) /= 'EXIT', - element(1, _Other) /= started, - element(1, _Other) /= finished, - element(1, _Other) /= print -> - %% ignore anything not generated by test server - run_test_case_msgloop(St0) - after St0#st.timeout -> - #st{ret_val=RetVal,comment=Comment} = St0, - erlang:append_element(RetVal, Comment) - end. - -setup_termination(RetVal, #st{pid=Pid}=St) -> - timetrap_cancel_all(Pid, false), - St#st{ret_val=RetVal,timeout=20}. - -set_tc_state(State) -> - set_tc_state(State,unknown). 
-set_tc_state(State, Config) -> - tc_supervisor_req(set_tc_state, {State,Config}). - -handle_tc_exit(killed, St) -> - %% probably the result of an exit(TestCase,kill) call, which is the - %% only way to abort a testcase process that traps exits - %% (see abort_current_testcase). - #st{config=Config,mf={Mod,Func},pid=Pid} = St, - Msg = testcase_aborted_or_killed, - spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()), - St; -handle_tc_exit({testcase_aborted,{user_timetrap_error,_}=Msg,_}, St) -> - #st{config=Config,mf={Mod,Func},pid=Pid} = St, - spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()), - St; -handle_tc_exit(Reason, #st{status={framework,FwMod,FwFunc}, - config=Config,pid=Pid}=St) -> - R = case Reason of - {timetrap_timeout,TVal,_} -> - {timetrap,TVal}; - {testcase_aborted=E,AbortReason,_} -> - {E,AbortReason}; - {fw_error,{FwMod,FwFunc,FwError}} -> - FwError; - Other -> - Other - end, - Error = {framework_error,R}, - spawn_fw_call(FwMod, FwFunc, Config, Pid, Error, unknown, self()), - St; -handle_tc_exit(Reason, #st{status=tc,config=Config0,mf={Mod,Func},pid=Pid}=St) - when is_list(Config0) -> - {R,Loc1,F} = case Reason of - {timetrap_timeout=E,TVal,Loc0} -> - {{E,TVal},Loc0,E}; - {testcase_aborted=E,AbortReason,Loc0} -> - Msg = {E,AbortReason}, - {Msg,Loc0,Msg}; - Other -> - {{'EXIT',Other},unknown,Other} - end, - Timeout = end_conf_timeout(Reason, St), - Config = [{tc_status,{failed,F}}|Config0], - EndConfPid = call_end_conf(Mod, Func, Pid, R, Loc1, Config, Timeout), - St#st{end_conf_pid=EndConfPid}; -handle_tc_exit(Reason, #st{config=Config,mf={Mod,Func0},pid=Pid, - status=Status}=St) -> - {R,Loc1} = case Reason of - {timetrap_timeout=E,TVal,Loc0} -> - {{E,TVal},Loc0}; - {testcase_aborted=E,AbortReason,Loc0} -> - {{E,AbortReason},Loc0}; - Other -> - {{'EXIT',Other},St#st.last_known_loc} - end, - Func = case Status of - init_per_testcase=F -> {F,Func0}; - end_per_testcase=F -> {F,Func0}; - _ -> Func0 - end, - spawn_fw_call(Mod, Func, Config, Pid, R, Loc1, self()), - St. - -end_conf_timeout({timetrap_timeout,Timeout,_}, _) -> - Timeout; -end_conf_timeout(_, #st{config=Config}) when is_list(Config) -> - proplists:get_value(default_timeout, Config, ?DEFAULT_TIMETRAP_SECS*1000); -end_conf_timeout(_, _) -> - ?DEFAULT_TIMETRAP_SECS*1000. - -call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) -> - Starter = self(), - Data = {Mod,Func,TCPid,TCExitReason,Loc}, - case erlang:function_exported(Mod,end_per_testcase,2) of - false -> - spawn_link(fun() -> - Starter ! {self(),{call_end_conf,Data,ok}} - end); - true -> - do_call_end_conf(Starter,Mod,Func,Data,Conf,TVal) - end. - -do_call_end_conf(Starter,Mod,Func,Data,Conf,TVal) -> - EndConfProc = - fun() -> - process_flag(trap_exit,true), % to catch timetraps - Supervisor = self(), - EndConfApply = - fun() -> - timetrap(TVal), - try apply(Mod,end_per_testcase,[Func,Conf]) of - _ -> ok - catch - _:Why -> - timer:sleep(1), - group_leader() ! {printout,12, - "WARNING! " - "~w:end_per_testcase(~w, ~p)" - " crashed!\n\tReason: ~p\n", - [Mod,Func,Conf,Why]} - end, - Supervisor ! {self(),end_conf} - end, - Pid = spawn_link(EndConfApply), - receive - {Pid,end_conf} -> - Starter ! {self(),{call_end_conf,Data,ok}}; - {'EXIT',Pid,Reason} -> - group_leader() ! {printout,12, - "WARNING! ~w:end_per_testcase(~w, ~p)" - " failed!\n\tReason: ~p\n", - [Mod,Func,Conf,Reason]}, - Starter ! 
{self(),{call_end_conf,Data,{error,Reason}}}; - {'EXIT',_OtherPid,Reason} -> - %% Probably the parent - not much to do about that - exit(Reason) - end - end, - spawn_link(EndConfProc). - -spawn_fw_call(Mod,{init_per_testcase,Func},CurrConf,Pid, - {timetrap_timeout,TVal}=Why, - Loc,SendTo) -> - FwCall = - fun() -> - Skip = {skip,{failed,{Mod,init_per_testcase,Why}}}, - %% if init_per_testcase fails, the test case - %% should be skipped - try do_end_tc_call(Mod,Func, {Pid,Skip,[CurrConf]}, Why) of - _ -> ok - catch - _:FwEndTCErr -> - exit({fw_notify_done,end_tc,FwEndTCErr}) - end, - %% finished, report back - SendTo ! {self(),fw_notify_done, - {TVal/1000,Skip,Loc,[],undefined}} - end, - spawn_link(FwCall); - -spawn_fw_call(Mod,{end_per_testcase,Func},EndConf,Pid, - {timetrap_timeout,TVal}=Why,_Loc,SendTo) -> - FwCall = - fun() -> - {RetVal,Report} = - case proplists:get_value(tc_status, EndConf) of - undefined -> - E = {failed,{Mod,end_per_testcase,Why}}, - {E,E}; - E = {failed,Reason} -> - {E,{error,Reason}}; - Result -> - E = {failed,{Mod,end_per_testcase,Why}}, - {Result,E} - end, - group_leader() ! {printout,12, - "WARNING! ~w:end_per_testcase(~w, ~p)" - " failed!\n\tReason: timetrap timeout" - " after ~w ms!\n", [Mod,Func,EndConf,TVal]}, - FailLoc = proplists:get_value(tc_fail_loc, EndConf), - try do_end_tc_call(Mod,Func, - {Pid,Report,[EndConf]}, Why) of - _ -> ok - catch - _:FwEndTCErr -> - exit({fw_notify_done,end_tc,FwEndTCErr}) - end, - Warn = "<font color=\"red\">" - "WARNING: end_per_testcase timed out!</font>", - %% finished, report back (if end_per_testcase fails, a warning - %% should be printed as part of the comment) - SendTo ! {self(),fw_notify_done, - {TVal/1000,RetVal,FailLoc,[],Warn}} - end, - spawn_link(FwCall); - -spawn_fw_call(FwMod,FwFunc,_,_Pid,{framework_error,FwError},_,SendTo) -> - FwCall = - fun() -> - test_server_sup:framework_call(report, [framework_error, - {{FwMod,FwFunc}, - FwError}]), - Comment = - lists:flatten( - io_lib:format("<font color=\"red\">" - "WARNING! ~w:~w failed!</font>", - [FwMod,FwFunc])), - %% finished, report back - SendTo ! {self(),fw_notify_done, - {died,{error,{FwMod,FwFunc,FwError}}, - {FwMod,FwFunc},[],Comment}} - end, - spawn_link(FwCall); - -spawn_fw_call(Mod,Func,CurrConf,Pid,Error,Loc,SendTo) -> - Func1 = case Func of - {_InitOrEndPerTC,F} -> F; - F -> F - end, - FwCall = - fun() -> - try fw_error_notify(Mod,Func1,[], - Error,Loc) of - _ -> ok - catch - _:FwErrorNotifyErr -> - exit({fw_notify_done,error_notification, - FwErrorNotifyErr}) - end, - Conf = [{tc_status,{failed,Error}}|CurrConf], - try do_end_tc_call(Mod,Func1, - {Pid,Error,[Conf]},Error) of - _ -> ok - catch - _:FwEndTCErr -> - exit({fw_notify_done,end_tc,FwEndTCErr}) - end, - %% finished, report back - SendTo ! {self(),fw_notify_done,{died,Error,Loc,[],undefined}} - end, - spawn_link(FwCall). - -%% The job proxy process forwards messages between the test case -%% process on a shielded node (and its descendants) and the job process. -%% -%% The job proxy process have to be started by the test-case process -%% on the shielded node! -start_job_proxy() -> - group_leader(spawn(fun () -> job_proxy_msgloop() end), self()), ok. - -%% The io_reply_proxy is not the most satisfying solution but it works... -io_reply_proxy(ReplyTo) -> - receive - IoReply when is_tuple(IoReply), - element(1, IoReply) == io_reply -> - ReplyTo ! IoReply; - _ -> - io_reply_proxy(ReplyTo) - end. - -job_proxy_msgloop() -> - receive - - %% - %% Messages that need intervention by proxy... 
- %% - - %% io stuff ... - IoReq when tuple_size(IoReq) >= 2, - element(1, IoReq) == io_request -> - - ReplyProxy = spawn(fun () -> io_reply_proxy(element(2, IoReq)) end), - group_leader() ! setelement(2, IoReq, ReplyProxy); - - %% test_server stuff... - {sync_apply, From, MFA} -> - group_leader() ! {sync_apply_proxy, self(), From, MFA}; - {sync_result_proxy, To, Result} -> - To ! {sync_result, Result}; - - %% - %% Messages that need no intervention by proxy... - %% - Msg -> - group_leader() ! Msg - end, - job_proxy_msgloop(). - -%% A test case is known to have failed if it returns {'EXIT', _} tuple, -%% or sends a message {failed, File, Line} to it's group_leader - -run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit, - TimetrapData, LogOpts, TCCallback) -> - put(test_server_multiply_timetraps, TimetrapData), - put(test_server_logopts, LogOpts), - Where = [{Mod,Func}], - put(test_server_loc, Where), - - FWInitResult = test_server_sup:framework_call(init_tc,[Mod,Func,Args0], - {ok,Args0}), - set_tc_state(running), - {{Time,Value},Loc,Opts} = - case FWInitResult of - {ok,Args} -> - run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback); - Error = {error,_Reason} -> - NewResult = do_end_tc_call(Mod,Func, {Error,Args0}, - {auto_skip,{failed,Error}}), - {{0,NewResult},Where,[]}; - {fail,Reason} -> - Conf = [{tc_status,{failed,Reason}} | hd(Args0)], - fw_error_notify(Mod, Func, Conf, Reason), - NewResult = do_end_tc_call(Mod,Func, {{error,Reason},[Conf]}, - {fail,Reason}), - {{0,NewResult},Where,[]}; - Skip = {SkipType,_Reason} when SkipType == skip; - SkipType == skipped -> - NewResult = do_end_tc_call(Mod,Func, - {Skip,Args0}, Skip), - {{0,NewResult},Where,[]}; - AutoSkip = {auto_skip,_Reason} -> - %% special case where a conf case "pretends" to be skipped - NewResult = - do_end_tc_call(Mod,Func, {AutoSkip,Args0}, AutoSkip), - {{0,NewResult},Where,[]} - end, - exit({Ref,Time,Value,Loc,Opts}). 
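The evaluator above stores TimetrapData (the multiply and scale factors described earlier) in its process dictionary before the case code runs; the timetraps themselves are handled through the exported timetrap API. A hedged sketch of how suite code typically uses that API, with an arbitrary 10-second limit and a placeholder body; the handle semantics are assumed from the exported functions and are not shown in this excerpt:

    %% Hedged sketch: guard a stretch of code with a 10 second timetrap
    %% and make sure it is cancelled again, whatever happens.
    timetrap_example() ->
        Handle = test_server:timetrap(test_server:seconds(10)),
        try
            timer:sleep(100)              %% the code under test goes here
        after
            test_server:timetrap_cancel(Handle)
        end.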
- -run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) -> - case RunInit of - run_init -> - set_tc_state(init_per_testcase, hd(Args)), - ensure_timetrap(Args), - case init_per_testcase(Mod, Func, Args) of - Skip = {SkipType,Reason} when SkipType == skip; - SkipType == skipped -> - Line = get_loc(), - Conf = [{tc_status,{skipped,Reason}}|hd(Args)], - NewRes = do_end_tc_call(Mod,Func, - {Skip,[Conf]}, Skip), - {{0,NewRes},Line,[]}; - {skip_and_save,Reason,SaveCfg} -> - Line = get_loc(), - Conf = [{tc_status,{skipped,Reason}}, - {save_config,SaveCfg}|hd(Args)], - NewRes = do_end_tc_call(Mod,Func, {{skip,Reason},[Conf]}, - {skip,Reason}), - {{0,NewRes},Line,[]}; - FailTC = {fail,Reason} -> % user fails the testcase - EndConf = [{tc_status,{failed,Reason}} | hd(Args)], - fw_error_notify(Mod, Func, EndConf, Reason), - NewRes = do_end_tc_call(Mod,Func, - {{error,Reason},[EndConf]}, - FailTC), - {{0,NewRes},[{Mod,Func}],[]}; - {ok,NewConf} -> - %% call user callback function if defined - NewConf1 = - user_callback(TCCallback, Mod, Func, init, NewConf), - %% save current state in controller loop - set_tc_state(tc, NewConf1), - %% execute the test case - {{T,Return},Loc} = {ts_tc(Mod,Func,[NewConf1]), get_loc()}, - {EndConf,TSReturn,FWReturn} = - case Return of - {E,TCError} when E=='EXIT' ; E==failed -> - fw_error_notify(Mod, Func, NewConf1, - TCError, Loc), - {[{tc_status,{failed,TCError}}, - {tc_fail_loc,Loc}|NewConf1], - Return,{error,TCError}}; - SaveCfg={save_config,_} -> - {[{tc_status,ok},SaveCfg|NewConf1],Return,ok}; - {skip_and_save,Why,SaveCfg} -> - Skip = {skip,Why}, - {[{tc_status,{skipped,Why}}, - {save_config,SaveCfg}|NewConf1], - Skip,Skip}; - {SkipType,Why} when SkipType == skip; - SkipType == skipped -> - {[{tc_status,{skipped,Why}}|NewConf1],Return, - Return}; - _ -> - {[{tc_status,ok}|NewConf1],Return,ok} - end, - %% call user callback function if defined - EndConf1 = - user_callback(TCCallback, Mod, Func, 'end', EndConf), - %% update current state in controller loop - {FWReturn1,TSReturn1,EndConf2} = - case end_per_testcase(Mod, Func, EndConf1) of - SaveCfg1={save_config,_} -> - {FWReturn,TSReturn, - [SaveCfg1|lists:keydelete(save_config,1, - EndConf1)]}; - {fail,ReasonToFail} -> - %% user has failed the testcase - fw_error_notify(Mod, Func, EndConf1, - ReasonToFail), - {{error,ReasonToFail}, - {failed,ReasonToFail}, - EndConf1}; - {failed,{_,end_per_testcase,_}} = Failure when - FWReturn == ok -> - %% unexpected termination in end_per_testcase - %% report this as the result to the framework - {Failure,TSReturn,EndConf1}; - _ -> - %% test case result should be reported to - %% framework no matter the status of - %% end_per_testcase - {FWReturn,TSReturn,EndConf1} - end, - %% clear current state in controller loop - case do_end_tc_call(Mod,Func, - {FWReturn1,[EndConf2]}, TSReturn1) of - {failed,Reason} = NewReturn -> - fw_error_notify(Mod,Func,EndConf2, Reason), - {{T,NewReturn},[{Mod,Func}],[]}; - NewReturn -> - {{T,NewReturn},Loc,[]} - end - end; - skip_init -> - set_tc_state(running, hd(Args)), - %% call user callback function if defined - Args1 = user_callback(TCCallback, Mod, Func, init, Args), - ensure_timetrap(Args1), - %% ts_tc does a catch - %% if this is a named conf group, the test case (init or end conf) - %% should be called with the name as the first argument - Args2 = if Name == undefined -> Args1; - true -> [Name | Args1] - end, - %% execute the conf test case - {{T,Return},Loc} = {ts_tc(Mod, Func, Args2),get_loc()}, - %% call user callback function if 
defined - Return1 = user_callback(TCCallback, Mod, Func, 'end', Return), - {Return2,Opts} = process_return_val([Return1], Mod, Func, - Args1, [{Mod,Func}], Return1), - {{T,Return2},Loc,Opts} - end. - -do_end_tc_call(Mod, Func, Res, Return) -> - FwMod = os:getenv("TEST_SERVER_FRAMEWORK"), - Ref = make_ref(), - if FwMod == "ct_framework" ; FwMod == "undefined"; FwMod == false -> - case test_server_sup:framework_call( - end_tc, [Mod,Func,Res, Return], ok) of - {fail,FWReason} -> - {failed,FWReason}; - ok -> - case Return of - {fail,Reason} -> - {failed,Reason}; - Return -> - Return - end; - NewReturn -> - NewReturn - end; - true -> - case test_server_sup:framework_call(FwMod, end_tc, - [Mod,Func,Res], Ref) of - {fail,FWReason} -> - {failed,FWReason}; - _Else -> - Return - end - end. - -%% the return value is a list and we have to check if it contains -%% the result of an end conf case or if it's a Config list -process_return_val([Return], M,F,A, Loc, Final) when is_list(Return) -> - ReturnTags = [skip,skip_and_save,save_config,comment,return_group_result], - %% check if all elements in the list are valid end conf return value tuples - case lists:all(fun(Val) when is_tuple(Val) -> - lists:any(fun(T) -> T == element(1, Val) end, - ReturnTags); - (ok) -> - true; - (_) -> - false - end, Return) of - true -> % must be return value from end conf case - process_return_val1(Return, M,F,A, Loc, Final, []); - false -> % must be Config value from init conf case - case do_end_tc_call(M, F, {ok,A}, Return) of - {failed, FWReason} = Failed -> - fw_error_notify(M,F,A, FWReason), - {Failed, []}; - NewReturn -> - {NewReturn, []} - end - end; -%% the return value is not a list, so it's the return value from an -%% end conf case or it's a dummy value that can be ignored -process_return_val(Return, M,F,A, Loc, Final) -> - process_return_val1(Return, M,F,A, Loc, Final, []). - -process_return_val1([Failed={E,TCError}|_], M,F,A=[Args], Loc, _, SaveOpts) - when E=='EXIT'; - E==failed -> - fw_error_notify(M,F,A, TCError, Loc), - case do_end_tc_call(M,F, {{error,TCError}, - [[{tc_status,{failed,TCError}}|Args]]}, - Failed) of - {failed,FWReason} -> - {{failed,FWReason},SaveOpts}; - NewReturn -> - {NewReturn,SaveOpts} - end; -process_return_val1([SaveCfg={save_config,_}|Opts], M,F,[Args], - Loc, Final, SaveOpts) -> - process_return_val1(Opts, M,F,[[SaveCfg|Args]], Loc, Final, SaveOpts); -process_return_val1([{skip_and_save,Why,SaveCfg}|Opts], M,F,[Args], - Loc, _, SaveOpts) -> - process_return_val1(Opts, M,F,[[{save_config,SaveCfg}|Args]], - Loc, {skip,Why}, SaveOpts); -process_return_val1([GR={return_group_result,_}|Opts], M,F,A, - Loc, Final, SaveOpts) -> - process_return_val1(Opts, M,F,A, Loc, Final, [GR|SaveOpts]); -process_return_val1([RetVal={Tag,_}|Opts], M,F,A, - Loc, _, SaveOpts) when Tag==skip; - Tag==comment -> - process_return_val1(Opts, M,F,A, Loc, RetVal, SaveOpts); -process_return_val1([_|Opts], M,F,A, Loc, Final, SaveOpts) -> - process_return_val1(Opts, M,F,A, Loc, Final, SaveOpts); -process_return_val1([], M,F,A, _Loc, Final, SaveOpts) -> - case do_end_tc_call(M,F, {Final,A}, Final) of - {failed,FWReason} -> - {{failed,FWReason},SaveOpts}; - NewReturn -> - {NewReturn,lists:reverse(SaveOpts)} - end. 
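The distinction drawn by process_return_val/6 above can be hard to see at a glance: a list counts as the return value of an end conf case only if every element is ok or a tuple tagged with one of the recognised keys; anything else is treated as a Config list from an init conf case. A minimal, hypothetical restatement of that membership test (not used by the module itself):

    %% Hedged restatement of the check in process_return_val/6.
    is_end_conf_result(Return) when is_list(Return) ->
        Tags = [skip,skip_and_save,save_config,comment,return_group_result],
        lists:all(fun(ok) -> true;
                     (Val) when is_tuple(Val) ->
                          lists:member(element(1, Val), Tags);
                     (_) -> false
                  end, Return).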
- -user_callback(undefined, _, _, _, Args) -> - Args; -user_callback({CBMod,CBFunc}, Mod, Func, InitOrEnd, - [Args]) when is_list(Args) -> - case catch apply(CBMod, CBFunc, [InitOrEnd,Mod,Func,Args]) of - Args1 when is_list(Args1) -> - [Args1]; - _ -> - [Args] - end; -user_callback({CBMod,CBFunc}, Mod, Func, InitOrEnd, Args) -> - case catch apply(CBMod, CBFunc, [InitOrEnd,Mod,Func,Args]) of - Args1 when is_list(Args1) -> - Args1; - _ -> - Args - end. - -init_per_testcase(Mod, Func, Args) -> - case code:is_loaded(Mod) of - false -> code:load_file(Mod); - _ -> ok - end, - case erlang:function_exported(Mod, init_per_testcase, 2) of - true -> - do_init_per_testcase(Mod, [Func|Args]); - false -> - %% Optional init_per_testcase is not defined -- keep quiet. - [Config] = Args, - {ok, Config} - end. - -do_init_per_testcase(Mod, Args) -> - try apply(Mod, init_per_testcase, Args) of - {Skip,Reason} when Skip =:= skip; Skip =:= skipped -> - {skip,Reason}; - {skip_and_save,_,_}=Res -> - Res; - NewConf when is_list(NewConf) -> - case lists:filter(fun(T) when is_tuple(T) -> false; - (_) -> true end, NewConf) of - [] -> - {ok,NewConf}; - Bad -> - group_leader() ! {printout,12, - "ERROR! init_per_testcase has returned " - "bad elements in Config: ~p\n",[Bad]}, - {skip,{failed,{Mod,init_per_testcase,bad_return}}} - end; - {fail,_Reason}=Res -> - Res; - _Other -> - group_leader() ! {printout,12, - "ERROR! init_per_testcase did not return " - "a Config list.\n",[]}, - {skip,{failed,{Mod,init_per_testcase,bad_return}}} - catch - throw:{Skip,Reason} when Skip =:= skip; Skip =:= skipped -> - {skip,Reason}; - exit:{Skip,Reason} when Skip =:= skip; Skip =:= skipped -> - {skip,Reason}; - throw:Other -> - set_loc(erlang:get_stacktrace()), - Line = get_loc(), - FormattedLoc = test_server_sup:format_loc(Line), - group_leader() ! {printout,12, - "ERROR! init_per_testcase thrown!\n" - "\tLocation: ~ts\n\tReason: ~p\n", - [FormattedLoc, Other]}, - {skip,{failed,{Mod,init_per_testcase,Other}}}; - _:Reason0 -> - Stk = erlang:get_stacktrace(), - Reason = {Reason0,Stk}, - set_loc(Stk), - Line = get_loc(), - FormattedLoc = test_server_sup:format_loc(Line), - group_leader() ! {printout,12, - "ERROR! init_per_testcase crashed!\n" - "\tLocation: ~ts\n\tReason: ~p\n", - [FormattedLoc,Reason]}, - {skip,{failed,{Mod,init_per_testcase,Reason}}} - end. - -end_per_testcase(Mod, Func, Conf) -> - case erlang:function_exported(Mod,end_per_testcase,2) of - true -> - do_end_per_testcase(Mod,end_per_testcase,Func,Conf); - false -> - %% Backwards compatibility! - case erlang:function_exported(Mod,fin_per_testcase,2) of - true -> - do_end_per_testcase(Mod,fin_per_testcase,Func,Conf); - false -> - ok - end - end. - -do_end_per_testcase(Mod,EndFunc,Func,Conf) -> - set_tc_state(end_per_testcase, Conf), - try Mod:EndFunc(Func, Conf) of - {save_config,_}=SaveCfg -> - SaveCfg; - {fail,_}=Fail -> - Fail; - _ -> - ok - catch - throw:Other -> - Comment0 = case read_comment() of - "" -> ""; - Cmt -> Cmt ++ test_server_ctrl:xhtml("<br>", - "<br />") - end, - set_loc(erlang:get_stacktrace()), - comment(io_lib:format("~ts<font color=\"red\">" - "WARNING: ~w thrown!" - "</font>\n",[Comment0,EndFunc])), - group_leader() ! 
{printout,12, - "WARNING: ~w thrown!\n" - "Reason: ~p\n" - "Line: ~ts\n", - [EndFunc, Other, - test_server_sup:format_loc(get_loc())]}, - {failed,{Mod,end_per_testcase,Other}}; - Class:Reason -> - Stk = erlang:get_stacktrace(), - set_loc(Stk), - Why = case Class of - exit -> {'EXIT',Reason}; - error -> {'EXIT',{Reason,Stk}} - end, - Comment0 = case read_comment() of - "" -> ""; - Cmt -> Cmt ++ test_server_ctrl:xhtml("<br>", - "<br />") - end, - comment(io_lib:format("~ts<font color=\"red\">" - "WARNING: ~w crashed!" - "</font>\n",[Comment0,EndFunc])), - group_leader() ! {printout,12, - "WARNING: ~w crashed!\n" - "Reason: ~p\n" - "Line: ~ts\n", - [EndFunc, Reason, - test_server_sup:format_loc(get_loc())]}, - {failed,{Mod,end_per_testcase,Why}} - end. - -get_loc() -> - get(test_server_loc). - -get_loc(Pid) -> - [{current_stacktrace,Stk0},{dictionary,Dict}] = - process_info(Pid, [current_stacktrace,dictionary]), - lists:foreach(fun({Key,Val}) -> put(Key, Val) end, Dict), - Stk = [rewrite_loc_item(Loc) || Loc <- Stk0], - case get(test_server_loc) of - [{Suite,Case}] -> - %% Location info unknown, check if {Suite,Case,Line} - %% is available in stacktrace and if so, use stacktrace - %% instead of current test_server_loc. - %% If location is the last expression in a test case - %% function, the info is not available due to tail call - %% elimination. We need to check if the test case has been - %% called by ts_tc/3 and, if so, insert the test case info - %% at that position. - case [match || {S,C,_L} <- Stk, S == Suite, C == Case] of - [match|_] -> - put(test_server_loc, Stk); - _ -> - {PreTC,PostTC} = - lists:splitwith(fun({test_server,ts_tc,_}) -> - false; - (_) -> - true - end, Stk), - if PostTC == [] -> - ok; - true -> - put(test_server_loc, - PreTC++[{Suite,Case,last_expr} | PostTC]) - end - end; - _ -> - put(test_server_loc, Stk) - end, - get_loc(). - -fw_error_notify(Mod, Func, Args, Error) -> - test_server_sup:framework_call(error_notification, - [Mod,Func,[Args], - {Error,unknown}]). -fw_error_notify(Mod, Func, Args, Error, Loc) -> - test_server_sup:framework_call(error_notification, - [Mod,Func,[Args], - {Error,Loc}]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% print(Detail,Format,Args,Printer) -> ok -%% Detail = integer() -%% Format = string() -%% Args = [term()] -%% -%% Just like io:format, except that depending on the Detail value, the output -%% is directed to console, major and/or minor log files. - -%% print(Detail,Format,Args) -> -%% test_server_ctrl:print(Detail, Format, Args). - -print(Detail,Format,Args,Printer) -> - test_server_ctrl:print(Detail, Format, Args, Printer). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% print_timsteamp(Detail,Leader) -> ok -%% -%% Prints Leader followed by a time stamp (date and time). Depending on -%% the Detail value, the output is directed to console, major and/or minor -%% log files. - -print_timestamp(Detail,Leader) -> - test_server_ctrl:print_timestamp(Detail, Leader). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% lookup_config(Key,Config) -> {value,{Key,Value}} | undefined -%% Key = term() -%% Value = term() -%% Config = [{Key,Value},...] -%% -%% Looks up a specific key in the config list, and returns the value -%% of the associated key, or undefined if the key doesn't exist. 
- -lookup_config(Key,Config) -> - case lists:keysearch(Key,1,Config) of - {value,{Key,Val}} -> - Val; - _ -> - io:format("Could not find element ~p in Config.~n",[Key]), - undefined - end. - -%% -%% IMPORTANT: get_loc/1 uses the name of this function when analysing -%% stack traces. If the name changes, get_loc/1 must be updated! -%% -ts_tc(M, F, A) -> - Before = erlang:monotonic_time(), - Result = try - apply(M, F, A) - catch - throw:{skip, Reason} -> {skip, Reason}; - throw:{skipped, Reason} -> {skip, Reason}; - exit:{skip, Reason} -> {skip, Reason}; - exit:{skipped, Reason} -> {skip, Reason}; - Type:Reason -> - Stk = erlang:get_stacktrace(), - set_loc(Stk), - case Type of - throw -> - {failed,{thrown,Reason}}; - error -> - {'EXIT',{Reason,Stk}}; - exit -> - {'EXIT',Reason} - end - end, - After = erlang:monotonic_time(), - Elapsed = erlang:convert_time_unit(After-Before, native, micro_seconds), - {Elapsed, Result}. - -set_loc(Stk) -> - Loc = case [rewrite_loc_item(I) || {_,_,_,_}=I <- Stk] of - [{M,F,0}|Stack] -> - [{M,F}|Stack]; - Other -> - Other - end, - put(test_server_loc, Loc). - -rewrite_loc_item({M,F,_,Loc}) -> - {M,F,proplists:get_value(line, Loc, 0)}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% TEST SUITE SUPPORT FUNCTIONS %% -%% %% -%% Note: Some of these functions have been moved to test_server_sup %% -%% in an attempt to keep this modules small (yeah, right!) %% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% format(Format) -> IoLibReturn -%% format(Detail,Format) -> IoLibReturn -%% format(Format,Args) -> IoLibReturn -%% format(Detail,Format,Args) -> IoLibReturn -%% Detail = integer() -%% Format = string() -%% Args = [term(),...] -%% IoLibReturn = term() -%% -%% Logs the Format string and Args, similar to io:format/1/2 etc. If -%% Detail is not specified, the default detail level (which is 50) is used. -%% Which log files the string will be logged in depends on the thresholds -%% set with set_levels/3. Typically with default detail level, only the -%% minor log file is used. -format(Format) -> - format(minor, Format, []). - -format(major, Format) -> - format(major, Format, []); -format(minor, Format) -> - format(minor, Format, []); -format(Detail, Format) when is_integer(Detail) -> - format(Detail, Format, []); -format(Format, Args) -> - format(minor, Format, Args). - -format(Detail, Format, Args) -> - Str = - case catch io_lib:format(Format,Args) of - {'EXIT',_} -> - io_lib:format("illegal format; ~p with args ~p.\n", - [Format,Args]); - Valid -> Valid - end, - log({Detail, Str}). - -log(Msg) -> - group_leader() ! {structured_io, self(), Msg}, - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% capture_start() -> ok -%% capture_stop() -> ok -%% -%% Starts/stops capturing all output from io:format, and similar. Capturing -%% output doesn't stop output from happening. It just makes it possible -%% to retrieve the output using capture_get/0. -%% Starting and stopping capture doesn't affect already captured output. -%% All output is stored as messages in the message queue until retrieved - -capture_start() -> - group_leader() ! {capture,self()}, - ok. - -capture_stop() -> - group_leader() ! {capture,false}, - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% capture_get() -> Output -%% Output = [string(),...] 
-%% -%% Retrieves all the captured output since last call to capture_get/0. -%% Note that since output arrive as messages to the process, it takes -%% a short while from the call to io:format until all output is available -%% by capture_get/0. It is not necessary to call capture_stop/0 before -%% retreiving the output. -capture_get() -> - test_server_sup:capture_get([]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% messages_get() -> Messages -%% Messages = [term(),...] -%% -%% Returns all messages in the message queue. -messages_get() -> - test_server_sup:messages_get([]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% permit_io(GroupLeader, FromPid) -> ok -%% -%% Make sure proceeding IO from FromPid won't get rejected -permit_io(GroupLeader, FromPid) -> - GroupLeader ! {permit_io,FromPid}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% sleep(Time) -> ok -%% Time = integer() | float() | infinity -%% -%% Sleeps the specified number of milliseconds. This sleep also accepts -%% floating point numbers (which are truncated) and the atom 'infinity'. -sleep(infinity) -> - receive - after infinity -> - ok - end; -sleep(MSecs) -> - receive - after trunc(MSecs) -> - ok - end, - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% adjusted_sleep(Time) -> ok -%% Time = integer() | float() | infinity -%% -%% Sleeps the specified number of milliseconds, multiplied by the -%% 'multiply_timetraps' value (if set) and possibly also automatically scaled -%% up if 'scale_timetraps' is set to true (which is default). -%% This function also accepts floating point numbers (which are truncated) and -%% the atom 'infinity'. -adjusted_sleep(infinity) -> - receive - after infinity -> - ok - end; -adjusted_sleep(MSecs) -> - {Multiplier,ScaleFactor} = - case test_server_ctrl:get_timetrap_parameters() of - {undefined,undefined} -> - {1,1}; - {undefined,false} -> - {1,1}; - {undefined,true} -> - {1,timetrap_scale_factor()}; - {infinity,_} -> - {infinity,1}; - {Mult,undefined} -> - {Mult,1}; - {Mult,false} -> - {Mult,1}; - {Mult,true} -> - {Mult,timetrap_scale_factor()} - end, - receive - after trunc(MSecs*Multiplier*ScaleFactor) -> - ok - end, - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% fail(Reason) -> exit({suite_failed,Reason}) -%% -%% Immediately calls exit. Included because test suites are easier -%% to read when using this function, rather than exit directly. -fail(Reason) -> - comment(cast_to_list(Reason)), - try - exit({suite_failed,Reason}) - catch - Class:R -> - case erlang:get_stacktrace() of - [{?MODULE,fail,1,_}|Stk] -> ok; - Stk -> ok - end, - erlang:raise(Class, R, Stk) - end. - -cast_to_list(X) when is_list(X) -> X; -cast_to_list(X) when is_atom(X) -> atom_to_list(X); -cast_to_list(X) -> lists:flatten(io_lib:format("~p", [X])). - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% fail() -> exit(suite_failed) -%% -%% Immediately calls exit. Included because test suites are easier -%% to read when using this function, rather than exit directly. -fail() -> - try - exit(suite_failed) - catch - Class:R -> - case erlang:get_stacktrace() of - [{?MODULE,fail,0,_}|Stk] -> ok; - Stk -> ok - end, - erlang:raise(Class, R, Stk) - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% break(Comment) -> ok -%% -%% Break a test case so part of the test can be done manually. -%% Use continue/0 to continue. -break(Comment) -> - break(?MODULE, Comment). - -break(CBM, Comment) -> - break(CBM, '', Comment). 
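A hypothetical test case body using the helpers defined earlier in this section: format/2 logs to the minor log file, adjusted_sleep/1 sleeps with the configured timetrap multiplication and scaling applied, and fail/1 aborts the case with a reason (do_flaky_thing/0 is an invented helper):

    retry_case(Config) when is_list(Config) ->
        test_server:format("starting retry_case, config: ~p", [Config]),
        test_server:adjusted_sleep(500),
        case do_flaky_thing() of
            ok    -> ok;
            Error -> test_server:fail({flaky_thing_failed,Error})
        end.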
- -break(CBM, TestCase, Comment) -> - timetrap_cancel(), - {TCName,CntArg,PName} = - if TestCase == '' -> - {"", "", test_server_break_process}; - true -> - Str = atom_to_list(TestCase), - {[32 | Str], Str, - list_to_atom("test_server_break_process_" ++ Str)} - end, - io:format(user, - "\n\n\n--- SEMIAUTOMATIC TESTING ---" - "\nThe test case~ts executes on process ~w" - "\n\n\n~ts" - "\n\n\n-----------------------------\n\n" - "Continue with --> ~w:continue(~ts).\n", - [TCName,self(),Comment,CBM,CntArg]), - case whereis(PName) of - undefined -> - spawn_break_process(self(), PName); - OldBreakProcess -> - OldBreakProcess ! cancel, - spawn_break_process(self(), PName) - end, - receive continue -> ok end. - -spawn_break_process(Pid, PName) -> - spawn(fun() -> - register(PName, self()), - receive - continue -> continue(Pid); - cancel -> ok - end - end). - -continue() -> - case whereis(test_server_break_process) of - undefined -> ok; - BreakProcess -> BreakProcess ! continue - end. - -continue(TestCase) when is_atom(TestCase) -> - PName = list_to_atom("test_server_break_process_" ++ - atom_to_list(TestCase)), - case whereis(PName) of - undefined -> ok; - BreakProcess -> BreakProcess ! continue - end; - -continue(Pid) when is_pid(Pid) -> - Pid ! continue. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timetrap_scale_factor() -> Factor -%% -%% Returns the amount to scale timetraps with. - -%% {X, fun() -> check() end} <- multiply scale with X if Fun() is true -timetrap_scale_factor() -> - timetrap_scale_factor([ - { 2, fun() -> has_lock_checking() end}, - { 3, fun() -> has_superfluous_schedulers() end}, - { 5, fun() -> purify_is_running() end}, - { 6, fun() -> is_debug() end}, - {10, fun() -> is_cover() end} - ]). - -timetrap_scale_factor(Scales) -> - %% The fun in {S, Fun} a filter input to the list comprehension - lists:foldl(fun(S,O) -> O*S end, 1, [ S || {S,F} <- Scales, F()]). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timetrap(Timeout) -> Handle -%% Handle = term() -%% -%% Creates a time trap, that will kill the calling process if the -%% trap is not cancelled with timetrap_cancel/1, within Timeout milliseconds. -timetrap(Timeout) -> - MultAndScale = - case get(test_server_multiply_timetraps) of - undefined -> {fun(T) -> T end, true}; - {undefined,false} -> {fun(T) -> T end, false}; - {undefined,_} -> {fun(T) -> T end, true}; - {infinity,_} -> {fun(_) -> infinity end, false}; - {Int,Scale} -> {fun(infinity) -> infinity; - (T) -> T*Int end, Scale} - end, - timetrap(Timeout, Timeout, self(), MultAndScale). - -%% when the function is called from different process than -%% the test case, the test_server_multiply_timetraps data -%% is unknown and must be passed as argument -timetrap(Timeout, TCPid, MultAndScale) -> - timetrap(Timeout, Timeout, TCPid, MultAndScale). - -timetrap(Timeout0, TimeToReport0, TCPid, MultAndScale = {Multiplier,Scale}) -> - %% the time_ms call will either convert Timeout to ms or spawn a - %% user timetrap which sends the result to the IO server process - Timeout = time_ms(Timeout0, TCPid, MultAndScale), - Timeout1 = Multiplier(Timeout), - TimeToReport = if Timeout0 == TimeToReport0 -> - Timeout1; - true -> - %% only convert to ms, don't start a - %% user timetrap - time_ms_check(TimeToReport0) - end, - cancel_default_timetrap(self() == TCPid), - Handle = case Timeout1 of - infinity -> - infinity; - _ -> - spawn_link(test_server_sup,timetrap,[Timeout1,TimeToReport, - Scale,TCPid]) - end, - - %% ERROR! 
This sets dict on IO process instead of testcase process - %% if Timeout is return value from previous user timetrap!! - - case get(test_server_timetraps) of - undefined -> - put(test_server_timetraps,[{Handle,TCPid,{TimeToReport,Scale}}]); - List -> - List1 = lists:delete({infinity,TCPid,{infinity,false}}, List), - put(test_server_timetraps,[{Handle,TCPid, - {TimeToReport,Scale}}|List1]) - end, - Handle. - -ensure_timetrap(Config) -> - case get(test_server_timetraps) of - [_|_] -> - ok; - _ -> - case get(test_server_default_timetrap) of - undefined -> ok; - Garbage -> - erase(test_server_default_timetrap), - format("=== WARNING: garbage in " - "test_server_default_timetrap: ~p~n", - [Garbage]) - end, - DTmo = case lists:keysearch(default_timeout,1,Config) of - {value,{default_timeout,Tmo}} -> Tmo; - _ -> ?DEFAULT_TIMETRAP_SECS - end, - format("=== test_server setting default " - "timetrap of ~p seconds~n", - [DTmo]), - put(test_server_default_timetrap, timetrap(seconds(DTmo))) - end. - -%% executing on IO process, no default timetrap ever set here -cancel_default_timetrap(false) -> - ok; -cancel_default_timetrap(true) -> - case get(test_server_default_timetrap) of - undefined -> - ok; - TimeTrap when is_pid(TimeTrap) -> - timetrap_cancel(TimeTrap), - erase(test_server_default_timetrap), - format("=== test_server canceled default timetrap " - "since another timetrap was set~n"), - ok; - Garbage -> - erase(test_server_default_timetrap), - format("=== WARNING: garbage in " - "test_server_default_timetrap: ~p~n", - [Garbage]), - error - end. - -time_ms({hours,N}, _, _) -> hours(N); -time_ms({minutes,N}, _, _) -> minutes(N); -time_ms({seconds,N}, _, _) -> seconds(N); -time_ms({Other,_N}, _, _) -> - format("=== ERROR: Invalid time specification: ~p. " - "Should be seconds, minutes, or hours.~n", [Other]), - exit({invalid_time_format,Other}); -time_ms(Ms, _, _) when is_integer(Ms) -> Ms; -time_ms(infinity, _, _) -> infinity; -time_ms(Fun, TCPid, MultAndScale) when is_function(Fun) -> - time_ms_apply(Fun, TCPid, MultAndScale); -time_ms({M,F,A}=MFA, TCPid, MultAndScale) when is_atom(M), - is_atom(F), - is_list(A) -> - time_ms_apply(MFA, TCPid, MultAndScale); -time_ms(Other, _, _) -> exit({invalid_time_format,Other}). - -time_ms_check(MFA = {M,F,A}) when is_atom(M), is_atom(F), is_list(A) -> - MFA; -time_ms_check(Fun) when is_function(Fun) -> - Fun; -time_ms_check(Other) -> - time_ms(Other, undefined, undefined). - -time_ms_apply(Func, TCPid, MultAndScale) -> - {_,GL} = process_info(TCPid, group_leader), - WhoAmI = self(), % either TC or IO server - T0 = erlang:monotonic_time(), - UserTTSup = - spawn(fun() -> - user_timetrap_supervisor(Func, WhoAmI, TCPid, - GL, T0, MultAndScale) - end), - receive - {UserTTSup,infinity} -> - %% remember the user timetrap so that it can be cancelled - save_user_timetrap(TCPid, UserTTSup, T0), - %% we need to make sure the user timetrap function - %% gets time to execute and return - timetrap(infinity, TCPid, MultAndScale) - after 5000 -> - exit(UserTTSup, kill), - if WhoAmI /= GL -> - exit({user_timetrap_error,time_ms_apply}); - true -> - format("=== ERROR: User timetrap execution failed!", []), - ignore - end - end. - -user_timetrap_supervisor(Func, Spawner, TCPid, GL, T0, MultAndScale) -> - process_flag(trap_exit, true), - Spawner ! 
{self(),infinity}, - MonRef = monitor(process, TCPid), - UserTTSup = self(), - group_leader(GL, UserTTSup), - UserTT = spawn_link(fun() -> call_user_timetrap(Func, UserTTSup) end), - receive - {UserTT,Result} -> - demonitor(MonRef, [flush]), - T1 = erlang:monotonic_time(), - Elapsed = erlang:convert_time_unit(T1-T0, native, milli_seconds), - try time_ms_check(Result) of - TimeVal -> - %% this is the new timetrap value to set (return value - %% from a fun or an MFA) - GL ! {user_timetrap,TCPid,TimeVal,T0,Elapsed,MultAndScale} - catch _:_ -> - %% when other than a legal timetrap value is returned - %% which will be the normal case for user timetraps - GL ! {user_timetrap,TCPid,0,T0,Elapsed,MultAndScale} - end; - {'EXIT',UserTT,Error} when Error /= normal -> - demonitor(MonRef, [flush]), - GL ! {user_timetrap,TCPid,0,T0,{user_timetrap_error,Error}, - MultAndScale}; - {'DOWN',MonRef,_,_,_} -> - demonitor(MonRef, [flush]), - exit(UserTT, kill) - end. - -call_user_timetrap(Func, Sup) when is_function(Func) -> - try Func() of - Result -> - Sup ! {self(),Result} - catch _:Error -> - exit({Error,erlang:get_stacktrace()}) - end; -call_user_timetrap({M,F,A}, Sup) -> - try apply(M,F,A) of - Result -> - Sup ! {self(),Result} - catch _:Error -> - exit({Error,erlang:get_stacktrace()}) - end. - -save_user_timetrap(TCPid, UserTTSup, StartTime) -> - %% save pid of user timetrap supervisor process so that - %% it may be stopped even before the timetrap func has returned - NewUserTT = {TCPid,{UserTTSup,StartTime}}, - case get(test_server_user_timetrap) of - undefined -> - put(test_server_user_timetrap, [NewUserTT]); - UserTTSups -> - case proplists:get_value(TCPid, UserTTSups) of - undefined -> - put(test_server_user_timetrap, - [NewUserTT | UserTTSups]); - PrevTTSup -> - %% remove prev user timetrap - remove_user_timetrap(PrevTTSup), - put(test_server_user_timetrap, - [NewUserTT | proplists:delete(TCPid, - UserTTSups)]) - end - end. - -update_user_timetraps(TCPid, StartTime) -> - %% called when a user timetrap is triggered - case get(test_server_user_timetrap) of - undefined -> - proceed; - UserTTs -> - case proplists:get_value(TCPid, UserTTs) of - {_UserTTSup,StartTime} -> % same timetrap - put(test_server_user_timetrap, - proplists:delete(TCPid, UserTTs)), - proceed; - {OtherUserTTSup,OtherStartTime} -> - case OtherStartTime - StartTime of - Diff when Diff >= 0 -> - ignore; - _ -> - exit(OtherUserTTSup, kill), - put(test_server_user_timetrap, - proplists:delete(TCPid, UserTTs)), - proceed - end; - undefined -> - proceed - end - end. - -remove_user_timetrap(TTSup) -> - exit(TTSup, kill). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timetrap_cancel(Handle) -> ok -%% Handle = term() -%% -%% Cancels a time trap. -timetrap_cancel(Handle) -> - timetrap_cancel_one(Handle, true). - -timetrap_cancel_one(infinity, _SendToServer) -> - ok; -timetrap_cancel_one(Handle, SendToServer) -> - case get(test_server_timetraps) of - undefined -> - ok; - [{Handle,_,_}] -> - erase(test_server_timetraps); - Timers -> - case lists:keysearch(Handle, 1, Timers) of - {value,_} -> - put(test_server_timetraps, - lists:keydelete(Handle, 1, Timers)); - false when SendToServer == true -> - group_leader() ! {timetrap_cancel_one,Handle,self()}; - false -> - ok - end - end, - test_server_sup:timetrap_cancel(Handle). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timetrap_cancel() -> ok -%% -%% Cancels timetrap for current test case. -timetrap_cancel() -> - timetrap_cancel_all(self(), true). 
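A hypothetical test case using the timetrap API above: timetrap/1 accepts milliseconds, {seconds,N}, {minutes,N}, {hours,N}, infinity, a fun or an MFA, and returns a handle that timetrap_cancel/1 later takes; timetrap_cancel/0 cancels all timetraps for the current case (churn_for_a_while/0 is an invented helper):

    long_running_case(Config) when is_list(Config) ->
        Handle = test_server:timetrap({minutes,5}),
        ok = churn_for_a_while(),
        test_server:timetrap_cancel(Handle),
        ok.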
- -timetrap_cancel_all(TCPid, SendToServer) -> - case get(test_server_timetraps) of - undefined -> - ok; - Timers -> - [timetrap_cancel_one(Handle, false) || - {Handle,Pid,_} <- Timers, Pid == TCPid] - end, - case get(test_server_user_timetrap) of - undefined -> - ok; - UserTTs -> - case proplists:get_value(TCPid, UserTTs) of - {UserTTSup,_StartTime} -> - remove_user_timetrap(UserTTSup), - put(test_server_user_timetrap, - proplists:delete(TCPid, UserTTs)); - undefined -> - ok - end - end, - if SendToServer == true -> - group_leader() ! {timetrap_cancel_all,TCPid,self()}; - true -> - ok - end, - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% get_timetrap_info() -> {Timeout,Scale} | undefined -%% -%% Read timetrap info for current test case -get_timetrap_info() -> - get_timetrap_info(self(), true). - -get_timetrap_info(TCPid, SendToServer) -> - case get(test_server_timetraps) of - undefined -> - undefined; - Timers -> - case [Info || {Handle,Pid,Info} <- Timers, - Pid == TCPid, Handle /= infinity] of - [I|_] -> - I; - [] when SendToServer == true -> - tc_supervisor_req({get_timetrap_info,TCPid}); - [] -> - undefined - end - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% hours(N) -> Milliseconds -%% minutes(N) -> Milliseconds -%% seconds(N) -> Milliseconds -%% N = integer() | float() -%% Milliseconds = integer() -%% -%% Transforms the named units to milliseconds. Fractions in the input -%% are accepted. The output is an integer. -hours(N) -> trunc(N * 1000 * 60 * 60). -minutes(N) -> trunc(N * 1000 * 60). -seconds(N) -> trunc(N * 1000). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% tc_supervisor_req(Tag) -> Result -%% tc_supervisor_req(Tag, Msg) -> Result -%% - -tc_supervisor_req(Tag) -> - Pid = test_server_gl:get_tc_supervisor(group_leader()), - Pid ! {Tag,self()}, - receive - {Pid,Tag,Result} -> - Result - after 5000 -> - error(no_answer_from_tc_supervisor) - end. - -tc_supervisor_req(Tag, Msg) -> - Pid = test_server_gl:get_tc_supervisor(group_leader()), - Pid ! {Tag,self(),Msg}, - receive - {Pid,Tag,Result} -> - Result - after 5000 -> - error(no_answer_from_tc_supervisor) - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timecall(M,F,A) -> {Time,Val} -%% Time = float() -%% -%% Measures the time spent evaluating MFA. The measurement is done with -%% erlang:now/0, and should have pretty good accuracy on most platforms. -%% The function is not evaluated in a catch context. -timecall(M, F, A) -> - test_server_sup:timecall(M,F,A). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% do_times(N,M,F,A) -> ok -%% do_times(N,Fun) -> -%% N = integer() -%% Fun = fun() -> void() -%% -%% Evaluates MFA or Fun N times, and returns ok. -do_times(N,M,F,A) when N>0 -> - apply(M,F,A), - do_times(N-1,M,F,A); -do_times(0,_,_,_) -> - ok. - -do_times(N,Fun) when N>0 -> - Fun(), - do_times(N-1,Fun); -do_times(0,_) -> - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% m_out_of_n(M,N,Fun) -> ok | exit({m_out_of_n_failed,{R,left_to_do}}) -%% M = integer() -%% N = integer() -%% Fun = fun() -> void() -%% R = integer() -%% -%% Repeats evaluating the given function until it succeeded (didn't crash) -%% M times. If, after N times, M successful attempts have not been -%% accomplished, the process crashes with reason {m_out_of_n_failed -%% {R,left_to_do}}, where R indicates how many cases that remained to be -%% successfully completed. 
-%% -%% For example: -%% m_out_of_n(1,4,fun() -> tricky_test_case() end) -%% Tries to run tricky_test_case() up to 4 times, -%% and is happy if it succeeds once. -%% -%% m_out_of_n(7,8,fun() -> clock_sanity_check() end) -%% Tries running clock_sanity_check() up to 8 -%% times and allows the function to fail once. -%% This might be useful if clock_sanity_check/0 -%% is known to fail if the clock crosses an hour -%% boundary during the test (and the up to 8 -%% test runs could never cross 2 boundaries) -m_out_of_n(0,_,_) -> - ok; -m_out_of_n(M,0,_) -> - exit({m_out_of_n_failed,{M,left_to_do}}); -m_out_of_n(M,N,Fun) -> - case catch Fun() of - {'EXIT',_} -> - m_out_of_n(M,N-1,Fun); - _Other -> - m_out_of_n(M-1,N-1,Fun) - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%call_crash(M,F,A) -%%call_crash(Time,M,F,A) -%%call_crash(Time,Crash,M,F,A) -%% M - atom() -%% F - atom() -%% A - [term()] -%% Time - integer() in milliseconds. -%% Crash - term() -%% -%% Spaws a new process that calls MFA. The call is considered -%% successful if the call crashes with the given reason (Crash), -%% or any other reason if Crash is not specified. -%% ** The call must terminate withing the given Time (defaults -%% to infinity), or it is considered a failure (exit with reason -%% 'call_crash_timeout' is generated). - -call_crash(M,F,A) -> - call_crash(infinity,M,F,A). -call_crash(Time,M,F,A) -> - call_crash(Time,any,M,F,A). -call_crash(Time,Crash,M,F,A) -> - test_server_sup:call_crash(Time,Crash,M,F,A). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% start_node(SlaveName, Type, Options) -> -%% {ok, Slave} | {error, Reason} -%% -%% SlaveName = string(), atom(). -%% Type = slave | peer -%% Options = [{tuple(), term()}] -%% -%% OptionList is a tuplelist wich may contain one -%% or more of these members: -%% -%% Slave and Peer: -%% {remote, true} - Start the node on a remote host. If not specified, -%% the node will be started on the local host (with -%% some exceptions, for instance VxWorks, -%% where all nodes are started on a remote host). -%% {args, Arguments} - Arguments passed directly to the node. -%% {cleanup, false} - Nodes started with this option will not be killed -%% by the test server after completion of the test case -%% Therefore it is IMPORTANT that the USER terminates -%% the node!! -%% {erl, ReleaseList} - Use an Erlang emulator determined by ReleaseList -%% when starting nodes, instead of the same emulator -%% as the test server is running. ReleaseList is a list -%% of specifiers, where a specifier is either -%% {release, Rel}, {prog, Prog}, or 'this'. Rel is -%% either the name of a release, e.g., "r7a" or -%% 'latest'. 'this' means using the same emulator as -%% the test server. Prog is the name of an emulator -%% executable. If the list has more than one element, -%% one of them is picked randomly. (Only -%% works on Solaris and Linux, and the test -%% server gives warnings when it notices that -%% nodes are not of the same version as -%% itself.) -%% -%% Peer only: -%% {wait, false} - Don't wait for the node to be started. -%% {fail_on_error, false} - Returns {error, Reason} rather than failing -%% the test case. This option can only be used with -%% peer nodes. -%% Note that slave nodes always act as if they had -%% fail_on_error==false. 
-%% - -start_node(Name, Type, Options) -> - lists:foreach( - fun(N) -> - case firstname(N) of - Name -> - format("=== WARNING: Trying to start node \'~w\' when node" - " with same first name exists: ~w", [Name, N]); - _other -> ok - end - end, - nodes()), - - group_leader() ! {sync_apply, - self(), - {test_server_ctrl,start_node,[Name,Type,Options]}}, - Result = receive {sync_result,R} -> R end, - - case Result of - {ok,Node} -> - - %% Cannot run cover on shielded node or on a node started - %% by a shielded node. - Cover = case is_cover(Node) of - true -> - proplists:get_value(start_cover,Options,true); - false -> - false - end, - - net_adm:ping(Node), - case Cover of - true -> - do_cover_for_node(Node,start); - _ -> - ok - end, - {ok,Node}; - {fail,Reason} -> fail(Reason); - Error -> Error - end. - -firstname(N) -> - list_to_atom(upto($@,atom_to_list(N))). - -%% This should!!! crash if H is not member in list. -upto(H, [H | _T]) -> []; -upto(H, [X | T]) -> [X | upto(H,T)]. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% wait_for_node(Name) -> ok | {error,timeout} -%% -%% If a node is started with the options {wait,false}, this function -%% can be used to wait for the node to come up from the -%% test server point of view (i.e. wait until it has contacted -%% the test server controller after startup) -wait_for_node(Slave) -> - group_leader() ! {sync_apply, - self(), - {test_server_ctrl,wait_for_node,[Slave]}}, - Result = receive {sync_result,R} -> R end, - case Result of - ok -> - net_adm:ping(Slave), - case is_cover(Slave) of - true -> - do_cover_for_node(Slave,start); - _ -> - ok - end; - _ -> - ok - end, - Result. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% stop_node(Name) -> true|false -%% -%% Kills a (remote) node. -%% Also inform test_server_ctrl so it can clean up! -stop_node(Slave) -> - Cover = is_cover(Slave), - if Cover -> do_cover_for_node(Slave,flush,false); - true -> ok - end, - group_leader() ! {sync_apply,self(),{test_server_ctrl,stop_node,[Slave]}}, - Result = receive {sync_result,R} -> R end, - case Result of - ok -> - erlang:monitor_node(Slave, true), - slave:stop(Slave), - receive - {nodedown, Slave} -> - format(minor, "Stopped slave node: ~w", [Slave]), - format(major, "=node_stop ~w", [Slave]), - if Cover -> do_cover_for_node(Slave,stop,false); - true -> ok - end, - true - after 30000 -> - format("=== WARNING: Node ~w does not seem to terminate.", - [Slave]), - erlang:monitor_node(Slave, false), - receive {nodedown, Slave} -> ok after 0 -> ok end, - false - end; - {error, _Reason} -> - %% Either, the node is already dead or it was started - %% with the {cleanup,false} option, or it was started - %% in some other way than test_server:start_node/3 - format("=== WARNING: Attempt to stop a nonexisting slavenode (~w)~n" - "=== Trying to kill it anyway!!!", - [Slave]), - case net_adm:ping(Slave)of - pong -> - erlang:monitor_node(Slave, true), - slave:stop(Slave), - receive - {nodedown, Slave} -> - format(minor, "Stopped slave node: ~w", [Slave]), - format(major, "=node_stop ~w", [Slave]), - if Cover -> do_cover_for_node(Slave,stop,false); - true -> ok - end, - true - after 30000 -> - format("=== WARNING: Node ~w does not seem to terminate.", - [Slave]), - erlang:monitor_node(Slave, false), - receive {nodedown, Slave} -> ok after 0 -> ok end, - false - end; - pang -> - if Cover -> do_cover_for_node(Slave,stop,false); - true -> ok - end, - false - end - end. 
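A hypothetical sketch of the node helpers above: start_node/3 starts a slave (or peer) node, options such as {args,Arguments} are passed on to the new node, and stop_node/1 kills the node again and informs test_server_ctrl so it can clean up (the argument string is illustrative):

    remote_case(Config) when is_list(Config) ->
        {ok,Node} = test_server:start_node(remote_helper, slave,
                                           [{args,"-env MY_TEST_FLAG true"}]),
        pong = net_adm:ping(Node),
        Node = rpc:call(Node, erlang, node, []),
        true = test_server:stop_node(Node),
        ok.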
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% is_release_available(Release) -> true | false -%% Release -> string() -%% -%% Test if a release (such as "r10b") is available to be -%% started using start_node/3. - -is_release_available(Release) -> - group_leader() ! {sync_apply, - self(), - {test_server_ctrl,is_release_available,[Release]}}, - receive {sync_result,R} -> R end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% run_on_shielded_node(Fun, CArgs) -> term() -%% Fun -> function() -%% CArg -> list() -%% -%% -%% Fun is executed in a process on a temporarily created -%% hidden node. Communication with the job process goes -%% via a job proxy process on the hidden node, i.e. the -%% group leader of the test case process is the job proxy -%% process. This makes it possible to start nodes from the -%% hidden node that are unaware of the test server node. -%% Without the job proxy process all processes would have -%% a process residing on the test_server node as group_leader. -%% -%% Fun - Function to execute -%% CArg - Extra command line arguments to use when starting -%% the shielded node. -%% -%% If Fun is successfully executed, the result is returned. -%% - -run_on_shielded_node(Fun, CArgs) when is_function(Fun), is_list(CArgs) -> - Nr = erlang:unique_integer([positive]), - Name = "shielded_node-" ++ integer_to_list(Nr), - Node = case start_node(Name, slave, [{args, "-hidden " ++ CArgs}]) of - {ok, N} -> N; - Err -> fail({failed_to_start_shielded_node, Err}) - end, - Master = self(), - Ref = make_ref(), - Slave = spawn(Node, - fun () -> - start_job_proxy(), - receive - Ref -> - Master ! {Ref, Fun()} - end, - receive after infinity -> infinity end - end), - MRef = erlang:monitor(process, Slave), - Slave ! Ref, - receive - {'DOWN', MRef, _, _, Info} -> - stop_node(Node), - fail(Info); - {Ref, Res} -> - stop_node(Node), - receive - {'DOWN', MRef, _, _, _} -> - Res - end - end. - -%% Return true if Name or node() is a shielded node -is_shielded(Name) -> - case {cast_to_list(Name),atom_to_list(node())} of - {"shielded_node"++_,_} -> true; - {_,"shielded_node"++_} -> true; - _ -> false - end. - -same_version(Name) -> - ThisVersion = erlang:system_info(version), - OtherVersion = rpc:call(Name, erlang, system_info, [version]), - ThisVersion =:= OtherVersion. - -is_cover(Name) -> - case is_cover() of - true -> - not is_shielded(Name) andalso same_version(Name); - false -> - false - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% temp_name(Stem) -> string() -%% Stem = string() -%% -%% Create a unique file name, based on (starting with) Stem. -%% A filename of the form <Stem><Number> is generated, and the -%% function checks that that file doesn't already exist. -temp_name(Stem) -> - Num = erlang:unique_integer([positive]), - RandomName = Stem ++ integer_to_list(Num), - {ok,Files} = file:list_dir(filename:dirname(Stem)), - case lists:member(RandomName,Files) of - true -> - %% oh, already exists - bad luck. Try again. - temp_name(Stem); %% recursively try again - false -> - RandomName - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% app_test/1 -%% -app_test(App) -> - app_test(App, pedantic). -app_test(App, Mode) -> - test_server_sup:app_test(App, Mode). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% appup_test/1 -%% -appup_test(App) -> - test_server_sup:appup_test(App). 
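A hypothetical use of run_on_shielded_node/2 defined above: the fun is executed in a process on a temporarily created hidden node (started with the extra command line arguments given as the second argument), and its result is returned to the caller:

    shielded_case(Config) when is_list(Config) ->
        HiddenNode = test_server:run_on_shielded_node(fun() -> node() end, ""),
        true = HiddenNode =/= node(),
        ok.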
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% is_native(Mod) -> true | false -%% -%% Checks wether the module is natively compiled or not. - -is_native(Mod) -> - (catch Mod:module_info(native)) =:= true. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% comment(String) -> ok -%% -%% The given String will occur in the comment field -%% of the table on the test suite result page. If -%% called several times, only the last comment is -%% printed. -%% comment/1 is also overwritten by the return value -%% {comment,Comment} or fail/1 (which prints Reason -%% as a comment). -comment(String) -> - group_leader() ! {comment,String}, - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% read_comment() -> string() -%% -%% Read the current comment string stored in -%% state during test case execution. -read_comment() -> - tc_supervisor_req(read_comment). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% make_priv_dir() -> ok -%% -%% Order test server to create the private directory -%% for the current test case. -make_priv_dir() -> - tc_supervisor_req(make_priv_dir). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% os_type() -> OsType -%% -%% Returns the OsType of the target node. OsType is -%% the same as returned from os:type() -os_type() -> - os:type(). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% is_cover() -> boolean() -%% -%% Returns true if cover is running, else false -is_cover() -> - case whereis(cover_server) of - undefined -> false; - _ -> true - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% is_debug() -> boolean() -%% -%% Returns true if the emulator is debug-compiled, false otherwise. -is_debug() -> - case catch erlang:system_info(debug_compiled) of - {'EXIT', _} -> - case string:str(erlang:system_info(system_version), "debug") of - Int when is_integer(Int), Int > 0 -> true; - _ -> false - end; - Res -> - Res - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% has_lock_checking() -> boolean() -%% -%% Returns true if the emulator has lock checking enabled, false otherwise. -has_lock_checking() -> - case catch erlang:system_info(lock_checking) of - {'EXIT', _} -> false; - Res -> Res - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% has_superfluous_schedulers() -> boolean() -%% -%% Returns true if the emulator has more scheduler threads than logical -%% processors, false otherwise. -has_superfluous_schedulers() -> - case catch {erlang:system_info(schedulers), - erlang:system_info(logical_processors)} of - {S, P} when is_integer(S), is_integer(P), S > P -> true; - _ -> false - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% is_commercial_build() -> boolean() -%% -%% Returns true if the current emulator is commercially supported. -%% (The emulator will not have "[source]" in its start-up message.) -%% We might want to do more tests on a commercial platform, for instance -%% ensuring that all applications have documentation). -is_commercial() -> - case string:str(erlang:system_info(system_version), "source") of - Int when is_integer(Int), Int > 0 -> false; - _ -> true - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% DEBUGGER INTERFACE %% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% purify_is_running() -> false|true -%% -%% Tests if Purify is currently running. 
- -purify_is_running() -> - case catch erlang:system_info({error_checker, running}) of - {'EXIT', _} -> false; - Res -> Res - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% purify_new_leaks() -> false|BytesLeaked -%% BytesLeaked = integer() -%% -%% Checks for new memory leaks if Purify is active. -%% Returns the number of bytes leaked, or false if Purify -%% is not running. -purify_new_leaks() -> - case catch erlang:system_info({error_checker, memory}) of - {'EXIT', _} -> false; - Leaked when is_integer(Leaked) -> Leaked - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% purify_new_fds_inuse() -> false|FdsInuse -%% FdsInuse = integer() -%% -%% Checks for new file descriptors in use. -%% Returns the number of new file descriptors in use, or false -%% if Purify is not running. -purify_new_fds_inuse() -> - case catch erlang:system_info({error_checker, fd}) of - {'EXIT', _} -> false; - Inuse when is_integer(Inuse) -> Inuse - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% purify_format(Format, Args) -> ok -%% Format = string() -%% Args = lists() -%% -%% Outputs the formatted string to Purify's logfile,if Purify is active. -purify_format(Format, Args) -> - (catch erlang:system_info({error_checker, io_lib:format(Format, Args)})), - ok. - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% -%% Apply given function and reply to caller or proxy. -%% -do_sync_apply(Proxy, From, {M,F,A}) -> - Result = apply(M, F, A), - if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result}; - true -> From ! {sync_result,Result} - end. diff --git a/lib/test_server/src/test_server_ctrl.erl b/lib/test_server/src/test_server_ctrl.erl deleted file mode 100644 index 8a46996bc3..0000000000 --- a/lib/test_server/src/test_server_ctrl.erl +++ /dev/null @@ -1,5646 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2002-2014. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(test_server_ctrl). -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% %% -%% The Erlang Test Server %% -%% %% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% -%% MODULE DEPENDENCIES: -%% HARD TO REMOVE: erlang, lists, io_lib, gen_server, file, io, string, -%% code, ets, rpc, gen_tcp, inet, erl_tar, sets, -%% test_server, test_server_sup, test_server_node -%% EASIER TO REMOVE: filename, filelib, lib, re -%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%% SUPERVISOR INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([start/0, start/1, start_link/1, stop/0]). - -%%% OPERATOR INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([add_spec/1, add_dir/2, add_dir/3]). --export([add_module/1, add_module/2, - add_conf/3, - add_case/2, add_case/3, add_cases/2, add_cases/3]). 
--export([add_dir_with_skip/3, add_dir_with_skip/4, add_tests_with_skip/3]). --export([add_module_with_skip/2, add_module_with_skip/3, - add_conf_with_skip/4, - add_case_with_skip/3, add_case_with_skip/4, - add_cases_with_skip/3, add_cases_with_skip/4]). --export([jobs/0, run_test/1, wait_finish/0, idle_notify/1, - abort_current_testcase/1, abort/0]). --export([start_get_totals/1, stop_get_totals/0]). --export([reject_io_reqs/1, get_levels/0, set_levels/3]). --export([multiply_timetraps/1, scale_timetraps/1, get_timetrap_parameters/0]). --export([create_priv_dir/1]). --export([cover/1, cover/2, cover/3, - cover_compile/7, cover_analyse/2, cross_cover_analyse/2, - trc/1, stop_trace/0]). --export([testcase_callback/1]). --export([set_random_seed/1]). --export([kill_slavenodes/0]). - -%%% TEST_SERVER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([print/2, print/3, print/4, print_timestamp/2]). --export([start_node/3, stop_node/1, wait_for_node/1, is_release_available/1]). --export([format/1, format/2, format/3, to_string/1]). --export([get_target_info/0]). --export([get_hosts/0]). --export([node_started/1]). --export([uri_encode/1,uri_encode/2]). - -%%% DEBUGGER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([i/0, p/1, p/3, pi/2, pi/4, t/0, t/1]). - -%%% PRIVATE EXPORTED %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% --export([init/1, terminate/2]). --export([handle_call/3, handle_cast/2, handle_info/2]). --export([do_test_cases/4]). --export([do_spec/2, do_spec_list/2]). --export([xhtml/2]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - --include("test_server_internal.hrl"). --include_lib("kernel/include/file.hrl"). --define(suite_ext, "_SUITE"). --define(log_ext, ".log.html"). --define(src_listing_ext, ".src.html"). --define(logdir_ext, ".logs"). --define(data_dir_suffix, "_data/"). --define(suitelog_name, "suite.log"). --define(coverlog_name, "cover.html"). --define(raw_coverlog_name, "cover.log"). --define(cross_coverlog_name, "cross_cover.html"). --define(raw_cross_coverlog_name, "cross_cover.log"). --define(cross_cover_info, "cross_cover.info"). --define(cover_total, "total_cover.log"). --define(unexpected_io_log, "unexpected_io.log.html"). --define(last_file, "last_name"). --define(last_link, "last_link"). --define(last_test, "last_test"). --define(html_ext, ".html"). --define(now, os:timestamp()). - --define(void_fun, fun() -> ok end). --define(mod_result(X), if X == skip -> skipped; - X == auto_skip -> skipped; - true -> X end). - --define(auto_skip_color, "#FFA64D"). --define(user_skip_color, "#FF8000"). --define(sortable_table_name, "SortableTable"). - --record(state,{jobs=[], levels={1,19,10}, reject_io_reqs=false, - multiply_timetraps=1, scale_timetraps=true, - create_priv_dir=auto_per_run, finish=false, - target_info, trc=false, cover=false, wait_for_node=[], - testcase_callback=undefined, idle_notify=[], - get_totals=false, random_seed=undefined}). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% OPERATOR INTERFACE - -add_dir(Name, Job=[Dir|_Dirs]) when is_list(Dir) -> - add_job(cast_to_list(Name), - lists:map(fun(D)-> {dir,cast_to_list(D)} end, Job)); -add_dir(Name, Dir) -> - add_job(cast_to_list(Name), {dir,cast_to_list(Dir)}). 
- -add_dir(Name, Job=[Dir|_Dirs], Pattern) when is_list(Dir) -> - add_job(cast_to_list(Name), - lists:map(fun(D)-> {dir,cast_to_list(D), - cast_to_list(Pattern)} end, Job)); -add_dir(Name, Dir, Pattern) -> - add_job(cast_to_list(Name), {dir,cast_to_list(Dir),cast_to_list(Pattern)}). - -add_module(Mod) when is_atom(Mod) -> - add_job(atom_to_list(Mod), {Mod,all}). - -add_module(Name, Mods) when is_list(Mods) -> - add_job(cast_to_list(Name), lists:map(fun(Mod) -> {Mod,all} end, Mods)). - -add_conf(Name, Mod, Conf) when is_tuple(Conf) -> - add_job(cast_to_list(Name), {Mod,[Conf]}); - -add_conf(Name, Mod, Confs) when is_list(Confs) -> - add_job(cast_to_list(Name), {Mod,Confs}). - -add_case(Mod, Case) when is_atom(Mod), is_atom(Case) -> - add_job(atom_to_list(Mod), {Mod,Case}). - -add_case(Name, Mod, Case) when is_atom(Mod), is_atom(Case) -> - add_job(Name, {Mod,Case}). - -add_cases(Mod, Cases) when is_atom(Mod), is_list(Cases) -> - add_job(atom_to_list(Mod), {Mod,Cases}). - -add_cases(Name, Mod, Cases) when is_atom(Mod), is_list(Cases) -> - add_job(Name, {Mod,Cases}). - -add_spec(Spec) -> - Name = filename:rootname(Spec, ".spec"), - case filelib:is_file(Spec) of - true -> add_job(Name, {spec,Spec}); - false -> {error,nofile} - end. - -%% This version of the interface is to be used if there are -%% suites or cases that should be skipped. - -add_dir_with_skip(Name, Job=[Dir|_Dirs], Skip) when is_list(Dir) -> - add_job(cast_to_list(Name), - lists:map(fun(D)-> {dir,cast_to_list(D)} end, Job), - Skip); -add_dir_with_skip(Name, Dir, Skip) -> - add_job(cast_to_list(Name), {dir,cast_to_list(Dir)}, Skip). - -add_dir_with_skip(Name, Job=[Dir|_Dirs], Pattern, Skip) when is_list(Dir) -> - add_job(cast_to_list(Name), - lists:map(fun(D)-> {dir,cast_to_list(D), - cast_to_list(Pattern)} end, Job), - Skip); -add_dir_with_skip(Name, Dir, Pattern, Skip) -> - add_job(cast_to_list(Name), - {dir,cast_to_list(Dir),cast_to_list(Pattern)}, Skip). - -add_module_with_skip(Mod, Skip) when is_atom(Mod) -> - add_job(atom_to_list(Mod), {Mod,all}, Skip). - -add_module_with_skip(Name, Mods, Skip) when is_list(Mods) -> - add_job(cast_to_list(Name), lists:map(fun(Mod) -> {Mod,all} end, Mods), Skip). - -add_conf_with_skip(Name, Mod, Conf, Skip) when is_tuple(Conf) -> - add_job(cast_to_list(Name), {Mod,[Conf]}, Skip); - -add_conf_with_skip(Name, Mod, Confs, Skip) when is_list(Confs) -> - add_job(cast_to_list(Name), {Mod,Confs}, Skip). - -add_case_with_skip(Mod, Case, Skip) when is_atom(Mod), is_atom(Case) -> - add_job(atom_to_list(Mod), {Mod,Case}, Skip). - -add_case_with_skip(Name, Mod, Case, Skip) when is_atom(Mod), is_atom(Case) -> - add_job(Name, {Mod,Case}, Skip). - -add_cases_with_skip(Mod, Cases, Skip) when is_atom(Mod), is_list(Cases) -> - add_job(atom_to_list(Mod), {Mod,Cases}, Skip). - -add_cases_with_skip(Name, Mod, Cases, Skip) when is_atom(Mod), is_list(Cases) -> - add_job(Name, {Mod,Cases}, Skip). - -add_tests_with_skip(LogDir, Tests, Skip) -> - add_job(LogDir, - lists:map(fun({Dir,all,all}) -> - {Dir,{dir,Dir}}; - ({Dir,Mods,all}) -> - {Dir,lists:map(fun(M) -> {M,all} end, Mods)}; - ({Dir,Mod,Cases}) -> - {Dir,{Mod,Cases}} - end, Tests), - Skip). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% COMMAND LINE INTERFACE - -parse_cmd_line(Cmds) -> - parse_cmd_line(Cmds, [], [], local, false, false, undefined). 
- -parse_cmd_line(['SPEC',Spec|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) -> - case file:consult(Spec) of - {ok, TermList} -> - Name = filename:rootname(Spec), - parse_cmd_line(Cmds, TermList++SpecList, [Name|Names], Param, - Trc, Cov, TCCB); - {error,Reason} -> - io:format("Can't open ~w: ~p\n",[Spec, file:format_error(Reason)]), - parse_cmd_line(Cmds, SpecList, Names, Param, Trc, Cov, TCCB) - end; -parse_cmd_line(['NAME',Name|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) -> - parse_cmd_line(Cmds, SpecList, [{name,atom_to_list(Name)}|Names], - Param, Trc, Cov, TCCB); -parse_cmd_line(['SKIPMOD',Mod|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) -> - parse_cmd_line(Cmds, [{skip,{Mod,"by command line"}}|SpecList], Names, - Param, Trc, Cov, TCCB); -parse_cmd_line(['SKIPCASE',Mod,Case|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) -> - parse_cmd_line(Cmds, [{skip,{Mod,Case,"by command line"}}|SpecList], Names, - Param, Trc, Cov, TCCB); -parse_cmd_line(['DIR',Dir|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) -> - Name = filename:basename(Dir), - parse_cmd_line(Cmds, [{topcase,{dir,Name}}|SpecList], [Name|Names], - Param, Trc, Cov, TCCB); -parse_cmd_line(['MODULE',Mod|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) -> - parse_cmd_line(Cmds,[{topcase,{Mod,all}}|SpecList],[atom_to_list(Mod)|Names], - Param, Trc, Cov, TCCB); -parse_cmd_line(['CASE',Mod,Case|Cmds], SpecList, Names, Param, Trc, Cov, TCCB) -> - parse_cmd_line(Cmds,[{topcase,{Mod,Case}}|SpecList],[atom_to_list(Mod)|Names], - Param, Trc, Cov, TCCB); -parse_cmd_line(['TRACE',Trc|Cmds], SpecList, Names, Param, _Trc, Cov, TCCB) -> - parse_cmd_line(Cmds, SpecList, Names, Param, Trc, Cov, TCCB); -parse_cmd_line(['COVER',App,CF,Analyse|Cmds], SpecList, Names, Param, Trc, _Cov, TCCB) -> - parse_cmd_line(Cmds, SpecList, Names, Param, Trc, {{App,CF}, Analyse}, TCCB); -parse_cmd_line(['TESTCASE_CALLBACK',Mod,Func|Cmds], SpecList, Names, Param, Trc, Cov, _) -> - parse_cmd_line(Cmds, SpecList, Names, Param, Trc, Cov, {Mod,Func}); -parse_cmd_line([Obj|_Cmds], _SpecList, _Names, _Param, _Trc, _Cov, _TCCB) -> - io:format("~w: Bad argument: ~w\n", [?MODULE,Obj]), - io:format(" Use the `ts' module to start tests.\n", []), - io:format(" (If you ARE using `ts', there is a bug in `ts'.)\n", []), - halt(1); -parse_cmd_line([], SpecList, Names, Param, Trc, Cov, TCCB) -> - NameList = lists:reverse(Names, ["suite"]), - Name = case lists:keysearch(name, 1, NameList) of - {value,{name,N}} -> N; - false -> hd(NameList) - end, - {lists:reverse(SpecList), Name, Param, Trc, Cov, TCCB}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% cast_to_list(X) -> string() -%% X = list() | atom() | void() -%% Returns a string representation of whatever was input - -cast_to_list(X) when is_list(X) -> X; -cast_to_list(X) when is_atom(X) -> atom_to_list(X); -cast_to_list(X) -> lists:flatten(io_lib:format("~w", [X])). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% START INTERFACE - -%% Kept for backwards compatibility -start(_) -> - start(). -start_link(_) -> - start_link(). - - -start() -> - case gen_server:start({local,?MODULE}, ?MODULE, [], []) of - {ok, Pid} -> - {ok, Pid}; - Other -> - Other - end. - -start_link() -> - case gen_server:start_link({local,?MODULE}, ?MODULE, [], []) of - {ok, Pid} -> - {ok, Pid}; - Other -> - Other - end. 
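A hypothetical operator-interface session built from the functions above and the exported job API: start the controller, queue a directory of test suites as a named job, and block until all jobs have finished (the directory path is illustrative):

    run_my_suites() ->
        {ok,_Pid} = test_server_ctrl:start(),
        ok = test_server_ctrl:add_dir("my_app_test", "/path/to/my_app/test"),
        ok = test_server_ctrl:wait_finish().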
- -run_test(CommandLine) -> - process_flag(trap_exit,true), - {SpecList,Name,Param,Trc,Cov,TCCB} = parse_cmd_line(CommandLine), - {ok,_TSPid} = start_link(Param), - case Trc of - false -> ok; - File -> trc(File) - end, - case Cov of - false -> ok; - {{App,CoverFile},Analyse} -> cover(App, maybe_file(CoverFile), Analyse) - end, - testcase_callback(TCCB), - add_job(Name, {command_line,SpecList}), - - wait_finish(). - -%% Converted CoverFile to a string unless it is 'none' -maybe_file(none) -> - none; -maybe_file(CoverFile) -> - atom_to_list(CoverFile). - -idle_notify(Fun) -> - {ok, Pid} = controller_call({idle_notify,Fun}), - Pid. - -start_get_totals(Fun) -> - {ok, Pid} = controller_call({start_get_totals,Fun}), - Pid. - -stop_get_totals() -> - ok = controller_call(stop_get_totals), - ok. - -wait_finish() -> - OldTrap = process_flag(trap_exit, true), - {ok, Pid} = finish(true), - link(Pid), - receive - {'EXIT',Pid,_} -> - ok - end, - process_flag(trap_exit, OldTrap), - ok. - -abort_current_testcase(Reason) -> - controller_call({abort_current_testcase,Reason}). - -abort() -> - OldTrap = process_flag(trap_exit, true), - {ok, Pid} = finish(abort), - link(Pid), - receive - {'EXIT',Pid,_} -> - ok - end, - process_flag(trap_exit, OldTrap), - ok. - -finish(Abort) -> - controller_call({finish,Abort}). - -stop() -> - controller_call(stop). - -jobs() -> - controller_call(jobs). - -get_levels() -> - controller_call(get_levels). - -set_levels(Show, Major, Minor) -> - controller_call({set_levels,Show,Major,Minor}). - -reject_io_reqs(Bool) -> - controller_call({reject_io_reqs,Bool}). - -multiply_timetraps(N) -> - controller_call({multiply_timetraps,N}). - -scale_timetraps(Bool) -> - controller_call({scale_timetraps,Bool}). - -get_timetrap_parameters() -> - controller_call(get_timetrap_parameters). - -create_priv_dir(Value) -> - controller_call({create_priv_dir,Value}). - -trc(TraceFile) -> - controller_call({trace,TraceFile}, 2*?ACCEPT_TIMEOUT). - -stop_trace() -> - controller_call(stop_trace). - -node_started(Node) -> - gen_server:cast(?MODULE, {node_started,Node}). - -cover(App, Analyse) when is_atom(App) -> - cover(App, none, Analyse); -cover(CoverFile, Analyse) -> - cover(none, CoverFile, Analyse). -cover(App, CoverFile, Analyse) -> - {Excl,Incl,Cross} = read_cover_file(CoverFile), - CoverInfo = #cover{app=App, - file=CoverFile, - excl=Excl, - incl=Incl, - cross=Cross, - level=Analyse}, - controller_call({cover,CoverInfo}). - -cover(CoverInfo) -> - controller_call({cover,CoverInfo}). - -cover_compile(App,File,Excl,Incl,Cross,Analyse,Stop) -> - cover_compile(#cover{app=App, - file=File, - excl=Excl, - incl=Incl, - cross=Cross, - level=Analyse, - stop=Stop}). - -testcase_callback(ModFunc) -> - controller_call({testcase_callback,ModFunc}). - -set_random_seed(Seed) -> - controller_call({set_random_seed,Seed}). - -kill_slavenodes() -> - controller_call(kill_slavenodes). - -get_hosts() -> - get(test_server_hosts). - -%%-------------------------------------------------------------------- - -add_job(Name, TopCase) -> - add_job(Name, TopCase, []). - -add_job(Name, TopCase, Skip) -> - SuiteName = - case Name of - "." -> "current_dir"; - ".." -> "parent_dir"; - Other -> Other - end, - Dir = filename:absname(SuiteName), - controller_call({add_job,Dir,SuiteName,TopCase,Skip}). - -controller_call(Arg) -> - case catch gen_server:call(?MODULE, Arg, infinity) of - {'EXIT',{{badarg,_},{gen_server,call,_}}} -> - exit(test_server_ctrl_not_running); - {'EXIT',Reason} -> - exit(Reason); - Other -> - Other - end. 
-controller_call(Arg, Timeout) -> - case catch gen_server:call(?MODULE, Arg, Timeout) of - {'EXIT',{{badarg,_},{gen_server,call,_}}} -> - exit(test_server_ctrl_not_running); - {'EXIT',Reason} -> - exit(Reason); - Other -> - Other - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% init([]) -%% -%% init() is the init function of the test_server's gen_server. -%% -init([]) -> - case os:getenv("TEST_SERVER_CALL_TRACE") of - false -> - ok; - "" -> - ok; - TraceSpec -> - test_server_sup:call_trace(TraceSpec) - end, - process_flag(trap_exit, true), - %% copy format_exception setting from init arg to application environment - case init:get_argument(test_server_format_exception) of - {ok,[[TSFE]]} -> - application:set_env(test_server, format_exception, list_to_atom(TSFE)); - _ -> - ok - end, - test_server_sup:cleanup_crash_dumps(), - test_server_sup:util_start(), - State = #state{jobs=[],finish=false}, - TI0 = test_server:init_target_info(), - TargetHost = test_server_sup:hoststr(), - TI = TI0#target_info{host=TargetHost, - naming=naming(), - master=TargetHost}, - ets:new(slave_tab, [named_table,set,public,{keypos,2}]), - set_hosts([TI#target_info.host]), - {ok,State#state{target_info=TI}}. - -naming() -> - case lists:member($., test_server_sup:hoststr()) of - true -> "-name"; - false -> "-sname" - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call(kill_slavenodes, From, State) -> ok -%% -%% Kill all slave nodes that remain after a test case -%% is completed. -%% -handle_call(kill_slavenodes, _From, State) -> - Nodes = test_server_node:kill_nodes(), - {reply, Nodes, State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({set_hosts, HostList}, From, State) -> ok -%% -%% Set the global hostlist. -%% -handle_call({set_hosts, Hosts}, _From, State) -> - set_hosts(Hosts), - {reply, ok, State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call(get_hosts, From, State) -> [Hosts] -%% -%% Returns the lists of hosts that the test server -%% can use for slave nodes. This is primarily used -%% for nodename generation. -%% -handle_call(get_hosts, _From, State) -> - Hosts = get_hosts(), - {reply, Hosts, State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({add_job,Dir,Name,TopCase,Skip}, _, State) -> -%% ok | {error,Reason} -%% -%% Dir = string() -%% Name = string() -%% TopCase = term() -%% Skip = [SkipItem] -%% SkipItem = {Mod,Comment} | {Mod,Case,Comment} | {Mod,Cases,Comment} -%% Mod = Case = atom() -%% Comment = string() -%% Cases = [Case] -%% -%% Adds a job to the job queue. The name of the job is Name. A log directory -%% will be created in Dir/Name.logs. TopCase may be anything that -%% collect_cases/3 accepts, plus the following: -%% -%% {spec,SpecName} executes the named test suite specification file. Commands -%% in the file should be in the format accepted by do_spec_list/1. -%% -%% {command_line,SpecList} executes the list of specification instructions -%% supplied, which should be in the format accepted by do_spec_list/1. 
- -handle_call({add_job,Dir,Name,TopCase,Skip}, _From, State) -> - LogDir = Dir ++ ?logdir_ext, - ExtraTools = - case State#state.cover of - false -> []; - CoverInfo -> [{cover,CoverInfo}] - end, - ExtraTools1 = - case State#state.random_seed of - undefined -> ExtraTools; - Seed -> [{random_seed,Seed}|ExtraTools] - end, - case lists:keysearch(Name, 1, State#state.jobs) of - false -> - case TopCase of - {spec,SpecName} -> - Pid = spawn_tester( - ?MODULE, do_spec, - [SpecName,{State#state.multiply_timetraps, - State#state.scale_timetraps}], - LogDir, Name, State#state.levels, - State#state.reject_io_reqs, - State#state.create_priv_dir, - State#state.testcase_callback, ExtraTools1), - NewJobs = [{Name,Pid}|State#state.jobs], - {reply, ok, State#state{jobs=NewJobs}}; - {command_line,SpecList} -> - Pid = spawn_tester( - ?MODULE, do_spec_list, - [SpecList,{State#state.multiply_timetraps, - State#state.scale_timetraps}], - LogDir, Name, State#state.levels, - State#state.reject_io_reqs, - State#state.create_priv_dir, - State#state.testcase_callback, ExtraTools1), - NewJobs = [{Name,Pid}|State#state.jobs], - {reply, ok, State#state{jobs=NewJobs}}; - TopCase -> - case State#state.get_totals of - {CliPid,Fun} -> - Result = count_test_cases(TopCase, Skip), - Fun(CliPid, Result), - {reply, ok, State}; - _ -> - Cfg = make_config([]), - Pid = spawn_tester( - ?MODULE, do_test_cases, - [TopCase,Skip,Cfg, - {State#state.multiply_timetraps, - State#state.scale_timetraps}], - LogDir, Name, State#state.levels, - State#state.reject_io_reqs, - State#state.create_priv_dir, - State#state.testcase_callback, ExtraTools1), - NewJobs = [{Name,Pid}|State#state.jobs], - {reply, ok, State#state{jobs=NewJobs}} - end - end; - _ -> - {reply,{error,name_already_in_use},State} - end; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call(jobs, _, State) -> JobList -%% JobList = [{Name,Pid}, ...] -%% Name = string() -%% Pid = pid() -%% -%% Return the list of current jobs. - -handle_call(jobs, _From, State) -> - {reply,State#state.jobs,State}; - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({abort_current_testcase,Reason}, _, State) -> Result -%% Reason = term() -%% Result = ok | {error,no_testcase_running} -%% -%% Attempts to abort the test case that's currently running. - -handle_call({abort_current_testcase,Reason}, _From, State) -> - case State#state.jobs of - [{_,Pid}|_] -> - Pid ! {abort_current_testcase,Reason,self()}, - receive - {Pid,abort_current_testcase,Result} -> - {reply, Result, State} - after 10000 -> - {reply, {error,no_testcase_running}, State} - end; - _ -> - {reply, {error,no_testcase_running}, State} - end; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({finish,Fini}, _, State) -> {ok,Pid} -%% Fini = true | abort -%% -%% Tells the test_server to stop as soon as there are no test suites -%% running. Immediately if none are running. Abort is handled as soon -%% as current test finishes. 
-
-handle_call({finish,Fini}, _From, State) ->
-    case State#state.jobs of
-        [] ->
-            lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Fini) end,
-                          State#state.idle_notify),
-            State2 = State#state{finish=false},
-            {stop,shutdown,{ok,self()}, State2};
-        _SomeJobs ->
-            State2 = State#state{finish=Fini},
-            {reply, {ok,self()}, State2}
-    end;
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% handle_call({idle_notify,Fun}, From, State) -> {ok,Pid}
-%%
-%% Lets a test client subscribe to receive a notification when the
-%% test server becomes idle (can be used to synchronize jobs).
-%% test_server calls Fun(From) when idle.
-
-handle_call({idle_notify,Fun}, {Cli,_Ref}, State) ->
-    case State#state.jobs of
-        [] -> self() ! report_idle;
-        _ -> ok
-    end,
-    Subscribed = State#state.idle_notify,
-    {reply, {ok,self()}, State#state{idle_notify=[{Cli,Fun}|Subscribed]}};
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% handle_call(start_get_totals, From, State) -> {ok,Pid}
-%%
-%% Switch on the mode where the test server will only
-%% report back the number of tests it would execute
-%% given some subsequent jobs.
-
-handle_call({start_get_totals,Fun}, {Cli,_Ref}, State) ->
-    {reply, {ok,self()}, State#state{get_totals={Cli,Fun}}};
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% handle_call(stop_get_totals, From, State) -> ok
-%%
-%% Switches off the get_totals mode again, so that subsequent jobs
-%% are executed instead of just counted (see start_get_totals above).
-
-handle_call(stop_get_totals, {_Cli,_Ref}, State) ->
-    {reply, ok, State#state{get_totals=false}};
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% handle_call(get_levels, _, State) -> {Show,Major,Minor}
-%% Show = integer()
-%% Major = integer()
-%% Minor = integer()
-%%
-%% Returns a 3-tuple with the logging thresholds.
-%% All output and information from a test suite is tagged with a detail
-%% level. Lower values are more "important". Text that is output using
-%% io:format or similar is automatically tagged with detail level 50.
-%%
-%% All output with detail level:
-%% less than or equal to Show is displayed on the screen (default 1)
-%% less than or equal to Major is logged in the major log file (default 19)
-%% greater than or equal to Minor is logged in the minor log files (default 10)
-
-handle_call(get_levels, _From, State) ->
-    {reply,State#state.levels,State};
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% handle_call({set_levels,Show,Major,Minor}, _, State) -> ok
-%% Show = integer()
-%% Major = integer()
-%% Minor = integer()
-%%
-%% Sets the logging thresholds, see handle_call(get_levels,...) above.
- -handle_call({set_levels,Show,Major,Minor}, _From, State) -> - {reply,ok,State#state{levels={Show,Major,Minor}}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({reject_io_reqs,Bool}, _, State) -> ok -%% Bool = bool() -%% -%% May be used to switch off stdout printouts to the minor log file - -handle_call({reject_io_reqs,Bool}, _From, State) -> - {reply,ok,State#state{reject_io_reqs=Bool}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({multiply_timetraps,N}, _, State) -> ok -%% N = integer() | infinity -%% -%% Multiplies all timetraps set by test cases with N - -handle_call({multiply_timetraps,N}, _From, State) -> - {reply,ok,State#state{multiply_timetraps=N}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({scale_timetraps,Bool}, _, State) -> ok -%% Bool = true | false -%% -%% Specifies if test_server should scale the timetrap value -%% automatically if e.g. cover is running. - -handle_call({scale_timetraps,Bool}, _From, State) -> - {reply,ok,State#state{scale_timetraps=Bool}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call(get_timetrap_parameters, _, State) -> {Multiplier,Scale} -%% Multiplier = integer() | infinity -%% Scale = true | false -%% -%% Returns the parameter values that affect timetraps. - -handle_call(get_timetrap_parameters, _From, State) -> - {reply,{State#state.multiply_timetraps,State#state.scale_timetraps},State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({trace,TraceFile}, _, State) -> ok | {error,Reason} -%% -%% Starts a separate node (trace control node) which -%% starts tracing on target and all slave nodes -%% -%% TraceFile is a text file with elements of type -%% {Trace,Mod,TracePattern}. -%% {Trace,Mod,Func,TracePattern}. -%% {Trace,Mod,Func,Arity,TracePattern}. -%% -%% Trace = tp | tpl; local or global call trace -%% Mod,Func = atom(), Arity=integer(); defines what to trace -%% TracePattern = [] | match_spec() -%% -%% The 'call' trace flag is set on all processes, and then -%% the given trace patterns are set. 
- -handle_call({trace,TraceFile}, _From, State=#state{trc=false}) -> - TI = State#state.target_info, - case test_server_node:start_tracer_node(TraceFile, TI) of - {ok,Tracer} -> {reply,ok,State#state{trc=Tracer}}; - Error -> {reply,Error,State} - end; -handle_call({trace,_TraceFile}, _From, State) -> - {reply,{error,already_tracing},State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call(stop_trace, _, State) -> ok | {error,Reason} -%% -%% Stops tracing on target and all slave nodes and -%% terminates trace control node - -handle_call(stop_trace, _From, State=#state{trc=false}) -> - {reply,{error,not_tracing},State}; -handle_call(stop_trace, _From, State) -> - R = test_server_node:stop_tracer_node(State#state.trc), - {reply,R,State#state{trc=false}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({cover,CoverInfo}, _, State) -> ok | {error,Reason} -%% -%% Set specification of cover analysis to be used when running tests -%% (see start_extra_tools/1 and stop_extra_tools/1) - -handle_call({cover,CoverInfo}, _From, State) -> - {reply,ok,State#state{cover=CoverInfo}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({create_priv_dir,Value}, _, State) -> ok | {error,Reason} -%% -%% Set create_priv_dir to either auto_per_run (create common priv dir once -%% per test run), manual_per_tc (the priv dir name will be unique for each -%% test case, but the user has to call test_server:make_priv_dir/0 to create -%% it), or auto_per_tc (unique priv dir created automatically for each test -%% case). - -handle_call({create_priv_dir,Value}, _From, State) -> - {reply,ok,State#state{create_priv_dir=Value}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({testcase_callback,{Mod,Func}}, _, State) -> ok | {error,Reason} -%% -%% Add a callback function that will be called before and after every -%% test case (on the test case process): -%% -%% Mod:Func(Suite,TestCase,InitOrEnd,Config) -%% -%% InitOrEnd = init | 'end'. - -handle_call({testcase_callback,ModFunc}, _From, State) -> - case ModFunc of - {Mod,Func} -> - case code:is_loaded(Mod) of - {file,_} -> - ok; - false -> - code:load_file(Mod) - end, - case erlang:function_exported(Mod,Func,4) of - true -> - ok; - false -> - io:format(user, - "WARNING! Callback function ~w:~w/4 undefined.~n~n", - [Mod,Func]) - end; - _ -> - ok - end, - {reply,ok,State#state{testcase_callback=ModFunc}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({set_random_seed,Seed}, _, State) -> ok | {error,Reason} -%% -%% Let operator set a random seed value to be used e.g. for shuffling -%% test cases. - -handle_call({set_random_seed,Seed}, _From, State) -> - {reply,ok,State#state{random_seed=Seed}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call(stop, _, State) -> ok -%% -%% Stops the test server immediately. 
-%% Some cleanup is done by terminate/2 - -handle_call(stop, _From, State) -> - {stop, shutdown, ok, State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call(get_target_info, _, State) -> TI -%% -%% TI = #target_info{} -%% -%% Returns information about target - -handle_call(get_target_info, _From, State) -> - {reply, State#state.target_info, State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({start_node,Name,Type,Options}, _, State) -> -%% ok | {error,Reason} -%% -%% Starts a new node (slave or peer) - -handle_call({start_node, Name, Type, Options}, From, State) -> - %% test_server_ctrl does gen_server:reply/2 explicitly - test_server_node:start_node(Name, Type, Options, From, - State#state.target_info), - {noreply,State}; - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({wait_for_node,Node}, _, State) -> ok -%% -%% Waits for a new node to take contact. Used if -%% node is started with option {wait,false} - -handle_call({wait_for_node, Node}, From, State) -> - NewWaitList = - case ets:lookup(slave_tab,Node) of - [] -> - [{Node,From}|State#state.wait_for_node]; - _ -> - gen_server:reply(From,ok), - State#state.wait_for_node - end, - {noreply,State#state{wait_for_node=NewWaitList}}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({stop_node,Name}, _, State) -> ok | {error,Reason} -%% -%% Stops a slave or peer node. This is actually only some cleanup -%% - the node is really stopped by test_server when this returns. - -handle_call({stop_node, Name}, _From, State) -> - R = test_server_node:stop_node(Name), - {reply, R, State}; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_call({is_release_available,Name}, _, State) -> ok | {error,Reason} -%% -%% Tests if the release is available. - -handle_call({is_release_available, Release}, _From, State) -> - R = test_server_node:is_release_available(Release), - {reply, R, State}. - -%%-------------------------------------------------------------------- -set_hosts(Hosts) -> - put(test_server_hosts, Hosts). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_cast({node_started,Name}, _, State) -%% -%% Called by test_server_node when a slave/peer node is fully started. - -handle_cast({node_started,Node}, State) -> - case State#state.trc of - false -> ok; - Trc -> test_server_node:trace_nodes(Trc, [Node]) - end, - NewWaitList = - case lists:keysearch(Node,1,State#state.wait_for_node) of - {value,{Node,From}} -> - gen_server:reply(From, ok), - lists:keydelete(Node, 1, State#state.wait_for_node); - false -> - State#state.wait_for_node - end, - {noreply, State#state{wait_for_node=NewWaitList}}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_info({'EXIT',Pid,Reason}, State) -%% Pid = pid() -%% Reason = term() -%% -%% Handles exit messages from linked processes. Only test suites are -%% expected to be linked. When a test suite terminates, it is removed -%% from the job queue. 
- -handle_info(report_idle, State) -> - Finish = State#state.finish, - lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Finish) end, - State#state.idle_notify), - {noreply,State#state{idle_notify=[]}}; - - -handle_info({'EXIT',Pid,Reason}, State) -> - case lists:keysearch(Pid,2,State#state.jobs) of - false -> - %% not our problem - {noreply,State}; - {value,{Name,_}} -> - NewJobs = lists:keydelete(Pid, 2, State#state.jobs), - case Reason of - normal -> - fine; - killed -> - io:format("Suite ~ts was killed\n", [Name]); - _Other -> - io:format("Suite ~ts was killed with reason ~p\n", - [Name,Reason]) - end, - State2 = State#state{jobs=NewJobs}, - Finish = State2#state.finish, - case NewJobs of - [] -> - lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Finish) end, - State2#state.idle_notify), - case Finish of - false -> - {noreply,State2#state{idle_notify=[]}}; - _ -> % true | abort - %% test_server:finish() has been called and - %% there are no jobs in the job queue => - %% stop the test_server_ctrl - {stop,shutdown,State2#state{finish=false}} - end; - _ -> % pending jobs - case Finish of - abort -> % abort test now! - lists:foreach(fun({Cli,Fun}) -> Fun(Cli,Finish) end, - State2#state.idle_notify), - {stop,shutdown,State2#state{finish=false}}; - _ -> % true | false - {noreply, State2} - end - end - end; - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_info({tcp_closed,Sock}, State) -%% -%% A Socket was closed. This indicates that a node died. -%% This can be -%% *Slave or peer node started by a test suite -%% *Trace controll node - -handle_info({tcp_closed,Sock}, State=#state{trc=Sock}) -> - %% Tracer node died - can't really do anything - %%! Maybe print something??? - {noreply,State#state{trc=false}}; -handle_info({tcp_closed,Sock}, State) -> - test_server_node:nodedown(Sock), - {noreply,State}; -handle_info(_, State) -> - %% dummy; accept all, do nothing. - {noreply, State}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% terminate(Reason, State) -> ok -%% Reason = term() -%% -%% Cleans up when the test_server is terminating. Kills the running -%% test suites (if any) and any possible remainting slave node - -terminate(_Reason, State) -> - test_server_sup:util_stop(), - case State#state.trc of - false -> ok; - Sock -> test_server_node:stop_tracer_node(Sock) - end, - kill_all_jobs(State#state.jobs), - test_server_node:kill_nodes(), - ok. - -kill_all_jobs([{_Name,JobPid}|Jobs]) -> - exit(JobPid, kill), - kill_all_jobs(Jobs); -kill_all_jobs([]) -> - ok. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%----------------------- INTERNAL FUNCTIONS -----------------------%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% spawn_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs, -%% CreatePrivDir, TestCaseCallback, ExtraTools) -> Pid -%% Mod = atom() -%% Func = atom() -%% Args = [term(),...] -%% Dir = string() -%% Name = string() -%% Levels = {integer(),integer(),integer()} -%% RejectIoReqs = bool() -%% CreatePrivDir = auto_per_run | manual_per_tc | auto_per_tc -%% TestCaseCallback = {CBMod,CBFunc} | undefined -%% ExtraTools = [ExtraTool,...] -%% ExtraTool = CoverInfo | TraceInfo | RandomSeed -%% -%% Spawns a test suite execute-process, just an ordinary spawn, except -%% that it will set a lot of dictionary information before starting the -%% named function. Also, the execution is timed and protected by a catch. 
-%% When the named function is done executing, a summary of the results
-%% is printed to the log files.
-
-spawn_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
-             CreatePrivDir, TCCallback, ExtraTools) ->
-    spawn_link(fun() ->
-                       init_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
-                                   CreatePrivDir, TCCallback, ExtraTools)
-               end).
-
-init_tester(Mod, Func, Args, Dir, Name, {_,_,MinLev}=Levels,
-            RejectIoReqs, CreatePrivDir, TCCallback, ExtraTools) ->
-    process_flag(trap_exit, true),
-    test_server_io:start_link(),
-    put(test_server_name, Name),
-    put(test_server_dir, Dir),
-    put(test_server_total_time, 0),
-    put(test_server_ok, 0),
-    put(test_server_failed, 0),
-    put(test_server_skipped, {0,0}),
-    put(test_server_minor_level, MinLev),
-    put(test_server_create_priv_dir, CreatePrivDir),
-    put(test_server_random_seed, proplists:get_value(random_seed, ExtraTools)),
-    put(test_server_testcase_callback, TCCallback),
-    case os:getenv("TEST_SERVER_FRAMEWORK") of
-        FW when FW =:= false; FW =:= "undefined" ->
-            put(test_server_framework, '$none');
-        FW ->
-            put(test_server_framework, list_to_atom(FW)),
-            case os:getenv("TEST_SERVER_FRAMEWORK_NAME") of
-                FWName when FWName =:= false; FWName =:= "undefined" ->
-                    put(test_server_framework_name, '$none');
-                FWName ->
-                    put(test_server_framework_name, list_to_atom(FWName))
-            end
-    end,
-
-    %% before first print, read and set logging options
-    LogOpts = test_server_sup:framework_call(get_logopts, [], []),
-    put(test_server_logopts, LogOpts),
-
-    StartedExtraTools = start_extra_tools(ExtraTools),
-
-    test_server_io:set_job_name(Name),
-    test_server_io:set_gl_props([{levels,Levels},
-                                 {auto_nl,not lists:member(no_nl, LogOpts)},
-                                 {reject_io_reqs,RejectIoReqs}]),
-    group_leader(test_server_io:get_gl(true), self()),
-    {TimeMy,Result} = ts_tc(Mod, Func, Args),
-    set_io_buffering(undefined),
-    test_server_io:set_job_name(undefined),
-    catch stop_extra_tools(StartedExtraTools),
-    case Result of
-        {'EXIT',test_suites_done} ->
-            ok;
-        {'EXIT',_Pid,Reason} ->
-            print(1, "EXIT, reason ~p", [Reason]);
-        {'EXIT',Reason} ->
-            report_severe_error(Reason),
-            print(1, "EXIT, reason ~p", [Reason])
-    end,
-    Time = TimeMy/1000000,
-    SuccessStr =
-        case get(test_server_failed) of
-            0 -> "Ok";
-            _ -> "FAILED"
-        end,
-    {SkippedN,SkipStr} =
-        case get(test_server_skipped) of
-            {0,0} ->
-                {0,""};
-            {USkipped,ASkipped} ->
-                Skipped = USkipped+ASkipped,
-                {Skipped,io_lib:format(", ~w Skipped", [Skipped])}
-        end,
-    OkN = get(test_server_ok),
-    FailedN = get(test_server_failed),
-    print(html,"\n</tbody>\n<tfoot>\n"
-          "<tr><td></td><td><b>TOTAL</b></td><td></td><td></td><td></td>"
-          "<td>~.3fs</td><td><b>~ts</b></td><td>~w Ok, ~w Failed~ts of ~w</td></tr>\n"
-          "</tfoot>\n",
-          [Time,SuccessStr,OkN,FailedN,SkipStr,OkN+FailedN+SkippedN]),
-
-    test_server_io:stop([major,html,unexpected_io]),
-    {UnexpectedIoName,UnexpectedIoFooter} = get(test_server_unexpected_footer),
-    {ok,UnexpectedIoFd} = open_html_file(UnexpectedIoName, [append]),
-    io:put_chars(UnexpectedIoFd, "\n</pre>\n"++UnexpectedIoFooter),
-    file:close(UnexpectedIoFd),
-    ok.
-
-report_severe_error(Reason) ->
-    test_server_sup:framework_call(report, [severe_error,Reason]).
-
-ts_tc(M,F,A) ->
-    Before = erlang:monotonic_time(),
-    Result = (catch apply(M, F, A)),
-    After = erlang:monotonic_time(),
-    Elapsed = erlang:convert_time_unit(After-Before,
-                                       native,
-                                       micro_seconds),
-    {Elapsed, Result}.
-
-start_extra_tools(ExtraTools) ->
-    start_extra_tools(ExtraTools, []).
-start_extra_tools([{cover,CoverInfo} | ExtraTools], Started) ->
-    case start_cover(CoverInfo) of
-        {ok,NewCoverInfo} ->
-            start_extra_tools(ExtraTools,[{cover,NewCoverInfo}|Started]);
-        {error,_} ->
-            start_extra_tools(ExtraTools, Started)
-    end;
-start_extra_tools([_ | ExtraTools], Started) ->
-    start_extra_tools(ExtraTools, Started);
-start_extra_tools([], Started) ->
-    Started.
-
-stop_extra_tools(ExtraTools) ->
-    TestDir = get(test_server_log_dir_base),
-    case lists:keymember(cover, 1, ExtraTools) of
-        false ->
-            write_default_coverlog(TestDir);
-        true ->
-            ok
-    end,
-    stop_extra_tools(ExtraTools, TestDir).
-
-stop_extra_tools([{cover,CoverInfo}|ExtraTools], TestDir) ->
-    stop_cover(CoverInfo,TestDir),
-    stop_extra_tools(ExtraTools, TestDir);
-%%stop_extra_tools([_ | ExtraTools], TestDir) ->
-%%    stop_extra_tools(ExtraTools, TestDir);
-stop_extra_tools([], _) ->
-    ok.
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% do_spec(SpecName, TimetrapSpec) -> {error,Reason} | exit(Result)
-%% SpecName = string()
-%% TimetrapSpec = MultiplyTimetrap | {MultiplyTimetrap,ScaleTimetrap}
-%% MultiplyTimetrap = integer() | infinity
-%% ScaleTimetrap = bool()
-%%
-%% Reads the named test suite specification file, and executes it.
-%%
-%% This function is meant to be called by a process created by
-%% spawn_tester/10, which sets up some necessary dictionary values.
-
-do_spec(SpecName, TimetrapSpec) when is_list(SpecName) ->
-    case file:consult(SpecName) of
-        {ok,TermList} ->
-            do_spec_list(TermList,TimetrapSpec);
-        {error,Reason} ->
-            io:format("Can't open ~ts: ~p\n", [SpecName,Reason]),
-            {error,{cant_open_spec,Reason}}
-    end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% do_spec_list(TermList, TimetrapSpec) -> exit(Result)
-%% TermList = [term()|...]
-%% TimetrapSpec = MultiplyTimetrap | {MultiplyTimetrap,ScaleTimetrap}
-%% MultiplyTimetrap = integer() | infinity
-%% ScaleTimetrap = bool()
-%%
-%% Executes a list of test suite specification commands. The following
-%% commands are available, and may occur zero or more times (if a command
-%% occurs several times, the contents are appended):
-%%
-%% {topcase,TopCase} Specifies top level test goals. TopCase has the syntax
-%% specified by collect_cases/3.
-%%
-%% {skip,Skip} Specifies test cases to skip, and lists requirements that
-%% cannot be granted during the test run. Skip has the syntax specified
-%% by collect_cases/3.
-%%
-%% {nodes,Nodes} Lists node names available to the test suites. Nodes have
-%% the syntax specified by collect_cases/3.
-%%
-%% {require_nodenames, Num} Specifies how many nodenames the test suite will
-%% need. These are automatically generated and inserted into the Config by the
-%% test_server. The caller may specify other hosts to run these nodes on by
-%% using the {hosts, Hosts} option. If there are no hosts specified, all
-%% nodenames will be generated from the local host.
-%%
-%% {hosts, Hosts} Specifies a list of available hosts on which to start
-%% slave nodes. It is used when the {remote, true} option is given to the
-%% test_server:start_node/3 function. Also, if {require_nodenames, Num} is
-%% contained in the TermList, the generated nodenames will be spread over
-%% all hosts given in this Hosts list. The hostnames are given as atoms or
-%% strings.
-%%
-%% {diskless, true} is kept for backwards compatibility and
-%% should not be used. Use a configuration test case instead.
-%% -%% This function is meant to be called by a process created by -%% spawn_tester/10, which sets up some necessary dictionary values. - -do_spec_list(TermList0, TimetrapSpec) -> - Nodes = [], - TermList = - case lists:keysearch(hosts, 1, TermList0) of - {value, {hosts, Hosts0}} -> - Hosts = lists:map(fun(H) -> cast_to_list(H) end, Hosts0), - controller_call({set_hosts, Hosts}), - lists:keydelete(hosts, 1, TermList0); - _ -> - TermList0 - end, - DefaultConfig = make_config([{nodes,Nodes}]), - {TopCases,SkipList,Config} = do_spec_terms(TermList, [], [], DefaultConfig), - do_test_cases(TopCases, SkipList, Config, TimetrapSpec). - -do_spec_terms([], TopCases, SkipList, Config) -> - {TopCases,SkipList,Config}; -do_spec_terms([{topcase,TopCase}|Terms], TopCases, SkipList, Config) -> - do_spec_terms(Terms,[TopCase|TopCases], SkipList, Config); -do_spec_terms([{skip,Skip}|Terms], TopCases, SkipList, Config) -> - do_spec_terms(Terms, TopCases, [Skip|SkipList], Config); -do_spec_terms([{nodes,Nodes}|Terms], TopCases, SkipList, Config) -> - do_spec_terms(Terms, TopCases, SkipList, - update_config(Config, {nodes,Nodes})); -do_spec_terms([{diskless,How}|Terms], TopCases, SkipList, Config) -> - do_spec_terms(Terms, TopCases, SkipList, - update_config(Config, {diskless,How})); -do_spec_terms([{config,MoreConfig}|Terms], TopCases, SkipList, Config) -> - do_spec_terms(Terms, TopCases, SkipList, Config++MoreConfig); -do_spec_terms([{default_timeout,Tmo}|Terms], TopCases, SkipList, Config) -> - do_spec_terms(Terms, TopCases, SkipList, - update_config(Config, {default_timeout,Tmo})); - -do_spec_terms([{require_nodenames,NumNames}|Terms], TopCases, SkipList, Config) -> - NodeNames0=generate_nodenames(NumNames), - NodeNames=lists:delete([], NodeNames0), - do_spec_terms(Terms, TopCases, SkipList, - update_config(Config, {nodenames,NodeNames})); -do_spec_terms([Other|Terms], TopCases, SkipList, Config) -> - io:format("** WARNING: Spec file contains unknown directive ~p\n", - [Other]), - do_spec_terms(Terms, TopCases, SkipList, Config). - - - -generate_nodenames(Num) -> - Hosts = case controller_call(get_hosts) of - [] -> - TI = controller_call(get_target_info), - [TI#target_info.host]; - List -> - List - end, - generate_nodenames2(Num, Hosts, []). - -generate_nodenames2(0, _Hosts, Acc) -> - Acc; -generate_nodenames2(N, Hosts, Acc) -> - Host=lists:nth((N rem (length(Hosts)))+1, Hosts), - Name=list_to_atom(temp_nodename("nod", []) ++ "@" ++ Host), - generate_nodenames2(N-1, Hosts, [Name|Acc]). - -temp_nodename([], Acc) -> - lists:flatten(Acc); -temp_nodename([Chr|Base], Acc) -> - {A,B,C} = ?now, - New = [Chr | integer_to_list(Chr bxor A bxor B+A bxor C+B)], - temp_nodename(Base, [New|Acc]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% count_test_cases(TopCases, SkipCases) -> {Suites,NoOfCases} | error -%% TopCases = term() (See collect_cases/3) -%% SkipCases = term() (See collect_cases/3) -%% Suites = list() -%% NoOfCases = integer() | unknown -%% -%% Counts the test cases that are about to run and returns that number. -%% If there's a conf group in TestSpec with a repeat property, the total number -%% of cases can not be calculated and NoOfCases = unknown. 
-count_test_cases(TopCases, SkipCases) when is_list(TopCases) -> - case collect_all_cases(TopCases, SkipCases) of - {error,_Why} = Error -> - Error; - TestSpec -> - {get_suites(TestSpec, []), - case remove_conf(TestSpec) of - {repeats,_} -> - unknown; - TestSpec1 -> - length(TestSpec1) - end} - end; - -count_test_cases(TopCase, SkipCases) -> - count_test_cases([TopCase], SkipCases). - - -remove_conf(Cases) -> - remove_conf(Cases, [], false). - -remove_conf([{conf, _Ref, Props, _MF}|Cases], NoConf, Repeats) -> - case get_repeat(Props) of - undefined -> - remove_conf(Cases, NoConf, Repeats); - {_RepType,1} -> - remove_conf(Cases, NoConf, Repeats); - _ -> - remove_conf(Cases, NoConf, true) - end; -remove_conf([{make,_Ref,_MF}|Cases], NoConf, Repeats) -> - remove_conf(Cases, NoConf, Repeats); -remove_conf([{skip_case,{{_M,all},_Cmt},_Mode}|Cases], NoConf, Repeats) -> - remove_conf(Cases, NoConf, Repeats); -remove_conf([{skip_case,{Type,_Ref,_MF,_Cmt}}|Cases], - NoConf, Repeats) when Type==conf; - Type==make -> - remove_conf(Cases, NoConf, Repeats); -remove_conf([{skip_case,{Type,_Ref,_MF,_Cmt},_Mode}|Cases], - NoConf, Repeats) when Type==conf; - Type==make -> - remove_conf(Cases, NoConf, Repeats); -remove_conf([C={Mod,error_in_suite,_}|Cases], NoConf, Repeats) -> - FwMod = get_fw_mod(?MODULE), - if Mod == FwMod -> - remove_conf(Cases, NoConf, Repeats); - true -> - remove_conf(Cases, [C|NoConf], Repeats) - end; -remove_conf([C|Cases], NoConf, Repeats) -> - remove_conf(Cases, [C|NoConf], Repeats); -remove_conf([], NoConf, true) -> - {repeats,lists:reverse(NoConf)}; -remove_conf([], NoConf, false) -> - lists:reverse(NoConf). - -get_suites([{skip_case,{{Mod,_F},_Cmt},_Mode}|Tests], Mods) when is_atom(Mod) -> - case add_mod(Mod, Mods) of - true -> get_suites(Tests, [Mod|Mods]); - false -> get_suites(Tests, Mods) - end; -get_suites([{Mod,_Case}|Tests], Mods) when is_atom(Mod) -> - case add_mod(Mod, Mods) of - true -> get_suites(Tests, [Mod|Mods]); - false -> get_suites(Tests, Mods) - end; -get_suites([{Mod,_Func,_Args}|Tests], Mods) when is_atom(Mod) -> - case add_mod(Mod, Mods) of - true -> get_suites(Tests, [Mod|Mods]); - false -> get_suites(Tests, Mods) - end; -get_suites([_|Tests], Mods) -> - get_suites(Tests, Mods); - -get_suites([], Mods) -> - lists:reverse(Mods). - -add_mod(Mod, Mods) -> - case string:rstr(atom_to_list(Mod), "_SUITE") of - 0 -> false; - _ -> % test suite - case lists:member(Mod, Mods) of - true -> false; - false -> true - end - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% do_test_cases(TopCases, SkipCases, Config, TimetrapSpec) -> -%% exit(Result) -%% -%% TopCases = term() (See collect_cases/3) -%% SkipCases = term() (See collect_cases/3) -%% Config = term() (See collect_cases/3) -%% TimetrapSpec = MultiplyTimetrap | {MultiplyTimetrap,ScaleTimetrap} -%% MultiplyTimetrap = integer() | infinity -%% ScaleTimetrap = bool() -%% -%% Initializes and starts the test run, for "ordinary" test suites. -%% Creates log directories and log files, inserts initial timestamps and -%% configuration information into the log files. -%% -%% This function is meant to be called by a process created by -%% spawn_tester/10, which sets up some necessary dictionary values. 
-do_test_cases(TopCases, SkipCases, - Config, MultiplyTimetrap) when is_integer(MultiplyTimetrap); - MultiplyTimetrap == infinity -> - do_test_cases(TopCases, SkipCases, Config, {MultiplyTimetrap,true}); - -do_test_cases(TopCases, SkipCases, - Config, TimetrapData) when is_list(TopCases), - is_tuple(TimetrapData) -> - {ok,TestDir} = start_log_file(), - FwMod = get_fw_mod(?MODULE), - case collect_all_cases(TopCases, SkipCases) of - {error,Why} -> - print(1, "Error starting: ~p", [Why]), - exit(test_suites_done); - TestSpec0 -> - N = case remove_conf(TestSpec0) of - {repeats,_} -> unknown; - TS -> length(TS) - end, - put(test_server_cases, N), - put(test_server_case_num, 0), - - TestSpec = - add_init_and_end_per_suite(TestSpec0, undefined, undefined, FwMod), - - TI = get_target_info(), - print(1, "Starting test~ts", - [print_if_known(N, {", ~w test cases",[N]}, - {" (with repeated test cases)",[]})]), - Test = get(test_server_name), - TestName = if is_list(Test) -> - lists:flatten(io_lib:format("~ts", [Test])); - true -> - lists:flatten(io_lib:format("~tp", [Test])) - end, - TestDescr = "Test " ++ TestName ++ " results", - - test_server_sup:framework_call(report, [tests_start,{Test,N}]), - - {Header,Footer} = - case test_server_sup:framework_call(get_html_wrapper, - [TestDescr,true,TestDir, - {[],[2,3,4,7,8],[1,6]}], "") of - Empty when (Empty == "") ; (element(2,Empty) == "") -> - put(basic_html, true), - {[html_header(TestDescr), - "<h2>Results for test ", TestName, "</h2>\n"], - "\n</body>\n</html>\n"}; - {basic_html,Html0,Html1} -> - put(basic_html, true), - {Html0++["<h1>Results for <i>",TestName,"</i></h1>\n"], - Html1}; - {xhtml,Html0,Html1} -> - put(basic_html, false), - {Html0++["<h1>Results for <i>",TestName,"</i></h1>\n"], - Html1} - end, - - print(html, Header), - - print(html, xhtml("<p>", "<h4>")), - print_timestamp(html, "Test started at "), - print(html, xhtml("</p>", "</h4>")), - - print(html, xhtml("\n<p><b>Host info:</b><br>\n", - "\n<p><b>Host info:</b><br />\n")), - print_who(test_server_sup:hoststr(), test_server_sup:get_username()), - print(html, xhtml("<br>Used Erlang v~ts in <tt>~ts</tt></p>\n", - "<br />Used Erlang v~ts in \"~ts\"</p>\n"), - [erlang:system_info(version), code:root_dir()]), - - if FwMod == ?MODULE -> - print(html, xhtml("\n<p><b>Target Info:</b><br>\n", - "\n<p><b>Target Info:</b><br />\n")), - print_who(TI#target_info.host, TI#target_info.username), - print(html,xhtml("<br>Used Erlang v~ts in <tt>~ts</tt></p>\n", - "<br />Used Erlang v~ts in \"~ts\"</p>\n"), - [TI#target_info.version, TI#target_info.root_dir]); - true -> - case test_server_sup:framework_call(target_info, []) of - TargetInfo when is_list(TargetInfo), - length(TargetInfo) > 0 -> - print(html, xhtml("\n<p><b>Target info:</b><br>\n", - "\n<p><b>Target info:</b><br />\n")), - print(html, "~ts</p>\n", [TargetInfo]); - _ -> - ok - end - end, - CoverLog = - case get(test_server_cover_log_dir) of - undefined -> - ?coverlog_name; - AbsLogDir -> - AbsLog = filename:join(AbsLogDir,?coverlog_name), - make_relative(AbsLog, TestDir) - end, - print(html, - "<p><ul>\n" - "<li><a href=\"~ts\">Full textual log</a></li>\n" - "<li><a href=\"~ts\">Coverage log</a></li>\n" - "<li><a href=\"~ts\">Unexpected I/O log</a></li>\n</ul></p>\n", - [?suitelog_name,CoverLog,?unexpected_io_log]), - print(html, - "<p>~ts</p>\n" ++ - xhtml(["<table bgcolor=\"white\" border=\"3\" cellpadding=\"5\">\n", - "<thead>\n"], - ["<table id=\"",?sortable_table_name,"\">\n", - "<thead>\n"]) ++ - 
"<tr><th>Num</th><th>Module</th><th>Group</th>" ++ - "<th>Case</th><th>Log</th><th>Time</th><th>Result</th>" ++ - "<th>Comment</th></tr>\n</thead>\n<tbody>\n", - [print_if_known(N, {"<i>Executing <b>~w</b> test cases...</i>" - ++ xhtml("\n<br>\n", "\n<br />\n"),[N]}, - {"",[]})]), - - print(major, "=cases ~w", [get(test_server_cases)]), - print(major, "=user ~ts", [TI#target_info.username]), - print(major, "=host ~ts", [TI#target_info.host]), - - %% If there are no hosts specified,use only the local host - case controller_call(get_hosts) of - [] -> - print(major, "=hosts ~ts", [TI#target_info.host]), - controller_call({set_hosts, [TI#target_info.host]}); - Hosts -> - Str = lists:flatten(lists:map(fun(X) -> [X," "] end, Hosts)), - print(major, "=hosts ~ts", [Str]) - end, - print(major, "=emulator_vsn ~ts", [TI#target_info.version]), - print(major, "=emulator ~ts", [TI#target_info.emulator]), - print(major, "=otp_release ~ts", [TI#target_info.otp_release]), - print(major, "=started ~s", - [lists:flatten(timestamp_get(""))]), - - test_server_io:set_footer(Footer), - - run_test_cases(TestSpec, Config, TimetrapData) - end; - -do_test_cases(TopCase, SkipCases, Config, TimetrapSpec) -> - %% when not list(TopCase) - do_test_cases([TopCase], SkipCases, Config, TimetrapSpec). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% start_log_file() -> {ok,TestDirName} | exit({Error,Reason}) -%% Stem = string() -%% -%% Creates the log directories, the major log file and the html log file. -%% The log files are initialized with some header information. -%% -%% The name of the log directory will be <Name>.logs/run.<Date>/ where -%% Name is the test suite name and Date is the current date and time. - -start_log_file() -> - Dir = get(test_server_dir), - case file:make_dir(Dir) of - ok -> - ok; - {error, eexist} -> - ok; - MkDirError -> - log_file_error(MkDirError, Dir) - end, - TestDir = timestamp_filename_get(filename:join(Dir, "run.")), - TestDir1 = - case file:make_dir(TestDir) of - ok -> - TestDir; - {error,eexist} -> - timer:sleep(1000), - %% we need min 1 second between timestamps unfortunately - TestDirX = timestamp_filename_get(filename:join(Dir, "run.")), - case file:make_dir(TestDirX) of - ok -> - TestDirX; - MkDirError2 -> - log_file_error(MkDirError2, TestDirX) - end; - MkDirError2 -> - log_file_error(MkDirError2, TestDir) - end, - FilenameMode = file:native_name_encoding(), - ok = write_file(filename:join(Dir, ?last_file), - TestDir1 ++ "\n", - FilenameMode), - ok = write_file(?last_file, TestDir1 ++ "\n", FilenameMode), - put(test_server_log_dir_base,TestDir1), - - MajorName = filename:join(TestDir1, ?suitelog_name), - HtmlName = MajorName ++ ?html_ext, - UnexpectedName = filename:join(TestDir1, ?unexpected_io_log), - - {ok,Major} = open_utf8_file(MajorName), - {ok,Html} = open_html_file(HtmlName), - - {UnexpHeader,UnexpFooter} = - case test_server_sup:framework_call(get_html_wrapper, - ["Unexpected I/O log",false, - TestDir, undefined],"") of - UEmpty when (UEmpty == "") ; (element(2,UEmpty) == "") -> - {html_header("Unexpected I/O log"),"\n</body>\n</html>\n"}; - {basic_html,UH,UF} -> - {UH,UF}; - {xhtml,UH,UF} -> - {UH,UF} - end, - - {ok,Unexpected} = open_html_file(UnexpectedName), - io:put_chars(Unexpected, [UnexpHeader, - xhtml("<br>\n<h2>Unexpected I/O</h2>", - "<br />\n<h3>Unexpected I/O</h3>"), - "\n<pre>\n"]), - put(test_server_unexpected_footer,{UnexpectedName,UnexpFooter}), - - test_server_io:set_fd(major, Major), - test_server_io:set_fd(html, Html), - 
test_server_io:set_fd(unexpected_io, Unexpected), - - make_html_link(filename:absname(?last_test ++ ?html_ext), - HtmlName, filename:basename(Dir)), - LinkName = filename:join(Dir, ?last_link), - make_html_link(LinkName ++ ?html_ext, HtmlName, - filename:basename(Dir)), - - PrivDir = filename:join(TestDir1, ?priv_dir), - ok = file:make_dir(PrivDir), - put(test_server_priv_dir,PrivDir++"/"), - print_timestamp(major, "Suite started at "), - - LogInfo = [{topdir,Dir},{rundir,lists:flatten(TestDir1)}], - test_server_sup:framework_call(report, [loginfo,LogInfo]), - {ok,TestDir1}. - -log_file_error(Error, Dir) -> - exit({cannot_create_log_dir,{Error,lists:flatten(Dir)}}). - -make_html_link(LinkName, Target, Explanation) -> - %% if possible use a relative reference to Target. - TargetL = filename:split(Target), - PwdL = filename:split(filename:dirname(LinkName)), - Href = case lists:prefix(PwdL, TargetL) of - true -> - uri_encode(filename:join(lists:nthtail(length(PwdL),TargetL))); - false -> - "file:" ++ uri_encode(Target) - end, - H = [html_header(Explanation), - "<h1>Last test</h1>\n" - "<a href=\"",Href,"\">",Explanation,"</a>\n" - "</body>\n</html>\n"], - ok = write_html_file(LinkName, H). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% start_minor_log_file(Mod, Func, ParallelTC) -> AbsName -%% Mod = atom() -%% Func = atom() -%% ParallelTC = bool() -%% AbsName = string() -%% -%% Create a minor log file for the test case Mod,Func,Args. The log file -%% will be stored in the log directory under the name <Mod>.<Func>.html. -%% Some header info will also be inserted into the log file. If the test -%% case runs in a parallel group, then to avoid clashing file names if the -%% case is executed more than once, the name <Mod>.<Func>.<Timestamp>.html -%% is used. - -start_minor_log_file(Mod, Func, ParallelTC) -> - MFA = {Mod,Func,1}, - LogDir = get(test_server_log_dir_base), - Name0 = lists:flatten(io_lib:format("~w.~w~ts", [Mod,Func,?html_ext])), - Name = downcase(Name0), - AbsName = filename:join(LogDir, Name), - case (ParallelTC orelse (element(1,file:read_file_info(AbsName))==ok)) of - false -> %% normal case, unique name - start_minor_log_file1(Mod, Func, LogDir, AbsName, MFA); - true -> %% special case, duplicate names - Tag = test_server_sup:unique_name(), - Name1_0 = - lists:flatten(io_lib:format("~w.~w.~ts~ts", [Mod,Func,Tag, - ?html_ext])), - Name1 = downcase(Name1_0), - AbsName1 = filename:join(LogDir, Name1), - start_minor_log_file1(Mod, Func, LogDir, AbsName1, MFA) - end. 
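As a quick illustration of the relative-link logic in make_html_link/3 above (paths invented, URI encoding left out): the href becomes relative only when the link's own directory is a prefix of the target path.

    %% make_html_link("/logs/last_test.html",
    %%                "/logs/run.2010-06-01/suite.log.html", "my_run")
    %%   writes ... <a href="run.2010-06-01/suite.log.html">my_run</a> ...
    %% make_html_link("/elsewhere/last.html",
    %%                "/logs/run.2010-06-01/suite.log.html", "my_run")
    %%   writes ... <a href="file:/logs/run.2010-06-01/suite.log.html">my_run</a> ...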
- -start_minor_log_file1(Mod, Func, LogDir, AbsName, MFA) -> - {ok,Fd} = open_html_file(AbsName), - Lev = get(test_server_minor_level)+1000, %% far down in the minor levels - put(test_server_minor_fd, Fd), - test_server_gl:set_minor_fd(group_leader(), Fd, MFA), - - TestDescr = io_lib:format("Test ~w:~w result", [Mod,Func]), - {Header,Footer} = - case test_server_sup:framework_call(get_html_wrapper, - [TestDescr,false, - filename:dirname(AbsName), - undefined], "") of - Empty when (Empty == "") ; (element(2,Empty) == "") -> - put(basic_html, true), - {html_header(TestDescr), "\n</body>\n</html>\n"}; - {basic_html,Html0,Html1} -> - put(basic_html, true), - {Html0,Html1}; - {xhtml,Html0,Html1} -> - put(basic_html, false), - {Html0,Html1} - end, - put(test_server_minor_footer, Footer), - io:put_chars(Fd, Header), - - io:put_chars(Fd, "<a name=\"top\"></a>"), - io:put_chars(Fd, "<pre>\n"), - - SrcListing = downcase(atom_to_list(Mod)) ++ ?src_listing_ext, - - case get_fw_mod(?MODULE) of - Mod when Func == error_in_suite -> - ok; - _ -> - {Info,Arity} = - if Func == init_per_suite; Func == end_per_suite -> - {"Config function: ", 1}; - Func == init_per_group; Func == end_per_group -> - {"Config function: ", 2}; - true -> - {"Test case: ", 1} - end, - - case {filelib:is_file(filename:join(LogDir, SrcListing)), - lists:member(no_src, get(test_server_logopts))} of - {true,false} -> - print(Lev, Info ++ "<a href=\"~ts#~ts\">~w:~w/~w</a> " - "(click for source code)\n", - [uri_encode(SrcListing), - uri_encode(atom_to_list(Func)++"-1",utf8), - Mod,Func,Arity]); - _ -> - print(Lev, Info ++ "~w:~w/~w\n", [Mod,Func,Arity]) - end - end, - - AbsName. - -stop_minor_log_file() -> - test_server_gl:unset_minor_fd(group_leader()), - Fd = get(test_server_minor_fd), - Footer = get(test_server_minor_footer), - io:put_chars(Fd, "</pre>\n" ++ Footer), - ok = file:close(Fd), - put(test_server_minor_fd, undefined). - -downcase(S) -> downcase(S, []). -downcase([Uc|Rest], Result) when $A =< Uc, Uc =< $Z -> - downcase(Rest, [Uc-$A+$a|Result]); -downcase([C|Rest], Result) -> - downcase(Rest, [C|Result]); -downcase([], Result) -> - lists:reverse(Result). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% html_convert_modules(TestSpec, Config) -> ok -%% Isolate the modules affected by TestSpec and -%% make sure they are converted to html. -%% -%% Errors are silently ignored. - -html_convert_modules(TestSpec, _Config, FwMod) -> - Mods = html_isolate_modules(TestSpec, FwMod), - html_convert_modules(Mods), - copy_html_files(get(test_server_dir), get(test_server_log_dir_base)). - -%% Retrieve a list of modules out of the test spec. -html_isolate_modules(List, FwMod) -> - html_isolate_modules(List, sets:new(), FwMod). 
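For orientation, a worked example of the source-listing link printed by start_minor_log_file1/5 above. The module and test case names are invented, and it is assumed here that ?src_listing_ext expands to the customary ".src.html".

    %% Mod = my_SUITE, Func = tc1  (an ordinary test case, so Arity = 1)
    %% SrcListing = "my_suite.src.html"
    %% printed into the minor log:
    %%   Test case: <a href="my_suite.src.html#tc1-1">my_SUITE:tc1/1</a> (click for source code)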
- -html_isolate_modules([], Set, _) -> sets:to_list(Set); -html_isolate_modules([{skip_case,{_Case,_Cmt},_Mode}|Cases], Set, FwMod) -> - html_isolate_modules(Cases, Set, FwMod); -html_isolate_modules([{conf,_Ref,Props,{FwMod,_Func}}|Cases], Set, FwMod) -> - Set1 = case proplists:get_value(suite, Props) of - undefined -> Set; - Mod -> sets:add_element(Mod, Set) - end, - html_isolate_modules(Cases, Set1, FwMod); -html_isolate_modules([{conf,_Ref,_Props,{Mod,_Func}}|Cases], Set, FwMod) -> - html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod); -html_isolate_modules([{skip_case,{conf,_Ref,{FwMod,_Func},_Cmt},Mode}|Cases], - Set, FwMod) -> - Set1 = case proplists:get_value(suite, get_props(Mode)) of - undefined -> Set; - Mod -> sets:add_element(Mod, Set) - end, - html_isolate_modules(Cases, Set1, FwMod); -html_isolate_modules([{skip_case,{conf,_Ref,{Mod,_Func},_Cmt},_Props}|Cases], - Set, FwMod) -> - html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod); -html_isolate_modules([{Mod,_Case}|Cases], Set, FwMod) -> - html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod); -html_isolate_modules([{Mod,_Case,_Args}|Cases], Set, FwMod) -> - html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod). - -%% Given a list of modules, convert each module's source code to HTML. -html_convert_modules([Mod|Mods]) -> - case code:which(Mod) of - Path when is_list(Path) -> - SrcFile = filename:rootname(Path) ++ ".erl", - FoundSrcFile = - case file:read_file_info(SrcFile) of - {ok,SInfo} -> - {SrcFile,SInfo}; - {error,_} -> - ModInfo = Mod:module_info(compile), - case proplists:get_value(source, ModInfo) of - undefined -> - undefined; - OtherSrcFile -> - case file:read_file_info(OtherSrcFile) of - {ok,SInfo} -> - {OtherSrcFile,SInfo}; - {error,_} -> - undefined - end - end - end, - case FoundSrcFile of - undefined -> - html_convert_modules(Mods); - {SrcFile1,SrcFileInfo} -> - DestDir = get(test_server_dir), - Name = atom_to_list(Mod), - DestFile = filename:join(DestDir, - downcase(Name)++?src_listing_ext), - html_possibly_convert(SrcFile1, SrcFileInfo, DestFile), - html_convert_modules(Mods) - end; - _Other -> - html_convert_modules(Mods) - end; -html_convert_modules([]) -> ok. - -%% Convert source code to HTML if possible and needed. -html_possibly_convert(Src, SrcInfo, Dest) -> - case file:read_file_info(Dest) of - {ok,DestInfo} when DestInfo#file_info.mtime >= SrcInfo#file_info.mtime -> - ok; % dest file up to date - _ -> - InclPath = case application:get_env(test_server, include) of - {ok,Incls} -> Incls; - _ -> [] - end, - - OutDir = get(test_server_log_dir_base), - case test_server_sup:framework_call(get_html_wrapper, - ["Module "++Src,false, - OutDir,undefined, - encoding(Src)], "") of - Empty when (Empty == "") ; (element(2,Empty) == "") -> - erl2html2:convert(Src, Dest, InclPath); - {_,Header,_} -> - erl2html2:convert(Src, Dest, InclPath, Header) - end - end. - -%% Copy all HTML files in InDir to OutDir. -copy_html_files(InDir, OutDir) -> - Files = filelib:wildcard(filename:join(InDir, "*" ++ ?src_listing_ext)), - lists:foreach(fun (Src) -> copy_html_file(Src, OutDir) end, Files). - -copy_html_file(Src, DestDir) -> - Dest = filename:join(DestDir, filename:basename(Src)), - case file:read_file(Src) of - {ok,Bin} -> - ok = write_binary_file(Dest, Bin); - {error,_Reason} -> - io:format("File ~ts: read failed\n", [Src]) - end. 
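Putting the helpers above together, the HTML conversion pass runs roughly as follows (suite and framework module names invented):

    %% html_convert_modules(TestSpec, Config, ct_framework) does:
    %%   1. html_isolate_modules/2  -> e.g. [my_SUITE, other_SUITE]
    %%   2. html_convert_modules/1  -> writes downcase(Mod) ++ ?src_listing_ext
    %%        into get(test_server_dir), skipping modules whose listing is
    %%        already newer than the source (see html_possibly_convert/3)
    %%   3. copy_html_files/2       -> copies the listings into the current
    %%        run's log directory, get(test_server_log_dir_base)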
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% add_init_and_end_per_suite(TestSpec, Mod, Ref, FwMod) -> NewTestSpec -%% -%% Expands TestSpec with an initial init_per_suite, and a final -%% end_per_suite element, per each discovered suite in the list. - -add_init_and_end_per_suite([{make,_,_}=Case|Cases], LastMod, LastRef, FwMod) -> - [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]; -add_init_and_end_per_suite([{skip_case,{{Mod,all},_},_}=Case|Cases], LastMod, - LastRef, FwMod) when Mod =/= LastMod -> - {PreCases, NextMod, NextRef} = - do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod), - PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; -add_init_and_end_per_suite([{skip_case,{{Mod,_},_Cmt},_Mode}=Case|Cases], - LastMod, LastRef, FwMod) when Mod =/= LastMod -> - {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), - PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; -add_init_and_end_per_suite([{skip_case,{conf,_,{Mod,_},_},_}=Case|Cases], - LastMod, LastRef, FwMod) when Mod =/= LastMod -> - {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), - PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; -add_init_and_end_per_suite([{skip_case,{conf,_,{Mod,_},_}}=Case|Cases], LastMod, - LastRef, FwMod) when Mod =/= LastMod -> - {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), - PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; -add_init_and_end_per_suite([{conf,Ref,Props,{FwMod,Func}}=Case|Cases], LastMod, - LastRef, FwMod) -> - %% if Mod == FwMod, this conf test is (probably) a test case group where - %% the init- and end-functions are missing in the suite, and if so, - %% the suite name should be stored as {suite,Suite} in Props - case proplists:get_value(suite, Props) of - Suite when Suite =/= undefined, Suite =/= LastMod -> - {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Suite, FwMod), - Case1 = {conf,Ref,[{suite,NextMod}|proplists:delete(suite,Props)], - {FwMod,Func}}, - PreCases ++ [Case1|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; - _ -> - [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)] - end; -add_init_and_end_per_suite([{conf,_,_,{Mod,_}}=Case|Cases], LastMod, - LastRef, FwMod) when Mod =/= LastMod, Mod =/= FwMod -> - {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), - PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; -add_init_and_end_per_suite([SkipCase|Cases], LastMod, LastRef, FwMod) - when element(1,SkipCase) == skip_case; element(1,SkipCase) == auto_skip_case-> - [SkipCase|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]; -add_init_and_end_per_suite([{conf,_,_,_}=Case|Cases], LastMod, LastRef, FwMod) -> - [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]; -add_init_and_end_per_suite([{Mod,_}=Case|Cases], LastMod, LastRef, FwMod) - when Mod =/= LastMod, Mod =/= FwMod -> - {PreCases, NextMod, NextRef} = - do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), - PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; -add_init_and_end_per_suite([{Mod,_,_}=Case|Cases], LastMod, LastRef, FwMod) - when Mod =/= LastMod, Mod =/= FwMod -> - {PreCases, NextMod, NextRef} = - 
do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod), - PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, - NextRef, FwMod)]; -add_init_and_end_per_suite([Case|Cases], LastMod, LastRef, FwMod)-> - [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)]; -add_init_and_end_per_suite([], _LastMod, undefined, _FwMod) -> - []; -add_init_and_end_per_suite([], _LastMod, skipped_suite, _FwMod) -> - []; -add_init_and_end_per_suite([], LastMod, LastRef, FwMod) -> - %% we'll add end_per_suite here even if it's not exported - %% (and simply let the call fail if it's missing) - case erlang:function_exported(LastMod, end_per_suite, 1) of - true -> - [{conf,LastRef,[],{LastMod,end_per_suite}}]; - false -> - %% let's call a "fake" end_per_suite if it exists - case erlang:function_exported(FwMod, end_per_suite, 1) of - true -> - [{conf,LastRef,[{suite,LastMod}],{FwMod,end_per_suite}}]; - false -> - [{conf,LastRef,[],{LastMod,end_per_suite}}] - end - end. - -do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod) -> - case code:is_loaded(Mod) of - false -> code:load_file(Mod); - _ -> ok - end, - {Init,NextMod,NextRef} = - case erlang:function_exported(Mod, init_per_suite, 1) of - true -> - Ref = make_ref(), - {[{conf,Ref,[],{Mod,init_per_suite}}],Mod,Ref}; - false -> - %% let's call a "fake" init_per_suite if it exists - case erlang:function_exported(FwMod, init_per_suite, 1) of - true -> - Ref = make_ref(), - {[{conf,Ref,[{suite,Mod}], - {FwMod,init_per_suite}}],Mod,Ref}; - false -> - {[],Mod,undefined} - end - - end, - Cases = - if LastRef==undefined -> - Init; - LastRef==skipped_suite -> - Init; - true -> - %% we'll add end_per_suite here even if it's not exported - %% (and simply let the call fail if it's missing) - case erlang:function_exported(LastMod, end_per_suite, 1) of - true -> - [{conf,LastRef,[],{LastMod,end_per_suite}}|Init]; - false -> - %% let's call a "fake" end_per_suite if it exists - case erlang:function_exported(FwMod, end_per_suite, 1) of - true -> - [{conf,LastRef,[{suite,Mod}], - {FwMod,end_per_suite}}|Init]; - false -> - [{conf,LastRef,[],{LastMod,end_per_suite}}|Init] - end - end - end, - {Cases,NextMod,NextRef}. - -do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod) -> - case LastRef of - No when No==undefined ; No==skipped_suite -> - {[],Mod,skipped_suite}; - _Ref -> - case erlang:function_exported(LastMod, end_per_suite, 1) of - true -> - {[{conf,LastRef,[],{LastMod,end_per_suite}}], - Mod,skipped_suite}; - false -> - case erlang:function_exported(FwMod, end_per_suite, 1) of - true -> - %% let's call "fake" end_per_suite if it exists - {[{conf,LastRef,[],{FwMod,end_per_suite}}], - Mod,skipped_suite}; - false -> - {[{conf,LastRef,[],{LastMod,end_per_suite}}], - Mod,skipped_suite} - end - end - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% run_test_cases(TestSpec, Config, TimetrapData) -> exit(Result) -%% -%% Runs the specified tests, then displays/logs the summary. 
- -run_test_cases(TestSpec, Config, TimetrapData) -> - test_server:init_purify(), - case lists:member(no_src, get(test_server_logopts)) of - true -> - ok; - false -> - FwMod = get_fw_mod(?MODULE), - html_convert_modules(TestSpec, Config, FwMod) - end, - - run_test_cases_loop(TestSpec, [Config], TimetrapData, [], []), - - {AllSkippedN,UserSkipN,AutoSkipN,SkipStr} = - case get(test_server_skipped) of - {0,0} -> {0,0,0,""}; - {US,AS} -> {US+AS,US,AS,io_lib:format(", ~w skipped", [US+AS])} - end, - OkN = get(test_server_ok), - FailedN = get(test_server_failed), - print(1, "TEST COMPLETE, ~w ok, ~w failed~ts of ~w test cases\n", - [OkN,FailedN,SkipStr,OkN+FailedN+AllSkippedN]), - test_server_sup:framework_call(report, [tests_done, - {OkN,FailedN,{UserSkipN,AutoSkipN}}]), - print(major, "=finished ~s", [lists:flatten(timestamp_get(""))]), - print(major, "=failed ~w", [FailedN]), - print(major, "=successful ~w", [OkN]), - print(major, "=user_skipped ~w", [UserSkipN]), - print(major, "=auto_skipped ~w", [AutoSkipN]), - exit(test_suites_done). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% run_test_cases_loop(TestCases, Config, TimetrapData, Mode, Status) -> ok -%% TestCases = [Test,...] -%% Config = [[{Key,Val},...],...] -%% TimetrapData = {MultiplyTimetrap,ScaleTimetrap} -%% MultiplyTimetrap = integer() | infinity -%% ScaleTimetrap = bool() -%% Mode = [{Ref,[Prop,..],StartTime}] -%% Ref = reference() -%% Prop = {name,Name} | sequence | parallel | -%% shuffle | {shuffle,Seed} | -%% repeat | {repeat,N} | -%% repeat_until_all_ok | {repeat_until_all_ok,N} | -%% repeat_until_any_ok | {repeat_until_any_ok,N} | -%% repeat_until_any_fail | {repeat_until_any_fail,N} | -%% repeat_until_all_fail | {repeat_until_all_fail,N} -%% Status = [{Ref,{{Ok,Skipped,Failed},CopiedCases}}] -%% Ok = Skipped = Failed = [Case,...] -%% -%% Execute the TestCases under configuration Config. Config is a list -%% of lists, where hd(Config) holds the config tuples for the current -%% conf case and tl(Config) is the data for the higher level conf cases. -%% Config data is "inherited" from top to nested conf cases, but -%% never the other way around. if length(Config) == 1, Config contains -%% only the initial config data for the suite. -%% -%% Test may be one of the following: -%% -%% {conf,Ref,Props,{Mod,Func}} Mod:Func is a configuration modification -%% function, call it with the current configuration as argument. It will -%% return a new configuration. -%% -%% {make,Ref,{Mod,Func,Args}} Mod:Func is a make function, and it is called -%% with the given arguments. -%% -%% {Mod,Case} This is a normal test case. Determine the correct -%% configuration, and insert {Mod,Case,Config} as head of the list, -%% then reiterate. -%% -%% {Mod,Case,Args} A test case with predefined argument (usually a normal -%% test case which just got a fresh configuration (see above)). -%% -%% {skip_case,{conf,Ref,Case,Comment}} An init conf case gets skipped -%% by the user. This will also cause the end conf case to be skipped. -%% Note that it is not possible to skip an end conf case directly (it -%% can only be skipped indirectly by a skipped init conf case). The -%% comment (which gets printed in the log files) describes why the case -%% was skipped. -%% -%% {skip_case,{Case,Comment},Mode} A normal test case skipped by the user. -%% The comment (which gets printed in the log files) describes why the -%% case was skipped. 
-%% -%% {auto_skip_case,{conf,Ref,Case,Comment},Mode} This is the result of -%% an end conf case being automatically skipped due to a failing init -%% conf case. It could also be a nested conf case that gets skipped -%% because of a failed or skipped top level conf. -%% -%% {auto_skip_case,{Case,Comment},Mode} This is a normal test case which -%% gets automatically skipped because of a failing init conf case or -%% because of a failing previous test case in a sequence. -%% -%% ------------------------------------------------------------------- -%% Description of IO handling during execution of parallel test cases: -%% ------------------------------------------------------------------- -%% -%% A conf group can have an associated list of properties. If the -%% parallel property is specified for a group, it means the test cases -%% should be spawned and run in parallel rather than called sequentially -%% (which is always the default mode). Test cases that execute in parallel -%% also write to their respective minor log files in parallel. Printouts -%% to common log files, such as the summary html file and the major log -%% file on text format, still have to be processed sequentially. For this -%% reason, the Mode argument specifies if a parallel group is currently -%% being executed. -%% -%% The low-level mechanism for buffering IO for the common log files -%% is handled by the test_server_io module. Buffering is turned on by -%% test_server_io:start_transaction/0 and off by calling -%% test_server_io:end_transaction/0. The buffered data for the transaction -%% can printed by calling test_server_io:print_buffered/1. -%% -%% This module is responsible for turning on IO buffering and to later -%% test_server_io:print_buffered/1 to print the data. To help with this, -%% two variables in the process dictionary are used: -%% 'test_server_common_io_handler' and 'test_server_queued_io'. The values -%% are set to as follwing: -%% -%% Value Meaning -%% ----- ------- -%% undefined No parallel test cases running -%% {tc,Pid} Running test cases in a top-level parallel group -%% {Ref,Pid} Running sequential test case inside a parallel group -%% -%% FIXME: The Pid is no longer used. -%% -%% If a conf group nested under a parallel group in the test -%% specification should be started, the 'test_server_common_io_handler' -%% value gets set also on the main process. -%% -%% During execution of a parallel group (or of a group nested under a -%% parallel group), *any* new test case being started gets registered -%% in a list saved in the dictionary with 'test_server_queued_io' as key. -%% When the top level parallel group is finished (only then can we be -%% sure all parallel test cases have finished and "reported in"), the -%% list of test cases is traversed in order and test_server_io:print_buffered/1 -%% can be called for each test case. See handle_test_case_io_and_status/0 -%% for details. -%% -%% To be able to handle nested conf groups with different properties, -%% the Mode argument specifies a list of {Ref,Properties} tuples. -%% The head of the Mode list at any given time identifies the group -%% currently being processed. The tail of the list identifies groups -%% on higher level. 
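Note (illustrative, not part of the deleted module; module and function names are invented): the Mode argument described above is carried around as a list of {Ref,Props,StartTime} tuples, innermost group first, so a question such as "is a parallel group currently being executed?" is just a scan of that list, the same thing check_props(parallel, Mode) does further down in this file. A minimal sketch:

    -module(mode_sketch).
    -export([innermost_parallel/1]).

    %% Mode is assumed to be a list of {Ref,Props,StartTime} tuples with
    %% the innermost group first. Return the reference of the innermost
    %% group carrying the 'parallel' property, or 'false' if none does.
    innermost_parallel(Mode) ->
        case [Ref || {Ref, Props, _StartTime} <- Mode,
                     lists:member(parallel, Props)] of
            []        -> false;
            [Ref | _] -> Ref
        end.

With that shape, the head of the list is always the group currently being processed, exactly as described above.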
-%% -%% ------------------------------------------------------------------- -%% Notes on parallel execution of test cases -%% ------------------------------------------------------------------- -%% -%% A group nested under a parallel group will start executing in -%% parallel with previous (parallel) test cases (no matter what -%% properties the nested group has). Test cases are however never -%% executed in parallel with the start or end conf case of the same -%% group! Because of this, the test_server_ctrl loop waits at -%% the end conf of a group for all parallel cases to finish -%% before the end conf case actually executes. This has the effect -%% that it's only after a nested group has finished that any -%% remaining parallel cases in the previous group get spawned (*). -%% Example (all parallel cases): -%% -%% group1_init |----> -%% group1_case1 | ---------> -%% group1_case2 | ---------------------------------> -%% group2_init | ----> -%% group2_case1 | ------> -%% group2_case2 | ----------> -%% group2_end | ---> -%% group1_case3 (*)| ----> -%% group1_case4 (*)| --> -%% group1_end | ---> -%% - -run_test_cases_loop([{SkipTag,CaseData={Type,_Ref,_Case,_Comment}}|Cases], - Config, TimetrapData, Mode, Status) when - ((SkipTag==auto_skip_case) or (SkipTag==skip_case)) and - ((Type==conf) or (Type==make)) -> - run_test_cases_loop([{SkipTag,CaseData,Mode}|Cases], - Config, TimetrapData, Mode, Status); - -run_test_cases_loop([{SkipTag,{Type,Ref,Case,Comment},SkipMode}|Cases], - Config, TimetrapData, Mode, Status) when - ((SkipTag==auto_skip_case) or (SkipTag==skip_case)) and - ((Type==conf) or (Type==make)) -> - file:set_cwd(filename:dirname(get(test_server_dir))), - CurrIOHandler = get(test_server_common_io_handler), - ParentMode = tl(Mode), - - {AutoOrUser,ReportTag} = - if SkipTag == auto_skip_case -> {auto,tc_auto_skip}; - SkipTag == skip_case -> {user,tc_user_skip} - end, - - %% check and update the mode for test case execution and io msg handling - case {curr_ref(Mode),check_props(parallel, Mode)} of - {Ref,Ref} -> - case check_props(parallel, ParentMode) of - false -> - %% this is a skipped end conf for a top level parallel - %% group, buffered io can be flushed - handle_test_case_io_and_status(), - set_io_buffering(undefined), - {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment, - false, SkipMode), - ConfData = {Mod,{Func,get_name(SkipMode)},Comment}, - test_server_sup:framework_call(report, - [ReportTag,ConfData]), - run_test_cases_loop(Cases, Config, TimetrapData, ParentMode, - delete_status(Ref, Status)); - _ -> - %% this is a skipped end conf for a parallel group nested - %% under a parallel group (io buffering is active) - wait_for_cases(Ref), - {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment, - true, SkipMode), - ConfData = {Mod,{Func,get_name(SkipMode)},Comment}, - test_server_sup:framework_call(report, [ReportTag,ConfData]), - case CurrIOHandler of - {Ref,_} -> - %% current_io_handler was set by start conf of this - %% group, so we can unset it now (no more io from main - %% process needs to be buffered) - set_io_buffering(undefined); - _ -> - ok - end, - run_test_cases_loop(Cases, Config, - TimetrapData, ParentMode, - delete_status(Ref, Status)) - end; - {Ref,false} -> - %% this is a skipped end conf for a non-parallel group that's not - %% nested under a parallel group - {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment, - false, SkipMode), - ConfData = {Mod,{Func,get_name(SkipMode)},Comment}, - test_server_sup:framework_call(report, 
[ReportTag,ConfData]), - - %% Check if this group is auto skipped because of error in the - %% init conf. If so, check if the parent group is a sequence, - %% and if it is, skip all proceeding tests in that group. - GrName = get_name(Mode), - Cases1 = - case get_tc_results(Status) of - {_,_,Fails} when length(Fails) > 0 -> - case lists:member({group_result,GrName}, Fails) of - true -> - case check_prop(sequence, ParentMode) of - false -> - Cases; - ParentRef -> - Reason = {group_result,GrName,failed}, - skip_cases_upto(ParentRef, Cases, - Reason, tc, ParentMode, - SkipTag) - end; - false -> - Cases - end; - _ -> - Cases - end, - run_test_cases_loop(Cases1, Config, TimetrapData, ParentMode, - delete_status(Ref, Status)); - {Ref,_} -> - %% this is a skipped end conf for a non-parallel group nested under - %% a parallel group (io buffering is active) - {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment, - true, SkipMode), - ConfData = {Mod,{Func,get_name(SkipMode)},Comment}, - test_server_sup:framework_call(report, [ReportTag,ConfData]), - case CurrIOHandler of - {Ref,_} -> - %% current_io_handler was set by start conf of this - %% group, so we can unset it now (no more io from main - %% process needs to be buffered) - set_io_buffering(undefined); - _ -> - ok - end, - run_test_cases_loop(Cases, Config, TimetrapData, tl(Mode), - delete_status(Ref, Status)); - {_,false} -> - %% this is a skipped start conf for a group which is not nested - %% under a parallel group - {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment, - false, SkipMode), - ConfData = {Mod,{Func,get_name(SkipMode)},Comment}, - test_server_sup:framework_call(report, [ReportTag,ConfData]), - run_test_cases_loop(Cases, Config, TimetrapData, - [conf(Ref,[])|Mode], Status); - {_,Ref0} when is_reference(Ref0) -> - %% this is a skipped start conf for a group nested under a parallel - %% group and if this is the first nested group, io buffering must - %% be activated - if CurrIOHandler == undefined -> - set_io_buffering({Ref,self()}); - true -> - ok - end, - {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment, - true, SkipMode), - ConfData = {Mod,{Func,get_name(SkipMode)},Comment}, - test_server_sup:framework_call(report, [ReportTag,ConfData]), - run_test_cases_loop(Cases, Config, TimetrapData, - [conf(Ref,[])|Mode], Status) - end; - -run_test_cases_loop([{auto_skip_case,{Case,Comment},SkipMode}|Cases], - Config, TimetrapData, Mode, Status) -> - {Mod,Func} = skip_case(auto, undefined, get(test_server_case_num)+1, - Case, Comment, is_io_buffered(), SkipMode), - test_server_sup:framework_call(report, [tc_auto_skip, - {Mod,{Func,get_name(SkipMode)}, - Comment}]), - run_test_cases_loop(Cases, Config, TimetrapData, Mode, - update_status(skipped, Mod, Func, Status)); - -run_test_cases_loop([{skip_case,{{Mod,all}=Case,Comment},SkipMode}|Cases], - Config, TimetrapData, Mode, Status) -> - skip_case(user, undefined, 0, Case, Comment, false, SkipMode), - test_server_sup:framework_call(report, [tc_user_skip, - {Mod,{all,get_name(SkipMode)}, - Comment}]), - run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status); - -run_test_cases_loop([{skip_case,{Case,Comment},SkipMode}|Cases], - Config, TimetrapData, Mode, Status) -> - {Mod,Func} = skip_case(user, undefined, get(test_server_case_num)+1, - Case, Comment, is_io_buffered(), SkipMode), - test_server_sup:framework_call(report, [tc_user_skip, - {Mod,{Func,get_name(SkipMode)}, - Comment}]), - run_test_cases_loop(Cases, Config, TimetrapData, Mode, - update_status(skipped, Mod, 
Func, Status)); - -%% a start *or* end conf case, wrapping test cases or other conf cases -run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0, - Config, TimetrapData, Mode0, Status) -> - CurrIOHandler = get(test_server_common_io_handler), - %% check and update the mode for test case execution and io msg handling - {StartConf,Mode,IOHandler,ConfTime,Status1} = - case {curr_ref(Mode0),check_props(parallel, Mode0)} of - {Ref,Ref} -> - case check_props(parallel, tl(Mode0)) of - false -> - %% this is an end conf for a top level parallel group, - %% collect results from the test case processes - %% and calc total time - OkSkipFail = handle_test_case_io_and_status(), - file:set_cwd(filename:dirname(get(test_server_dir))), - After = ?now, - Before = get(test_server_parallel_start_time), - Elapsed = timer:now_diff(After, Before)/1000000, - put(test_server_total_time, Elapsed), - {false,tl(Mode0),undefined,Elapsed, - update_status(Ref, OkSkipFail, Status)}; - _ -> - %% this is an end conf for a parallel group nested under a - %% parallel group (io buffering is active) - OkSkipFail = wait_for_cases(Ref), - queue_test_case_io(Ref, self(), 0, Mod, Func), - Elapsed = timer:now_diff(?now, conf_start(Ref, Mode0))/1000000, - case CurrIOHandler of - {Ref,_} -> - %% current_io_handler was set by start conf of this - %% group, so we can unset it after this case (no - %% more io from main process needs to be buffered) - {false,tl(Mode0),undefined,Elapsed, - update_status(Ref, OkSkipFail, Status)}; - _ -> - {false,tl(Mode0),CurrIOHandler,Elapsed, - update_status(Ref, OkSkipFail, Status)} - end - end; - {Ref,false} -> - %% this is an end conf for a non-parallel group that's not - %% nested under a parallel group, so no need to buffer io - {false,tl(Mode0),undefined, - timer:now_diff(?now, conf_start(Ref, Mode0))/1000000, Status}; - {Ref,_} -> - %% this is an end conf for a non-parallel group nested under - %% a parallel group (io buffering is active) - queue_test_case_io(Ref, self(), 0, Mod, Func), - Elapsed = timer:now_diff(?now, conf_start(Ref, Mode0))/1000000, - case CurrIOHandler of - {Ref,_} -> - %% current_io_handler was set by start conf of this - %% group, so we can unset it after this case (no - %% more io from main process needs to be buffered) - {false,tl(Mode0),undefined,Elapsed,Status}; - _ -> - {false,tl(Mode0),CurrIOHandler,Elapsed,Status} - end; - {_,false} -> - %% this is a start conf for a group which is not nested under a - %% parallel group, check if this case starts a new parallel group - case lists:member(parallel, Props) of - true -> - %% prepare for execution of parallel group - put(test_server_parallel_start_time, ?now), - put(test_server_queued_io, []); - false -> - ok - end, - {true,[conf(Ref,Props)|Mode0],undefined,0,Status}; - {_,_Ref0} -> - %% this is a start conf for a group nested under a parallel group, the - %% parallel_start_time and parallel_test_cases values have already been set - queue_test_case_io(Ref, self(), 0, Mod, Func), - %% if this is the first nested group under a parallel group, io - %% buffering must be activated - IOHandler1 = if CurrIOHandler == undefined -> - IOH = {Ref,self()}, - set_io_buffering(IOH), - IOH; - true -> - CurrIOHandler - end, - {true,[conf(Ref,Props)|Mode0],IOHandler1,0,Status} - end, - - %% if this is a start conf we check if cases should be shuffled - {[_Conf|Cases1]=Cs1,Shuffle} = - if StartConf -> - case get_shuffle(Props) of - undefined -> - {Cs0,undefined}; - {_,repeated} -> - %% if group is repeated, a new seed should not be 
set every - %% turn - last one is saved in dictionary - CurrSeed = get(test_server_curr_random_seed), - {shuffle_cases(Ref, Cs0, CurrSeed),{shuffle,CurrSeed}}; - {_,Seed} -> - UseSeed= - %% Determine which seed to use by: - %% 1. check the TS_RANDOM_SEED env variable - %% 2. check random_seed in process state - %% 3. use value provided with shuffle option - %% 4. use timestamp() values for seed - case os:getenv("TS_RANDOM_SEED") of - Undef when Undef == false ; Undef == "undefined" -> - case get(test_server_random_seed) of - undefined -> Seed; - TSRS -> TSRS - end; - NumStr -> - %% Ex: "123 456 789" or "123,456,789" -> {123,456,789} - list_to_tuple([list_to_integer(NS) || - NS <- string:tokens(NumStr, [$ ,$:,$,])]) - end, - {shuffle_cases(Ref, Cs0, UseSeed),{shuffle,UseSeed}} - end; - not StartConf -> - {Cs0,undefined} - end, - - %% if this is a start conf we check if Props specifies repeat and if so - %% we copy the group and carry the copy until the end conf where we - %% decide to perform the repetition or not - {Repeating,Status2,Cases,ReportRepeatStop} = - if StartConf -> - case get_repeat(Props) of - undefined -> - %% we *must* have a status entry for every conf since we - %% will continously update status with test case results - %% without knowing the Ref (but update hd(Status)) - {false,new_status(Ref, Status1),Cases1,?void_fun}; - {_RepType,N} when N =< 1 -> - {false,new_status(Ref, Status1),Cases1,?void_fun}; - _ -> - {Copied,_} = copy_cases(Ref, make_ref(), Cs1), - {true,new_status(Ref, Copied, Status1),Cases1,?void_fun} - end; - not StartConf -> - RepVal = get_repeat(get_props(Mode0)), - ReportStop = - fun() -> - print(minor, "~n*** Stopping repeat operation ~w", [RepVal]), - print(1, "Stopping repeat operation ~w", [RepVal]) - end, - CopiedCases = get_copied_cases(Status1), - EndStatus = delete_status(Ref, Status1), - %% check in Mode0 if this is a repeat conf - case RepVal of - undefined -> - {false,EndStatus,Cases1,?void_fun}; - {_RepType,N} when N =< 1 -> - {false,EndStatus,Cases1,?void_fun}; - {repeat,_} -> - {true,EndStatus,CopiedCases++Cases1,?void_fun}; - {repeat_until_all_ok,_} -> - {RestCs,Fun} = case get_tc_results(Status1) of - {_,_,[]} -> - {Cases1,ReportStop}; - _ -> - {CopiedCases++Cases1,?void_fun} - end, - {true,EndStatus,RestCs,Fun}; - {repeat_until_any_ok,_} -> - {RestCs,Fun} = case get_tc_results(Status1) of - {Ok,_,_Fails} when length(Ok) > 0 -> - {Cases1,ReportStop}; - _ -> - {CopiedCases++Cases1,?void_fun} - end, - {true,EndStatus,RestCs,Fun}; - {repeat_until_any_fail,_} -> - {RestCs,Fun} = case get_tc_results(Status1) of - {_,_,Fails} when length(Fails) > 0 -> - {Cases1,ReportStop}; - _ -> - {CopiedCases++Cases1,?void_fun} - end, - {true,EndStatus,RestCs,Fun}; - {repeat_until_all_fail,_} -> - {RestCs,Fun} = case get_tc_results(Status1) of - {[],_,_} -> - {Cases1,ReportStop}; - _ -> - {CopiedCases++Cases1,?void_fun} - end, - {true,EndStatus,RestCs,Fun} - end - end, - - ReportAbortRepeat = fun(What) when Repeating -> - print(minor, "~n*** Aborting repeat operation " - "(configuration case ~w)", [What]), - print(1, "Aborting repeat operation " - "(configuration case ~w)", [What]); - (_) -> ok - end, - CfgProps = if StartConf -> - if Shuffle == undefined -> - [{tc_group_properties,Props}]; - true -> - [{tc_group_properties, - [Shuffle|delete_shuffle(Props)]}] - end; - not StartConf -> - {TcOk,TcSkip,TcFail} = get_tc_results(Status1), - [{tc_group_properties,get_props(Mode0)}, - {tc_group_result,[{ok,TcOk}, - {skipped,TcSkip}, - {failed,TcFail}]}] - end, - - 
SuiteName = proplists:get_value(suite, Props), - case get(test_server_create_priv_dir) of - auto_per_run -> % use common priv_dir - TSDirs = [{priv_dir,get(test_server_priv_dir)}, - {data_dir,get_data_dir(Mod, SuiteName)}]; - _ -> - TSDirs = [{data_dir,get_data_dir(Mod, SuiteName)}] - end, - - ActualCfg = - if not StartConf -> - update_config(hd(Config), TSDirs ++ CfgProps); - true -> - GroupPath = lists:flatmap(fun({_Ref,[],_T}) -> []; - ({_Ref,GrProps,_T}) -> [GrProps] - end, Mode0), - update_config(hd(Config), - TSDirs ++ [{tc_group_path,GroupPath} | CfgProps]) - end, - - CurrMode = curr_mode(Ref, Mode0, Mode), - ConfCaseResult = run_test_case(Ref, 0, Mod, Func, [ActualCfg], skip_init, - TimetrapData, CurrMode), - - case ConfCaseResult of - {_,NewCfg,_} when Func == init_per_suite, is_list(NewCfg) -> - %% check that init_per_suite returned data on correct format - case lists:filter(fun({_,_}) -> false; - (_) -> true end, NewCfg) of - [] -> - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(Cases, [NewCfg|Config], - TimetrapData, Mode, Status2); - Bad -> - print(minor, - "~n*** ~w returned bad elements in Config: ~p.~n", - [Func,Bad]), - Reason = {failed,{Mod,init_per_suite,bad_return}}, - Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode, - auto_skip_case), - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(Cases2, Config, TimetrapData, Mode, - delete_status(Ref, Status2)) - end; - {_,NewCfg,_} when StartConf, is_list(NewCfg) -> - print_conf_time(ConfTime), - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(Cases, [NewCfg|Config], TimetrapData, Mode, Status2); - {_,{framework_error,{FwMod,FwFunc},Reason},_} -> - print(minor, "~n*** ~w failed in ~w. Reason: ~p~n", - [FwMod,FwFunc,Reason]), - print(1, "~w failed in ~w. 
Reason: ~p~n", [FwMod,FwFunc,Reason]), - exit(framework_error); - {_,Fail,_} when element(1,Fail) == 'EXIT'; - element(1,Fail) == timetrap_timeout; - element(1,Fail) == user_timetrap_error; - element(1,Fail) == failed -> - {Cases2,Config1,Status3} = - if StartConf -> - ReportAbortRepeat(failed), - print(minor, "~n*** ~w failed.~n" - " Skipping all cases.", [Func]), - Reason = {failed,{Mod,Func,Fail}}, - {skip_cases_upto(Ref, Cases, Reason, conf, CurrMode, - auto_skip_case), - Config, - update_status(failed, group_result, get_name(Mode), - delete_status(Ref, Status2))}; - not StartConf -> - ReportRepeatStop(), - print_conf_time(ConfTime), - {Cases,tl(Config),delete_status(Ref, Status2)} - end, - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3); - - {_,{auto_skip,SkipReason},_} -> - %% this case can only happen if the framework (not the user) - %% decides to skip execution of a conf function - {Cases2,Config1,Status3} = - if StartConf -> - ReportAbortRepeat(auto_skipped), - print(minor, "~n*** ~w auto skipped.~n" - " Skipping all cases.", [Func]), - {skip_cases_upto(Ref, Cases, SkipReason, conf, CurrMode, - auto_skip_case), - Config, - delete_status(Ref, Status2)}; - not StartConf -> - ReportRepeatStop(), - print_conf_time(ConfTime), - {Cases,tl(Config),delete_status(Ref, Status2)} - end, - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3); - - {_,{Skip,Reason},_} when StartConf and ((Skip==skip) or (Skip==skipped)) -> - ReportAbortRepeat(skipped), - print(minor, "~n*** ~w skipped.~n" - " Skipping all cases.", [Func]), - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf, - CurrMode, skip_case), - [hd(Config)|Config], TimetrapData, Mode, - delete_status(Ref, Status2)); - {_,{skip_and_save,Reason,_SavedConfig},_} when StartConf -> - ReportAbortRepeat(skipped), - print(minor, "~n*** ~w skipped.~n" - " Skipping all cases.", [Func]), - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf, - CurrMode, skip_case), - [hd(Config)|Config], TimetrapData, Mode, - delete_status(Ref, Status2)); - {_,_Other,_} when Func == init_per_suite -> - print(minor, "~n*** init_per_suite failed to return a Config list.~n", []), - Reason = {failed,{Mod,init_per_suite,bad_return}}, - Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode, - auto_skip_case), - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(Cases2, Config, TimetrapData, Mode, - delete_status(Ref, Status2)); - {_,_Other,_} when StartConf -> - print_conf_time(ConfTime), - set_io_buffering(IOHandler), - ReportRepeatStop(), - stop_minor_log_file(), - run_test_cases_loop(Cases, [hd(Config)|Config], TimetrapData, - Mode, Status2); - {_,_EndConfRetVal,Opts} -> - %% Check if return_group_result is set (ok, skipped or failed) and - %% if so: - %% 1) *If* the parent group is a sequence, skip all proceeding tests - %% in that group. - %% 2) Return the value to the group "above" so that result may be - %% used for evaluating a 'repeat_until_*' property. 
- GrName = get_name(Mode0, Func), - {Cases2,Status3} = - case lists:keysearch(return_group_result, 1, Opts) of - {value,{_,failed}} -> - case {curr_ref(Mode),check_prop(sequence, Mode)} of - {ParentRef,ParentRef} -> - Reason = {group_result,GrName,failed}, - {skip_cases_upto(ParentRef, Cases, Reason, tc, - Mode, auto_skip_case), - update_status(failed, group_result, GrName, - delete_status(Ref, Status2))}; - _ -> - {Cases,update_status(failed, group_result, GrName, - delete_status(Ref, Status2))} - end; - {value,{_,GroupResult}} -> - {Cases,update_status(GroupResult, group_result, GrName, - delete_status(Ref, Status2))}; - false -> - {Cases,update_status(ok, group_result, GrName, - delete_status(Ref, Status2))} - end, - print_conf_time(ConfTime), - ReportRepeatStop(), - set_io_buffering(IOHandler), - stop_minor_log_file(), - run_test_cases_loop(Cases2, tl(Config), TimetrapData, - Mode, Status3) - end; - -run_test_cases_loop([{make,Ref,{Mod,Func,Args}}|Cases0], Config, TimetrapData, - Mode, Status) -> - case run_test_case(Ref, 0, Mod, Func, Args, skip_init, TimetrapData) of - {_,Why={'EXIT',_},_} -> - print(minor, "~n*** ~w failed.~n" - " Skipping all cases.", [Func]), - Reason = {failed,{Mod,Func,Why}}, - Cases = skip_cases_upto(Ref, Cases0, Reason, conf, Mode, - auto_skip_case), - stop_minor_log_file(), - run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status); - {_,_Whatever,_} -> - stop_minor_log_file(), - run_test_cases_loop(Cases0, Config, TimetrapData, Mode, Status) - end; - -run_test_cases_loop([{conf,_Ref,_Props,_X}=Conf|_Cases0], - Config, _TimetrapData, _Mode, _Status) -> - erlang:error(badarg, [Conf,Config]); - -run_test_cases_loop([{Mod,Case}|Cases], Config, TimetrapData, Mode, Status) -> - ActualCfg = - case get(test_server_create_priv_dir) of - auto_per_run -> - update_config(hd(Config), [{priv_dir,get(test_server_priv_dir)}, - {data_dir,get_data_dir(Mod)}]); - _ -> - update_config(hd(Config), [{data_dir,get_data_dir(Mod)}]) - end, - run_test_cases_loop([{Mod,Case,[ActualCfg]}|Cases], Config, - TimetrapData, Mode, Status); - -run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status) -> - {Num,RunInit} = - case FwMod = get_fw_mod(?MODULE) of - Mod when Func == error_in_suite -> - {-1,skip_init}; - _ -> - {put(test_server_case_num, get(test_server_case_num)+1), - run_init} - end, - - %% check the current execution mode and save info about the case if - %% detected that printouts to common log files is handled later - - case check_prop(parallel, Mode) =:= false andalso is_io_buffered() of - true -> - %% sequential test case nested in a parallel group; - %% io is buffered, so we must queue this test case - queue_test_case_io(undefined, self(), Num+1, Mod, Func); - false -> - ok - end, - - case run_test_case(undefined, Num+1, Mod, Func, Args, - RunInit, TimetrapData, Mode) of - %% callback to framework module failed, exit immediately - {_,{framework_error,{FwMod,FwFunc},Reason},_} -> - print(minor, "~n*** ~w failed in ~w. Reason: ~p~n", - [FwMod,FwFunc,Reason]), - print(1, "~w failed in ~w. 
Reason: ~p~n", [FwMod,FwFunc,Reason]), - stop_minor_log_file(), - exit(framework_error); - %% sequential execution of test case finished - {Time,RetVal,_} -> - {Failed,Status1} = - case Time of - died -> - {true,update_status(failed, Mod, Func, Status)}; - _ when is_tuple(RetVal) -> - case element(1, RetVal) of - R when R=='EXIT'; R==failed -> - {true,update_status(failed, Mod, Func, Status)}; - R when R==skip; R==skipped -> - {false,update_status(skipped, Mod, Func, Status)}; - _ -> - {false,update_status(ok, Mod, Func, Status)} - end; - _ -> - {false,update_status(ok, Mod, Func, Status)} - end, - case check_prop(sequence, Mode) of - false -> - stop_minor_log_file(), - run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status1); - Ref -> - %% the case is in a sequence; we must check the result and - %% determine if the following cases should run or be skipped - if not Failed -> % proceed with next case - stop_minor_log_file(), - run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status1); - true -> % skip rest of cases in sequence - print(minor, "~n*** ~w failed.~n" - " Skipping all other cases in sequence.", - [Func]), - Reason = {failed,{Mod,Func}}, - Cases2 = skip_cases_upto(Ref, Cases, Reason, tc, - Mode, auto_skip_case), - stop_minor_log_file(), - run_test_cases_loop(Cases2, Config, TimetrapData, Mode, Status1) - end - end; - %% the test case is being executed in parallel with the main process (and - %% other test cases) and Pid is the dedicated process executing the case - Pid -> - %% io from Pid will be buffered by the test_server_io process and - %% handled later, so we have to save info about the case - queue_test_case_io(undefined, Pid, Num+1, Mod, Func), - run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status) - end; - -%% TestSpec processing finished -run_test_cases_loop([], _Config, _TimetrapData, _, _) -> - ok. - -%%-------------------------------------------------------------------- -%% various help functions - -new_status(Ref, Status) -> - [{Ref,{{[],[],[]},[]}} | Status]. - -new_status(Ref, CopiedCases, Status) -> - [{Ref,{{[],[],[]},CopiedCases}} | Status]. - -delete_status(Ref, Status) -> - lists:keydelete(Ref, 1, Status). - -update_status(ok, Mod, Func, [{Ref,{{Ok,Skip,Fail},Cs}} | Status]) -> - [{Ref,{{Ok++[{Mod,Func}],Skip,Fail},Cs}} | Status]; - -update_status(skipped, Mod, Func, [{Ref,{{Ok,Skip,Fail},Cs}} | Status]) -> - [{Ref,{{Ok,Skip++[{Mod,Func}],Fail},Cs}} | Status]; - -update_status(failed, Mod, Func, [{Ref,{{Ok,Skip,Fail},Cs}} | Status]) -> - [{Ref,{{Ok,Skip,Fail++[{Mod,Func}]},Cs}} | Status]; - -update_status(_, _, _, []) -> - []. - -update_status(Ref, {Ok,Skip,Fail}, [{Ref,{{Ok0,Skip0,Fail0},Cs}} | Status]) -> - [{Ref,{{Ok0++Ok,Skip0++Skip,Fail0++Fail},Cs}} | Status]. - -get_copied_cases([{_,{_,Cases}} | _Status]) -> - Cases. - -get_tc_results([{_,{OkSkipFail,_}} | _Status]) -> - OkSkipFail; -get_tc_results([]) -> % in case init_per_suite crashed - {[],[],[]}. - -conf(Ref, Props) -> - {Ref,Props,?now}. - -curr_ref([{Ref,_Props,_}|_]) -> - Ref; -curr_ref([]) -> - undefined. - -curr_mode(Ref, Mode0, Mode1) -> - case curr_ref(Mode1) of - Ref -> Mode1; - _ -> Mode0 - end. - -get_props([{_,Props,_} | _]) -> - Props; -get_props([]) -> - []. - -check_prop(_Attrib, []) -> - false; -check_prop(Attrib, [{Ref,Props,_}|_]) -> - case lists:member(Attrib, Props) of - true -> Ref; - false -> false - end. - -check_props(Attrib, Mode) -> - case [R || {R,Ps,_} <- Mode, lists:member(Attrib, Ps)] of - [] -> false; - [Ref|_] -> Ref - end. 
- -get_name(Mode, Def) -> - case get_name(Mode) of - undefined -> Def; - Name -> Name - end. - -get_name([{_Ref,Props,_}|_]) -> - proplists:get_value(name, Props); -get_name([]) -> - undefined. - -conf_start(Ref, Mode) -> - case lists:keysearch(Ref, 1, Mode) of - {value,{_,_,T}} -> T; - false -> 0 - end. - - -get_data_dir(Mod) -> - get_data_dir(Mod, undefined). - -get_data_dir(Mod, Suite) -> - UseMod = if Suite == undefined -> Mod; - true -> Suite - end, - case code:which(UseMod) of - non_existing -> - print(12, "The module ~w is not loaded", [Mod]), - []; - cover_compiled -> - MainCoverNode = cover:get_main_node(), - {file,File} = rpc:call(MainCoverNode,cover,is_compiled,[UseMod]), - do_get_data_dir(UseMod,File); - FullPath -> - do_get_data_dir(UseMod,FullPath) - end. - -do_get_data_dir(Mod,File) -> - filename:dirname(File) ++ "/" ++ atom_to_list(Mod) ++ ?data_dir_suffix. - -print_conf_time(0) -> - ok; -print_conf_time(ConfTime) -> - print(major, "=group_time ~.3fs", [ConfTime]), - print(minor, "~n=== Total execution time of group: ~.3fs~n", [ConfTime]). - -print_props([]) -> - ok; -print_props(Props) -> - print(major, "=group_props ~p", [Props]), - print(minor, "Group properties: ~p~n", [Props]). - -%% repeat N times: {repeat,N} -%% repeat N times or until all successful: {repeat_until_all_ok,N} -%% repeat N times or until at least one successful: {repeat_until_any_ok,N} -%% repeat N times or until at least one case fails: {repeat_until_any_fail,N} -%% repeat N times or until all fails: {repeat_until_all_fail,N} -%% N = integer() | forever -get_repeat(Props) -> - get_prop([repeat,repeat_until_all_ok,repeat_until_any_ok, - repeat_until_any_fail,repeat_until_all_fail], forever, Props). - -update_repeat(Props) -> - case get_repeat(Props) of - undefined -> - Props; - {RepType,N} -> - Props1 = - if N == forever -> - [{RepType,N}|lists:keydelete(RepType, 1, Props)]; - N < 3 -> - lists:keydelete(RepType, 1, Props); - N >= 3 -> - [{RepType,N-1}|lists:keydelete(RepType, 1, Props)] - end, - %% if shuffle is used in combination with repeat, a new - %% seed shouldn't be set every new turn - case get_shuffle(Props1) of - undefined -> - Props1; - _ -> - [{shuffle,repeated}|delete_shuffle(Props1)] - end - end. - -get_shuffle(Props) -> - get_prop([shuffle], ?now, Props). - -delete_shuffle(Props) -> - delete_prop([shuffle], Props). - -%% Return {Item,Value} if found, else if Item alone -%% is found, return {Item,Default} -get_prop([Item|Items], Default, Props) -> - case lists:keysearch(Item, 1, Props) of - {value,R} -> - R; - false -> - case lists:member(Item, Props) of - true -> - {Item,Default}; - false -> - get_prop(Items, Default, Props) - end - end; -get_prop([], _Def, _Props) -> - undefined. - -delete_prop([Item|Items], Props) -> - Props1 = lists:delete(Item, lists:keydelete(Item, 1, Props)), - delete_prop(Items, Props1); -delete_prop([], Props) -> - Props. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% shuffle_cases(Ref, Cases, Seed) -> Cases1 -%% -%% Shuffles the order of Cases. - -shuffle_cases(Ref, Cases, undefined) -> - shuffle_cases(Ref, Cases, ?now); - -shuffle_cases(Ref, [{conf,Ref,_,_}=Start | Cases], Seed) -> - {N,CasesToShuffle,Rest} = cases_to_shuffle(Ref, Cases), - ShuffledCases = random_order(N, random:uniform_s(N, Seed), CasesToShuffle, []), - [Start|ShuffledCases] ++ Rest. - -cases_to_shuffle(Ref, Cases) -> - cases_to_shuffle(Ref, Cases, 1, []). 
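Note (illustrative, not part of the deleted module; the property lists are invented): get_prop/3 above treats a bare atom as "property present, use the default", which gives get_repeat/1, update_repeat/1 and get_shuffle/1 the following behaviour:

    %% get_repeat([{repeat,3},parallel])  -> {repeat,3}
    %% get_repeat([repeat_until_all_ok])  -> {repeat_until_all_ok,forever}
    %% get_repeat([parallel,shuffle])     -> undefined
    %% get_shuffle([shuffle])             -> {shuffle,Now}   (?now used as default seed)
    %% update_repeat([{repeat,3}])        -> [{repeat,2}]
    %% update_repeat([{repeat,2}])        -> []              (repeat property dropped)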
- -cases_to_shuffle(Ref, [{conf,Ref,_,_} | _]=Cs, N, Ix) -> % end - {N-1,Ix,Cs}; -cases_to_shuffle(Ref, [{skip_case,{_,Ref,_,_},_} | _]=Cs, N, Ix) -> % end - {N-1,Ix,Cs}; - -cases_to_shuffle(Ref, [{conf,Ref1,_,_}=C | Cs], N, Ix) -> % nested group - {Cs1,Rest} = get_subcases(Ref1, Cs, []), - cases_to_shuffle(Ref, Rest, N+1, [{N,[C|Cs1]} | Ix]); -cases_to_shuffle(Ref, [{skip_case,{_,Ref1,_,_},_}=C | Cs], N, Ix) -> % nested group - {Cs1,Rest} = get_subcases(Ref1, Cs, []), - cases_to_shuffle(Ref, Rest, N+1, [{N,[C|Cs1]} | Ix]); - -cases_to_shuffle(Ref, [C | Cs], N, Ix) -> - cases_to_shuffle(Ref, Cs, N+1, [{N,[C]} | Ix]). - -get_subcases(SubRef, [{conf,SubRef,_,_}=C | Cs], SubCs) -> - {lists:reverse([C|SubCs]),Cs}; -get_subcases(SubRef, [{skip_case,{_,SubRef,_,_},_}=C | Cs], SubCs) -> - {lists:reverse([C|SubCs]),Cs}; -get_subcases(SubRef, [C|Cs], SubCs) -> - get_subcases(SubRef, Cs, [C|SubCs]). - -random_order(1, {_Pos,Seed}, [{_Ix,CaseOrGroup}], Shuffled) -> - %% save current seed to be used if test cases are repeated - put(test_server_curr_random_seed, Seed), - Shuffled++CaseOrGroup; -random_order(N, {Pos,NewSeed}, IxCases, Shuffled) -> - {First,[{_Ix,CaseOrGroup}|Rest]} = lists:split(Pos-1, IxCases), - random_order(N-1, random:uniform_s(N-1, NewSeed), - First++Rest, Shuffled++CaseOrGroup). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% skip_case(Type, Ref, CaseNum, Case, Comment, SendSync) -> {Mod,Func} -%% -%% Prints info about a skipped case in the major and html log files. -%% SendSync determines if start and finished messages must be sent so -%% that the printouts can be buffered and handled in order with io from -%% parallel processes. -skip_case(Type, Ref, CaseNum, Case, Comment, SendSync, Mode) -> - MF = {Mod,Func} = case Case of - {M,F,_A} -> {M,F}; - {M,F} -> {M,F} - end, - if SendSync -> - queue_test_case_io(Ref, self(), CaseNum, Mod, Func), - self() ! {started,Ref,self(),CaseNum,Mod,Func}, - test_server_io:start_transaction(), - skip_case1(Type, CaseNum, Mod, Func, Comment, Mode), - test_server_io:end_transaction(), - self() ! {finished,Ref,self(),CaseNum,Mod,Func,skipped,{0,skipped,[]}}; - not SendSync -> - skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) - end, - MF. 
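Note (illustrative, not part of the deleted module): random_order/4 above threads the state returned by random:uniform_s/2 from one pick to the next, which is what makes a shuffled order reproducible from a given seed (for instance one supplied via TS_RANDOM_SEED, handled earlier in run_test_cases_loop). A minimal self-contained sketch of the same idea, written against the legacy random module this file uses:

    -module(shuffle_sketch).
    -export([shuffle/2]).

    %% Shuffle List deterministically from a {A,B,C} integer seed by
    %% repeatedly picking a random position and threading the updated
    %% random state into the next pick.
    shuffle(List, Seed) ->
        shuffle(List, Seed, []).

    shuffle([], _State, Acc) ->
        lists:reverse(Acc);
    shuffle(List, State, Acc) ->
        {Pos, NextState} = random:uniform_s(length(List), State),
        {Before, [Pick | After]} = lists:split(Pos - 1, List),
        shuffle(Before ++ After, NextState, [Pick | Acc]).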
- -skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) -> - {{Col0,Col1},_} = get_font_style((CaseNum > 0), Mode), - ResultCol = if Type == auto -> ?auto_skip_color; - Type == user -> ?user_skip_color - end, - print(major, "~n=case ~w:~w", [Mod,Func]), - GroupName = case get_name(Mode) of - undefined -> - ""; - GrName -> - GrName1 = cast_to_list(GrName), - print(major, "=group_props ~p", [[{name,GrName1}]]), - GrName1 - end, - print(major, "=started ~s", [lists:flatten(timestamp_get(""))]), - Comment1 = reason_to_string(Comment), - if Type == auto -> - print(major, "=result auto_skipped: ~ts", [Comment1]); - Type == user -> - print(major, "=result skipped: ~ts", [Comment1]) - end, - if CaseNum == 0 -> - print(2,"*** Skipping ~w ***", [{Mod,Func}]); - true -> - print(2,"*** Skipping test case #~w ~w ***", [CaseNum,{Mod,Func}]) - end, - TR = xhtml("<tr valign=\"top\">", ["<tr class=\"",odd_or_even(),"\">"]), - GroupName = case get_name(Mode) of - undefined -> ""; - Name -> cast_to_list(Name) - end, - print(html, - TR ++ "<td>" ++ Col0 ++ "~ts" ++ Col1 ++ "</td>" - "<td>" ++ Col0 ++ "~w" ++ Col1 ++ "</td>" - "<td>" ++ Col0 ++ "~ts" ++ Col1 ++ "</td>" - "<td>" ++ Col0 ++ "~w" ++ Col1 ++ "</td>" - "<td>" ++ Col0 ++ "< >" ++ Col1 ++ "</td>" - "<td>" ++ Col0 ++ "0.000s" ++ Col1 ++ "</td>" - "<td><font color=\"~ts\">SKIPPED</font></td>" - "<td>~ts</td></tr>\n", - [num2str(CaseNum),fw_name(Mod),GroupName,Func,ResultCol,Comment1]), - - if CaseNum > 0 -> - {US,AS} = get(test_server_skipped), - case Type of - user -> put(test_server_skipped, {US+1,AS}); - auto -> put(test_server_skipped, {US,AS+1}) - end, - put(test_server_case_num, CaseNum); - true -> % conf - ok - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% skip_cases_upto(Ref, Cases, Reason, Origin, Mode, SkipType) -> Cases1 -%% -%% SkipType = skip_case | auto_skip_case -%% Mark all cases tagged with Ref as skipped. - -skip_cases_upto(Ref, Cases, Reason, Origin, Mode, SkipType) -> - {_,Modified,Rest} = - modify_cases_upto(Ref, {skip,Reason,Origin,Mode,SkipType}, Cases), - Modified++Rest. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% copy_cases(OrigRef, NewRef, Cases) -> Cases1 -%% -%% Copy the test cases marked with OrigRef and tag the copies with NewRef. -%% The start conf case copy will also get its repeat property updated. - -copy_cases(OrigRef, NewRef, Cases) -> - {Original,Altered,Rest} = modify_cases_upto(OrigRef, {copy,NewRef}, Cases), - {Altered,Original++Altered++Rest}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% modify_cases_upto(Ref, ModOp, Cases) -> {Original,Altered,Remaining} -%% -%% ModOp = {skip,Reason,Origin,Mode} | {copy,NewRef} -%% Origin = conf | tc -%% -%% Modifies Cases according to ModOp and returns the original elements, -%% the modified versions of these elements and the remaining (untouched) -%% cases. - -modify_cases_upto(Ref, ModOp, Cases) -> - {Original,Altered,Rest} = modify_cases_upto(Ref, ModOp, Cases, [], []), - {lists:reverse(Original),lists:reverse(Altered),Rest}. 
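Note (illustrative, not part of the deleted module; Ref, NewRef, Reason, Mode and the suite/case names are invented): a short walk-through of the two operations built on modify_cases_upto/3. Given the cases remaining after a failed init conf tagged Ref,

    %% Cases = [{my_SUITE,tc1},
    %%          {my_SUITE,tc2},
    %%          {conf,Ref,[],{my_SUITE,end_per_group}},
    %%          {other_SUITE,tc3}]

skip_cases_upto(Ref, Cases, Reason, conf, Mode, auto_skip_case) turns tc1 and tc2 into {auto_skip_case,{{my_SUITE,tc1},Reason},Mode} and {auto_skip_case,{{my_SUITE,tc2},Reason},Mode}, replaces the end conf with {auto_skip_case,{conf,Ref,{my_SUITE,end_per_group},Reason},Mode}, and leaves {other_SUITE,tc3} untouched. copy_cases(Ref, NewRef, AllCases), applied to the whole group including its start conf, returns {Copies, Original ++ Copies ++ Rest}, where the conf cases of the copy are tagged NewRef and their repeat property has been stepped down by update_repeat/1.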
- -%% first case of a copy operation is the start conf -modify_cases_upto(Ref, {copy,NewRef}=Op, [{conf,Ref,Props,MF}=C|T], Orig, Alt) -> - modify_cases_upto(Ref, Op, T, [C|Orig], [{conf,NewRef,update_repeat(Props),MF}|Alt]); - -modify_cases_upto(Ref, ModOp, Cases, Orig, Alt) -> - %% we need to check if there's an end conf case with the - %% same ref in the list, if not, this *is* an end conf case - case lists:any(fun({_,R,_,_}) when R == Ref -> true; - ({_,R,_}) when R == Ref -> true; - ({skip_case,{_,R,_,_},_}) when R == Ref -> true; - ({skip_case,{_,R,_,_}}) when R == Ref -> true; - (_) -> false - end, Cases) of - true -> - modify_cases_upto1(Ref, ModOp, Cases, Orig, Alt); - false -> - {[],[],Cases} - end. - -%% next case is a conf with same ref, must be end conf = we're done -modify_cases_upto1(Ref, {skip,Reason,conf,Mode,skip_case}, - [{conf,Ref,_Props,MF}|T], Orig, Alt) -> - {Orig,[{skip_case,{conf,Ref,MF,Reason},Mode}|Alt],T}; -modify_cases_upto1(Ref, {skip,Reason,conf,Mode,auto_skip_case}, - [{conf,Ref,_Props,MF}|T], Orig, Alt) -> - {Orig,[{auto_skip_case,{conf,Ref,MF,Reason},Mode}|Alt],T}; -modify_cases_upto1(Ref, {copy,NewRef}, [{conf,Ref,Props,MF}=C|T], Orig, Alt) -> - {[C|Orig],[{conf,NewRef,update_repeat(Props),MF}|Alt],T}; - -%% we've skipped all remaining cases in a sequence -modify_cases_upto1(Ref, {skip,_,tc,_,_}, - [{conf,Ref,_Props,_MF}|_]=Cs, Orig, Alt) -> - {Orig,Alt,Cs}; - -%% next is a make case -modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType}, - [{make,Ref,MF}|T], Orig, Alt) -> - {Orig,[{SkipType,{make,Ref,MF,Reason},Mode}|Alt],T}; -modify_cases_upto1(Ref, {copy,NewRef}, [{make,Ref,MF}=M|T], Orig, Alt) -> - {[M|Orig],[{make,NewRef,MF}|Alt],T}; - -%% next case is a user skipped end conf with the same ref = we're done -modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType}, - [{skip_case,{Type,Ref,MF,_Cmt},_}|T], Orig, Alt) -> - {Orig,[{SkipType,{Type,Ref,MF,Reason},Mode}|Alt],T}; -modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType}, - [{skip_case,{Type,Ref,MF,_Cmt}}|T], Orig, Alt) -> - {Orig,[{SkipType,{Type,Ref,MF,Reason},Mode}|Alt],T}; -modify_cases_upto1(Ref, {copy,NewRef}, - [{skip_case,{Type,Ref,MF,Cmt},Mode}=C|T], Orig, Alt) -> - {[C|Orig],[{skip_case,{Type,NewRef,MF,Cmt},Mode}|Alt],T}; -modify_cases_upto1(Ref, {copy,NewRef}, - [{skip_case,{Type,Ref,MF,Cmt}}=C|T], Orig, Alt) -> - {[C|Orig],[{skip_case,{Type,NewRef,MF,Cmt}}|Alt],T}; - -%% next is a skip_case, could be one test case or 'all' in suite, we must proceed -modify_cases_upto1(Ref, ModOp, [{skip_case,{_F,_Cmt},_Mode}=MF|T], Orig, Alt) -> - modify_cases_upto1(Ref, ModOp, T, [MF|Orig], [MF|Alt]); - -%% next is a normal case (possibly in a sequence), mark as skipped, or copy, and proceed -modify_cases_upto1(Ref, {skip,Reason,_,Mode,skip_case}=Op, - [{_M,_F}=MF|T], Orig, Alt) -> - modify_cases_upto1(Ref, Op, T, Orig, [{skip_case,{MF,Reason},Mode}|Alt]); -modify_cases_upto1(Ref, {skip,Reason,_,Mode,auto_skip_case}=Op, - [{_M,_F}=MF|T], Orig, Alt) -> - modify_cases_upto1(Ref, Op, T, Orig, [{auto_skip_case,{MF,Reason},Mode}|Alt]); -modify_cases_upto1(Ref, CopyOp, [{_M,_F}=MF|T], Orig, Alt) -> - modify_cases_upto1(Ref, CopyOp, T, [MF|Orig], [MF|Alt]); - -%% next is a conf case, modify the Mode arg to keep track of sub groups -modify_cases_upto1(Ref, {skip,Reason,FType,Mode,SkipType}, - [{conf,OtherRef,Props,_MF}|T], Orig, Alt) -> - case hd(Mode) of - {OtherRef,_,_} -> % end conf - modify_cases_upto1(Ref, {skip,Reason,FType,tl(Mode),SkipType}, - T, Orig, Alt); - _ -> % start conf - Mode1 = 
[conf(OtherRef,Props)|Mode], - modify_cases_upto1(Ref, {skip,Reason,FType,Mode1,SkipType}, - T, Orig, Alt) - end; - -%% next is some other case, ignore or copy -modify_cases_upto1(Ref, {skip,_,_,_,_}=Op, [_Other|T], Orig, Alt) -> - modify_cases_upto1(Ref, Op, T, Orig, Alt); -modify_cases_upto1(Ref, CopyOp, [C|T], Orig, Alt) -> - modify_cases_upto1(Ref, CopyOp, T, [C|Orig], [C|Alt]). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% set_io_buffering(IOHandler) -> PrevIOHandler -%% -%% Save info about current process (always the main process) buffering -%% io printout messages from parallel test case processes (*and* possibly -%% also the main process). - -set_io_buffering(IOHandler) -> - put(test_server_common_io_handler, IOHandler). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% is_io_buffered() -> true|false -%% -%% Test whether is being buffered. - -is_io_buffered() -> - get(test_server_common_io_handler) =/= undefined. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% queue_test_case_io(Pid, Num, Mod, Func) -> ok -%% -%% Save info about test case that gets its io buffered. This can -%% be a parallel test case or it can be a test case (conf or normal) -%% that belongs to a group nested under a parallel group. The queue -%% is processed after io buffering is disabled. See run_test_cases_loop/4 -%% and handle_test_case_io_and_status/0 for more info. - -queue_test_case_io(Ref, Pid, Num, Mod, Func) -> - Entry = {Ref,Pid,Num,Mod,Func}, - %% the order of the test cases is very important! - put(test_server_queued_io, - get(test_server_queued_io)++[Entry]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% wait_for_cases(Ref) -> {Ok,Skipped,Failed} -%% -%% At the end of a nested parallel group, we have to wait for the test -%% cases to terminate before we can go on (since test cases never execute -%% in parallel with the end conf case of the group). When a top level -%% parallel group is finished, buffered io messages must be handled and -%% this is taken care of by handle_test_case_io_and_status/0. - -wait_for_cases(Ref) -> - case get(test_server_queued_io) of - [] -> - {[],[],[]}; - Cases -> - [_Start|TCs] = - lists:dropwhile(fun({R,_,_,_,_}) when R == Ref -> false; - (_) -> true - end, Cases), - wait_and_resend(Ref, TCs, [],[],[]) - end. - -wait_and_resend(Ref, [{OtherRef,_,0,_,_}|Ps], - Ok,Skip,Fail) when is_reference(OtherRef), - OtherRef /= Ref -> - %% ignore cases that belong to nested group - Ps1 = rm_cases_upto(OtherRef, Ps), - wait_and_resend(Ref, Ps1, Ok,Skip,Fail); - -wait_and_resend(Ref, [{_,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) -> - receive - {finished,_Ref,CurrPid,CaseNum,Mod,Func,Result,_RetVal} = Msg -> - %% resend message to main process so that it can be used - %% to test_server_io:print_buffered/1 later - self() ! Msg, - MF = {Mod,Func}, - {Ok1,Skip1,Fail1} = - case Result of - ok -> {[MF|Ok],Skip,Fail}; - skipped -> {Ok,[MF|Skip],Fail}; - failed -> {Ok,Skip,[MF|Fail]} - end, - wait_and_resend(Ref, Ps, Ok1,Skip1,Fail1); - {'EXIT',CurrPid,Reason} when Reason /= normal -> - %% unexpected termination of test case process - {value,{_,_,CaseNum,Mod,Func}} = lists:keysearch(CurrPid, 2, Cases), - print(1, "Error! Process for test case #~w (~w:~w) died! 
Reason: ~p", - [CaseNum, Mod, Func, Reason]), - exit({unexpected_termination,{CaseNum,Mod,Func},{CurrPid,Reason}}) - end; - -wait_and_resend(_, [], Ok,Skip,Fail) -> - {lists:reverse(Ok),lists:reverse(Skip),lists:reverse(Fail)}. - -rm_cases_upto(Ref, [{Ref,_,0,_,_}|Ps]) -> - Ps; -rm_cases_upto(Ref, [_|Ps]) -> - rm_cases_upto(Ref, Ps). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% handle_test_case_io_and_status() -> [Ok,Skipped,Failed} -%% -%% Each parallel test case process prints to its own minor log file during -%% execution. The common log files (major, html etc) must however be -%% written to sequentially. This is handled by calling -%% test_server_io:start_transaction/0 to tell the test_server_io process -%% to buffer all print requests. -%% -%% An io session is always started with a -%% {started,Ref,Pid,Num,Mod,Func} message (and -%% test_server_io:start_transaction/0 will be called) and terminated -%% with {finished,Ref,Pid,Num,Mod,Func,Result,RetVal} (and -%% test_server_io:end_transaction/0 will be called). The result -%% shipped with the finished message from a parallel process is used -%% to update status data of the current test run. An 'EXIT' message -%% from each parallel test case process (after finishing and -%% terminating) is also received and handled here. -%% -%% During execution of a parallel group, any cases (conf or normal) -%% belonging to a nested group will also get its io printouts buffered. -%% This is necessary to get the major and html log files written in -%% correct sequence. This function handles also the print messages -%% generated by nested group cases that have been executed sequentially -%% by the main process (note that these cases do not generate 'EXIT' -%% messages, only 'start' and 'finished' messages). -%% -%% See the header comment for run_test_cases_loop/4 for more -%% info about IO handling. -%% -%% Note: It is important that the type of messages handled here -%% do not get consumed by test_server:run_test_case_msgloop/5 -%% during the test case execution (e.g. in the catch clause of -%% the receive)! - -handle_test_case_io_and_status() -> - case get(test_server_queued_io) of - [] -> - {[],[],[]}; - Cases -> - %% Cases = [{Ref,Pid,CaseNum,Mod,Func} | ...] - Result = handle_io_and_exit_loop([], Cases, [],[],[]), - Main = self(), - %% flush normal exit messages - lists:foreach(fun({_,Pid,_,_,_}) when Pid /= Main -> - receive - {'EXIT',Pid,normal} -> ok - after - 1000 -> ok - end; - (_) -> - ok - end, Cases), - Result - end. - -%% Handle cases (without Ref) that belong to the top parallel group (i.e. 
when Refs = []) -handle_io_and_exit_loop([], [{undefined,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) -> - %% retrieve the start message for the current io session (= testcase) - receive - {started,_,CurrPid,CaseNum,Mod,Func} -> - {Ok1,Skip1,Fail1} = - case handle_io_and_exits(self(), CurrPid, CaseNum, Mod, Func, Cases) of - {ok,MF} -> {[MF|Ok],Skip,Fail}; - {skipped,MF} -> {Ok,[MF|Skip],Fail}; - {failed,MF} -> {Ok,Skip,[MF|Fail]} - end, - handle_io_and_exit_loop([], Ps, Ok1,Skip1,Fail1) - after - 1000 -> - exit({testcase_failed_to_start,Mod,Func}) - end; - -%% Handle cases that belong to groups nested under top parallel group -handle_io_and_exit_loop(Refs, [{Ref,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) -> - receive - {started,_,CurrPid,CaseNum,Mod,Func} -> - handle_io_and_exits(self(), CurrPid, CaseNum, Mod, Func, Cases), - Refs1 = - case Refs of - [Ref|Rs] -> % must be end conf case for subgroup - Rs; - _ when is_reference(Ref) -> % must be start of new subgroup - [Ref|Refs]; - _ -> % must be normal subgroup testcase - Refs - end, - handle_io_and_exit_loop(Refs1, Ps, Ok,Skip,Fail) - after - 1000 -> - exit({testcase_failed_to_start,Mod,Func}) - end; - -handle_io_and_exit_loop(_, [], Ok,Skip,Fail) -> - {lists:reverse(Ok),lists:reverse(Skip),lists:reverse(Fail)}. - -handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases) -> - receive - {abort_current_testcase=Tag,_Reason,From} -> - %% If a parallel group is executing, there is no unique - %% current test case, so we must generate an error. - From ! {self(),Tag,{error,parallel_group}}, - handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases); - %% end of io session from test case executed by main process - {finished,_,Main,CaseNum,Mod,Func,Result,_RetVal} -> - test_server_io:print_buffered(CurrPid), - {Result,{Mod,Func}}; - %% end of io session from test case executed by parallel process - {finished,_,CurrPid,CaseNum,Mod,Func,Result,RetVal} -> - test_server_io:print_buffered(CurrPid), - case Result of - ok -> - put(test_server_ok, get(test_server_ok)+1); - failed -> - put(test_server_failed, get(test_server_failed)+1); - skipped -> - SkipCounters = - update_skip_counters(RetVal, get(test_server_skipped)), - put(test_server_skipped, SkipCounters) - end, - {Result,{Mod,Func}}; - - %% unexpected termination of test case process - {'EXIT',TCPid,Reason} when Reason /= normal -> - test_server_io:print_buffered(CurrPid), - {value,{_,_,Num,M,F}} = lists:keysearch(TCPid, 2, Cases), - print(1, "Error! Process for test case #~w (~w:~w) died! Reason: ~p", - [Num, M, F, Reason]), - exit({unexpected_termination,{Num,M,F},{TCPid,Reason}}) - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% run_test_case(Ref, Num, Mod, Func, Args, RunInit, -%% TimetrapData, Mode) -> RetVal -%% -%% Creates the minor log file and inserts some test case specific headers -%% and footers into the log files. Then the test case is executed and the -%% result is printed to the log files (also info about lingering processes -%% & slave nodes in the system is presented). -%% -%% RunInit decides if the per test case init is to be run (true for all -%% but conf cases). -%% -%% Mode specifies if the test case should be executed by a dedicated, -%% parallel, process rather than sequentially by the main process. If -%% the former, the new process is spawned and the dictionary of the main -%% process is copied to the test case process. -%% -%% RetVal is the result of executing the test case. 
It contains info -%% about the execution time and the return value of the test case function. - -run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData) -> - file:set_cwd(filename:dirname(get(test_server_dir))), - run_test_case1(Ref, Num, Mod, Func, Args, RunInit, - TimetrapData, [], self()). - -run_test_case(Ref, Num, Mod, Func, Args, skip_init, TimetrapData, Mode) -> - %% a conf case is always executed by the main process - run_test_case1(Ref, Num, Mod, Func, Args, skip_init, - TimetrapData, Mode, self()); - -run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData, Mode) -> - file:set_cwd(filename:dirname(get(test_server_dir))), - Main = self(), - case check_prop(parallel, Mode) of - false -> - %% this is a sequential test case - run_test_case1(Ref, Num, Mod, Func, Args, RunInit, - TimetrapData, Mode, Main); - _Ref -> - %% this a parallel test case, spawn the new process - Dictionary = get(), - {dictionary,Dictionary} = process_info(self(), dictionary), - spawn_link( - fun() -> - process_flag(trap_exit, true), - [put(Key, Val) || {Key,Val} <- Dictionary], - set_io_buffering({tc,Main}), - run_test_case1(Ref, Num, Mod, Func, Args, RunInit, - TimetrapData, Mode, Main) - end) - end. - -run_test_case1(Ref, Num, Mod, Func, Args, RunInit, - TimetrapData, Mode, Main) -> - group_leader(test_server_io:get_gl(Main == self()), self()), - - %% if io is being buffered, send start io session message - %% (no matter if case runs on parallel or main process) - case is_io_buffered() of - false -> ok; - true -> - test_server_io:start_transaction(), - Main ! {started,Ref,self(),Num,Mod,Func} - end, - TSDir = get(test_server_dir), - - print(major, "=case ~w:~w", [Mod, Func]), - MinorName = start_minor_log_file(Mod, Func, self() /= Main), - MinorBase = filename:basename(MinorName), - print(major, "=logfile ~ts", [filename:basename(MinorName)]), - - UpdatedArgs = - %% maybe create unique private directory for test case or config func - case get(test_server_create_priv_dir) of - auto_per_run -> - update_config(hd(Args), [{tc_logfile,MinorName}]); - PrivDirMode -> - %% create unique private directory for test case - RunDir = filename:dirname(MinorName), - Ext = - if Num == 0 -> - Int = erlang:unique_integer([positive,monotonic]), - lists:flatten(io_lib:format(".cfg.~w", [Int])); - true -> - lists:flatten(io_lib:format(".~w", [Num])) - end, - PrivDir = filename:join(RunDir, ?priv_dir) ++ Ext, - if PrivDirMode == auto_per_tc -> - ok = file:make_dir(PrivDir); - PrivDirMode == manual_per_tc -> - ok - end, - update_config(hd(Args), [{priv_dir,PrivDir++"/"}, - {tc_logfile,MinorName}]) - end, - GrName = get_name(Mode), - test_server_sup:framework_call(report, - [tc_start,{{Mod,{Func,GrName}}, - MinorName}]), - - {ok,Cwd} = file:get_cwd(), - Args2Print = if is_list(UpdatedArgs) -> - lists:keydelete(tc_group_result, 1, UpdatedArgs); - true -> - UpdatedArgs - end, - if RunInit == skip_init -> - print_props(get_props(Mode)); - true -> - ok - end, - print(minor, "Config value:\n\n ~tp\n", [Args2Print]), - print(minor, "Current directory is ~tp\n", [Cwd]), - - GrNameStr = case GrName of - undefined -> ""; - Name -> cast_to_list(Name) - end, - print(major, "=started ~s", [lists:flatten(timestamp_get(""))]), - {{Col0,Col1},Style} = get_font_style((RunInit==run_init), Mode), - TR = xhtml("<tr valign=\"top\">", ["<tr class=\"",odd_or_even(),"\">"]), - EncMinorBase = uri_encode(MinorBase), - print(html, TR ++ "<td>" ++ Col0 ++ "~ts" ++ Col1 ++ "</td>" - "<td>" ++ Col0 ++ "~w" ++ Col1 ++ "</td>" - "<td>" ++ Col0 ++ 
"~ts" ++ Col1 ++ "</td>" - "<td><a href=\"~ts\">~w</a></td>" - "<td><a href=\"~ts#top\"><</a> <a href=\"~ts#end\">></a></td>", - [num2str(Num),fw_name(Mod),GrNameStr,EncMinorBase,Func, - EncMinorBase,EncMinorBase]), - - do_unless_parallel(Main, fun erlang:yield/0), - - %% run the test case - {Result,DetectedFail,ProcsBefore,ProcsAfter} = - run_test_case_apply(Num, Mod, Func, [UpdatedArgs], GrName, - RunInit, TimetrapData), - {Time,RetVal,Loc,Opts,Comment} = - case Result of - Normal={_Time,_RetVal,_Loc,_Opts,_Comment} -> Normal; - {died,DReason,DLoc,DCmt} -> {died,DReason,DLoc,[],DCmt} - end, - - print(minor, "<a name=\"end\"></a>", [], internal_raw), - print(minor, "\n", [], internal_raw), - print_timestamp(minor, "Ended at "), - print(major, "=ended ~s", [lists:flatten(timestamp_get(""))]), - - do_unless_parallel(Main, fun() -> file:set_cwd(filename:dirname(TSDir)) end), - - %% call the appropriate progress function clause to print the results to log - Status = - case {Time,RetVal} of - {died,{timetrap_timeout,TimetrapTimeout}} -> - progress(failed, Num, Mod, Func, GrName, Loc, - timetrap_timeout, TimetrapTimeout, Comment, Style); - {died,Reason} -> - progress(failed, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {_,{'EXIT',{Skip,Reason}}} when Skip==skip; Skip==skipped; - Skip==auto_skip -> - progress(skip, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {_,{'EXIT',_Pid,{Skip,Reason}}} when Skip==skip; Skip==skipped -> - progress(skip, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {_,{'EXIT',_Pid,Reason}} -> - progress(failed, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {_,{'EXIT',Reason}} -> - progress(failed, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {_,{Fail,Reason}} when Fail =:= fail; Fail =:= failed -> - progress(failed, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {_,Reason={auto_skip,_Why}} -> - progress(skip, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {_,{Skip,Reason}} when Skip==skip; Skip==skipped -> - progress(skip, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style); - {Time,RetVal} -> - case DetectedFail of - [] -> - progress(ok, Num, Mod, Func, GrName, Loc, RetVal, - Time, Comment, Style); - - Reason -> - progress(failed, Num, Mod, Func, GrName, Loc, Reason, - Time, Comment, Style) - end - end, - %% if the test case was executed sequentially, this updates the - %% status count on the main process (status of parallel test cases - %% is updated later by the handle_test_case_io_and_status/0 function) - case {RunInit,Status} of - {skip_init,_} -> % conf doesn't count - ok; - {_,ok} -> - put(test_server_ok, get(test_server_ok)+1); - {_,failed} -> - put(test_server_failed, get(test_server_failed)+1); - {_,skip} -> - {US,AS} = get(test_server_skipped), - put(test_server_skipped, {US+1,AS}); - {_,auto_skip} -> - {US,AS} = get(test_server_skipped), - put(test_server_skipped, {US,AS+1}) - end, - %% only if test case execution is sequential do we care about the - %% remaining processes and slave nodes count - case self() of - Main -> - case test_server_sup:framework_call(warn, [processes], true) of - true -> - if ProcsBefore < ProcsAfter -> - print(minor, - "WARNING: ~w more processes in system after test case", - [ProcsAfter-ProcsBefore]); - ProcsBefore > ProcsAfter -> - print(minor, - "WARNING: ~w less processes in system after test case", - [ProcsBefore-ProcsAfter]); - true -> ok - end; - false -> - ok - end, - case 
test_server_sup:framework_call(warn, [nodes], true) of - true -> - case catch controller_call(kill_slavenodes) of - {'EXIT',_} = Exit -> - print(minor, - "WARNING: There might be slavenodes left in the" - " system. I tried to kill them, but I failed: ~p\n", - [Exit]); - [] -> ok; - List -> - print(minor, "WARNING: ~w slave nodes in system after test"++ - "case. Tried to killed them.~n"++ - " Names:~p", - [length(List),List]) - end; - false -> - ok - end; - _ -> - ok - end, - %% if the test case was executed sequentially, this updates the execution - %% time count on the main process (adding execution time of parallel test - %% case groups is done in run_test_cases_loop/4) - if is_number(Time) -> - put(test_server_total_time, get(test_server_total_time)+Time); - true -> - ok - end, - test_server_sup:check_new_crash_dumps(), - - %% if io is being buffered, send finished message - %% (no matter if case runs on parallel or main process) - case is_io_buffered() of - false -> - ok; - true -> - test_server_io:end_transaction(), - Main ! {finished,Ref,self(),Num,Mod,Func, - ?mod_result(Status),{Time,RetVal,Opts}} - end, - {Time,RetVal,Opts}. - - -%%-------------------------------------------------------------------- -%% various help functions - -%% Call Action if we are running on the main process (not parallel). -do_unless_parallel(Main, Action) when is_function(Action, 0) -> - case self() of - Main -> Action(); - _ -> ok - end. - -num2str(0) -> ""; -num2str(N) -> integer_to_list(N). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% progress(Result, CaseNum, Mod, Func, Location, Reason, Time, -%% Comment, TimeFormat) -> Result -%% -%% Prints the result of the test case to log file. -%% Note: Strings that are to be written to the minor log must -%% be prefixed with "=== " here, or the indentation will be wrong. 
- -progress(skip, CaseNum, Mod, Func, GrName, Loc, Reason, Time, - Comment, {St0,St1}) -> - {Reason1,{Color,Ret,ReportTag}} = - if_auto_skip(Reason, - fun() -> {?auto_skip_color,auto_skip,auto_skipped} end, - fun() -> {?user_skip_color,skip,skipped} end), - print(major, "=result ~w: ~p", [ReportTag,Reason1]), - print(1, "*** SKIPPED ~ts ***", - [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]), - test_server_sup:framework_call(report, [tc_done,{Mod,{Func,GrName}, - {ReportTag,Reason1}}]), - ReasonStr = reason_to_string(Reason1), - ReasonStr1 = lists:flatten([string:strip(S,left) || - S <- string:tokens(ReasonStr,[$\n])]), - ReasonStr2 = - if length(ReasonStr1) > 80 -> - string:substr(ReasonStr1, 1, 77) ++ "..."; - true -> - ReasonStr1 - end, - Comment1 = case Comment of - "" -> ""; - _ -> xhtml("<br>(","<br />(") ++ to_string(Comment) ++ ")" - end, - print(html, - "<td>" ++ St0 ++ "~.3fs" ++ St1 ++ "</td>" - "<td><font color=\"~ts\">SKIPPED</font></td>" - "<td>~ts~ts</td></tr>\n", - [Time,Color,ReasonStr2,Comment1]), - FormatLoc = test_server_sup:format_loc(Loc), - print(minor, "=== Location: ~ts", [FormatLoc]), - print(minor, "=== Reason: ~ts", [ReasonStr1]), - Ret; - -progress(failed, CaseNum, Mod, Func, GrName, Loc, timetrap_timeout, T, - Comment0, {St0,St1}) -> - print(major, "=result failed: timeout, ~p", [Loc]), - print(1, "*** FAILED ~ts ***", - [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]), - test_server_sup:framework_call(report, - [tc_done,{Mod,{Func,GrName}, - {failed,timetrap_timeout}}]), - FormatLastLoc = test_server_sup:format_loc(get_last_loc(Loc)), - ErrorReason = io_lib:format("{timetrap_timeout,~ts}", [FormatLastLoc]), - Comment = - case Comment0 of - "" -> "<font color=\"red\">" ++ ErrorReason ++ "</font>"; - _ -> "<font color=\"red\">" ++ ErrorReason ++ - xhtml("</font><br>","</font><br />") ++ to_string(Comment0) - end, - print(html, - "<td>" ++ St0 ++ "~.3fs" ++ St1 ++ "</td>" - "<td><font color=\"red\">FAILED</font></td>" - "<td>~ts</td></tr>\n", - [T/1000,Comment]), - FormatLoc = test_server_sup:format_loc(Loc), - print(minor, "=== Location: ~ts", [FormatLoc]), - print(minor, "=== Reason: timetrap timeout", []), - failed; - -progress(failed, CaseNum, Mod, Func, GrName, Loc, {testcase_aborted,Reason}, _T, - Comment0, {St0,St1}) -> - print(major, "=result failed: testcase_aborted, ~p", [Loc]), - print(1, "*** FAILED ~ts ***", - [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]), - test_server_sup:framework_call(report, - [tc_done,{Mod,{Func,GrName}, - {failed,testcase_aborted}}]), - FormatLastLoc = test_server_sup:format_loc(get_last_loc(Loc)), - ErrorReason = io_lib:format("{testcase_aborted,~ts}", [FormatLastLoc]), - Comment = - case Comment0 of - "" -> "<font color=\"red\">" ++ ErrorReason ++ "</font>"; - _ -> "<font color=\"red\">" ++ ErrorReason ++ - xhtml("</font><br>","</font><br />") ++ to_string(Comment0) - end, - print(html, - "<td>" ++ St0 ++ "died" ++ St1 ++ "</td>" - "<td><font color=\"red\">FAILED</font></td>" - "<td>~ts</td></tr>\n", - [Comment]), - FormatLoc = test_server_sup:format_loc(Loc), - print(minor, "=== Location: ~ts", [FormatLoc]), - print(minor, "=== Reason: {testcase_aborted,~p}", [Reason]), - failed; - -progress(failed, CaseNum, Mod, Func, GrName, unknown, Reason, Time, - Comment0, {St0,St1}) -> - print(major, "=result failed: ~p, ~w", [Reason,unknown_location]), - print(1, "*** FAILED ~ts ***", - [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]), - test_server_sup:framework_call(report, 
[tc_done,{Mod,{Func,GrName}, - {failed,Reason}}]), - TimeStr = io_lib:format(if is_float(Time) -> "~.3fs"; - true -> "~w" - end, [Time]), - ErrorReason = lists:flatten(io_lib:format("~p", [Reason])), - ErrorReason1 = lists:flatten([string:strip(S,left) || - S <- string:tokens(ErrorReason,[$\n])]), - ErrorReason2 = - if length(ErrorReason1) > 63 -> - string:substr(ErrorReason1, 1, 60) ++ "..."; - true -> - ErrorReason1 - end, - Comment = - case Comment0 of - "" -> "<font color=\"red\">" ++ ErrorReason2 ++ "</font>"; - _ -> "<font color=\"red\">" ++ ErrorReason2 ++ - xhtml("</font><br>","</font><br />") ++ - to_string(Comment0) - end, - print(html, - "<td>" ++ St0 ++ "~ts" ++ St1 ++ "</td>" - "<td><font color=\"red\">FAILED</font></td>" - "<td>~ts</td></tr>\n", - [TimeStr,Comment]), - print(minor, "=== Location: ~w", [unknown]), - {FStr,FormattedReason} = format_exception(Reason), - print(minor, "=== Reason: " ++ FStr, [FormattedReason]), - failed; - -progress(failed, CaseNum, Mod, Func, GrName, Loc, Reason, Time, - Comment0, {St0,St1}) -> - {LocMaj,LocMin} = if Func == error_in_suite -> - case get_fw_mod(undefined) of - Mod -> {unknown_location,unknown}; - _ -> {Loc,Loc} - end; - true -> {Loc,Loc} - end, - print(major, "=result failed: ~p, ~p", [Reason,LocMaj]), - print(1, "*** FAILED ~ts ***", - [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]), - test_server_sup:framework_call(report, [tc_done,{Mod,{Func,GrName}, - {failed,Reason}}]), - TimeStr = io_lib:format(if is_float(Time) -> "~.3fs"; - true -> "~w" - end, [Time]), - Comment = - case Comment0 of - "" -> ""; - _ -> xhtml("<br>","<br />") ++ to_string(Comment0) - end, - FormatLastLoc = test_server_sup:format_loc(get_last_loc(LocMaj)), - print(html, - "<td>" ++ St0 ++ "~ts" ++ St1 ++ "</td>" - "<td><font color=\"red\">FAILED</font></td>" - "<td><font color=\"red\">~ts</font>~ts</td></tr>\n", - [TimeStr,FormatLastLoc,Comment]), - FormatLoc = test_server_sup:format_loc(LocMin), - print(minor, "=== Location: ~ts", [FormatLoc]), - {FStr,FormattedReason} = format_exception(Reason), - print(minor, "=== Reason: " ++ FStr, [FormattedReason]), - failed; - -progress(ok, _CaseNum, Mod, Func, GrName, _Loc, RetVal, Time, - Comment0, {St0,St1}) -> - print(minor, "successfully completed test case", []), - test_server_sup:framework_call(report, [tc_done,{Mod,{Func,GrName},ok}]), - Comment = - case RetVal of - {comment,RetComment} -> - String = to_string(RetComment), - HtmlCmt = test_server_sup:framework_call(format_comment, - [String], - String), - print(major, "=result ok: ~ts", [String]), - "<td>" ++ HtmlCmt ++ "</td>"; - _ -> - print(major, "=result ok", []), - case Comment0 of - "" -> "<td></td>"; - _ -> "<td>" ++ to_string(Comment0) ++ "</td>" - end - end, - print(major, "=elapsed ~p", [Time]), - print(html, - "<td>" ++ St0 ++ "~.3fs" ++ St1 ++ "</td>" - "<td><font color=\"green\">Ok</font></td>" - "~ts</tr>\n", - [Time,Comment]), - print(minor, "=== Returned value: ~p", [RetVal]), - ok. - -%%-------------------------------------------------------------------- -%% various help functions - -get_fw_mod(Mod) -> - case get(test_server_framework) of - undefined -> - case os:getenv("TEST_SERVER_FRAMEWORK") of - FW when FW =:= false; FW =:= "undefined" -> - Mod; - FW -> - list_to_atom(FW) - end; - '$none' -> Mod; - FW -> FW - end. 
- -fw_name(?MODULE) -> - test_server; -fw_name(Mod) -> - case get(test_server_framework_name) of - undefined -> - case get_fw_mod(undefined) of - undefined -> - Mod; - Mod -> - case os:getenv("TEST_SERVER_FRAMEWORK_NAME") of - FWName when FWName =:= false; FWName =:= "undefined" -> - Mod; - FWName -> - list_to_atom(FWName) - end; - _ -> - Mod - end; - '$none' -> - Mod; - FWName -> - case get_fw_mod(Mod) of - Mod -> FWName; - _ -> Mod - end - end. - -if_auto_skip(Reason={failed,{_,init_per_testcase,_}}, True, _False) -> - {Reason,True()}; -if_auto_skip({skip,Reason={failed,{_,init_per_testcase,_}}}, True, _False) -> - {Reason,True()}; -if_auto_skip({auto_skip,Reason}, True, _False) -> - {Reason,True()}; -if_auto_skip(Reason, _True, False) -> - {Reason,False()}. - -update_skip_counters({_T,Pat,_Opts}, {US,AS}) -> - {_,Result} = if_auto_skip(Pat, fun() -> {US,AS+1} end, fun() -> {US+1,AS} end), - Result; -update_skip_counters(Pat, {US,AS}) -> - {_,Result} = if_auto_skip(Pat, fun() -> {US,AS+1} end, fun() -> {US+1,AS} end), - Result. - -get_info_str(Mod,Func, 0, _Cases) -> - io_lib:format("~w", [{Mod,Func}]); -get_info_str(_Mod,_Func, CaseNum, unknown) -> - "test case " ++ integer_to_list(CaseNum); -get_info_str(_Mod,_Func, CaseNum, Cases) -> - "test case " ++ integer_to_list(CaseNum) ++ - " of " ++ integer_to_list(Cases). - -print_if_known(Known, {SK,AK}, {SU,AU}) -> - {S,A} = if Known == unknown -> {SU,AU}; - true -> {SK,AK} - end, - io_lib:format(S, A). - -to_string(Term) when is_list(Term) -> - case (catch io_lib:format("~ts", [Term])) of - {'EXIT',_} -> lists:flatten(io_lib:format("~p", [Term])); - String -> lists:flatten(String) - end; -to_string(Term) -> - lists:flatten(io_lib:format("~p", [Term])). - -get_last_loc(Loc) when is_tuple(Loc) -> - Loc; -get_last_loc([Loc|_]) when is_tuple(Loc) -> - [Loc]; -get_last_loc(Loc) -> - Loc. - -reason_to_string({failed,{_,FailFunc,bad_return}}) -> - atom_to_list(FailFunc) ++ " bad return value"; -reason_to_string({failed,{_,FailFunc,{timetrap_timeout,_}}}) -> - atom_to_list(FailFunc) ++ " timed out"; -reason_to_string(FWInitFail = {failed,{_CB,init_tc,_Reason}}) -> - to_string(FWInitFail); -reason_to_string({failed,{_,FailFunc,_}}) -> - atom_to_list(FailFunc) ++ " failed"; -reason_to_string(Other) -> - to_string(Other). - -%get_font_style(Prop) -> -% {Col,St0,St1} = get_font_style1(Prop), -% {{"<font color="++Col++">","</font>"}, -% {"<font color="++Col++">"++St0,St1++"</font>"}}. - -get_font_style(NormalCase, Mode) -> - Prop = if not NormalCase -> - default; - true -> - case check_prop(parallel, Mode) of - false -> - case check_prop(sequence, Mode) of - false -> - default; - _ -> - sequence - end; - _ -> - parallel - end - end, - {Col,St0,St1} = get_font_style1(Prop), - {{"<font color="++Col++">","</font>"}, - {"<font color="++Col++">"++St0,St1++"</font>"}}. - -get_font_style1(parallel) -> - {"\"darkslategray\"","<i>","</i>"}; -get_font_style1(sequence) -> -% {"\"darkolivegreen\"","",""}; - {"\"saddlebrown\"","",""}; -get_font_style1(default) -> - {"\"black\"","",""}. -%%get_font_style1(skipped) -> -%% {"\"lightgray\"","",""}. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% format_exception({Error,Stack}) -> {CtrlSeq,Term} -%% -%% The default behaviour is that error information gets formatted -%% (like in the erlang shell) before printed to the minor log file. -%% The framework application can switch this feature off by setting -%% *its* application environment variable 'format_exception' to false. 
-%% It is also possible to switch formatting off by starting the -%% test_server node with init argument 'test_server_format_exception' -%% set to false. - -format_exception(Reason={_Error,Stack}) when is_list(Stack) -> - case get_fw_mod(undefined) of - undefined -> - case application:get_env(test_server, format_exception) of - {ok,false} -> - {"~p",Reason}; - _ -> - do_format_exception(Reason) - end; - FW -> - case application:get_env(FW, format_exception) of - {ok,false} -> - {"~p",Reason}; - _ -> - do_format_exception(Reason) - end - end; -format_exception(Error) -> - format_exception({Error,[]}). - -do_format_exception(Reason={Error,Stack}) -> - StackFun = fun(_, _, _) -> false end, - PF = fun(Term, I) -> - io_lib:format("~." ++ integer_to_list(I) ++ "p", [Term]) - end, - case catch lib:format_exception(1, error, Error, Stack, StackFun, PF) of - {'EXIT',_} -> - {"~p",Reason}; - Formatted -> - Formatted1 = re:replace(Formatted, "exception error: ", "", [{return,list}]), - {"~ts",lists:flatten(Formatted1)} - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit, -%% TimetrapData) -> -%% {{Time,RetVal,Loc,Opts,Comment},DetectedFail,ProcessesBefore,ProcessesAfter} | -%% {{died,Reason,unknown,Comment},DetectedFail,ProcessesBefore,ProcessesAfter} -%% Name = atom() -%% Time = float() (seconds) -%% RetVal = term() -%% Loc = term() -%% Comment = string() -%% Reason = term() -%% DetectedFail = [{File,Line}] -%% ProcessesBefore = ProcessesAfter = integer() -%% - -run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit, - TimetrapData) -> - test_server:run_test_case_apply({CaseNum,Mod,Func,Args,Name,RunInit, - TimetrapData}). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% print(Detail, Format, Args) -> ok -%% Detail = integer() -%% Format = string() -%% Args = [term()] -%% -%% Just like io:format, except that depending on the Detail value, the output -%% is directed to console, major and/or minor log files. - -print(Detail, Format) -> - print(Detail, Format, []). - -print(Detail, Format, Args) -> - print(Detail, Format, Args, internal). - -print(Detail, Format, Args, Printer) -> - Msg = io_lib:format(Format, Args), - print_or_buffer(Detail, Msg, Printer). - -print_or_buffer(Detail, Msg, Printer) -> - test_server_gl:print(group_leader(), Detail, Msg, Printer). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% print_timestamp(Detail, Leader) -> ok -%% -%% Prints Leader followed by a time stamp (date and time). Depending on -%% the Detail value, the output is directed to console, major and/or minor -%% log files. - -print_timestamp(Detail, Leader) -> - print(Detail, timestamp_get(Leader), []). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% print_who(Host, User) -> ok -%% -%% Logs who runs the suite. - -print_who(Host, User) -> - UserStr = case User of - "" -> ""; - _ -> " by " ++ User - end, - print(html, "Run~ts on ~ts", [UserStr,Host]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% format(Format) -> IoLibReturn -%% format(Detail, Format) -> IoLibReturn -%% format(Format, Args) -> IoLibReturn -%% format(Detail, Format, Args) -> IoLibReturn -%% -%% Detail = integer() -%% Format = string() -%% Args = [term(),...] -%% IoLibReturn = term() -%% -%% Logs the Format string and Args, similar to io:format/1/2 etc. If -%% Detail is not specified, the default detail level (which is 50) is used. 
-%% Which log files the string will be logged in depends on the thresholds -%% set with set_levels/3. Typically with default detail level, only the -%% minor log file is used. - -format(Format) -> - format(minor, Format, []). - -format(major, Format) -> - format(major, Format, []); -format(minor, Format) -> - format(minor, Format, []); -format(Detail, Format) when is_integer(Detail) -> - format(Detail, Format, []); -format(Format, Args) -> - format(minor, Format, Args). - -format(Detail, Format, Args) -> - Str = - case catch io_lib:format(Format, Args) of - {'EXIT',_} -> - io_lib:format("illegal format; ~p with args ~p.\n", - [Format,Args]); - Valid -> Valid - end, - print_or_buffer(Detail, Str, self()). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% xhtml(BasicHtml, XHtml) -> BasicHtml | XHtml -%% -xhtml(HTML, XHTML) -> - case get(basic_html) of - true -> HTML; - _ -> XHTML - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% odd_or_even() -> "odd" | "even" -%% -odd_or_even() -> - case get(odd_or_even) of - even -> - put(odd_or_even, odd), - "even"; - _ -> - put(odd_or_even, even), - "odd" - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timestamp_filename_get(Leader) -> string() -%% Leader = string() -%% -%% Returns a string consisting of Leader concatenated with the current -%% date and time. The resulting string is suitable as a filename. -timestamp_filename_get(Leader) -> - timestamp_get_internal(Leader, - "~ts~w-~2.2.0w-~2.2.0w_~2.2.0w.~2.2.0w.~2.2.0w"). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timestamp_get(Leader) -> string() -%% Leader = string() -%% -%% Returns a string consisting of Leader concatenated with the current -%% date and time. The resulting string is suitable for display. -timestamp_get(Leader) -> - timestamp_get_internal(Leader, - "~ts~w-~2.2.0w-~2.2.0w ~2.2.0w:~2.2.0w:~2.2.0w"). - -timestamp_get_internal(Leader, Format) -> - {YY,MM,DD,H,M,S} = time_get(), - io_lib:format(Format, [Leader,YY,MM,DD,H,M,S]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% time_get() -> {YY,MM,DD,H,M,S} -%% YY = integer() -%% MM = integer() -%% DD = integer() -%% H = integer() -%% M = integer() -%% S = integer() -%% -%% Returns the current Year,Month,Day,Hours,Minutes,Seconds. -%% The function checks that the date doesn't wrap while calling -%% getting the time. -time_get() -> - {YY,MM,DD} = date(), - {H,M,S} = time(), - case date() of - {YY,MM,DD} -> - {YY,MM,DD,H,M,S}; - _NewDay -> - %% date changed between call to date() and time(), try again - time_get() - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% make_config(Config) -> NewConfig -%% Config = [{Key,Value},...] -%% NewConfig = [{Key,Value},...] -%% -%% Creates a configuration list (currently returns it's input) - -make_config(Initial) -> - Initial. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% update_config(Config, Update) -> NewConfig -%% Config = [{Key,Value},...] -%% Update = [{Key,Value},...] | {Key,Value} -%% NewConfig = [{Key,Value},...] -%% -%% Adds or replaces the key-value pairs in config with those in update. -%% Returns the updated list. 
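%%
%% Illustrative example (not part of the original source): given the
%% semantics described above, a call such as
%%
%%   update_config([{data_dir,"/tmp/d"},{nodes,2}], [{nodes,3},{peer,n1}])
%%
%% would be expected to return [{peer,n1},{data_dir,"/tmp/d"},{nodes,3}],
%% since existing keys are replaced in place and new keys are prepended.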
- -update_config(Config, {Key,Val}) -> - case lists:keymember(Key, 1, Config) of - true -> - lists:keyreplace(Key, 1, Config, {Key,Val}); - false -> - [{Key,Val}|Config] - end; -update_config(Config, [Assoc|Assocs]) -> - NewConfig = update_config(Config, Assoc), - update_config(NewConfig, Assocs); -update_config(Config, []) -> - Config. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% collect_cases(CurMod, TopCase, SkipList) -> -%% BasicCaseList | {error,Reason} -%% -%% CurMod = atom() -%% TopCase = term() -%% SkipList = [term(),...] -%% BasicCaseList = [term(),...] -%% -%% Parses the given test goal(s) in TopCase, and transforms them to a -%% simple list of test cases to call, when executing the test suite. -%% -%% CurMod is the "current" module, that is, the module the last instruction -%% was read from. May be be set to 'none' initially. -%% -%% SkipList is the list of test cases to skip and requirements to deny. -%% -%% The BasicCaseList is built out of TopCase, which may be any of the -%% following terms: -%% -%% [] Nothing is added -%% List list() The list is decomposed, and each element is -%% treated according to this table -%% Case atom() CurMod:Case(suite) is called -%% {module,Case} CurMod:Case(suite) is called -%% {Module,Case} Module:Case(suite) is called -%% {module,Module,Case} Module:Case(suite) is called -%% {module,Module,Case,Args} Module:Case is called with Args as arguments -%% {dir,Dir} All modules *_SUITE in the named directory -%% are listed, and each Module:all(suite) is called -%% {dir,Dir,Pattern} All modules <Pattern>_SUITE in the named dir -%% are listed, and each Module:all(suite) is called -%% {conf,InitMF,Cases,FinMF} -%% {conf,Props,InitMF,Cases,FinMF} -%% InitMF is placed in the BasicCaseList, then -%% Cases is treated according to this table, then -%% FinMF is placed in the BasicCaseList. InitMF -%% and FinMF are configuration manipulation -%% functions. See below. -%% {make,InitMFA,Cases,FinMFA} -%% InitMFA is placed in the BasicCaseList, then -%% Cases is treated according to this table, then -%% FinMFA is placed in the BasicCaseList. InitMFA -%% and FinMFA are make/unmake functions. If InitMFA -%% fails, Cases are not run. -%% -%% When a function is called, above, it means that the function is invoked -%% and the return is expected to be: -%% -%% [] Leaf case -%% {req,ReqList} Kept for backwards compatibility - same as [] -%% {req,ReqList,Cases} Kept for backwards compatibility - -%% Cases parsed recursively with collect_cases/3 -%% Cases (list) Recursively parsed with collect_cases/3 -%% -%% Leaf cases are added to the BasicCaseList as Module:Case(Config). Each -%% case is checked against the SkipList. If present, a skip instruction -%% is inserted instead, which only prints the case name and the reason -%% why the case was skipped in the log files. -%% -%% Configuration manipulation functions are called with the current -%% configuration list as only argument, and are expected to return a new -%% configuration list. Such a pair of function may, for example, start a -%% server and stop it after a serie of test cases. -%% -%% SkipCases is expected to be in the format: -%% -%% Other Recursively parsed with collect_cases/3 -%% {Mod,Comment} Skip Mod, with Comment -%% {Mod,Funcs,Comment} Skip listed functions in Mod with Comment -%% {Mod,Func,Comment} Skip named function in Mod with Comment -%% --record(cc, {mod, % current module - skip}). 
% skip list - -collect_all_cases(Top, Skip) when is_list(Skip) -> - Result = - case collect_cases(Top, #cc{mod=[],skip=Skip}, []) of - {ok,Cases,_St} -> Cases; - Other -> Other - end, - Result. - - -collect_cases([], St, _) -> {ok,[],St}; -collect_cases([Case|Cs0], St0, Mode) -> - case collect_cases(Case, St0, Mode) of - {ok,FlatCases1,St1} -> - case collect_cases(Cs0, St1, Mode) of - {ok,FlatCases2,St} -> - {ok,FlatCases1 ++ FlatCases2,St}; - {error,_Reason} = Error -> Error - end; - {error,_Reason} = Error -> Error - end; - - -collect_cases({module,Case}, St, Mode) when is_atom(Case), is_atom(St#cc.mod) -> - collect_case({St#cc.mod,Case}, St, Mode); -collect_cases({module,Mod,Case}, St, Mode) -> - collect_case({Mod,Case}, St, Mode); -collect_cases({module,Mod,Case,Args}, St, Mode) -> - collect_case({Mod,Case,Args}, St, Mode); - -collect_cases({dir,SubDir}, St, Mode) -> - collect_files(SubDir, "*_SUITE", St, Mode); -collect_cases({dir,SubDir,Pattern}, St, Mode) -> - collect_files(SubDir, Pattern++"*", St, Mode); - -collect_cases({conf,InitF,CaseList,FinMF}, St, Mode) when is_atom(InitF) -> - collect_cases({conf,[],{St#cc.mod,InitF},CaseList,FinMF}, St, Mode); -collect_cases({conf,InitMF,CaseList,FinF}, St, Mode) when is_atom(FinF) -> - collect_cases({conf,[],InitMF,CaseList,{St#cc.mod,FinF}}, St, Mode); -collect_cases({conf,InitMF,CaseList,FinMF}, St0, Mode) -> - collect_cases({conf,[],InitMF,CaseList,FinMF}, St0, Mode); -collect_cases({conf,Props,InitF,CaseList,FinMF}, St, Mode) when is_atom(InitF) -> - case init_props(Props) of - {error,_} -> - {ok,[],St}; - Props1 -> - collect_cases({conf,Props1,{St#cc.mod,InitF},CaseList,FinMF}, - St, Mode) - end; -collect_cases({conf,Props,InitMF,CaseList,FinF}, St, Mode) when is_atom(FinF) -> - case init_props(Props) of - {error,_} -> - {ok,[],St}; - Props1 -> - collect_cases({conf,Props1,InitMF,CaseList,{St#cc.mod,FinF}}, - St, Mode) - end; -collect_cases({conf,Props,InitMF,CaseList,FinMF} = Conf, St, Mode) -> - case init_props(Props) of - {error,_} -> - {ok,[],St}; - Props1 -> - Ref = make_ref(), - Skips = St#cc.skip, - Props2 = [{suite,St#cc.mod} | lists:delete(suite,Props1)], - Mode1 = [{Ref,Props2,undefined} | Mode], - case in_skip_list({St#cc.mod,Conf}, Skips) of - {true,Comment} -> % conf init skipped - {ok,[{skip_case,{conf,Ref,InitMF,Comment},Mode1} | - [] ++ [{conf,Ref,[],FinMF}]],St}; - {true,Name,Comment} when is_atom(Name) -> % all cases skipped - case collect_cases(CaseList, St, Mode1) of - {ok,[],_St} = Empty -> - Empty; - {ok,FlatCases,St1} -> - Cases2Skip = FlatCases ++ [{conf,Ref, - keep_name(Props1), - FinMF}], - Skipped = skip_cases_upto(Ref, Cases2Skip, Comment, - conf, Mode1, skip_case), - {ok,[{skip_case,{conf,Ref,InitMF,Comment},Mode1} | - Skipped],St1}; - {error,_Reason} = Error -> - Error - end; - {true,ToSkip,_} when is_list(ToSkip) -> % some cases skipped - case collect_cases(CaseList, - St#cc{skip=ToSkip++Skips}, Mode1) of - {ok,[],_St} = Empty -> - Empty; - {ok,FlatCases,St1} -> - {ok,[{conf,Ref,Props1,InitMF} | - FlatCases ++ [{conf,Ref, - keep_name(Props1), - FinMF}]],St1#cc{skip=Skips}}; - {error,_Reason} = Error -> - Error - end; - false -> - case collect_cases(CaseList, St, Mode1) of - {ok,[],_St} = Empty -> - Empty; - {ok,FlatCases,St1} -> - {ok,[{conf,Ref,Props1,InitMF} | - FlatCases ++ [{conf,Ref, - keep_name(Props1), - FinMF}]],St1}; - {error,_Reason} = Error -> - Error - end - end - end; - -collect_cases({make,InitMFA,CaseList,FinMFA}, St0, Mode) -> - case collect_cases(CaseList, St0, Mode) of - {ok,[],_St} = 
Empty -> Empty; - {ok,FlatCases,St} -> - Ref = make_ref(), - {ok,[{make,Ref,InitMFA}|FlatCases ++ - [{make,Ref,FinMFA}]],St}; - {error,_Reason} = Error -> Error - end; - -collect_cases({Module, Cases}, St, Mode) when is_list(Cases) -> - case (catch collect_case(Cases, St#cc{mod=Module}, [], Mode)) of - Result = {ok,_,_} -> - Result; - Other -> - {error,Other} - end; - -collect_cases({_Mod,_Case}=Spec, St, Mode) -> - collect_case(Spec, St, Mode); - -collect_cases({_Mod,_Case,_Args}=Spec, St, Mode) -> - collect_case(Spec, St, Mode); -collect_cases(Case, St, Mode) when is_atom(Case), is_atom(St#cc.mod) -> - collect_case({St#cc.mod,Case}, St, Mode); -collect_cases(Other, St, _Mode) -> - {error,{bad_subtest_spec,St#cc.mod,Other}}. - -collect_case({Mod,{conf,_,_,_,_}=Conf}, St, Mode) -> - collect_case_invoke(Mod, Conf, [], St, Mode); - -collect_case(MFA, St, Mode) -> - case in_skip_list(MFA, St#cc.skip) of - {true,Comment} when Comment /= make_failed -> - {ok,[{skip_case,{MFA,Comment},Mode}],St}; - _ -> - case MFA of - {Mod,Case} -> collect_case_invoke(Mod, Case, MFA, St, Mode); - {_Mod,_Case,_Args} -> {ok,[MFA],St} - end - end. - -collect_case([], St, Acc, _Mode) -> - {ok, Acc, St}; - -collect_case([Case | Cases], St, Acc, Mode) -> - {ok, FlatCases, NewSt} = collect_case({St#cc.mod, Case}, St, Mode), - collect_case(Cases, NewSt, Acc ++ FlatCases, Mode). - -collect_case_invoke(Mod, Case, MFA, St, Mode) -> - case get_fw_mod(undefined) of - undefined -> - case catch apply(Mod, Case, [suite]) of - {'EXIT',_} -> - {ok,[MFA],St}; - Suite -> - collect_subcases(Mod, Case, MFA, St, Suite, Mode) - end; - _ -> - Suite = test_server_sup:framework_call(get_suite, - [Mod,Case], - []), - collect_subcases(Mod, Case, MFA, St, Suite, Mode) - end. - -collect_subcases(Mod, Case, MFA, St, Suite, Mode) -> - case Suite of - [] when Case == all -> {ok,[],St}; - [] when element(1, Case) == conf -> {ok,[],St}; - [] -> {ok,[MFA],St}; -%%%! --- START Kept for backwards compatibility --- -%%%! Requirements are not used - {req,ReqList} -> - collect_case_deny(Mod, Case, MFA, ReqList, [], St, Mode); - {req,ReqList,SubCases} -> - collect_case_deny(Mod, Case, MFA, ReqList, SubCases, St, Mode); -%%%! --- END Kept for backwards compatibility --- - {Skip,Reason} when Skip==skip; Skip==skipped -> - {ok,[{skip_case,{MFA,Reason},Mode}],St}; - {error,Reason} -> - throw(Reason); - SubCases -> - collect_case_subcases(Mod, Case, SubCases, St, Mode) - end. - -collect_case_subcases(Mod, Case, SubCases, St0, Mode) -> - OldMod = St0#cc.mod, - case collect_cases(SubCases, St0#cc{mod=Mod}, Mode) of - {ok,FlatCases,St} -> - {ok,FlatCases,St#cc{mod=OldMod}}; - {error,Reason} -> - {error,{{Mod,Case},Reason}} - end. - -collect_files(Dir, Pattern, St, Mode) -> - {ok,Cwd} = file:get_cwd(), - Dir1 = filename:join(Cwd, Dir), - Wc = filename:join([Dir1,Pattern++"{.erl,"++code:objfile_extension()++"}"]), - case catch filelib:wildcard(Wc) of - {'EXIT', Reason} -> - io:format("Could not collect files: ~p~n", [Reason]), - {error,{collect_fail,Dir,Pattern}}; - Files -> - %% convert to module names and remove duplicates - Mods = lists:foldl(fun(File, Acc) -> - Mod = fullname_to_mod(File), - case lists:member(Mod, Acc) of - true -> Acc; - false -> [Mod | Acc] - end - end, [], Files), - Tests = [{Mod,all} || Mod <- lists:sort(Mods)], - collect_cases(Tests, St, Mode) - end. - -fullname_to_mod(Path) when is_list(Path) -> - %% If this is called with a binary, then we are probably in +fnu - %% mode and have found a beam file with name encoded as latin1. 
We - %% will let this crash since it can not work to load such a module - %% anyway. It should be removed or renamed! - list_to_atom(filename:rootname(filename:basename(Path))). - -collect_case_deny(Mod, Case, MFA, ReqList, SubCases, St, Mode) -> - case {check_deny(ReqList, St#cc.skip),SubCases} of - {{denied,Comment},_SubCases} -> - {ok,[{skip_case,{MFA,Comment},Mode}],St}; - {granted,[]} -> - {ok,[MFA],St}; - {granted,SubCases} -> - collect_case_subcases(Mod, Case, SubCases, St, Mode) - end. - -check_deny([Req|Reqs], DenyList) -> - case check_deny_req(Req, DenyList) of - {denied,_Comment}=Denied -> Denied; - granted -> check_deny(Reqs, DenyList) - end; -check_deny([], _DenyList) -> granted; -check_deny(Req, DenyList) -> check_deny([Req], DenyList). - -check_deny_req({Req,Val}, DenyList) -> - %%io:format("ValCheck ~p=~p in ~p\n", [Req,Val,DenyList]), - case lists:keysearch(Req, 1, DenyList) of - {value,{_Req,DenyVal}} when Val >= DenyVal -> - {denied,io_lib:format("Requirement ~p=~p", [Req,Val])}; - _ -> - check_deny_req(Req, DenyList) - end; -check_deny_req(Req, DenyList) -> - case lists:member(Req, DenyList) of - true -> {denied,io_lib:format("Requirement ~p", [Req])}; - false -> granted - end. - -in_skip_list({Mod,{conf,Props,InitMF,_CaseList,_FinMF}}, SkipList) -> - case in_skip_list(InitMF, SkipList) of - {true,_} = Yes -> - Yes; - _ -> - case proplists:get_value(name, Props) of - undefined -> - false; - Name -> - ToSkip = - lists:flatmap( - fun({M,{conf,SProps,_,SCaseList,_},Cmt}) when - M == Mod -> - case proplists:get_value(name, SProps) of - all -> - [{M,all,Cmt}]; - Name -> - case SCaseList of - all -> - [{M,all,Cmt}]; - _ -> - [{M,F,Cmt} || F <- SCaseList] - end; - _ -> - [] - end; - (_) -> - [] - end, SkipList), - case ToSkip of - [] -> - false; - _ -> - case lists:keysearch(all, 2, ToSkip) of - {value,{_,_,Cmt}} -> {true,Name,Cmt}; - _ -> {true,ToSkip,""} - end - end - end - end; - -in_skip_list({Mod,Func,_Args}, SkipList) -> - in_skip_list({Mod,Func}, SkipList); -in_skip_list({Mod,Func}, [{Mod,Funcs,Comment}|SkipList]) when is_list(Funcs) -> - case lists:member(Func, Funcs) of - true -> - {true,Comment}; - _ -> - in_skip_list({Mod,Func}, SkipList) - end; -in_skip_list({Mod,Func}, [{Mod,Func,Comment}|_SkipList]) -> - {true,Comment}; -in_skip_list({Mod,_Func}, [{Mod,Comment}|_SkipList]) -> - {true,Comment}; -in_skip_list({Mod,Func}, [_|SkipList]) -> - in_skip_list({Mod,Func}, SkipList); -in_skip_list(_, []) -> - false. - -%% remove unnecessary properties -init_props(Props) -> - case get_repeat(Props) of - Repeat = {_RepType,N} when N < 2 -> - if N == 0 -> - {error,{invalid_property,Repeat}}; - true -> - lists:delete(Repeat, Props) - end; - _ -> - Props - end. - -keep_name(Props) -> - lists:filter(fun({name,_}) -> true; - ({suite,_}) -> true; - (_) -> false end, Props). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% Node handling functions %% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% get_target_info() -> #target_info -%% -%% Returns a record containing system information for target - -get_target_info() -> - controller_call(get_target_info). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% start_node(SlaveName, Type, Options) -> -%% {ok, Slave} | {error, Reason} -%% -%% Called by test_server. 
See test_server:start_node/3 for details - -start_node(Name, Type, Options) -> - T = 10 * ?ACCEPT_TIMEOUT * test_server:timetrap_scale_factor(), - format(minor, "Attempt to start ~w node ~p with options ~p", - [Type, Name, Options]), - case controller_call({start_node,Name,Type,Options}, T) of - {{ok,Nodename}, Host, Cmd, Info, Warning} -> - format(minor, - "Successfully started node ~w on ~tp with command: ~ts", - [Nodename, Host, Cmd]), - format(major, "=node_start ~w", [Nodename]), - case Info of - [] -> ok; - _ -> format(minor, Info) - end, - case Warning of - [] -> ok; - _ -> - format(1, Warning), - format(minor, Warning) - end, - {ok, Nodename}; - {fail,{Ret, Host, Cmd}} -> - format(minor, - "Failed to start node ~tp on ~tp with command: ~ts~n" - "Reason: ~p", - [Name, Host, Cmd, Ret]), - {fail,Ret}; - {Ret, undefined, undefined} -> - format(minor, "Failed to start node ~tp: ~p", [Name,Ret]), - Ret; - {Ret, Host, Cmd} -> - format(minor, - "Failed to start node ~tp on ~tp with command: ~ts~n" - "Reason: ~p", - [Name, Host, Cmd, Ret]), - Ret - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% wait_for_node(Node) -> ok | {error,timeout} -%% -%% Wait for a slave/peer node which has been started with -%% the option {wait,false}. This function returns when -%% when the new node has contacted test_server_ctrl again - -wait_for_node(Slave) -> - T = 10000 * test_server:timetrap_scale_factor(), - case catch controller_call({wait_for_node,Slave},T) of - {'EXIT',{timeout,_}} -> {error,timeout}; - ok -> ok - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% is_release_available(Release) -> true | false -%% Release -> string() -%% -%% Test if a release (such as "r10b") is available to be -%% started using start_node/3. - -is_release_available(Release) -> - controller_call({is_release_available,Release}). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% stop_node(Name) -> ok | {error,Reason} -%% -%% Clean up - test_server will stop this node - -stop_node(Slave) -> - controller_call({stop_node,Slave}). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% DEBUGGER INTERFACE %% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -i() -> - hformat("Pid", "Initial Call", "Current Function", "Reducts", "Msgs"), - Line=lists:duplicate(27, "-"), - hformat(Line, Line, Line, Line, Line), - display_info(processes(), 0, 0). - -p(A,B,C) -> - pinfo(ts_pid(A,B,C)). -p(X) when is_atom(X) -> - pinfo(whereis(X)); -p({A,B,C}) -> - pinfo(ts_pid(A,B,C)); -p(X) -> - pinfo(X). - -t() -> - t(wall_clock). -t(X) -> - element(1, statistics(X)). - -pi(Item,X) -> - lists:keysearch(Item,1,p(X)). -pi(Item,A,B,C) -> - lists:keysearch(Item,1,p(A,B,C)). - -%% c:pid/3 -ts_pid(X,Y,Z) when is_integer(X), is_integer(Y), is_integer(Z) -> - list_to_pid("<" ++ integer_to_list(X) ++ "." ++ - integer_to_list(Y) ++ "." ++ - integer_to_list(Z) ++ ">"). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% display_info(Pids, Reductions, Messages) -> void -%% Pids = [pid(),...] -%% Reductions = integer() -%% Messaged = integer() -%% -%% Displays info, similar to c:i() about the processes in the list Pids. -%% Also counts the total number of reductions and msgs for the listed -%% processes, if called with Reductions = Messages = 0. 
- -display_info([Pid|T], R, M) -> - case pinfo(Pid) of - undefined -> - display_info(T, R, M); - Info -> - Call = fetch(initial_call, Info), - Curr = case fetch(current_function, Info) of - {Mod,F,Args} when is_list(Args) -> - {Mod,F,length(Args)}; - Other -> - Other - end, - Reds = fetch(reductions, Info), - LM = length(fetch(messages, Info)), - pformat(io_lib:format("~w", [Pid]), - io_lib:format("~w", [Call]), - io_lib:format("~w", [Curr]), Reds, LM), - display_info(T, R+Reds, M + LM) - end; -display_info([], R, M) -> - Line=lists:duplicate(27, "-"), - hformat(Line, Line, Line, Line, Line), - pformat("Total", "", "", R, M). - -hformat(A1, A2, A3, A4, A5) -> - io:format("~-10s ~-27s ~-27s ~8s ~4s~n", [A1,A2,A3,A4,A5]). - -pformat(A1, A2, A3, A4, A5) -> - io:format("~-10s ~-27s ~-27s ~8w ~4w~n", [A1,A2,A3,A4,A5]). - -fetch(Key, Info) -> - case lists:keysearch(Key, 1, Info) of - {value, {_, Val}} -> - Val; - _ -> - 0 - end. - -pinfo(P) -> - Node = node(), - case node(P) of - Node -> - process_info(P); - _ -> - rpc:call(node(P),erlang,process_info,[P]) - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% Support functions for COVER %% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% -%% A module is included in the cover analysis if -%% - it belongs to the tested application and is not listed in the -%% {exclude,List} part of the App.cover file -%% - it does not belong to the application, but is listed in the -%% {include,List} part of the App.cover file -%% - it does not belong to the application, but is listed in the -%% {cross,[{Tag,List}]} part of the App.cover file -%% -%% The modules listed in the 'cross' part of the cover file are -%% modules that are heavily used by other tests than the one where -%% they are explicitly tested. They should then be listed as 'cross' -%% in the cover file for the test where they are used but do not -%% belong. -%% -%% After all tests are completed, the these modules can be analysed -%% with coverage data from all tests where they are compiled - see -%% cross_cover_analyse/2. The result is stored in a file called -%% cross_cover.html in the run.<timestamp> directory of the -%% test the modules belong to. -%% -%% Example: -%% If the module m1 belongs to system s1 but is heavily used also in -%% the tests for another system s2, then the cover files for the two -%% systems could be like this: -%% -%% s1.cover: -%% {include,[m1]}. -%% -%% s2.cover: -%% {include,[....]}. % modules belonging to system s2 -%% {cross,[{s1,[m1]}]}. -%% -%% When the tests for both s1 and s2 are completed, run -%% cross_cover_analyse(Level,[{s1,S1LogDir},{s2,S2LogDir}]), and -%% the accumulated cover data for m1 will be written to -%% S1LogDir/[run.<timestamp>/]cross_cover.html -%% -%% S1LogDir and S2LogDir are either the run.<timestamp> directories -%% for the two tests, or the parent directory of these, in which case -%% the latest run.<timestamp> directory will be chosen. -%% -%% Note that the m1 module will also be presented in the normal -%% coverage log for s1 (due to the include statement in s1.cover), but -%% that only includes the coverage achieved by the s1 test itself. -%% -%% The Tag in the 'cross' statement in the cover file has no other -%% purpose than mapping the list of modules ([m1] in the example -%% above) to the correct log directory where it should be included in -%% the cross_cover.html file (S1LogDir in the example above). -%% I.e. 
the value of the Tag has no meaning, it could be foo as well -%% as s1 above, as long as the same Tag is used in the cover file and -%% in the call to cross_cover_analyse/2. - - -%% Cover compilation -%% The compilation is executed on the target node -start_cover(#cover{}=CoverInfo) -> - cover_compile(CoverInfo); -start_cover({log,CoverLogDir}=CoverInfo) -> - %% Cover is controlled by the framework - here's the log - put(test_server_cover_log_dir,CoverLogDir), - {ok,CoverInfo}. - -cover_compile(CoverInfo) -> - test_server:cover_compile(CoverInfo). - -%% Read the coverfile for an application and return a list of modules -%% that are members of the application but shall not be compiled -%% (Exclude), and a list of modules that are not members of the -%% application but shall be compiled (Include). -read_cover_file(none) -> - {[],[],[]}; -read_cover_file(CoverFile) -> - case file:consult(CoverFile) of - {ok,List} -> - case check_cover_file(List, [], [], []) of - {ok,Exclude,Include,Cross} -> {Exclude,Include,Cross}; - error -> - io:fwrite("Faulty format of CoverFile ~p\n", [CoverFile]), - {[],[],[]} - end; - {error,Reason} -> - io:fwrite("Can't read CoverFile ~ts\nReason: ~p\n", - [CoverFile,Reason]), - {[],[],[]} - end. - -check_cover_file([{exclude,all}|Rest], _, Include, Cross) -> - check_cover_file(Rest, all, Include, Cross); -check_cover_file([{exclude,Exclude}|Rest], _, Include, Cross) -> - case lists:all(fun(M) -> is_atom(M) end, Exclude) of - true -> - check_cover_file(Rest, Exclude, Include, Cross); - false -> - error - end; -check_cover_file([{include,Include}|Rest], Exclude, _, Cross) -> - case lists:all(fun(M) -> is_atom(M) end, Include) of - true -> - check_cover_file(Rest, Exclude, Include, Cross); - false -> - error - end; -check_cover_file([{cross,Cross}|Rest], Exclude, Include, _) -> - case check_cross(Cross) of - true -> - check_cover_file(Rest, Exclude, Include, Cross); - false -> - error - end; -check_cover_file([], Exclude, Include, Cross) -> - {ok,Exclude,Include,Cross}. - -check_cross([{Tag,Modules}|Rest]) -> - case lists:all(fun(M) -> is_atom(M) end, [Tag|Modules]) of - true -> - check_cross(Rest); - false -> - false - end; -check_cross([]) -> - true. - - -%% Cover analysis, per application -%% This analysis is executed on the target node once the test is -%% completed for an application. This is not the same as the cross -%% cover analysis, which can be executed on any node after the tests -%% are finshed. -%% -%% This per application analysis writes the file cover.html in the -%% application's run.<timestamp> directory. -stop_cover(#cover{}=CoverInfo, TestDir) -> - cover_analyse(CoverInfo, TestDir); -stop_cover(_CoverInfo, _TestDir) -> - %% Cover is probably controlled by the framework - ok. - -make_relative(AbsDir, VsDir) -> - DirTokens = filename:split(AbsDir), - VsTokens = filename:split(VsDir), - filename:join(make_relative1(DirTokens, VsTokens)). - -make_relative1([T | DirTs], [T | VsTs]) -> - make_relative1(DirTs, VsTs); -make_relative1(Last = [_File], []) -> - Last; -make_relative1(Last = [_File], VsTs) -> - Ups = ["../" || _ <- VsTs], - Ups ++ Last; -make_relative1(DirTs, []) -> - DirTs; -make_relative1(DirTs, VsTs) -> - Ups = ["../" || _ <- VsTs], - Ups ++ DirTs. 
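%% Illustrative example (not part of the original source): make_relative/2
%% above drops the leading path components that AbsDir and VsDir have in
%% common and climbs out of what remains of VsDir, so a call such as
%%
%%   make_relative("/ldisk/logs/app/cover.html", "/ldisk/logs/vsdir")
%%
%% would be expected to return "../app/cover.html" (both paths are made up).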
- - -cover_analyse(CoverInfo, TestDir) -> - write_default_cross_coverlog(TestDir), - - {ok,CoverLog} = open_html_file(filename:join(TestDir, ?coverlog_name)), - write_coverlog_header(CoverLog), - #cover{app=App, - file=CoverFile, - excl=Excluded, - cross=Cross} = CoverInfo, - io:fwrite(CoverLog, "<h1>Coverage for application '~w'</h1>\n", [App]), - io:fwrite(CoverLog, - "<p><a href=\"~ts\">Coverdata collected over all tests</a></p>", - [?cross_coverlog_name]), - - io:fwrite(CoverLog, "<p>CoverFile: <code>~tp</code>\n", [CoverFile]), - write_cross_cover_info(TestDir,Cross), - - case length(cover:imported_modules()) of - Imps when Imps > 0 -> - io:fwrite(CoverLog, - "<p>Analysis includes data from ~w imported module(s).\n", - [Imps]); - _ -> - ok - end, - - io:fwrite(CoverLog, "<p>Excluded module(s): <code>~tp</code>\n", [Excluded]), - - Coverage = test_server:cover_analyse(TestDir, CoverInfo), - write_binary_file(filename:join(TestDir,?raw_coverlog_name), - term_to_binary(Coverage)), - - case lists:filter(fun({_M,{_,_,_}}) -> false; - (_) -> true - end, Coverage) of - [] -> - ok; - Bad -> - io:fwrite(CoverLog, "<p>Analysis failed for ~w module(s): " - "<code>~w</code>\n", - [length(Bad),[BadM || {BadM,{_,_Why}} <- Bad]]) - end, - - TotPercent = write_cover_result_table(CoverLog, Coverage), - write_binary_file(filename:join(TestDir, ?cover_total), - term_to_binary(TotPercent)). - -%% Cover analysis - accumulated over multiple tests -%% This can be executed on any node after all tests are finished. -%% Analyse = overview | details -%% TagDirs = [{Tag,Dir}] -%% Tag = atom(), identifier -%% Dir = string(), the log directory for Tag, it can be a -%% run.<timestamp> directory or the parent directory of -%% such (in which case the latest run.<timestamp> directory -%% is used) -cross_cover_analyse(Analyse, TagDirs0) -> - TagDirs = get_latest_run_dirs(TagDirs0), - TagMods = get_all_cross_info(TagDirs,[]), - TagDirMods = add_cross_modules(TagMods,TagDirs), - CoverdataFiles = get_coverdata_files(TagDirMods), - lists:foreach(fun(CDF) -> cover:import(CDF) end, CoverdataFiles), - io:fwrite("Cover analysing...\n", []), - DetailsFun = - case Analyse of - details -> - fun(Dir,M) -> - OutFile = filename:join(Dir, - atom_to_list(M) ++ - ".CROSS_COVER.html"), - case cover:analyse_to_file(M, OutFile, [html]) of - {ok,_} -> - {file,OutFile}; - Error -> - Error - end - end; - _ -> - fun(_,_) -> undefined end - end, - Coverage = analyse_tests(TagDirMods, DetailsFun, []), - cover:stop(), - write_cross_cover_logs(Coverage,TagDirMods). - -write_cross_cover_info(_Dir,[]) -> - ok; -write_cross_cover_info(Dir,Cross) -> - write_binary_file(filename:join(Dir,?cross_cover_info), - term_to_binary(Cross)). - -%% For each test from which there are cross cover analysed -%% modules, write a cross cover log (cross_cover.html). -write_cross_cover_logs([{Tag,Coverage}|T],TagDirMods) -> - case lists:keyfind(Tag,1,TagDirMods) of - {_,Dir,Mods} when Mods=/=[] -> - write_binary_file(filename:join(Dir,?raw_cross_coverlog_name), - term_to_binary(Coverage)), - CoverLogName = filename:join(Dir,?cross_coverlog_name), - {ok,CoverLog} = open_html_file(CoverLogName), - write_coverlog_header(CoverLog), - io:fwrite(CoverLog, - "<h1>Coverage results for \'~w\' from all tests</h1>\n", - [Tag]), - write_cover_result_table(CoverLog, Coverage), - io:fwrite("Written file ~tp\n", [CoverLogName]); - _ -> - ok - end, - write_cross_cover_logs(T,TagDirMods); -write_cross_cover_logs([],_) -> - io:fwrite("done\n", []). 
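%% Illustrative example (not part of the original source): following the
%% documentation of cross_cover_analyse/2 above, an accumulated analysis
%% over two finished tests could be requested with something like
%%
%%   cross_cover_analyse(details, [{s1,"/ldisk/logs/s1_test"},
%%                                 {s2,"/ldisk/logs/s2_test"}])
%%
%% where the tags and directories are hypothetical; each directory may be
%% a run.<timestamp> directory or its parent, in which case the latest
%% run.<timestamp> directory below it is used.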
- -%% Get the latest run.<timestamp> directories -get_latest_run_dirs([{Tag,Dir}|Rest]) -> - [{Tag,get_latest_run_dir(Dir)} | get_latest_run_dirs(Rest)]; -get_latest_run_dirs([]) -> - []. - -get_latest_run_dir(Dir) -> - case filelib:wildcard(filename:join(Dir,"run.[1-2]*")) of - [] -> - Dir; - [H|T] -> - get_latest_dir(T,H) - end. - -get_latest_dir([H|T],Latest) when H>Latest -> - get_latest_dir(T,H); -get_latest_dir([_|T],Latest) -> - get_latest_dir(T,Latest); -get_latest_dir([],Latest) -> - Latest. - -get_all_cross_info([{_Tag,Dir}|Rest],Acc) -> - case file:read_file(filename:join(Dir,?cross_cover_info)) of - {ok,Bin} -> - TagMods = binary_to_term(Bin), - get_all_cross_info(Rest,TagMods++Acc); - _ -> - get_all_cross_info(Rest,Acc) - end; -get_all_cross_info([],Acc) -> - Acc. - -%% Associate the cross cover modules with their log directories -add_cross_modules(TagMods,TagDirs)-> - do_add_cross_modules(TagMods,[{Tag,Dir,[]} || {Tag,Dir} <- TagDirs]). -do_add_cross_modules([{Tag,Mods1}|TagMods],TagDirMods)-> - NewTagDirMods = - case lists:keytake(Tag,1,TagDirMods) of - {value,{Tag,Dir,Mods},Rest} -> - [{Tag,Dir,lists:umerge(lists:sort(Mods1),Mods)}|Rest]; - false -> - TagDirMods - end, - do_add_cross_modules(TagMods,NewTagDirMods); -do_add_cross_modules([],TagDirMods) -> - %% Just to get the modules in the same order as in the normal cover log - [{Tag,Dir,lists:reverse(Mods)} || {Tag,Dir,Mods} <- TagDirMods]. - -%% Find all exported coverdata files. -get_coverdata_files(TagDirMods) -> - lists:flatmap( - fun({_,LatestDir,_}) -> - filelib:wildcard(filename:join(LatestDir,"all.coverdata")) - end, - TagDirMods). - - -%% For each test, analyse all modules -%% Used for cross cover analysis. -analyse_tests([{Tag,LastTest,Modules}|T], DetailsFun, Acc) -> - Cov = analyse_modules(LastTest, Modules, DetailsFun, []), - analyse_tests(T, DetailsFun, [{Tag,Cov}|Acc]); -analyse_tests([], _DetailsFun, Acc) -> - Acc. - -%% Analyse each module -%% Used for cross cover analysis. -analyse_modules(Dir, [M|Modules], DetailsFun, Acc) -> - {ok,{M,{Cov,NotCov}}} = cover:analyse(M, module), - Acc1 = [{M,{Cov,NotCov,DetailsFun(Dir,M)}}|Acc], - analyse_modules(Dir, Modules, DetailsFun, Acc1); -analyse_modules(_Dir, [], _DetailsFun, Acc) -> - Acc. - - -%% Support functions for writing the cover logs (both cross and normal) -write_coverlog_header(CoverLog) -> - case catch io:put_chars(CoverLog,html_header("Coverage results")) of - {'EXIT',Reason} -> - io:format("\n\nERROR: Could not write normal heading in coverlog.\n" - "CoverLog: ~w\n" - "Reason: ~p\n", - [CoverLog,Reason]), - io:format(CoverLog,"<html><body>\n", []); - _ -> - ok - end. 
- - -format_analyse(M,Cov,NotCov,undefined) -> - io_lib:fwrite("<tr><td>~w</td>" - "<td align=right>~w %</td>" - "<td align=right>~w</td>" - "<td align=right>~w</td></tr>\n", - [M,pc(Cov,NotCov),Cov,NotCov]); -format_analyse(M,Cov,NotCov,{file,File}) -> - io_lib:fwrite("<tr><td><a href=\"~ts\">~w</a></td>" - "<td align=right>~w %</td>" - "<td align=right>~w</td>" - "<td align=right>~w</td></tr>\n", - [uri_encode(filename:basename(File)), - M,pc(Cov,NotCov),Cov,NotCov]); -format_analyse(M,Cov,NotCov,{lines,Lines}) -> - CoverOutName = atom_to_list(M)++".COVER.html", - {ok,CoverOut} = open_html_file(CoverOutName), - write_not_covered(CoverOut,M,Lines), - ok = file:close(CoverOut), - io_lib:fwrite("<tr><td><a href=\"~ts\">~w</a></td>" - "<td align=right>~w %</td>" - "<td align=right>~w</td>" - "<td align=right>~w</td></tr>\n", - [uri_encode(CoverOutName),M,pc(Cov,NotCov),Cov,NotCov]); -format_analyse(M,Cov,NotCov,{error,_}) -> - io_lib:fwrite("<tr><td>~w</td>" - "<td align=right>~w %</td>" - "<td align=right>~w</td>" - "<td align=right>~w</td></tr>\n", - [M,pc(Cov,NotCov),Cov,NotCov]). - - -pc(0,0) -> - 0; -pc(Cov,NotCov) -> - round(Cov/(Cov+NotCov)*100). - - -write_not_covered(CoverOut,M,Lines) -> - io:put_chars(CoverOut,html_header("Coverage results for "++atom_to_list(M))), - io:fwrite(CoverOut, - "The following lines in module ~w are not covered:\n" - "<table border=3 cellpadding=5>\n" - "<th>Line Number</th>\n", - [M]), - lists:foreach(fun({{_M,Line},{0,1}}) -> - io:fwrite(CoverOut,"<tr><td>~w</td></tr>\n", [Line]); - (_) -> - ok - end, - Lines), - io:put_chars(CoverOut,"</table>\n</body>\n</html>\n"). - - -write_default_coverlog(TestDir) -> - {ok,CoverLog} = open_html_file(filename:join(TestDir,?coverlog_name)), - write_coverlog_header(CoverLog), - io:put_chars(CoverLog,"Cover tool is not used\n</body></html>\n"), - ok = file:close(CoverLog). - -write_default_cross_coverlog(TestDir) -> - {ok,CrossCoverLog} = - open_html_file(filename:join(TestDir,?cross_coverlog_name)), - write_coverlog_header(CrossCoverLog), - io:put_chars(CrossCoverLog, - ["No cross cover modules exist for this application,", - xhtml("<br>","<br />"), - "or cross cover analysis is not completed.\n" - "</body></html>\n"]), - ok = file:close(CrossCoverLog). - -write_cover_result_table(CoverLog,Coverage) -> - io:fwrite(CoverLog, - "<p><table border=3 cellpadding=5>\n" - "<tr><th>Module</th><th>Covered (%)</th><th>Covered (Lines)</th>" - "<th>Not covered (Lines)</th>\n", - []), - {TotCov,TotNotCov} = - lists:foldl(fun({M,{Cov,NotCov,Details}},{AccCov,AccNotCov}) -> - Str = format_analyse(M,Cov,NotCov,Details), - io:fwrite(CoverLog,"~ts", [Str]), - {AccCov+Cov,AccNotCov+NotCov}; - ({_M,{error,_Reason}},{AccCov,AccNotCov}) -> - {AccCov,AccNotCov} - end, - {0,0}, - Coverage), - TotPercent = pc(TotCov,TotNotCov), - io:fwrite(CoverLog, - "<tr><th align=left>Total</th><th align=right>~w %</th>" - "<th align=right>~w</th><th align=right>~w</th></tr>\n" - "</table>\n" - "</body>\n" - "</html>\n", - [TotPercent,TotCov,TotNotCov]), - ok = file:close(CoverLog), - TotPercent. - - -%%%----------------------------------------------------------------- -%%% Support functions for writing files - -%% HTML files are always written with utf8 encoding -html_header(Title) -> - ["<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n" - "<!-- autogenerated by '", atom_to_list(?MODULE), "'. 
-->\n" - "<html>\n" - "<head>\n" - "<title>", Title, "</title>\n" - "<meta http-equiv=\"cache-control\" content=\"no-cache\">\n" - "<meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n" - "</head>\n" - "<body bgcolor=\"white\" text=\"black\" " - "link=\"blue\" vlink=\"purple\" alink=\"red\">\n"]. - -open_html_file(File) -> - open_utf8_file(File). - -open_html_file(File,Opts) -> - open_utf8_file(File,Opts). - -write_html_file(File,Content) -> - write_file(File,Content,utf8). - -%% The 'major' log file, which is a pure text file is also written -%% with utf8 encoding -open_utf8_file(File) -> - case file:open(File,AllOpts=[write,{encoding,utf8}]) of - {error,Reason} -> {error,{Reason,{File,AllOpts}}}; - Result -> Result - end. - -open_utf8_file(File,Opts) -> - case file:open(File,AllOpts=[{encoding,utf8}|Opts]) of - {error,Reason} -> {error,{Reason,{File,AllOpts}}}; - Result -> Result - end. - -%% Write a file with specified encoding -write_file(File,Content,latin1) -> - file:write_file(File,Content); -write_file(File,Content,utf8) -> - write_binary_file(File,unicode:characters_to_binary(Content)). - -%% Write a file with only binary data -write_binary_file(File,Content) -> - file:write_file(File,Content). - -%% Encoding of hyperlinks in HTML files -uri_encode(File) -> - Encoding = file:native_name_encoding(), - uri_encode(File,Encoding). - -uri_encode(File,Encoding) -> - Components = filename:split(File), - filename:join([uri_encode_comp(C,Encoding) || C <- Components]). - -%% Encode the reference to a "filename of the given encoding" so it -%% can be inserted in a utf8 encoded HTML file. -%% This does almost the same as http_uri:encode/1, except -%% 1. it does not convert @, : and / (in order to preserve nodename and c:/) -%% 2. if the file name is in latin1, it also encodes all -%% characters >127 - i.e. latin1 but not ASCII. -uri_encode_comp([Char|Chars],Encoding) -> - Reserved = sets:is_element(Char, reserved()), - case (Char>127 andalso Encoding==latin1) orelse Reserved of - true -> - [ $% | http_util:integer_to_hexlist(Char)] ++ - uri_encode_comp(Chars,Encoding); - false -> - [Char | uri_encode_comp(Chars,Encoding)] - end; -uri_encode_comp([],_) -> - []. - -%% Copied from http_uri.erl, but slightly modified -%% (not converting @, : and /) -reserved() -> - sets:from_list([$;, $&, $=, $+, $,, $?, - $#, $[, $], $<, $>, $\", ${, $}, $|, - $\\, $', $^, $%, $ ]). - -encoding(File) -> - case epp:read_encoding(File) of - none -> - epp:default_encoding(); - E -> - E - end. diff --git a/lib/test_server/src/test_server_gl.erl b/lib/test_server/src/test_server_gl.erl deleted file mode 100644 index c5ec3ccbe6..0000000000 --- a/lib/test_server/src/test_server_gl.erl +++ /dev/null @@ -1,301 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2012-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% -%% This module implements group leader processes for test cases. 
-%% Each group leader process handles output to the minor log file for -%% a test case, and calls test_server_io to handle output to the common -%% log files. The group leader processes are created and destroyed -%% through the test_server_io module/process. - --module(test_server_gl). --export([start_link/0,stop/1,set_minor_fd/3,unset_minor_fd/1, - get_tc_supervisor/1,print/4,set_props/2]). - --export([init/1,handle_call/3,handle_cast/2,handle_info/2,terminate/2]). - --record(st, {tc_supervisor :: 'none'|pid(), %Test case supervisor - tc :: mfa(), %Current test case MFA - minor :: 'none'|pid(), %Minor fd - minor_monitor, %Monitor ref for minor fd - capture :: 'none'|pid(), %Capture output - reject_io :: boolean(), %Reject I/O requests... - permit_io, %... and exceptions - auto_nl=true :: boolean(), %Automatically add NL - levels %{Stdout,Major,Minor} - }). - -%% start_link() -%% Start a new group leader process. Only to be called by -%% the test_server_io process. - -start_link() -> - case gen_server:start_link(?MODULE, [], []) of - {ok,Pid} -> - {ok,Pid}; - Other -> - Other - end. - - -%% stop(Pid) -%% Stop a group leader process. Only to be called by -%% the test_server_io process. - -stop(GL) -> - gen_server:cast(GL, stop). - - -%% set_minor_fd(GL, Fd, MFA) -%% GL = Pid for the group leader process -%% Fd = file descriptor for the minor log file -%% MFA = {M,F,A} for the test case owning the minor log file -%% -%% Register the file descriptor for the minor log file. Subsequent -%% IO directed to the minor log file will be written to this file. -%% Also register the currently executing process at the testcase -%% supervisor corresponding to this group leader process. - -set_minor_fd(GL, Fd, MFA) -> - req(GL, {set_minor_fd,Fd,MFA,self()}). - - -%% unset_minor_fd(GL, Fd, MFA) -%% GL = Pid for the group leader process -%% -%% Unregister the file descriptor for minor log file (typically -%% because the test case has ended the minor log file is about -%% to be closed). Subsequent IO (for example, by a process spawned -%% by the testcase process) will go to the unexpected_io log file. - -unset_minor_fd(GL) -> - req(GL, unset_minor_fd). - - -%% get_tc_supervisor(GL) -%% GL = Pid for the group leader process -%% -%% Return the Pid for the process that supervises the test case -%% that has this group leader. - -get_tc_supervisor(GL) -> - req(GL, get_tc_supervisor). - - -%% print(GL, Detail, Format, Args) -> ok -%% GL = Pid for the group leader process -%% Detail = integer() | minor | major | html | stdout -%% Msg = iodata() -%% Printer = internal | pid() -%% -%% Print a message to one of the log files. If Detail is an integer, -%% it will be compared to the levels (set by set_props/2) to -%% determine which log file(s) that are to receive the output. If -%% Detail is an atom, the value of the atom will directly determine -%% which log file to use. IO to the minor log file will be handled -%% directly by this group leader process (printing to the file set by -%% set_minor_fd/3), and all other IO will be handled by calling -%% test_server_io:print/3. - -print(GL, Detail, Msg, Printer) -> - req(GL, {print,Detail,Msg,Printer}). - - -%% set_props(GL, [PropertyTuple]) -%% GL = Pid for the group leader process -%% PropertyTuple = {levels,{Show,Major,Minor}} | -%% {auto_nl,boolean()} | -%% {reject_io_reqs,boolean()} -%% -%% Set properties for this group leader process. - -set_props(GL, PropList) -> - req(GL, {set_props,PropList}). - -%%% Internal functions. 
- -init([]) -> - {ok,#st{tc_supervisor=none, - minor=none, - minor_monitor=none, - capture=none, - reject_io=false, - permit_io=gb_sets:empty(), - auto_nl=true, - levels={1,19,10} - }}. - -req(GL, Req) -> - gen_server:call(GL, Req, infinity). - -handle_call(get_tc_supervisor, _From, #st{tc_supervisor=Pid}=St) -> - {reply,Pid,St}; -handle_call({set_minor_fd,Fd,MFA,Supervisor}, _From, St) -> - Ref = erlang:monitor(process, Fd), - {reply,ok,St#st{tc=MFA,minor=Fd,minor_monitor=Ref, - tc_supervisor=Supervisor}}; -handle_call(unset_minor_fd, _From, St) -> - {reply,ok,St#st{minor=none,tc_supervisor=none}}; -handle_call({set_props,PropList}, _From, St) -> - {reply,ok,do_set_props(PropList, St)}; -handle_call({print,Detail,Msg,Printer}, {From,_}, St) -> - output(Detail, Msg, Printer, From, St), - {reply,ok,St}. - -handle_cast(stop, St) -> - {stop,normal,St}. - -handle_info({'DOWN',Ref,process,_,Reason}=D, #st{minor_monitor=Ref}=St) -> - case Reason of - normal -> ok; - _ -> - Data = io_lib:format("=== WARNING === TC: ~w\n" - "Got down from minor Fd ~w: ~w\n\n", - [St#st.tc,St#st.minor,D]), - test_server_io:print_unexpected(Data) - end, - {noreply,St#st{minor=none,minor_monitor=none}}; -handle_info({permit_io,Pid}, #st{permit_io=P}=St) -> - {noreply,St#st{permit_io=gb_sets:add(Pid, P)}}; -handle_info({capture,Cap0}, St) -> - Cap = case Cap0 of - false -> none; - Pid when is_pid(Cap0) -> Pid - end, - {noreply,St#st{capture=Cap}}; -handle_info({io_request,From,ReplyAs,Req}=IoReq, St) -> - try io_req(Req, From, St) of - passthrough -> - group_leader() ! IoReq; - Data -> - case is_io_permitted(From, St) of - false -> - ok; - true -> - case St of - #st{capture=none} -> - ok; - #st{capture=CapturePid} -> - CapturePid ! {captured,Data} - end, - output(minor, Data, From, From, St) - end, - From ! {io_reply,ReplyAs,ok} - catch - _:_ -> - From ! {io_reply,ReplyAs,{error,arguments}} - end, - {noreply,St}; -handle_info({structured_io,ClientPid,{Detail,Str}}, St) -> - output(Detail, Str, ClientPid, ClientPid, St), - {noreply,St}; -handle_info({printout,Detail,Format,Args}, St) -> - Str = io_lib:format(Format, Args), - output(Detail, Str, internal, none, St), - {noreply,St}; -handle_info(Msg, #st{tc_supervisor=Pid}=St) when is_pid(Pid) -> - %% The process overseeing the testcase process also used to be - %% the group leader; thus, it is widely expected that it can be - %% reached by sending a message to the group leader. Therefore - %% we'll need to forward any non-recognized messaged to the test - %% case supervisor. - Pid ! Msg, - {noreply,St}; -handle_info(_Msg, #st{}=St) -> - %% There is no known supervisor process. Ignore this message. - {noreply,St}. - -terminate(_, _) -> - ok. - -do_set_props([{levels,Levels}|Ps], St) -> - do_set_props(Ps, St#st{levels=Levels}); -do_set_props([{auto_nl,AutoNL}|Ps], St) -> - do_set_props(Ps, St#st{auto_nl=AutoNL}); -do_set_props([{reject_io_reqs,Bool}|Ps], St) -> - do_set_props(Ps, St#st{reject_io=Bool}); -do_set_props([], St) -> St. - -io_req({put_chars,Enc,Bytes}, _, _) when Enc =:= latin1; Enc =:= unicode -> - unicode:characters_to_list(Bytes, Enc); -io_req({put_chars,Encoding,Mod,Func,[Format,Args]}, _, _) -> - Str = Mod:Func(Format, Args), - unicode:characters_to_list(Str, Encoding); -io_req(_, _, _) -> passthrough. 
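The io_request clause above implements the standard Erlang I/O protocol, which is how io:format/2 output from a test case reaches its group leader. A hand-rolled request of the same shape, assuming GL is such a group leader pid (illustration only):

    Ref = make_ref(),
    GL ! {io_request, self(), Ref,
          {put_chars, unicode, io_lib, format, ["value: ~p~n", [42]]}},
    receive
        {io_reply, Ref, Reply} -> Reply    %% ok | {error,arguments}
    after 5000 ->
        timeout
    end.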
- -output(Level, Str, Sender, From, St) when is_integer(Level) -> - case selected_by_level(Level, stdout, St) of - true -> output(stdout, Str, Sender, From, St); - false -> ok - end, - case selected_by_level(Level, major, St) of - true -> output(major, Str, Sender, From, St); - false -> ok - end, - case selected_by_level(Level, minor, St) of - true -> output(minor, Str, Sender, From, St); - false -> ok - end; -output(stdout, Str, _Sender, From, St) -> - output_to_file(stdout, Str, From, St); -output(html, Str, _Sender, From, St) -> - output_to_file(html, Str, From, St); -output(Level, Str, Sender, From, St) when is_atom(Level) -> - output_to_file(Level, dress_output(Str, Sender, St), From, St). - -output_to_file(minor, Data0, From, #st{tc={M,F,A},minor=none}) -> - Data = [io_lib:format("=== ~w:~w/~w\n", [M,F,A]),Data0], - test_server_io:print(From, unexpected_io, Data), - ok; -output_to_file(minor, Data, From, #st{tc=TC,minor=Fd}) -> - try - io:put_chars(Fd, Data) - catch - Type:Reason -> - Data1 = - [io_lib:format("=== ERROR === TC: ~w\n" - "Failed to write to minor Fd: ~w\n" - "Type: ~w\n" - "Reason: ~w\n", - [TC,Fd,Type,Reason]), - Data,"\n"], - test_server_io:print(From, unexpected_io, Data1) - end; -output_to_file(Detail, Data, From, _) -> - test_server_io:print(From, Detail, Data). - -is_io_permitted(From, #st{reject_io=true,permit_io=P}) -> - gb_sets:is_member(From, P); -is_io_permitted(_, #st{reject_io=false}) -> true. - -selected_by_level(Level, stdout, #st{levels={Stdout,_,_}}) -> - Level =< Stdout; -selected_by_level(Level, major, #st{levels={_,Major,_}}) -> - Level =< Major; -selected_by_level(Level, minor, #st{levels={_,_,Minor}}) -> - Level >= Minor. - -dress_output([$=|_]=Str, internal, _) -> - [Str,$\n]; -dress_output(Str, internal, _) -> - ["=== ",Str,$\n]; -dress_output(Str, _, #st{auto_nl=AutoNL}) -> - case AutoNL of - true -> [Str,$\n]; - false -> Str - end. diff --git a/lib/test_server/src/test_server_internal.hrl b/lib/test_server/src/test_server_internal.hrl deleted file mode 100644 index 578f359010..0000000000 --- a/lib/test_server/src/test_server_internal.hrl +++ /dev/null @@ -1,61 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2002-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - --define(priv_dir,"log_private"). --define(MAIN_PORT,3289). --define(ACCEPT_TIMEOUT,20000). - -%% Target information generated by test_server:init_target_info/0 -%% Once initiated, this information will never change!! --record(target_info, {os_family, % atom(); win32 | unix - os_type, % result of os:type() - host, % string(); the name of the target machine - version, % string() - system_version, % string() - root_dir, % string() - test_server_dir, % string() - emulator, % string() - otp_release, % string() - username, % string() - cookie, % string(); Cookie for target node - naming, % string(); "-name" | "-sname" - master}). % string(); Was used for OSE's master - % node for main target and slave nodes. 
- % For other platforms the target node - % itself is master for slave nodes - -%% Temporary information generated by test_server_ctrl:read_parameters/X -%% This information is used when starting the main target, and for -%% initiating the #target_info record. --record(par, {type, - target, - naming, - master, - cookie}). - - --record(cover, {app, % application; Name | none - file, % cover spec file - incl, % explicitly include modules - excl, % explicitly exclude modules - level, % analyse level; details | overview - mods, % actually cover compiled modules - stop=true, % stop cover after analyse; boolean() - cross}).% cross cover analyse info diff --git a/lib/test_server/src/test_server_io.erl b/lib/test_server/src/test_server_io.erl deleted file mode 100644 index 0d881d0ada..0000000000 --- a/lib/test_server/src/test_server_io.erl +++ /dev/null @@ -1,452 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2012-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% -%% This module implements a process with the registered name 'test_server_io', -%% which has two main responsibilities: -%% -%% * Manage group leader processes (see the test_server_gl module) -%% for test cases. A group_leader process is obtained by calling -%% get_gl/1. Group leader processes will be kept alive as along as -%% the 'test_server_io' process is alive. -%% -%% * Handle output to the common log files (stdout, major, html, -%% unexpected_io). -%% - --module(test_server_io). --export([start_link/0,stop/1,get_gl/1,set_fd/2, - start_transaction/0,end_transaction/0, - print_buffered/1,print/3,print_unexpected/1, - set_footer/1,set_job_name/1,set_gl_props/1, - reset_state/0,finish/0]). - --export([init/1,handle_call/3,handle_info/2,terminate/2]). - --record(st, {fds, % Singleton fds (gb_tree) - tags=[], % Known tag types - shared_gl :: pid(), % Shared group leader - gls, % Group leaders (gb_set) - io_buffering=false, % I/O buffering - buffered, % Buffered I/O requests - html_footer, % HTML footer - job_name, % Name of current job. - gl_props, % Properties for GL - phase, % Indicates current mode - offline_buffer, % Buffer I/O during startup - stopping, % Reply to when process stopped - pending_ops % Perform when process idle - }). - -start_link() -> - case whereis(?MODULE) of - undefined -> - case gen_server:start_link({local,?MODULE}, ?MODULE, [], []) of - {ok,Pid} -> - {ok,Pid}; - Other -> - Other - end; - Pid -> - %% already running, reset the state - reset_state(), - {ok,Pid} - end. - -stop(FilesToClose) -> - OldGL = group_leader(), - group_leader(self(), self()), - req({stop,FilesToClose}), - group_leader(OldGL, self()), - ok. - -finish() -> - req(finish). - -%% get_gl(Shared) -> Pid -%% Shared = boolean() -%% Pid = pid() -%% -%% Return a group leader (a process using the test_server_gl module). 
-%% If Shared is true, the shared group leader is returned (suitable for -%% running sequential test cases), otherwise a new group leader process -%% is spawned. Group leader processes will live until the -%% 'test_server_io' process is stopped. - -get_gl(Shared) when is_boolean(Shared) -> - req({get_gl,Shared}). - -%% set_fd(Tag, Fd) -> ok. -%% Tag = major | html | unexpected_io -%% Fd = a file descriptor (as returned by file:open/2) -%% -%% Associate a file descriptor with the given Tag. This -%% Tag can later be used in when calling to print/3. - -set_fd(Tag, Fd) -> - req({set_fd,Tag,Fd}). - -%% start_transaction() -%% -%% Subsequent calls to print/3 from the process executing start_transaction/0 -%% will cause the messages to be buffered instead of printed directly. - -start_transaction() -> - req({start_transaction,self()}). - -%% end_transaction() -%% -%% End the transaction started by start_transaction/0. Subsequent calls to -%% print/3 will cause the message to be printed directly. - -end_transaction() -> - req({end_transaction,self()}). - -%% print(From, Tag, Msg) -%% From = pid() -%% Tag = stdout, or any tag that has been registered using set_fd/2 -%% Msg = string or iolist -%% -%% Either print Msg to the file identified by Tag, or buffer the message -%% start_transaction/0 has been called from the process From. -%% -%% NOTE: The tags have various special meanings. For example, 'html' -%% is assumed to be a HTML file. - -print(From, Tag, Msg) -> - req({print,From,Tag,Msg}). - -%% print_buffered(Pid) -%% Pid = pid() -%% -%% Print all messages buffered in the *first* transaction buffered for Pid. -%% (If start_transaction/0 and end_transaction/0 has been called N times, -%% print_buffered/1 must be called N times to print all transactions.) - -print_buffered(Pid) -> - req({print_buffered,Pid}). - -%% print_unexpected(Msg) -%% Msg = string or iolist -%% -%% Print the given string in the unexpected_io log. - -print_unexpected(Msg) -> - print(xxxFrom,unexpected_io,Msg). - -%% set_footer(IoData) -%% -%% Set a footer for the file associated with the 'html' tag. -%% It will be used by print/3 to print a footer for the HTML file. - -set_footer(Footer) -> - req({set_footer,Footer}). - -%% set_job_name(Name) -%% -%% Set a name for the currently running job. The name will be used -%% when printing to 'stdout'. -%% - -set_job_name(Name) -> - req({set_job_name,Name}). - -%% set_gl_props(PropList) -%% -%% Set properties for group leader processes. When a group_leader process -%% is created, test_server_gl:set_props(PropList) will be called. - -set_gl_props(PropList) -> - req({set_gl_props,PropList}). - -%% reset_state -%% -%% Reset the initial state -reset_state() -> - req(reset_state). - -%%% Internal functions. - -init([]) -> - process_flag(trap_exit, true), - Empty = gb_trees:empty(), - {ok,Shared} = test_server_gl:start_link(), - {ok,#st{fds=Empty,shared_gl=Shared,gls=gb_sets:empty(), - io_buffering=gb_sets:empty(), - buffered=Empty, - html_footer="</body>\n</html>\n", - job_name="<name not set>", - gl_props=[], - phase=starting, - offline_buffer=[], - pending_ops=[]}}. - -req(Req) -> - gen_server:call(?MODULE, Req, infinity). 
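A sketch of how the API above fits together (the file name is invented); output printed from a process that has started a transaction is held back until print_buffered/1 is called:

    {ok, _} = test_server_io:start_link(),
    {ok, Fd} = file:open("major.example.log", [write]),
    ok = test_server_io:set_fd(major, Fd),
    ok = test_server_io:start_transaction(),
    ok = test_server_io:print(self(), major, "written later\n"),
    ok = test_server_io:end_transaction(),
    ok = test_server_io:print_buffered(self()),  %% now written to the major log
    ok = file:close(Fd).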
- -handle_call({get_gl,false}, _From, #st{gls=Gls,gl_props=Props}=St) -> - {ok,Pid} = test_server_gl:start_link(), - test_server_gl:set_props(Pid, Props), - {reply,Pid,St#st{gls=gb_sets:insert(Pid, Gls)}}; -handle_call({get_gl,true}, _From, #st{shared_gl=Shared}=St) -> - {reply,Shared,St}; -handle_call({set_fd,Tag,Fd}, _From, #st{fds=Fds0,tags=Tags0, - offline_buffer=OfflineBuff}=St) -> - Fds = gb_trees:enter(Tag, Fd, Fds0), - St1 = St#st{fds=Fds,tags=[Tag|lists:delete(Tag, Tags0)]}, - OfflineBuff1 = - if OfflineBuff == [] -> - []; - true -> - %% Fd ready, print anything buffered for associated Tag - lists:filtermap(fun({T,From,Str}) when T == Tag -> - output(From, Tag, Str, St1), - false; - (_) -> - true - end, lists:reverse(OfflineBuff)) - end, - {reply,ok,St1#st{phase=started, - offline_buffer=lists:reverse(OfflineBuff1)}}; -handle_call({start_transaction,Pid}, _From, #st{io_buffering=Buffer0, - buffered=Buf0}=St) -> - Buf = case gb_trees:is_defined(Pid, Buf0) of - false -> gb_trees:insert(Pid, queue:new(), Buf0); - true -> Buf0 - end, - Buffer = gb_sets:add(Pid, Buffer0), - {reply,ok,St#st{io_buffering=Buffer,buffered=Buf}}; -handle_call({print,From,Tag,Str}, _From, St0) -> - St = output(From, Tag, Str, St0), - {reply,ok,St}; -handle_call({end_transaction,Pid}, _From, #st{io_buffering=Buffer0, - buffered=Buffered0}=St0) -> - Q0 = gb_trees:get(Pid, Buffered0), - Q = queue:in(eot, Q0), - Buffered = gb_trees:update(Pid, Q, Buffered0), - Buffer = gb_sets:delete_any(Pid, Buffer0), - St = St0#st{io_buffering=Buffer,buffered=Buffered}, - {reply,ok,St}; -handle_call({print_buffered,Pid}, _From, #st{buffered=Buffered0}=St0) -> - Q0 = gb_trees:get(Pid, Buffered0), - Q = do_print_buffered(Q0, St0), - Buffered = gb_trees:update(Pid, Q, Buffered0), - St = St0#st{buffered=Buffered}, - {reply,ok,St}; -handle_call({set_footer,Footer}, _From, St) -> - {reply,ok,St#st{html_footer=Footer}}; -handle_call({set_job_name,Name}, _From, St) -> - {reply,ok,St#st{job_name=Name}}; -handle_call({set_gl_props,Props}, _From, #st{shared_gl=Shared}=St) -> - test_server_gl:set_props(Shared, Props), - {reply,ok,St#st{gl_props=Props}}; -handle_call(reset_state, From, #st{phase=stopping,pending_ops=Ops}=St) -> - %% can't reset during stopping phase, save op for later - Op = fun(NewSt) -> - {_,Result,NewSt1} = handle_call(reset_state, From, NewSt), - {Result,NewSt1} - end, - {noreply,St#st{pending_ops=[{From,Op}|Ops]}}; -handle_call(reset_state, _From, #st{fds=Fds,tags=Tags,gls=Gls, - offline_buffer=OfflineBuff}) -> - %% close open log files - lists:foreach(fun(Tag) -> - case gb_trees:lookup(Tag, Fds) of - none -> - ok; - {value,Fd} -> - file:close(Fd) - end - end, Tags), - GlList = gb_sets:to_list(Gls), - [test_server_gl:stop(GL) || GL <- GlList], - timer:sleep(100), - case lists:filter(fun(GlPid) -> is_process_alive(GlPid) end, GlList) of - [] -> - ok; - _ -> - timer:sleep(2000), - [exit(GL, kill) || GL <- GlList] - end, - Empty = gb_trees:empty(), - {ok,Shared} = test_server_gl:start_link(), - {reply,ok,#st{fds=Empty,shared_gl=Shared,gls=gb_sets:empty(), - io_buffering=gb_sets:empty(), - buffered=Empty, - html_footer="</body>\n</html>\n", - job_name="<name not set>", - gl_props=[], - phase=starting, - offline_buffer=OfflineBuff, - pending_ops=[]}}; -handle_call({stop,FdTags}, From, #st{fds=Fds0,tags=Tags0, - shared_gl=SGL,gls=Gls0}=St0) -> - St = St0#st{gls=gb_sets:insert(SGL, Gls0),phase=stopping,stopping=From}, - gc(St), - %% close open log files - {Fds1,Tags1} = lists:foldl(fun(Tag, {Fds,Tags}) -> - case 
gb_trees:lookup(Tag, Fds) of - none -> - {Fds,Tags}; - {value,Fd} -> - file:close(Fd), - {gb_trees:delete(Tag, Fds), - lists:delete(Tag, Tags)} - end - end, {Fds0,Tags0}, FdTags), - %% Give the users of the surviving group leaders some - %% time to finish. - erlang:send_after(1000, self(), stop_group_leaders), - {noreply,St#st{fds=Fds1,tags=Tags1}}; -handle_call(finish, From, St) -> - gen_server:reply(From, ok), - {stop,normal,St}. - -handle_info({'EXIT',Pid,normal}, #st{gls=Gls0,stopping=From}=St) -> - Gls = gb_sets:delete_any(Pid, Gls0), - case gb_sets:is_empty(Gls) andalso stopping =/= undefined of - true -> - %% No more group leaders left. - gen_server:reply(From, ok), - {noreply,St#st{gls=Gls,phase=stopping,stopping=undefined}}; - false -> - %% Wait for more group leaders to finish. - {noreply,St#st{gls=Gls,phase=stopping}} - end; -handle_info({'EXIT',_Pid,Reason}, _St) -> - exit(Reason); -handle_info(stop_group_leaders, #st{gls=Gls}=St) -> - %% Stop the remaining group leaders. - GlPids = gb_sets:to_list(Gls), - [test_server_gl:stop(GL) || GL <- GlPids], - timer:sleep(100), - Wait = - case lists:filter(fun(GlPid) -> is_process_alive(GlPid) end, GlPids) of - [] -> 0; - _ -> 2000 - end, - erlang:send_after(Wait, self(), kill_group_leaders), - {noreply,St}; -handle_info(kill_group_leaders, #st{gls=Gls,stopping=From, - pending_ops=Ops}=St) -> - [exit(GL, kill) || GL <- gb_sets:to_list(Gls)], - if From /= undefined -> - gen_server:reply(From, ok); - true -> % reply has been sent already - ok - end, - %% we're idle, check if any ops are pending - St1 = lists:foldr(fun({ReplyTo,Op},NewSt) -> - {Result,NewSt1} = Op(NewSt), - gen_server:reply(ReplyTo, Result), - NewSt1 - end, St#st{phase=idle,pending_ops=[]}, Ops), - {noreply,St1}; -handle_info(Other, St) -> - io:format("Ignoring: ~p\n", [Other]), - {noreply,St}. - -terminate(_, _) -> - ok. - -output(From, Tag, Str, #st{io_buffering=Buffered,buffered=Buf0, - phase=Phase,offline_buffer=OfflineBuff}=St) -> - case gb_sets:is_member(From, Buffered) of - false -> - case do_output(Tag, Str, Phase, St) of - buffer when length(OfflineBuff)>500 -> - %% something's wrong, clear buffer - St#st{offline_buffer=[]}; - buffer -> - St#st{offline_buffer=[{Tag,From,Str}|OfflineBuff]}; - _ -> - St - end; - true -> - Q0 = gb_trees:get(From, Buf0), - Q = queue:in({Tag,Str}, Q0), - Buf = gb_trees:update(From, Q, Buf0), - St#st{buffered=Buf} - end. - -do_output(stdout, Str, _, #st{job_name=undefined}) -> - io:put_chars(Str); -do_output(stdout, Str0, _, #st{job_name=Name}) -> - Str = io_lib:format("Testing ~ts: ~ts\n", [Name,Str0]), - io:put_chars(Str); -do_output(Tag, Str, Phase, #st{fds=Fds}=St) -> - case gb_trees:lookup(Tag, Fds) of - none when Phase /= started -> - buffer; - none -> - S = io_lib:format("\n*** ERROR: ~w, line ~w: No known '~p' log file\n", - [?MODULE,?LINE,Tag]), - do_output(stdout, [S,Str], Phase, St); - {value,Fd} -> - try - io:put_chars(Fd, Str), - case Tag of - html -> finalise_table(Fd, St); - _ -> ok - end - catch _:Error -> - S = io_lib:format("\n*** ERROR: ~w, line ~w: Error writing to " - "log file '~p': ~p\n", - [?MODULE,?LINE,Tag,Error]), - do_output(stdout, [S,Str], Phase, St) - end - end. - -finalise_table(Fd, #st{html_footer=Footer}) -> - case file:position(Fd, {cur,0}) of - {ok,Pos} -> - %% We are writing to a seekable file. Finalise so - %% we get complete valid (and viewable) HTML code. - %% Then rewind to overwrite the finalising code. 
- io:put_chars(Fd, ["\n</table>\n",Footer]), - file:position(Fd, Pos); - {error,epipe} -> - %% The file is not seekable. We cannot erase what - %% we've already written --- so the reader will - %% have to wait until we're done. - ok - end. - -do_print_buffered(Q0, St) -> - Item = queue:get(Q0), - Q = queue:drop(Q0), - case Item of - eot -> - Q; - {Tag,Str} -> - do_output(Tag, Str, undefined, St), - do_print_buffered(Q, St) - end. - -gc(#st{gls=Gls0}) -> - InUse0 = [begin - case process_info(P, group_leader) of - {group_leader,GL} -> GL; - undefined -> undefined - end - end || P <- processes()], - InUse = ordsets:from_list(InUse0), - Gls = gb_sets:to_list(Gls0), - NotUsed = ordsets:subtract(Gls, InUse), - [test_server_gl:stop(Pid) || Pid <- NotUsed], - ok. diff --git a/lib/test_server/src/test_server_node.erl b/lib/test_server/src/test_server_node.erl deleted file mode 100644 index 4e6839fc6b..0000000000 --- a/lib/test_server/src/test_server_node.erl +++ /dev/null @@ -1,759 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2002-2014. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(test_server_node). --compile(r12). - -%%% -%%% The same compiled code for this module must be possible to load -%%% in R12B and later. -%%% - -%% Test Controller interface --export([is_release_available/1]). --export([start_tracer_node/2,trace_nodes/2,stop_tracer_node/1]). --export([start_node/5, stop_node/1]). --export([kill_nodes/0, nodedown/1]). -%% Internal export --export([node_started/1,trc/1,handle_debug/4]). - --include("test_server_internal.hrl"). --record(slave_info, {name,socket,client}). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% %%% -%%% All code in this module executes on the test_server_ctrl process %%% -%%% except for node_started/1 and trc/1 which execute on a new node. %%% -%%% %%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -is_release_available(Rel) when is_atom(Rel) -> - is_release_available(atom_to_list(Rel)); -is_release_available(Rel) -> - case os:type() of - {unix,_} -> - Erl = find_release(Rel), - case Erl of - none -> false; - _ -> filelib:is_regular(Erl) - end; - _ -> - false - end. - -nodedown(Sock) -> - Match = #slave_info{name='$1',socket=Sock,client='$2',_='_'}, - case ets:match(slave_tab,Match) of - [[Node,_Client]] -> % Slave node died - gen_tcp:close(Sock), - ets:delete(slave_tab,Node), - slave_died; - [] -> - ok - end. 
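nodedown/1 above (and the slave-node bookkeeping further down) uses ets:match/2 with a #slave_info{} record as the match pattern. Since the record is just the tagged tuple {slave_info, Name, Socket, Client}, the same lookup can be written with a plain tuple (table and values invented):

    T = ets:new(example_tab, [set, {keypos,2}]),
    true = ets:insert(T, {slave_info, 'node1@host', fake_sock, client1}),
    %% '$1' and '$2' are match variables; ets:match/2 returns their bindings in order.
    [['node1@host', client1]] = ets:match(T, {slave_info, '$1', fake_sock, '$2'}).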
- - - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Start trace node -%%% -start_tracer_node(TraceFile,TI) -> - Match = #slave_info{name='$1',_='_'}, - SlaveNodes = lists:map(fun([N]) -> [" ",N] end, - ets:match(slave_tab,Match)), - TargetNode = node(), - Cookie = TI#target_info.cookie, - {ok,LSock} = gen_tcp:listen(0,[binary,{reuseaddr,true},{packet,2}]), - {ok,TracePort} = inet:port(LSock), - Prog = quote_progname(pick_erl_program(default)), - Cmd = lists:concat([Prog, " -sname tracer -hidden -setcookie ", Cookie, - " -s ", ?MODULE, " trc ", TraceFile, " ", - TracePort, " ", TI#target_info.os_family]), - spawn(fun() -> print_data(open_port({spawn,Cmd},[stream])) end), -%! open_port({spawn,Cmd},[stream]), - case gen_tcp:accept(LSock,?ACCEPT_TIMEOUT) of - {ok,Sock} -> - gen_tcp:close(LSock), - receive - {tcp,Sock,Result} when is_binary(Result) -> - case unpack(Result) of - error -> - gen_tcp:close(Sock), - {error,timeout}; - {ok,started} -> - trace_nodes(Sock,[TargetNode | SlaveNodes]), - {ok,Sock}; - {ok,Error} -> Error - end; - {tcp_closed,Sock} -> - gen_tcp:close(Sock), - {error,could_not_start_tracernode} - after ?ACCEPT_TIMEOUT -> - gen_tcp:close(Sock), - {error,timeout} - end; - Error -> - gen_tcp:close(LSock), - {error,{could_not_start_tracernode,Error}} - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Start a tracer on each of these nodes and set flags and patterns -%%% -trace_nodes(Sock,Nodes) -> - Bin = term_to_binary({add_nodes,Nodes}), - ok = gen_tcp:send(Sock, [1|Bin]), - receive_ack(Sock). - - -receive_ack(Sock) -> - receive - {tcp,Sock,Bin} when is_binary(Bin) -> - case unpack(Bin) of - error -> receive_ack(Sock); - {ok,_} -> ok - end; - _ -> - receive_ack(Sock) - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Stop trace node -%%% -stop_tracer_node(Sock) -> - Bin = term_to_binary(id(stop)), - ok = gen_tcp:send(Sock, [1|Bin]), - receive {tcp_closed,Sock} -> gen_tcp:close(Sock) end, - ok. - - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% trc([TraceFile,Nodes]) -> ok -%% -%% Start tracing on the given nodes -%% -%% This function executes on the new node -%% -trc([TraceFile, PortAtom, Type]) -> - {Result,Patterns} = - case file:consult(TraceFile) of - {ok,TI} -> - Pat = parse_trace_info(lists:flatten(TI)), - {started,Pat}; - Error -> - {Error,[]} - end, - Port = list_to_integer(atom_to_list(PortAtom)), - case catch gen_tcp:connect("localhost", Port, [binary, - {reuseaddr,true}, - {packet,2}]) of - {ok,Sock} -> - BinResult = term_to_binary(Result), - ok = gen_tcp:send(Sock,[1|BinResult]), - trc_loop(Sock,Patterns,Type); - _else -> - ok - end, - erlang:halt(). -trc_loop(Sock,Patterns,Type) -> - receive - {tcp,Sock,Bin} -> - case unpack(Bin) of - error -> - ttb:stop(), - gen_tcp:close(Sock); - {ok,{add_nodes,Nodes}} -> - add_nodes(Nodes,Patterns,Type), - Bin = term_to_binary(id(ok)), - ok = gen_tcp:send(Sock, [1|Bin]), - trc_loop(Sock,Patterns,Type); - {ok,stop} -> - ttb:stop(), - gen_tcp:close(Sock) - end; - {tcp_closed,Sock} -> - ttb:stop(), - gen_tcp:close(Sock) - end. -add_nodes(Nodes,Patterns,_Type) -> - ttb:tracer(Nodes,[{file,{local, test_server}}, - {handler, {{?MODULE,handle_debug},initial}}]), - ttb:p(all,[call,timestamp]), - lists:foreach(fun({TP,M,F,A,Pat}) -> ttb:TP(M,F,A,Pat); - ({CTP,M,F,A}) -> ttb:CTP(M,F,A) - end, - Patterns). 
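All socket traffic above uses one framing convention: a leading version byte 1 followed by the external term format, sent over a {packet,2} socket in binary mode and decoded by unpack/1 further down. The convention in isolation (the helper names send_term and recv_term are mine):

    send_term(Sock, Term) ->
        ok = gen_tcp:send(Sock, [1 | term_to_binary(Term)]).

    recv_term(Sock, Timeout) ->
        receive
            {tcp, Sock, <<1, Bin/binary>>} -> {ok, binary_to_term(Bin)};
            {tcp_closed, Sock}             -> {error, closed}
        after Timeout ->
            {error, timeout}
        end.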
- -parse_trace_info([{TP,M,Pat}|Pats]) when TP=:=tp; TP=:=tpl -> - [{TP,M,'_','_',Pat}|parse_trace_info(Pats)]; -parse_trace_info([{TP,M,F,Pat}|Pats]) when TP=:=tp; TP=:=tpl -> - [{TP,M,F,'_',Pat}|parse_trace_info(Pats)]; -parse_trace_info([{TP,M,F,A,Pat}|Pats]) when TP=:=tp; TP=:=tpl -> - [{TP,M,F,A,Pat}|parse_trace_info(Pats)]; -parse_trace_info([CTP|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg -> - [{CTP,'_','_','_'}|parse_trace_info(Pats)]; -parse_trace_info([{CTP,M}|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg -> - [{CTP,M,'_','_'}|parse_trace_info(Pats)]; -parse_trace_info([{CTP,M,F}|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg -> - [{CTP,M,F,'_'}|parse_trace_info(Pats)]; -parse_trace_info([{CTP,M,F,A}|Pats]) when CTP=:=ctp; CTP=:=ctpl; CTP=:=ctpg -> - [{CTP,M,F,A}|parse_trace_info(Pats)]; -parse_trace_info([]) -> - []; -parse_trace_info([_other|Pats]) -> % ignore - parse_trace_info(Pats). - -handle_debug(Out,Trace,TI,initial) -> - handle_debug(Out,Trace,TI,0); -handle_debug(_Out,end_of_trace,_TI,N) -> - N; -handle_debug(Out,Trace,_TI,N) -> - print_trc(Out,Trace,N), - N+1. - -print_trc(Out,{trace_ts,P,call,{M,F,A},C,Ts},N) -> - io:format(Out, - "~w: ~s~n" - "Process : ~w~n" - "Call : ~w:~w/~w~n" - "Arguments : ~p~n" - "Caller : ~w~n~n", - [N,ts(Ts),P,M,F,length(A),A,C]); -print_trc(Out,{trace_ts,P,call,{M,F,A},Ts},N) -> - io:format(Out, - "~w: ~s~n" - "Process : ~w~n" - "Call : ~w:~w/~w~n" - "Arguments : ~p~n~n", - [N,ts(Ts),P,M,F,length(A),A]); -print_trc(Out,{trace_ts,P,return_from,{M,F,A},R,Ts},N) -> - io:format(Out, - "~w: ~s~n" - "Process : ~w~n" - "Return from : ~w:~w/~w~n" - "Return value : ~p~n~n", - [N,ts(Ts),P,M,F,A,R]); -print_trc(Out,{drop,X},N) -> - io:format(Out, - "~w: Tracer dropped ~w messages - too busy~n~n", - [N,X]); -print_trc(Out,Trace,N) -> - Ts = element(size(Trace),Trace), - io:format(Out, - "~w: ~s~n" - "Trace : ~p~n~n", - [N,ts(Ts),Trace]). -ts({_, _, Micro} = Now) -> - {{Y,M,D},{H,Min,S}} = calendar:now_to_local_time(Now), - io_lib:format("~4.4.0w-~2.2.0w-~2.2.0w ~2.2.0w:~2.2.0w:~2.2.0w,~6.6.0w", - [Y,M,D,H,Min,S,Micro]). - - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Start slave/peer nodes (initiated by test_server:start_node/5) -%%% -start_node(SlaveName, slave, Options, From, TI) when is_list(SlaveName) -> - start_node_slave(list_to_atom(SlaveName), Options, From, TI); -start_node(SlaveName, slave, Options, From, TI) -> - start_node_slave(SlaveName, Options, From, TI); -start_node(SlaveName, peer, Options, From, TI) when is_atom(SlaveName) -> - start_node_peer(atom_to_list(SlaveName), Options, From, TI); -start_node(SlaveName, peer, Options, From, TI) -> - start_node_peer(SlaveName, Options, From, TI); -start_node(_SlaveName, _Type, _Options, _From, _TI) -> - not_implemented_yet. 
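For reference, terms that the trace information file consulted by trc/1 above could contain, and what parse_trace_info/1 turns them into before add_nodes/3 applies them via ttb (the file content is invented):

    {tp,  lists, seq, 2, []}.   %% kept as {tp,lists,seq,2,[]}   -> ttb:tp(lists, seq, 2, [])
    {tpl, ets, '_', []}.        %% becomes {tpl,ets,'_','_',[]}  -> ttb:tpl(ets, '_', '_', [])
    {ctp, lists}.               %% becomes {ctp,lists,'_','_'}   -> ttb:ctp(lists, '_', '_')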
- -%% -%% Peer nodes are always started on the same host as test_server_ctrl -%% -%% (Socket communication is used since in early days the test target -%% and the test server controller node could be on different hosts and -%% the target could not know the controller node via erlang -%% distribution) -%% -start_node_peer(SlaveName, OptList, From, TI) -> - SuppliedArgs = start_node_get_option_value(args, OptList, []), - Cleanup = start_node_get_option_value(cleanup, OptList, true), - HostStr = test_server_sup:hoststr(), - {ok,LSock} = gen_tcp:listen(0,[binary, - {reuseaddr,true}, - {packet,2}]), - {ok,WaitPort} = inet:port(LSock), - NodeStarted = lists:concat([" -s ", ?MODULE, " node_started ", - HostStr, " ", WaitPort]), - - % Support for erl_crash_dump files.. - CrashFile = filename:join([TI#target_info.test_server_dir, - "erl_crash_dump."++cast_to_list(SlaveName)]), - CrashArgs = lists:concat([" -env ERL_CRASH_DUMP \"",CrashFile,"\" "]), - FailOnError = start_node_get_option_value(fail_on_error, OptList, true), - Pa = TI#target_info.test_server_dir, - Prog0 = start_node_get_option_value(erl, OptList, default), - Prog = quote_progname(pick_erl_program(Prog0)), - Args = - case string:str(SuppliedArgs,"-setcookie") of - 0 -> "-setcookie " ++ TI#target_info.cookie ++ " " ++ SuppliedArgs; - _ -> SuppliedArgs - end, - Cmd = lists:concat([Prog, - " -detached ", - TI#target_info.naming, " ", SlaveName, - " -pa \"", Pa,"\"", - NodeStarted, - CrashArgs, - " ", Args]), - Opts = case start_node_get_option_value(env, OptList, []) of - [] -> []; - Env -> [{env, Env}] - end, - %% peer is always started on localhost - %% - %% Bad environment can cause open port to fail. If this happens, - %% we ignore it and let the testcase handle the situation... - catch open_port({spawn, Cmd}, [stream|Opts]), - - Tmo = 60000 * test_server:timetrap_scale_factor(), - - case start_node_get_option_value(wait, OptList, true) of - true -> - Ret = wait_for_node_started(LSock,Tmo,undefined,Cleanup,TI,self()), - case {Ret,FailOnError} of - {{{ok, Node}, Warning},_} -> - gen_server:reply(From,{{ok,Node},HostStr,Cmd,[],Warning}); - {_,false} -> - gen_server:reply(From,{Ret, HostStr, Cmd}); - {_,true} -> - gen_server:reply(From,{fail,{Ret, HostStr, Cmd}}) - end; - false -> - Nodename = list_to_atom(SlaveName ++ "@" ++ HostStr), - I = "=== Not waiting for node", - gen_server:reply(From,{{ok, Nodename}, HostStr, Cmd, I, []}), - Self = self(), - spawn_link( - fun() -> - wait_for_node_started(LSock,Tmo,undefined, - Cleanup,TI,Self), - receive after infinity -> ok end - end), - ok - end. - -%% -%% Slave nodes are started on a remote host if -%% - the option remote is given when calling test_server:start_node/3 -%% -start_node_slave(SlaveName, OptList, From, TI) -> - SuppliedArgs = start_node_get_option_value(args, OptList, []), - Cleanup = start_node_get_option_value(cleanup, OptList, true), - - CrashFile = filename:join([TI#target_info.test_server_dir, - "erl_crash_dump."++cast_to_list(SlaveName)]), - CrashArgs = lists:concat([" -env ERL_CRASH_DUMP \"",CrashFile,"\" "]), - Pa = TI#target_info.test_server_dir, - Args = lists:concat([" -pa \"", Pa, "\" ", SuppliedArgs, CrashArgs]), - - Prog0 = start_node_get_option_value(erl, OptList, default), - Prog = pick_erl_program(Prog0), - Ret = - case start_which_node(OptList) of - {error,Reason} -> {{error,Reason},undefined,undefined}; - Host0 -> do_start_node_slave(Host0,SlaveName,Args,Prog,Cleanup) - end, - gen_server:reply(From,Ret). 
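From a test suite, these node starts were normally reached through test_server:start_node/3; a hedged sketch only, since the public wrapper is not part of this module (the option names follow the OptList handled above, and the node name and arguments are invented):

    {ok, Node} = test_server:start_node(example_peer, peer,
                                        [{args, "-env EXAMPLE_VAR 1"},
                                         {wait, true},
                                         {cleanup, true}]),
    pong = net_adm:ping(Node),
    test_server:stop_node(Node).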
- - -do_start_node_slave(Host0, SlaveName, Args, Prog, Cleanup) -> - Host = - case Host0 of - local -> test_server_sup:hoststr(); - _ -> cast_to_list(Host0) - end, - Cmd = Prog ++ " " ++ Args, - case slave:start(Host, SlaveName, Args, no_link, Prog) of - {ok,Nodename} -> - case Cleanup of - true -> ets:insert(slave_tab,#slave_info{name=Nodename}); - false -> ok - end, - {{ok,Nodename}, Host, Cmd, [], []}; - Ret -> - {Ret, Host, Cmd} - end. - - -wait_for_node_started(LSock,Timeout,Client,Cleanup,TI,CtrlPid) -> - case gen_tcp:accept(LSock,Timeout) of - {ok,Sock} -> - gen_tcp:close(LSock), - receive - {tcp,Sock,Started0} when is_binary(Started0) -> - case unpack(Started0) of - error -> - gen_tcp:close(Sock), - {error, connection_closed}; - {ok,Started} -> - Version = TI#target_info.otp_release, - VsnStr = TI#target_info.system_version, - {ok,Nodename, W} = - handle_start_node_return(Version, - VsnStr, - Started), - case Cleanup of - true -> - ets:insert(slave_tab,#slave_info{name=Nodename, - socket=Sock, - client=Client}); - false -> ok - end, - gen_tcp:controlling_process(Sock,CtrlPid), - test_server_ctrl:node_started(Nodename), - {{ok,Nodename},W} - end; - {tcp_closed,Sock} -> - gen_tcp:close(Sock), - {error, connection_closed} - after Timeout -> - gen_tcp:close(Sock), - {error, timeout} - end; - {error,Reason} -> - gen_tcp:close(LSock), - {error, {no_connection,Reason}} - end. - - - -handle_start_node_return(Version,VsnStr,{started, Node, Version, VsnStr}) -> - {ok, Node, []}; -handle_start_node_return(Version,VsnStr,{started, Node, OVersion, OVsnStr}) -> - Str = io_lib:format("WARNING: Started node " - "reports different system " - "version than current node! " - "Current node version: ~p, ~p " - "Started node version: ~p, ~p", - [Version, VsnStr, - OVersion, OVsnStr]), - Str1 = lists:flatten(Str), - {ok, Node, Str1}. - - -%% -%% This function executes on the new node -%% -node_started([Host,PortAtom]) -> - %% Must spawn a new process because the boot process should not - %% hang forever!! - spawn(fun() -> node_started(Host,PortAtom) end). - -%% This process hangs forever, just waiting for the socket to be -%% closed and terminating the node -node_started(Host,PortAtom) -> - {_, Version} = init:script_id(), - VsnStr = erlang:system_info(system_version), - Port = list_to_integer(atom_to_list(PortAtom)), - case catch gen_tcp:connect(Host,Port, [binary, - {reuseaddr,true}, - {packet,2}]) of - - {ok,Sock} -> - Started = term_to_binary({started, node(), Version, VsnStr}), - ok = gen_tcp:send(Sock, [1|Started]), - receive _Anyting -> - gen_tcp:close(Sock), - erlang:halt() - end; - _else -> - erlang:halt() - end. - - - - - -% start_which_node(Optlist) -> hostname -start_which_node(Optlist) -> - case start_node_get_option_value(remote, Optlist) of - undefined -> - local; - true -> - case find_remote_host() of - {error, Other} -> - {error, Other}; - RHost -> - RHost - end - end. - -find_remote_host() -> - HostList=test_server_ctrl:get_hosts(), - case lists:delete(test_server_sup:hoststr(), HostList) of - [] -> - {error, no_remote_hosts}; - [RHost|_Rest] -> - RHost - end. - -start_node_get_option_value(Key, List) -> - start_node_get_option_value(Key, List, undefined). - -start_node_get_option_value(Key, List, Default) -> - case lists:keysearch(Key, 1, List) of - {value, {Key, Value}} -> - Value; - false -> - Default - end. 
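start_node_get_option_value/3 above is a plain lists:keysearch wrapper, for example:

    %% start_node_get_option_value(wait, [{args,"-pa ."},{wait,false}], true) -> false
    %% start_node_get_option_value(erl,  [{args,"-pa ."}], default)           -> default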
- - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% stop_node(Name) -> ok | {error,Reason} -%% -%% Clean up - test_server will stop this node -stop_node(Name) -> - case ets:lookup(slave_tab,Name) of - [#slave_info{}] -> - ets:delete(slave_tab,Name), - ok; - [] -> - {error, not_a_slavenode} - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% kill_nodes() -> ok -%% -%% Brutally kill all slavenodes that were not stopped by test_server -kill_nodes() -> - case ets:match_object(slave_tab,'_') of - [] -> []; - List -> - lists:map(fun(SI) -> kill_node(SI) end, List) - end. - -kill_node(SI) -> - Name = SI#slave_info.name, - ets:delete(slave_tab,Name), - case SI#slave_info.socket of - undefined -> - catch rpc:call(Name,erlang,halt,[]); - Sock -> - gen_tcp:close(Sock) - end, - Name. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% cast_to_list(X) -> string() -%%% X = list() | atom() | void() -%%% Returns a string representation of whatever was input - -cast_to_list(X) when is_list(X) -> X; -cast_to_list(X) when is_atom(X) -> atom_to_list(X); -cast_to_list(X) -> lists:flatten(io_lib:format("~w", [X])). - - -%%% L contains elements of the forms -%%% {prog, String} -%%% {release, Rel} where Rel = String | latest | previous -%%% this -%%% -pick_erl_program(default) -> - cast_to_list(lib:progname()); -pick_erl_program(L) -> - P = random_element(L), - case P of - {prog, S} -> - S; - {release, S} -> - find_release(S); - this -> - cast_to_list(lib:progname()) - end. - -%% This is an attempt to distinguish between spaces in the program -%% path and spaces that separate arguments. The program is quoted to -%% allow spaces in the path. -%% -%% Arguments could exist either if the executable is excplicitly given -%% ({prog,String}) or if the -program switch to beam is used and -%% includes arguments (typically done by cerl in OTP test environment -%% in order to ensure that slave/peer nodes are started with the same -%% emulator and flags as the test node. The return from lib:progname() -%% could then typically be '/<full_path_to>/cerl -gcov'). -quote_progname(Progname) -> - do_quote_progname(string:tokens(Progname," ")). - -do_quote_progname([Prog]) -> - "\""++Prog++"\""; -do_quote_progname([Prog,Arg|Args]) -> - case os:find_executable(Prog) of - false -> - do_quote_progname([Prog++" "++Arg | Args]); - _ -> - %% this one has an executable - we assume the rest are arguments - "\""++Prog++"\""++ - lists:flatten(lists:map(fun(X) -> [" ",X] end, [Arg|Args])) - end. - -random_element(L) -> - random:seed(os:timestamp()), - lists:nth(random:uniform(length(L)), L). - -find_release(latest) -> - "/usr/local/otp/releases/latest/bin/erl"; -find_release(previous) -> - "kaka"; -find_release(Rel) -> - find_release(os:type(), Rel). - -find_release({unix,sunos}, Rel) -> - case os:cmd("uname -p") of - "sparc" ++ _ -> - "/usr/local/otp/releases/otp_beam_solaris8_" ++ Rel ++ "/bin/erl"; - _ -> - none - end; -find_release({unix,linux}, Rel) -> - Candidates = find_rel_linux(Rel), - case lists:dropwhile(fun(N) -> - not filelib:is_regular(N) - end, Candidates) of - [] -> none; - [Erl|_] -> Erl - end; -find_release(_, _) -> none. - -find_rel_linux(Rel) -> - case suse_release() of - none -> []; - SuseRel -> find_rel_suse(Rel, SuseRel) - end. - -find_rel_suse(Rel, SuseRel) -> - Root = "/usr/local/otp/releases/sles", - case SuseRel of - "11" -> - %% Try both SuSE 11, SuSE 10 and SuSe 9 in that order. 
- find_rel_suse_1(Rel, Root++"11") ++ - find_rel_suse_1(Rel, Root++"10") ++ - find_rel_suse_1(Rel, Root++"9"); - "10" -> - %% Try both SuSE 10 and SuSe 9 in that order. - find_rel_suse_1(Rel, Root++"10") ++ - find_rel_suse_1(Rel, Root++"9"); - "9" -> - find_rel_suse_1(Rel, Root++"9"); - _ -> - [] - end. - -find_rel_suse_1(Rel, RootWc) -> - case erlang:system_info(wordsize) of - 4 -> - find_rel_suse_2(Rel, RootWc++"_32"); - 8 -> - find_rel_suse_2(Rel, RootWc++"_64") ++ - find_rel_suse_2(Rel, RootWc++"_32") - end. - -find_rel_suse_2(Rel, RootWc) -> - RelDir = filename:dirname(RootWc), - Pat = filename:basename(RootWc ++ "_" ++ Rel) ++ ".*", - case file:list_dir(RelDir) of - {ok,Dirs} -> - case lists:filter(fun(Dir) -> - case re:run(Dir, Pat) of - nomatch -> false; - _ -> true - end - end, Dirs) of - [] -> - []; - [R|_] -> - [filename:join([RelDir,R,"bin","erl"])] - end; - _ -> - [] - end. - -%% suse_release() -> VersionString | none. -%% Return the major SuSE version number for this platform or -%% 'none' if this is not a SuSE platform. -suse_release() -> - case file:open("/etc/SuSE-release", [read]) of - {ok,Fd} -> - try - suse_release(Fd) - after - file:close(Fd) - end; - {error,_} -> none - end. - -suse_release(Fd) -> - case io:get_line(Fd, '') of - eof -> none; - Line when is_list(Line) -> - case re:run(Line, "^VERSION\\s*=\\s*(\\d+)\s*", - [{capture,all_but_first,list}]) of - nomatch -> - suse_release(Fd); - {match,[Version]} -> - Version - end - end. - -unpack(Bin) -> - {One,Term} = split_binary(Bin, 1), - case binary_to_list(One) of - [1] -> - case catch {ok,binary_to_term(Term)} of - {'EXIT',_} -> error; - {ok,_}=Res -> Res - end; - _ -> error - end. - -id(I) -> I. - -print_data(Port) -> - receive - {Port, {data, Bytes}} -> - io:put_chars(Bytes), - print_data(Port); - {Port, eof} -> - Port ! {self(), close}, - receive - {Port, closed} -> - true - end, - receive - {'EXIT', Port, _} -> - ok - after 1 -> % force context switch - ok - end - end. diff --git a/lib/test_server/src/test_server_sup.erl b/lib/test_server/src/test_server_sup.erl deleted file mode 100644 index fc2cfd57bd..0000000000 --- a/lib/test_server/src/test_server_sup.erl +++ /dev/null @@ -1,939 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1998-2014. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------- -%%% Purpose: Test server support functions. -%%%------------------------------------------------------------------- --module(test_server_sup). 
--export([timetrap/2, timetrap/3, timetrap/4, - timetrap_cancel/1, capture_get/1, messages_get/1, - timecall/3, call_crash/5, app_test/2, check_new_crash_dumps/0, - cleanup_crash_dumps/0, crash_dump_dir/0, tar_crash_dumps/0, - get_username/0, get_os_family/0, - hostatom/0, hostatom/1, hoststr/0, hoststr/1, - framework_call/2,framework_call/3,framework_call/4, - format_loc/1, - util_start/0, util_stop/0, unique_name/0, - call_trace/1, - appup_test/1]). --include("test_server_internal.hrl"). --define(crash_dump_tar,"crash_dumps.tar.gz"). --define(src_listing_ext, ".src.html"). --record(util_state, {starter, latest_name}). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timetrap(Timeout,Scale,Pid) -> Handle -%% Handle = term() -%% -%% Creates a time trap, that will kill the given process if the -%% trap is not cancelled with timetrap_cancel/1, within Timeout -%% milliseconds. -%% Scale says if the time should be scaled up to compensate for -%% delays during the test (e.g. if cover is running). - -timetrap(Timeout0, Pid) -> - timetrap(Timeout0, Timeout0, true, Pid). - -timetrap(Timeout0, Scale, Pid) -> - timetrap(Timeout0, Timeout0, Scale, Pid). - -timetrap(Timeout0, ReportTVal, Scale, Pid) -> - process_flag(priority, max), - Timeout = if not Scale -> Timeout0; - true -> test_server:timetrap_scale_factor() * Timeout0 - end, - TruncTO = trunc(Timeout), - receive - after TruncTO -> - kill_the_process(Pid, Timeout0, TruncTO, ReportTVal) - end. - -kill_the_process(Pid, Timeout0, TruncTO, ReportTVal) -> - case is_process_alive(Pid) of - true -> - TimeToReport = if Timeout0 == ReportTVal -> TruncTO; - true -> ReportTVal end, - MFLs = test_server:get_loc(Pid), - Mon = erlang:monitor(process, Pid), - Trap = {timetrap_timeout,TimeToReport,MFLs}, - exit(Pid, Trap), - receive - {'DOWN', Mon, process, Pid, _} -> - ok - after 10000 -> - %% Pid is probably trapping exits, hit it harder... - catch error_logger:warning_msg( - "Testcase process ~w not " - "responding to timetrap " - "timeout:~n" - " ~p.~n" - "Killing testcase...~n", - [Pid, Trap]), - exit(Pid, kill) - end; - false -> - ok - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% timetrap_cancel(Handle) -> ok -%% Handle = term() -%% -%% Cancels a time trap. -timetrap_cancel(Handle) -> - unlink(Handle), - MonRef = erlang:monitor(process, Handle), - exit(Handle, kill), - receive {'DOWN',MonRef,_,_,_} -> ok - after - 2000 -> - erlang:demonitor(MonRef, [flush]), - ok - end. - -capture_get(Msgs) -> - receive - {captured,Msg} -> - capture_get([Msg|Msgs]) - after 0 -> - lists:reverse(Msgs) - end. - -messages_get(Msgs) -> - receive - Msg -> - messages_get([Msg|Msgs]) - after 0 -> - lists:reverse(Msgs) - end. - -timecall(M, F, A) -> - {Elapsed, Val} = timer:tc(M, F, A), - {Elapsed / 1000000, Val}. - - -call_crash(Time,Crash,M,F,A) -> - OldTrapExit = process_flag(trap_exit,true), - Pid = spawn_link(M,F,A), - Answer = - receive - {'EXIT',Crash} -> - ok; - {'EXIT',Pid,Crash} -> - ok; - {'EXIT',_Reason} when Crash==any -> - ok; - {'EXIT',Pid,_Reason} when Crash==any -> - ok; - {'EXIT',Reason} -> - test_server:format(12, "Wrong crash reason. Wanted ~p, got ~p.", - [Crash, Reason]), - exit({wrong_crash_reason,Reason}); - {'EXIT',Pid,Reason} -> - test_server:format(12, "Wrong crash reason. 
Wanted ~p, got ~p.", - [Crash, Reason]), - exit({wrong_crash_reason,Reason}); - {'EXIT',OtherPid,Reason} when OldTrapExit == false -> - exit({'EXIT',OtherPid,Reason}) - after do_trunc(Time) -> - exit(call_crash_timeout) - end, - process_flag(trap_exit,OldTrapExit), - Answer. - -do_trunc(infinity) -> infinity; -do_trunc(T) -> trunc(T). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% app_test/2 -%% -%% Checks one applications .app file for obvious errors. -%% Checks.. -%% * .. required fields -%% * .. that all modules specified actually exists -%% * .. that all requires applications exists -%% * .. that no module included in the application has export_all -%% * .. that all modules in the ebin/ dir is included -%% (This only produce a warning, as all modules does not -%% have to be included (If the `pedantic' option isn't used)) -app_test(Application, Mode) -> - case is_app(Application) of - {ok, AppFile} -> - do_app_tests(AppFile, Application, Mode); - Error -> - test_server:fail(Error) - end. - -is_app(Application) -> - case file:consult(filename:join([code:lib_dir(Application),"ebin", - atom_to_list(Application)++".app"])) of - {ok, [{application, Application, AppFile}] } -> - {ok, AppFile}; - _ -> - test_server:format(minor, - "Application (.app) file not found, " - "or it has very bad syntax.~n"), - {error, not_an_application} - end. - - -do_app_tests(AppFile, AppName, Mode) -> - DictList= - [ - {missing_fields, []}, - {missing_mods, []}, - {superfluous_mods_in_ebin, []}, - {export_all_mods, []}, - {missing_apps, []} - ], - fill_dictionary(DictList), - - %% An appfile must (?) have some fields.. - check_fields([description, modules, registered, applications], AppFile), - - %% Check for missing and extra modules. - {value, {modules, Mods}}=lists:keysearch(modules, 1, AppFile), - EBinList=lists:sort(get_ebin_modnames(AppName)), - {Missing, Extra} = common(lists:sort(Mods), EBinList), - put(superfluous_mods_in_ebin, Extra), - put(missing_mods, Missing), - - %% Check that no modules in the application has export_all. - app_check_export_all(Mods), - - %% Check that all specified applications exists. - {value, {applications, Apps}}= - lists:keysearch(applications, 1, AppFile), - check_apps(Apps), - - A=check_dict(missing_fields, "Inconsistent app file, " - "missing fields"), - B=check_dict(missing_mods, "Inconsistent app file, " - "missing modules"), - C=check_dict_tolerant(superfluous_mods_in_ebin, "Inconsistent app file, " - "Modules not included in app file.", Mode), - D=check_dict(export_all_mods, "Inconsistent app file, " - "Modules have `export_all'."), - E=check_dict(missing_apps, "Inconsistent app file, " - "missing applications."), - - erase_dictionary(DictList), - case A+B+C+D+E of - 5 -> - ok; - _ -> - test_server:fail() - end. - -app_check_export_all([]) -> - ok; -app_check_export_all([Mod|Mods]) -> - case catch apply(Mod, module_info, [compile]) of - {'EXIT', {undef,_}} -> - app_check_export_all(Mods); - COpts -> - case lists:keysearch(options, 1, COpts) of - false -> - app_check_export_all(Mods); - {value, {options, List}} -> - case lists:member(export_all, List) of - true -> - put(export_all_mods, [Mod|get(export_all_mods)]), - app_check_export_all(Mods); - false -> - app_check_export_all(Mods) - end - end - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% appup_test/1 -%% -%% Checks one applications .appup file for obvious errors. -%% Checks.. -%% * .. syntax -%% * .. 
that version in app file matches appup file version -%% * .. validity of appup instructions -%% -%% For library application this function checks that the proper -%% 'restart_application' upgrade and downgrade clauses exist. -appup_test(Application) -> - case is_app(Application) of - {ok, AppFile} -> - case is_appup(Application, proplists:get_value(vsn, AppFile)) of - {ok, Up, Down} -> - StartMod = proplists:get_value(mod, AppFile), - Modules = proplists:get_value(modules, AppFile), - do_appup_tests(StartMod, Application, Up, Down, Modules); - Error -> - test_server:fail(Error) - end; - Error -> - test_server:fail(Error) - end. - -is_appup(Application, Version) -> - AppupFile = atom_to_list(Application) ++ ".appup", - AppupPath = filename:join([code:lib_dir(Application), "ebin", AppupFile]), - case file:consult(AppupPath) of - {ok, [{Version, Up, Down}]} when is_list(Up), is_list(Down) -> - {ok, Up, Down}; - _ -> - test_server:format( - minor, - "Application upgrade (.appup) file not found, " - "or it has very bad syntax.~n"), - {error, appup_not_readable} - end. - -do_appup_tests(undefined, Application, Up, Down, _Modules) -> - %% library application - case Up of - [{<<".*">>, [{restart_application, Application}]}] -> - case Down of - [{<<".*">>, [{restart_application, Application}]}] -> - ok; - _ -> - test_server:format( - minor, - "Library application needs restart_application " - "downgrade instruction.~n"), - {error, library_downgrade_instruction_malformed} - end; - _ -> - test_server:format( - minor, - "Library application needs restart_application " - "upgrade instruction.~n"), - {error, library_upgrade_instruction_malformed} - end; -do_appup_tests(_, _Application, Up, Down, Modules) -> - %% normal application - case check_appup_clauses_plausible(Up, up, Modules) of - ok -> - case check_appup_clauses_plausible(Down, down, Modules) of - ok -> - test_server:format(minor, "OK~n"); - Error -> - test_server:format(minor, "ERROR ~p~n", [Error]), - test_server:fail(Error) - end; - Error -> - test_server:format(minor, "ERROR ~p~n", [Error]), - test_server:fail(Error) - end. - -check_appup_clauses_plausible([], _Direction, _Modules) -> - ok; -check_appup_clauses_plausible([{Re, Instrs} | Rest], Direction, Modules) - when is_binary(Re) -> - case re:compile(Re) of - {ok, _} -> - case check_appup_instructions(Instrs, Direction, Modules) of - ok -> - check_appup_clauses_plausible(Rest, Direction, Modules); - Error -> - Error - end; - {error, Error} -> - {error, {version_regex_malformed, Re, Error}} - end; -check_appup_clauses_plausible([{V, Instrs} | Rest], Direction, Modules) - when is_list(V) -> - case check_appup_instructions(Instrs, Direction, Modules) of - ok -> - check_appup_clauses_plausible(Rest, Direction, Modules); - Error -> - Error - end; -check_appup_clauses_plausible(Clause, _Direction, _Modules) -> - {error, {clause_malformed, Clause}}. - -check_appup_instructions(Instrs, Direction, Modules) -> - case check_instructions(Direction, Instrs, Instrs, [], [], Modules) of - {_Good, []} -> - ok; - {_, Bad} -> - {error, {bad_instructions, Bad}} - end. - -check_instructions(_, [], _, Good, Bad, _) -> - {lists:reverse(Good), lists:reverse(Bad)}; -check_instructions(UpDown, [Instr | Rest], All, Good, Bad, Modules) -> - case catch check_instruction(UpDown, Instr, All, Modules) of - ok -> - check_instructions(UpDown, Rest, All, [Instr | Good], Bad, Modules); - {error, Reason} -> - NewBad = [{Instr, Reason} | Bad], - check_instructions(UpDown, Rest, All, Good, NewBad, Modules) - end. 
- -check_instruction(up, {add_module, Module}, _, Modules) -> - %% A new module is added - check_module(Module, Modules); -check_instruction(down, {add_module, Module}, _, Modules) -> - %% An old module is re-added - case (catch check_module(Module, Modules)) of - {error, {unknown_module, Module, Modules}} -> ok; - ok -> throw({error, {existing_readded_module, Module}}) - end; -check_instruction(_, {load_module, Module}, _, Modules) -> - check_module(Module, Modules); -check_instruction(_, {load_module, Module, DepMods}, _, Modules) -> - check_module(Module, Modules), - check_depend(DepMods); -check_instruction(_, {load_module, Module, Pre, Post, DepMods}, _, Modules) -> - check_module(Module, Modules), - check_depend(DepMods), - check_purge(Pre), - check_purge(Post); -check_instruction(up, {delete_module, Module}, _, Modules) -> - case (catch check_module(Module, Modules)) of - {error, {unknown_module, Module, Modules}} -> - ok; - ok -> - throw({error,{existing_module_deleted, Module}}) - end; -check_instruction(down, {delete_module, Module}, _, Modules) -> - check_module(Module, Modules); -check_instruction(_, {update, Module}, _, Modules) -> - check_module(Module, Modules); -check_instruction(_, {update, Module, supervisor}, _, Modules) -> - check_module(Module, Modules); -check_instruction(_, {update, Module, DepMods}, _, Modules) - when is_list(DepMods) -> - check_module(Module, Modules); -check_instruction(_, {update, Module, Change}, _, Modules) -> - check_module(Module, Modules), - check_change(Change); -check_instruction(_, {update, Module, Change, DepMods}, _, Modules) -> - check_module(Module, Modules), - check_change(Change), - check_depend(DepMods); -check_instruction(_, {update, Module, Change, Pre, Post, DepMods}, _, Modules) -> - check_module(Module, Modules), - check_change(Change), - check_purge(Pre), - check_purge(Post), - check_depend(DepMods); -check_instruction(_, - {update, Module, Timeout, Change, Pre, Post, DepMods}, - _, - Modules) -> - check_module(Module, Modules), - check_timeout(Timeout), - check_change(Change), - check_purge(Pre), - check_purge(Post), - check_depend(DepMods); -check_instruction(_, - {update, Module, ModType, Timeout, Change, Pre, Post, DepMods}, - _, - Modules) -> - check_module(Module, Modules), - check_mod_type(ModType), - check_timeout(Timeout), - check_change(Change), - check_purge(Pre), - check_purge(Post), - check_depend(DepMods); -check_instruction(_, {restart_application, Application}, _, _) -> - check_application(Application); -check_instruction(_, {remove_application, Application}, _, _) -> - check_application(Application); -check_instruction(_, {add_application, Application}, _, _) -> - check_application(Application); -check_instruction(_, {add_application, Application, Type}, _, _) -> - check_application(Application), - check_restart_type(Type); -check_instruction(_, Instr, _, _) -> - throw({error, {low_level_or_invalid_instruction, Instr}}). - -check_module(Module, Modules) -> - case {is_atom(Module), lists:member(Module, Modules)} of - {true, true} -> ok; - {true, false} -> throw({error, {unknown_module, Module}}); - {false, _} -> throw({error, {bad_module, Module}}) - end. - -check_application(App) -> - case is_atom(App) of - true -> ok; - false -> throw({error, {bad_application, App}}) - end. - -check_depend(Dep) when is_list(Dep) -> ok; -check_depend(Dep) -> throw({error, {bad_depend, Dep}}). 
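An .appup term of the shape that check_appup_clauses_plausible/3 above accepts for a normal application (application, module and version names are invented; example_mod and example_srv must also appear in the modules list of the .app file, and "1.1" must match its vsn):

    {"1.1",
     %% upgrade from 1.0
     [{"1.0", [{load_module, example_mod},
               {update, example_srv, {advanced,[]}, soft_purge, soft_purge, []}]}],
     %% downgrade to 1.0
     [{"1.0", [{load_module, example_mod},
               {update, example_srv, {advanced,[]}, soft_purge, soft_purge, []}]}]}.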
- -check_restart_type(permanent) -> ok; -check_restart_type(transient) -> ok; -check_restart_type(temporary) -> ok; -check_restart_type(load) -> ok; -check_restart_type(none) -> ok; -check_restart_type(Type) -> throw({error, {bad_restart_type, Type}}). - -check_timeout(T) when is_integer(T), T > 0 -> ok; -check_timeout(default) -> ok; -check_timeout(infinity) -> ok; -check_timeout(T) -> throw({error, {bad_timeout, T}}). - -check_mod_type(static) -> ok; -check_mod_type(dynamic) -> ok; -check_mod_type(Type) -> throw({error, {bad_mod_type, Type}}). - -check_purge(soft_purge) -> ok; -check_purge(brutal_purge) -> ok; -check_purge(Purge) -> throw({error, {bad_purge, Purge}}). - -check_change(soft) -> ok; -check_change({advanced, _}) -> ok; -check_change(Change) -> throw({error, {bad_change, Change}}). - -%% Given two sorted lists, L1 and L2, returns {NotInL2, NotInL1}, -%% NotInL2 is the elements of L1 which don't occurr in L2, -%% NotInL1 is the elements of L2 which don't ocurr in L1. - -common(L1, L2) -> - common(L1, L2, [], []). - -common([X|Rest1], [X|Rest2], A1, A2) -> - common(Rest1, Rest2, A1, A2); -common([X|Rest1], [Y|Rest2], A1, A2) when X < Y -> - common(Rest1, [Y|Rest2], [X|A1], A2); -common([X|Rest1], [Y|Rest2], A1, A2) -> - common([X|Rest1], Rest2, A1, [Y|A2]); -common([], L, A1, A2) -> - {A1, L++A2}; -common(L, [], A1, A2) -> - {L++A1, A2}. - -check_apps([]) -> - ok; -check_apps([App|Apps]) -> - case is_app(App) of - {ok, _AppFile} -> - ok; - {error, _} -> - put(missing_apps, [App|get(missing_apps)]) - end, - check_apps(Apps). - -check_fields([], _AppFile) -> - ok; -check_fields([L|Ls], AppFile) -> - check_field(L, AppFile), - check_fields(Ls, AppFile). - -check_field(FieldName, AppFile) -> - case lists:keymember(FieldName, 1, AppFile) of - true -> - ok; - false -> - put(missing_fields, [FieldName|get(missing_fields)]), - ok - end. - -check_dict(Dict, Reason) -> - case get(Dict) of - [] -> - 1; % All ok. - List -> - io:format("** ~ts (~ts) ->~n~p~n",[Reason, Dict, List]), - 0 - end. - -check_dict_tolerant(Dict, Reason, Mode) -> - case get(Dict) of - [] -> - 1; % All ok. - List -> - io:format("** ~ts (~ts) ->~n~p~n",[Reason, Dict, List]), - case Mode of - pedantic -> - 0; - _ -> - 1 - end - end. - -get_ebin_modnames(AppName) -> - Wc=filename:join([code:lib_dir(AppName),"ebin", - "*"++code:objfile_extension()]), - TheFun=fun(X, Acc) -> - [list_to_atom(filename:rootname( - filename:basename(X)))|Acc] end, - _Files=lists:foldl(TheFun, [], filelib:wildcard(Wc)). - -%% -%% This function removes any erl_crash_dump* files found in the -%% test server directory. Done only once when the test server -%% is started. -%% -cleanup_crash_dumps() -> - Dir = crash_dump_dir(), - Dumps = filelib:wildcard(filename:join(Dir, "erl_crash_dump*")), - delete_files(Dumps). - -crash_dump_dir() -> - filename:dirname(code:which(?MODULE)). - -tar_crash_dumps() -> - Dir = crash_dump_dir(), - case filelib:wildcard(filename:join(Dir, "erl_crash_dump*")) of - [] -> {error,no_crash_dumps}; - Dumps -> - TarFileName = filename:join(Dir,?crash_dump_tar), - {ok,Tar} = erl_tar:open(TarFileName,[write,compressed]), - lists:foreach( - fun(File) -> - ok = erl_tar:add(Tar,File,filename:basename(File),[]) - end, - Dumps), - ok = erl_tar:close(Tar), - delete_files(Dumps), - {ok,TarFileName} - end. 
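A short illustration of the common/2 helper defined above. It expects both input lists to be sorted and returns the elements unique to each side; the accumulators are not re-sorted, so the result order is reversed. The function is local to this module, so the call below is only a sketch of its contract:

    common([1,2,4,6], [2,3,6,7])
    %% -> {[4,1],[7,3]}   (1 and 4 occur only in the first list,
    %%                     3 and 7 only in the second)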
- - -check_new_crash_dumps() -> - Dir = crash_dump_dir(), - Dumps = filelib:wildcard(filename:join(Dir, "erl_crash_dump*")), - case length(Dumps) of - 0 -> - ok; - Num -> - test_server_ctrl:format(minor, - "Found ~w crash dumps:~n", [Num]), - append_files_to_logfile(Dumps), - delete_files(Dumps) - end. - -append_files_to_logfile([]) -> ok; -append_files_to_logfile([File|Files]) -> - NodeName=from($., File), - test_server_ctrl:format(minor, "Crash dump from node ~tp:~n",[NodeName]), - Fd=get(test_server_minor_fd), - case file:read_file(File) of - {ok, Bin} -> - case file:write(Fd, Bin) of - ok -> - ok; - {error,Error} -> - %% Write failed. The following io:format/3 will probably also - %% fail, but in that case it will throw an exception so that - %% we will be aware of the problem. - io:format(Fd, "Unable to write the crash dump " - "to this file: ~p~n", [file:format_error(Error)]) - end; - _Error -> - io:format(Fd, "Failed to read: ~ts\n", [File]) - end, - append_files_to_logfile(Files). - -delete_files([]) -> ok; -delete_files([File|Files]) -> - io:format("Deleting file: ~ts~n", [File]), - case file:delete(File) of - {error, _} -> - case file:rename(File, File++".old") of - {error, Error} -> - io:format("Could neither delete nor rename file " - "~ts: ~ts.~n", [File, Error]); - _ -> - ok - end; - _ -> - ok - end, - delete_files(Files). - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% erase_dictionary(Vars) -> ok -%% Vars = [atom(),...] -%% -%% Takes a list of dictionary keys, KeyVals, erases -%% each key and returns ok. -erase_dictionary([{Var, _Val}|Vars]) -> - erase(Var), - erase_dictionary(Vars); -erase_dictionary([]) -> - ok. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% fill_dictionary(KeyVals) -> void() -%% KeyVals = [{atom(),term()},...] -%% -%% Takes each Key-Value pair, and inserts it in the process dictionary. -fill_dictionary([{Var,Val}|Vars]) -> - put(Var,Val), - fill_dictionary(Vars); -fill_dictionary([]) -> - []. - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% get_username() -> UserName -%% -%% Returns the current user -get_username() -> - getenv_any(["USER","USERNAME"]). - -getenv_any([Key|Rest]) -> - case catch os:getenv(Key) of - String when is_list(String) -> String; - false -> getenv_any(Rest) - end; -getenv_any([]) -> "". - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% get_os_family() -> OsFamily -%% -%% Returns the OS family -get_os_family() -> - {OsFamily,_OsName} = os:type(), - OsFamily. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% hostatom()/hostatom(Node) -> Host; atom() -%% hoststr() | hoststr(Node) -> Host; string() -%% -%% Returns the OS family -hostatom() -> - hostatom(node()). -hostatom(Node) -> - list_to_atom(hoststr(Node)). -hoststr() -> - hoststr(node()). -hoststr(Node) when is_atom(Node) -> - hoststr(atom_to_list(Node)); -hoststr(Node) when is_list(Node) -> - from($@, Node). - -from(H, [H | T]) -> T; -from(H, [_ | T]) -> from(H, T); -from(_H, []) -> []. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% framework_call(Callback,Func,Args,DefaultReturn) -> Return | DefaultReturn -%% -%% Calls the given Func in Callback -framework_call(Func,Args) -> - framework_call(Func,Args,ok). -framework_call(Func,Args,DefaultReturn) -> - CB = os:getenv("TEST_SERVER_FRAMEWORK"), - framework_call(CB,Func,Args,DefaultReturn). 
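framework_call/4 (next) dispatches such calls to the module named by the TEST_SERVER_FRAMEWORK environment variable. A minimal sketch of the mechanism as used from within this module, with a hypothetical callback module my_fw and hook my_hook:

    %% my_fw and my_hook are placeholders; my_fw only has to be loadable
    os:putenv("TEST_SERVER_FRAMEWORK", "my_fw"),
    %% If my_fw:my_hook/1 is exported, this calls it and returns its result;
    %% otherwise the default value 'not_handled' comes back.  An exception in
    %% the callback is converted to exit({fw_error,{my_fw,my_hook,Reason}}).
    Result = framework_call(my_hook, [some_arg], not_handled).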
-framework_call(FW,_Func,_Args,DefaultReturn) - when FW =:= false; FW =:= "undefined" -> - DefaultReturn; -framework_call(Callback,Func,Args,DefaultReturn) -> - Mod = list_to_atom(Callback), - case code:is_loaded(Mod) of - false -> code:load_file(Mod); - _ -> ok - end, - case erlang:function_exported(Mod,Func,length(Args)) of - true -> - EH = fun(Reason) -> exit({fw_error,{Mod,Func,Reason}}) end, - SetTcState = case Func of - end_tc -> true; - init_tc -> true; - _ -> false - end, - case SetTcState of - true -> - test_server:set_tc_state({framework,Mod,Func}); - false -> - ok - end, - try apply(Mod,Func,Args) of - Result -> - Result - catch - exit:Why -> - EH(Why); - error:Why -> - EH({Why,erlang:get_stacktrace()}); - throw:Why -> - EH(Why) - end; - false -> - DefaultReturn - end. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% format_loc(Loc) -> string() -%% -%% Formats the printout of the line of code read from -%% process dictionary (test_server_loc). Adds link to -%% correct line in source code. -format_loc([{Mod,Func,Line}]) -> - [format_loc1({Mod,Func,Line})]; -format_loc([{Mod,Func,Line}|Rest]) -> - ["[",format_loc1({Mod,Func,Line}),",\n"|format_loc1(Rest)]; -format_loc([{Mod,LineOrFunc}]) -> - format_loc({Mod,LineOrFunc}); -format_loc({Mod,Func}) when is_atom(Func) -> - io_lib:format("{~w,~w}",[Mod,Func]); -format_loc(Loc) -> - io_lib:format("~p",[Loc]). - -format_loc1([{Mod,Func,Line}]) -> - [" ",format_loc1({Mod,Func,Line}),"]"]; -format_loc1([{Mod,Func,Line}|Rest]) -> - [" ",format_loc1({Mod,Func,Line}),",\n"|format_loc1(Rest)]; -format_loc1({Mod,Func,Line}) -> - ModStr = atom_to_list(Mod), - case {lists:member(no_src, get(test_server_logopts)), - lists:reverse(ModStr)} of - {false,[$E,$T,$I,$U,$S,$_|_]} -> - Link = if is_integer(Line) -> - integer_to_list(Line); - Line == last_expr -> - list_to_atom(atom_to_list(Func)++"-last_expr"); - is_atom(Line) -> - atom_to_list(Line); - true -> - Line - end, - io_lib:format("{~w,~w,<a href=\"~ts~ts#~s\">~w</a>}", - [Mod,Func, - test_server_ctrl:uri_encode(downcase(ModStr)), - ?src_listing_ext,Link,Line]); - _ -> - io_lib:format("{~w,~w,~w}",[Mod,Func,Line]) - end. - -downcase(S) -> downcase(S, []). -downcase([Uc|Rest], Result) when $A =< Uc, Uc =< $Z -> - downcase(Rest, [Uc-$A+$a|Result]); -downcase([C|Rest], Result) -> - downcase(Rest, [C|Result]); -downcase([], Result) -> - lists:reverse(Result). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% util_start() -> ok -%% -%% Start local utility process -util_start() -> - Starter = self(), - case whereis(?MODULE) of - undefined -> - spawn_link(fun() -> - register(?MODULE, self()), - util_loop(#util_state{starter=Starter}) - end); - _Pid -> - ok - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% util_stop() -> ok -%% -%% Stop local utility process -util_stop() -> - try (?MODULE ! {self(),stop}) of - _ -> - receive {?MODULE,stopped} -> ok - after 5000 -> exit(whereis(?MODULE), kill) - end - catch - _:_ -> - ok - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% unique_name() -> string() -%% -unique_name() -> - ?MODULE ! {self(),unique_name}, - receive {?MODULE,Name} -> Name - after 5000 -> exit({?MODULE,no_util_process}) - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% util_loop(State) -> ok -%% -util_loop(State) -> - receive - {From,unique_name} -> - Nr = erlang:unique_integer([positive]), - Name = integer_to_list(Nr), - if Name == State#util_state.latest_name -> - timer:sleep(1), - self() ! 
{From,unique_name}, - util_loop(State); - true -> - From ! {?MODULE,Name}, - util_loop(State#util_state{latest_name = Name}) - end; - {From,stop} -> - catch unlink(State#util_state.starter), - From ! {?MODULE,stopped}, - ok - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% call_trace(TraceSpecFile) -> ok -%% -%% Read terms on format {m,Mod} | {f,Mod,Func} -%% from TraceSpecFile and enable call trace for -%% specified functions. -call_trace(TraceSpec) -> - case catch try_call_trace(TraceSpec) of - {'EXIT',Reason} -> - erlang:display(Reason), - exit(Reason); - Ok -> - Ok - end. - -try_call_trace(TraceSpec) -> - case file:consult(TraceSpec) of - {ok,Terms} -> - dbg:tracer(), - %% dbg:p(self(), [p, m, sos, call]), - dbg:p(self(), [sos, call]), - lists:foreach(fun({m,M}) -> - case dbg:tpl(M,[{'_',[],[{return_trace}]}]) of - {error,What} -> exit({error,{tracing_failed,What}}); - _ -> ok - end; - ({f,M,F}) -> - case dbg:tpl(M,F,[{'_',[],[{return_trace}]}]) of - {error,What} -> exit({error,{tracing_failed,What}}); - _ -> ok - end; - (Huh) -> - exit({error,{unrecognized_trace_term,Huh}}) - end, Terms), - ok; - {_,Error} -> - exit({error,{tracing_failed,TraceSpec,Error}}) - end. - diff --git a/lib/test_server/src/things/distr_startup_SUITE.erl b/lib/test_server/src/things/distr_startup_SUITE.erl deleted file mode 100644 index aa84ab007f..0000000000 --- a/lib/test_server/src/things/distr_startup_SUITE.erl +++ /dev/null @@ -1,239 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2009. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(distr_startup_SUITE). --compile([export_all]). -%%-define(line_trace,1). --include("test_server.hrl"). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -all(suite) -> [reads,writes]. - --define(iterations,10000). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -app1() -> - {application, app1, - [{description, "ERTS CXC 138 10"}, - {vsn, "2.0"}, - {applications, [kernel, stdlib]}, - {mod, {ch_sup, {app1, 1, 3}}}]}. - -app3() -> - {application, app3, - [{description, "ERTS CXC 138 10"}, - {vsn, "2.0"}, - {applications, [kernel, stdlib]}, - {mod, {ch_sup, {app3, 7, 9}}}]}. - - -config(Fd,C1,C2,C3) -> - io:format(Fd, - "[{kernel, [{sync_nodes_optional, ['~s','~s','~s']}," - "{sync_nodes_timeout, 1}," - "{distributed, [{app1, ['~s', '~s', '~s']}," - "{app2, 10000, ['~s', '~s', '~s']}," - "{app3, 5000, [{'~s', '~s'}, '~s']}]}]}].~n", - [C1,C2,C3, C1,C2,C3, C1,C2,C3, C1,C2,C3]). - -from(H, [H | T]) -> T; -from(H, [_ | T]) -> from(H, T); -from(H, []) -> []. - -%%----------------------------------------------------------------- -%% Test suite for distributed applications, tests start, load -%% etc indirectly. 
-%% Should be started in a CC view with: -%% erl -sname master -rsh ctrsh -%%----------------------------------------------------------------- -start_nodes(Conf) -> - % Write a config file - ?line Nodes = ?config(nodes,Conf), - ?line [C1,C2,C3|_] = Nodes, %% Need at least 3 nodes - ?line Dir = ?config(priv_dir,Conf), - ?line {ok, Fd} = file:open(Dir ++ "sys.config", write), - ?line config(Fd,C1,C2,C3), - ?line file:close(Fd), - ?line Config = Dir ++ "sys", - - % Test [cp1, cp2, cp3] - ?line {ok, Cp1} = start_node(lists:nth(1,Nodes), Config), - ?line {ok, Cp2} = start_node(lists:nth(2,Nodes), Config), - ?line {ok, Cp3} = start_node(lists:nth(3,Nodes), Config), - % Start app1 and make sure cp1 starts it - %%?line rpc:multicall([Cp1, Cp2, Cp3], application, load, [app1()]), - %%?line rpc:multicall([Cp1, Cp2, Cp3], application, start,[app1,permanent]), - ?line test_server:sleep(1000), - {Cp1,Cp2,Cp3}. - -stop_nodes({Cp1,Cp2,Cp3}) -> - ?line stop_node(Cp1), - ?line stop_node(Cp2), - ?line stop_node(Cp3). - -start_node(NodeAtHost, Config) -> - ?line NodeAtHostStr = atom_to_list(NodeAtHost), - ?line HostStr = from($@,NodeAtHostStr), - ?line NodeStr = lists:reverse(from($@,lists:reverse(NodeAtHostStr))), - ?line Host = list_to_atom(HostStr), - ?line Node = list_to_atom(NodeStr), - ?line io:format("Launching slave node ~p@~p ~p",[Node,Host,Config]), - ?line slave:start(Host, Node, lists:concat(["-config ", Config])). - -stop_node(Node) -> - ?line rpc:cast(Node, erlang, halt, []). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -start_client_process(Cp,Mode,NodeNum) -> - io:format("Starting client process at ~p in mode ~p",[Cp,Mode]), - ?line case rpc:call(Cp, erlang, spawn, - [?MODULE, client, - [Mode,NodeNum,self(),random:uniform(1000)]]) of - {badrpc,Reason} -> - ?line exit({badrpc,{Cp,Reason}}); - Client -> - ?line Client - end. - -start_clients(Mode,Conf) -> - ?line random:seed(4711,0,0), - ?line {Cp1,Cp2,Cp3} = start_nodes(Conf), - ?line Client1 = start_client_process(Cp1,Mode,1), - ?line Client2 = start_client_process(Cp2,Mode,2), - ?line Client3 = start_client_process(Cp3,Mode,3), - test_server:format(1,"All 3 nodes started, " - "power off client(s) any time...",[]), - Client1 ! go, - Client2 ! go, - Client3 ! go, - {{Cp1,Cp2,Cp3},{Client1,Client2,Client3}}. - -stop_clients(Cps) -> - test_server:format(1,"Test completed.",[]), - ?line stop_nodes(Cps). - -data() -> - {{self(),foo,bar,[1,2,3,4,5,6,7],{{{{}}}}, - "We need pretty long packages, so that there is a big risk " - "of cutting it in the middle when suddenly turning off " - "the power or breaking the connection. " - "We don't check the contents of the data very much, but " - "at least there is a magic cookie at the end (123456)." - "If that one arrives correctly, the link is ok as far " - "as we are concerned."}, - 123456}. - -reads(suite) -> []; -reads(Conf) -> - ?line {Cps,_} = start_clients(w,Conf), - ?line read_loop(?iterations,0), - ?line stop_clients(Cps), - ok. 
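For concreteness, with three hypothetical node names cp1@myhost, cp2@myhost and cp3@myhost, the sys.config written by config/4 above comes out as (whitespace added for readability; the actual file is a single line):

    %% node names below are placeholders
    [{kernel, [{sync_nodes_optional, ['cp1@myhost','cp2@myhost','cp3@myhost']},
               {sync_nodes_timeout, 1},
               {distributed, [{app1, ['cp1@myhost', 'cp2@myhost', 'cp3@myhost']},
                              {app2, 10000, ['cp1@myhost', 'cp2@myhost', 'cp3@myhost']},
                              {app3, 5000, [{'cp1@myhost', 'cp2@myhost'}, 'cp3@myhost']}]}]}].

start_node/2 above splits each name at the '@', so a call like start_node('cp1@myhost', "/tmp/sys") (path hypothetical) boils down to slave:start(myhost, cp1, "-config /tmp/sys").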
- -read_loop(0,M) -> - ok; -read_loop(N,M) -> - ?line Dog = test_server:timetrap(test_server:seconds(0.5)), - M2 = - receive - {Node,Count,{_,123456}} -> - ?line setelement(Node,M,element(Node,M)+1); - {Node,Count,Data} -> - ?line exit({network_transmission_error,Data}); - {nodedown,Node} -> - ?line test_server:format(1,"Node ~s went down",[Node]), - ?line M; - Other -> - ?line M - after test_server:seconds(0.1) -> - ?line io:format("No message!"), - ?line M - end, - ?line test_server:timetrap_cancel(Dog), - ?line M3 = - case N rem 100 of - 0 -> io:format("~p reads to go (~w msgs)",[N,M2]), - {0,0,0}; - _ -> M2 - end, - ?line read_loop(N-1,M3). - -client(w,NodeNum,Pid,Seed) -> - random:seed(Seed,0,0), - receive - go -> ok - end, - client_write_loop(Pid,0,NodeNum,data()); -client(r,NodeNum,Pid,Seed) -> - random:seed(Seed,0,0), - receive - go -> ok - end, - client_read_loop(0). - -client_write_loop(Pid,N,NodeNum,Data) -> - test_server:sleep(random:uniform(20)), - Pid ! {NodeNum,N,Data}, - client_write_loop(Pid,N+1,NodeNum,Data). - -writes(suite) -> []; -writes(Conf) -> - ?line {Cps,{C1,C2,C3}} = start_clients(r,Conf), - ?line write_loop(2*?iterations,{C1,C2,C3},data()), - ?line stop_clients(Cps), - ok. - -write_loop(0,_,_) -> - ok; -write_loop(N,Clients,Data) -> - ?line Dog = test_server:timetrap(test_server:seconds(0.5)), - ?line Client = element(random:uniform(size(Clients)),Clients), - ?line Client ! {node(),N,Data}, - ?line test_server:timetrap_cancel(Dog), - receive - {nodedown,Node} -> - ?line test_server:format(1,"Node ~s went down",[Node]) - after 0 -> - ?line ok - end, - ?line case N rem 100 of - 0 -> io:format("~p writes to go",[N]); - _ -> ok - end, - ?line write_loop(N-1,Clients,Data). - -client_read_loop(N) -> - receive - {Node,Count,{_,123456}} -> - ?line ok; - {Node,Count,Data} -> - ?line io:format("~p(~p): transmission error from node ~p(~p): ~p", - [node(),N,Node,Count,Data]); - Other -> - ?line io:format("~p(~p): got a strange message: ~p", - [node(),N,Other]) - end, - client_read_loop(N+1). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - diff --git a/lib/test_server/src/things/mnesia_power_SUITE.erl b/lib/test_server/src/things/mnesia_power_SUITE.erl deleted file mode 100644 index e9bc75e583..0000000000 --- a/lib/test_server/src/things/mnesia_power_SUITE.erl +++ /dev/null @@ -1,126 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2009. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(mnesia_power_SUITE). --compile([export_all]). -%%-define(line_trace,1). --include("test_server.hrl"). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -all(suite) -> [run]. - --define(iterations,3). %% nof power-off cycles to do before acceptance --define(rows,8). %% nof database rows to use (not too big, please) - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - --record(sum_table_1,{row,a,b,c,s}). 
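The invariant the whole suite relies on is that every sum_table_1 row satisfies s =:= a + b + c; the manipulator processes below write rows that respect it, and check_consistency/1 re-verifies it after each simulated power-off. A single conforming write, as a minimal sketch with made-up values, would be:

    mnesia:transaction(fun() ->
        %% values are made up; the real manipulator/2 uses random:uniform/1
        mnesia:write(#sum_table_1{row = 1, a = 10, b = 20, c = 30, s = 60})
    end).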
- -run(suite) -> []; -run(Config) -> - ?line mnesia:create_schema([node()]), - ?line mnesia:start(), - ?line mnesia:create_table([{name, sum_table_1}, {disc_copies,[node()]}, - {attributes,record_info(fields,sum_table_1)}]), - ?line run_test(Config,?iterations). - -run(Config,N) -> - ?line mnesia:start(), - ?line check_consistency(sum_table_1), - case N of - 0 -> ?line ok; - N -> ?line run_test(Config,N) - end. - -run_test(Config,N) -> - ?line Pid1a = start_manipulator(sum_table_1), - ?line Pid1b = start_manipulator(sum_table_1), - ?line Pid1c = start_manipulator(sum_table_1), - ?line test_server:resume_point(?MODULE,run,[Config,N-1]), - ?line test_server:format(1,"Manipulating data like crazy now, " - "power off any time..."), - ?line test_server:sleep(infinity). - -start_manipulator(Table) -> - ?line spawn_link(?MODULE,manipulator_init,[Table]). - -manipulator_init(Table) -> - random:seed(4711,0,0), - manipulator(0,Table). - -manipulator(N,Table) -> - ?line Fun = - fun() -> - ?line Row = random:uniform(?rows), - ?line A = random:uniform(100000), - ?line B = random:uniform(100000), - ?line C = random:uniform(100000), - ?line Sum = A+B+C, - ?line case mnesia:write(#sum_table_1 - {row=Row,a=A,b=B,c=C,s=Sum}) of - ok -> ok; - Other -> - ?line io:format("Trans failed: ~p\n",[Other]) - end - end, - ?line mnesia:transaction(Fun), - case mnesia:table_info(sum_table_1,size) of - 0 -> exit(still_empty); - _ -> ok - end, - case N rem 2000 of - 0 -> io:format("~p did ~p operations",[self(),N]), - check_consistency(sum_table_1); - _ -> ok - end, - ?line manipulator(N+1,Table). - -check_consistency(Table) -> - io:format("Checking consistency of table ~p\n",[Table]), - All = mnesia:table_info(Table,wild_pattern), - ?line Fun = - fun() -> - mnesia:match_object(All) - end, - ?line case mnesia:transaction(Fun) of - {atomic,Val} -> - check_consistency_rows(Val,0); - Other -> - io:format("Trans failed: ~p\n",[Other]), - exit(failed), - check_consistency(Table) - end. - -check_consistency_rows([#sum_table_1{a=A,b=B,c=C,s=Sum}|Rows],N) -> - ?line Sum=A+B+C, - ?line check_consistency_rows(Rows,N+1); -check_consistency_rows([],N) -> - io:format("All ~p rows were consistent\n",[N]), - {ok,N}; -check_consistency_rows(Thing,N) -> - io:format("Mnesia transaction returned:\n~p\n",[Thing]), - exit({bad_format,Thing}). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - - - - diff --git a/lib/test_server/src/things/random_kill_SUITE.erl b/lib/test_server/src/things/random_kill_SUITE.erl deleted file mode 100644 index 917bc2b3d5..0000000000 --- a/lib/test_server/src/things/random_kill_SUITE.erl +++ /dev/null @@ -1,82 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2009. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(random_kill_SUITE). --compile([export_all]). -%%-define(line_trace,1). --include("test_server.hrl"). 
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -all(suite) -> [run]. - --define(iterations,25). %% Kill this many processes, - %% possibly with reboots in between - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -run(suite) -> []; -run(Config) -> - registered(?iterations). - -registered(0) -> - ok; -registered(N) -> - random:seed(3461*N,1159*N,351*N), - Pid = select_victim(registered), - test_server:resume_point(?MODULE,registered,[N-1]), - test_server:format("About to kill pid ~p (~p)\n~p", - [Pid,process_info(Pid,registered_name),info(Pid)]), - %%exit(Pid,kill), - registered(N-1). - -info(Pid) -> - Rest0 = tl(pid_to_list(Pid)), - {P1,Rest1} = get_until($.,Rest0), - {P2,Rest2} = get_until($.,Rest1), - {P3,_} = get_until($>,Rest2), - c:i(list_to_integer(P1),list_to_integer(P2),list_to_integer(P3)). - -get_until(Ch,L) -> - get_until(Ch,L,[]). -get_until(Ch,[],Acc) -> - {lists:reverse(Acc),[]}; -get_until(Ch,[Ch|T],Acc) -> - {lists:reverse(Acc),T}; -get_until(Ch,[H|T],Acc) -> - get_until(Ch,T,[H|Acc]). - -select_victim(registered) -> - Pids = - lists:map(fun(Server)-> whereis(Server) end,registered()), - ImmunePids = - [self()|lists:map(fun(Job)-> element(2,Job) end,test_server:jobs())], - SuitablePids = - lists:filter(fun(Pid)-> case lists:member(Pid,ImmunePids) of - true -> false; - false -> true - end - end, Pids), - Selected = random:uniform(length(SuitablePids)), - io:format("Selected ~p if ~p",[Selected,length(SuitablePids)]), - lists:nth(Selected,SuitablePids). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - diff --git a/lib/test_server/src/things/soft.gs.txt b/lib/test_server/src/things/soft.gs.txt deleted file mode 100644 index ec57884997..0000000000 --- a/lib/test_server/src/things/soft.gs.txt +++ /dev/null @@ -1,16 +0,0 @@ -6> gs:start(). -RealTimeViolation, 478ms (after 1164 good) -{1,<0.65.0>} -RealTimeViolation, 352ms (after 0 good) -RealTimeViolation, 492ms (after 0 good) -RealTimeViolation, 166ms (after 0 good) -RealTimeInfo, 18ms (after 7 good) -RealTimeViolation, 115ms (after 13 good) -7> application-specific initialization failed: couldn't connect to display ":0.0" -RealTimeViolation, 20340ms (after 0 good) -gs error: user backend died reason {port_handler,#Port,normal} - -RealTimeInfo, 31ms (after 21 good) -RealTimeInfo, 21ms (after 69 good) -RealTimeInfo, 21ms (after 119 good) -RealTimeInfo, 21ms (after 169 good) diff --git a/lib/test_server/src/things/verify.erl b/lib/test_server/src/things/verify.erl deleted file mode 100644 index b09d0fbda9..0000000000 --- a/lib/test_server/src/things/verify.erl +++ /dev/null @@ -1,200 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2009. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(verify). - --export([dir/0, dir/1]). 
- -%% usage verify:dir() -%% or verify:dir(Dir) -%% -%% runs tests on all files with the extension ".t1" -%% creates an error log file verify.log in the directory where the -%% tests were run - --import(lists, [reverse/1, foldl/3, map/2]). - -dir() -> - dir("."). - -dir(Dir) -> - case file:list_dir(Dir) of - {ok, Files} -> - VFiles = collect_vers(Files, []), - VFiles1 = map(fun(F) -> Dir ++ "/" ++ F end, VFiles), - Nerrs = foldl(fun(F, Sum) -> - case file(F) of - {file,_,had,N,errors} -> - Sum + N; - no_errors -> - Sum; - Other -> - Sum + 1 - end - end, 0, VFiles1), - case Nerrs of - 0 -> no_errors; - _ -> {dir,Dir,had,Nerrs,errors} - end; - _ -> - {error, cannot,list_dir, Dir} - end. - -collect_vers([H|T], L) -> - case reverse(H) of - [$1,$t,$.|T1] -> collect_vers(T, [reverse(T1)|L]); - _ -> collect_vers(T, L) - end; -collect_vers([], L) -> - L. - -file(File) -> - case file:open(File ++ ".t1", read) of - {ok, S} -> - io:format("Verifying: ~s\n", [File]), - ErrFile = File ++ ".errs", - {ok, E} = file:open(ErrFile, write), - Bind0 = erl_eval:new_bindings(), - NErrs = do(S, {E, File, Bind0, 0}, 1), - file:close(S), - file:close(E), - case NErrs of - 0 -> - file:delete(ErrFile), - no_errors; - _ -> - {file,File,had,NErrs,errors} - end; - _ -> - error_in_opening_file - end. - -do(S, Env, Line) -> - R = io:scan_erl_exprs(S, '', Line), - do1(R, S, Env). - -do1({eof,_}, _, {_,_,_,NErrs}) -> - NErrs; -do1({ok,Toks,Next}, S, Env0) -> - E1 = handle_toks(Toks, Next, Env0), - do(S, E1, Next); -do1({error, {Line,Mod,Args}, Next}, S, E) -> - io:format("*** ~w ~p~n", [Line,Mod:format_error(Args)]), - E1 = add_error(E), - do(S, E1, Next). - -add_error({Stream, File, Bindings, N}) -> {Stream, File, Bindings, N+1}. - -handle_toks(Toks, Line, Env0) -> - %% io:format("Toks:~p\n", [Toks]). - case erl_parse:parse_exprs(Toks) of - {ok, Exprs} -> - %% io:format("Got:~p\n", [Exprs]), - eval(Exprs, Line, Env0); - {error, {LineNo, Mod, What}} -> - Str = apply(Mod, format_error, [What]), - io:format("*** Line:~w ***~s\n", [LineNo, Str]), - add_error(Env0); - Parse_error -> - io:format("Parse Error:~p\n",[Parse_error]), - add_error(Env0) - end. - -forget([{var,_,Name}], B0) -> erl_eval:del_binding(Name, B0); -forget([], _) -> erl_eval:new_bindings(). - -eval([{call,_,{atom,_,f}, Args}], _, {Stream, Bind0, Errs}) -> - Bind1 = forget(Args, Bind0), - {Stream, Bind1, Errs}; -eval(Exprs, Line, {Stream, File, Bind0, NErrs}) -> - %% io:format("Bindings >> ~p\n", [Bind0]), - %% io:format("Exprs >> ~p\n", [Exprs]), - case catch erl_eval:exprs(Exprs, Bind0) of - {'EXIT', Reason} -> - out_both(Stream, "----------------------------------~n", []), - out_both(Stream, "File:~s Error in:~s~n", [File, pp(Exprs)]), - print_bindings(Stream, Exprs, Bind0), - print_lhs(Stream, Exprs), - out_both(Stream, '*** Rhs evaluated to:~p~n',[rhs(Exprs, Bind0)]), - {Stream, File, Bind0, NErrs+1}; - {value, _, Bind1} -> - {Stream, File, Bind1, NErrs} - end. - -pp([H]) -> erl_pp:expr(H); -pp([H|T]) -> [erl_pp:expr(H),$,|pp(T)]; -pp([]) -> []. - -print_bindings(E, Form, Bindings) -> - case varsin(Form) of - [] -> - true; - Vars -> - print_vars(E, Vars, Bindings) - end. - -print_vars(E, [Var|T], Bindings) -> - case erl_eval:binding(Var, Bindings) of - {value, Val} -> - out_both(E, '~s = ~p\n',[Var, Val]); - unbound -> - out_both(E, '~s *is unbound*\n', [Var]) - end, - print_vars(E, T, Bindings); -print_vars(_, [], _) -> - true. - - -out_both(E, Format, Data) -> - io:format(Format, Data), - io:format(E, Format, Data). 
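As an example of the input handled above, a hypothetical file example.t1 containing

    %% hypothetical contents of example.t1
    X = 1 + 2.
    X = 3.
    Y = X + 1.

is consumed expression by expression (io:scan_erl_exprs/3 plus erl_parse:parse_exprs/1) and evaluated with erl_eval:exprs/2, ending with the bindings X = 3 and Y = 4; since nothing failed, example.errs is deleted again and file("example") returns no_errors. If one line instead read X = 4., the badmatch would be caught, logged to example.errs, and the return value would be {file,"example",had,1,errors}.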
- -print_lhs(E, [{match, _, Lhs, Rhs}]) -> - %% io:format(">>>> here:~w\n",[Lhs]), - out_both(E, '*** Lhs was:~s\n',[erl_pp:expr(Lhs)]); -print_lhs(E, _) -> - out_both(E, '** UNDEFINED **', []). - - -rhs([{match, _, Lhs, Rhs}], Bindings) -> - case catch erl_eval:exprs([Rhs], Bindings) of - {value, Val, _} -> Val; - Other -> undefined() - end; -rhs(_, _) -> - undefined(). - -varsin(X) -> varsin(X, []). - -varsin({var,_,'_'}, L) -> - L; -varsin({var,_,V}, L) -> - case lists:member(V, L) of - true -> L; - false -> [V|L] - end; -varsin([H|T], L) -> - varsin(T, varsin(H, L)); -varsin(T, L) when tuple(T) -> - varsin(tuple_to_list(T), L); -varsin(_, L) -> - L. - -undefined() -> - '** UNDEFINED **'. diff --git a/lib/test_server/src/ts.config b/lib/test_server/src/ts.config deleted file mode 100644 index cf3d269616..0000000000 --- a/lib/test_server/src/ts.config +++ /dev/null @@ -1,46 +0,0 @@ -%% -*- erlang -*- - -%%% Change these to suite the environment. See the inet_SUITE for info about -%%% what they are used for. -%%% test_hosts are looked up using "ypmatch xx yy zz hosts.byname" -%{test_hosts,[my_ip4_host]}. - -%% IPv4 host only - no ipv6 entry must exist! -%{test_host_ipv4_only, -% {"my_ip4_host", %Short hostname -% "my_ip4_host.mydomain.com", %Long hostname -% "10.10.0.1", %IP string -% {10,10,0,1}, %IP tuple -% ["my_ip4_host"], %Any aliases -% "::ffff:10.10.0.1", %IPv6 string (compatibility addr) -% {0,0,0,0,0,65535,2570,1} %IPv6 tuple -% }}. - -%{test_dummy_host, {"dummy", -% "dummy.mydomain.com", -% "192.168.0.1", -% {192,168,0,1}, -% ["dummy"], -% "::ffff:192.168.0.1", -% {0,0,0,0,0,65535,49320,1} -% }}. - - -%%% test_hosts are looked up using "ypmatch xx yy zz ipnodes.byname" -%{ipv6_hosts,[my_ip6_host]}. - - -%{test_host_ipv6_only, -% {"my_ip6_host", %Short hostname -% "my_ip6_host.mydomain.com", %Long hostname -% "::2eff:f2b0:1ea0", %IPv6 string -% {0,0,0,0,0,12031,62128,7840}, %IPv6 tuple -% ["my_ip6_host"] %Aliases. -% }}. - -%{test_dummy_ipv6_host, {"dummy6", -% "dummy6.mydomain.com", -% "127::1", -% {295,0,0,0,0,0,0,1}, -% ["dummy6-ip6"] -% }}. diff --git a/lib/test_server/src/ts.erl b/lib/test_server/src/ts.erl deleted file mode 100644 index 8bbdc8f8cf..0000000000 --- a/lib/test_server/src/ts.erl +++ /dev/null @@ -1,1019 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2014. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------- -%%% File : ts.erl -%%% Purpose : Frontend for running tests. -%%%------------------------------------------------------------------- - --module(ts). - --export([cl_run/1, - run/0, run/1, run/2, run/3, run/4, run/5, - run_category/1, run_category/2, run_category/3, - tests/0, tests/1, suites/1, categories/1, - install/0, install/1, - estone/0, estone/1, - cross_cover_analyse/1, - compile_testcases/0, compile_testcases/1, - help/0]). 
- -%% Functions kept for backwards compatibility --export([bench/0, bench/1, bench/2, benchmarks/0, - smoke_test/0, smoke_test/1,smoke_test/2, smoke_tests/0]). - --export([i/0, l/1, r/0, r/1, r/2, r/3]). - -%%%---------------------------------------------------------------------- -%%% This module, ts, is the interface to all of the functionality of -%%% the TS framework. The picture below shows the relationship of -%%% the modules: -%%% -%%% +-- ts_install --+------ ts_autoconf_win32 -%%% | -%%% ts ---+ +------ ts_erl_config -%%% | | ts_lib -%%% +-- ts_run -----+------ ts_make -%%% | | ts_filelib -%%% | +------ ts_make_erl -%%% | -%%% +-- ts_benchmark -%%% -%%% The modules ts_lib and ts_filelib contains utilities used by -%%% the other modules. -%%% -%%% Module Description -%%% ------ ----------- -%%% ts Frontend to the test server framework. Contains all -%%% interface functions. -%%% ts_install Installs the test suite. On Unix, `autoconf' is -%%% is used; on Windows, ts_autoconf_win32 is used. -%%% The result is written to the file `variables'. -%%% ts_run Supervises running of the tests. -%%% ts_autconf_win32 An `autoconf' for Windows. -%%% ts_autconf_cross_env `autoconf' for other platforms (cross environment) -%%% ts_erl_config Finds out information about the Erlang system, -%%% for instance the location of erl_interface. -%%% This works for either an installed OTP or an Erlang -%%% system running in a git repository/source tree. -%%% ts_make Interface to run the `make' program on Unix -%%% and other platforms. -%%% ts_make_erl A corrected version of the standar Erlang module -%%% make (used for rebuilding test suites). -%%% ts_lib Miscellanous utility functions, each used by several -%%% other modules. -%%% ts_benchmark Supervises otp benchmarks and collects results. -%%%---------------------------------------------------------------------- - --include_lib("kernel/include/file.hrl"). --include("ts.hrl"). - --define( - install_help, - [ - " ts:install()\n", - " Install ts with no options.\n", - "\n", - " ts:install(Options)\n", - " Install ts with a list of options, see below.\n", - "\n", - "Installation options supported:\n\n", - " {longnames, true} - Use fully qualified hostnames\n", - " {verbose, Level} - Sets verbosity level for TS output (0,1,2), 0 is\n" - " quiet(default).\n" - " {crossroot, ErlTop}\n" - " - Erlang root directory on build host, ~n" - " normally same value as $ERL_TOP\n" - " {crossenv, [{Key,Val}]}\n" - " - Environmentals used by test configure on build host\n" - " {crossflags, FlagsString}\n" - " - Flags used by test configure on build host\n" - " {xcomp, XCompFile}\n" - " - The xcomp file to use for cross compiling the~n" - " testcases. Using this option will override any~n" - " cross* configurations given to ts. Note that you~n" - " have to have a correct ERL_TOP as well.~n" - ]). - -help() -> - case filelib:is_file(?variables) of - false -> help(uninstalled); - true -> help(installed) - end. - -help(uninstalled) -> - H = ["ts is not yet installed. To install use:\n\n"], - show_help([H,?install_help]); -help(installed) -> - H = ["\n", - "Run functions:\n\n", - " ts:run()\n", - " Run the tests for all apps. The tests are defined by the\n", - " main test specification for each app: ../App_test/App.spec.\n", - "\n", - " ts:run(Apps)\n", - " Apps = atom() | [atom()]\n", - " Run the tests for an app, or set of apps. 
The tests are\n", - " defined by the main test specification for each app:\n", - " ../App_test/App.spec.\n", - "\n", - " ts:run(App, Suites)\n", - " App = atom(), Suites = atom() | [atom()]\n", - " Run one or more test suites for App (i.e. modules named\n", - " *_SUITE.erl, located in ../App_test/).\n", - "\n", - " ts:run(App, Suite, TestCases)\n", - " App = atom(), Suite = atom(),\n", - " TestCases = TCs | {testcase,TCs}, TCs = atom() | [atom()]\n", - " Run one or more test cases (functions) in Suite.\n", - "\n", - " ts:run(App, Suite, {group,Groups})\n", - " App = atom(), Suite = atom(), Groups = atom() | [atom()]\n", - " Run one or more test case groups in Suite.\n", - "\n", - " ts:run(App, Suite, {group,Group}, {testcase,TestCases})\n", - " App = atom(), Suite = atom(), Group = atom(),\n", - " TestCases = atom() | [atom()]\n", - " Run one or more test cases in a test case group in Suite.\n", - "\n", - " ts:run_category(TestCategory)\n", - " TestCategory = smoke | essential | bench | atom()\n", - " Run the specified category of tests for all apps.\n", - " For each app, the tests are defined by the specification:\n", - " ../App_test/App_TestCategory.spec.\n", - "\n", - " ts:run_category(Apps, TestCategory)\n", - " Apps = atom() | [atom()],\n", - " TestCategory = smoke | essential | bench | atom()\n", - " Run the specified category of tests for the given app or apps.\n", - "\n", - " Note that the test category parameter may have arbitrary value,\n", - " but should correspond to an existing test specification with file\n", - " name: ../App_test/App_TestCategory.spec.\n", - " Predefined categories exist for smoke tests, essential tests and\n", - " benchmark tests. The corresponding specs are:\n", - " ../*_test/Spec_smoke.spec, ../*_test/Spec_essential.spec and\n", - " ../*_test/Spec_bench.spec.\n", - "\n", - " All above run functions can take an additional last argument,\n", - " Options, which is a list of options (e.g. ts:run(App, Options),\n", - " or ts:run_category(Apps, TestCategory, Options)).\n", - "\n", - "Run options supported:\n\n", - " batch - Do not start a new xterm\n", - " {verbose, Level} - Same as the verbosity option for install\n", - " verbose - Same as {verbose, 1}\n", - " {vars, Vars} - Variables in addition to the 'variables' file\n", - " Can be any of the install options\n", - " {trace, TraceSpec}- Start call trace on target and slave nodes\n", - " TraceSpec is the name of a file containing\n", - " trace specifications or a list of trace\n", - " specification elements.\n", - " {config, Path} - Specify which directory ts should get it's \n" - " config files from. The files should follow\n" - " the convention lib/test_server/src/ts*.config.\n" - " These config files can also be specified by\n" - " setting the TEST_CONFIG_PATH environment\n" - " variable to the directory where the config\n" - " files are. The default location is\n" - " tests/test_server/.\n" - "\n", - "Supported trace information elements:\n\n", - " {tp | tpl, Mod, [] | match_spec()}\n", - " {tp | tpl, Mod, Func, [] | match_spec()}\n", - " {tp | tpl, Mod, Func, Arity, [] | match_spec()}\n", - " {ctp | ctpl, Mod}\n", - " {ctp | ctpl, Mod, Func}\n", - " {ctp | ctpl, Mod, Func, Arity}\n", - "\n\n", - "Support functions:\n\n", - " ts:tests()\n", - " Returns all apps available for testing.\n", - "\n", - " ts:tests(TestCategory)\n", - " Returns all apps that provide tests in the given category.\n", - "\n", - " ts:suites(App)\n", - " Returns all available test suites for App,\n", - " i.e. 
../App_test/*_SUITE.erl\n", - "\n", - " ts:categories(App)\n", - " Returns all test categories available for App.\n", - "\n", - " ts:estone()\n", - " Runs estone_SUITE in the kernel application with no run options\n", - "\n", - " ts:estone(Opts)\n", - " Runs estone_SUITE in the kernel application with the given\n", - " run options\n", - "\n", - " ts:cross_cover_analyse(Level)\n", - " Use after ts:run with option cover or cover_details. Analyses\n", - " modules specified with a 'cross' statement in the cover spec file.\n", - " Level can be 'overview' or 'details'.\n", - "\n", - " ts:compile_testcases()\n", - " ts:compile_testcases(Apps)\n", - " Compiles all test cases for the given apps, for usage in a\n", - " cross compilation environment.\n", - "\n\n", - "Installation (already done):\n\n" - ], - show_help([H,?install_help]). - -show_help(H) -> - io:format(lists:flatten(H)). - - -%% Installs tests. -install() -> - ts_install:install(install_local,[]). -install(Options) when is_list(Options) -> - ts_install:install(install_local,Options). - -%% run/0 -%% Runs all specs found by ts:tests(), if any, or returns -%% {error, no_tests_available}. (batch) -run() -> - case ts:tests() of - [] -> - {error, no_tests_available}; - _ -> - check_and_run(fun(Vars) -> run_all(Vars) end) - end. -run_all(_Vars) -> - run_some(tests(), [batch]). - -run_some([], _Opts) -> - ok; -run_some(Apps, Opts) -> - case proplists:get_value(test_category, Opts) of - bench -> - check_and_run(fun(Vars) -> ts_benchmark:run(Apps, Opts, Vars) end); - _Other -> - run_some1(Apps, Opts) - end. - -run_some1([], _Opts) -> - ok; -run_some1([{App,Mod}|Apps], Opts) -> - case run(App, Mod, Opts) of - ok -> ok; - Error -> io:format("~p: ~p~n",[{App,Mod},Error]) - end, - run_some1(Apps, Opts); -run_some1([App|Apps], Opts) -> - case run(App, Opts) of - ok -> ok; - Error -> io:format("~p: ~p~n",[App,Error]) - end, - run_some1(Apps, Opts). - -%% This can be used from command line. Both App and -%% TestCategory must be specified. App may be 'all' -%% and TestCategory may be 'main'. Examples: -%% erl -s ts cl_run kernel smoke <options> -%% erl -s ts cl_run kernel main <options> -%% erl -s ts cl_run all essential <options> -%% erl -s ts cl_run all main <options> -%% When using the 'main' category and running with cover, -%% one can also use the cross_cover_analysis flag. -cl_run([App,Cat|Options0]) when is_atom(App) -> - - AllAtomsFun = fun(X) when is_atom(X) -> true; - (_) -> false - end, - Options1 = - case lists:all(AllAtomsFun, Options0) of - true -> - %% Could be from command line - lists:map(fun(Opt) -> - to_erlang_term(Opt) - end, Options0) -- [batch]; - false -> - Options0 -- [batch] - end, - %% Make sure there is exactly one occurence of 'batch' - Options2 = [batch|Options1], - - Result = - case {App,Cat} of - {all,main} -> - run(tests(), Options2); - {all,Cat} -> - run_category(Cat, Options2); - {_,main} -> - run(App, Options2); - {_,Cat} -> - run_category(App, Cat, Options2) - end, - case check_for_cross_cover_analysis_flag(Options2) of - false -> - ok; - Level -> - cross_cover_analyse(Level) - end, - Result. - -%% run/1 -%% Runs tests for one app (interactive). -run(App) when is_atom(App) -> - Options = check_test_get_opts(App, []), - File = atom_to_list(App), - run_test(File, [{spec,[File++".spec"]},{allow_user_terms,true}], Options); - -%% This can be used from command line, e.g. 
-%% erl -s ts run all <options> -%% erl -s ts run main <options> -run([all,main|Opts]) -> - cl_run([all,main|Opts]); -run([all|Opts]) -> - cl_run([all,main|Opts]); -run([main|Opts]) -> - cl_run([all,main|Opts]); -%% Backwards compatible -run([all_tests|Opts]) -> - cl_run([all,main|Opts]); - -%% run/1 -%% Runs the main tests for all available apps -run(Apps) when is_list(Apps) -> - run(Apps, [batch]). - -%% run/2 -%% Runs the main tests for all available apps -run(Apps, Opts) when is_list(Apps), is_list(Opts) -> - run_some(Apps, Opts); - -%% Runs tests for one app with list of suites or with options -run(App, ModsOrOpts) when is_atom(App), - is_list(ModsOrOpts) -> - case is_list_of_suites(ModsOrOpts) of - false -> - run(App, {opts_list,ModsOrOpts}); - true -> - run_some([{App,M} || M <- ModsOrOpts], - [batch]) - end; - -run(App, {opts_list,Opts}) -> - Options = check_test_get_opts(App, Opts), - File = atom_to_list(App), - - %% check if other test category than main has been specified - {CatSpecName,TestCat} = - case proplists:get_value(test_category, Opts) of - undefined -> - {"",main}; - Cat -> - {"_" ++ atom_to_list(Cat),Cat} - end, - - WhatToDo = - case App of - %% Known to exist but fails generic tests below - emulator -> test; - system -> test; - erl_interface -> test; - epmd -> test; - _ -> - case code:lib_dir(App) of - {error,bad_name} -> - %% Application does not exist - skip; - Path -> - case file:read_file_info(filename:join(Path,"ebin")) of - {ok,#file_info{type=directory}} -> - %% Erlang application is built - test; - _ -> - case filelib:wildcard( - filename:join([Path,"priv","*.jar"])) of - [] -> - %% The application is not built - skip; - [_|_] -> - %% Java application is built - test - end - end - end - end, - case WhatToDo of - skip -> - SkipSpec = create_skip_spec(App, suites(App)), - run_test(File, [{spec,[SkipSpec]}], Options); - test when TestCat == bench -> - check_and_run(fun(Vars) -> - ts_benchmark:run([App], Options, Vars) - end); - test -> - Spec = File ++ CatSpecName ++ ".spec", - run_test(File, [{spec,[Spec]},{allow_user_terms,true}], Options) - end; - -%% Runs one module for an app (interactive) -run(App, Mod) when is_atom(App), is_atom(Mod) -> - run_test({atom_to_list(App),Mod}, - [{suite,Mod}], - [interactive]). - -%% run/3 -%% Run one module for an app with Opts -run(App, Mod, Opts) when is_atom(App), - is_atom(Mod), - is_list(Opts) -> - Options = check_test_get_opts(App, Opts), - run_test({atom_to_list(App),Mod}, - [{suite,Mod}], Options); - -%% Run multiple modules with Opts -run(App, Mods, Opts) when is_atom(App), - is_list(Mods), - is_list(Opts) -> - run_some([{App,M} || M <- Mods], Opts); - -%% Runs one test case in a module. -run(App, Mod, Case) when is_atom(App), - is_atom(Mod), - is_atom(Case) -> - Options = check_test_get_opts(App, []), - Args = [{suite,Mod},{testcase,Case}], - run_test(atom_to_list(App), Args, Options); - -%% Runs one or more groups in a module. -run(App, Mod, Grs={group,_Groups}) when is_atom(App), - is_atom(Mod) -> - Options = check_test_get_opts(App, []), - Args = [{suite,Mod},Grs], - run_test(atom_to_list(App), Args, Options); - -%% Runs one or more test cases in a module. -run(App, Mod, TCs={testcase,_Cases}) when is_atom(App), - is_atom(Mod) -> - Options = check_test_get_opts(App, []), - Args = [{suite,Mod},TCs], - run_test(atom_to_list(App), Args, Options). - -%% run/4 -%% Run one test case in a module with Options. 
-run(App, Mod, Case, Opts) when is_atom(App), - is_atom(Mod), - is_atom(Case), - is_list(Opts) -> - Options = check_test_get_opts(App, Opts), - Args = [{suite,Mod},{testcase,Case}], - run_test(atom_to_list(App), Args, Options); - -%% Run one or more test cases in a module with Options. -run(App, Mod, {testcase,Cases}, Opts) when is_atom(App), - is_atom(Mod) -> - run(App, Mod, Cases, Opts); -run(App, Mod, Cases, Opts) when is_atom(App), - is_atom(Mod), - is_list(Cases), - is_list(Opts) -> - Options = check_test_get_opts(App, Opts), - Args = [{suite,Mod},Cases], - run_test(atom_to_list(App), Args, Options); - -%% Run one or more test cases in a group. -run(App, Mod, Gr={group,_Group}, {testcase,Cases}) when is_atom(App), - is_atom(Mod) -> - run(App, Mod, Gr, Cases, [batch]); - - -%% Run one or more groups in a module with Options. -run(App, Mod, Grs={group,_Groups}, Opts) when is_atom(App), - is_atom(Mod), - is_list(Opts) -> - Options = check_test_get_opts(App, Opts), - Args = [{suite,Mod},Grs], - run_test(atom_to_list(App), Args, Options). - -%% run/5 -%% Run one or more test cases in a group with Options. -run(App, Mod, Group, Cases, Opts) when is_atom(App), - is_atom(Mod), - is_list(Opts) -> - Group1 = if is_tuple(Group) -> Group; true -> {group,Group} end, - Cases1 = if is_tuple(Cases) -> Cases; true -> {testcase,Cases} end, - Options = check_test_get_opts(App, Opts), - Args = [{suite,Mod},Group1,Cases1], - run_test(atom_to_list(App), Args, Options). - -%% run_category/1 -run_category(TestCategory) when is_atom(TestCategory) -> - run_category(TestCategory, [batch]). - -%% run_category/2 -run_category(TestCategory, Opts) when is_atom(TestCategory), - is_list(Opts) -> - case ts:tests(TestCategory) of - [] -> - {error, no_tests_available}; - Apps -> - Opts1 = [{test_category,TestCategory} | Opts], - run_some(Apps, Opts1) - end; - -run_category(Apps, TestCategory) when is_atom(TestCategory) -> - run_category(Apps, TestCategory, [batch]). - -%% run_category/3 -run_category(App, TestCategory, Opts) -> - Apps = if is_atom(App) -> [App]; - is_list(App) -> App - end, - Opts1 = [{test_category,TestCategory} | Opts], - run_some(Apps, Opts1). - -%%----------------------------------------------------------------- -%% Functions kept for backwards compatibility - -bench() -> - run_category(bench, []). -bench(Opts) when is_list(Opts) -> - run_category(bench, Opts); -bench(App) -> - run_category(App, bench, []). -bench(App, Opts) when is_atom(App) -> - run_category(App, bench, Opts); -bench(Apps, Opts) when is_list(Apps) -> - run_category(Apps, bench, Opts). - -benchmarks() -> - tests(bench). - -smoke_test() -> - run_category(smoke, []). -smoke_test(Opts) when is_list(Opts) -> - run_category(smoke, Opts); -smoke_test(App) -> - run_category(App, smoke, []). -smoke_test(App, Opts) when is_atom(App) -> - run_category(App, smoke, Opts); -smoke_test(Apps, Opts) when is_list(Apps) -> - run_category(Apps, smoke, Opts). - -smoke_tests() -> - tests(smoke). - -%%----------------------------------------------------------------- - -is_list_of_suites(List) -> - lists:all(fun(Suite) -> - S = if is_atom(Suite) -> atom_to_list(Suite); - true -> Suite - end, - try lists:last(string:tokens(S,"_")) of - "SUITE" -> true; - "suite" -> true; - _ -> false - catch - _:_ -> false - end - end, List). - -%% Create a spec to skip all SUITES, this is used when the application -%% to be tested is not part of the OTP release to be tested. 
-create_skip_spec(App, SuitesToSkip) -> - {ok,Cwd} = file:get_cwd(), - AppString = atom_to_list(App), - Specname = AppString++"_skip.spec", - {ok,D} = file:open(filename:join([filename:dirname(Cwd), - AppString++"_test",Specname]), - [write]), - TestDir = "\"../"++AppString++"_test\"", - io:format(D,"{suites, "++TestDir++", all}.~n",[]), - io:format(D,"{skip_suites, "++TestDir++", ~w, \"Skipped as application" - " is not in path!\"}.",[SuitesToSkip]), - Specname. - -%% Check testspec for App to be valid and get possible options -%% from the list. -check_test_get_opts(App, Opts) -> - validate_test(App), - Mode = configmember(batch, {batch, interactive}, Opts), - Vars = configvars(Opts), - Trace = get_config(trace,Opts), - ConfigPath = get_config(config,Opts), - KeepTopcase = configmember(keep_topcase, {keep_topcase,[]}, Opts), - Cover = configcover(App,Opts), - lists:flatten([Vars,Mode,Trace,KeepTopcase,Cover,ConfigPath]). - -to_erlang_term(Atom) -> - String = atom_to_list(Atom), - {ok, Tokens, _} = erl_scan:string(lists:append([String, ". "])), - {ok, Term} = erl_parse:parse_term(Tokens), - Term. - -%% Validate that Testspec really is a testspec, -%% and exit if not. -validate_test(Testspec) -> - case lists:member(Testspec, tests()) of - true -> - ok; - false -> - io:format("This testspec does not seem to be " - "available.~n Please try ts:tests() " - "to see available tests.~n"), - exit(self(), {error, test_not_available}) - end. - -configvars(Opts) -> - case lists:keysearch(vars, 1, Opts) of - {value, {vars, List}} -> - List0 = special_vars(Opts), - Key = fun(T) -> element(1,T) end, - DelDupList = - lists:filter(fun(V) -> - case lists:keysearch(Key(V),1,List0) of - {value,_} -> false; - _ -> true - end - end, List), - {vars, [List0|DelDupList]}; - _ -> - {vars, special_vars(Opts)} - end. - -%% Allow some shortcuts in the options... -special_vars(Opts) -> - SpecVars = - case lists:member(verbose, Opts) of - true -> - [{verbose, 1}]; - false -> - case lists:keysearch(verbose, 1, Opts) of - {value, {verbose, Lvl}} -> - [{verbose, Lvl}]; - _ -> - [{verbose, 0}] - end - end, - SpecVars1 = - case lists:keysearch(diskless, 1, Opts) of - {value,{diskless, true}} -> - [{diskless, true} | SpecVars]; - _ -> - SpecVars - end, - case lists:keysearch(testcase_callback, 1, Opts) of - {value,{testcase_callback, CBM, CBF}} -> - [{ts_testcase_callback, {CBM,CBF}} | SpecVars1]; - {value,{testcase_callback, CB}} -> - [{ts_testcase_callback, CB} | SpecVars1]; - _ -> - SpecVars1 - end. - -get_config(Key,Config) -> - case lists:keysearch(Key,1,Config) of - {value,Value} -> Value; - false -> [] - end. - -configcover(Testspec,[cover|_]) -> - {cover,Testspec,default_coverfile(Testspec),overview}; -configcover(Testspec,[cover_details|_]) -> - {cover,Testspec,default_coverfile(Testspec),details}; -configcover(Testspec,[{cover,File}|_]) -> - {cover,Testspec,File,overview}; -configcover(Testspec,[{cover_details,File}|_]) -> - {cover,Testspec,File,details}; -configcover(Testspec,[_H|T]) -> - configcover(Testspec,T); -configcover(_Testspec,[]) -> - []. - -default_coverfile(Testspec) -> - {ok,Cwd} = file:get_cwd(), - CoverFile = filename:join([filename:dirname(Cwd), - atom_to_list(Testspec)++"_test", - atom_to_list(Testspec)++".cover"]), - case filelib:is_file(CoverFile) of - true -> - CoverFile; - false -> - none - end. - -configmember(Member, {True, False}, Config) -> - case lists:member(Member, Config) of - true -> - True; - false -> - False - end. 
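For reference, the skip spec generated by create_skip_spec/2 above for a hypothetical application foo with suites [foo_a_SUITE, foo_b_SUITE] is written to ../foo_test/foo_skip.spec and contains:

    {suites, "../foo_test", all}.
    {skip_suites, "../foo_test", [foo_a_SUITE,foo_b_SUITE], "Skipped as application is not in path!"}.

which lets the run proceed, and be logged, even though the application itself is not part of the installed OTP release.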
- - -check_for_cross_cover_analysis_flag(Config) -> - check_for_cross_cover_analysis_flag(Config,false,false). -check_for_cross_cover_analysis_flag([cover|Config],false,false) -> - check_for_cross_cover_analysis_flag(Config,overview,false); -check_for_cross_cover_analysis_flag([cover|_Config],false,true) -> - overview; -check_for_cross_cover_analysis_flag([cover_details|Config],false,false) -> - check_for_cross_cover_analysis_flag(Config,details,false); -check_for_cross_cover_analysis_flag([cover_details|_Config],false,true) -> - details; -check_for_cross_cover_analysis_flag([cross_cover_analysis|Config],false,_) -> - check_for_cross_cover_analysis_flag(Config,false,true); -check_for_cross_cover_analysis_flag([cross_cover_analysis|_Config],Level,_) -> - Level; -check_for_cross_cover_analysis_flag([_|Config],Level,CrossFlag) -> - check_for_cross_cover_analysis_flag(Config,Level,CrossFlag); -check_for_cross_cover_analysis_flag([],_,_) -> - false. - - -%% Returns all available apps. -tests() -> - {ok, Cwd} = file:get_cwd(), - ts_lib:specs(Cwd). - -%% Returns all apps that provide tests in the given test category -tests(main) -> - {ok, Cwd} = file:get_cwd(), - ts_lib:specs(Cwd); -tests(bench) -> - ts_benchmark:benchmarks(); -tests(TestCategory) -> - {ok, Cwd} = file:get_cwd(), - ts_lib:specialized_specs(Cwd, atom_to_list(TestCategory)). - -%% Returns a list of available test suites for App. -suites(App) -> - {ok, Cwd} = file:get_cwd(), - ts_lib:suites(Cwd, atom_to_list(App)). - -%% Returns all available test categories for App -categories(App) -> - {ok, Cwd} = file:get_cwd(), - ts_lib:test_categories(Cwd, atom_to_list(App)). - -%% -%% estone/0, estone/1 -%% Opts = same as Opts or Config for the run(...) function, -%% e.g. [batch] -%% -estone() -> run(emulator,estone_SUITE). -estone(Opts) when is_list(Opts) -> run(emulator,estone_SUITE,Opts). - -%% -%% cross_cover_analyse/1 -%% Level = details | overview -%% Can be called on any node after a test (with cover) is -%% completed. The node's current directory must be the same as when -%% the tests were run. -%% -cross_cover_analyse([Level]) -> - cross_cover_analyse(Level); -cross_cover_analyse(Level) -> - Apps = get_last_app_tests(), - test_server_ctrl:cross_cover_analyse(Level,Apps). - -get_last_app_tests() -> - AllTests = filelib:wildcard(filename:join(["*","*_test.logs"])), - {ok,RE} = re:compile("^[^/]*/[^\.]*\.(.*)_test\.logs$"), - get_last_app_tests(AllTests,RE,[]). - -get_last_app_tests([Dir|Dirs],RE,Acc) -> - NewAcc = - case re:run(Dir,RE,[{capture,all,list}]) of - {match,[Dir,AppStr]} -> - Dir1 = filename:dirname(Dir), % cover logs in ct_run.<t> dir - App = list_to_atom(AppStr), - case lists:keytake(App,1,Acc) of - {value,{App,LastDir},Rest} -> - if Dir1 > LastDir -> - [{App,Dir1}|Rest]; - true -> - Acc - end; - false -> - [{App,Dir1} | Acc] - end; - _ -> - Acc - end, - get_last_app_tests(Dirs,RE,NewAcc); -get_last_app_tests([],_,Acc) -> - Acc. - -%%% Implementation. - -check_and_run(Fun) -> - case file:consult(?variables) of - {ok, Vars} -> - check_and_run(Fun, Vars); - {error, Error} when is_atom(Error) -> - {error, not_installed}; - {error, Reason} -> - {error, {bad_installation, file:format_error(Reason)}} - end. 
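get_last_app_tests/0 above pairs every application that has test logs with its most recent log directory. Assuming a hypothetical log tree laid out like

    ct_run.ts.2024-05-01_12.00.00/myhost.kernel_test.logs
    ct_run.ts.2024-06-01_09.30.00/myhost.kernel_test.logs
    ct_run.ts.2024-06-01_09.30.00/myhost.stdlib_test.logs

the regular expression captures "kernel" and "stdlib" from the entry names, and the fold keeps only the newest (lexicographically greatest) ct_run directory per application, here the 2024-06-01 one for both kernel and stdlib. That list is what cross_cover_analyse/1 hands to test_server_ctrl:cross_cover_analyse/2.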
- -check_and_run(Fun, Vars) -> - Platform = ts_install:platform_id(Vars), - case lists:keysearch(platform_id, 1, Vars) of - {value, {_, Platform}} -> - case catch apply(Fun, [Vars]) of - {'EXIT', Reason} -> - exit(Reason); - Other -> - Other - end; - {value, {_, OriginalPlatform}} -> - io:format("These test suites were installed for '~s'.\n", - [OriginalPlatform]), - io:format("But the current platform is '~s'.\nPlease " - "install for this platform before running " - "any tests.\n", [Platform]), - {error, inconsistent_platforms}; - false -> - {error, {bad_installation, no_platform}} - end. - -run_test(File, Args, Options) -> - check_and_run(fun(Vars) -> run_test(File, Args, Options, Vars) end). - -run_test(File, Args, Options, Vars) -> - ts_run:run(File, Args, Options, Vars). - - -%% This module provides some convenient shortcuts to running -%% the test server from within a started Erlang shell. -%% (This are here for backwards compatibility.) -%% -%% r() -%% r(Opts) -%% r(SpecOrMod) -%% r(SpecOrMod, Opts) -%% r(Mod, Case) -%% r(Mod, Case, Opts) -%% Each of these functions starts the test server if it -%% isn't already running, then runs the test case(s) selected -%% by the aguments. -%% SpecOrMod can be a module name or the name of a test spec file, -%% with the extension .spec or .spec.OsType. The module Mod will -%% be reloaded before running the test cases. -%% Opts = [Opt], -%% Opt = {Cover,AppOrCoverFile} | {Cover,App,CoverFile} -%% Cover = cover | cover_details -%% AppOrCoverFile = App | CoverFile -%% App = atom(), an application name -%% CoverFile = string(), name of a cover file -%% (see doc of test_server_ctrl:cover/2/3) -%% -%% i() -%% Shows information about the jobs being run, by dumping -%% the process information for the test_server. -%% -%% l(Mod) -%% This function reloads a module just like c:l/1, but works -%% even for a module in one of the sticky library directories -%% (for instance, lists can be reloaded). - -%% Runs all tests cases in the current directory. - -r() -> - r([]). -r(Opts) when is_list(Opts), is_atom(hd(Opts)) -> - ensure_ts_started(Opts), - test_server_ctrl:add_dir("current_dir", "."); - -%% Checks if argument is a spec file or a module -%% (spec file must be named "*.spec" or "*.spec.OsType") -%% If module, reloads module and runs all test cases in it. -%% If spec, runs all test cases in it. - -r(SpecOrMod) -> - r(SpecOrMod,[]). -r(SpecOrMod,Opts) when is_list(Opts) -> - ensure_ts_started(Opts), - case filename:extension(SpecOrMod) of - [] -> - l(SpecOrMod), - test_server_ctrl:add_module(SpecOrMod); - ".spec" -> - test_server_ctrl:add_spec(SpecOrMod); - _ -> - Spec2 = filename:rootname(SpecOrMod), - case filename:extension(Spec2) of - ".spec" -> - %% *.spec.Type - test_server_ctrl:add_spec(SpecOrMod); - _ -> - {error, unknown_filetype} - end - end; - -%% Reloads the given module and runs the given test case in it. - -r(Mod, Case) -> - r(Mod,Case,[]). -r(Mod, Case, Opts) -> - ensure_ts_started(Opts), - l(Mod), - test_server_ctrl:add_case(Mod, Case). - -%% Shows information about the jobs being run. - -i() -> - ensure_ts_started([]), - hformat("Job", "Current", "Total", "Success", "Failed", "Skipped"), - i(test_server_ctrl:jobs()). 
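Note: a brief usage sketch of the shell shortcuts documented above; the suite, case, and application names are hypothetical.

    %% From an Erlang shell started in a test directory:
    ts:r(my_SUITE, [{cover, kernel}]).  % all cases in my_SUITE, cover on kernel
    ts:r(my_SUITE, my_case).            % reload my_SUITE and run one case
    ts:i().                             % list the currently running jobs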
- -i([{Name, Pid}|Rest]) when is_pid(Pid) -> - {dictionary, PI} = process_info(Pid, dictionary), - {value, {_, CaseNum}} = lists:keysearch(test_server_case_num, 1, PI), - {value, {_, Cases}} = lists:keysearch(test_server_cases, 1, PI), - {value, {_, Failed}} = lists:keysearch(test_server_failed, 1, PI), - {value, {_, {UserSkipped,AutoSkipped}}} = lists:keysearch(test_server_skipped, 1, PI), - {value, {_, Ok}} = lists:keysearch(test_server_ok, 1, PI), - nformat(Name, CaseNum, Cases, Ok, Failed, UserSkipped+AutoSkipped), - i(Rest); -i([]) -> - ok. - -hformat(A1, A2, A3, A4, A5, A6) -> - io:format("~-20s ~8s ~8s ~8s ~8s ~8s~n", [A1,A2,A3,A4,A5,A6]). - -nformat(A1, A2, A3, A4, A5, A6) -> - io:format("~-20s ~8w ~8w ~8w ~8w ~8w~n", [A1,A2,A3,A4,A5,A6]). - -%% Force load of a module even if it is in a sticky directory. - -l(Mod) -> - case do_load(Mod) of - {error, sticky_directory} -> - Dir = filename:dirname(code:which(Mod)), - code:unstick_dir(Dir), - do_load(Mod), - code:stick_dir(Dir); - X -> - X - end. - - -ensure_ts_started(Opts) -> - Pid = case whereis(test_server_ctrl) of - undefined -> - test_server_ctrl:start(); - P when is_pid(P) -> - P - end, - case Opts of - [{Cover,AppOrCoverFile}] when Cover==cover; Cover==cover_details -> - test_server_ctrl:cover(AppOrCoverFile,cover_type(Cover)); - [{Cover,App,CoverFile}] when Cover==cover; Cover==cover_details -> - test_server_ctrl:cover(App,CoverFile,cover_type(Cover)); - _ -> - ok - end, - Pid. - -cover_type(cover) -> overview; -cover_type(cover_details) -> details. - -do_load(Mod) -> - code:purge(Mod), - code:load_file(Mod). - - -compile_testcases() -> - compile_datadirs("../*/*_data"). - -compile_testcases(App) when is_atom(App) -> - compile_testcases([App]); -compile_testcases([App | T]) -> - compile_datadirs(io_lib:format("../~s_test/*_data", [App])), - compile_testcases(T); -compile_testcases([]) -> - ok. - -compile_datadirs(DataDirs) -> - {ok,Variables} = file:consult("variables"), - - lists:foreach(fun(Dir) -> - ts_lib:make_non_erlang(Dir, Variables) - end, - filelib:wildcard(DataDirs)). diff --git a/lib/test_server/src/ts.hrl b/lib/test_server/src/ts.hrl deleted file mode 100644 index 4c940fdc4f..0000000000 --- a/lib/test_server/src/ts.hrl +++ /dev/null @@ -1,38 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2012. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%% Defines ripped out from test_server (these must remain the same -%% as in test_server). - --define(logdir_ext, ".logs"). --define(suitelog_name, "suite.log"). --define(last_file, "last_name"). --define(last_link, "last_link"). --define(last_test, "last_test"). --define(run_summary, "suite.summary"). --define(cover_total,"total_cover.log"). --define(variables, "variables"). --define(cross_variables, "variables-cross"). --define(LF, [10]). % Newline in VxWorks script --define(CHAR_PER_LINE, 60). % Characters per VxWorks script building line --define(CROSS_COOKIE, "cross"). 
% cookie used when cross platform testing --define(TS_PORT, 7887). --define(TEST_SERVER_SCRIPT, "test_server_vx.script"). - diff --git a/lib/test_server/src/ts.unix.config b/lib/test_server/src/ts.unix.config deleted file mode 100644 index 1ba5d9033e..0000000000 --- a/lib/test_server/src/ts.unix.config +++ /dev/null @@ -1,6 +0,0 @@ -%% -*- erlang -*- - -%% Always run a (VNC) X server on host -%% {xserver, "xserver.example.com:66"}. - -{unix,[{telnet,"belegost"},{username,"telnet-test"},{password,"tset-tenlet"},{keep_alive,true}]}. diff --git a/lib/test_server/src/ts.win32.config b/lib/test_server/src/ts.win32.config deleted file mode 100644 index cae587bea8..0000000000 --- a/lib/test_server/src/ts.win32.config +++ /dev/null @@ -1,8 +0,0 @@ -%% -*- erlang -*- - -%%% There is no equivalent command to ypmatch on Win32... :-( -%{hardcoded_hosts, -% [{"127.0.0.1","localhost"}]}. - -%{hardcoded_ipv6_hosts, -% [{"::1","localhost"}]}. diff --git a/lib/test_server/src/ts_autoconf_win32.erl b/lib/test_server/src/ts_autoconf_win32.erl deleted file mode 100644 index 288305b406..0000000000 --- a/lib/test_server/src/ts_autoconf_win32.erl +++ /dev/null @@ -1,256 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2012. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%% Purpose : Autoconf for Windows. - --module(ts_autoconf_win32). --export([configure/0]). - --include("ts.hrl"). - -configure() -> - case variables() of - {ok, Vars} -> - ts_lib:subst_file("conf_vars.in", "conf_vars", Vars); - Error -> - Error - end. - -variables() -> - run_tests(tests(), []). - -run_tests([{Prompt, Tester}|Rest], Vars) -> - io:format("checking ~s... ", [Prompt]), - case catch Tester(Vars) of - {'EXIT', Reason} -> - io:format("FAILED~nExit status: ~p~n", [Reason]), - {error, auto_conf_failed}; - {Result, NewVars} -> - io:format("~s~n", [lists:concat([Result])]), - run_tests(Rest, NewVars) - end; -run_tests([], Vars) -> - {ok, Vars}. - -%%% The tests. - -tests() -> - [{"host system type", fun system_type/1}, - {"CPU type", fun cpu/1}, - {"for C compiler", fun c_compiler/1}, - {"for make program", fun make/1}, - {"for location of SSL libraries", fun ssl/1}, - {"for location of Java compiler", fun javac/1}]. - -system_type(Vars) -> - Os = case os:type() of - {win32, nt} -> - case os:version() of - {4,_,_} -> "Windows NT"; - {5,0,_} -> "Windows 2000"; - {5,1,_} -> "Windows XP"; - {5,2,_} -> "Windows 2003"; - {6,0,_} -> "Windows Vista"; - {6,1,_} -> "Windows 7"; - {_,_,_} -> "Windows NCC-1701-D" - end; - {win32, windows} -> - case os:version() of - {4,0,_} -> "Windows 95"; - {4,10,_} -> "Windows 98" - end; - {win32, _} -> "Windows" - end, - {Os, [{host_os, Os}, {host, "win32"}|Vars]}. 
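Note: the probing loop in ts_autoconf_win32 above runs the {Prompt, Fun} pairs returned by tests/0, each fun returning {Result, NewVars} or throwing to stop early. A hypothetical extra probe would follow the same shape as the javac check further down in this module.

    %% Hypothetical additional probe; 'PERL' is not a variable used by the
    %% real scripts, it only illustrates the contract of run_tests/2.
    perl(Vars) ->
        case os:find_executable("perl") of
            false -> {no, Vars};
            Path when is_list(Path) -> {Path, [{'PERL', "perl"}|Vars]}
        end.
    %% ...and it would be listed in tests/0 as {"for Perl", fun perl/1}.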
- -cpu(Vars) -> - Arch = os:getenv("PROCESSOR_ARCHITECTURE"), - Level0 = os:getenv("PROCESSOR_Level"), - Cpu = case {Arch, Level0} of - {"x86", Level} when is_list(Level) -> - "i" ++ Level ++ "86"; - {Other, _Level} when is_list(Other) -> - Other; - {false, _} -> - "i386" - end, - {Cpu, [{host_cpu, Cpu}|Vars]}. - -c_compiler(Vars) -> - try - CompTests = [{msc, fun visual_cxx/1}, - {gnuc, fun mingw32/1}], - %% First try to find the same compiler that the system - %% was built with... - UsedCompiler = case erlang:system_info(c_compiler_used) of - {UsedCmplr, _} -> - case lists:keysearch(UsedCmplr, 1, CompTests) of - {value, {UsedCmplr, CompTest}} -> - CompTest(Vars); - _ -> - ok - end, - UsedCmplr; - undefined -> - undefined - end, - %% ... then try to find a compiler... - lists:foreach(fun ({Cmplr, _CmplrTst}) when Cmplr =:= UsedCompiler -> - ok; % Have already checked for this one - ({_Cmplr, CmplrTst}) -> - CmplrTst(Vars) - end, - CompTests), - {no, Vars} - catch - throw:{_Path, _NewVars} = Res -> Res - end. - -visual_cxx(Vars) -> - case os:find_executable("cl") of - false -> - {no, Vars}; - Path when is_list(Path) -> - {DEFAULT_THR_LIB, - ERTS_THR_LIB, - DLL, - DBG_LINK, - DBG_COMP, - OPT} = - case is_debug_build() of - true -> - {"-MTd ", - "-MDd ", - "-LDd ", - "-debug -pdb:none ", - "-Z7 -DDEBUG", - " "}; - false -> - {"-MT ", - "-MD ", - "-LD ", - " ", - " ", - "-Ox "} - end, - WIN32 = "-D__WIN32__ ", - ERTS_CFLAGS = ERTS_THR_LIB ++ WIN32 ++ OPT ++ DBG_COMP, - LIBS = "ws2_32.lib", - CC = "cl -nologo", - throw({Path, [{'CC', CC}, - {'LD', CC}, - {'SHLIB_LD', CC}, - {'SHLIB_LDFLAGS', ERTS_THR_LIB ++ DLL}, - {'SHLIB_LDLIBS', "-link " ++ DBG_LINK ++ "kernel32.lib"}, - {'SHLIB_EXTRACT_ALL', ""}, - {'CFLAGS', DEFAULT_THR_LIB ++ WIN32 ++ DBG_COMP}, - {'EI_CFLAGS', DEFAULT_THR_LIB ++ WIN32 ++ DBG_COMP}, - {'ERTS_CFLAGS', ERTS_CFLAGS}, - {'SHLIB_CFLAGS', ERTS_CFLAGS++DLL}, - {'CROSSLDFLAGS', ""}, - {'DEFS', common_c_defs()}, - {'SHLIB_SUFFIX', ".dll"}, - {'ERTS_LIBS', ERTS_THR_LIB ++ LIBS}, - {'LIBS', DEFAULT_THR_LIB ++ "-link " ++ DBG_LINK ++ LIBS}, - {obj,".obj"}, - {exe, ".exe"}, - {test_c_compiler, "{msc, undefined}"} - | Vars]}) - end. - -mingw32(Vars) -> - Gcc = "mingw32-gcc", - case os:find_executable(Gcc) of - false -> - {no, Vars}; - Path when is_list(Path) -> - {DBG_COMP, - OPT} = - case is_debug_build() of - true -> - {"-DDEBUG", - " "}; - false -> - {" ", - "-O2 "} - end, - WIN32 = "-D__WIN32__ ", - ERTS_CFLAGS = WIN32 ++ "-g " ++ OPT ++ DBG_COMP, - LIBS = "-lws2_32", - CC = Gcc, - throw({Path, [{'CC', CC}, - {'LD', CC}, - {'SHLIB_LD', CC}, - {'SHLIB_LDFLAGS', "-shared "}, - {'SHLIB_LDLIBS', " -lkernel32"}, - {'SHLIB_EXTRACT_ALL', ""}, - {'CFLAGS', WIN32 ++ DBG_COMP}, - {'EI_CFLAGS', WIN32 ++ DBG_COMP}, - {'ERTS_CFLAGS', ERTS_CFLAGS}, - {'SHLIB_CFLAGS', ERTS_CFLAGS}, - {'CROSSLDFLAGS', ""}, - {'DEFS', common_c_defs()}, - {'SHLIB_SUFFIX', ".dll"}, - {'ERTS_LIBS', LIBS}, - {'LIBS', LIBS}, - {obj,".o"}, - {exe, ".exe"}, - {test_c_compiler, "{gnuc, undefined}"} - | Vars]}) - end. - -common_c_defs() -> - "-DHAVE_STRERROR=1". - -make(Vars) -> - try - find_make("nmake -nologo", Vars), - find_make("mingw32-make", Vars) - catch - throw:{_Path, _NewVars} = Res -> Res - end. - -find_make(MakeCmd, Vars) -> - [Make|_] = string:tokens(MakeCmd, " \t"), - case os:find_executable(Make) of - false -> - {no, Vars}; - Path when is_list(Path) -> - throw({Path, [{make_command, MakeCmd} | Vars]}) - end. - -ssl(Vars) -> - {"win32",[{'SSLEAY_ROOT',"win32"}|Vars]}. 
- -javac(Vars) -> - case os:find_executable("javac") of - false -> - {no, Vars}; - Path when is_list(Path) -> - {Path, [{'JAVAC', "javac"} | Vars]} - end. - -is_debug_build() -> - case catch string:str(erlang:system_info(system_version), "debug") of - Int when is_integer(Int), Int > 0 -> - true; - _ -> - false - end. diff --git a/lib/test_server/src/ts_benchmark.erl b/lib/test_server/src/ts_benchmark.erl deleted file mode 100644 index 3e55edefb0..0000000000 --- a/lib/test_server/src/ts_benchmark.erl +++ /dev/null @@ -1,87 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2012-2012. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(ts_benchmark). - --include_lib("common_test/include/ct_event.hrl"). --include_lib("kernel/include/file.hrl"). --include("ts.hrl"). - --export([benchmarks/0, - run/3]). - -%% gen_event callbacks --export([init/1, handle_event/2]). - -benchmarks() -> - {ok, Cwd} = file:get_cwd(), - ts_lib:specialized_specs(Cwd,"bench"). - -run(Specs, Opts, Vars) -> - {ok, Cwd} = file:get_cwd(), - {{YY,MM,DD},{HH,Mi,SS}} = calendar:local_time(), - BName = lists:concat([YY,"_",MM,"_",DD,"T",HH,"_",Mi,"_",SS]), - BDir = filename:join([Cwd,BName]), - file:make_dir(BDir), - [ts_run:run(atom_to_list(Spec), - [{spec, [atom_to_list(Spec)++"_bench.spec"]}], - [{event_handler, {ts_benchmark, [Spec,BDir]}}|Opts],Vars) - || Spec <- Specs], - file:delete(filename:join(Cwd,"latest_benchmark")), - {ok,D} = file:open(filename:join(Cwd,"latest_benchmark"),[write]), - io:format(D,BDir,[]), - file:close(D). - - -%%%=================================================================== -%%% gen_event callbacks -%%%=================================================================== - --record(state, { spec, suite, tc, stats_dir}). - -init([Spec,Dir]) -> - {ok, #state{ spec = Spec, stats_dir = Dir }}. - -handle_event(#event{name = tc_start, data = {Suite,Tc}}, State) -> - {ok,State#state{ suite = Suite, tc = Tc}}; -handle_event(#event{name = benchmark_data, data = Data}, State) -> - Spec = proplists:get_value(application, Data, State#state.spec), - Suite = proplists:get_value(suite, Data, State#state.suite), - Tc = proplists:get_value(name, Data, State#state.tc), - Value = proplists:get_value(value, Data), - {ok, D} = file:open(filename:join( - [State#state.stats_dir, - lists:concat([e(Spec),"-",e(Suite),"-", - e(Tc),".ebench"])]), - [append]), - io:format(D, "~p~n",[Value]), - file:close(D), - {ok, State}; -handle_event(_Event, State) -> - {ok, State}. - - -e(Atom) when is_atom(Atom) -> - Atom; -e(Str) when is_list(Str) -> - lists:map(fun($/) -> - $\\; - (C) -> - C - end,Str). diff --git a/lib/test_server/src/ts_erl_config.erl b/lib/test_server/src/ts_erl_config.erl deleted file mode 100644 index ab7363c106..0000000000 --- a/lib/test_server/src/ts_erl_config.erl +++ /dev/null @@ -1,403 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. 
-%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%% Purpose : Updates variable list with variables depending on -%%% running Erlang system. - --module(ts_erl_config). - - --export([variables/2]). - -%% Returns a list of key, value pairs. - -variables(Base0, OsType) -> - Base1 = erl_include(Base0), - Base2 = get_app_vars(fun erl_interface/2, Base1, OsType), - Base3 = get_app_vars(fun ic/2, Base2, OsType), - Base4 = get_app_vars(fun jinterface/2, Base3, OsType), - Base5 = dl_vars(Base4, Base3, OsType), - Base6 = emu_vars(Base5), - Base7 = get_app_vars(fun ssl/2, Base6, OsType), - Base8 = erts_lib(Base7, OsType), - Base = separators(Base8, OsType), - [{'EMULATOR', tl(code:objfile_extension())}, - {emu_threads, atom_to_list(erlang:system_info(threads))}, - {type_marker, case is_debug_build() of - true -> - ".debug"; - false -> - "" - end} - | Base]. - -get_app_vars(AppFun, Vars, OsType) -> - case catch AppFun(Vars,OsType) of - Res when is_list(Res) -> - Res; - {cannot_find_app, App} -> - io:format("* WARNING: Cannot find ~p!~n", [App]), - Vars; - {'EXIT', Reason} -> - exit(Reason); - Garbage -> - exit({unexpected_internal_error, Garbage}) - end. - -dl_vars(Vars, Base3, OsType) -> - ShlibRules0 = ".SUFFIXES:\n" ++ - ".SUFFIXES: @dll@ @obj@ .c\n\n" ++ - ".c@dll@:\n" ++ - "\t@CC@ -c @SHLIB_CFLAGS@ $(SHLIB_EXTRA_CFLAGS) -I@erl_include@ @DEFS@ $<\n" ++ - "\t@SHLIB_LD@ @CROSSLDFLAGS@ @SHLIB_LDFLAGS@ $(SHLIB_EXTRA_LDFLAGS) -o $@ $*@obj@ @SHLIB_LDLIBS@ $(SHLIB_EXTRA_LDLIBS)", - - ShlibRules = ts_lib:subst(ShlibRules0, Vars), - case get_app_vars2(fun jinterface/2, Base3, OsType) of - {App, not_found} -> - [{'SHLIB_RULES', ShlibRules}, {App, "not_found"}|Vars]; - _ -> - [{'SHLIB_RULES', ShlibRules}|Vars] - end. -get_app_vars2(AppFun, Vars, OsType) -> - case catch AppFun(Vars,OsType) of - Res when is_list(Res) -> - {jinterface, ok}; - {cannot_find_app, App} -> - {App, not_found}; - {'EXIT', Reason} -> - exit(Reason); - Garbage -> - exit({unexpected_internal_error, Garbage}) - end. - -erts_lib_name(multi_threaded, {win32, V}) -> - link_library("erts_MD" ++ case is_debug_build() of - true -> "d"; - false -> "" - end, - {win32, V}); -erts_lib_name(single_threaded, {win32, V}) -> - link_library("erts_ML" ++ case is_debug_build() of - true -> "d"; - false -> "" - end, - {win32, V}); -erts_lib_name(multi_threaded, OsType) -> - link_library("erts_r", OsType); -erts_lib_name(single_threaded, OsType) -> - link_library("erts", OsType). 
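Note: for concreteness, what erts_lib_name/2 above resolves to on two platforms, assuming a non-debug emulator; derived from link_library/2 defined further down in this module.

    %% Non-debug build assumed:
    "liberts_r.a" = erts_lib_name(multi_threaded, {unix,linux}),
    "erts_MD.lib" = erts_lib_name(multi_threaded, {win32,nt}).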
- -erts_lib(Vars,OsType) -> - {ErtsLibInclude, - ErtsLibIncludeGenerated, - ErtsLibIncludeInternal, - ErtsLibIncludeInternalGenerated, - ErtsLibPath, - ErtsLibInternalPath, - ErtsLibEthreadMake, - ErtsLibInternalMake - } - = case erl_root(Vars) of - {installed, _Root} -> - Erts = lib_dir(Vars, erts), - ErtsInclude = filename:join([Erts, "include"]), - ErtsIncludeInternal = filename:join([ErtsInclude, "internal"]), - ErtsLib = filename:join([Erts, "lib"]), - ErtsLibInternal = filename:join([ErtsLib, "internal"]), - ErtsEthreadMake = filename:join([ErtsIncludeInternal, "ethread.mk"]), - ErtsInternalMake = filename:join([ErtsIncludeInternal, "erts_internal.mk"]), - - {ErtsInclude, - ErtsInclude, - ErtsIncludeInternal, - ErtsIncludeInternal, - ErtsLib, - ErtsLibInternal, - ErtsEthreadMake, - ErtsInternalMake}; - {srctree, Root, Target} -> - Erts = filename:join([Root, "erts"]), - ErtsInclude = filename:join([Erts, "include"]), - ErtsIncludeTarget = filename:join([ErtsInclude, Target]), - ErtsIncludeInternal = filename:join([ErtsInclude, - "internal"]), - ErtsIncludeInternalTarget = filename:join([ErtsIncludeInternal, - Target]), - ErtsLib = filename:join([Erts, "lib", Target]), - ErtsLibInternal = filename:join([Erts, - "lib", - "internal", - Target]), - ErtsEthreadMake = filename:join([ErtsIncludeInternalTarget, "ethread.mk"]), - ErtsInternalMake = filename:join([ErtsIncludeInternalTarget, "erts_internal.mk"]), - - {ErtsInclude, - ErtsIncludeTarget, - ErtsIncludeInternal, - ErtsIncludeInternalTarget, - ErtsLib, - ErtsLibInternal, - ErtsEthreadMake, - ErtsInternalMake} - end, - [{erts_lib_include, - quote(filename:nativename(ErtsLibInclude))}, - {erts_lib_include_generated, - quote(filename:nativename(ErtsLibIncludeGenerated))}, - {erts_lib_include_internal, - quote(filename:nativename(ErtsLibIncludeInternal))}, - {erts_lib_include_internal_generated, - quote(filename:nativename(ErtsLibIncludeInternalGenerated))}, - {erts_lib_path, quote(filename:nativename(ErtsLibPath))}, - {erts_lib_internal_path, quote(filename:nativename(ErtsLibInternalPath))}, - {erts_lib_multi_threaded, erts_lib_name(multi_threaded, OsType)}, - {erts_lib_single_threaded, erts_lib_name(single_threaded, OsType)}, - {erts_lib_make_ethread, quote(ErtsLibEthreadMake)}, - {erts_lib_make_internal, quote(ErtsLibInternalMake)} - | Vars]. - -erl_include(Vars) -> - Include = - case erl_root(Vars) of - {installed, Root} -> - quote(filename:join([Root, "usr", "include"])); - {srctree, Root, Target} -> - quote(filename:join([Root, "erts", "emulator", "beam"])) - ++ " -I" ++ quote(filename:join([Root, "erts", "emulator"])) - ++ system_include(Root, Vars) - ++ " -I" ++ quote(filename:join([Root, "erts", "include"])) - ++ " -I" ++ quote(filename:join([Root, "erts", "include", Target])) - end, - [{erl_include, filename:nativename(Include)}|Vars]. - - -system_include(Root, Vars) -> - SysDir = - case ts_lib:var(os, Vars) of - "Windows" ++ _T -> "sys/win32"; - _ -> "sys/unix" - end, - " -I" ++ quote(filename:nativename(filename:join([Root, "erts", "emulator", SysDir]))). 
- -erl_interface(Vars,OsType) -> - {Incl, {LibPath, MkIncl}} = - case lib_dir(Vars, erl_interface) of - {error, bad_name} -> - throw({cannot_find_app, erl_interface}); - Dir -> - {filename:join(Dir, "include"), - case erl_root(Vars) of - {installed, _Root} -> - {filename:join(Dir, "lib"), - filename:join([Dir, "src", "eidefs.mk"])}; - {srctree, _Root, Target} -> - {filename:join([Dir, "obj", Target]), - filename:join([Dir, "src", Target, "eidefs.mk"])} - end} - end, - Lib = link_library("erl_interface",OsType), - Lib1 = link_library("ei",OsType), - {LibDrv, Lib1Drv} = - case erlang:system_info(threads) of - false -> - case OsType of - {unix,_} -> - {link_library("erl_interface_st",OsType), - link_library("ei_st",OsType)}; - _ -> - {Lib, Lib1} - end; - true -> - case OsType of - {win32, _} -> - {link_library("erl_interface_md",OsType), - link_library("ei_md",OsType)}; - _ -> - {Lib, Lib1} - end - end, - ThreadLib = case OsType of - % FIXME: FreeBSD uses gcc flag '-pthread' or linking with - % "libc_r". So it has to be last of libs. This is an - % temporary solution, should be configured elsewhere. - - % This temporary solution have now failed! - % A new temporary solution is installed ... - % {unix,freebsd} -> "-lc_r"; - {unix,freebsd} -> - "-lpthread"; - {unix,_} -> - "-lpthread"; - _ -> - "" - end, - [{erl_interface_libpath, quote(filename:nativename(LibPath))}, - {erl_interface_sock_libs, sock_libraries(OsType)}, - {erl_interface_lib, quote(filename:join(LibPath, Lib))}, - {erl_interface_eilib, quote(filename:join(LibPath, Lib1))}, - {erl_interface_lib_drv, quote(filename:join(LibPath, LibDrv))}, - {erl_interface_eilib_drv, quote(filename:join(LibPath, Lib1Drv))}, - {erl_interface_threadlib, ThreadLib}, - {erl_interface_include, quote(filename:nativename(Incl))}, - {erl_interface_mk_include, quote(filename:nativename(MkIncl))} - | Vars]. - -ic(Vars, OsType) -> - {ClassPath, LibPath, Incl} = - case lib_dir(Vars, ic) of - {error, bad_name} -> - throw({cannot_find_app, ic}); - Dir -> - {filename:join([Dir, "priv", "ic.jar"]), - case erl_root(Vars) of - {installed, _Root} -> - filename:join([Dir, "priv", "lib"]); - {srctree, _Root, Target} -> - filename:join([Dir, "priv", "lib", Target]) - end, - filename:join(Dir, "include")} - end, - [{ic_classpath, quote(filename:nativename(ClassPath))}, - {ic_libpath, quote(filename:nativename(LibPath))}, - {ic_lib, quote(filename:join(filename:nativename(LibPath),link_library("ic", OsType)))}, - {ic_include_path, quote(filename:nativename(Incl))}|Vars]. - -jinterface(Vars, _OsType) -> - ClassPath = - case lib_dir(Vars, jinterface) of - {error, bad_name} -> - throw({cannot_find_app, jinterface}); - Dir -> - filename:join([Dir, "priv", "OtpErlang.jar"]) - end, - [{jinterface_classpath, quote(filename:nativename(ClassPath))}|Vars]. - -lib_dir(Vars, Lib) -> - LibLibDir = case Lib of - erts -> - filename:join([code:root_dir(), - "erts-" ++ erlang:system_info(version)]); - _ -> - code:lib_dir(Lib) - end, - case {get_var(crossroot, Vars), LibLibDir} of - {{error, _}, _} -> %no crossroot - LibLibDir; - {CrossRoot, _} -> - %% XXX: Ugly. So ugly I won't comment it - %% /Patrik - CLibDirList = case Lib of - erts -> - [CrossRoot, "erts"]; - _ -> - [CrossRoot, "lib", atom_to_list(Lib)] - end, - CLibDir = filename:join(CLibDirList), - Cmd = "ls -d " ++ CLibDir ++ "*", - XLibDir = lists:last(string:tokens(os:cmd(Cmd),"\n")), - case file:list_dir(XLibDir) of - {error, enoent} -> - []; - _ -> - XLibDir - end - end. 
- -erl_root(Vars) -> - Root = case get_var(crossroot,Vars) of - {error, notfound} -> code:root_dir(); - CrossRoot -> CrossRoot - end, - case ts_lib:erlang_type(Root) of - {srctree, _Version} -> - Target = get_var(target, Vars), - {srctree, Root, Target}; - {_, _Version} -> - {installed, Root} - end. - - -get_var(Key, Vars) -> - case lists:keysearch(Key, 1, Vars) of - {value, {Key, Value}} -> - Value; - _ -> - {error, notfound} - end. - - -sock_libraries({win32, _}) -> - "ws2_32.lib"; -sock_libraries({unix, _}) -> - "". % Included in general libraries if needed. - -link_library(LibName,{win32, _}) -> - LibName ++ ".lib"; -link_library(LibName,{unix, _}) -> - "lib" ++ LibName ++ ".a"; -link_library(_LibName,_Other) -> - exit({link_library, not_supported}). - -%% Returns emulator specific variables. -emu_vars(Vars) -> - [{is_source_build, is_source_build()}, - {erl_name, atom_to_list(lib:progname())}|Vars]. - -is_source_build() -> - string:str(erlang:system_info(system_version), "[source]") > 0. - -is_debug_build() -> - case catch string:str(erlang:system_info(system_version), "debug") of - Int when is_integer(Int), Int > 0 -> - true; - _ -> - false - end. -%% -%% ssl_libdir -%% -ssl(Vars, _OsType) -> - case lib_dir(Vars, ssl) of - {error, bad_name} -> - throw({cannot_find_app, ssl}); - Dir -> - [{ssl_libdir, quote(filename:nativename(Dir))}| Vars] - end. - -separators(Vars, {win32,_}) -> - [{'DS',"\\"},{'PS',";"}|Vars]; -separators(Vars, _) -> - [{'DS',"/"},{'PS',":"}|Vars]. - -quote(List) -> - case lists:member($ , List) of - false -> List; - true -> make_quote(List) - end. - -make_quote(List) -> - case os:type() of - {win32, _} -> %% nmake" - [$"] ++ List ++ [$"]; - _ -> %% make - BackQuote = fun($ , Acc) -> [$\\ , $ |Acc]; - (Char, Acc) -> [Char|Acc] end, - lists:foldr(BackQuote, [], List) - end. diff --git a/lib/test_server/src/ts_install.erl b/lib/test_server/src/ts_install.erl deleted file mode 100644 index 600a576820..0000000000 --- a/lib/test_server/src/ts_install.erl +++ /dev/null @@ -1,465 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(ts_install). - --export([install/2, platform_id/1]). - --include("ts.hrl"). --include_lib("kernel/include/file.hrl"). - -install(install_local, Options) -> - install(os:type(), Options); - -install(TargetSystem, Options) -> - case file:consult(?variables) of - {ok, Vars} -> - case proplists:get_value(cross,Vars) of - "yes" when Options == []-> - target_install(Vars); - _ -> - build_install(TargetSystem, Options) - end; - _ -> - build_install(TargetSystem, Options) - end. 
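Note: install/2 above chooses between a cross installation and a local build by consulting the "variables" file, which holds one Erlang term per line (written by write_terms/2 further down). A few illustrative entries; the actual key set depends on the platform and on ts_erl_config:variables/2.

    {cross,"no"}.
    {longnames,false}.
    {erl_flags,[]}.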
- - -build_install(TargetSystem, Options) -> - XComp = parse_xcomp_file(proplists:get_value(xcomp,Options)), - case autoconf(TargetSystem, XComp++Options) of - {ok, Vars0} -> - OsType = os_type(TargetSystem), - Vars1 = ts_erl_config:variables(Vars0++XComp++Options,OsType), - {Options1, Vars2} = add_vars(Vars1, Options), - Vars3 = lists:flatten([Options1|Vars2]), - write_terms(?variables, Vars3); - {error, Reason} -> - {error, Reason} - end. - -os_type({unix,_}=OsType) -> OsType; -os_type({win32,_}=OsType) -> OsType. - -target_install(CrossVars) -> - io:format("Cross installation detected, skipping configure and data_dir make~n"), - case file:rename(?variables,?cross_variables) of - ok -> - ok; - _ -> - io:format("Could not find variables file from cross make~n"), - throw(cross_installation_failed) - end, - CPU = proplists:get_value('CPU',CrossVars), - OS = proplists:get_value(os,CrossVars), - {Options,Vars} = add_vars([{cross,"yes"},{'CPU',CPU},{os,OS}],[]), - Variables = lists:flatten([Options|Vars]), - write_terms(?variables, Variables). - -%% Autoconf for various platforms. -%% unix uses the configure script -%% win32 uses ts_autoconf_win32 - -autoconf(TargetSystem, XComp) -> - case autoconf1(TargetSystem, XComp) of - ok -> - autoconf2(file:read_file("conf_vars")); - Error -> - Error - end. - -autoconf1({win32, _},[{cross,"no"}]) -> - ts_autoconf_win32:configure(); -autoconf1({unix, _},XCompFile) -> - unix_autoconf(XCompFile); -autoconf1(_,_) -> - io:format("cross compilation not supported for that this platform~n"), - throw(cross_installation_failed). - -autoconf2({ok, Bin}) -> - get_vars(ts_lib:b2s(Bin), name, [], []); -autoconf2(Error) -> - Error. - -get_vars([$:|Rest], name, Current, Result) -> - Name = list_to_atom(lists:reverse(Current)), - get_vars(Rest, value, [], [Name|Result]); -get_vars([$\r|Rest], value, Current, Result) -> - get_vars(Rest, value, Current, Result); -get_vars([$\n|Rest], value, Current, [Name|Result]) -> - Value = lists:reverse(Current), - get_vars(Rest, name, [], [{Name, Value}|Result]); -get_vars([C|Rest], State, Current, Result) -> - get_vars(Rest, State, [C|Current], Result); -get_vars([], name, [], Result) -> - {ok, Result}; -get_vars(_, _, _, _) -> - {error, fatal_bad_conf_vars}. - -config_flags() -> - case os:getenv("CONFIG_FLAGS") of - false -> []; - CF -> string:tokens(CF, " \t\n") - end. - -unix_autoconf(XConf) -> - Configure = filename:absname("configure"), - Flags = proplists:get_value(crossflags,XConf,[]), - Env = proplists:get_value(crossenv,XConf,[]), - Host = get_xcomp_flag("host", Flags), - Build = get_xcomp_flag("build", Flags), - Threads = [" --enable-shlib-thread-safety" || - erlang:system_info(threads) /= false], - Debug = [" --enable-debug-mode" || - string:str(erlang:system_info(system_version),"debug") > 0], - MXX_Build = [Y || Y <- config_flags(), - Y == "--enable-m64-build" - orelse Y == "--enable-m32-build"], - Args = Host ++ Build ++ Threads ++ Debug ++ " " ++ MXX_Build, - case filelib:is_file(Configure) of - true -> - OSXEnv = macosx_cflags(), - UnQuotedEnv = assign_vars(unquote(Env++OSXEnv)), - io:format("Running ~s~nEnv: ~p~n", - [lists:flatten(Configure ++ Args),UnQuotedEnv]), - Port = open_port({spawn, lists:flatten(["\"",Configure,"\"",Args])}, - [stream, eof, {env,UnQuotedEnv}]), - ts_lib:print_data(Port); - false -> - {error, no_configure_script} - end. 
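Note: a sketch of the "conf_vars" format parsed by get_vars/4 above; the variable names are illustrative, and get_vars/4 is not exported, so the match only shows the shape of the result.

    %% "name:value" lines, one variable per line:
    {ok,[{make_command,"make"},{'CC',"gcc"}]} =
        get_vars("CC:gcc\nmake_command:make\n", name, [], []).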
- -unquote([{Var,Val}|T]) -> - [{Var,unquote(Val)}|unquote(T)]; -unquote([]) -> - []; -unquote("\""++Rest) -> - lists:reverse(tl(lists:reverse(Rest))); -unquote(String) -> - String. - -assign_vars([]) -> - []; -assign_vars([{VAR,FlagsStr} | VARs]) -> - [{VAR,assign_vars(FlagsStr)} | assign_vars(VARs)]; -assign_vars(FlagsStr) -> - Flags = [assign_all_vars(Str,[]) || Str <- string:tokens(FlagsStr, [$ ])], - string:strip(lists:flatten(lists:map(fun(Flag) -> - Flag ++ " " - end, Flags)), right). - -assign_all_vars([$$ | Rest], FlagSoFar) -> - {VarName,Rest1} = get_var_name(Rest, []), - assign_all_vars(Rest1, FlagSoFar ++ assign_var(VarName)); -assign_all_vars([Char | Rest], FlagSoFar) -> - assign_all_vars(Rest, FlagSoFar ++ [Char]); -assign_all_vars([], Flag) -> - Flag. - -get_var_name([Ch | Rest] = Str, VarR) -> - case valid_char(Ch) of - true -> get_var_name(Rest, [Ch | VarR]); - false -> {lists:reverse(VarR),Str} - end; -get_var_name([], VarR) -> - {lists:reverse(VarR),[]}. - -assign_var(VarName) -> - case os:getenv(VarName) of - false -> ""; - Val -> Val - end. - -valid_char(Ch) when Ch >= $a, Ch =< $z -> true; -valid_char(Ch) when Ch >= $A, Ch =< $Z -> true; -valid_char(Ch) when Ch >= $0, Ch =< $9 -> true; -valid_char($_) -> true; -valid_char(_) -> false. - -get_xcomp_flag(Flag, Flags) -> - get_xcomp_flag(Flag, Flag, Flags). -get_xcomp_flag(Flag, Tag, Flags) -> - case proplists:get_value(Flag,Flags) of - undefined -> ""; - "guess" -> [" --",Tag,"=",os:cmd("$ERL_TOP/erts/autoconf/config.guess")]; - HostVal -> [" --",Tag,"=",HostVal] - end. - - -macosx_cflags() -> - case os:type() of - {unix, darwin} -> - %% To ensure that the drivers we build can be loaded - %% by the emulator, add either -m32 or -m64 to CFLAGS. - WordSize = erlang:system_info(wordsize), - Mflag = "-m" ++ integer_to_list(8*WordSize), - [{"CFLAGS", Mflag},{"LDFLAGS", Mflag}]; - _ -> - [] - end. - -parse_xcomp_file(undefined) -> - [{cross,"no"}]; -parse_xcomp_file(Filepath) -> - {ok,Bin} = file:read_file(Filepath), - Lines = binary:split(Bin,<<"\n">>,[global,trim]), - {Envs,Flags} = parse_xcomp_file(Lines,[],[]), - [{cross,"yes"},{crossroot,os:getenv("ERL_TOP")}, - {crossenv,Envs},{crossflags,Flags}]. - -parse_xcomp_file([<<A:8,_/binary>> = Line|R],Envs,Flags) - when $A =< A, A =< $Z -> - [Var,Value] = binary:split(Line,<<"=">>), - parse_xcomp_file(R,[{ts_lib:b2s(Var), - ts_lib:b2s(Value)}|Envs],Flags); -parse_xcomp_file([<<"erl_xcomp_",Line/binary>>|R],Envs,Flags) -> - [Var,Value] = binary:split(Line,<<"=">>), - parse_xcomp_file(R,Envs,[{ts_lib:b2s(Var), - ts_lib:b2s(Value)}|Flags]); -parse_xcomp_file([_|R],Envs,Flags) -> - parse_xcomp_file(R,Envs,Flags); -parse_xcomp_file([],Envs,Flags) -> - {lists:reverse(Envs),lists:reverse(Flags)}. - -write_terms(Name, Terms) -> - case file:open(Name, [write]) of - {ok, Fd} -> - Result = write_terms1(Fd, remove_duplicates(Terms)), - file:close(Fd), - Result; - {error, Reason} -> - {error, Reason} - end. - -write_terms1(Fd, [Term|Rest]) -> - ok = io:format(Fd, "~p.\n", [Term]), - write_terms1(Fd, Rest); -write_terms1(_, []) -> - ok. - -remove_duplicates(List) -> - lists:reverse( - lists:foldl(fun({Key,Val},Acc) -> - R = make_ref(), - case proplists:get_value(Key,Acc,R) of - R -> [{Key,Val}|Acc]; - _Else -> - Acc - end - end,[],List)). 
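Note: a sketch of the file format consumed by parse_xcomp_file/1 above; the compiler name and host triplet are illustrative.

    %% xcomp file (illustrative):
    %%   CC=arm-linux-gnueabi-gcc          (upper-case start => crossenv)
    %%   erl_xcomp_host=arm-linux-gnueabi  (erl_xcomp_* prefix => crossflags)
    %%
    %% Resulting option list, as built by parse_xcomp_file/1:
    [{cross,"yes"},
     {crossroot,os:getenv("ERL_TOP")},
     {crossenv,[{"CC","arm-linux-gnueabi-gcc"}]},
     {crossflags,[{"host","arm-linux-gnueabi"}]}].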
- -add_vars(Vars0, Opts0) -> - {Opts,LongNames} = - case lists:keymember(longnames, 1, Opts0) of - true -> - {lists:keydelete(longnames, 1, Opts0),true}; - false -> - {Opts0,false} - end, - {PlatformId, PlatformLabel, PlatformFilename, Version} = - platform([{longnames, LongNames}|Vars0]), - NetDir = lists:concat(["/net", hostname()]), - Mounted = case file:read_file_info(NetDir) of - {ok, #file_info{type = directory}} -> NetDir; - _ -> "" - end, - {Opts, [{longnames, LongNames}, - {platform_id, PlatformId}, - {platform_filename, PlatformFilename}, - {rsh_name, get_rsh_name()}, - {platform_label, PlatformLabel}, - {ts_net_dir, Mounted}, - {erl_flags, []}, - {erl_release, Version}, - {ts_testcase_callback, get_testcase_callback()} | Vars0]}. - -get_testcase_callback() -> - case os:getenv("TS_TESTCASE_CALLBACK") of - ModFunc when is_list(ModFunc), ModFunc /= "" -> - case string:tokens(ModFunc, " ") of - [_Mod,_Func] -> ModFunc; - _ -> "" - end; - _ -> - case init:get_argument(ts_testcase_callback) of - {ok,[[Mod,Func]]} -> Mod ++ " " ++ Func; - _ -> "" - end - end. - -get_rsh_name() -> - case os:getenv("ERL_RSH") of - false -> "rsh"; - Str -> Str - end. - -platform_id(Vars) -> - {Id,_,_,_} = platform(Vars), - Id. - -platform(Vars) -> - Hostname = hostname(), - - {Type,Version} = ts_lib:erlang_type(), - Cpu = ts_lib:var('CPU', Vars), - Os = ts_lib:var(os, Vars), - - ErlType = to_upper(atom_to_list(Type)), - OsType = ts_lib:initial_capital(Os), - CpuType = ts_lib:initial_capital(Cpu), - LinuxDist = linux_dist(), - ExtraLabel = extra_platform_label(), - Schedulers = schedulers(), - BindType = bind_type(), - KP = kernel_poll(), - IOTHR = io_thread(), - LC = lock_checking(), - MT = modified_timing(), - AsyncThreads = async_threads(), - Debug = debug(), - CpuBits = word_size(), - Common = lists:concat([Hostname,"/",OsType,"/",CpuType,CpuBits,LinuxDist, - Schedulers,BindType,KP,IOTHR,LC,MT,AsyncThreads, - Debug,ExtraLabel]), - PlatformId = lists:concat([ErlType, " ", Version, Common]), - PlatformLabel = ErlType ++ Common, - PlatformFilename = platform_as_filename(PlatformId), - {PlatformId, PlatformLabel, PlatformFilename, Version}. - -platform_as_filename(Label) -> - lists:map(fun($ ) -> $_; - ($/) -> $_; - (C) when $A =< C, C =< $Z -> C - $A + $a; - (C) -> C end, - Label). - -to_upper(String) -> - lists:map(fun(C) when $a =< C, C =< $z -> C - $a + $A; - (C) -> C end, - String). - -word_size() -> - case {erlang:system_info({wordsize,external}), - erlang:system_info({wordsize,internal})} of - {4,4} -> ""; - {8,8} -> "/64"; - {8,4} -> "/Halfword" - end. - -linux_dist() -> - case os:type() of - {unix,linux} -> - linux_dist_1([fun linux_dist_suse/0]); - _ -> "" - end. - -linux_dist_1([F|T]) -> - case F() of - "" -> linux_dist_1(T); - Str -> Str - end; -linux_dist_1([]) -> "". - -linux_dist_suse() -> - case filelib:is_file("/etc/SuSE-release") of - false -> ""; - true -> - Ver0 = os:cmd("awk '/^VERSION/ {print $3}' /etc/SuSE-release"), - [_|Ver1] = lists:reverse(Ver0), - Ver = lists:reverse(Ver1), - "/Suse" ++ Ver - end. - -hostname() -> - case catch inet:gethostname() of - {ok, Hostname} when is_list(Hostname) -> - "/" ++ lists:takewhile(fun (C) -> C /= $. end, Hostname); - _ -> - "/localhost" - end. - -async_threads() -> - case catch erlang:system_info(threads) of - true -> "/A"++integer_to_list(erlang:system_info(thread_pool_size)); - _ -> "" - end. 
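Note: platform/1 above assembles a human-readable platform id, and platform_as_filename/1 flattens it for use in file names; the id string below is illustrative.

    "otp_18.3_myhost_linux_x86" =
        platform_as_filename("OTP 18.3/myhost/Linux/x86").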
- -schedulers() -> - case catch erlang:system_info(smp_support) of - true -> - case {erlang:system_info(schedulers), - erlang:system_info(schedulers_online)} of - {S,S} -> - "/S"++integer_to_list(S); - {S,O} -> - "/S"++integer_to_list(S) ++ ":" ++ - integer_to_list(O) - end; - _ -> "" - end. - -bind_type() -> - case catch erlang:system_info(scheduler_bind_type) of - thread_no_node_processor_spread -> "/sbttnnps"; - no_node_processor_spread -> "/sbtnnps"; - no_node_thread_spread -> "/sbtnnts"; - processor_spread -> "/sbtps"; - thread_spread -> "/sbtts"; - no_spread -> "/sbtns"; - _ -> "" - end. - - -debug() -> - case string:str(erlang:system_info(system_version), "debug") of - 0 -> ""; - _ -> "/Debug" - end. - -lock_checking() -> - case catch erlang:system_info(lock_checking) of - true -> "/LC"; - _ -> "" - end. - -modified_timing() -> - case catch erlang:system_info(modified_timing_level) of - N when is_integer(N) -> - "/T" ++ integer_to_list(N); - _ -> "" - end. - -kernel_poll() -> - case catch erlang:system_info(kernel_poll) of - true -> "/KP"; - _ -> "" - end. - -io_thread() -> - case catch erlang:system_info(io_thread) of - true -> "/IOTHR"; - _ -> "" - end. - -extra_platform_label() -> - case os:getenv("TS_EXTRA_PLATFORM_LABEL") of - [] -> ""; - [_|_]=Label -> "/" ++ Label; - false -> "" - end. diff --git a/lib/test_server/src/ts_install_cth.erl b/lib/test_server/src/ts_install_cth.erl deleted file mode 100644 index ec0d54ccde..0000000000 --- a/lib/test_server/src/ts_install_cth.erl +++ /dev/null @@ -1,253 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2010-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%% @doc TS Installed SCB -%%% -%%% This module does what the make parts of the ts:run/x command did, -%%% but not the Makefile.first parts! So they have to be done by ts or -%%% manually!! - --module(ts_install_cth). - -%% Suite Callbacks --export([id/1]). --export([init/2]). - --export([pre_init_per_suite/3]). --export([post_init_per_suite/4]). --export([pre_end_per_suite/3]). --export([post_end_per_suite/4]). - --export([pre_init_per_group/3]). --export([post_init_per_group/4]). --export([pre_end_per_group/3]). --export([post_end_per_group/4]). - --export([pre_init_per_testcase/3]). --export([post_end_per_testcase/4]). - --export([on_tc_fail/3]). --export([on_tc_skip/3]). - --export([terminate/1]). - --include_lib("kernel/include/file.hrl"). - --type config() :: proplists:proplist(). --type reason() :: term(). --type skip_or_fail() :: {skip, reason()} | - {auto_skip, reason()} | - {fail, reason()}. - --record(state, { ts_conf_dir, target_system, install_opts, nodenames, nodes }). - -%% @doc The id of this SCB --spec id(Opts :: term()) -> - Id :: term(). -id(_Opts) -> - ?MODULE. - -%% @doc Always called before any other callback function. --spec init(Id :: term(), Opts :: proplists:proplist()) -> - {ok, State :: #state{}}. 
-init(_Id, Opts) -> - Nodenames = proplists:get_value(nodenames, Opts, 0), - Nodes = proplists:get_value(nodes, Opts, 0), - TSConfDir = proplists:get_value(ts_conf_dir, Opts), - TargetSystem = proplists:get_value(target_system, Opts, install_local), - InstallOpts = proplists:get_value(install_opts, Opts, []), - {ok, #state{ nodenames = Nodenames, - nodes = Nodes, - ts_conf_dir = TSConfDir, - target_system = TargetSystem, - install_opts = InstallOpts } }. - -%% @doc Called before init_per_suite is called. --spec pre_init_per_suite(Suite :: atom(), - Config :: config(), - State :: #state{}) -> - {config() | skip_or_fail(), NewState :: #state{}}. -pre_init_per_suite(Suite,Config,#state{ ts_conf_dir = undefined} = State) -> - DataDir = proplists:get_value(data_dir, Config), - ParentDir = filename:join( - lists:reverse( - tl(lists:reverse(filename:split(DataDir))))), - TSConfDir = filename:join([ParentDir, "..","test_server"]), - pre_init_per_suite(Suite, Config, State#state{ ts_conf_dir = TSConfDir }); -pre_init_per_suite(_Suite,Config,State) -> - DataDir = proplists:get_value(data_dir, Config), - try - {ok,Variables} = - file:consult(filename:join(State#state.ts_conf_dir,"variables")), - case proplists:get_value(cross,Variables) of - "yes" -> - ct:log("Not making data dir as tests have been cross compiled"); - _ -> - ts_lib:make_non_erlang(DataDir, Variables) - end, - - {add_node_name(Config, State), State} - catch error:{badmatch,{error,enoent}} -> - {add_node_name(Config, State), State}; - Error:Reason -> - Stack = erlang:get_stacktrace(), - ct:pal("~p failed! ~p:{~p,~p}",[?MODULE,Error,Reason,Stack]), - {{fail,{?MODULE,{Error,Reason, Stack}}},State} - end. - -%% @doc Called after init_per_suite. --spec post_init_per_suite(Suite :: atom(), - Config :: config(), - Return :: config() | skip_or_fail(), - State :: #state{}) -> - {config() | skip_or_fail(), NewState :: #state{}}. -post_init_per_suite(_Suite,_Config,Return,State) -> - test_server_ctrl:kill_slavenodes(), - {Return, State}. - -%% @doc Called before end_per_suite. --spec pre_end_per_suite(Suite :: atom(), - Config :: config() | skip_or_fail(), - State :: #state{}) -> - {ok | skip_or_fail(), NewState :: #state{}}. -pre_end_per_suite(_Suite,Config,State) -> - {Config, State}. - -%% @doc Called after end_per_suite. --spec post_end_per_suite(Suite :: atom(), - Config :: config(), - Return :: term(), - State :: #state{}) -> - {ok | skip_or_fail(), NewState :: #state{}}. -post_end_per_suite(_Suite,_Config,Return,State) -> - {Return, State}. - -%% @doc Called before each init_per_group. --spec pre_init_per_group(Group :: atom(), - Config :: config(), - State :: #state{}) -> - {config() | skip_or_fail(), NewState :: #state{}}. -pre_init_per_group(_Group,Config,State) -> - {add_node_name(Config, State), State}. - -%% @doc Called after each init_per_group. --spec post_init_per_group(Group :: atom(), - Config :: config(), - Return :: config() | skip_or_fail(), - State :: #state{}) -> - {config() | skip_or_fail(), NewState :: #state{}}. -post_init_per_group(_Group,_Config,Return,State) -> - {Return, State}. - -%% @doc Called after each end_per_group. --spec pre_end_per_group(Group :: atom(), - Config :: config() | skip_or_fail(), - State :: #state{}) -> - {ok | skip_or_fail(), NewState :: #state{}}. -pre_end_per_group(_Group,Config,State) -> - {Config, State}. - -%% @doc Called after each end_per_group. 
--spec post_end_per_group(Group :: atom(), - Config :: config(), - Return :: term(), - State :: #state{}) -> - {ok | skip_or_fail(), NewState :: #state{}}. -post_end_per_group(_Group,_Config,Return,State) -> - {Return, State}. - -%% @doc Called before each test case. --spec pre_init_per_testcase(TC :: atom(), - Config :: config(), - State :: #state{}) -> - {config() | skip_or_fail(), NewState :: #state{}}. -pre_init_per_testcase(_TC,Config,State) -> - {add_node_name(Config, State), State}. - -%% @doc Called after each test case. --spec post_end_per_testcase(TC :: atom(), - Config :: config(), - Return :: term(), - State :: #state{}) -> - {ok | skip_or_fail(), NewState :: #state{}}. -post_end_per_testcase(_TC,_Config,Return,State) -> - {Return, State}. - -%% @doc Called after a test case failed. --spec on_tc_fail(TC :: init_per_suite | end_per_suite | - init_per_group | end_per_group | atom(), - Reason :: term(), State :: #state{}) -> - NewState :: #state{}. -on_tc_fail(_TC, _Reason, State) -> - State. - -%% @doc Called when a test case is skipped. --spec on_tc_skip(TC :: end_per_suite | init_per_group | end_per_group | atom(), - {tc_auto_skip, {failed, {Mod :: atom(), Function :: atom(), - Reason :: term()}}} | - {tc_user_skip, {skipped, Reason :: term()}}, - State :: #state{}) -> - NewState :: #state{}. -on_tc_skip(_TC, _Reason, State) -> - State. - -%% @doc Called when the scope of the SCB is done. --spec terminate(State :: #state{}) -> - term(). -terminate(_State) -> - ok. - -%%% ============================================================================ -%%% Local functions -%%% ============================================================================ - -%% Add a nodename to config if it does not exist -add_node_name(Config, State) -> - case proplists:get_value(nodenames, Config) of - undefined -> - lists:keystore( - nodenames, 1, Config, - {nodenames,generate_nodenames(State#state.nodenames)}); - _Else -> - Config - end. - - -%% Copied from test_server_ctrl.erl -generate_nodenames(Num) -> - {ok,Name} = inet:gethostname(), - generate_nodenames2(Num, [Name], []). - -generate_nodenames2(0, _Hosts, Acc) -> - Acc; -generate_nodenames2(N, Hosts, Acc) -> - Host=lists:nth((N rem (length(Hosts)))+1, Hosts), - Name=list_to_atom(temp_nodename("nod",N) ++ "@" ++ Host), - generate_nodenames2(N-1, Hosts, [Name|Acc]). - -%% We cannot use erlang:unique_integer([positive]) -%% here since this code in run on older test releases as well. -temp_nodename(Base,I) -> - {A,B,C} = os:timestamp(), - Nstr = integer_to_list(I), - Astr = integer_to_list(A), - Bstr = integer_to_list(B), - Cstr = integer_to_list(C), - Base++Nstr++Astr++Bstr++Cstr. diff --git a/lib/test_server/src/ts_lib.erl b/lib/test_server/src/ts_lib.erl deleted file mode 100644 index 7c3f450194..0000000000 --- a/lib/test_server/src/ts_lib.erl +++ /dev/null @@ -1,372 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. 
-%% -%% %CopyrightEnd% -%% --module(ts_lib). - --include_lib("kernel/include/file.hrl"). --include("ts.hrl"). - -%% Avoid warning for local function error/1 clashing with autoimported BIF. --compile({no_auto_import,[error/1]}). --export([error/1, var/2, erlang_type/0, - erlang_type/1, - initial_capital/1, - specs/1, suites/2, - test_categories/2, specialized_specs/2, - subst_file/3, subst/2, print_data/1, - make_non_erlang/2, - maybe_atom_to_list/1, progress/4, - b2s/1 - ]). - -error(Reason) -> - throw({error, Reason}). - -%% Returns the value for a variable - -var(Name, Vars) -> - case lists:keysearch(Name, 1, Vars) of - {value, {Name, Value}} -> - Value; - false -> - error({bad_installation, {undefined_var, Name, Vars}}) - end. - -%% Returns the level of verbosity (0-X) -verbosity(Vars) -> - % Check for a single verbose option. - case lists:member(verbose, Vars) of - true -> - 1; - false -> - case lists:keysearch(verbose, 1, Vars) of - {value, {verbose, Level}} -> - Level; - _ -> - 0 - end - end. - -% Displays output to the console if verbosity is equal or more -% than Level. -progress(Vars, Level, Format, Args) -> - V=verbosity(Vars), - if - V>=Level -> - io:format(Format, Args); - true -> - ok - end. - -%% Returns: {Type, Version} where Type is otp|src - -erlang_type() -> - erlang_type(code:root_dir()). -erlang_type(RootDir) -> - {_, Version} = init:script_id(), - RelDir = filename:join(RootDir, "releases"), % Only in installed - case filelib:is_file(RelDir) of - true -> {otp,Version}; % installed OTP - false -> {srctree,Version} % source code tree - end. - -%% Upcases the first letter in a string. - -initial_capital([C|Rest]) when $a =< C, C =< $z -> - [C-$a+$A|Rest]; -initial_capital(String) -> - String. - -specialized_specs(Dir,PostFix) -> - Specs = filelib:wildcard(filename:join([filename:dirname(Dir), - "*_test", "*_"++PostFix++".spec"])), - sort_tests([begin - DirPart = filename:dirname(Name), - AppTest = hd(lists:reverse(filename:split(DirPart))), - list_to_atom(string:substr(AppTest, 1, length(AppTest)-5)) - end || Name <- Specs]). - -specs(Dir) -> - Specs = filelib:wildcard(filename:join([filename:dirname(Dir), - "*_test", "*.{dyn,}spec"])), - %% Make sure only to include the main spec for each application - MainSpecs = - lists:flatmap(fun(FullName) -> - [Spec,TestDir|_] = - lists:reverse(filename:split(FullName)), - [_TestSuffix|TDParts] = - lists:reverse(string:tokens(TestDir,[$_,$.])), - [_SpecSuffix|SParts] = - lists:reverse(string:tokens(Spec,[$_,$.])), - if TDParts == SParts -> - [filename_to_atom(FullName)]; - true -> - [] - end - end, Specs), - sort_tests(MainSpecs). - -test_categories(Dir, App) -> - Specs = filelib:wildcard(filename:join([filename:dirname(Dir), - App++"_test", "*.spec"])), - lists:flatmap(fun(FullName) -> - [Spec,_TestDir|_] = - lists:reverse(filename:split(FullName)), - case filename:rootname(Spec -- App) of - "" -> - []; - [_Sep | Cat] -> - [list_to_atom(Cat)] - end - end, Specs). - -suites(Dir, App) -> - Glob=filename:join([filename:dirname(Dir), App++"_test", - "*_SUITE.erl"]), - Suites=filelib:wildcard(Glob), - [filename_to_atom(Name) || Name <- Suites]. - -filename_to_atom(Name) -> - list_to_atom(filename:rootname(filename:basename(Name))). - -%% Sorts a list of either log files directories or spec files. - -sort_tests(Tests) -> - Sorted = lists:usort([{suite_order(filename_to_atom(X)), X} || - X <- Tests]), - [X || {_, X} <- Sorted]. - -%% This defines the order in which tests should be run and be presented -%% in index files. 
- -suite_order(emulator) -> 0; -suite_order(test_server) -> 1; -suite_order(kernel) -> 4; -suite_order(stdlib) -> 6; -suite_order(compiler) -> 8; -suite_order(hipe) -> 9; -suite_order(erl_interface) -> 12; -suite_order(jinterface) -> 14; -suite_order(sasl) -> 16; -suite_order(tools) -> 18; -suite_order(runtime_tools) -> 19; -suite_order(parsetools) -> 20; -suite_order(debugger) -> 22; -suite_order(ic) -> 24; -suite_order(orber) -> 26; -suite_order(inets) -> 28; -suite_order(asn1) -> 30; -suite_order(os_mon) -> 32; -suite_order(snmp) -> 38; -suite_order(mnesia) -> 44; -suite_order(system) -> 999; %% IMPORTANT: system SHOULD always be last! -suite_order(_) -> 200. - -%% Substitute all occurrences of @var@ in the In file, using -%% the list of variables in Vars, producing the output file Out. -%% Returns: ok | {error, Reason} - -subst_file(In, Out, Vars) -> - case file:read_file(In) of - {ok, Bin} -> - Subst = subst(b2s(Bin), Vars, []), - case file:write_file(Out, unicode:characters_to_binary(Subst)) of - ok -> - ok; - {error, Reason} -> - {error, {file_write, Reason}} - end; - Error -> - Error - end. - -subst(String, Vars) -> - subst(String, Vars, []). - -subst([$@, $_|Rest], Vars, Result) -> - subst_var([$_|Rest], Vars, Result, []); -subst([$@, C|Rest], Vars, Result) when $A =< C, C =< $Z -> - subst_var([C|Rest], Vars, Result, []); -subst([$@, C|Rest], Vars, Result) when $a =< C, C =< $z -> - subst_var([C|Rest], Vars, Result, []); -subst([C|Rest], Vars, Result) -> - subst(Rest, Vars, [C|Result]); -subst([], _Vars, Result) -> - lists:reverse(Result). - -subst_var([$@|Rest], Vars, Result, VarAcc) -> - Key = list_to_atom(lists:reverse(VarAcc)), - {Result1,Rest1} = do_subst_var(Key, Rest, Vars, Result, VarAcc), - subst(Rest1, Vars, Result1); - -subst_var([C|Rest], Vars, Result, VarAcc) -> - subst_var(Rest, Vars, Result, [C|VarAcc]); -subst_var([], Vars, Result, VarAcc) -> - subst([], Vars, [VarAcc++[$@|Result]]). - -%% handle conditional -do_subst_var(Cond, Rest, Vars, Result, _VarAcc) when Cond == 'IFEQ' ; - Cond == 'IFNEQ' -> - {Bool,Comment,Rest1} = do_test(Rest, Vars, Cond), - Rest2 = extract_clause(Bool, Rest1), - {lists:reverse(Comment, Result),Rest2}; -%% variable substitution -do_subst_var(Key, Rest, Vars, Result, VarAcc) -> - case lists:keysearch(Key, 1, Vars) of - {value, {Key, Value}} -> - {lists:reverse(Value, Result),Rest}; - false -> - {[$@|VarAcc++[$@|Result]],Rest} - end. - -%% check arguments in "@IF[N]EQ@ (Arg1, Arg2)" for equality -do_test(Rest, Vars, Test) -> - {Arg1,Rest1} = get_arg(Rest, Vars, $,, []), - {Arg2,Rest2} = get_arg(Rest1, Vars, 41, []), % $) - Result = case Arg1 of - Arg2 when Test == 'IFEQ' -> true; - Arg2 when Test == 'IFNEQ' -> false; - _ when Test == 'IFNEQ' -> true; - _ -> false - end, - Comment = io_lib:format("# Result of test: ~s (~s, ~s) -> ~w", - [atom_to_list(Test),Arg1,Arg2,Result]), - {Result,Comment,Rest2}. - -%% extract an argument -get_arg([$(|Rest], Vars, Stop, _) -> - get_arg(Rest, Vars, Stop, []); -get_arg([Stop|Rest], Vars, Stop, Acc) -> - Arg = string:strip(lists:reverse(Acc)), - Subst = subst(Arg, Vars), - {Subst,Rest}; -get_arg([C|Rest], Vars, Stop, Acc) -> - get_arg(Rest, Vars, Stop, [C|Acc]). - -%% keep only the true or false conditional clause -extract_clause(true, Rest) -> - extract_clause(true, Rest, []); -extract_clause(false, Rest) -> - Rest1 = discard_clause(Rest), % discard true clause - extract_clause(false, Rest1, []). 
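Note: a small sketch of the template substitution implemented by subst/2 above; the variable name and value are illustrative.

    %% @var@ occurrences are replaced from Vars:
    "gcc -O2" = subst("@CC@ -O2", [{'CC',"gcc"}]).
    %% Conditional blocks have the form
    %%   @IFEQ@ (Arg1, Arg2) ... @ELSE@ ... @ENDIF@
    %% (and @IFNEQ@); they are handled by do_test/3 above together with
    %% extract_clause and discard_clause.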
- -%% true clause buffered, done -extract_clause(true, [$@,$E,$L,$S,$E,$@|Rest], Acc) -> - Rest1 = discard_clause(Rest), % discard false clause - lists:reverse(Acc, Rest1); -%% buffering of false clause starts now -extract_clause(false, [$@,$E,$L,$S,$E,$@|Rest], _Acc) -> - extract_clause(false, Rest, []); -%% true clause buffered, done -extract_clause(true, [$@,$E,$N,$D,$I,$F,$@|Rest], Acc) -> - lists:reverse(Acc, Rest); -%% false clause buffered, done -extract_clause(false, [$@,$E,$N,$D,$I,$F,$@|Rest], Acc) -> - lists:reverse(Acc, Rest); -%% keep buffering -extract_clause(Bool, [C|Rest], Acc) -> - extract_clause(Bool, Rest, [C|Acc]); -%% parse error -extract_clause(_, [], Acc) -> - lists:reverse(Acc). - -discard_clause([$@,$E,$L,$S,$E,$@|Rest]) -> - Rest; -discard_clause([$@,$E,$N,$D,$I,$F,$@|Rest]) -> - Rest; -discard_clause([_C|Rest]) -> - discard_clause(Rest); -discard_clause([]) -> % parse error - []. - - -print_data(Port) -> - receive - {Port, {data, Bytes}} -> - io:put_chars(Bytes), - print_data(Port); - {Port, eof} -> - Port ! {self(), close}, - receive - {Port, closed} -> - true - end, - receive - {'EXIT', Port, _} -> - ok - after 1 -> % force context switch - ok - end - end. - -maybe_atom_to_list(To_list) when is_list(To_list) -> - To_list; -maybe_atom_to_list(To_list) when is_atom(To_list)-> - atom_to_list(To_list). - - -%% Configure and run all the Makefiles in the data dir of the suite -%% in question -make_non_erlang(DataDir, Variables) -> - %% Make the stuff in all_SUITE_data if it exists - AllDir = filename:join(DataDir,"../all_SUITE_data"), - case filelib:is_dir(AllDir) of - true -> - make_non_erlang_do(AllDir,Variables); - false -> - ok - end, - make_non_erlang_do(DataDir, Variables). - -make_non_erlang_do(DataDir, Variables) -> - try - MakeCommand = proplists:get_value(make_command,Variables), - - FirstMakefile = filename:join(DataDir,"Makefile.first"), - case filelib:is_regular(FirstMakefile) of - true -> - io:format("Making ~p",[FirstMakefile]), - ok = ts_make:make( - MakeCommand, DataDir, filename:basename(FirstMakefile)); - false -> - ok - end, - - MakefileSrc = filename:join(DataDir,"Makefile.src"), - MakefileDest = filename:join(DataDir,"Makefile"), - case filelib:is_regular(MakefileSrc) of - true -> - ok = ts_lib:subst_file(MakefileSrc,MakefileDest,Variables), - io:format("Making ~p",[MakefileDest]), - ok = ts_make:make([{makefile,"Makefile"},{data_dir,DataDir} - | Variables]); - false -> - ok - end - after - timer:sleep(100) %% maybe unnecessary now when we don't do set_cwd anymore - end. - -b2s(Bin) -> - unicode:characters_to_list(Bin,default_encoding()). - -default_encoding() -> - try epp:default_encoding() - catch error:undef -> latin1 - end. diff --git a/lib/test_server/src/ts_make.erl b/lib/test_server/src/ts_make.erl deleted file mode 100644 index 0178f4d836..0000000000 --- a/lib/test_server/src/ts_make.erl +++ /dev/null @@ -1,114 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(ts_make). - --export([make/1,make/3,unmake/1]). - --include("test_server.hrl"). - -%% Functions to be called from make test cases. - -make(Config) when is_list(Config) -> - DataDir = proplists:get_value(data_dir, Config), - Makefile = proplists:get_value(makefile, Config), - Make = proplists:get_value(make_command, Config), - case make(Make, DataDir, Makefile) of - ok -> ok; - {error,Reason} -> exit({make_failed,Reason}) - end. - -unmake(Config) when is_list(Config) -> - ok. - -%% Runs `make' in the given directory. -%% Result: ok | {error, Reason} - -make(Make, Dir, Makefile) -> - {RunFile, RunCmd, Script} = run_make_script(os:type(), Make, Dir, Makefile), - case file:write_file(RunFile, Script) of - ok -> - Log = filename:join(Dir, "make.log"), - file:delete(Log), - Port = open_port({spawn, RunCmd}, [eof,stream,in,stderr_to_stdout]), - case get_port_data(Port, [], false) of - "*ok*" ++ _ -> ok; - "*error*" ++ _ -> {error, make}; - Other ->{error,{make,Other}} - end; - Error -> Error - end. - -get_port_data(Port, Last0, Complete0) -> - receive - {Port,{data,Bytes}} -> - {Last, Complete} = update_last(Bytes, Last0, Complete0), - get_port_data(Port, Last, Complete); - {Port, eof} -> - Result = update_last(eof, Last0, Complete0), - unlink(Port), - exit(Port, die), - Result - end. - -update_last([C|Rest], Line, true) -> - try - %% Utf-8 list to utf-8 binary - %% (e.g. we assume utf-8 bytes from port) - io:put_chars(list_to_binary(Line)) - catch - error:badarg -> - %% io:put_chars/1 badarged - %% this likely means we had unicode code points - %% in our bytes buffer (e.g warning from gcc with åäö) - io:put_chars(unicode:characters_to_binary(Line)) - end, - io:nl(), - update_last([C|Rest], [], false); -update_last([$\r|Rest], Result, Complete) -> - update_last(Rest, Result, Complete); -update_last([$\n|Rest], Result, _Complete) -> - update_last(Rest, lists:reverse(Result), true); -update_last([C|Rest], Result, Complete) -> - update_last(Rest, [C|Result], Complete); -update_last([], Result, Complete) -> - {Result, Complete}; -update_last(eof, Result, _) -> - unicode:characters_to_list(list_to_binary(Result)). - -run_make_script({win32, _}, Make, Dir, Makefile) -> - {"run_make.bat", - ".\\run_make", - ["@echo off\r\n", - "cd \"", filename:nativename(Dir), "\"\r\n", - Make, " -f ", Makefile, " \r\n", - "if errorlevel 1 echo *error*\r\n", - "if not errorlevel 1 echo *ok*\r\n"]}; -run_make_script({unix, _}, Make, Dir, Makefile) -> - {"run_make", - "/bin/sh ./run_make", - ["#!/bin/sh\n", - "cd \"", Dir, "\"\n", - Make, " -f ", Makefile, " 2>&1\n", - "case $? in\n", - " 0) echo '*ok*';;\n", - " *) echo '*error*';;\n", - "esac\n"]}; -run_make_script(_Other, _Make, _Dir, _Makefile) -> - exit(dont_know_how_to_make_script_on_this_platform). diff --git a/lib/test_server/src/ts_run.erl b/lib/test_server/src/ts_run.erl deleted file mode 100644 index 188094921d..0000000000 --- a/lib/test_server/src/ts_run.erl +++ /dev/null @@ -1,455 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. 
-%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%% Purpose : Supervises running of test cases. - --module(ts_run). - --export([run/4,ct_run_test/2]). - --define(DEFAULT_MAKE_TIMETRAP_MINUTES, 60). --define(DEFAULT_UNMAKE_TIMETRAP_MINUTES, 15). - --include("ts.hrl"). - --import(lists, [member/2,filter/2]). - --record(state, - {file, % File given. - mod, % Module to run. - test_server_args, % Arguments to test server. - command, % Command to run. - test_dir, % Directory for test suite. - makefiles, % List of all makefiles. - makefile, % Current makefile. - batch, % Are we running in batch mode? - data_wc, % Wildcard for data dirs. - topcase, % Top case specification. - all % Set if we have all_SUITE_data - }). - --define(tracefile,"traceinfo"). - -%% Options is a slightly modified version of the options given to -%% ts:run. Vars0 are from the variables file. -run(File, Args0, Options, Vars0) -> - Vars= - case lists:keysearch(vars, 1, Options) of - {value, {vars, Vars1}} -> - Vars1++Vars0; - _ -> - Vars0 - end, - {Batch,Runner} = - case {member(interactive, Options), member(batch, Options)} of - {false, true} -> - {true, fun run_batch/3}; - _ -> - {false, fun run_interactive/3} - end, - Hooks = [fun init_state/3, - fun run_preinits/3, - fun make_command/3, - Runner], - Args = make_common_test_args(Args0,Options,Vars), - St = #state{file=File,test_server_args=Args,batch=Batch}, - R = execute(Hooks, Vars, [], St), - case R of - {ok,_,_,_} -> ok; - Error -> Error - end. - -execute([Hook|Rest], Vars0, Spec0, St0) -> - case Hook(Vars0, Spec0, St0) of - ok -> - execute(Rest, Vars0, Spec0, St0); - {ok, Vars, Spec, St} -> - execute(Rest, Vars, Spec, St); - Error -> - Error - end; -execute([], Vars, Spec, St) -> - {ok, Vars, Spec, St}. - -%% Wrapper to run tests using ct:run_test/1 and handle any errors. - -ct_run_test(Dir, CommonTestArgs) -> - try - ok = file:set_cwd(Dir), - case ct:run_test(CommonTestArgs) of - {_,_,_} -> - ok; - {error,Error} -> - io:format("ERROR: ~P\n", [Error,20]); - Other -> - io:format("~P\n", [Other,20]) - end - catch - _:Crash -> - io:format("CRASH: ~P\n", [Crash,20]) - end. - -%% -%% Deletes File from Files when File is on the form .../<SUITE>_data/<file> -%% when all of <SUITE> has been skipped in Spec, i.e. there -%% exists a {skip, {<SUITE>, _}} tuple in Spec. -%% -del_skipped_suite_data_dir(Files, Spec) -> - SkipDirNames = lists:foldl(fun ({skip, {SS, _C}}, SSs) -> - [atom_to_list(SS) ++ "_data" | SSs]; - (_, SSs) -> - SSs - end, - [], - Spec), - filter(fun (File) -> - not member(filename:basename(filename:dirname(File)), - SkipDirNames) - end, - Files). - -%% Initialize our internal state. 
- -init_state(Vars, [], St0) -> - {FileBase,Wc0,Mod} = - case St0#state.file of - {Fil,Mod0} -> {Fil, atom_to_list(Mod0) ++ "*_data",Mod0}; - Fil -> {Fil,"*_SUITE_data",[]} - end, - {ok,Cwd} = file:get_cwd(), - TestDir = filename:join(filename:dirname(Cwd), FileBase++"_test"), - case filelib:is_dir(TestDir) of - true -> - Wc = filename:join(TestDir, Wc0), - {ok,Vars,[],St0#state{file=FileBase,mod=Mod, - test_dir=TestDir,data_wc=Wc}}; - false -> - {error,{no_test_directory,TestDir}} - end. - -%% Run any "Makefile.first" files first. -%% XXX We should fake a failing test case if the make fails. - -run_preinits(Vars, Spec, St) -> - Wc = filename:join(St#state.data_wc, "Makefile.first"), - run_pre_makefiles(del_skipped_suite_data_dir(filelib:wildcard(Wc), Spec), - Vars, Spec, St), - {ok,Vars,Spec,St}. - -run_pre_makefiles([Makefile|Ms], Vars0, Spec0, St0) -> - Hooks = [fun run_pre_makefile/3], - case execute(Hooks, Vars0, Spec0, St0#state{makefile=Makefile}) of - {error,_Reason}=Error -> Error; - {ok,Vars,Spec,St} -> run_pre_makefiles(Ms, Vars, Spec, St) - end; -run_pre_makefiles([], Vars, Spec, St) -> {ok,Vars,Spec,St}. - -run_pre_makefile(Vars, Spec, St) -> - Makefile = St#state.makefile, - Shortname = filename:basename(Makefile), - DataDir = filename:dirname(Makefile), - Make = ts_lib:var(make_command, Vars), - case ts_make:make(Make,DataDir, Shortname) of - ok -> {ok,Vars,Spec,St}; - {error,_Reason}=Error -> Error - end. - -get_config_files() -> - TSConfig = "ts.config", - [TSConfig | case os:type() of - {unix,_} -> ["ts.unix.config"]; - {win32,_} -> ["ts.win32.config"]; - _ -> [] - end]. - -%% Makes the command to start up the Erlang node to run the tests. - -backslashify([$\\, $" | T]) -> - [$\\, $" | backslashify(T)]; -backslashify([$" | T]) -> - [$\\, $" | backslashify(T)]; -backslashify([H | T]) -> - [H | backslashify(T)]; -backslashify([]) -> - []. - -make_command(Vars, Spec, State) -> - {ok,Cwd} = file:get_cwd(), - TestDir = State#state.test_dir, - TestPath = filename:nativename(TestDir), - Erl = case os:getenv("TS_RUN_VALGRIND") of - false -> - atom_to_list(lib:progname()); - _ -> - case State#state.file of - Dir when is_list(Dir) -> - os:putenv("VALGRIND_LOGFILE_PREFIX", Dir++"-"); - _ -> - ok - end, - "cerl -valgrind" ++ - case erlang:system_info(smp_support) of - true -> " -smp"; - false -> "" - end - end, - Naming = - case ts_lib:var(longnames, Vars) of - true -> - " -name "; - false -> - " -sname " - end, - ExtraArgs = - case lists:keysearch(erl_start_args,1,Vars) of - {value,{erl_start_args,Args}} -> Args; - false -> "" - end, - CrashFile = filename:join(Cwd,State#state.file ++ "_erl_crash.dump"), - case filelib:is_file(CrashFile) of - true -> - io:format("ts_run: Deleting dump: ~ts\n",[CrashFile]), - file:delete(CrashFile); - false -> - ok - end, - - %% If Common Test specific variables are needed, add them here - %% on form: "{key1,value1}" "{key2,value2}" ... 
- NetDir = ts_lib:var(ts_net_dir, Vars), - TestVars = [ "\"{net_dir,\\\"",NetDir,"\\\"}\"" ], - - %% NOTE: Do not use ' in these commands as it wont work on windows - Cmd = [Erl, Naming, "test_server" - " -rsh ", ts_lib:var(rsh_name, Vars), - " -env PATH \"", - backslashify(lists:flatten([TestPath, path_separator(), - remove_path_spaces()])), - "\"", - " -env ERL_CRASH_DUMP ", CrashFile, - %% uncomment the line below to disable exception formatting - %% " -test_server_format_exception false", - " -boot start_sasl -sasl errlog_type error", - " -pz \"",Cwd,"\"", - " -ct_test_vars ",TestVars, - " -eval \"ts_run:ct_run_test(\\\"",TestDir,"\\\", ", - backslashify(lists:flatten(State#state.test_server_args)),")\"" - " ", - ExtraArgs], - {ok, Vars, Spec, State#state{command=lists:flatten(Cmd)}}. - - -run_batch(Vars, _Spec, State) -> - process_flag(trap_exit, true), - Command = State#state.command ++ " -noinput -s erlang halt", - ts_lib:progress(Vars, 1, "Command: ~ts~n", [Command]), - io:format(user, "Command: ~ts~n",[Command]), - Port = open_port({spawn, Command}, [stream, in, eof]), - Timeout = 30000 * case os:getenv("TS_RUN_VALGRIND") of - false -> 1; - _ -> 100 - end, - tricky_print_data(Port, Timeout). - -tricky_print_data(Port, Timeout) -> - receive - {Port, {data, Bytes}} -> - io:put_chars(Bytes), - tricky_print_data(Port, Timeout); - {Port, eof} -> - Port ! {self(), close}, - receive - {Port, closed} -> - true - end, - receive - {'EXIT', Port, _} -> - ok - after 1 -> % force context switch - ok - end - after Timeout -> - case erl_epmd:names() of - {ok,Names} -> - case is_testnode_dead(Names) of - true -> - io:put_chars("WARNING: No EOF, but " - "test_server node is down!\n"); - false -> - tricky_print_data(Port, Timeout) - end; - _ -> - tricky_print_data(Port, Timeout) - end - end. - -is_testnode_dead([]) -> true; -is_testnode_dead([{"test_server",_}|_]) -> false; -is_testnode_dead([_|T]) -> is_testnode_dead(T). - -run_interactive(Vars, _Spec, State) -> - Command = State#state.command, - ts_lib:progress(Vars, 1, "Command: ~s~n", [Command]), - case ts_lib:var(os, Vars) of - "Windows 95" -> - %% Windows 95 strikes again! We must redirect standard - %% input and output for the `start' command, to force - %% standard input and output to the Erlang shell to be - %% connected to the newly started console. - %% Without these redirections, the Erlang shell would be - %% connected to the pipes provided by the port program - %% and there would be an inactive console window. - os:cmd("start < nul > nul w" ++ Command), - ok; - "Windows 98" -> - os:cmd("start < nul > nul w" ++ Command), - ok; - "Windows"++_ -> - os:cmd("start w" ++ Command), - ok; - _Other -> - %% Assuming ts and controller always run on solaris - start_xterm(Command) - end. - -start_xterm(Command) -> - case os:find_executable("xterm") of - false -> - io:format("The `xterm' program was not found.\n"), - {error, no_xterm}; - _Xterm -> - case os:getenv("DISPLAY") of - false -> - io:format("DISPLAY is not set.\n"), - {error, display_not_set}; - Display -> - io:format("Starting xterm (DISPLAY=~s)...\n", - [Display]), - os:cmd("xterm -sl 10000 -e " ++ Command ++ "&"), - ok - end - end. - -path_separator() -> - case os:type() of - {win32, _} -> ";"; - {unix, _} -> ":" - end. 
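-
-%% For orientation (added comment; not in the original file): the command
-%% assembled by make_command/3 above boils down to something like
-%%
-%%   erl -sname test_server -rsh <rsh_name> -env PATH "<test dir>:..."
-%%       -env ERL_CRASH_DUMP <file>_erl_crash.dump
-%%       -boot start_sasl -sasl errlog_type error -pz "<cwd>"
-%%       -ct_test_vars "{net_dir,\"<ts_net_dir>\"}"
-%%       -eval "ts_run:ct_run_test(\"<test dir>\", [...])"
-%%
-%% run_batch/3 appends "-noinput -s erlang halt" and runs it through a port,
-%% while run_interactive/3 starts it in a new console or xterm.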
- - -make_common_test_args(Args0, Options0, _Vars) -> - Trace = - case lists:keysearch(trace,1,Options0) of - {value,{trace,TI}} when is_tuple(TI); is_tuple(hd(TI)) -> - ok = file:write_file(?tracefile,io_lib:format("~p.~n",[TI])), - [{ct_trace,?tracefile}]; - {value,{trace,TIFile}} when is_atom(TIFile) -> - [{ct_trace,atom_to_list(TIFile)}]; - {value,{trace,TIFile}} -> - [{ct_trace,TIFile}]; - false -> - [] - end, - Cover = - case lists:keysearch(cover,1,Options0) of - {value,{cover, App, none, _Analyse}} -> - io:format("No cover file found for ~p~n",[App]), - []; - {value,{cover,_App,File,_Analyse}} -> - [{cover,to_list(File)},{cover_stop,false}]; - false -> - [] - end, - - Logdir = case lists:keysearch(logdir, 1, Options0) of - {value,{logdir, _}} -> - []; - false -> - [{logdir,"../test_server"}] - end, - - TimeTrap = [{scale_timetraps, true}], - - {ConfigPath, - Options} = case {os:getenv("TEST_CONFIG_PATH"), - lists:keysearch(config, 1, Options0)} of - {_,{value, {config, Path}}} -> - {Path,lists:keydelete(config, 1, Options0)}; - {false,false} -> - {"../test_server",Options0}; - {Path,_} -> - {Path,Options0} - end, - ConfigFiles = [{config,[filename:join(ConfigPath,File) - || File <- get_config_files()]}], - io_lib:format("~100000p",[[{abort_if_missing_suites,true} | - Args0++Trace++Cover++Logdir++ - ConfigFiles++Options++TimeTrap]]). - -to_list(X) when is_atom(X) -> - atom_to_list(X); -to_list(X) when is_list(X) -> - X. - -%% -%% Paths and spaces handling for w2k and XP -%% -remove_path_spaces() -> - Path = os:getenv("PATH"), - case os:type() of - {win32,nt} -> - remove_path_spaces(Path); - _ -> - Path - end. - -remove_path_spaces(Path) -> - SPath = split_path(Path), - [NSHead|NSTail] = lists:map(fun(X) -> filename:nativename( - filename:join( - translate_path(split_one(X)))) - end, - SPath), - NSHead ++ lists:flatten([[$;|X] || X <- NSTail]). - -translate_path(PList) -> - %io:format("translate_path([~p|~p]~n",[Base,PList]), - translate_path(PList,[]). - - -translate_path([],_) -> - []; -translate_path([PC | T],BaseList) -> - FullPath = filename:nativename(filename:join(BaseList ++ [PC])), - NewPC = case catch file:altname(FullPath) of - {ok,X} -> - X; - _ -> - PC - end, - %io:format("NewPC:~s, DirList:~p~n",[NewPC,DirList]), - NewBase = BaseList ++ [NewPC], - [NewPC | translate_path(T,NewBase)]. - -split_one(Path) -> - filename:split(Path). - -split_path(Path) -> - string:tokens(Path,";"). diff --git a/lib/test_server/test/Makefile b/lib/test_server/test/Makefile deleted file mode 100644 index 8eec940505..0000000000 --- a/lib/test_server/test/Makefile +++ /dev/null @@ -1,92 +0,0 @@ -# -# %CopyrightBegin% -# -# Copyright Ericsson AB 1997-2012. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# %CopyrightEnd% -# - -include $(ERL_TOP)/make/target.mk -include $(ERL_TOP)/make/$(TARGET)/otp.mk - -# ---------------------------------------------------- -# Target Specs -# ---------------------------------------------------- - -MODULES= \ - test_server_SUITE \ - test_server_test_lib \ - erl2html2_SUITE - -ERL_FILES= $(MODULES:%=%.erl) - -TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR)) -INSTALL_PROGS= $(TARGET_FILES) - -EMAKEFILE=Emakefile -COVERFILE=test_server.cover - -# ---------------------------------------------------- -# Release directory specification -# ---------------------------------------------------- -RELSYSDIR = $(RELEASE_PATH)/test_server_test - -# ---------------------------------------------------- -# FLAGS -# ---------------------------------------------------- - -ERL_MAKE_FLAGS += -pa $(ERL_TOP)/lib/test_server/ebin -ERL_COMPILE_FLAGS += -I$(ERL_TOP)/lib/test_server/include -ERL_COMPILE_FLAGS += -I$(ERL_TOP)/lib/test_server/test - -EBIN = . - -# ---------------------------------------------------- -# Targets -# ---------------------------------------------------- - -.PHONY: make_emakefile - -make_emakefile: - $(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) \ - '*_SUITE_make' > $(EMAKEFILE) - $(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) $(MODULES)\ - >> $(EMAKEFILE) - -tests debug opt: make_emakefile - erl $(ERL_MAKE_FLAGS) -make - -clean: - rm -f $(EMAKEFILE) - rm -f $(TARGET_FILES) $(GEN_FILES) - rm -f core - -docs: - -# ---------------------------------------------------- -# Release Target -# ---------------------------------------------------- -include $(ERL_TOP)/make/otp_release_targets.mk - -release_spec: opt - -release_tests_spec: make_emakefile - $(INSTALL_DIR) "$(RELSYSDIR)" - $(INSTALL_DATA) $(EMAKEFILE) $(ERL_FILES) $(COVERFILE) "$(RELSYSDIR)" - $(INSTALL_DATA) test_server_test_lib.hrl test_server.spec test_server.cover "$(RELSYSDIR)" - chmod -R u+w "$(RELSYSDIR)" - @tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -) - -release_docs_spec: diff --git a/lib/test_server/test/erl2html2_SUITE.erl b/lib/test_server/test/erl2html2_SUITE.erl deleted file mode 100644 index 9e6389109b..0000000000 --- a/lib/test_server/test/erl2html2_SUITE.erl +++ /dev/null @@ -1,277 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2012. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(erl2html2_SUITE). - --compile(export_all). - --include_lib("common_test/include/ct.hrl"). - - --define(HEADER, - ["<!DOCTYPE HTML PUBLIC", - "\"-//W3C//DTD HTML 3.2 Final//EN\">\n", - "<!-- autogenerated by 'erl2html2' -->\n", - "<html>\n", - "<head><title>Module ", Src, "</title>\n", - "<meta http-equiv=\"cache-control\" ", - "content=\"no-cache\">\n", - "</head>\n", - "<body bgcolor=\"white\" text=\"black\" ", - "link=\"blue\" vlink=\"purple\" alink=\"red\">\n"]). 
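-
-%% Orientation note (added; not part of the original suite): erl2html2:convert/4
-%% renders an Erlang source file as HTML with one link target per source line
-%% (named by line number) and one per function (named "Func-Arity" and
-%% "Func-last_expr"). The test cases below verify exactly that:
-%% check_line_numbers/2 checks the per-line numbering and check_link_targets/5
-%% checks the function anchors.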
- -%%-------------------------------------------------------------------- -%% @spec suite() -> Info -%% Info = [tuple()] -%% @end -%%-------------------------------------------------------------------- -suite() -> - [{timetrap,{seconds,30}}, - {ct_hooks,[ts_install_cth,test_server_test_lib]}]. - -%%-------------------------------------------------------------------- -%% @spec init_per_suite(Config0) -> -%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} -%% Config0 = Config1 = [tuple()] -%% Reason = term() -%% @end -%%-------------------------------------------------------------------- -init_per_suite(Config) -> - Config. - -%%-------------------------------------------------------------------- -%% @spec end_per_suite(Config0) -> void() | {save_config,Config1} -%% Config0 = Config1 = [tuple()] -%% @end -%%-------------------------------------------------------------------- -end_per_suite(_Config) -> - ok. - -%%-------------------------------------------------------------------- -%% @spec init_per_group(GroupName, Config0) -> -%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} -%% GroupName = atom() -%% Config0 = Config1 = [tuple()] -%% Reason = term() -%% @end -%%-------------------------------------------------------------------- -init_per_group(_GroupName, Config) -> - Config. - -%%-------------------------------------------------------------------- -%% @spec end_per_group(GroupName, Config0) -> -%% void() | {save_config,Config1} -%% GroupName = atom() -%% Config0 = Config1 = [tuple()] -%% @end -%%-------------------------------------------------------------------- -end_per_group(_GroupName, _Config) -> - ok. - -%%-------------------------------------------------------------------- -%% @spec init_per_testcase(TestCase, Config0) -> -%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} -%% TestCase = atom() -%% Config0 = Config1 = [tuple()] -%% Reason = term() -%% @end -%%-------------------------------------------------------------------- -init_per_testcase(_TestCase, Config) -> - Config. - -%%-------------------------------------------------------------------- -%% @spec end_per_testcase(TestCase, Config0) -> -%% void() | {save_config,Config1} | {fail,Reason} -%% TestCase = atom() -%% Config0 = Config1 = [tuple()] -%% Reason = term() -%% @end -%%-------------------------------------------------------------------- -end_per_testcase(_TestCase, _Config) -> - ok. - -%%-------------------------------------------------------------------- -%% @spec groups() -> [Group] -%% Group = {GroupName,Properties,GroupsAndTestCases} -%% GroupName = atom() -%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}] -%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase] -%% TestCase = atom() -%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}} -%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail | -%% repeat_until_any_ok | repeat_until_any_fail -%% N = integer() | forever -%% @end -%%-------------------------------------------------------------------- -groups() -> - []. - -%%-------------------------------------------------------------------- -%% @spec all() -> GroupsAndTestCases | {skip,Reason} -%% GroupsAndTestCases = [{group,GroupName} | TestCase] -%% GroupName = atom() -%% TestCase = atom() -%% Reason = term() -%% @end -%%-------------------------------------------------------------------- -all() -> - [macros_defined, macros_undefined]. 
- -%%-------------------------------------------------------------------- -%% @spec TestCase(Config0) -> -%% ok | exit() | {skip,Reason} | {comment,Comment} | -%% {save_config,Config1} | {skip_and_save,Reason,Config1} -%% Config0 = Config1 = [tuple()] -%% Reason = term() -%% Comment = term() -%% @end -%%-------------------------------------------------------------------- -macros_defined(Config) -> - %% let erl2html2 use epp as parser - DataDir = ?config(data_dir,Config), - InclDir = filename:join(DataDir, "include"), - {Src,Dst} = convert_module("m1",[InclDir],Config), - {true,L} = check_line_numbers(Src,Dst), - ok = check_link_targets(Src,Dst,L,[{baz,0}],[]), - ok. - -macros_undefined(Config) -> - %% let erl2html2 use epp_dodger as parser - {Src,Dst} = convert_module("m1",[],Config), - {true,L} = check_line_numbers(Src,Dst), - ok = check_link_targets(Src,Dst,L,[{baz,0}],[{quux,0}]), - ok. - -convert_module(Mod,InclDirs,Config) -> - DataDir = ?config(data_dir,Config), - PrivDir = ?config(priv_dir,Config), - Src = filename:join(DataDir,Mod++".erl"), - Dst = filename:join(PrivDir,Mod++".erl.html"), - io:format("<a href=\"~s\">~s</a>\n",[Src,filename:basename(Src)]), - ok = erl2html2:convert(Src, Dst, InclDirs, "<html><body>"), - io:format("<a href=\"~s\">~s</a>\n",[Dst,filename:basename(Dst)]), - {Src,Dst}. - -%% Check that there are the same number of lines in each file, and -%% that all line numbers are displayed in the dst file. -check_line_numbers(Src,Dst) -> - {ok,SFd} = file:open(Src,[read]), - {ok,DFd} = file:open(Dst,[read]), - {ok,SN} = count_src_lines(SFd,0), - ok = file:close(SFd), - {ok,DN} = read_dst_line_numbers(DFd), - ok = file:close(DFd), - {SN == DN,SN}. - -count_src_lines(Fd,N) -> - case io:get_line(Fd,"") of - eof -> - {ok,N}; - {error,Reason} -> - {error,Reason,N}; - _Line -> - count_src_lines(Fd,N+1) - end. - -read_dst_line_numbers(Fd) -> - "<html><body><pre>\n" = io:get_line(Fd,""), - read_dst_line_numbers(Fd,0). -read_dst_line_numbers(Fd,Last) when is_integer(Last) -> - case io:get_line(Fd,"") of - eof -> - {ok,Last}; - {error,Reason} -> - {error,Reason,Last}; - "</pre>"++_ -> - {ok,Last}; - "</body>"++_ -> - {ok,Last}; - Line -> - %% erlang:display(Line), - Num = check_line_number(Last,Line,Line), - read_dst_line_numbers(Fd,Num) - end. - -check_line_number(Last,Line,OrigLine) -> - case Line of - "<a name="++_ -> - [$>|Rest] = lists:dropwhile(fun($>) -> false; (_) -> true end,Line), - check_line_number(Last,Rest,OrigLine); - _ -> - [N |_] = string:tokens(Line,":"), -% erlang:display(N), - Num = - try list_to_integer(string:strip(N)) - catch _:_ -> ct:fail({no_line_number_after,Last,OrigLine}) - end, - if Num == Last+1 -> - Num; - true -> - ct:fail({unexpected_integer,Num,Last}) - end - end. - - -%% Check that there is one link target for each line and one for each -%% function. -%% The test module has -compile(export_all), so all functions are -%% found by listing the exported ones. -check_link_targets(Src,Dst,L,RmFncs,ShouldRemain) -> - Mod = list_to_atom(filename:basename(filename:rootname(Src))), - Exports = Mod:module_info(exports)--[{module_info,0},{module_info,1}|RmFncs], - LastExprFuncs = [Func || {Func,_A} <- Exports], - {ok,{FAs,Fs,L},_} = - xmerl_sax_parser:file(Dst, - [{event_fun,fun sax_event/3}, - {event_state,{Exports,LastExprFuncs,0}}]), - true = (length(FAs) == length(ShouldRemain)), - [] = [FA || FA <- FAs, not lists:member(FA,ShouldRemain)], - [] = [F || F <- Fs, not lists:keymember(F,1,ShouldRemain)], - ok. 
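-
-%% Minimal sketch (added for illustration; not part of the original suite) of
-%% the xmerl_sax_parser callback style used by check_link_targets/5 above: the
-%% event fun gets each parser event, its location and an accumulator, and
-%% returns the new accumulator. This hypothetical helper just counts the
-%% <a> elements in a file.
-count_anchors(File) ->
-    EventFun = fun({startElement,_Uri,"a",_QName,_Attrs}, _Location, N) ->
-                       N + 1;
-                  (_Event, _Location, N) ->
-                       N
-               end,
-    {ok,Count,_Rest} =
-        xmerl_sax_parser:file(File, [{event_fun,EventFun},
-                                     {event_state,0}]),
-    Count.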
- -sax_event(Event,_Loc,State) -> - sax_event(Event,State). - -sax_event({startElement,_Uri,"a",_QN,Attrs},{Exports,LastExprFuncs,PrevLine}) -> - {_,_,"name",Name} = lists:keyfind("name",3,Attrs), - case catch list_to_integer(Name) of - Line when is_integer(Line) -> - case PrevLine + 1 of - Line -> - {Exports,LastExprFuncs,Line}; - Other -> - ct:fail({unexpected_line_number_target,Other}) - end; - {'EXIT',_} -> - {match,[FStr,EndStr]} = - re:run(Name,"^(.*)-(last_expr|[0-9]+)$", - [{capture,all_but_first,list}]), - F = list_to_atom(http_uri:decode(FStr)), - case EndStr of - "last_expr" -> - true = lists:member(F,LastExprFuncs), - {Exports,lists:delete(F,LastExprFuncs),PrevLine}; - _ -> - A = list_to_integer(EndStr), - A = proplists:get_value(F,Exports), - {lists:delete({F,A},Exports),LastExprFuncs,PrevLine} - end - end; -sax_event(_,State) -> - State. diff --git a/lib/test_server/test/erl2html2_SUITE_data/Makefile.src b/lib/test_server/test/erl2html2_SUITE_data/Makefile.src deleted file mode 100644 index 942ac0584b..0000000000 --- a/lib/test_server/test/erl2html2_SUITE_data/Makefile.src +++ /dev/null @@ -1,2 +0,0 @@ -all: - erlc -Iinclude m1.erl
\ No newline at end of file diff --git a/lib/test_server/test/erl2html2_SUITE_data/header1.hrl b/lib/test_server/test/erl2html2_SUITE_data/header1.hrl deleted file mode 100644 index 53d1b79ac5..0000000000 --- a/lib/test_server/test/erl2html2_SUITE_data/header1.hrl +++ /dev/null @@ -1,4 +0,0 @@ -baz() -> - ok. - --define(MACRO_DEFINING_A_FUNCTION,quux() -> ok). diff --git a/lib/test_server/test/erl2html2_SUITE_data/include/header2.hrl b/lib/test_server/test/erl2html2_SUITE_data/include/header2.hrl deleted file mode 100644 index e69de29bb2..0000000000 --- a/lib/test_server/test/erl2html2_SUITE_data/include/header2.hrl +++ /dev/null diff --git a/lib/test_server/test/erl2html2_SUITE_data/include/header3.hrl b/lib/test_server/test/erl2html2_SUITE_data/include/header3.hrl deleted file mode 100644 index 2a20850a3a..0000000000 --- a/lib/test_server/test/erl2html2_SUITE_data/include/header3.hrl +++ /dev/null @@ -1 +0,0 @@ --define(EPP_SWITCH, on). diff --git a/lib/test_server/test/erl2html2_SUITE_data/m1.erl b/lib/test_server/test/erl2html2_SUITE_data/m1.erl deleted file mode 100644 index 1d405963a5..0000000000 --- a/lib/test_server/test/erl2html2_SUITE_data/m1.erl +++ /dev/null @@ -1,52 +0,0 @@ -%% Comment with <html> code & </html> -%% and also some "quotes" and 'single quotes' - --module(m1). - --compile(export_all). - --include("header1.hrl"). --include("header2.hrl"). --include("header3.hrl"). - --define(MACRO1,value). - -%% This macro is used to select parser in erl2html2. -%% If EPP_SWITCH is defined epp is used, else epp_dodger. -epp_switch() -> - ?EPP_SWITCH. - -%%% Comment -foo(x) -> - %% Comment - ok_x; -foo(y) -> - %% Second clause - ok_y. - -'quoted_foo'() -> - ok. - -'quoted_foo_with_"_and_/'() -> - ok. - -'quoted_foo_with_(_and_)'() -> - ok. - -'quoted_foo_with_<_and_>'() -> - ok. - -bar() -> - do_something(), -ok. % indentation error, OTP-9710 - -%% Function inside macro definition -?MACRO_DEFINING_A_FUNCTION. - -%% Two function one one line -quuux() -> ok. quuuux() -> ok. - -%% do_something/0 does something -do_something() -> - ?MACRO1. -%% comments after last line diff --git a/lib/test_server/test/test_server.cover b/lib/test_server/test/test_server.cover deleted file mode 100644 index 052415377d..0000000000 --- a/lib/test_server/test/test_server.cover +++ /dev/null @@ -1 +0,0 @@ -{incl_app,test_server,details}. diff --git a/lib/test_server/test/test_server.spec b/lib/test_server/test/test_server.spec deleted file mode 100644 index a3b4d01d08..0000000000 --- a/lib/test_server/test/test_server.spec +++ /dev/null @@ -1 +0,0 @@ -{suites, "../test_server_test", all}. diff --git a/lib/test_server/test/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE.erl deleted file mode 100644 index 6adf5b8a78..0000000000 --- a/lib/test_server/test/test_server_SUITE.erl +++ /dev/null @@ -1,449 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2010-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. 
-%% -%% %CopyrightEnd% -%% -%%%------------------------------------------------------------------- -%%% @author Lukas Larsson <[email protected]> -%%% @copyright (C) 2011, Erlang Solutions Ltd. -%%% @doc -%%% -%%% @end -%%% Created : 15 Feb 2011 by Lukas Larsson <[email protected]> -%%%------------------------------------------------------------------- --module(test_server_SUITE). - -%% Note: This directive should only be used in test suites. --compile(export_all). - --include_lib("common_test/include/ct.hrl"). --include("test_server_test_lib.hrl"). --include_lib("kernel/include/file.hrl"). - -%%-------------------------------------------------------------------- -%% COMMON TEST CALLBACK FUNCTIONS -%%-------------------------------------------------------------------- - -%% @spec suite() -> Info -suite() -> - [{ct_hooks,[ts_install_cth,test_server_test_lib]}]. - - -%% @spec init_per_suite(Config0) -> -%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} -init_per_suite(Config) -> - [{path_dirs,[proplists:get_value(data_dir,Config)]} | Config]. - -%% @spec end_per_suite(Config) -> _ -end_per_suite(_Config) -> - io:format("TEST_SERVER_FRAMEWORK: ~p",[os:getenv("TEST_SERVER_FRAMEWORK")]), - ok. - -%% @spec init_per_group(GroupName, Config0) -> -%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} -init_per_group(_GroupName, Config) -> - Config. - -%% @spec end_per_group(GroupName, Config0) -> -%% void() | {save_config,Config1} -end_per_group(_GroupName, _Config) -> - ok. - -%% @spec init_per_testcase(TestCase, Config0) -> -%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} -init_per_testcase(_TestCase, Config) -> - Config. - -%% @spec end_per_testcase(TestCase, Config0) -> -%% void() | {save_config,Config1} | {fail,Reason} -end_per_testcase(test_server_unicode, _Config) -> - [_,Host] = string:tokens(atom_to_list(node()), "@"), - N1 = list_to_atom("test_server_tester_latin1" ++ "@" ++ Host), - N2 = list_to_atom("test_server_tester_utf8" ++ "@" ++ Host), - test_server:stop_node(N1), - test_server:stop_node(N2), - ok; -end_per_testcase(_TestCase, _Config) -> - ok. - -%% @spec: groups() -> [Group] -groups() -> - []. - -%% @spec all() -> GroupsAndTestCases | {skip,Reason} -all() -> - [test_server_SUITE, test_server_parallel01_SUITE, - test_server_conf02_SUITE, test_server_conf01_SUITE, - test_server_skip_SUITE, test_server_shuffle01_SUITE, - test_server_break_SUITE, test_server_cover_SUITE, - test_server_unicode]. - - -%%-------------------------------------------------------------------- -%% TEST CASES -%%-------------------------------------------------------------------- -%% @spec TestCase(Config0) -> -%% ok | exit() | {skip,Reason} | {comment,Comment} | -%% {save_config,Config1} | {skip_and_save,Reason,Config1} -test_server_SUITE(Config) -> -% rpc:call(Node,dbg, tracer,[]), -% rpc:call(Node,dbg, p,[all,c]), -% rpc:call(Node,dbg, tpl,[test_server_ctrl,x]), - run_test_server_tests("test_server_SUITE", - [{test_server_SUITE,skip_case7,"SKIPPED!"}], - 40, 1, 32, 21, 9, 1, 11, 2, 27, Config). - -test_server_parallel01_SUITE(Config) -> - run_test_server_tests("test_server_parallel01_SUITE", [], - 37, 0, 19, 19, 0, 0, 0, 0, 37, Config). - -test_server_shuffle01_SUITE(Config) -> - run_test_server_tests("test_server_shuffle01_SUITE", [], - 130, 0, 0, 76, 0, 0, 0, 0, 130, Config). - -test_server_skip_SUITE(Config) -> - run_test_server_tests("test_server_skip_SUITE", [], - 3, 0, 1, 0, 1, 0, 3, 0, 0, Config). 
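-
-%% Reading aid (added comment; not in the original file): the nine counters
-%% passed to run_test_server_tests/12 in these test cases are, in order,
-%%   NCases, NFail, NExpected, NSucc, NUsrSkip, NAutoSkip,
-%%   NActualSkip, NActualFail, NActualSucc
-%% i.e. first the totals expected in suite.log, then the per-case results
-%% counted from the parsed log (see the function definition further down).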
- -test_server_conf01_SUITE(Config) -> - run_test_server_tests("test_server_conf01_SUITE", [], - 24, 0, 12, 12, 0, 0, 0, 0, 24, Config). - -test_server_conf02_SUITE(Config) -> - run_test_server_tests("test_server_conf02_SUITE", [], - 26, 0, 12, 12, 0, 0, 0, 0, 26, Config). - -test_server_break_SUITE(Config) -> - run_test_server_tests("test_server_break_SUITE", [], - 8, 2, 6, 4, 0, 0, 0, 2, 6, Config). - -test_server_cover_SUITE(Config) -> - case test_server:is_cover() of - true -> - {skip, "Cover already running"}; - false -> - PrivDir = ?config(priv_dir,Config), - - %% Test suite has two test cases - %% tc1 calls cover_helper:foo/0 - %% tc2 calls cover_helper:bar/0 - %% Each function in cover_helper is one line. - %% - %% First test run skips tc2, so only cover_helper:foo/0 is executed. - %% Cover file specifies to include cover_helper in this test run. - CoverFile1 = filename:join(PrivDir,"t1.cover"), - CoverSpec1 = {include,[cover_helper]}, - file:write_file(CoverFile1,io_lib:format("~p.~n",[CoverSpec1])), - run_test_server_tests("test_server_cover_SUITE", - [{test_server_cover_SUITE,tc2,"SKIPPED!"}], - 4, 0, 2, 1, 1, 0, 1, 0, 3, - CoverFile1, Config), - - %% Next test run skips tc1, so only cover_helper:bar/0 is executed. - %% Cover file specifies cross compilation of cover_helper - CoverFile2 = filename:join(PrivDir,"t2.cover"), - CoverSpec2 = {cross,[{t1,[cover_helper]}]}, - file:write_file(CoverFile2,io_lib:format("~p.~n",[CoverSpec2])), - run_test_server_tests("test_server_cover_SUITE", - [{test_server_cover_SUITE,tc1,"SKIPPED!"}], - 4, 0, 2, 1, 1, 0, 1, 0, 3, CoverFile2, Config), - - %% Cross cover analyse - WorkDir = ?config(work_dir,Config), - WC = filename:join([WorkDir,"test_server_cover_SUITE.logs","run.*"]), - [D2,D1|_] = lists:reverse(lists:sort(filelib:wildcard(WC))), - TagDirs = [{t1,D1},{t2,D2}], - test_server_ctrl:cross_cover_analyse(details,TagDirs), - - %% Check that cover log shows only what is really included - %% in the test and cross cover log show the accumulated - %% result. - {ok,Cover1} = file:read_file(filename:join(D1,"cover.log")), - [{cover_helper,{1,1,_}}] = binary_to_term(Cover1), - {ok,Cover2} = file:read_file(filename:join(D2,"cover.log")), - [] = binary_to_term(Cover2), - {ok,Cross} = file:read_file(filename:join(D1,"cross_cover.log")), - [{cover_helper,{2,0,_}}] = binary_to_term(Cross), - ok - end. - -test_server_unicode(Config) -> - run_test_server_tests("test_server_unicode_SUITE", [], - 5, 0, 3, 3, 0, 0, 0, 0, 5, Config), - - %% Create and run two test suites - one with filename and content - %% in latin1 (if the default filename mode is latin1) and one with - %% filename and content in utf8. Both have name and content - %% including letters äöå. Check that all logs are generated with - %% utf8 encoded filenames. - case file:native_name_encoding() of - utf8 -> - ok; - latin1 -> - generate_and_run_unicode_test(Config,latin1) - end, - generate_and_run_unicode_test(Config,utf8). - -%%%----------------------------------------------------------------- -run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc, - NUsrSkip, NAutoSkip, - NActualSkip, NActualFail, NActualSucc, Config) -> - run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc, - NUsrSkip, NAutoSkip, - NActualSkip, NActualFail, NActualSucc, false, Config). 
- -run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc, - NUsrSkip, NAutoSkip, - NActualSkip, NActualFail, NActualSucc, Cover, Config) -> - Node = proplists:get_value(node, Config), - Encoding = rpc:call(Node,file,native_name_encoding,[]), - WorkDir = proplists:get_value(work_dir, Config), - LogDir = filename:join(WorkDir, SuiteName++".logs"), - LogDirUri = test_server_ctrl:uri_encode(LogDir, Encoding), - ct:log("<a href=\"file://~s\">Test case log files</a>\n", [LogDirUri]), - - {ok,_Pid} = rpc:call(Node,test_server_ctrl, start, []), - case Cover of - false -> - ok; - _ -> - rpc:call(Node,test_server_ctrl,cover,[Cover,details]) - end, - rpc:call(Node, - test_server_ctrl,add_dir_with_skip, - [SuiteName, - [proplists:get_value(data_dir,Config)],SuiteName, - Skip]), - - until(fun() -> - rpc:call(Node,test_server_ctrl,jobs,[]) =:= [] - end), - - rpc:call(Node,test_server_ctrl, stop, []), - - LogDir1 = translate_filename(LogDir,Encoding), - LastRunDir = get_latest_run_dir(LogDir1), - LastSuiteLog = filename:join(LastRunDir,"suite.log"), - {ok,Data} = test_server_test_lib:parse_suite(LastSuiteLog), - check([{"Number of cases",NCases,Data#suite.n_cases}, - {"Number failed",NFail,Data#suite.n_cases_failed}, - {"Number expected",NExpected,Data#suite.n_cases_expected}, - {"Number successful",NSucc,Data#suite.n_cases_succ}, - {"Number user skipped",NUsrSkip,Data#suite.n_cases_user_skip}, - {"Number auto skipped",NAutoSkip,Data#suite.n_cases_auto_skip}], ok), - {NActualSkip,NActualFail,NActualSucc} = - lists:foldl(fun(#tc{ result = skip },{S,F,Su}) -> - {S+1,F,Su}; - (#tc{ result = auto_skip },{S,F,Su}) -> - {S+1,F,Su}; - (#tc{ result = ok },{S,F,Su}) -> - {S,F,Su+1}; - (#tc{ result = failed },{S,F,Su}) -> - {S,F+1,Su} - end,{0,0,0},Data#suite.cases), - Data. - -translate_filename(Filename,EncodingOnTestNode) -> - case {file:native_name_encoding(),EncodingOnTestNode} of - {X,X} -> Filename; - {utf8,latin1} -> list_to_binary(Filename); - {latin1,utf8} -> unicode:characters_to_binary(Filename) - end. - -get_latest_run_dir(Dir) -> - %% For the time being, filelib:wildcard can not take a binary - %% argument, so we avoid using this here. - case file:list_dir(Dir) of - {ok,Files} -> - {ok,RE} = re:compile(<<"^run.[1-2][-_\.0-9]*$">>), - RunDirs = lists:filter( - fun(F) -> - L = l(F), - case re:run(F,RE) of - {match,[{0,L}]} -> true; - _ -> false - end - end, Files), - case RunDirs of - [] -> - Dir; - [H|T] -> - filename:join(Dir,get_latest_dir(T,H)) - end; - _ -> - Dir - end. - -l(X) when is_binary(X) -> size(X); -l(X) when is_list(X) -> length(X). - -get_latest_dir([H|T],Latest) when H>Latest -> - get_latest_dir(T,H); -get_latest_dir([_|T],Latest) -> - get_latest_dir(T,Latest); -get_latest_dir([],Latest) -> - Latest. - -check([{Str,Same,Same}|T], Status) -> - io:format("~s: ~p\n", [Str,Same]), - check(T, Status); -check([{Str,Expected,Actual}|T], _) -> - io:format("~s: expected ~p, actual ~p\n", [Str,Expected,Actual]), - check(T, error); -check([], ok) -> ok; -check([], error) -> ?t:fail(). - -until(Fun) -> - case Fun() of - true -> - ok; - false -> - timer:sleep(100), - until(Fun) - end. - -generate_and_run_unicode_test(Config0,Encoding) -> - DataDir = ?config(data_dir,Config0), - Suite = create_unicode_test_suite(DataDir,Encoding), - - %% We can not run this test on default node since it must be - %% started with correct file name mode (+fnu/+fnl). 
- %% OBS: the node are stopped by end_per_testcase/2 - Config1 = lists:keydelete(node,1,Config0), - Config2 = lists:keydelete(work_dir,1,Config1), - NodeName = list_to_atom("test_server_tester_" ++ atom_to_list(Encoding)), - Config = start_node(Config2,NodeName,erts_switch(Encoding)), - - %% Compile the suite - Node = proplists:get_value(node,Config), - {ok,Mod} = rpc:call(Node,compile,file,[Suite,[{outdir,DataDir}]]), - ModStr = atom_to_list(Mod), - - %% Clean logdir - LogDir0 = filename:join(DataDir,ModStr++".logs"), - LogDir = translate_filename(LogDir0,Encoding), - rm_dir(LogDir), - - %% Run the test - run_test_server_tests(ModStr, [], 3, 0, 1, 1, 0, 0, 0, 0, 3, Config), - - %% Check that all logs are created with utf8 encoded filenames - true = filelib:is_dir(LogDir), - - RunDir = get_latest_run_dir(LogDir), - true = filelib:is_dir(RunDir), - - LowerModStr = string:to_lower(ModStr), - SuiteHtml = translate_filename(LowerModStr++".src.html",Encoding), - true = filelib:is_regular(filename:join(RunDir,SuiteHtml)), - - TCLog = translate_filename(LowerModStr++".tc_äöå.html",Encoding), - true = filelib:is_regular(filename:join(RunDir,TCLog)), - ok. - -%% Same as test_server_test_lib:start_slave, but starts a peer with -%% additional arguments. -%% The reason for this is that we need to start nodes with +fnu/+fnl, -%% and that will not work well with a slave node since slave nodes run -%% remote file system on master - i.e. they will use same file name -%% mode as the master. -start_node(Config,Name,Args) -> - [_,Host] = string:tokens(atom_to_list(node()), "@"), - ct:log("Trying to start ~w@~s~n",[Name,Host]), - case test_server:start_node(Name, peer, [{args,Args}]) of - {error,Reason} -> - test_server:fail(Reason); - {ok,Node} -> - ct:log("Node ~p started~n", [Node]), - test_server_test_lib:prepare_tester_node(Node,Config) - end. - -create_unicode_test_suite(Dir,Encoding) -> - ModStr = "test_server_"++atom_to_list(Encoding)++"_äöå_SUITE", - File = filename:join(Dir,ModStr++".erl"), - Suite = - ["%% -*- ",epp:encoding_to_string(Encoding)," -*-\n", - "-module(",ModStr,").\n" - "\n" - "-export([all/1, init_per_suite/1, end_per_suite/1]).\n" - "-export([init_per_testcase/2, end_per_testcase/2]).\n" - "-export([tc_äöå/1]).\n" - "\n" - "-include_lib(\"test_server/include/test_server.hrl\").\n" - "\n" - "all(suite) ->\n" - " [tc_äöå].\n" - "\n" - "init_per_suite(Config) ->\n" - " Config.\n" - "\n" - "end_per_suite(_Config) ->\n" - " ok.\n" - "\n" - "init_per_testcase(_Case,Config) ->\n" - " init_timetrap(500,Config).\n" - "\n" - "init_timetrap(T,Config) ->\n" - " Dog = ?t:timetrap(T),\n" - " [{watchdog, Dog}|Config].\n" - "\n" - "end_per_testcase(_Case,Config) ->\n" - " cancel_timetrap(Config).\n" - "\n" - "cancel_timetrap(Config) ->\n" - " Dog=?config(watchdog, Config),\n" - " ?t:timetrap_cancel(Dog),\n" - " ok.\n" - "\n" - "tc_äöå(Config) when is_list(Config) ->\n" - " true = filelib:is_dir(?config(priv_dir,Config)),\n" - " ok.\n"], - {ok,Fd} = file:open(raw_filename(File,Encoding),[write,{encoding,Encoding}]), - io:put_chars(Fd,Suite), - ok = file:close(Fd), - File. - -raw_filename(Name,latin1) -> list_to_binary(Name); -raw_filename(Name,utf8) -> unicode:characters_to_binary(Name). - -rm_dir(Dir) -> - case file:list_dir(Dir) of - {error,enoent} -> - ok; - {ok,Files} -> - rm_files([filename:join(Dir, F) || F <- Files]), - file:del_dir(Dir) - end. 
- -rm_files([F | Fs]) -> - case file:read_file_info(F) of - {ok,#file_info{type=directory}} -> - rm_dir(F), - rm_files(Fs); - {ok,_Regular} -> - case file:delete(F) of - ok -> - rm_files(Fs); - {error,Errno} -> - exit({del_failed,F,Errno}) - end - end; -rm_files([]) -> - ok. - -erts_switch(latin1) -> "+fnl"; -erts_switch(utf8) -> "+fnu". diff --git a/lib/test_server/test/test_server_SUITE_data/Makefile.src b/lib/test_server/test/test_server_SUITE_data/Makefile.src deleted file mode 100644 index 5aeb035572..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/Makefile.src +++ /dev/null @@ -1,11 +0,0 @@ -all: - erlc test_server_SUITE.erl - erlc test_server_parallel01_SUITE.erl - erlc test_server_conf01_SUITE.erl - erlc test_server_shuffle01_SUITE.erl - erlc test_server_conf02_SUITE.erl - erlc test_server_skip_SUITE.erl - erlc test_server_break_SUITE.erl - erlc test_server_cover_SUITE.erl - erlc +debug_info test_server_cover_SUITE_data/cover_helper.erl - erlc test_server_unicode_SUITE.erl diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl deleted file mode 100644 index 79d8defb22..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl +++ /dev/null @@ -1,515 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------ -%%% Test Server self test. -%%%------------------------------------------------------------------ --module(test_server_SUITE). --include_lib("test_server/include/test_server.hrl"). --include_lib("test_server/include/test_server_line.hrl"). --include_lib("kernel/include/file.hrl"). --export([all/1]). - --export([init_per_suite/1, end_per_suite/1]). --export([init_per_testcase/2, end_per_testcase/2, fin_per_testcase/2]). --export([config/1, comment/1, timetrap/1, timetrap_cancel/1, multiply_timetrap/1, - init_per_s/1, init_per_tc/1, end_per_tc/1, - timeconv/1, msgs/1, capture/1, timecall/1, - do_times/1, do_times_mfa/1, do_times_fun/1, - skip_cases/1, skip_case1/1, skip_case2/1, skip_case3/1, - skip_case4/1, skip_case5/1, skip_case6/1, skip_case7/1, - skip_case8/1, skip_case9/1, - conf_init/1, check_new_conf/1, conf_cleanup/1, - check_old_conf/1, conf_init_fail/1, start_stop_node/1, - cleanup_nodes_init/1, check_survive_nodes/1, cleanup_nodes_fin/1, - commercial/1, - io_invalid_data/1, print_unexpected/1]). - --export([dummy_function/0,dummy_function/1,doer/1]). 
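-
-%% Orientation note (added; not part of the original file): this data suite
-%% uses the old test_server callback convention, where each test case Func
-%% is written as several clauses of one function, e.g.
-%%
-%%   my_case(suite) -> [];                     % subcases, or a skip tuple
-%%   my_case(doc)   -> ["One line of doc"];    % documentation
-%%   my_case(Config) when is_list(Config) ->   % the test case proper
-%%       ok.
-%%
-%% The ?t macro included from the test_server headers is shorthand for the
-%% test_server module, so e.g. ?t:fail/1 below is test_server:fail/1.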
- -all(doc) -> ["Test Server self test"]; -all(suite) -> - [config, comment, timetrap, timetrap_cancel, multiply_timetrap, - init_per_s, init_per_tc, end_per_tc, - timeconv, msgs, capture, timecall, do_times, skip_cases, - commercial, io_invalid_data, print_unexpected, - {conf, conf_init, [check_new_conf], conf_cleanup}, - check_old_conf, - {conf, conf_init_fail,[conf_member_skip],conf_cleanup_skip}, - start_stop_node, - {conf, cleanup_nodes_init,[check_survive_nodes],cleanup_nodes_fin}, - config - ]. - - -init_per_suite(Config) -> - [{init_per_suite_var,ok}|Config]. - -end_per_suite(_Config) -> - ok. - -init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> - Dog = ?t:timetrap(?t:minutes(2)), - Config1 = [{watchdog, Dog}|Config], - case Func of - init_per_tc -> - [{strange_var, 1}|Config1]; - skip_case8 -> - {skipped, "This case should be noted as `Skipped'"}; - skip_case9 -> - {skip, "This case should be noted as `Skipped'"}; - _ -> - Config1 - end; -init_per_testcase(Func, Config) -> - io:format("Func:~p",[Func]), - io:format("Config:~p",[Config]), - ?t:fail("Arguments to init_per_testcase not correct"). - -end_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> - Dog=?config(watchdog, Config), - ?t:timetrap_cancel(Dog), - case Func of - end_per_tc -> io:format("CLEANUP => this test case is ok\n"); - _Other -> ok - end; -end_per_testcase(Func, Config) -> - io:format("Func:~p",[Func]), - io:format("Config:~p",[Config]), - ?t:fail("Arguments to end_per_testcase not correct"). - -fin_per_testcase(Func, Config) -> - io:format("Func:~p",[Func]), - io:format("Config:~p",[Config]), - ?t:fail("fin_per_testcase/2 called, should have called end_per_testcase/2"). - - -config(suite) -> []; -config(doc) -> ["Test that the Config variable is decent, ", - "and that the std config variables are correct ", - "(check that data/priv dir exists)." - "Also check that ?config macro works."]; -config(Config) when is_list(Config) -> - is_tuplelist(Config), - {value,{data_dir,Dd}}=lists:keysearch(data_dir,1,Config), - {value,{priv_dir,Dp}}=lists:keysearch(priv_dir,1,Config), - true=is_dir(Dd), - {ok, _Bin}=file:read_file(filename:join(Dd, "dummy_file")), - true=is_dir(Dp), - - Dd = ?config(data_dir,Config), - Dp = ?config(priv_dir,Config), - ok; -config(_Config) -> - ?t:fail("Config variable is not a list."). - -is_tuplelist([]) -> - true; -is_tuplelist([{_A,_B}|Rest]) -> - is_tuplelist(Rest); -is_tuplelist(_) -> - false. - -is_dir(Dir) -> - case file:read_file_info(Dir) of - {ok, #file_info{type=directory}} -> - true; - _ -> - false - end. - -comment(suite) -> []; -comment(doc) -> ["Print a comment in the HTML log"]; -comment(Config) when is_list(Config) -> - ?t:comment("This comment should not occur in the HTML log because a later" - " comment shall overwrite it"), - ?t:comment("This comment is printed with the comment/1 function." - " It should occur in the HTML log"). - - - -timetrap(suite) -> []; -timetrap(doc) -> ["Test that timetrap works."]; -timetrap(Config) when is_list(Config) -> - TrapAfter = 3000, - Dog=?t:timetrap(TrapAfter), - process_flag(trap_exit, true), - TimeOut = TrapAfter * test_server:timetrap_scale_factor() + 1000, - receive - {'EXIT', Dog, {timetrap_timeout, _, _}} -> - ok; - {'EXIT', _OtherPid, {timetrap_timeout, _, _}} -> - ?t:fail("EXIT signal from wrong process") - after - TimeOut -> - ?t:fail("Timetrap is not working.") - end, - ?t:timetrap_cancel(Dog), - ok. 
- - -timetrap_cancel(suite) -> []; -timetrap_cancel(doc) -> ["Test that timetrap_cancel works."]; -timetrap_cancel(Config) when is_list(Config) -> - Dog=?t:timetrap(1000), - receive - after - 500 -> - ok - end, - ?t:timetrap_cancel(Dog), - receive - after 1000 -> - ok - end, - ok. - -multiply_timetrap(suite) -> []; -multiply_timetrap(doc) -> ["Test multiply timetrap"]; -multiply_timetrap(Config) when is_list(Config) -> - %% This simulates the call to test_server_ctrl:multiply_timetraps/1: - put(test_server_multiply_timetraps,{2,true}), - - Dog = ?t:timetrap(500), - timer:sleep(800), - ?t:timetrap_cancel(Dog), - - %% Reset - put(test_server_multiply_timetraps,1), - ok. - - -init_per_s(suite) -> []; -init_per_s(doc) -> ["Test that a Config that is altered in ", - "init_per_suite gets through to the testcases."]; -init_per_s(Config) -> - %% Check that the config var sent from init_per_suite - %% really exists. - {value, {init_per_suite_var, ok}} = - lists:keysearch(init_per_suite_var,1,Config), - - %% Check that the other variables still exist. - {value,{data_dir,_Dd}}=lists:keysearch(data_dir,1,Config), - {value,{priv_dir,_Dp}}=lists:keysearch(priv_dir,1,Config), - ok. - -init_per_tc(suite) -> []; -init_per_tc(doc) -> ["Test that a Config that is altered in ", - "init_per_testcase gets through to the ", - "actual testcase."]; -init_per_tc(Config) -> - %% Check that the config var sent from init_per_testcase - %% really exists. - {value, {strange_var, 1}} = lists:keysearch(strange_var,1,Config), - - %% Check that the other variables still exist. - {value,{data_dir,_Dd}}=lists:keysearch(data_dir,1,Config), - {value,{priv_dir,_Dp}}=lists:keysearch(priv_dir,1,Config), - ok. - -end_per_tc(suite) -> []; -end_per_tc(doc) -> ["Test that end_per_testcase/2 is called even if" - " test case fails"]; -end_per_tc(Config) when is_list(Config) -> - ?t:fail("This case should fail! Check that \"CLEANUP\" is" - " printed in the minor log file."). - - -timeconv(suite) -> []; -timeconv(doc) -> ["Test that the time unit conversion functions ", - "works."]; -timeconv(Config) when is_list(Config) -> - Val=2, - Secs=Val*1000, - Mins=Secs*60, - Hrs=Mins*60, - Secs=?t:seconds(2), - Mins=?t:minutes(2), - Hrs=?t:hours(2), - ok. - - -msgs(suite) -> []; -msgs(doc) -> ["Tests the messages_get function."]; -msgs(Config) when is_list(Config) -> - self() ! {hej, du}, - self() ! {lite, "data"}, - self() ! en_atom, - [{hej, du}, {lite, "data"}, en_atom] = ?t:messages_get(), - ok. - -capture(suite) -> []; -capture(doc) -> ["Test that the capture functions work properly."]; -capture(Config) when is_list(Config) -> - String1="abcedfghjiklmnopqrstuvwxyz", - String2="0123456789", - ?t:capture_start(), - io:format(String1), - [String1]=?t:capture_get(), - io:format(String2), - [String2]=?t:capture_get(), - ?t:capture_stop(), - []=?t:capture_get(), - io:format(String2), - []=?t:capture_get(), - ok. - -timecall(suite) -> []; -timecall(doc) -> ["Tests that timed calls work."]; -timecall(Config) when is_list(Config) -> - {_Time1, liten_apa_e_oxo_farlig} = ?t:timecall(?MODULE, dummy_function, []), - {Time2, jag_ar_en_gorilla} = ?t:timecall(?MODULE, dummy_function, [gorilla]), - DTime=round(Time2), - if - DTime<1 -> - ?t:fail("Timecall reported a too low time."); - DTime==1 -> - ok; - DTime>1 -> - ?t:fail("Timecall reported a too high time.") - end, - ok. - -dummy_function() -> - liten_apa_e_oxo_farlig. -dummy_function(gorilla) -> - receive after 1000 -> ok end, - jag_ar_en_gorilla. 
- - -do_times(suite) -> [do_times_mfa, do_times_fun]; -do_times(doc) -> ["Test the do_times function."]. - -do_times_mfa(suite) -> []; -do_times_mfa(doc) -> ["Test the do_times function with M,F,A given."]; -do_times_mfa(Config) when is_list(Config) -> - ?t:do_times(100, ?MODULE, doer, [self()]), - 100=length(?t:messages_get()), - ok. - -do_times_fun(suite) -> []; -do_times_fun(doc) -> ["Test the do_times function with fun given."]; -do_times_fun(Config) when is_list(Config) -> - Self = self(), - ?t:do_times(100, fun() -> doer(Self) end), - 100=length(?t:messages_get()), - ok. - -doer(From) -> - From ! a, - ok. - -skip_cases(doc) -> ["Test all possible ways to skip a test case."]; -skip_cases(suite) -> [skip_case1, skip_case2, skip_case3, skip_case4, - skip_case5, skip_case6, skip_case7, skip_case8, - skip_case9]. - -skip_case1(suite) -> []; -skip_case1(doc) -> ["Test that you can return {skipped, Reason}," - " and that Reason is in the comment field in the HTML log"]; -skip_case1(Config) when is_list(Config) -> - %% If this comment shows, the case failed!! - ?t:comment("ERROR: This case should have been noted as `Skipped'"), - %% The Reason in {skipped, Reason} should overwrite a 'comment' - {skipped, "This case should be noted as `Skipped'"}. - -skip_case2(suite) -> []; -skip_case2(doc) -> ["Test that you can return {skipped, Reason}," - " and that Reason is in the comment field in the HTML log"]; -skip_case2(Config) when is_list(Config) -> - %% If this comment shows, the case failed!! - ?t:comment("ERROR: This case should have been noted as `Skipped'"), - %% The Reason in {skipped, Reason} should overwrite a 'comment' - exit({skipped, "This case should be noted as `Skipped'"}). - -skip_case3(suite) -> []; -skip_case3(doc) -> ["Test that you can return {skip, Reason}," - " and that Reason is in the comment field in the HTML log"]; -skip_case3(Config) when is_list(Config) -> - %% If this comment shows, the case failed!! - ?t:comment("ERROR: This case should have been noted as `Skipped'"), - %% The Reason in {skip, Reason} should overwrite a 'comment' - {skip, "This case should be noted as `Skipped'"}. - -skip_case4(suite) -> []; -skip_case4(doc) -> ["Test that you can return {skip, Reason}," - " and that Reason is in the comment field in the HTML log"]; -skip_case4(Config) when is_list(Config) -> - %% If this comment shows, the case failed!! - ?t:comment("ERROR: This case should have been noted as `Skipped'"), - %% The Reason in {skip, Reason} should overwrite a 'comment' - exit({skip, "This case should be noted as `Skipped'"}). - -skip_case5(suite) -> {skipped, "This case should be noted as `Skipped'"}; -skip_case5(doc) -> ["Test that you can return {skipped, Reason}" - " from the specification clause"]. - -skip_case6(suite) -> {skip, "This case should be noted as `Skipped'"}; -skip_case6(doc) -> ["Test that you can return {skip, Reason}" - " from the specification clause"]. - -skip_case7(suite) -> []; -skip_case7(doc) -> ["Test that skip works from a test specification file"]; -skip_case7(Config) when is_list(Config) -> - %% This case shall be skipped by adding - %% {skip, {test_server_SUITE, skip_case7, Reason}}. - %% to the test specification file. - ?t:fail("This case should have been Skipped by the .spec file"). 
- -skip_case8(suite) -> []; -skip_case8(doc) -> ["Test that {skipped, Reason} works from" - " init_per_testcase/2"]; -skip_case8(Config) when is_list(Config) -> - %% This case shall be skipped by adding a specific clause to - %% returning {skipped, Reason} from init_per_testcase/2 for this case. - ?t:fail("This case should have been Skipped by init_per_testcase/2"). - -skip_case9(suite) -> []; -skip_case9(doc) -> ["Test that {skip, Reason} works from a init_per_testcase/2"]; -skip_case9(Config) when is_list(Config) -> - %% This case shall be skipped by adding a specific clause to - %% returning {skip, Reason} from init_per_testcase/2 for this case. - ?t:fail("This case should have been Skipped by init_per_testcase/2"). - -conf_init(doc) -> ["Test successful conf case: Change Config parameter"]; -conf_init(Config) when is_list(Config) -> - [{conf_init_var,1389}|Config]. - -check_new_conf(suite) -> []; -check_new_conf(doc) -> ["Check that Config parameter changed by" - " conf_init is used"]; -check_new_conf(Config) when is_list(Config) -> - 1389 = ?config(conf_init_var,Config), - ok. - -conf_cleanup(doc) -> ["Test successful conf case: Restore Config parameter"]; -conf_cleanup(Config) when is_list(Config) -> - lists:keydelete(conf_init_var,1,Config). - -check_old_conf(suite) -> []; -check_old_conf(doc) -> ["Test that the restored Config is used after a" - " conf cleanup"]; -check_old_conf(Config) when is_list(Config) -> - undefined = ?config(conf_init_var,Config), - ok. - -conf_init_fail(doc) -> ["Test that config members are skipped if" - " conf init function fails."]; -conf_init_fail(Config) when is_list(Config) -> - ?t:fail("This case should fail! Check that conf_member_skip and" - " conf_cleanup_skip are skipped."). - - - -start_stop_node(suite) -> []; -start_stop_node(doc) -> ["Test start and stop of slave and peer nodes"]; -start_stop_node(Config) when is_list(Config) -> - {ok,Node2} = ?t:start_node(node2,peer,[]), - {error, _} = ?t:start_node(node2,peer,[{fail_on_error,false}]), - true = lists:member(Node2,nodes()), - - {ok,Node3} = ?t:start_node(node3,slave,[]), - {error, _} = ?t:start_node(node3,slave,[]), - true = lists:member(Node3,nodes()), - - {ok,Node4} = ?t:start_node(node4,peer,[{wait,false}]), - case lists:member(Node4,nodes()) of - true -> - ?t:comment("WARNING: Node started with {wait,false}" - " is up faster than expected..."); - false -> - test_server:wait_for_node(Node4), - true = lists:member(Node4,nodes()) - end, - - true = ?t:stop_node(Node2), - false = lists:member(Node2,nodes()), - - true = ?t:stop_node(Node3), - false = lists:member(Node3,nodes()), - - true = ?t:stop_node(Node4), - false = lists:member(Node4,nodes()), - timer:sleep(2000), - false = ?t:stop_node(Node4), - - ok. - -cleanup_nodes_init(doc) -> ["Test that nodes are terminated when test case" - " is finished unless {cleanup,false} is given."]; -cleanup_nodes_init(Config) when is_list(Config) -> - {ok,DieSlave} = ?t:start_node(die_slave, slave, []), - {ok,SurviveSlave} = ?t:start_node(survive_slave, slave, [{cleanup,false}]), - {ok,DiePeer} = ?t:start_node(die_peer, peer, []), - {ok,SurvivePeer} = ?t:start_node(survive_peer, peer, [{cleanup,false}]), - [{die_slave,DieSlave}, - {survive_slave,SurviveSlave}, - {die_peer,DiePeer}, - {survive_peer,SurvivePeer} | Config]. 
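%% ----------------------------------------------------------------------
%% Illustrative sketch, not from the deleted suite: the start_node/3
%% options exercised by start_stop_node/1 and cleanup_nodes_init/1 above,
%% condensed into one place. sketch_node is a hypothetical node name.
node_option_sketch(Config) when is_list(Config) ->
    %% default options: wait for the node and fail the case on error
    {ok,Node} = ?t:start_node(sketch_node, peer, []),
    %% {fail_on_error,false}: a name clash comes back as {error,_}
    %% instead of failing the test case
    {error,_} = ?t:start_node(sketch_node, peer, [{fail_on_error,false}]),
    %% without {cleanup,false} (see cleanup_nodes_init/1 above) the node
    %% would also be stopped automatically when the test case ends
    true = ?t:stop_node(Node),
    ok.
%% ----------------------------------------------------------------------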
- - - -check_survive_nodes(suite) -> []; -check_survive_nodes(doc) -> ["Test that nodes with {cleanup,false} survived"]; -check_survive_nodes(Config) when is_list(Config) -> - timer:sleep(1000), - false = lists:member(?config(die_slave,Config),nodes()), - true = lists:member(?config(survive_slave,Config),nodes()), - false = lists:member(?config(die_peer,Config),nodes()), - true = lists:member(?config(survive_peer,Config),nodes()), - ok. - - -cleanup_nodes_fin(doc) -> ["Test that nodes started with {cleanup,false}" - " can be stopped"]; -cleanup_nodes_fin(Config) when is_list(Config) -> - Slave = ?config(survive_slave,Config), - Peer = ?config(survive_peer,Config), - - true = ?t:stop_node(Slave), - false = lists:member(Slave,nodes()), - true = ?t:stop_node(Peer), - false = lists:member(Peer,nodes()), - - C1 = lists:keydelete(die_slave,1,Config), - C2 = lists:keydelete(survive_slave,1,C1), - C3 = lists:keydelete(die_peer,1,C2), - lists:keydelete(survive_peer,1,C3). - -commercial(Config) when is_list(Config) -> - case ?t:is_commercial() of - false -> {comment,"Open-source build"}; - true -> {comment,"Commercial build"} - end. - -io_invalid_data(Config) when is_list(Config) -> - ok = io:put_chars("valid: " ++ [42]), - %% OTP-10991 caused this to hang and produce a timetrap timeout: - {'EXIT',{badarg,_}} = (catch io:put_chars("invalid: " ++ [42.0])), - ok. - -print_unexpected(Config) when is_list(Config) -> - Str = "-x-x-x- test_server_SUITE:print_unexpected -> Unexpected data -x-x-x-", - test_server_io:print_unexpected(Str), - UnexpectedLog = filename:join(filename:dirname(?config(tc_logfile,Config)), - "unexpected_io.log.html"), - {ok,Bin} = file:read_file(UnexpectedLog), - match = re:run(Bin, Str, [global,{capture,none}]), - ok. diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE_data/dummy_file b/lib/test_server/test/test_server_SUITE_data/test_server_SUITE_data/dummy_file deleted file mode 100644 index 65c88fbd75..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE_data/dummy_file +++ /dev/null @@ -1 +0,0 @@ -Dummy file.
\ No newline at end of file diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl deleted file mode 100644 index ae9f018bc8..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl +++ /dev/null @@ -1,149 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2012. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(test_server_break_SUITE). - --export([all/1, init_per_suite/1, end_per_suite/1]). --export([init_per_testcase/2, end_per_testcase/2]). --export([break_in_init_tc/1, - break_in_tc/1, - break_in_end_tc/1, - break_in_end_tc_after_fail/1, - break_in_end_tc_after_abort/1, - check_all_breaks/1]). - --include_lib("test_server/include/test_server.hrl"). - -all(suite) -> - [break_in_init_tc, - break_in_tc, - break_in_end_tc, - break_in_end_tc_after_fail, - break_in_end_tc_after_abort, - check_all_breaks]. %must be the last test - checks result of previous tests - -init_per_suite(Config) -> - spawn(fun break_and_continue_sup/0), - Config. - -end_per_suite(_Config) -> - ok. - -init_per_testcase(Case,Config) when Case==break_in_init_tc -> - Config1 = init_timetrap(500,Config), - break_and_check(Case), - Config1; -init_per_testcase(Case,Config) when Case==check_all_breaks -> - init_timetrap({seconds,20},Config); -init_per_testcase(_Case,Config) -> - init_timetrap(500,Config). - -init_timetrap(T,Config) -> - Dog = ?t:timetrap(T), - [{watchdog, Dog}|Config]. - -end_per_testcase(Case,Config) when Case==break_in_end_tc; - Case==break_in_end_tc_after_fail; - Case==break_in_end_tc_after_abort -> - break_and_check(Case), - cancel_timetrap(Config); -end_per_testcase(_Case,Config) -> - cancel_timetrap(Config). - -cancel_timetrap(Config) -> - Dog=?config(watchdog, Config), - ?t:timetrap_cancel(Dog), - ok. - - -%%%----------------------------------------------------------------- -%%% Test cases - -break_in_init_tc(Config) when is_list(Config) -> - ok. - -break_in_tc(Config) when is_list(Config) -> - break_and_check(break_in_tc), - ok. - -break_in_end_tc(Config) when is_list(Config) -> - ok. - -break_in_end_tc_after_fail(Config) when is_list(Config) -> - ?t:fail(test_case_should_fail). - -break_in_end_tc_after_abort(Config) when is_list(Config) -> - ?t:adjusted_sleep(2000). % will cause a timetrap timeout - -%% This test case checks that all breaks in previous test cases was -%% also continued, and that the break lasted as long as expected. -%% The reason for this is that some of the breaks above are in -%% end_per_testcase, and failures there will only produce a warning, -%% not an error - so this is to catch the error for real. -check_all_breaks(Config) when is_list(Config) -> - break_and_continue_sup ! {done,self()}, - receive {Breaks,Continued} -> - check_all_breaks(Breaks,Continued) - end. 
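%% ----------------------------------------------------------------------
%% Illustrative note, not from the deleted suite: the cases above rely on
%% the break/continue pair used by break_and_check/1 below --
%% ?t:break(Comment) suspends the test until ?t:continue() is called
%% (here by the break_and_continue_sup process after an adjusted_sleep of
%% ~1 s), and check_all_breaks/1 then verifies that each break lasted
%% roughly as long as that sleep.
%% ----------------------------------------------------------------------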
-%%%----------------------------------------------------------------- -%%% Internal functions - - -check_all_breaks([{From,Case,T,Start}|Breaks],[{From,End}|Continued]) -> - Diff = timer:now_diff(End,Start), - DiffSec = round(Diff/1000000), - TSec = round(T/1000000), - if DiffSec==TSec -> - ?t:format("Break in ~p successfully continued after ~p second(s)~n", - [Case,DiffSec]), - check_all_breaks(Breaks,Continued); - true -> - ?t:format("Faulty duration of break in ~p: continued after ~p second(s)~n", - [Case,DiffSec]), - ?t:fail({faulty_diff,Case,DiffSec,TSec}) - end; -check_all_breaks([],[]) -> - ok; -check_all_breaks(Breaks,Continued) -> - %% This is probably a case of a missing continue - i.e. a break - %% has been started, but it was never continued. - ?t:fail({no_match_in_breaks_and_continued,Breaks,Continued}). - -break_and_check(Case) -> - break_and_continue_sup ! {break,Case,1000,self()}, - ?t:break(atom_to_list(Case)), - break_and_continue_sup ! {continued,self()}, - ok. - -break_and_continue_sup() -> - register(break_and_continue_sup,self()), - break_and_continue_loop([],[]). - -break_and_continue_loop(Breaks,Continued) -> - receive - {break,Case,T,From} -> - Start = now(), - {RealT,_} = timer:tc(?t,adjusted_sleep,[T]), - ?t:continue(), - break_and_continue_loop([{From,Case,RealT,Start}|Breaks],Continued); - {continued,From} -> - break_and_continue_loop(Breaks,[{From,now()}|Continued]); - {done,From} -> - From ! {lists:reverse(Breaks),lists:reverse(Continued)} - end. diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_conf01_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_conf01_SUITE.erl deleted file mode 100644 index f634bc3a46..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_conf01_SUITE.erl +++ /dev/null @@ -1,188 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2009-2011. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------ -%%% Test Server self test. -%%%------------------------------------------------------------------ --module(test_server_conf01_SUITE). --include_lib("test_server/include/test_server.hrl"). - --compile(export_all). - -all(doc) -> ["Test simple conf case structure, with and without nested cases"]; -all(suite) -> - [ - {conf, conf1_init, [conf1_tc1, conf1_tc2], conf1_end}, - - {conf, [], conf2_init, [conf2_tc1, conf2_tc2], conf2_end}, - - {conf, conf3_init, [conf3_tc1, - - {conf, [], conf4_init, [conf4_tc1, conf4_tc2], conf4_end}, - - conf3_tc2], conf3_end}, - - conf5 - ]. - -%%---------- conf cases ---------- - -conf1_init(Config) when is_list(Config) -> - [{cc1,conf1}|Config]. -conf1_end(_Config) -> - ok. - -conf2_init(Config) when is_list(Config) -> - [{cc2,conf2}|Config]. -conf2_end(_Config) -> - ok. - -conf3_init(Config) when is_list(Config) -> - [{cc3,conf3}|Config]. -conf3_end(_Config) -> - ok. 
- -conf4_init(Config) when is_list(Config) -> - [{cc4,conf4}|Config]. -conf4_end(_Config) -> - ok. - -conf5_init(Config) when is_list(Config) -> - [{cc5,conf5}|Config]. -conf5_end(_Config) -> - ok. - -conf6_init(Config) when is_list(Config) -> - [{cc6,conf6}|Config]. -conf6_end(_Config) -> - ok. - - -conf5(suite) -> % test specification - [{conf, conf5_init, [conf5_tc1, - - {conf, [], conf6_init, [conf6_tc1, conf6_tc2], conf6_end}, - - conf5_tc2], conf5_end}]. - - -%%---------- test cases ---------- - -conf1_tc1(Config) when is_list(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - conf1 = ?config(cc1,Config), - ok. -conf1_tc2(Config) when is_list(Config) -> - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf1 = ?config(cc1,Config), - ok. - -conf2_tc1(Config) when is_list(Config) -> - undefined = ?config(cc1,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - conf2 = ?config(cc2,Config), - ok. -conf2_tc2(Config) when is_list(Config) -> - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf2 = ?config(cc2,Config), - ok. - -conf3_tc1(Config) when is_list(Config) -> - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - conf3 = ?config(cc3,Config), - ok. -conf3_tc2(Config) when is_list(Config) -> - conf3 = ?config(cc3,Config), - undefined = ?config(cc4,Config), - ok. - -conf4_tc1(Config) when is_list(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - ok. -conf4_tc2(Config) when is_list(Config) -> - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - ok. - -conf5_tc1(Config) when is_list(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - ok. -conf5_tc2(Config) when is_list(Config) -> - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - undefined = ?config(cc6,Config), - ok. - -conf6_tc1(Config) when is_list(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - ok. -conf6_tc2(Config) when is_list(Config) -> - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - ok. - diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_conf02_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_conf02_SUITE.erl deleted file mode 100644 index f9cca8653b..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_conf02_SUITE.erl +++ /dev/null @@ -1,295 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2009-2011. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. 
-%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------ -%%% Test Server self test. -%%%------------------------------------------------------------------ --module(test_server_conf02_SUITE). --include_lib("test_server/include/test_server.hrl"). - --compile(export_all). - -all(doc) -> ["Test simple conf case structure, with and without nested cases"]; -all(suite) -> - [ - {conf, conf1_init, [conf1_tc1, conf1_tc2], conf1_end}, - - {conf, [], conf2_init, [conf2_tc1, conf2_tc2], conf2_end}, - - {conf, conf3_init, [conf3_tc1, - - {conf, [], conf4_init, [conf4_tc1, conf4_tc2], conf4_end}, - - conf3_tc2], conf3_end}, - - conf5 - ]. - - -%%---------- conf cases ---------- - -init_per_suite(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - [{suite,init}|Config]. -end_per_suite(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - init = ?config(suite,Config), - ok. - -init_per_testcase(TC=conf1_tc1, Config) -> - init = ?config(suite,Config), - [{tc11,TC}|Config]; -init_per_testcase(TC=conf1_tc2, Config) -> - [{tc12,TC}|Config]; -init_per_testcase(TC=conf2_tc1, Config) -> - [{tc21,TC}|Config]; -init_per_testcase(TC=conf2_tc2, Config) -> - [{tc22,TC}|Config]; -init_per_testcase(TC=conf3_tc1, Config) -> - [{tc31,TC}|Config]; -init_per_testcase(TC=conf3_tc2, Config) -> - [{tc32,TC}|Config]; -init_per_testcase(TC=conf4_tc1, Config) -> - [{tc41,TC}|Config]; -init_per_testcase(TC=conf4_tc2, Config) -> - [{tc42,TC}|Config]; -init_per_testcase(TC=conf5_tc1, Config) -> - [{tc51,TC}|Config]; -init_per_testcase(TC=conf5_tc2, Config) -> - [{tc52,TC}|Config]; -init_per_testcase(TC=conf6_tc1, Config) -> - [{tc61,TC}|Config]; -init_per_testcase(TC=conf6_tc2, Config) -> - init = ?config(suite,Config), - [{tc62,TC}|Config]. - -end_per_testcase(TC=conf1_tc1, Config) -> - init = ?config(suite,Config), - TC = ?config(tc11,Config), - ok; -end_per_testcase(TC=conf1_tc2, Config) -> - TC = ?config(tc12,Config), - ok; -end_per_testcase(TC=conf2_tc1, Config) -> - TC = ?config(tc21,Config), - ok; -end_per_testcase(TC=conf2_tc2, Config) -> - TC = ?config(tc22,Config), - ok; -end_per_testcase(TC=conf3_tc1, Config) -> - TC = ?config(tc31,Config), - ok; -end_per_testcase(TC=conf3_tc2, Config) -> - TC = ?config(tc32,Config), - ok; -end_per_testcase(TC=conf4_tc1, Config) -> - TC = ?config(tc41,Config), - ok; -end_per_testcase(TC=conf4_tc2, Config) -> - TC = ?config(tc42,Config), - ok; -end_per_testcase(TC=conf5_tc1, Config) -> - TC = ?config(tc51,Config), - ok; -end_per_testcase(TC=conf5_tc2, Config) -> - TC = ?config(tc52,Config), - ok; -end_per_testcase(TC=conf6_tc1, Config) -> - TC = ?config(tc61,Config), - ok; -end_per_testcase(TC=conf6_tc2, Config) -> - init = ?config(suite,Config), - TC = ?config(tc62,Config), - ok. - -conf1_init(Config) when is_list(Config) -> - init = ?config(suite,Config), - [{cc1,conf1}|Config]. -conf1_end(_Config) -> - ok. - -conf2_init(Config) when is_list(Config) -> - [{cc2,conf2}|Config]. -conf2_end(_Config) -> - ok. 
- -conf3_init(Config) when is_list(Config) -> - [{cc3,conf3}|Config]. -conf3_end(_Config) -> - ok. - -conf4_init(Config) when is_list(Config) -> - [{cc4,conf4}|Config]. -conf4_end(_Config) -> - ok. - -conf5_init(Config) when is_list(Config) -> - [{cc5,conf5}|Config]. -conf5_end(_Config) -> - ok. - -conf6_init(Config) when is_list(Config) -> - init = ?config(suite,Config), - [{cc6,conf6}|Config]. -conf6_end(_Config) -> - ok. - -conf5(suite) -> % test specification - [{conf, conf5_init, [conf5_tc1, - - {conf, [], conf6_init, [conf6_tc1, conf6_tc2], conf6_end}, - - conf5_tc2], conf5_end}]. - -%%---------- test cases ---------- - -conf1_tc1(Config) when is_list(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - init = ?config(suite,Config), - conf1 = ?config(cc1,Config), - conf1_tc1 = ?config(tc11,Config), - ok. -conf1_tc2(Config) when is_list(Config) -> - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - init = ?config(suite,Config), - conf1 = ?config(cc1,Config), - conf1_tc2 = ?config(tc12,Config), - ok. - -conf2_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - undefined = ?config(cc1,Config), - undefined = ?config(tc11,Config), - conf2 = ?config(cc2,Config), - conf2_tc1 = ?config(tc21,Config), - ok. -conf2_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - conf2 = ?config(cc2,Config), - undefined = ?config(tc21,Config), - conf2_tc2 = ?config(tc22,Config), - ok. - -conf3_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - undefined = ?config(cc2,Config), - undefined = ?config(tc22,Config), - conf3 = ?config(cc3,Config), - conf3_tc1 = ?config(tc31,Config), - ok. -conf3_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - conf3 = ?config(cc3,Config), - undefined = ?config(cc4,Config), - undefined = ?config(tc31,Config), - undefined = ?config(tc41,Config), - conf3_tc2 = ?config(tc32,Config), - ok. - -conf4_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - undefined = ?config(tc32,Config), - conf4_tc1 = ?config(tc41,Config), - ok. -conf4_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - undefined = ?config(tc41,Config), - conf4_tc2 = ?config(tc42,Config), - ok. - -conf5_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - undefined = ?config(tc42,Config), - conf5_tc1 = ?config(tc51,Config), - ok. -conf5_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - undefined = ?config(cc6,Config), - undefined = ?config(tc51,Config), - undefined = ?config(tc62,Config), - conf5_tc2 = ?config(tc52,Config), - ok. 
- -conf6_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - undefined = ?config(tc52,Config), - conf6_tc1 = ?config(tc61,Config), - ok. -conf6_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - undefined = ?config(tc61,Config), - conf6_tc2 = ?config(tc62,Config), - ok. diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl deleted file mode 100644 index ab5ccec7a2..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE.erl +++ /dev/null @@ -1,59 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2012. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(test_server_cover_SUITE). - --export([all/1, init_per_suite/1, end_per_suite/1]). --export([init_per_testcase/2, end_per_testcase/2]). --export([tc1/1, tc2/1]). - --include_lib("test_server/include/test_server.hrl"). - -all(suite) -> - [tc1,tc2]. - -init_per_suite(Config) -> - Config. - -end_per_suite(_Config) -> - ok. - -init_per_testcase(_Case,Config) -> - Dog = ?t:timetrap({minutes,10}), - [{watchdog, Dog}|Config]. - -end_per_testcase(_Case,Config) -> - Dog=?config(watchdog, Config), - ?t:timetrap_cancel(Dog), - ok. - - -%%%----------------------------------------------------------------- -%%% Test cases -tc1(Config) when is_list(Config) -> - cover_helper:foo(), - ok. - -tc2(Config) when is_list(Config) -> - cover_helper:bar(), - ok. - -%%%----------------------------------------------------------------- -%%% Internal functions - diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl b/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl deleted file mode 100644 index 6c74eb4e8a..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_cover_SUITE_data/cover_helper.erl +++ /dev/null @@ -1,4 +0,0 @@ --module(cover_helper). --compile(export_all). -foo() -> ok. -bar() -> ok. diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_parallel01_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_parallel01_SUITE.erl deleted file mode 100644 index 0385435710..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_parallel01_SUITE.erl +++ /dev/null @@ -1,519 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2009-2011. All Rights Reserved. 
-%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------ -%%% Test Server self test. -%%%------------------------------------------------------------------ --module(test_server_parallel01_SUITE). --include_lib("test_server/include/test_server.hrl"). - --compile(export_all). - -%% ------------------------------------------------------------------- -%% Notes on parallel execution of test cases -%% ------------------------------------------------------------------- -%% -%% A group nested under a parallel group will start executing in -%% parallel with previous (parallel) test cases (no matter what -%% properties the nested group has). Test cases are however never -%% executed in parallel with the start or end conf case of the same -%% group! Because of this, the test_server_ctrl loop waits at -%% the end conf of a group for all parallel cases to finish -%% before the end conf case actually executes. This has the effect -%% that it's only after a nested group has finished that any -%% remaining parallel cases in the previous group get spawned (*). -%% Example (all parallel cases): -%% -%% group1_init |----> -%% group1_case1 | ---------> -%% group1_case2 | ---------------------------------> -%% group2_init | ----> -%% group2_case1 | ------> -%% group2_case2 | ----------> -%% group2_end | ---> -%% group1_case3 (*)| ----> -%% group1_case4 (*)| --> -%% group1_end | ---> -%% - -all(doc) -> ["Test simple conf case structure, with and without nested cases"]; -all(suite) -> - [ - {conf, [parallel], conf1_init, [conf1_tc1, conf1_tc2], conf1_end}, - - {conf, [parallel], conf2_init, [conf2_tc1, conf2_tc2], conf2_end}, - - {conf, [parallel], conf3_init, [conf3_tc1, conf3_tc1, - - {conf, [], - conf4_init, [conf4_tc1, conf4_tc2], conf4_end}, - - conf3_tc2], conf3_end}, - - conf5, - - {conf, [parallel], conf7_init, [conf7_tc1, conf7_tc1, - - {conf, [parallel], - conf8_init, [conf8_tc1, conf8_tc2], conf8_end}, - - conf7_tc2], conf7_end} - - ]. - - -%%---------- conf cases ---------- - -init_per_suite(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - [{suite,init}|Config]. -end_per_suite(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - init = ?config(suite,Config), - ok. 
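%% ----------------------------------------------------------------------
%% Illustrative sketch, not from the deleted suite: the group term used
%% throughout all(suite) above is
%%
%%     {conf, Properties, InitConf, CasesAndNestedGroups, EndConf}
%%
%% (the 4-tuple form without Properties is also accepted, as in the
%% conf5(suite) clause further down). With [parallel] in Properties the
%% cases run concurrently, but the group's own init and end conf
%% functions are still serialized with them, and cases listed after a
%% nested group are only spawned once that group has finished -- the (*)
%% effect described in the notes above. Function and case names below
%% are hypothetical.
parallel_group_sketch(suite) ->
    [{conf, [parallel], outer_init,
      [fast_case,
       {conf, [], inner_init, [inner_case], inner_end},
       late_case],          %% spawned only after the nested group ends
      outer_end}].
%% ----------------------------------------------------------------------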
- -init_per_testcase(TC=conf1_tc1, Config) -> - init = ?config(suite,Config), - [{tc11,TC}|Config]; -init_per_testcase(TC=conf1_tc2, Config) -> - [{tc12,TC}|Config]; -init_per_testcase(TC=conf2_tc1, Config) -> - [{tc21,TC}|Config]; -init_per_testcase(TC=conf2_tc2, Config) -> - [{tc22,TC}|Config]; -init_per_testcase(TC=conf3_tc1, Config) -> - [{tc31,TC}|Config]; -init_per_testcase(TC=conf3_tc2, Config) -> - [{tc32,TC}|Config]; -init_per_testcase(TC=conf4_tc1, Config) -> - [{tc41,TC}|Config]; -init_per_testcase(TC=conf4_tc2, Config) -> - [{tc42,TC}|Config]; -init_per_testcase(TC=conf5_tc1, Config) -> - [{tc51,TC}|Config]; -init_per_testcase(TC=conf5_tc2, Config) -> - [{tc52,TC}|Config]; -init_per_testcase(TC=conf6_tc1, Config) -> - [{tc61,TC}|Config]; -init_per_testcase(TC=conf6_tc2, Config) -> - init = ?config(suite,Config), - [{tc62,TC}|Config]; -init_per_testcase(TC=conf7_tc1, Config) -> - [{tc71,TC}|Config]; -init_per_testcase(TC=conf7_tc2, Config) -> - [{tc72,TC}|Config]; -init_per_testcase(TC=conf8_tc1, Config) -> - [{tc81,TC}|Config]; -init_per_testcase(TC=conf8_tc2, Config) -> - init = ?config(suite,Config), - [{tc82,TC}|Config]. - -end_per_testcase(TC=conf1_tc1, Config) -> - init = ?config(suite,Config), - TC = ?config(tc11,Config), - ok; -end_per_testcase(TC=conf1_tc2, Config) -> - TC = ?config(tc12,Config), - ok; -end_per_testcase(TC=conf2_tc1, Config) -> - TC = ?config(tc21,Config), - ok; -end_per_testcase(TC=conf2_tc2, Config) -> - TC = ?config(tc22,Config), - ok; -end_per_testcase(TC=conf3_tc1, Config) -> - TC = ?config(tc31,Config), - ok; -end_per_testcase(TC=conf3_tc2, Config) -> - TC = ?config(tc32,Config), - ok; -end_per_testcase(TC=conf4_tc1, Config) -> - TC = ?config(tc41,Config), - ok; -end_per_testcase(TC=conf4_tc2, Config) -> - TC = ?config(tc42,Config), - ok; -end_per_testcase(TC=conf5_tc1, Config) -> - TC = ?config(tc51,Config), - ok; -end_per_testcase(TC=conf5_tc2, Config) -> - TC = ?config(tc52,Config), - ok; -end_per_testcase(TC=conf6_tc1, Config) -> - TC = ?config(tc61,Config), - ok; -end_per_testcase(TC=conf6_tc2, Config) -> - init = ?config(suite,Config), - TC = ?config(tc62,Config), - ok; -end_per_testcase(TC=conf7_tc1, Config) -> - TC = ?config(tc71,Config), - ok; -end_per_testcase(TC=conf7_tc2, Config) -> - TC = ?config(tc72,Config), - ok; -end_per_testcase(TC=conf8_tc1, Config) -> - TC = ?config(tc81,Config), - ok; -end_per_testcase(TC=conf8_tc2, Config) -> - init = ?config(suite,Config), - TC = ?config(tc82,Config), - ok. - -conf1_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [parallel] = ?config(tc_group_properties,Config), - init = ?config(suite,Config), - [{t0,now()},{cc1,conf1}|Config]. -conf1_end(Config) -> - %% check 2s & 3s < 4s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 4000000 -> exit({bad_parallel_exec,Ms}); - Ms < 3000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. - -conf2_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [parallel] = ?config(tc_group_properties,Config), - [{t0,now()},{cc2,conf2}|Config]. -conf2_end(Config) -> - %% check 3s & 2s < 4s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 4000000 -> exit({bad_parallel_exec,Ms}); - Ms < 3000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. 
- -conf3_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [parallel] = ?config(tc_group_properties,Config), - [{t0,now()},{cc3,conf3}|Config]. -conf3_end(Config) -> - %% check 6s & 6s & (2s & 3s) & 1s = ~6s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 7000000 -> exit({bad_parallel_exec,Ms}); - Ms < 6000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. - -conf4_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [] = ?config(tc_group_properties,Config), - [{t0,now()},{cc4,conf4}|Config]. -conf4_end(Config) -> - %% check 2s & 3s >= 5s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 6000000 -> exit({bad_parallel_exec,Ms}); - Ms < 5000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. - -conf5_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [] = ?config(tc_group_properties,Config), - [{t0,now()},{cc5,conf5}|Config]. -conf5_end(Config) -> - %% check 1s & 1s & (3s & 2s) & 1s = ~6s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 7500000 -> exit({bad_parallel_exec,Ms}); - Ms < 6000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. - -conf6_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [parallel] = ?config(tc_group_properties,Config), - init = ?config(suite,Config), - [{t0,now()},{cc6,conf6}|Config]. -conf6_end(Config) -> - %% check 3s & 2s < 5s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 4500000 -> exit({bad_parallel_exec,Ms}); - Ms < 3000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. - -conf5(suite) -> % test specification - [{conf, conf5_init, [conf5_tc1, conf5_tc1, - - {conf, [parallel], conf6_init, [conf6_tc1, conf6_tc2], conf6_end}, - - conf5_tc2], conf5_end}]. - -conf7_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [parallel] = ?config(tc_group_properties,Config), - [{t0,now()},{cc7,conf7}|Config]. -conf7_end(Config) -> - %% check 1s & 1s & (2s & 2s) & 1s = ~3s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 4000000 -> exit({bad_parallel_exec,Ms}); - Ms < 3000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. - -conf8_init(Config) when is_list(Config) -> - test_server:comment(io_lib:format("~p",[now()])), - [parallel] = ?config(tc_group_properties,Config), - init = ?config(suite,Config), - [{t0,now()},{cc8,conf8}|Config]. -conf8_end(Config) -> - %% check 2s & 2s < 4s - Ms = timer:now_diff(now(),?config(t0,Config)), - test_server:comment(io_lib:format("~p",[now()])), - if Ms > 3000000 -> exit({bad_parallel_exec,Ms}); - Ms < 2000000 -> exit({bad_parallel_exec,Ms}); - true -> ok - end. - - -%%---------- test cases ---------- - -conf1_tc1(Config) when is_list(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - init = ?config(suite,Config), - conf1 = ?config(cc1,Config), - conf1_tc1 = ?config(tc11,Config), - timer:sleep(2000), - test_server:comment(io_lib:format("~p",[now()])), - ok. 
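%% ----------------------------------------------------------------------
%% Illustrative note, not from the deleted suite: the "check 2s & 3s < 4s"
%% style assertions in the conf*_end functions above follow directly from
%% the sleep times in the cases. When a group runs with [parallel], its
%% elapsed time is roughly that of the longest single case rather than
%% the sum:
%%
%%   conf1 ([parallel]):  max(2000, 3000) ms ~= 3 s, accepted window 3-4 s
%%   conf4 ([], serial):  2000 + 3000 ms     =  5 s, accepted window 5-6 s
%% ----------------------------------------------------------------------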
-conf1_tc2(Config) when is_list(Config) -> - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - init = ?config(suite,Config), - conf1 = ?config(cc1,Config), - conf1_tc2 = ?config(tc12,Config), - timer:sleep(3000), - test_server:comment(io_lib:format("~p",[now()])), - ok. - -conf2_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - undefined = ?config(cc1,Config), - undefined = ?config(tc11,Config), - conf2 = ?config(cc2,Config), - conf2_tc1 = ?config(tc21,Config), - timer:sleep(3000), - test_server:comment(io_lib:format("~p",[now()])), - ok. -conf2_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - conf2 = ?config(cc2,Config), - undefined = ?config(tc21,Config), - conf2_tc2 = ?config(tc22,Config), - timer:sleep(2000), - test_server:comment(io_lib:format("~p",[now()])), - ok. - -conf3_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - undefined = ?config(cc2,Config), - undefined = ?config(tc22,Config), - conf3 = ?config(cc3,Config), - conf3_tc1 = ?config(tc31,Config), - timer:sleep(6000), - test_server:comment(io_lib:format("~p",[now()])), - ok. -conf3_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - conf3 = ?config(cc3,Config), - undefined = ?config(cc4,Config), - undefined = ?config(tc31,Config), - undefined = ?config(tc41,Config), - conf3_tc2 = ?config(tc32,Config), - timer:sleep(1000), - test_server:comment(io_lib:format("~p",[now()])), - ok. - -conf4_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - undefined = ?config(tc32,Config), - conf4_tc1 = ?config(tc41,Config), - timer:sleep(2000), - test_server:comment(io_lib:format("~p",[now()])), - ok. -conf4_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - undefined = ?config(tc41,Config), - conf4_tc2 = ?config(tc42,Config), - timer:sleep(3000), - test_server:comment(io_lib:format("~p",[now()])), - ok. - -conf5_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - undefined = ?config(tc42,Config), - conf5_tc1 = ?config(tc51,Config), - timer:sleep(1000), - test_server:comment(io_lib:format("~p",[now()])), - ok. -conf5_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - undefined = ?config(cc6,Config), - undefined = ?config(tc51,Config), - undefined = ?config(tc62,Config), - conf5_tc2 = ?config(tc52,Config), - timer:sleep(1000), - test_server:comment(io_lib:format("~p",[now()])), - ok. 
- -conf6_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - undefined = ?config(tc52,Config), - conf6_tc1 = ?config(tc61,Config), - timer:sleep(3000), - test_server:comment(io_lib:format("~p",[now()])), - ok. -conf6_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - undefined = ?config(tc61,Config), - conf6_tc2 = ?config(tc62,Config), - timer:sleep(2000), - test_server:comment(io_lib:format("~p",[now()])), - ok. - -conf7_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - undefined = ?config(cc5,Config), - undefined = ?config(cc6,Config), - conf7 = ?config(cc7,Config), - undefined = ?config(tc62,Config), - conf7_tc1 = ?config(tc71,Config), - timer:sleep(1000), - test_server:comment(io_lib:format("~p",[now()])), - ok. -conf7_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf7 = ?config(cc7,Config), - undefined = ?config(cc8,Config), - undefined = ?config(tc71,Config), - undefined = ?config(tc82,Config), - conf7_tc2 = ?config(tc72,Config), - timer:sleep(1000), - test_server:comment(io_lib:format("~p",[now()])), - ok. - -conf8_tc1(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - undefined = ?config(cc5,Config), - undefined = ?config(cc6,Config), - conf7 = ?config(cc7,Config), - conf8 = ?config(cc8,Config), - undefined = ?config(tc72,Config), - conf8_tc1 = ?config(tc81,Config), - timer:sleep(2000), - test_server:comment(io_lib:format("~p",[now()])), - ok. -conf8_tc2(Config) when is_list(Config) -> - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf7 = ?config(cc7,Config), - conf8 = ?config(cc8,Config), - undefined = ?config(tc81,Config), - conf8_tc2 = ?config(tc82,Config), - timer:sleep(2000), - test_server:comment(io_lib:format("~p",[now()])), - ok. diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_shuffle01_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_shuffle01_SUITE.erl deleted file mode 100644 index 847c7b6bdd..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_shuffle01_SUITE.erl +++ /dev/null @@ -1,472 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2009-2011. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. 
-%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% - -%%%------------------------------------------------------------------ -%%% Test Server self test. -%%%------------------------------------------------------------------ --module(test_server_shuffle01_SUITE). --include_lib("test_server/include/test_server.hrl"). - --compile(export_all). - -all(doc) -> ["Test simple conf case structure, with and without nested cases"]; -all(suite) -> - [ - {conf, [shuffle], conf1_init, [conf1_tc1, conf1_tc2, conf1_tc3], conf1_end}, - - {conf, [{shuffle,{1,2,3}}], conf2_init, [conf2_tc1, conf2_tc2, conf2_tc3], conf2_end}, - - {conf, [shuffle], conf3_init, [conf3_tc1, conf3_tc2, conf3_tc3, - - {conf, [], conf4_init, - [conf4_tc1, conf4_tc2], conf4_end}], - conf3_end}, - - conf5, - - {conf, [shuffle,{repeat,5},parallel], conf7_init, [conf7_tc1, - - {conf, [{shuffle,{3,2,1}},{repeat,3}], - conf8_init, [conf8_tc1, conf8_tc2, conf8_tc3], - conf8_end}, - - conf7_tc2, conf7_tc3], conf7_end} - - ]. - - -%%---------- conf cases ---------- - -init_per_suite(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - [{suite,init}|Config]. -end_per_suite(Config) -> - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - init = ?config(suite,Config), - ok. - -init_per_testcase(TC=conf1_tc1, Config) -> - init = ?config(suite,Config), - [{tc11,TC}|Config]; -init_per_testcase(TC=conf1_tc2, Config) -> - [{tc12,TC}|Config]; -init_per_testcase(TC=conf1_tc3, Config) -> - [{tc13,TC}|Config]; -init_per_testcase(TC=conf2_tc1, Config) -> - [{tc21,TC}|Config]; -init_per_testcase(TC=conf2_tc2, Config) -> - [{tc22,TC}|Config]; -init_per_testcase(TC=conf2_tc3, Config) -> - [{tc23,TC}|Config]; -init_per_testcase(TC=conf3_tc1, Config) -> - [{tc31,TC}|Config]; -init_per_testcase(TC=conf3_tc2, Config) -> - [{tc32,TC}|Config]; -init_per_testcase(TC=conf3_tc3, Config) -> - [{tc33,TC}|Config]; -init_per_testcase(TC=conf4_tc1, Config) -> - [{tc41,TC}|Config]; -init_per_testcase(TC=conf4_tc2, Config) -> - [{tc42,TC}|Config]; -init_per_testcase(TC=conf5_tc1, Config) -> - [{tc51,TC}|Config]; -init_per_testcase(TC=conf5_tc2, Config) -> - [{tc52,TC}|Config]; -init_per_testcase(TC=conf6_tc1, Config) -> - [{tc61,TC}|Config]; -init_per_testcase(TC=conf6_tc2, Config) -> - init = ?config(suite,Config), - [{tc62,TC}|Config]; -init_per_testcase(TC=conf6_tc3, Config) -> - [{tc63,TC}|Config]; -init_per_testcase(TC=conf7_tc1, Config) -> - [{tc71,TC}|Config]; -init_per_testcase(TC=conf7_tc2, Config) -> - [{tc72,TC}|Config]; -init_per_testcase(TC=conf7_tc3, Config) -> - [{tc73,TC}|Config]; -init_per_testcase(TC=conf8_tc1, Config) -> - [{tc81,TC}|Config]; -init_per_testcase(TC=conf8_tc2, Config) -> - init = ?config(suite,Config), - [{tc82,TC}|Config]; -init_per_testcase(TC=conf8_tc3, Config) -> - [{tc83,TC}|Config]. 
- -end_per_testcase(TC=conf1_tc1, Config) -> - init = ?config(suite,Config), - TC = ?config(tc11,Config), - ok; -end_per_testcase(TC=conf1_tc2, Config) -> - TC = ?config(tc12,Config), - ok; -end_per_testcase(TC=conf1_tc3, Config) -> - TC = ?config(tc13,Config), - ok; -end_per_testcase(TC=conf2_tc1, Config) -> - TC = ?config(tc21,Config), - ok; -end_per_testcase(TC=conf2_tc2, Config) -> - TC = ?config(tc22,Config), - ok; -end_per_testcase(TC=conf2_tc3, Config) -> - TC = ?config(tc23,Config), - ok; -end_per_testcase(TC=conf3_tc1, Config) -> - TC = ?config(tc31,Config), - ok; -end_per_testcase(TC=conf3_tc2, Config) -> - TC = ?config(tc32,Config), - ok; -end_per_testcase(TC=conf3_tc3, Config) -> - TC = ?config(tc33,Config), - ok; -end_per_testcase(TC=conf4_tc1, Config) -> - TC = ?config(tc41,Config), - ok; -end_per_testcase(TC=conf4_tc2, Config) -> - TC = ?config(tc42,Config), - ok; -end_per_testcase(TC=conf5_tc1, Config) -> - TC = ?config(tc51,Config), - ok; -end_per_testcase(TC=conf5_tc2, Config) -> - TC = ?config(tc52,Config), - ok; -end_per_testcase(TC=conf6_tc1, Config) -> - TC = ?config(tc61,Config), - ok; -end_per_testcase(TC=conf6_tc2, Config) -> - init = ?config(suite,Config), - TC = ?config(tc62,Config), - ok; -end_per_testcase(TC=conf6_tc3, Config) -> - TC = ?config(tc63,Config), - ok; -end_per_testcase(TC=conf7_tc1, Config) -> - TC = ?config(tc71,Config), - ok; -end_per_testcase(TC=conf7_tc2, Config) -> - TC = ?config(tc72,Config), - ok; -end_per_testcase(TC=conf7_tc3, Config) -> - TC = ?config(tc73,Config), - ok; -end_per_testcase(TC=conf8_tc1, Config) -> - TC = ?config(tc81,Config), - ok; -end_per_testcase(TC=conf8_tc2, Config) -> - init = ?config(suite,Config), - TC = ?config(tc82,Config), - ok; -end_per_testcase(TC=conf8_tc3, Config) -> - TC = ?config(tc83,Config), - ok. - - -conf1_init(Config) when is_list(Config) -> - init = ?config(suite,Config), - [{shuffle,{_,_,_}}] = ?config(tc_group_properties,Config), - test_server:comment("Shuffle (random seed)"), - [{cc1,conf1}|Config]. -conf1_end(_Config) -> - ok. - -conf2_init(Config) when is_list(Config) -> - [{shuffle,{1,2,3}}] = ?config(tc_group_properties,Config), - test_server:comment("Shuffle (user seed)"), - [{cc2,conf2}|Config]. -conf2_end(_Config) -> - ok. - -conf3_init(Config) when is_list(Config) -> - [{shuffle,{_,_,_}}] = ?config(tc_group_properties,Config), - test_server:comment("Shuffle (random)"), - [{cc3,conf3}|Config]. -conf3_end(_Config) -> - ok. - -conf4_init(Config) when is_list(Config) -> - [] = ?config(tc_group_properties,Config), - test_server:comment("No shuffle"), - [{cc4,conf4}|Config]. -conf4_end(_Config) -> - ok. - -conf5_init(Config) when is_list(Config) -> - [] = ?config(tc_group_properties,Config), - test_server:comment("No shuffle"), - [{cc5,conf5}|Config]. -conf5_end(_Config) -> - ok. - -conf6_init(Config) when is_list(Config) -> - [{shuffle,{_,_,_}}] = ?config(tc_group_properties,Config), - test_server:comment("Shuffle (random)"), - init = ?config(suite,Config), - [{cc6,conf6}|Config]. -conf6_end(_Config) -> - ok. - -conf5(suite) -> % test specification - [{conf, conf5_init, [conf5_tc1, - - {conf, [shuffle], conf6_init, - [conf6_tc1, conf6_tc2, conf6_tc3], - conf6_end}, - - conf5_tc2], conf5_end}]. - -conf7_init(Config) when is_list(Config) -> - test_server:comment("Group 7, Shuffle (random seed)"), - case proplists:get_value(shuffle,?config(tc_group_properties,Config)) of - {_,_,_} -> ok - end, - [{cc7,conf7}|Config]. -conf7_end(_Config) -> - ok. 
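%% ----------------------------------------------------------------------
%% Illustrative note, not from the deleted suite: the shuffle property is
%% written in two forms, both used in all(suite) above:
%%
%%   {conf, [shuffle], Init, Cases, End}            %% fresh random seed --
%%                                                  %% a new case order per run
%%   {conf, [{shuffle,{1,2,3}}], Init, Cases, End}  %% explicit {A,B,C} seed, so
%%                                                  %% the order can be reproduced
%%
%% Either way the seed actually used ends up as {shuffle,{A,B,C}} in the
%% tc_group_properties entry of Config, which is what the conf*_init
%% functions above and below match on.
%% ----------------------------------------------------------------------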
- -conf8_init(Config) when is_list(Config) -> - test_server:comment("Group 8, Shuffle (user start seed)"), - case proplists:get_value(shuffle,?config(tc_group_properties,Config)) of - {_,_,_} -> ok - end, - init = ?config(suite,Config), - [{cc8,conf8}|Config]. -conf8_end(_Config) -> - ok. - - -%%---------- test cases ---------- - -conf1_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - init = ?config(suite,Config), - conf1 = ?config(cc1,Config), - conf1_tc1 = ?config(tc11,Config), - ok. -conf1_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - init = ?config(suite,Config), - conf1 = ?config(cc1,Config), - conf1_tc2 = ?config(tc12,Config), - ok. -conf1_tc3(suite) -> []; -conf1_tc3(_Config) -> - test_server:comment("Case 3"), - ok. - -conf2_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - init = ?config(suite,Config), - undefined = ?config(cc1,Config), - conf2 = ?config(cc2,Config), - conf2_tc1 = ?config(tc21,Config), - ok. -conf2_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - init = ?config(suite,Config), - conf2 = ?config(cc2,Config), - conf2_tc2 = ?config(tc22,Config), - ok. -conf2_tc3(suite) -> []; -conf2_tc3(_Config) -> - test_server:comment("Case 3"), - ok. - -conf3_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - init = ?config(suite,Config), - undefined = ?config(cc2,Config), - conf3 = ?config(cc3,Config), - conf3_tc1 = ?config(tc31,Config), - ok. -conf3_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - init = ?config(suite,Config), - conf3 = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf3_tc2 = ?config(tc32,Config), - ok. -conf3_tc3(suite) -> []; -conf3_tc3(_Config) -> - test_server:comment("Case 3"), - ok. - -conf4_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - conf4_tc1 = ?config(tc41,Config), - ok. -conf4_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf3 = ?config(cc3,Config), - conf4 = ?config(cc4,Config), - conf4_tc2 = ?config(tc42,Config), - ok. - -conf5_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - conf5_tc1 = ?config(tc51,Config), - ok. -conf5_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - undefined = ?config(cc6,Config), - conf5_tc2 = ?config(tc52,Config), - ok. 
- -conf6_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - conf6_tc1 = ?config(tc61,Config), - ok. -conf6_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf5 = ?config(cc5,Config), - conf6 = ?config(cc6,Config), - conf6_tc2 = ?config(tc62,Config), - ok. -conf6_tc3(suite) -> []; -conf6_tc3(_Config) -> - test_server:comment("Case 3"), - ok. - -conf7_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - undefined = ?config(cc5,Config), - undefined = ?config(cc6,Config), - conf7 = ?config(cc7,Config), - conf7_tc1 = ?config(tc71,Config), - ok. -conf7_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf7 = ?config(cc7,Config), - undefined = ?config(cc8,Config), - conf7_tc2 = ?config(tc72,Config), - ok. -conf7_tc3(suite) -> []; -conf7_tc3(_Config) -> - test_server:comment("Case 3"), - ok. - -conf8_tc1(Config) when is_list(Config) -> - test_server:comment("Case 1"), - init = ?config(suite,Config), - case ?config(data_dir,Config) of - undefined -> exit(no_data_dir); - _ -> ok - end, - undefined = ?config(cc1,Config), - undefined = ?config(cc2,Config), - undefined = ?config(cc3,Config), - undefined = ?config(cc4,Config), - undefined = ?config(cc5,Config), - undefined = ?config(cc6,Config), - conf7 = ?config(cc7,Config), - conf8 = ?config(cc8,Config), - conf8_tc1 = ?config(tc81,Config), - ok. -conf8_tc2(Config) when is_list(Config) -> - test_server:comment("Case 2"), - init = ?config(suite,Config), - case ?config(priv_dir,Config) of - undefined -> exit(no_priv_dir); - _ -> ok - end, - conf7 = ?config(cc7,Config), - conf8 = ?config(cc8,Config), - conf8_tc2 = ?config(tc82,Config), - ok. -conf8_tc3(suite) -> []; -conf8_tc3(_Config) -> - test_server:comment("Case 3"), - ok. diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_skip_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_skip_SUITE.erl deleted file mode 100644 index 871bd21ee7..0000000000 --- a/lib/test_server/test/test_server_SUITE_data/test_server_skip_SUITE.erl +++ /dev/null @@ -1,44 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2004-2011. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
--module(test_server_skip_SUITE).
-
--export([all/1, init_per_suite/1, end_per_suite/1]).
--export([dummy/1]).
-
--include_lib("test_server/include/test_server.hrl").
--include_lib("test_server/include/test_server_line.hrl").
-
-all(suite) ->
-    [dummy].
-
-init_per_suite(Config) when is_list(Config) ->
-    {skip,"Skipping init_per_suite - check that \'dummy\' and"
-     " \'end_per_suite\' are also skipped"}.
-
-dummy(suite) -> [];
-dummy(doc) -> ["This testcase should never be executed"];
-dummy(Config) when is_list(Config) ->
-    ?t:fail("This testcase should not be executed since"
-            " init_per_suite/1 is skipped").
-
-end_per_suite(doc) -> ["This testcase should never be executed"];
-end_per_suite(Config) when is_list(Config) ->
-    ?t:fail("end_per_suite/1 should not be executed when"
-            " init_per_suite/1 is skipped").
diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_unicode_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_unicode_SUITE.erl
deleted file mode 100644
index 3a3366218b..0000000000
--- a/lib/test_server/test/test_server_SUITE_data/test_server_unicode_SUITE.erl
+++ /dev/null
@@ -1,82 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2013. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%%     http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
--module(test_server_unicode_SUITE).
-
--export([all/1, init_per_suite/1, end_per_suite/1]).
--export([init_per_testcase/2, end_per_testcase/2]).
--export(['#=@: difficult_case_name_äöå'/1,
-         print_and_log_unicode/1,
-         print_and_log_latin1/1]).
-
--include_lib("test_server/include/test_server.hrl").
-
-all(suite) ->
-    ['#=@: difficult_case_name_äöå',
-     print_and_log_unicode,
-     print_and_log_latin1].
-
-init_per_suite(Config) ->
-    Config.
-
-end_per_suite(_Config) ->
-    ok.
-
-init_per_testcase(_Case,Config) ->
-    init_timetrap(500,Config).
-
-init_timetrap(T,Config) ->
-    Dog = ?t:timetrap(T),
-    [{watchdog, Dog}|Config].
-
-end_per_testcase(_Case,Config) ->
-    cancel_timetrap(Config).
-
-cancel_timetrap(Config) ->
-    Dog=?config(watchdog, Config),
-    ?t:timetrap_cancel(Dog),
-    ok.
-
-
-%%%-----------------------------------------------------------------
-%%% Test cases
-
-'#=@: difficult_case_name_äöå'(Config) when is_list(Config) ->
-    ok.
-
-print_and_log_unicode(Config) when is_list(Config) ->
-    String = "שלום-שלום+של 日本語",
-    test_server:comment(String),
-    test_server:capture_start(),
-    io:format("String with ts: ~ts",[String]),
-    test_server:capture_stop(),
-    "String with ts: "++String = lists:flatten(test_server:capture_get()),
-    ok.
- -print_and_log_latin1(Config) when is_list(Config) -> - String = "æøå", - test_server:comment(String), - test_server:capture_start(), - io:format("String with s: ~s",[String]), - io:format("String with ts: ~ts",[String]), - test_server:capture_stop(), - ["String with s: "++String, - "String with ts: "++String] = - [lists:flatten(L) || L<- test_server:capture_get()], - ok. diff --git a/lib/test_server/test/test_server_test_lib.erl b/lib/test_server/test/test_server_test_lib.erl deleted file mode 100644 index e2680938e0..0000000000 --- a/lib/test_server/test/test_server_test_lib.erl +++ /dev/null @@ -1,217 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 2009-2013. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(test_server_test_lib). - --export([parse_suite/1]). --export([init/2, pre_init_per_testcase/3, post_end_per_testcase/4]). - -%% for test_server_SUITE when node can not be started as slave --export([prepare_tester_node/2]). - --include("test_server_test_lib.hrl"). - -%% The CTH hooks all tests -init(_Id, _Opts) -> - []. - -pre_init_per_testcase(_TC,Config,State) -> - case os:type() of - {win32, _} -> - %% Extend timeout for windows as starting node - %% can take a long time there - test_server:timetrap( 120000 * test_server:timetrap_scale_factor()); - _ -> - ok - end, - {start_slave(Config, 50),State}. - -start_slave(Config,_Level) -> - [_,Host] = string:tokens(atom_to_list(node()), "@"), - - ct:log("Trying to start ~s~n", - ["test_server_tester@"++Host]), - case slave:start(Host, test_server_tester, []) of - {error,Reason} -> - test_server:fail(Reason); - {ok,Node} -> - ct:log("Node ~p started~n", [Node]), - IsCover = test_server:is_cover(), - if IsCover -> - cover:start(Node); - true-> - ok - end, - prepare_tester_node(Node,Config) - end. - -prepare_tester_node(Node,Config) -> - DataDir = proplists:get_value(data_dir, Config), - %% We would normally use priv_dir for temporary data, - %% but the pathnames gets too long on Windows. - %% Until the run-time system can support long pathnames, - %% use the data dir. - WorkDir = DataDir, - - %% WorkDir as well as directory of Test Server suites - %% have to be in code path on Test Server node. - [_ | Parts] = lists:reverse(filename:split(DataDir)), - TSDir = filename:join(lists:reverse(Parts)), - AddPathDirs = case proplists:get_value(path_dirs, Config) of - undefined -> []; - Ds -> Ds - end, - PathDirs = [WorkDir,TSDir | AddPathDirs], - [true = rpc:call(Node, code, add_patha, [D]) || D <- PathDirs], - io:format("Dirs added to code path (on ~w):~n", - [Node]), - [io:format("~s~n", [D]) || D <- PathDirs], - - true = rpc:call(Node, os, putenv, - ["TEST_SERVER_FRAMEWORK", "undefined"]), - - ok = rpc:call(Node, file, set_cwd, [WorkDir]), - [{node,Node}, {work_dir,WorkDir} | Config]. 
- -post_end_per_testcase(_TC, Config, Return, State) -> - Node = proplists:get_value(node, Config), - Cover = test_server:is_cover(), - if Cover-> cover:flush(Node); - true -> ok - end, - erlang:monitor_node(Node, true), - slave:stop(Node), - receive - {nodedown, Node} -> - if Cover -> cover:stop(Node); - true -> ok - end - after 5000 -> - erlang:monitor_node(Node, false), - receive {nodedown, Node} -> ok after 0 -> ok end %flush - end, - {Return, State}. - -%% Parse an .suite log file -parse_suite(FileName) -> - - case file:open(FileName, [read, raw, read_ahead]) of - {ok, Fd} -> - Data = parse_suite(Fd, #suite{ }), - file:close(Fd), - {ok, Data}; - _ -> - error - end. - -fline(Fd) -> - case prim_file:read_line(Fd) of - eof -> eof; - {ok, Line} -> Line - end. - -parse_suite(Fd, S) -> - _Started = fline(Fd), - _Starting = fline(Fd), - "=cases" ++ NCases = fline(Fd), - "=user" ++ _User = fline(Fd), - "=host" ++ Host = fline(Fd), - "=hosts" ++ _Hosts = fline(Fd), - "=emulator_vsn" ++ Evsn = fline(Fd), - "=emulator" ++ Emu = fline(Fd), - "=otp_release" ++ OtpRel = fline(Fd), - "=started" ++ Start = fline(Fd), - NewS = parse_cases(Fd, S#suite{ - n_cases_expected = list_to_int(clean(NCases)), - host = list_to_binary(clean(Host)), - emulator_vsn = list_to_binary(clean(Evsn)), - emulator = list_to_binary(clean(Emu)), - otp_release = list_to_binary(clean(OtpRel)), - started = list_to_binary(clean(Start)) - }), - "=failed" ++ Failed = fline(Fd), - "=successful" ++ Succ = fline(Fd), - "=user_skipped" ++ UsrSkip = fline(Fd), - "=auto_skipped" ++ AutSkip = fline(Fd), - NewS#suite{ n_cases_failed = list_to_int(clean(Failed)), - n_cases_succ = list_to_int(clean(Succ)), - n_cases_user_skip = list_to_int(clean(UsrSkip)), - n_cases_auto_skip = list_to_int(clean(AutSkip)) }. - - -parse_cases(Fd, #suite{ n_cases = N, - cases = Cases } = S) -> - case parse_case(Fd) of - finished -> S#suite{ log_ok = true }; - {eof, Tc} -> - S#suite{ n_cases = N + 1, - cases = [Tc#tc{ result = crashed }|Cases]}; - {ok, Case} -> - parse_cases(Fd, S#suite{ n_cases = N + 1, - cases = [Case|Cases]}) - end. - -parse_case(Fd) -> parse_case(Fd, #tc{}). -parse_case(Fd, Tc) -> parse_case(fline(Fd), Fd, Tc). - -parse_case(eof, _, Tc) -> {eof, Tc}; -parse_case("=case" ++ Case, Fd, Tc) -> - Name = list_to_binary(clean(Case)), - parse_case(fline(Fd), Fd, Tc#tc{ name = Name }); -parse_case("=logfile" ++ File, Fd, Tc) -> - Log = list_to_binary(clean(File)), - parse_case(fline(Fd), Fd, Tc#tc{ logfile = Log }); -parse_case("=elapsed" ++ Elapsed, Fd, Tc) -> - {ok, [Time], _} = io_lib:fread("~f", clean(Elapsed)), - parse_case(fline(Fd), Fd, Tc#tc{ elapsed = Time }); -parse_case("=result" ++ Result, _, Tc) -> - case clean(Result) of - "ok" ++ _ -> - {ok, Tc#tc{ result = ok } }; - "failed" ++ _ -> - {ok, Tc#tc{ result = failed } }; - "skipped" ++ _ -> - {ok, Tc#tc{ result = skip } }; - "auto_skipped" ++ _ -> - {ok, Tc#tc{ result = auto_skip } } - end; -parse_case("=finished" ++ _ , _Fd, #tc{ name = undefined }) -> - finished; -parse_case(_, Fd, Tc) -> - parse_case(fline(Fd), Fd, Tc). - -skip([]) -> []; -skip([$ |Ts]) -> skip(Ts); -skip(Ts) -> Ts. - -%rmnl(L) -> L. -rmnl([]) -> []; -rmnl([$\n | Ts]) -> rmnl(Ts); -rmnl([T|Ts]) -> [T | rmnl(Ts)]. - -clean(L) -> - rmnl(skip(L)). - -list_to_int(L) -> - try - list_to_integer(L) - catch - _:_ -> - 0 - end. 
diff --git a/lib/test_server/test/test_server_test_lib.hrl b/lib/test_server/test/test_server_test_lib.hrl
deleted file mode 100644
index 27b7be9618..0000000000
--- a/lib/test_server/test/test_server_test_lib.hrl
+++ /dev/null
@@ -1,23 +0,0 @@
--record(tc, {
-          name,
-          result,
-          elapsed,
-          logfile
-         }).
-
--record(suite, {
-          application,
-          n_cases = 0,
-          n_cases_failed = 0,
-          n_cases_expected = 0,
-          n_cases_succ,
-          n_cases_user_skip,
-          n_cases_auto_skip,
-          cases = [],
-          host,
-          emulator_vsn,
-          emulator,
-          otp_release,
-          started,
-          log_ok = false
-         }).
diff --git a/lib/test_server/vsn.mk b/lib/test_server/vsn.mk
deleted file mode 100644
index 3a3815c557..0000000000
--- a/lib/test_server/vsn.mk
+++ /dev/null
@@ -1 +0,0 @@
-TEST_SERVER_VSN = 3.9.1